1/* $NetBSD: amdgpu_pm.c,v 1.5 2021/12/19 12:21:29 riastradh Exp $ */ 2 3/* 4 * Copyright 2017 Advanced Micro Devices, Inc. 5 * 6 * Permission is hereby granted, free of charge, to any person obtaining a 7 * copy of this software and associated documentation files (the "Software"), 8 * to deal in the Software without restriction, including without limitation 9 * the rights to use, copy, modify, merge, publish, distribute, sublicense, 10 * and/or sell copies of the Software, and to permit persons to whom the 11 * Software is furnished to do so, subject to the following conditions: 12 * 13 * The above copyright notice and this permission notice shall be included in 14 * all copies or substantial portions of the Software. 15 * 16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR 17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, 18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL 19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR 20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, 21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR 22 * OTHER DEALINGS IN THE SOFTWARE. 23 * 24 * Authors: Rafał Miłecki <zajec5@gmail.com> 25 * Alex Deucher <alexdeucher@gmail.com> 26 */ 27 28#include <sys/cdefs.h> 29__KERNEL_RCSID(0, "$NetBSD: amdgpu_pm.c,v 1.5 2021/12/19 12:21:29 riastradh Exp $"); 30 31#include <drm/drm_debugfs.h> 32 33#include "amdgpu.h" 34#include "amdgpu_drv.h" 35#include "amdgpu_pm.h" 36#include "amdgpu_dpm.h" 37#include "amdgpu_display.h" 38#include "amdgpu_smu.h" 39#include "atom.h" 40#include <linux/power_supply.h> 41#include <linux/pci.h> 42#include <linux/hwmon.h> 43#include <linux/hwmon-sysfs.h> 44#include <linux/nospec.h> 45#include <linux/pm_runtime.h> 46#include "hwmgr.h" 47#define WIDTH_4K 3840 48 49#ifndef __NetBSD__ /* XXX sysfs */ 50static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev); 51 52static const struct cg_flag_name clocks[] = { 53 {AMD_CG_SUPPORT_GFX_MGCG, "Graphics Medium Grain Clock Gating"}, 54 {AMD_CG_SUPPORT_GFX_MGLS, "Graphics Medium Grain memory Light Sleep"}, 55 {AMD_CG_SUPPORT_GFX_CGCG, "Graphics Coarse Grain Clock Gating"}, 56 {AMD_CG_SUPPORT_GFX_CGLS, "Graphics Coarse Grain memory Light Sleep"}, 57 {AMD_CG_SUPPORT_GFX_CGTS, "Graphics Coarse Grain Tree Shader Clock Gating"}, 58 {AMD_CG_SUPPORT_GFX_CGTS_LS, "Graphics Coarse Grain Tree Shader Light Sleep"}, 59 {AMD_CG_SUPPORT_GFX_CP_LS, "Graphics Command Processor Light Sleep"}, 60 {AMD_CG_SUPPORT_GFX_RLC_LS, "Graphics Run List Controller Light Sleep"}, 61 {AMD_CG_SUPPORT_GFX_3D_CGCG, "Graphics 3D Coarse Grain Clock Gating"}, 62 {AMD_CG_SUPPORT_GFX_3D_CGLS, "Graphics 3D Coarse Grain memory Light Sleep"}, 63 {AMD_CG_SUPPORT_MC_LS, "Memory Controller Light Sleep"}, 64 {AMD_CG_SUPPORT_MC_MGCG, "Memory Controller Medium Grain Clock Gating"}, 65 {AMD_CG_SUPPORT_SDMA_LS, "System Direct Memory Access Light Sleep"}, 66 {AMD_CG_SUPPORT_SDMA_MGCG, "System Direct Memory Access Medium Grain Clock Gating"}, 67 {AMD_CG_SUPPORT_BIF_MGCG, "Bus Interface Medium Grain Clock Gating"}, 68 {AMD_CG_SUPPORT_BIF_LS, "Bus Interface Light Sleep"}, 69 {AMD_CG_SUPPORT_UVD_MGCG, "Unified Video Decoder Medium Grain Clock Gating"}, 70 {AMD_CG_SUPPORT_VCE_MGCG, "Video Compression Engine Medium Grain Clock Gating"}, 71 {AMD_CG_SUPPORT_HDP_LS, "Host Data Path Light Sleep"}, 72 {AMD_CG_SUPPORT_HDP_MGCG, "Host Data Path Medium Grain Clock Gating"}, 73 {AMD_CG_SUPPORT_DRM_MGCG, "Digital 
Right Management Medium Grain Clock Gating"}, 74 {AMD_CG_SUPPORT_DRM_LS, "Digital Right Management Light Sleep"}, 75 {AMD_CG_SUPPORT_ROM_MGCG, "Rom Medium Grain Clock Gating"}, 76 {AMD_CG_SUPPORT_DF_MGCG, "Data Fabric Medium Grain Clock Gating"}, 77 78 {AMD_CG_SUPPORT_ATHUB_MGCG, "Address Translation Hub Medium Grain Clock Gating"}, 79 {AMD_CG_SUPPORT_ATHUB_LS, "Address Translation Hub Light Sleep"}, 80 {0, NULL}, 81}; 82 83static const struct hwmon_temp_label { 84 enum PP_HWMON_TEMP channel; 85 const char *label; 86} temp_label[] = { 87 {PP_TEMP_EDGE, "edge"}, 88 {PP_TEMP_JUNCTION, "junction"}, 89 {PP_TEMP_MEM, "mem"}, 90}; 91#endif /* __NetBSD__ */ 92 93void amdgpu_pm_acpi_event_handler(struct amdgpu_device *adev) 94{ 95 if (adev->pm.dpm_enabled) { 96 mutex_lock(&adev->pm.mutex); 97 if (power_supply_is_system_supplied() > 0) 98 adev->pm.ac_power = true; 99 else 100 adev->pm.ac_power = false; 101 if (adev->powerplay.pp_funcs->enable_bapm) 102 amdgpu_dpm_enable_bapm(adev, adev->pm.ac_power); 103 mutex_unlock(&adev->pm.mutex); 104 } 105} 106 107int amdgpu_dpm_read_sensor(struct amdgpu_device *adev, enum amd_pp_sensors sensor, 108 void *data, uint32_t *size) 109{ 110 int ret = 0; 111 112 if (!data || !size) 113 return -EINVAL; 114 115 if (is_support_sw_smu(adev)) 116 ret = smu_read_sensor(&adev->smu, sensor, data, size); 117 else { 118 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->read_sensor) 119 ret = adev->powerplay.pp_funcs->read_sensor((adev)->powerplay.pp_handle, 120 sensor, data, size); 121 else 122 ret = -EINVAL; 123 } 124 125 return ret; 126} 127 128#ifndef __NetBSD__ /* XXX sysfs */ 129/** 130 * DOC: power_dpm_state 131 * 132 * The power_dpm_state file is a legacy interface and is only provided for 133 * backwards compatibility. The amdgpu driver provides a sysfs API for adjusting 134 * certain power related parameters. The file power_dpm_state is used for this. 135 * It accepts the following arguments: 136 * 137 * - battery 138 * 139 * - balanced 140 * 141 * - performance 142 * 143 * battery 144 * 145 * On older GPUs, the vbios provided a special power state for battery 146 * operation. Selecting battery switched to this state. This is no 147 * longer provided on newer GPUs so the option does nothing in that case. 148 * 149 * balanced 150 * 151 * On older GPUs, the vbios provided a special power state for balanced 152 * operation. Selecting balanced switched to this state. This is no 153 * longer provided on newer GPUs so the option does nothing in that case. 154 * 155 * performance 156 * 157 * On older GPUs, the vbios provided a special power state for performance 158 * operation. Selecting performance switched to this state. This is no 159 * longer provided on newer GPUs so the option does nothing in that case. 
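 *
 * For example (a minimal sketch; the card0 sysfs path is illustrative and
 * may differ per system), the state can be selected and read back with:
 *
 * .. code-block:: bash
 *
 *    echo "balanced" > /sys/class/drm/card0/device/power_dpm_state
 *    cat /sys/class/drm/card0/device/power_dpm_state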
160 * 161 */ 162static ssize_t amdgpu_get_dpm_state(struct device *dev, 163 struct device_attribute *attr, 164 char *buf) 165{ 166 struct drm_device *ddev = dev_get_drvdata(dev); 167 struct amdgpu_device *adev = ddev->dev_private; 168 enum amd_pm_state_type pm; 169 int ret; 170 171 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 172 return 0; 173 174 ret = pm_runtime_get_sync(ddev->dev); 175 if (ret < 0) 176 return ret; 177 178 if (is_support_sw_smu(adev)) { 179 if (adev->smu.ppt_funcs->get_current_power_state) 180 pm = smu_get_current_power_state(&adev->smu); 181 else 182 pm = adev->pm.dpm.user_state; 183 } else if (adev->powerplay.pp_funcs->get_current_power_state) { 184 pm = amdgpu_dpm_get_current_power_state(adev); 185 } else { 186 pm = adev->pm.dpm.user_state; 187 } 188 189 pm_runtime_mark_last_busy(ddev->dev); 190 pm_runtime_put_autosuspend(ddev->dev); 191 192 return snprintf(buf, PAGE_SIZE, "%s\n", 193 (pm == POWER_STATE_TYPE_BATTERY) ? "battery" : 194 (pm == POWER_STATE_TYPE_BALANCED) ? "balanced" : "performance"); 195} 196 197static ssize_t amdgpu_set_dpm_state(struct device *dev, 198 struct device_attribute *attr, 199 const char *buf, 200 size_t count) 201{ 202 struct drm_device *ddev = dev_get_drvdata(dev); 203 struct amdgpu_device *adev = ddev->dev_private; 204 enum amd_pm_state_type state; 205 int ret; 206 207 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 208 return -EINVAL; 209 210 if (strncmp("battery", buf, strlen("battery")) == 0) 211 state = POWER_STATE_TYPE_BATTERY; 212 else if (strncmp("balanced", buf, strlen("balanced")) == 0) 213 state = POWER_STATE_TYPE_BALANCED; 214 else if (strncmp("performance", buf, strlen("performance")) == 0) 215 state = POWER_STATE_TYPE_PERFORMANCE; 216 else 217 return -EINVAL; 218 219 ret = pm_runtime_get_sync(ddev->dev); 220 if (ret < 0) 221 return ret; 222 223 if (is_support_sw_smu(adev)) { 224 mutex_lock(&adev->pm.mutex); 225 adev->pm.dpm.user_state = state; 226 mutex_unlock(&adev->pm.mutex); 227 } else if (adev->powerplay.pp_funcs->dispatch_tasks) { 228 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_ENABLE_USER_STATE, &state); 229 } else { 230 mutex_lock(&adev->pm.mutex); 231 adev->pm.dpm.user_state = state; 232 mutex_unlock(&adev->pm.mutex); 233 234 amdgpu_pm_compute_clocks(adev); 235 } 236 pm_runtime_mark_last_busy(ddev->dev); 237 pm_runtime_put_autosuspend(ddev->dev); 238 239 return count; 240} 241 242 243/** 244 * DOC: power_dpm_force_performance_level 245 * 246 * The amdgpu driver provides a sysfs API for adjusting certain power 247 * related parameters. The file power_dpm_force_performance_level is 248 * used for this. It accepts the following arguments: 249 * 250 * - auto 251 * 252 * - low 253 * 254 * - high 255 * 256 * - manual 257 * 258 * - profile_standard 259 * 260 * - profile_min_sclk 261 * 262 * - profile_min_mclk 263 * 264 * - profile_peak 265 * 266 * auto 267 * 268 * When auto is selected, the driver will attempt to dynamically select 269 * the optimal power profile for current conditions in the driver. 270 * 271 * low 272 * 273 * When low is selected, the clocks are forced to the lowest power state. 274 * 275 * high 276 * 277 * When high is selected, the clocks are forced to the highest power state. 
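 *
 * For example, to force the highest clocks for a benchmark run and then
 * return to automatic selection (the card0 path is illustrative):
 *
 * .. code-block:: bash
 *
 *    echo "high" > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *    echo "auto" > /sys/class/drm/card0/device/power_dpm_force_performance_level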
278 * 279 * manual 280 * 281 * When manual is selected, the user can manually adjust which power states 282 * are enabled for each clock domain via the sysfs pp_dpm_mclk, pp_dpm_sclk, 283 * and pp_dpm_pcie files and adjust the power state transition heuristics 284 * via the pp_power_profile_mode sysfs file. 285 * 286 * profile_standard 287 * profile_min_sclk 288 * profile_min_mclk 289 * profile_peak 290 * 291 * When the profiling modes are selected, clock and power gating are 292 * disabled and the clocks are set for different profiling cases. This 293 * mode is recommended for profiling specific work loads where you do 294 * not want clock or power gating for clock fluctuation to interfere 295 * with your results. profile_standard sets the clocks to a fixed clock 296 * level which varies from asic to asic. profile_min_sclk forces the sclk 297 * to the lowest level. profile_min_mclk forces the mclk to the lowest level. 298 * profile_peak sets all clocks (mclk, sclk, pcie) to the highest levels. 299 * 300 */ 301 302static ssize_t amdgpu_get_dpm_forced_performance_level(struct device *dev, 303 struct device_attribute *attr, 304 char *buf) 305{ 306 struct drm_device *ddev = dev_get_drvdata(dev); 307 struct amdgpu_device *adev = ddev->dev_private; 308 enum amd_dpm_forced_level level = 0xff; 309 int ret; 310 311 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 312 return 0; 313 314 ret = pm_runtime_get_sync(ddev->dev); 315 if (ret < 0) 316 return ret; 317 318 if (is_support_sw_smu(adev)) 319 level = smu_get_performance_level(&adev->smu); 320 else if (adev->powerplay.pp_funcs->get_performance_level) 321 level = amdgpu_dpm_get_performance_level(adev); 322 else 323 level = adev->pm.dpm.forced_level; 324 325 pm_runtime_mark_last_busy(ddev->dev); 326 pm_runtime_put_autosuspend(ddev->dev); 327 328 return snprintf(buf, PAGE_SIZE, "%s\n", 329 (level == AMD_DPM_FORCED_LEVEL_AUTO) ? "auto" : 330 (level == AMD_DPM_FORCED_LEVEL_LOW) ? "low" : 331 (level == AMD_DPM_FORCED_LEVEL_HIGH) ? "high" : 332 (level == AMD_DPM_FORCED_LEVEL_MANUAL) ? "manual" : 333 (level == AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD) ? "profile_standard" : 334 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK) ? "profile_min_sclk" : 335 (level == AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK) ? "profile_min_mclk" : 336 (level == AMD_DPM_FORCED_LEVEL_PROFILE_PEAK) ? 
"profile_peak" : 337 "unknown"); 338} 339 340static ssize_t amdgpu_set_dpm_forced_performance_level(struct device *dev, 341 struct device_attribute *attr, 342 const char *buf, 343 size_t count) 344{ 345 struct drm_device *ddev = dev_get_drvdata(dev); 346 struct amdgpu_device *adev = ddev->dev_private; 347 enum amd_dpm_forced_level level; 348 enum amd_dpm_forced_level current_level = 0xff; 349 int ret = 0; 350 351 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 352 return -EINVAL; 353 354 if (strncmp("low", buf, strlen("low")) == 0) { 355 level = AMD_DPM_FORCED_LEVEL_LOW; 356 } else if (strncmp("high", buf, strlen("high")) == 0) { 357 level = AMD_DPM_FORCED_LEVEL_HIGH; 358 } else if (strncmp("auto", buf, strlen("auto")) == 0) { 359 level = AMD_DPM_FORCED_LEVEL_AUTO; 360 } else if (strncmp("manual", buf, strlen("manual")) == 0) { 361 level = AMD_DPM_FORCED_LEVEL_MANUAL; 362 } else if (strncmp("profile_exit", buf, strlen("profile_exit")) == 0) { 363 level = AMD_DPM_FORCED_LEVEL_PROFILE_EXIT; 364 } else if (strncmp("profile_standard", buf, strlen("profile_standard")) == 0) { 365 level = AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD; 366 } else if (strncmp("profile_min_sclk", buf, strlen("profile_min_sclk")) == 0) { 367 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK; 368 } else if (strncmp("profile_min_mclk", buf, strlen("profile_min_mclk")) == 0) { 369 level = AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK; 370 } else if (strncmp("profile_peak", buf, strlen("profile_peak")) == 0) { 371 level = AMD_DPM_FORCED_LEVEL_PROFILE_PEAK; 372 } else { 373 return -EINVAL; 374 } 375 376 ret = pm_runtime_get_sync(ddev->dev); 377 if (ret < 0) 378 return ret; 379 380 if (is_support_sw_smu(adev)) 381 current_level = smu_get_performance_level(&adev->smu); 382 else if (adev->powerplay.pp_funcs->get_performance_level) 383 current_level = amdgpu_dpm_get_performance_level(adev); 384 385 if (current_level == level) { 386 pm_runtime_mark_last_busy(ddev->dev); 387 pm_runtime_put_autosuspend(ddev->dev); 388 return count; 389 } 390 391 /* profile_exit setting is valid only when current mode is in profile mode */ 392 if (!(current_level & (AMD_DPM_FORCED_LEVEL_PROFILE_STANDARD | 393 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_SCLK | 394 AMD_DPM_FORCED_LEVEL_PROFILE_MIN_MCLK | 395 AMD_DPM_FORCED_LEVEL_PROFILE_PEAK)) && 396 (level == AMD_DPM_FORCED_LEVEL_PROFILE_EXIT)) { 397 pr_err("Currently not in any profile mode!\n"); 398 pm_runtime_mark_last_busy(ddev->dev); 399 pm_runtime_put_autosuspend(ddev->dev); 400 return -EINVAL; 401 } 402 403 if (is_support_sw_smu(adev)) { 404 ret = smu_force_performance_level(&adev->smu, level); 405 if (ret) { 406 pm_runtime_mark_last_busy(ddev->dev); 407 pm_runtime_put_autosuspend(ddev->dev); 408 return -EINVAL; 409 } 410 } else if (adev->powerplay.pp_funcs->force_performance_level) { 411 mutex_lock(&adev->pm.mutex); 412 if (adev->pm.dpm.thermal_active) { 413 mutex_unlock(&adev->pm.mutex); 414 pm_runtime_mark_last_busy(ddev->dev); 415 pm_runtime_put_autosuspend(ddev->dev); 416 return -EINVAL; 417 } 418 ret = amdgpu_dpm_force_performance_level(adev, level); 419 if (ret) { 420 mutex_unlock(&adev->pm.mutex); 421 pm_runtime_mark_last_busy(ddev->dev); 422 pm_runtime_put_autosuspend(ddev->dev); 423 return -EINVAL; 424 } else { 425 adev->pm.dpm.forced_level = level; 426 } 427 mutex_unlock(&adev->pm.mutex); 428 } 429 pm_runtime_mark_last_busy(ddev->dev); 430 pm_runtime_put_autosuspend(ddev->dev); 431 432 return count; 433} 434 435static ssize_t amdgpu_get_pp_num_states(struct device *dev, 436 struct 
device_attribute *attr, 437 char *buf) 438{ 439 struct drm_device *ddev = dev_get_drvdata(dev); 440 struct amdgpu_device *adev = ddev->dev_private; 441 struct pp_states_info data; 442 int i, buf_len, ret; 443 444 ret = pm_runtime_get_sync(ddev->dev); 445 if (ret < 0) 446 return ret; 447 448 if (is_support_sw_smu(adev)) { 449 ret = smu_get_power_num_states(&adev->smu, &data); 450 if (ret) 451 return ret; 452 } else if (adev->powerplay.pp_funcs->get_pp_num_states) 453 amdgpu_dpm_get_pp_num_states(adev, &data); 454 455 pm_runtime_mark_last_busy(ddev->dev); 456 pm_runtime_put_autosuspend(ddev->dev); 457 458 buf_len = snprintf(buf, PAGE_SIZE, "states: %d\n", data.nums); 459 for (i = 0; i < data.nums; i++) 460 buf_len += snprintf(buf + buf_len, PAGE_SIZE, "%d %s\n", i, 461 (data.states[i] == POWER_STATE_TYPE_INTERNAL_BOOT) ? "boot" : 462 (data.states[i] == POWER_STATE_TYPE_BATTERY) ? "battery" : 463 (data.states[i] == POWER_STATE_TYPE_BALANCED) ? "balanced" : 464 (data.states[i] == POWER_STATE_TYPE_PERFORMANCE) ? "performance" : "default"); 465 466 return buf_len; 467} 468 469static ssize_t amdgpu_get_pp_cur_state(struct device *dev, 470 struct device_attribute *attr, 471 char *buf) 472{ 473 struct drm_device *ddev = dev_get_drvdata(dev); 474 struct amdgpu_device *adev = ddev->dev_private; 475 struct pp_states_info data; 476 struct smu_context *smu = &adev->smu; 477 enum amd_pm_state_type pm = 0; 478 int i = 0, ret = 0; 479 480 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 481 return 0; 482 483 ret = pm_runtime_get_sync(ddev->dev); 484 if (ret < 0) 485 return ret; 486 487 if (is_support_sw_smu(adev)) { 488 pm = smu_get_current_power_state(smu); 489 ret = smu_get_power_num_states(smu, &data); 490 if (ret) 491 return ret; 492 } else if (adev->powerplay.pp_funcs->get_current_power_state 493 && adev->powerplay.pp_funcs->get_pp_num_states) { 494 pm = amdgpu_dpm_get_current_power_state(adev); 495 amdgpu_dpm_get_pp_num_states(adev, &data); 496 } 497 498 pm_runtime_mark_last_busy(ddev->dev); 499 pm_runtime_put_autosuspend(ddev->dev); 500 501 for (i = 0; i < data.nums; i++) { 502 if (pm == data.states[i]) 503 break; 504 } 505 506 if (i == data.nums) 507 i = -EINVAL; 508 509 return snprintf(buf, PAGE_SIZE, "%d\n", i); 510} 511 512static ssize_t amdgpu_get_pp_force_state(struct device *dev, 513 struct device_attribute *attr, 514 char *buf) 515{ 516 struct drm_device *ddev = dev_get_drvdata(dev); 517 struct amdgpu_device *adev = ddev->dev_private; 518 519 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 520 return 0; 521 522 if (adev->pp_force_state_enabled) 523 return amdgpu_get_pp_cur_state(dev, attr, buf); 524 else 525 return snprintf(buf, PAGE_SIZE, "\n"); 526} 527 528static ssize_t amdgpu_set_pp_force_state(struct device *dev, 529 struct device_attribute *attr, 530 const char *buf, 531 size_t count) 532{ 533 struct drm_device *ddev = dev_get_drvdata(dev); 534 struct amdgpu_device *adev = ddev->dev_private; 535 enum amd_pm_state_type state = 0; 536 unsigned long idx; 537 int ret; 538 539 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 540 return -EINVAL; 541 542 if (strlen(buf) == 1) 543 adev->pp_force_state_enabled = false; 544 else if (is_support_sw_smu(adev)) 545 adev->pp_force_state_enabled = false; 546 else if (adev->powerplay.pp_funcs->dispatch_tasks && 547 adev->powerplay.pp_funcs->get_pp_num_states) { 548 struct pp_states_info data; 549 550 ret = kstrtoul(buf, 0, &idx); 551 if (ret || idx >= ARRAY_SIZE(data.states)) 552 return -EINVAL; 553 554 idx = 
array_index_nospec(idx, ARRAY_SIZE(data.states)); 555 556 amdgpu_dpm_get_pp_num_states(adev, &data); 557 state = data.states[idx]; 558 559 ret = pm_runtime_get_sync(ddev->dev); 560 if (ret < 0) 561 return ret; 562 563 /* only set user selected power states */ 564 if (state != POWER_STATE_TYPE_INTERNAL_BOOT && 565 state != POWER_STATE_TYPE_DEFAULT) { 566 amdgpu_dpm_dispatch_task(adev, 567 AMD_PP_TASK_ENABLE_USER_STATE, &state); 568 adev->pp_force_state_enabled = true; 569 } 570 pm_runtime_mark_last_busy(ddev->dev); 571 pm_runtime_put_autosuspend(ddev->dev); 572 } 573 574 return count; 575} 576 577/** 578 * DOC: pp_table 579 * 580 * The amdgpu driver provides a sysfs API for uploading new powerplay 581 * tables. The file pp_table is used for this. Reading the file 582 * will dump the current power play table. Writing to the file 583 * will attempt to upload a new powerplay table and re-initialize 584 * powerplay using that new table. 585 * 586 */ 587 588static ssize_t amdgpu_get_pp_table(struct device *dev, 589 struct device_attribute *attr, 590 char *buf) 591{ 592 struct drm_device *ddev = dev_get_drvdata(dev); 593 struct amdgpu_device *adev = ddev->dev_private; 594 char *table = NULL; 595 int size, ret; 596 597 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 598 return 0; 599 600 ret = pm_runtime_get_sync(ddev->dev); 601 if (ret < 0) 602 return ret; 603 604 if (is_support_sw_smu(adev)) { 605 size = smu_sys_get_pp_table(&adev->smu, (void **)&table); 606 pm_runtime_mark_last_busy(ddev->dev); 607 pm_runtime_put_autosuspend(ddev->dev); 608 if (size < 0) 609 return size; 610 } else if (adev->powerplay.pp_funcs->get_pp_table) { 611 size = amdgpu_dpm_get_pp_table(adev, &table); 612 pm_runtime_mark_last_busy(ddev->dev); 613 pm_runtime_put_autosuspend(ddev->dev); 614 if (size < 0) 615 return size; 616 } else { 617 pm_runtime_mark_last_busy(ddev->dev); 618 pm_runtime_put_autosuspend(ddev->dev); 619 return 0; 620 } 621 622 if (size >= PAGE_SIZE) 623 size = PAGE_SIZE - 1; 624 625 memcpy(buf, table, size); 626 627 return size; 628} 629 630static ssize_t amdgpu_set_pp_table(struct device *dev, 631 struct device_attribute *attr, 632 const char *buf, 633 size_t count) 634{ 635 struct drm_device *ddev = dev_get_drvdata(dev); 636 struct amdgpu_device *adev = ddev->dev_private; 637 int ret = 0; 638 639 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 640 return -EINVAL; 641 642 ret = pm_runtime_get_sync(ddev->dev); 643 if (ret < 0) 644 return ret; 645 646 if (is_support_sw_smu(adev)) { 647 ret = smu_sys_set_pp_table(&adev->smu, (void *)buf, count); 648 if (ret) { 649 pm_runtime_mark_last_busy(ddev->dev); 650 pm_runtime_put_autosuspend(ddev->dev); 651 return ret; 652 } 653 } else if (adev->powerplay.pp_funcs->set_pp_table) 654 amdgpu_dpm_set_pp_table(adev, buf, count); 655 656 pm_runtime_mark_last_busy(ddev->dev); 657 pm_runtime_put_autosuspend(ddev->dev); 658 659 return count; 660} 661 662/** 663 * DOC: pp_od_clk_voltage 664 * 665 * The amdgpu driver provides a sysfs API for adjusting the clocks and voltages 666 * in each power level within a power state. The pp_od_clk_voltage is used for 667 * this. 
668 * 669 * < For Vega10 and previous ASICs > 670 * 671 * Reading the file will display: 672 * 673 * - a list of engine clock levels and voltages labeled OD_SCLK 674 * 675 * - a list of memory clock levels and voltages labeled OD_MCLK 676 * 677 * - a list of valid ranges for sclk, mclk, and voltage labeled OD_RANGE 678 * 679 * To manually adjust these settings, first select manual using 680 * power_dpm_force_performance_level. Enter a new value for each 681 * level by writing a string that contains "s/m level clock voltage" to 682 * the file. E.g., "s 1 500 820" will update sclk level 1 to be 500 MHz 683 * at 820 mV; "m 0 350 810" will update mclk level 0 to be 350 MHz at 684 * 810 mV. When you have edited all of the states as needed, write 685 * "c" (commit) to the file to commit your changes. If you want to reset to the 686 * default power levels, write "r" (reset) to the file to reset them. 687 * 688 * 689 * < For Vega20 > 690 * 691 * Reading the file will display: 692 * 693 * - minimum and maximum engine clock labeled OD_SCLK 694 * 695 * - maximum memory clock labeled OD_MCLK 696 * 697 * - three <frequency, voltage> points labeled OD_VDDC_CURVE. 698 * They can be used to calibrate the sclk voltage curve. 699 * 700 * - a list of valid ranges for sclk, mclk, and voltage curve points 701 * labeled OD_RANGE 702 * 703 * To manually adjust these settings: 704 * 705 * - First select manual using power_dpm_force_performance_level 706 * 707 * - For clock frequency setting, enter a new value by writing a 708 * string that contains "s/m index clock" to the file. The index 709 * should be 0 to set the minimum clock and 1 to set the maximum 710 * clock. E.g., "s 0 500" will update the minimum sclk to 500 MHz and 711 * "m 1 800" will update the maximum mclk to 800 MHz. 712 * 713 * - For the sclk voltage curve, enter the new values by writing a 714 * string that contains "vc point clock voltage" to the file. The 715 * points are indexed by 0, 1 and 2. E.g., "vc 0 300 600" will 716 * update point 1 with a clock of 300 MHz and a voltage of 717 * 600 mV, and "vc 2 1000 1000" will update point 3 with a clock of 718 * 1000 MHz and a voltage of 1000 mV. 
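 *
 * For example, a Vega20-style session (the path and the clock/voltage
 * values are purely illustrative) could look as follows; the "c" and "r"
 * commands used to commit or reset the edits are described below:
 *
 * .. code-block:: bash
 *
 *    cd /sys/class/drm/card0/device
 *    echo "manual" > power_dpm_force_performance_level
 *    echo "s 1 2000" > pp_od_clk_voltage
 *    echo "vc 2 2000 1050" > pp_od_clk_voltage
 *    echo "c" > pp_od_clk_voltage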
719 * 720 * - When you have edited all of the states as needed, write "c" (commit) 721 * to the file to commit your changes 722 * 723 * - If you want to reset to the default power levels, write "r" (reset) 724 * to the file to reset them 725 * 726 */ 727 728static ssize_t amdgpu_set_pp_od_clk_voltage(struct device *dev, 729 struct device_attribute *attr, 730 const char *buf, 731 size_t count) 732{ 733 struct drm_device *ddev = dev_get_drvdata(dev); 734 struct amdgpu_device *adev = ddev->dev_private; 735 int ret; 736 uint32_t parameter_size = 0; 737 long parameter[64]; 738 char buf_cpy[128]; 739 char *tmp_str; 740 char *sub_str; 741 const char delimiter[3] = {' ', '\n', '\0'}; 742 uint32_t type; 743 744 if (amdgpu_sriov_vf(adev)) 745 return -EINVAL; 746 747 if (count > 127) 748 return -EINVAL; 749 750 if (*buf == 's') 751 type = PP_OD_EDIT_SCLK_VDDC_TABLE; 752 else if (*buf == 'm') 753 type = PP_OD_EDIT_MCLK_VDDC_TABLE; 754 else if(*buf == 'r') 755 type = PP_OD_RESTORE_DEFAULT_TABLE; 756 else if (*buf == 'c') 757 type = PP_OD_COMMIT_DPM_TABLE; 758 else if (!strncmp(buf, "vc", 2)) 759 type = PP_OD_EDIT_VDDC_CURVE; 760 else 761 return -EINVAL; 762 763 memcpy(buf_cpy, buf, count+1); 764 765 tmp_str = buf_cpy; 766 767 if (type == PP_OD_EDIT_VDDC_CURVE) 768 tmp_str++; 769 while (isspace(*++tmp_str)); 770 771 while (tmp_str[0]) { 772 sub_str = strsep(&tmp_str, delimiter); 773 ret = kstrtol(sub_str, 0, &parameter[parameter_size]); 774 if (ret) 775 return -EINVAL; 776 parameter_size++; 777 778 while (isspace(*tmp_str)) 779 tmp_str++; 780 } 781 782 ret = pm_runtime_get_sync(ddev->dev); 783 if (ret < 0) 784 return ret; 785 786 if (is_support_sw_smu(adev)) { 787 ret = smu_od_edit_dpm_table(&adev->smu, type, 788 parameter, parameter_size); 789 790 if (ret) { 791 pm_runtime_mark_last_busy(ddev->dev); 792 pm_runtime_put_autosuspend(ddev->dev); 793 return -EINVAL; 794 } 795 } else { 796 if (adev->powerplay.pp_funcs->odn_edit_dpm_table) { 797 ret = amdgpu_dpm_odn_edit_dpm_table(adev, type, 798 parameter, parameter_size); 799 if (ret) { 800 pm_runtime_mark_last_busy(ddev->dev); 801 pm_runtime_put_autosuspend(ddev->dev); 802 return -EINVAL; 803 } 804 } 805 806 if (type == PP_OD_COMMIT_DPM_TABLE) { 807 if (adev->powerplay.pp_funcs->dispatch_tasks) { 808 amdgpu_dpm_dispatch_task(adev, 809 AMD_PP_TASK_READJUST_POWER_STATE, 810 NULL); 811 pm_runtime_mark_last_busy(ddev->dev); 812 pm_runtime_put_autosuspend(ddev->dev); 813 return count; 814 } else { 815 pm_runtime_mark_last_busy(ddev->dev); 816 pm_runtime_put_autosuspend(ddev->dev); 817 return -EINVAL; 818 } 819 } 820 } 821 pm_runtime_mark_last_busy(ddev->dev); 822 pm_runtime_put_autosuspend(ddev->dev); 823 824 return count; 825} 826 827static ssize_t amdgpu_get_pp_od_clk_voltage(struct device *dev, 828 struct device_attribute *attr, 829 char *buf) 830{ 831 struct drm_device *ddev = dev_get_drvdata(dev); 832 struct amdgpu_device *adev = ddev->dev_private; 833 ssize_t size; 834 int ret; 835 836 if (amdgpu_sriov_vf(adev)) 837 return 0; 838 839 ret = pm_runtime_get_sync(ddev->dev); 840 if (ret < 0) 841 return ret; 842 843 if (is_support_sw_smu(adev)) { 844 size = smu_print_clk_levels(&adev->smu, SMU_OD_SCLK, buf); 845 size += smu_print_clk_levels(&adev->smu, SMU_OD_MCLK, buf+size); 846 size += smu_print_clk_levels(&adev->smu, SMU_OD_VDDC_CURVE, buf+size); 847 size += smu_print_clk_levels(&adev->smu, SMU_OD_RANGE, buf+size); 848 } else if (adev->powerplay.pp_funcs->print_clock_levels) { 849 size = amdgpu_dpm_print_clock_levels(adev, OD_SCLK, buf); 850 size += 
amdgpu_dpm_print_clock_levels(adev, OD_MCLK, buf+size); 851 size += amdgpu_dpm_print_clock_levels(adev, OD_VDDC_CURVE, buf+size); 852 size += amdgpu_dpm_print_clock_levels(adev, OD_RANGE, buf+size); 853 } else { 854 size = snprintf(buf, PAGE_SIZE, "\n"); 855 } 856 pm_runtime_mark_last_busy(ddev->dev); 857 pm_runtime_put_autosuspend(ddev->dev); 858 859 return size; 860} 861 862/** 863 * DOC: pp_features 864 * 865 * The amdgpu driver provides a sysfs API for adjusting what powerplay 866 * features to be enabled. The file pp_features is used for this. And 867 * this is only available for Vega10 and later dGPUs. 868 * 869 * Reading back the file will show you the followings: 870 * - Current ppfeature masks 871 * - List of the all supported powerplay features with their naming, 872 * bitmasks and enablement status('Y'/'N' means "enabled"/"disabled"). 873 * 874 * To manually enable or disable a specific feature, just set or clear 875 * the corresponding bit from original ppfeature masks and input the 876 * new ppfeature masks. 877 */ 878static ssize_t amdgpu_set_pp_feature_status(struct device *dev, 879 struct device_attribute *attr, 880 const char *buf, 881 size_t count) 882{ 883 struct drm_device *ddev = dev_get_drvdata(dev); 884 struct amdgpu_device *adev = ddev->dev_private; 885 uint64_t featuremask; 886 int ret; 887 888 if (amdgpu_sriov_vf(adev)) 889 return -EINVAL; 890 891 ret = kstrtou64(buf, 0, &featuremask); 892 if (ret) 893 return -EINVAL; 894 895 pr_debug("featuremask = 0x%llx\n", featuremask); 896 897 ret = pm_runtime_get_sync(ddev->dev); 898 if (ret < 0) 899 return ret; 900 901 if (is_support_sw_smu(adev)) { 902 ret = smu_sys_set_pp_feature_mask(&adev->smu, featuremask); 903 if (ret) { 904 pm_runtime_mark_last_busy(ddev->dev); 905 pm_runtime_put_autosuspend(ddev->dev); 906 return -EINVAL; 907 } 908 } else if (adev->powerplay.pp_funcs->set_ppfeature_status) { 909 ret = amdgpu_dpm_set_ppfeature_status(adev, featuremask); 910 if (ret) { 911 pm_runtime_mark_last_busy(ddev->dev); 912 pm_runtime_put_autosuspend(ddev->dev); 913 return -EINVAL; 914 } 915 } 916 pm_runtime_mark_last_busy(ddev->dev); 917 pm_runtime_put_autosuspend(ddev->dev); 918 919 return count; 920} 921 922static ssize_t amdgpu_get_pp_feature_status(struct device *dev, 923 struct device_attribute *attr, 924 char *buf) 925{ 926 struct drm_device *ddev = dev_get_drvdata(dev); 927 struct amdgpu_device *adev = ddev->dev_private; 928 ssize_t size; 929 int ret; 930 931 if (amdgpu_sriov_vf(adev)) 932 return 0; 933 934 ret = pm_runtime_get_sync(ddev->dev); 935 if (ret < 0) 936 return ret; 937 938 if (is_support_sw_smu(adev)) 939 size = smu_sys_get_pp_feature_mask(&adev->smu, buf); 940 else if (adev->powerplay.pp_funcs->get_ppfeature_status) 941 size = amdgpu_dpm_get_ppfeature_status(adev, buf); 942 else 943 size = snprintf(buf, PAGE_SIZE, "\n"); 944 945 pm_runtime_mark_last_busy(ddev->dev); 946 pm_runtime_put_autosuspend(ddev->dev); 947 948 return size; 949} 950 951/** 952 * DOC: pp_dpm_sclk pp_dpm_mclk pp_dpm_socclk pp_dpm_fclk pp_dpm_dcefclk pp_dpm_pcie 953 * 954 * The amdgpu driver provides a sysfs API for adjusting what power levels 955 * are enabled for a given power state. The files pp_dpm_sclk, pp_dpm_mclk, 956 * pp_dpm_socclk, pp_dpm_fclk, pp_dpm_dcefclk and pp_dpm_pcie are used for 957 * this. 958 * 959 * pp_dpm_socclk and pp_dpm_dcefclk interfaces are only available for 960 * Vega10 and later ASICs. 961 * pp_dpm_fclk interface is only available for Vega20 and later ASICs. 
962 * 963 * Reading back the files will show you the available power levels within 964 * the power state and the clock information for those levels. 965 * 966 * To manually adjust these states, first select manual using 967 * power_dpm_force_performance_level. 968 * Secondly, enter a new value for each level by inputing a string that 969 * contains " echo xx xx xx > pp_dpm_sclk/mclk/pcie" 970 * E.g., 971 * 972 * .. code-block:: bash 973 * 974 * echo "4 5 6" > pp_dpm_sclk 975 * 976 * will enable sclk levels 4, 5, and 6. 977 * 978 * NOTE: change to the dcefclk max dpm level is not supported now 979 */ 980 981static ssize_t amdgpu_get_pp_dpm_sclk(struct device *dev, 982 struct device_attribute *attr, 983 char *buf) 984{ 985 struct drm_device *ddev = dev_get_drvdata(dev); 986 struct amdgpu_device *adev = ddev->dev_private; 987 ssize_t size; 988 int ret; 989 990 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 991 return 0; 992 993 ret = pm_runtime_get_sync(ddev->dev); 994 if (ret < 0) 995 return ret; 996 997 if (is_support_sw_smu(adev)) 998 size = smu_print_clk_levels(&adev->smu, SMU_SCLK, buf); 999 else if (adev->powerplay.pp_funcs->print_clock_levels) 1000 size = amdgpu_dpm_print_clock_levels(adev, PP_SCLK, buf); 1001 else 1002 size = snprintf(buf, PAGE_SIZE, "\n"); 1003 1004 pm_runtime_mark_last_busy(ddev->dev); 1005 pm_runtime_put_autosuspend(ddev->dev); 1006 1007 return size; 1008} 1009 1010/* 1011 * Worst case: 32 bits individually specified, in octal at 12 characters 1012 * per line (+1 for \n). 1013 */ 1014#define AMDGPU_MASK_BUF_MAX (32 * 13) 1015 1016static ssize_t amdgpu_read_mask(const char *buf, size_t count, uint32_t *mask) 1017{ 1018 int ret; 1019 long level; 1020 char *sub_str = NULL; 1021 char *tmp; 1022 char buf_cpy[AMDGPU_MASK_BUF_MAX + 1]; 1023 const char delimiter[3] = {' ', '\n', '\0'}; 1024 size_t bytes; 1025 1026 *mask = 0; 1027 1028 bytes = min(count, sizeof(buf_cpy) - 1); 1029 memcpy(buf_cpy, buf, bytes); 1030 buf_cpy[bytes] = '\0'; 1031 tmp = buf_cpy; 1032 while (tmp[0]) { 1033 sub_str = strsep(&tmp, delimiter); 1034 if (strlen(sub_str)) { 1035 ret = kstrtol(sub_str, 0, &level); 1036 if (ret) 1037 return -EINVAL; 1038 *mask |= 1 << level; 1039 } else 1040 break; 1041 } 1042 1043 return 0; 1044} 1045 1046static ssize_t amdgpu_set_pp_dpm_sclk(struct device *dev, 1047 struct device_attribute *attr, 1048 const char *buf, 1049 size_t count) 1050{ 1051 struct drm_device *ddev = dev_get_drvdata(dev); 1052 struct amdgpu_device *adev = ddev->dev_private; 1053 int ret; 1054 uint32_t mask = 0; 1055 1056 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1057 return -EINVAL; 1058 1059 ret = amdgpu_read_mask(buf, count, &mask); 1060 if (ret) 1061 return ret; 1062 1063 ret = pm_runtime_get_sync(ddev->dev); 1064 if (ret < 0) 1065 return ret; 1066 1067 if (is_support_sw_smu(adev)) 1068 ret = smu_force_clk_levels(&adev->smu, SMU_SCLK, mask, true); 1069 else if (adev->powerplay.pp_funcs->force_clock_level) 1070 ret = amdgpu_dpm_force_clock_level(adev, PP_SCLK, mask); 1071 1072 pm_runtime_mark_last_busy(ddev->dev); 1073 pm_runtime_put_autosuspend(ddev->dev); 1074 1075 if (ret) 1076 return -EINVAL; 1077 1078 return count; 1079} 1080 1081static ssize_t amdgpu_get_pp_dpm_mclk(struct device *dev, 1082 struct device_attribute *attr, 1083 char *buf) 1084{ 1085 struct drm_device *ddev = dev_get_drvdata(dev); 1086 struct amdgpu_device *adev = ddev->dev_private; 1087 ssize_t size; 1088 int ret; 1089 1090 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 
1091 return 0; 1092 1093 ret = pm_runtime_get_sync(ddev->dev); 1094 if (ret < 0) 1095 return ret; 1096 1097 if (is_support_sw_smu(adev)) 1098 size = smu_print_clk_levels(&adev->smu, SMU_MCLK, buf); 1099 else if (adev->powerplay.pp_funcs->print_clock_levels) 1100 size = amdgpu_dpm_print_clock_levels(adev, PP_MCLK, buf); 1101 else 1102 size = snprintf(buf, PAGE_SIZE, "\n"); 1103 1104 pm_runtime_mark_last_busy(ddev->dev); 1105 pm_runtime_put_autosuspend(ddev->dev); 1106 1107 return size; 1108} 1109 1110static ssize_t amdgpu_set_pp_dpm_mclk(struct device *dev, 1111 struct device_attribute *attr, 1112 const char *buf, 1113 size_t count) 1114{ 1115 struct drm_device *ddev = dev_get_drvdata(dev); 1116 struct amdgpu_device *adev = ddev->dev_private; 1117 uint32_t mask = 0; 1118 int ret; 1119 1120 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1121 return -EINVAL; 1122 1123 ret = amdgpu_read_mask(buf, count, &mask); 1124 if (ret) 1125 return ret; 1126 1127 ret = pm_runtime_get_sync(ddev->dev); 1128 if (ret < 0) 1129 return ret; 1130 1131 if (is_support_sw_smu(adev)) 1132 ret = smu_force_clk_levels(&adev->smu, SMU_MCLK, mask, true); 1133 else if (adev->powerplay.pp_funcs->force_clock_level) 1134 ret = amdgpu_dpm_force_clock_level(adev, PP_MCLK, mask); 1135 1136 pm_runtime_mark_last_busy(ddev->dev); 1137 pm_runtime_put_autosuspend(ddev->dev); 1138 1139 if (ret) 1140 return -EINVAL; 1141 1142 return count; 1143} 1144 1145static ssize_t amdgpu_get_pp_dpm_socclk(struct device *dev, 1146 struct device_attribute *attr, 1147 char *buf) 1148{ 1149 struct drm_device *ddev = dev_get_drvdata(dev); 1150 struct amdgpu_device *adev = ddev->dev_private; 1151 ssize_t size; 1152 int ret; 1153 1154 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1155 return 0; 1156 1157 ret = pm_runtime_get_sync(ddev->dev); 1158 if (ret < 0) 1159 return ret; 1160 1161 if (is_support_sw_smu(adev)) 1162 size = smu_print_clk_levels(&adev->smu, SMU_SOCCLK, buf); 1163 else if (adev->powerplay.pp_funcs->print_clock_levels) 1164 size = amdgpu_dpm_print_clock_levels(adev, PP_SOCCLK, buf); 1165 else 1166 size = snprintf(buf, PAGE_SIZE, "\n"); 1167 1168 pm_runtime_mark_last_busy(ddev->dev); 1169 pm_runtime_put_autosuspend(ddev->dev); 1170 1171 return size; 1172} 1173 1174static ssize_t amdgpu_set_pp_dpm_socclk(struct device *dev, 1175 struct device_attribute *attr, 1176 const char *buf, 1177 size_t count) 1178{ 1179 struct drm_device *ddev = dev_get_drvdata(dev); 1180 struct amdgpu_device *adev = ddev->dev_private; 1181 int ret; 1182 uint32_t mask = 0; 1183 1184 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1185 return -EINVAL; 1186 1187 ret = amdgpu_read_mask(buf, count, &mask); 1188 if (ret) 1189 return ret; 1190 1191 ret = pm_runtime_get_sync(ddev->dev); 1192 if (ret < 0) 1193 return ret; 1194 1195 if (is_support_sw_smu(adev)) 1196 ret = smu_force_clk_levels(&adev->smu, SMU_SOCCLK, mask, true); 1197 else if (adev->powerplay.pp_funcs->force_clock_level) 1198 ret = amdgpu_dpm_force_clock_level(adev, PP_SOCCLK, mask); 1199 else 1200 ret = 0; 1201 1202 pm_runtime_mark_last_busy(ddev->dev); 1203 pm_runtime_put_autosuspend(ddev->dev); 1204 1205 if (ret) 1206 return -EINVAL; 1207 1208 return count; 1209} 1210 1211static ssize_t amdgpu_get_pp_dpm_fclk(struct device *dev, 1212 struct device_attribute *attr, 1213 char *buf) 1214{ 1215 struct drm_device *ddev = dev_get_drvdata(dev); 1216 struct amdgpu_device *adev = ddev->dev_private; 1217 ssize_t size; 1218 int ret; 1219 1220 if (amdgpu_sriov_vf(adev) 
&& !amdgpu_sriov_is_pp_one_vf(adev)) 1221 return 0; 1222 1223 ret = pm_runtime_get_sync(ddev->dev); 1224 if (ret < 0) 1225 return ret; 1226 1227 if (is_support_sw_smu(adev)) 1228 size = smu_print_clk_levels(&adev->smu, SMU_FCLK, buf); 1229 else if (adev->powerplay.pp_funcs->print_clock_levels) 1230 size = amdgpu_dpm_print_clock_levels(adev, PP_FCLK, buf); 1231 else 1232 size = snprintf(buf, PAGE_SIZE, "\n"); 1233 1234 pm_runtime_mark_last_busy(ddev->dev); 1235 pm_runtime_put_autosuspend(ddev->dev); 1236 1237 return size; 1238} 1239 1240static ssize_t amdgpu_set_pp_dpm_fclk(struct device *dev, 1241 struct device_attribute *attr, 1242 const char *buf, 1243 size_t count) 1244{ 1245 struct drm_device *ddev = dev_get_drvdata(dev); 1246 struct amdgpu_device *adev = ddev->dev_private; 1247 int ret; 1248 uint32_t mask = 0; 1249 1250 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1251 return -EINVAL; 1252 1253 ret = amdgpu_read_mask(buf, count, &mask); 1254 if (ret) 1255 return ret; 1256 1257 ret = pm_runtime_get_sync(ddev->dev); 1258 if (ret < 0) 1259 return ret; 1260 1261 if (is_support_sw_smu(adev)) 1262 ret = smu_force_clk_levels(&adev->smu, SMU_FCLK, mask, true); 1263 else if (adev->powerplay.pp_funcs->force_clock_level) 1264 ret = amdgpu_dpm_force_clock_level(adev, PP_FCLK, mask); 1265 else 1266 ret = 0; 1267 1268 pm_runtime_mark_last_busy(ddev->dev); 1269 pm_runtime_put_autosuspend(ddev->dev); 1270 1271 if (ret) 1272 return -EINVAL; 1273 1274 return count; 1275} 1276 1277static ssize_t amdgpu_get_pp_dpm_dcefclk(struct device *dev, 1278 struct device_attribute *attr, 1279 char *buf) 1280{ 1281 struct drm_device *ddev = dev_get_drvdata(dev); 1282 struct amdgpu_device *adev = ddev->dev_private; 1283 ssize_t size; 1284 int ret; 1285 1286 if (amdgpu_sriov_vf(adev)) 1287 return 0; 1288 1289 ret = pm_runtime_get_sync(ddev->dev); 1290 if (ret < 0) 1291 return ret; 1292 1293 if (is_support_sw_smu(adev)) 1294 size = smu_print_clk_levels(&adev->smu, SMU_DCEFCLK, buf); 1295 else if (adev->powerplay.pp_funcs->print_clock_levels) 1296 size = amdgpu_dpm_print_clock_levels(adev, PP_DCEFCLK, buf); 1297 else 1298 size = snprintf(buf, PAGE_SIZE, "\n"); 1299 1300 pm_runtime_mark_last_busy(ddev->dev); 1301 pm_runtime_put_autosuspend(ddev->dev); 1302 1303 return size; 1304} 1305 1306static ssize_t amdgpu_set_pp_dpm_dcefclk(struct device *dev, 1307 struct device_attribute *attr, 1308 const char *buf, 1309 size_t count) 1310{ 1311 struct drm_device *ddev = dev_get_drvdata(dev); 1312 struct amdgpu_device *adev = ddev->dev_private; 1313 int ret; 1314 uint32_t mask = 0; 1315 1316 if (amdgpu_sriov_vf(adev)) 1317 return -EINVAL; 1318 1319 ret = amdgpu_read_mask(buf, count, &mask); 1320 if (ret) 1321 return ret; 1322 1323 ret = pm_runtime_get_sync(ddev->dev); 1324 if (ret < 0) 1325 return ret; 1326 1327 if (is_support_sw_smu(adev)) 1328 ret = smu_force_clk_levels(&adev->smu, SMU_DCEFCLK, mask, true); 1329 else if (adev->powerplay.pp_funcs->force_clock_level) 1330 ret = amdgpu_dpm_force_clock_level(adev, PP_DCEFCLK, mask); 1331 else 1332 ret = 0; 1333 1334 pm_runtime_mark_last_busy(ddev->dev); 1335 pm_runtime_put_autosuspend(ddev->dev); 1336 1337 if (ret) 1338 return -EINVAL; 1339 1340 return count; 1341} 1342 1343static ssize_t amdgpu_get_pp_dpm_pcie(struct device *dev, 1344 struct device_attribute *attr, 1345 char *buf) 1346{ 1347 struct drm_device *ddev = dev_get_drvdata(dev); 1348 struct amdgpu_device *adev = ddev->dev_private; 1349 ssize_t size; 1350 int ret; 1351 1352 if (amdgpu_sriov_vf(adev) && 
!amdgpu_sriov_is_pp_one_vf(adev)) 1353 return 0; 1354 1355 ret = pm_runtime_get_sync(ddev->dev); 1356 if (ret < 0) 1357 return ret; 1358 1359 if (is_support_sw_smu(adev)) 1360 size = smu_print_clk_levels(&adev->smu, SMU_PCIE, buf); 1361 else if (adev->powerplay.pp_funcs->print_clock_levels) 1362 size = amdgpu_dpm_print_clock_levels(adev, PP_PCIE, buf); 1363 else 1364 size = snprintf(buf, PAGE_SIZE, "\n"); 1365 1366 pm_runtime_mark_last_busy(ddev->dev); 1367 pm_runtime_put_autosuspend(ddev->dev); 1368 1369 return size; 1370} 1371 1372static ssize_t amdgpu_set_pp_dpm_pcie(struct device *dev, 1373 struct device_attribute *attr, 1374 const char *buf, 1375 size_t count) 1376{ 1377 struct drm_device *ddev = dev_get_drvdata(dev); 1378 struct amdgpu_device *adev = ddev->dev_private; 1379 int ret; 1380 uint32_t mask = 0; 1381 1382 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1383 return -EINVAL; 1384 1385 ret = amdgpu_read_mask(buf, count, &mask); 1386 if (ret) 1387 return ret; 1388 1389 ret = pm_runtime_get_sync(ddev->dev); 1390 if (ret < 0) 1391 return ret; 1392 1393 if (is_support_sw_smu(adev)) 1394 ret = smu_force_clk_levels(&adev->smu, SMU_PCIE, mask, true); 1395 else if (adev->powerplay.pp_funcs->force_clock_level) 1396 ret = amdgpu_dpm_force_clock_level(adev, PP_PCIE, mask); 1397 else 1398 ret = 0; 1399 1400 pm_runtime_mark_last_busy(ddev->dev); 1401 pm_runtime_put_autosuspend(ddev->dev); 1402 1403 if (ret) 1404 return -EINVAL; 1405 1406 return count; 1407} 1408 1409static ssize_t amdgpu_get_pp_sclk_od(struct device *dev, 1410 struct device_attribute *attr, 1411 char *buf) 1412{ 1413 struct drm_device *ddev = dev_get_drvdata(dev); 1414 struct amdgpu_device *adev = ddev->dev_private; 1415 uint32_t value = 0; 1416 int ret; 1417 1418 if (amdgpu_sriov_vf(adev)) 1419 return 0; 1420 1421 ret = pm_runtime_get_sync(ddev->dev); 1422 if (ret < 0) 1423 return ret; 1424 1425 if (is_support_sw_smu(adev)) 1426 value = smu_get_od_percentage(&(adev->smu), SMU_OD_SCLK); 1427 else if (adev->powerplay.pp_funcs->get_sclk_od) 1428 value = amdgpu_dpm_get_sclk_od(adev); 1429 1430 pm_runtime_mark_last_busy(ddev->dev); 1431 pm_runtime_put_autosuspend(ddev->dev); 1432 1433 return snprintf(buf, PAGE_SIZE, "%d\n", value); 1434} 1435 1436static ssize_t amdgpu_set_pp_sclk_od(struct device *dev, 1437 struct device_attribute *attr, 1438 const char *buf, 1439 size_t count) 1440{ 1441 struct drm_device *ddev = dev_get_drvdata(dev); 1442 struct amdgpu_device *adev = ddev->dev_private; 1443 int ret; 1444 long int value; 1445 1446 if (amdgpu_sriov_vf(adev)) 1447 return -EINVAL; 1448 1449 ret = kstrtol(buf, 0, &value); 1450 1451 if (ret) 1452 return -EINVAL; 1453 1454 ret = pm_runtime_get_sync(ddev->dev); 1455 if (ret < 0) 1456 return ret; 1457 1458 if (is_support_sw_smu(adev)) { 1459 value = smu_set_od_percentage(&(adev->smu), SMU_OD_SCLK, (uint32_t)value); 1460 } else { 1461 if (adev->powerplay.pp_funcs->set_sclk_od) 1462 amdgpu_dpm_set_sclk_od(adev, (uint32_t)value); 1463 1464 if (adev->powerplay.pp_funcs->dispatch_tasks) { 1465 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL); 1466 } else { 1467 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1468 amdgpu_pm_compute_clocks(adev); 1469 } 1470 } 1471 1472 pm_runtime_mark_last_busy(ddev->dev); 1473 pm_runtime_put_autosuspend(ddev->dev); 1474 1475 return count; 1476} 1477 1478static ssize_t amdgpu_get_pp_mclk_od(struct device *dev, 1479 struct device_attribute *attr, 1480 char *buf) 1481{ 1482 struct drm_device *ddev = dev_get_drvdata(dev); 
1483 struct amdgpu_device *adev = ddev->dev_private; 1484 uint32_t value = 0; 1485 int ret; 1486 1487 if (amdgpu_sriov_vf(adev)) 1488 return 0; 1489 1490 ret = pm_runtime_get_sync(ddev->dev); 1491 if (ret < 0) 1492 return ret; 1493 1494 if (is_support_sw_smu(adev)) 1495 value = smu_get_od_percentage(&(adev->smu), SMU_OD_MCLK); 1496 else if (adev->powerplay.pp_funcs->get_mclk_od) 1497 value = amdgpu_dpm_get_mclk_od(adev); 1498 1499 pm_runtime_mark_last_busy(ddev->dev); 1500 pm_runtime_put_autosuspend(ddev->dev); 1501 1502 return snprintf(buf, PAGE_SIZE, "%d\n", value); 1503} 1504 1505static ssize_t amdgpu_set_pp_mclk_od(struct device *dev, 1506 struct device_attribute *attr, 1507 const char *buf, 1508 size_t count) 1509{ 1510 struct drm_device *ddev = dev_get_drvdata(dev); 1511 struct amdgpu_device *adev = ddev->dev_private; 1512 int ret; 1513 long int value; 1514 1515 if (amdgpu_sriov_vf(adev)) 1516 return 0; 1517 1518 ret = kstrtol(buf, 0, &value); 1519 1520 if (ret) 1521 return -EINVAL; 1522 1523 ret = pm_runtime_get_sync(ddev->dev); 1524 if (ret < 0) 1525 return ret; 1526 1527 if (is_support_sw_smu(adev)) { 1528 value = smu_set_od_percentage(&(adev->smu), SMU_OD_MCLK, (uint32_t)value); 1529 } else { 1530 if (adev->powerplay.pp_funcs->set_mclk_od) 1531 amdgpu_dpm_set_mclk_od(adev, (uint32_t)value); 1532 1533 if (adev->powerplay.pp_funcs->dispatch_tasks) { 1534 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_READJUST_POWER_STATE, NULL); 1535 } else { 1536 adev->pm.dpm.current_ps = adev->pm.dpm.boot_ps; 1537 amdgpu_pm_compute_clocks(adev); 1538 } 1539 } 1540 1541 pm_runtime_mark_last_busy(ddev->dev); 1542 pm_runtime_put_autosuspend(ddev->dev); 1543 1544 return count; 1545} 1546 1547/** 1548 * DOC: pp_power_profile_mode 1549 * 1550 * The amdgpu driver provides a sysfs API for adjusting the heuristics 1551 * related to switching between power levels in a power state. The file 1552 * pp_power_profile_mode is used for this. 1553 * 1554 * Reading this file outputs a list of all of the predefined power profiles 1555 * and the relevant heuristics settings for that profile. 1556 * 1557 * To select a profile or create a custom profile, first select manual using 1558 * power_dpm_force_performance_level. Writing the number of a predefined 1559 * profile to pp_power_profile_mode will enable those heuristics. To 1560 * create a custom set of heuristics, write a string of numbers to the file 1561 * starting with the number of the custom profile along with a setting 1562 * for each heuristic parameter. Due to differences across asic families 1563 * the heuristic parameters vary from family to family. 
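 *
 * For example (illustrative only; the set of profile numbers and the
 * meaning of the custom heuristic parameters vary by ASIC), a predefined
 * profile can be selected by writing its number:
 *
 * .. code-block:: bash
 *
 *    echo "manual" > /sys/class/drm/card0/device/power_dpm_force_performance_level
 *    echo "2" > /sys/class/drm/card0/device/pp_power_profile_mode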
1564 * 1565 */ 1566 1567static ssize_t amdgpu_get_pp_power_profile_mode(struct device *dev, 1568 struct device_attribute *attr, 1569 char *buf) 1570{ 1571 struct drm_device *ddev = dev_get_drvdata(dev); 1572 struct amdgpu_device *adev = ddev->dev_private; 1573 ssize_t size; 1574 int ret; 1575 1576 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1577 return 0; 1578 1579 ret = pm_runtime_get_sync(ddev->dev); 1580 if (ret < 0) 1581 return ret; 1582 1583 if (is_support_sw_smu(adev)) 1584 size = smu_get_power_profile_mode(&adev->smu, buf); 1585 else if (adev->powerplay.pp_funcs->get_power_profile_mode) 1586 size = amdgpu_dpm_get_power_profile_mode(adev, buf); 1587 else 1588 size = snprintf(buf, PAGE_SIZE, "\n"); 1589 1590 pm_runtime_mark_last_busy(ddev->dev); 1591 pm_runtime_put_autosuspend(ddev->dev); 1592 1593 return size; 1594} 1595 1596 1597static ssize_t amdgpu_set_pp_power_profile_mode(struct device *dev, 1598 struct device_attribute *attr, 1599 const char *buf, 1600 size_t count) 1601{ 1602 int ret = 0xff; 1603 struct drm_device *ddev = dev_get_drvdata(dev); 1604 struct amdgpu_device *adev = ddev->dev_private; 1605 uint32_t parameter_size = 0; 1606 long parameter[64]; 1607 char *sub_str, buf_cpy[128]; 1608 char *tmp_str; 1609 uint32_t i = 0; 1610 char tmp[2]; 1611 long int profile_mode = 0; 1612 const char delimiter[3] = {' ', '\n', '\0'}; 1613 1614 tmp[0] = *(buf); 1615 tmp[1] = '\0'; 1616 ret = kstrtol(tmp, 0, &profile_mode); 1617 if (ret) 1618 return -EINVAL; 1619 1620 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1621 return -EINVAL; 1622 1623 if (profile_mode == PP_SMC_POWER_PROFILE_CUSTOM) { 1624 if (count < 2 || count > 127) 1625 return -EINVAL; 1626 while (isspace(*++buf)) 1627 i++; 1628 memcpy(buf_cpy, buf, count-i); 1629 tmp_str = buf_cpy; 1630 while (tmp_str[0]) { 1631 sub_str = strsep(&tmp_str, delimiter); 1632 ret = kstrtol(sub_str, 0, &parameter[parameter_size]); 1633 if (ret) 1634 return -EINVAL; 1635 parameter_size++; 1636 while (isspace(*tmp_str)) 1637 tmp_str++; 1638 } 1639 } 1640 parameter[parameter_size] = profile_mode; 1641 1642 ret = pm_runtime_get_sync(ddev->dev); 1643 if (ret < 0) 1644 return ret; 1645 1646 if (is_support_sw_smu(adev)) 1647 ret = smu_set_power_profile_mode(&adev->smu, parameter, parameter_size, true); 1648 else if (adev->powerplay.pp_funcs->set_power_profile_mode) 1649 ret = amdgpu_dpm_set_power_profile_mode(adev, parameter, parameter_size); 1650 1651 pm_runtime_mark_last_busy(ddev->dev); 1652 pm_runtime_put_autosuspend(ddev->dev); 1653 1654 if (!ret) 1655 return count; 1656 1657 return -EINVAL; 1658} 1659 1660/** 1661 * DOC: busy_percent 1662 * 1663 * The amdgpu driver provides a sysfs API for reading how busy the GPU 1664 * is as a percentage. The file gpu_busy_percent is used for this. 1665 * The SMU firmware computes a percentage of load based on the 1666 * aggregate activity level in the IP cores. 
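 *
 * For example (the card0 path is illustrative):
 *
 * .. code-block:: bash
 *
 *    cat /sys/class/drm/card0/device/gpu_busy_percent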
1667 */ 1668static ssize_t amdgpu_get_busy_percent(struct device *dev, 1669 struct device_attribute *attr, 1670 char *buf) 1671{ 1672 struct drm_device *ddev = dev_get_drvdata(dev); 1673 struct amdgpu_device *adev = ddev->dev_private; 1674 int r, value, size = sizeof(value); 1675 1676 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1677 return 0; 1678 1679 r = pm_runtime_get_sync(ddev->dev); 1680 if (r < 0) 1681 return r; 1682 1683 /* read the IP busy sensor */ 1684 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, 1685 (void *)&value, &size); 1686 1687 pm_runtime_mark_last_busy(ddev->dev); 1688 pm_runtime_put_autosuspend(ddev->dev); 1689 1690 if (r) 1691 return r; 1692 1693 return snprintf(buf, PAGE_SIZE, "%d\n", value); 1694} 1695 1696/** 1697 * DOC: mem_busy_percent 1698 * 1699 * The amdgpu driver provides a sysfs API for reading how busy the VRAM 1700 * is as a percentage. The file mem_busy_percent is used for this. 1701 * The SMU firmware computes a percentage of load based on the 1702 * aggregate activity level in the IP cores. 1703 */ 1704static ssize_t amdgpu_get_memory_busy_percent(struct device *dev, 1705 struct device_attribute *attr, 1706 char *buf) 1707{ 1708 struct drm_device *ddev = dev_get_drvdata(dev); 1709 struct amdgpu_device *adev = ddev->dev_private; 1710 int r, value, size = sizeof(value); 1711 1712 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1713 return 0; 1714 1715 r = pm_runtime_get_sync(ddev->dev); 1716 if (r < 0) 1717 return r; 1718 1719 /* read the IP busy sensor */ 1720 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, 1721 (void *)&value, &size); 1722 1723 pm_runtime_mark_last_busy(ddev->dev); 1724 pm_runtime_put_autosuspend(ddev->dev); 1725 1726 if (r) 1727 return r; 1728 1729 return snprintf(buf, PAGE_SIZE, "%d\n", value); 1730} 1731 1732/** 1733 * DOC: pcie_bw 1734 * 1735 * The amdgpu driver provides a sysfs API for estimating how much data 1736 * has been received and sent by the GPU in the last second through PCIe. 1737 * The file pcie_bw is used for this. 1738 * The Perf counters count the number of received and sent messages and return 1739 * those values, as well as the maximum payload size of a PCIe packet (mps). 1740 * Note that it is not possible to easily and quickly obtain the size of each 1741 * packet transmitted, so we output the max payload size (mps) to allow for 1742 * quick estimation of the PCIe bandwidth usage 1743 */ 1744static ssize_t amdgpu_get_pcie_bw(struct device *dev, 1745 struct device_attribute *attr, 1746 char *buf) 1747{ 1748 struct drm_device *ddev = dev_get_drvdata(dev); 1749 struct amdgpu_device *adev = ddev->dev_private; 1750 uint64_t count0, count1; 1751 int ret; 1752 1753 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1754 return 0; 1755 1756 ret = pm_runtime_get_sync(ddev->dev); 1757 if (ret < 0) 1758 return ret; 1759 1760 amdgpu_asic_get_pcie_usage(adev, &count0, &count1); 1761 1762 pm_runtime_mark_last_busy(ddev->dev); 1763 pm_runtime_put_autosuspend(ddev->dev); 1764 1765 return snprintf(buf, PAGE_SIZE, "%llu %llu %i\n", 1766 count0, count1, pcie_get_mps(adev->pdev)); 1767} 1768 1769/** 1770 * DOC: unique_id 1771 * 1772 * The amdgpu driver provides a sysfs API for providing a unique ID for the GPU 1773 * The file unique_id is used for this. 1774 * This will provide a Unique ID that will persist from machine to machine 1775 * 1776 * NOTE: This will only work for GFX9 and newer. 
This file will be absent 1777 * on unsupported ASICs (GFX8 and older) 1778 */ 1779static ssize_t amdgpu_get_unique_id(struct device *dev, 1780 struct device_attribute *attr, 1781 char *buf) 1782{ 1783 struct drm_device *ddev = dev_get_drvdata(dev); 1784 struct amdgpu_device *adev = ddev->dev_private; 1785 1786 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 1787 return 0; 1788 1789 if (adev->unique_id) 1790 return snprintf(buf, PAGE_SIZE, "%016llx\n", adev->unique_id); 1791 1792 return 0; 1793} 1794 1795static DEVICE_ATTR(power_dpm_state, S_IRUGO | S_IWUSR, amdgpu_get_dpm_state, amdgpu_set_dpm_state); 1796static DEVICE_ATTR(power_dpm_force_performance_level, S_IRUGO | S_IWUSR, 1797 amdgpu_get_dpm_forced_performance_level, 1798 amdgpu_set_dpm_forced_performance_level); 1799static DEVICE_ATTR(pp_num_states, S_IRUGO, amdgpu_get_pp_num_states, NULL); 1800static DEVICE_ATTR(pp_cur_state, S_IRUGO, amdgpu_get_pp_cur_state, NULL); 1801static DEVICE_ATTR(pp_force_state, S_IRUGO | S_IWUSR, 1802 amdgpu_get_pp_force_state, 1803 amdgpu_set_pp_force_state); 1804static DEVICE_ATTR(pp_table, S_IRUGO | S_IWUSR, 1805 amdgpu_get_pp_table, 1806 amdgpu_set_pp_table); 1807static DEVICE_ATTR(pp_dpm_sclk, S_IRUGO | S_IWUSR, 1808 amdgpu_get_pp_dpm_sclk, 1809 amdgpu_set_pp_dpm_sclk); 1810static DEVICE_ATTR(pp_dpm_mclk, S_IRUGO | S_IWUSR, 1811 amdgpu_get_pp_dpm_mclk, 1812 amdgpu_set_pp_dpm_mclk); 1813static DEVICE_ATTR(pp_dpm_socclk, S_IRUGO | S_IWUSR, 1814 amdgpu_get_pp_dpm_socclk, 1815 amdgpu_set_pp_dpm_socclk); 1816static DEVICE_ATTR(pp_dpm_fclk, S_IRUGO | S_IWUSR, 1817 amdgpu_get_pp_dpm_fclk, 1818 amdgpu_set_pp_dpm_fclk); 1819static DEVICE_ATTR(pp_dpm_dcefclk, S_IRUGO | S_IWUSR, 1820 amdgpu_get_pp_dpm_dcefclk, 1821 amdgpu_set_pp_dpm_dcefclk); 1822static DEVICE_ATTR(pp_dpm_pcie, S_IRUGO | S_IWUSR, 1823 amdgpu_get_pp_dpm_pcie, 1824 amdgpu_set_pp_dpm_pcie); 1825static DEVICE_ATTR(pp_sclk_od, S_IRUGO | S_IWUSR, 1826 amdgpu_get_pp_sclk_od, 1827 amdgpu_set_pp_sclk_od); 1828static DEVICE_ATTR(pp_mclk_od, S_IRUGO | S_IWUSR, 1829 amdgpu_get_pp_mclk_od, 1830 amdgpu_set_pp_mclk_od); 1831static DEVICE_ATTR(pp_power_profile_mode, S_IRUGO | S_IWUSR, 1832 amdgpu_get_pp_power_profile_mode, 1833 amdgpu_set_pp_power_profile_mode); 1834static DEVICE_ATTR(pp_od_clk_voltage, S_IRUGO | S_IWUSR, 1835 amdgpu_get_pp_od_clk_voltage, 1836 amdgpu_set_pp_od_clk_voltage); 1837static DEVICE_ATTR(gpu_busy_percent, S_IRUGO, 1838 amdgpu_get_busy_percent, NULL); 1839static DEVICE_ATTR(mem_busy_percent, S_IRUGO, 1840 amdgpu_get_memory_busy_percent, NULL); 1841static DEVICE_ATTR(pcie_bw, S_IRUGO, amdgpu_get_pcie_bw, NULL); 1842static DEVICE_ATTR(pp_features, S_IRUGO | S_IWUSR, 1843 amdgpu_get_pp_feature_status, 1844 amdgpu_set_pp_feature_status); 1845static DEVICE_ATTR(unique_id, S_IRUGO, amdgpu_get_unique_id, NULL); 1846 1847static ssize_t amdgpu_hwmon_show_temp(struct device *dev, 1848 struct device_attribute *attr, 1849 char *buf) 1850{ 1851 struct amdgpu_device *adev = dev_get_drvdata(dev); 1852 int channel = to_sensor_dev_attr(attr)->index; 1853 int r, temp = 0, size = sizeof(temp); 1854 1855 if (channel >= PP_TEMP_MAX) 1856 return -EINVAL; 1857 1858 r = pm_runtime_get_sync(adev->ddev->dev); 1859 if (r < 0) 1860 return r; 1861 1862 switch (channel) { 1863 case PP_TEMP_JUNCTION: 1864 /* get current junction temperature */ 1865 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_HOTSPOT_TEMP, 1866 (void *)&temp, &size); 1867 break; 1868 case PP_TEMP_EDGE: 1869 /* get current edge temperature */ 1870 r = amdgpu_dpm_read_sensor(adev, 
AMDGPU_PP_SENSOR_EDGE_TEMP, 1871 (void *)&temp, &size); 1872 break; 1873 case PP_TEMP_MEM: 1874 /* get current memory temperature */ 1875 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_TEMP, 1876 (void *)&temp, &size); 1877 break; 1878 default: 1879 r = -EINVAL; 1880 break; 1881 } 1882 1883 pm_runtime_mark_last_busy(adev->ddev->dev); 1884 pm_runtime_put_autosuspend(adev->ddev->dev); 1885 1886 if (r) 1887 return r; 1888 1889 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 1890} 1891 1892static ssize_t amdgpu_hwmon_show_temp_thresh(struct device *dev, 1893 struct device_attribute *attr, 1894 char *buf) 1895{ 1896 struct amdgpu_device *adev = dev_get_drvdata(dev); 1897 int hyst = to_sensor_dev_attr(attr)->index; 1898 int temp; 1899 1900 if (hyst) 1901 temp = adev->pm.dpm.thermal.min_temp; 1902 else 1903 temp = adev->pm.dpm.thermal.max_temp; 1904 1905 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 1906} 1907 1908static ssize_t amdgpu_hwmon_show_hotspot_temp_thresh(struct device *dev, 1909 struct device_attribute *attr, 1910 char *buf) 1911{ 1912 struct amdgpu_device *adev = dev_get_drvdata(dev); 1913 int hyst = to_sensor_dev_attr(attr)->index; 1914 int temp; 1915 1916 if (hyst) 1917 temp = adev->pm.dpm.thermal.min_hotspot_temp; 1918 else 1919 temp = adev->pm.dpm.thermal.max_hotspot_crit_temp; 1920 1921 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 1922} 1923 1924static ssize_t amdgpu_hwmon_show_mem_temp_thresh(struct device *dev, 1925 struct device_attribute *attr, 1926 char *buf) 1927{ 1928 struct amdgpu_device *adev = dev_get_drvdata(dev); 1929 int hyst = to_sensor_dev_attr(attr)->index; 1930 int temp; 1931 1932 if (hyst) 1933 temp = adev->pm.dpm.thermal.min_mem_temp; 1934 else 1935 temp = adev->pm.dpm.thermal.max_mem_crit_temp; 1936 1937 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 1938} 1939 1940static ssize_t amdgpu_hwmon_show_temp_label(struct device *dev, 1941 struct device_attribute *attr, 1942 char *buf) 1943{ 1944 int channel = to_sensor_dev_attr(attr)->index; 1945 1946 if (channel >= PP_TEMP_MAX) 1947 return -EINVAL; 1948 1949 return snprintf(buf, PAGE_SIZE, "%s\n", temp_label[channel].label); 1950} 1951 1952static ssize_t amdgpu_hwmon_show_temp_emergency(struct device *dev, 1953 struct device_attribute *attr, 1954 char *buf) 1955{ 1956 struct amdgpu_device *adev = dev_get_drvdata(dev); 1957 int channel = to_sensor_dev_attr(attr)->index; 1958 int temp = 0; 1959 1960 if (channel >= PP_TEMP_MAX) 1961 return -EINVAL; 1962 1963 switch (channel) { 1964 case PP_TEMP_JUNCTION: 1965 temp = adev->pm.dpm.thermal.max_hotspot_emergency_temp; 1966 break; 1967 case PP_TEMP_EDGE: 1968 temp = adev->pm.dpm.thermal.max_edge_emergency_temp; 1969 break; 1970 case PP_TEMP_MEM: 1971 temp = adev->pm.dpm.thermal.max_mem_emergency_temp; 1972 break; 1973 } 1974 1975 return snprintf(buf, PAGE_SIZE, "%d\n", temp); 1976} 1977 1978static ssize_t amdgpu_hwmon_get_pwm1_enable(struct device *dev, 1979 struct device_attribute *attr, 1980 char *buf) 1981{ 1982 struct amdgpu_device *adev = dev_get_drvdata(dev); 1983 u32 pwm_mode = 0; 1984 int ret; 1985 1986 ret = pm_runtime_get_sync(adev->ddev->dev); 1987 if (ret < 0) 1988 return ret; 1989 1990 if (is_support_sw_smu(adev)) { 1991 pwm_mode = smu_get_fan_control_mode(&adev->smu); 1992 } else { 1993 if (!adev->powerplay.pp_funcs->get_fan_control_mode) { 1994 pm_runtime_mark_last_busy(adev->ddev->dev); 1995 pm_runtime_put_autosuspend(adev->ddev->dev); 1996 return -EINVAL; 1997 } 1998 1999 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); 2000 } 2001 2002 
pm_runtime_mark_last_busy(adev->ddev->dev); 2003 pm_runtime_put_autosuspend(adev->ddev->dev); 2004 2005 return sprintf(buf, "%i\n", pwm_mode); 2006} 2007 2008static ssize_t amdgpu_hwmon_set_pwm1_enable(struct device *dev, 2009 struct device_attribute *attr, 2010 const char *buf, 2011 size_t count) 2012{ 2013 struct amdgpu_device *adev = dev_get_drvdata(dev); 2014 int err, ret; 2015 int value; 2016 2017 err = kstrtoint(buf, 10, &value); 2018 if (err) 2019 return err; 2020 2021 ret = pm_runtime_get_sync(adev->ddev->dev); 2022 if (ret < 0) 2023 return ret; 2024 2025 if (is_support_sw_smu(adev)) { 2026 smu_set_fan_control_mode(&adev->smu, value); 2027 } else { 2028 if (!adev->powerplay.pp_funcs->set_fan_control_mode) { 2029 pm_runtime_mark_last_busy(adev->ddev->dev); 2030 pm_runtime_put_autosuspend(adev->ddev->dev); 2031 return -EINVAL; 2032 } 2033 2034 amdgpu_dpm_set_fan_control_mode(adev, value); 2035 } 2036 2037 pm_runtime_mark_last_busy(adev->ddev->dev); 2038 pm_runtime_put_autosuspend(adev->ddev->dev); 2039 2040 return count; 2041} 2042 2043static ssize_t amdgpu_hwmon_get_pwm1_min(struct device *dev, 2044 struct device_attribute *attr, 2045 char *buf) 2046{ 2047 return sprintf(buf, "%i\n", 0); 2048} 2049 2050static ssize_t amdgpu_hwmon_get_pwm1_max(struct device *dev, 2051 struct device_attribute *attr, 2052 char *buf) 2053{ 2054 return sprintf(buf, "%i\n", 255); 2055} 2056 2057static ssize_t amdgpu_hwmon_set_pwm1(struct device *dev, 2058 struct device_attribute *attr, 2059 const char *buf, size_t count) 2060{ 2061 struct amdgpu_device *adev = dev_get_drvdata(dev); 2062 int err; 2063 u32 value; 2064 u32 pwm_mode; 2065 2066 err = pm_runtime_get_sync(adev->ddev->dev); 2067 if (err < 0) 2068 return err; 2069 2070 if (is_support_sw_smu(adev)) 2071 pwm_mode = smu_get_fan_control_mode(&adev->smu); 2072 else 2073 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); 2074 2075 if (pwm_mode != AMD_FAN_CTRL_MANUAL) { 2076 pr_info("manual fan speed control should be enabled first\n"); 2077 pm_runtime_mark_last_busy(adev->ddev->dev); 2078 pm_runtime_put_autosuspend(adev->ddev->dev); 2079 return -EINVAL; 2080 } 2081 2082 err = kstrtou32(buf, 10, &value); 2083 if (err) { 2084 pm_runtime_mark_last_busy(adev->ddev->dev); 2085 pm_runtime_put_autosuspend(adev->ddev->dev); 2086 return err; 2087 } 2088 2089 value = (value * 100) / 255; 2090 2091 if (is_support_sw_smu(adev)) 2092 err = smu_set_fan_speed_percent(&adev->smu, value); 2093 else if (adev->powerplay.pp_funcs->set_fan_speed_percent) 2094 err = amdgpu_dpm_set_fan_speed_percent(adev, value); 2095 else 2096 err = -EINVAL; 2097 2098 pm_runtime_mark_last_busy(adev->ddev->dev); 2099 pm_runtime_put_autosuspend(adev->ddev->dev); 2100 2101 if (err) 2102 return err; 2103 2104 return count; 2105} 2106 2107static ssize_t amdgpu_hwmon_get_pwm1(struct device *dev, 2108 struct device_attribute *attr, 2109 char *buf) 2110{ 2111 struct amdgpu_device *adev = dev_get_drvdata(dev); 2112 int err; 2113 u32 speed = 0; 2114 2115 err = pm_runtime_get_sync(adev->ddev->dev); 2116 if (err < 0) 2117 return err; 2118 2119 if (is_support_sw_smu(adev)) 2120 err = smu_get_fan_speed_percent(&adev->smu, &speed); 2121 else if (adev->powerplay.pp_funcs->get_fan_speed_percent) 2122 err = amdgpu_dpm_get_fan_speed_percent(adev, &speed); 2123 else 2124 err = -EINVAL; 2125 2126 pm_runtime_mark_last_busy(adev->ddev->dev); 2127 pm_runtime_put_autosuspend(adev->ddev->dev); 2128 2129 if (err) 2130 return err; 2131 2132 speed = (speed * 255) / 100; 2133 2134 return sprintf(buf, "%i\n", speed); 2135} 
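/*
 * Note on the pwm1 scaling above: hwmon exposes the fan duty cycle on a
 * 0-255 scale, while the dpm/smu fan helpers take a percentage, so
 * amdgpu_hwmon_set_pwm1() scales with (value * 100) / 255 and
 * amdgpu_hwmon_get_pwm1() scales back with (speed * 255) / 100.  A minimal
 * sketch of that integer round trip (illustrative only, not used by the
 * driver; 192 is just an example input):
 */
#if 0
	u32 pwm = 192;			/* raw value written to pwm1 */
	u32 pct = (pwm * 100) / 255;	/* 75 -- percentage passed to dpm/smu */
	u32 back = (pct * 255) / 100;	/* 191 -- value read back from pwm1 */
#endif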
2136 2137static ssize_t amdgpu_hwmon_get_fan1_input(struct device *dev, 2138 struct device_attribute *attr, 2139 char *buf) 2140{ 2141 struct amdgpu_device *adev = dev_get_drvdata(dev); 2142 int err; 2143 u32 speed = 0; 2144 2145 err = pm_runtime_get_sync(adev->ddev->dev); 2146 if (err < 0) 2147 return err; 2148 2149 if (is_support_sw_smu(adev)) 2150 err = smu_get_fan_speed_rpm(&adev->smu, &speed); 2151 else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) 2152 err = amdgpu_dpm_get_fan_speed_rpm(adev, &speed); 2153 else 2154 err = -EINVAL; 2155 2156 pm_runtime_mark_last_busy(adev->ddev->dev); 2157 pm_runtime_put_autosuspend(adev->ddev->dev); 2158 2159 if (err) 2160 return err; 2161 2162 return sprintf(buf, "%i\n", speed); 2163} 2164 2165static ssize_t amdgpu_hwmon_get_fan1_min(struct device *dev, 2166 struct device_attribute *attr, 2167 char *buf) 2168{ 2169 struct amdgpu_device *adev = dev_get_drvdata(dev); 2170 u32 min_rpm = 0; 2171 u32 size = sizeof(min_rpm); 2172 int r; 2173 2174 r = pm_runtime_get_sync(adev->ddev->dev); 2175 if (r < 0) 2176 return r; 2177 2178 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MIN_FAN_RPM, 2179 (void *)&min_rpm, &size); 2180 2181 pm_runtime_mark_last_busy(adev->ddev->dev); 2182 pm_runtime_put_autosuspend(adev->ddev->dev); 2183 2184 if (r) 2185 return r; 2186 2187 return snprintf(buf, PAGE_SIZE, "%d\n", min_rpm); 2188} 2189 2190static ssize_t amdgpu_hwmon_get_fan1_max(struct device *dev, 2191 struct device_attribute *attr, 2192 char *buf) 2193{ 2194 struct amdgpu_device *adev = dev_get_drvdata(dev); 2195 u32 max_rpm = 0; 2196 u32 size = sizeof(max_rpm); 2197 int r; 2198 2199 r = pm_runtime_get_sync(adev->ddev->dev); 2200 if (r < 0) 2201 return r; 2202 2203 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MAX_FAN_RPM, 2204 (void *)&max_rpm, &size); 2205 2206 pm_runtime_mark_last_busy(adev->ddev->dev); 2207 pm_runtime_put_autosuspend(adev->ddev->dev); 2208 2209 if (r) 2210 return r; 2211 2212 return snprintf(buf, PAGE_SIZE, "%d\n", max_rpm); 2213} 2214 2215static ssize_t amdgpu_hwmon_get_fan1_target(struct device *dev, 2216 struct device_attribute *attr, 2217 char *buf) 2218{ 2219 struct amdgpu_device *adev = dev_get_drvdata(dev); 2220 int err; 2221 u32 rpm = 0; 2222 2223 err = pm_runtime_get_sync(adev->ddev->dev); 2224 if (err < 0) 2225 return err; 2226 2227 if (is_support_sw_smu(adev)) 2228 err = smu_get_fan_speed_rpm(&adev->smu, &rpm); 2229 else if (adev->powerplay.pp_funcs->get_fan_speed_rpm) 2230 err = amdgpu_dpm_get_fan_speed_rpm(adev, &rpm); 2231 else 2232 err = -EINVAL; 2233 2234 pm_runtime_mark_last_busy(adev->ddev->dev); 2235 pm_runtime_put_autosuspend(adev->ddev->dev); 2236 2237 if (err) 2238 return err; 2239 2240 return sprintf(buf, "%i\n", rpm); 2241} 2242 2243static ssize_t amdgpu_hwmon_set_fan1_target(struct device *dev, 2244 struct device_attribute *attr, 2245 const char *buf, size_t count) 2246{ 2247 struct amdgpu_device *adev = dev_get_drvdata(dev); 2248 int err; 2249 u32 value; 2250 u32 pwm_mode; 2251 2252 err = pm_runtime_get_sync(adev->ddev->dev); 2253 if (err < 0) 2254 return err; 2255 2256 if (is_support_sw_smu(adev)) 2257 pwm_mode = smu_get_fan_control_mode(&adev->smu); 2258 else 2259 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); 2260 2261 if (pwm_mode != AMD_FAN_CTRL_MANUAL) { 2262 pm_runtime_mark_last_busy(adev->ddev->dev); 2263 pm_runtime_put_autosuspend(adev->ddev->dev); 2264 return -ENODATA; 2265 } 2266 2267 err = kstrtou32(buf, 10, &value); 2268 if (err) { 2269 pm_runtime_mark_last_busy(adev->ddev->dev); 2270 
pm_runtime_put_autosuspend(adev->ddev->dev); 2271 return err; 2272 } 2273 2274 if (is_support_sw_smu(adev)) 2275 err = smu_set_fan_speed_rpm(&adev->smu, value); 2276 else if (adev->powerplay.pp_funcs->set_fan_speed_rpm) 2277 err = amdgpu_dpm_set_fan_speed_rpm(adev, value); 2278 else 2279 err = -EINVAL; 2280 2281 pm_runtime_mark_last_busy(adev->ddev->dev); 2282 pm_runtime_put_autosuspend(adev->ddev->dev); 2283 2284 if (err) 2285 return err; 2286 2287 return count; 2288} 2289 2290static ssize_t amdgpu_hwmon_get_fan1_enable(struct device *dev, 2291 struct device_attribute *attr, 2292 char *buf) 2293{ 2294 struct amdgpu_device *adev = dev_get_drvdata(dev); 2295 u32 pwm_mode = 0; 2296 int ret; 2297 2298 ret = pm_runtime_get_sync(adev->ddev->dev); 2299 if (ret < 0) 2300 return ret; 2301 2302 if (is_support_sw_smu(adev)) { 2303 pwm_mode = smu_get_fan_control_mode(&adev->smu); 2304 } else { 2305 if (!adev->powerplay.pp_funcs->get_fan_control_mode) { 2306 pm_runtime_mark_last_busy(adev->ddev->dev); 2307 pm_runtime_put_autosuspend(adev->ddev->dev); 2308 return -EINVAL; 2309 } 2310 2311 pwm_mode = amdgpu_dpm_get_fan_control_mode(adev); 2312 } 2313 2314 pm_runtime_mark_last_busy(adev->ddev->dev); 2315 pm_runtime_put_autosuspend(adev->ddev->dev); 2316 2317 return sprintf(buf, "%i\n", pwm_mode == AMD_FAN_CTRL_AUTO ? 0 : 1); 2318} 2319 2320static ssize_t amdgpu_hwmon_set_fan1_enable(struct device *dev, 2321 struct device_attribute *attr, 2322 const char *buf, 2323 size_t count) 2324{ 2325 struct amdgpu_device *adev = dev_get_drvdata(dev); 2326 int err; 2327 int value; 2328 u32 pwm_mode; 2329 2330 err = kstrtoint(buf, 10, &value); 2331 if (err) 2332 return err; 2333 2334 if (value == 0) 2335 pwm_mode = AMD_FAN_CTRL_AUTO; 2336 else if (value == 1) 2337 pwm_mode = AMD_FAN_CTRL_MANUAL; 2338 else 2339 return -EINVAL; 2340 2341 err = pm_runtime_get_sync(adev->ddev->dev); 2342 if (err < 0) 2343 return err; 2344 2345 if (is_support_sw_smu(adev)) { 2346 smu_set_fan_control_mode(&adev->smu, pwm_mode); 2347 } else { 2348 if (!adev->powerplay.pp_funcs->set_fan_control_mode) { 2349 pm_runtime_mark_last_busy(adev->ddev->dev); 2350 pm_runtime_put_autosuspend(adev->ddev->dev); 2351 return -EINVAL; 2352 } 2353 amdgpu_dpm_set_fan_control_mode(adev, pwm_mode); 2354 } 2355 2356 pm_runtime_mark_last_busy(adev->ddev->dev); 2357 pm_runtime_put_autosuspend(adev->ddev->dev); 2358 2359 return count; 2360} 2361 2362static ssize_t amdgpu_hwmon_show_vddgfx(struct device *dev, 2363 struct device_attribute *attr, 2364 char *buf) 2365{ 2366 struct amdgpu_device *adev = dev_get_drvdata(dev); 2367 u32 vddgfx; 2368 int r, size = sizeof(vddgfx); 2369 2370 r = pm_runtime_get_sync(adev->ddev->dev); 2371 if (r < 0) 2372 return r; 2373 2374 /* get the voltage */ 2375 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, 2376 (void *)&vddgfx, &size); 2377 2378 pm_runtime_mark_last_busy(adev->ddev->dev); 2379 pm_runtime_put_autosuspend(adev->ddev->dev); 2380 2381 if (r) 2382 return r; 2383 2384 return snprintf(buf, PAGE_SIZE, "%d\n", vddgfx); 2385} 2386 2387static ssize_t amdgpu_hwmon_show_vddgfx_label(struct device *dev, 2388 struct device_attribute *attr, 2389 char *buf) 2390{ 2391 return snprintf(buf, PAGE_SIZE, "vddgfx\n"); 2392} 2393 2394static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev, 2395 struct device_attribute *attr, 2396 char *buf) 2397{ 2398 struct amdgpu_device *adev = dev_get_drvdata(dev); 2399 u32 vddnb; 2400 int r, size = sizeof(vddnb); 2401 2402 /* only APUs have vddnb */ 2403 if (!(adev->flags & AMD_IS_APU)) 2404 
return -EINVAL; 2405 2406 r = pm_runtime_get_sync(adev->ddev->dev); 2407 if (r < 0) 2408 return r; 2409 2410 /* get the voltage */ 2411 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, 2412 (void *)&vddnb, &size); 2413 2414 pm_runtime_mark_last_busy(adev->ddev->dev); 2415 pm_runtime_put_autosuspend(adev->ddev->dev); 2416 2417 if (r) 2418 return r; 2419 2420 return snprintf(buf, PAGE_SIZE, "%d\n", vddnb); 2421} 2422 2423static ssize_t amdgpu_hwmon_show_vddnb_label(struct device *dev, 2424 struct device_attribute *attr, 2425 char *buf) 2426{ 2427 return snprintf(buf, PAGE_SIZE, "vddnb\n"); 2428} 2429 2430static ssize_t amdgpu_hwmon_show_power_avg(struct device *dev, 2431 struct device_attribute *attr, 2432 char *buf) 2433{ 2434 struct amdgpu_device *adev = dev_get_drvdata(dev); 2435 u32 query = 0; 2436 int r, size = sizeof(u32); 2437 unsigned uw; 2438 2439 r = pm_runtime_get_sync(adev->ddev->dev); 2440 if (r < 0) 2441 return r; 2442 2443 /* get the average GPU power */ 2444 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, 2445 (void *)&query, &size); 2446 2447 pm_runtime_mark_last_busy(adev->ddev->dev); 2448 pm_runtime_put_autosuspend(adev->ddev->dev); 2449 2450 if (r) 2451 return r; 2452 2453 /* convert to microwatts */ 2454 uw = (query >> 8) * 1000000 + (query & 0xff) * 1000; 2455 2456 return snprintf(buf, PAGE_SIZE, "%u\n", uw); 2457} 2458 2459static ssize_t amdgpu_hwmon_show_power_cap_min(struct device *dev, 2460 struct device_attribute *attr, 2461 char *buf) 2462{ 2463 return sprintf(buf, "%i\n", 0); 2464} 2465 2466static ssize_t amdgpu_hwmon_show_power_cap_max(struct device *dev, 2467 struct device_attribute *attr, 2468 char *buf) 2469{ 2470 struct amdgpu_device *adev = dev_get_drvdata(dev); 2471 uint32_t limit = 0; 2472 ssize_t size; 2473 int r; 2474 2475 r = pm_runtime_get_sync(adev->ddev->dev); 2476 if (r < 0) 2477 return r; 2478 2479 if (is_support_sw_smu(adev)) { 2480 smu_get_power_limit(&adev->smu, &limit, true, true); 2481 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 2482 } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { 2483 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, true); 2484 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 2485 } else { 2486 size = snprintf(buf, PAGE_SIZE, "\n"); 2487 } 2488 2489 pm_runtime_mark_last_busy(adev->ddev->dev); 2490 pm_runtime_put_autosuspend(adev->ddev->dev); 2491 2492 return size; 2493} 2494 2495static ssize_t amdgpu_hwmon_show_power_cap(struct device *dev, 2496 struct device_attribute *attr, 2497 char *buf) 2498{ 2499 struct amdgpu_device *adev = dev_get_drvdata(dev); 2500 uint32_t limit = 0; 2501 ssize_t size; 2502 int r; 2503 2504 r = pm_runtime_get_sync(adev->ddev->dev); 2505 if (r < 0) 2506 return r; 2507 2508 if (is_support_sw_smu(adev)) { 2509 smu_get_power_limit(&adev->smu, &limit, false, true); 2510 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 2511 } else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->get_power_limit) { 2512 adev->powerplay.pp_funcs->get_power_limit(adev->powerplay.pp_handle, &limit, false); 2513 size = snprintf(buf, PAGE_SIZE, "%u\n", limit * 1000000); 2514 } else { 2515 size = snprintf(buf, PAGE_SIZE, "\n"); 2516 } 2517 2518 pm_runtime_mark_last_busy(adev->ddev->dev); 2519 pm_runtime_put_autosuspend(adev->ddev->dev); 2520 2521 return size; 2522} 2523 2524 2525static ssize_t amdgpu_hwmon_set_power_cap(struct device *dev, 2526 struct device_attribute *attr, 2527 const char *buf, 2528 size_t
count) 2529{ 2530 struct amdgpu_device *adev = dev_get_drvdata(dev); 2531 int err; 2532 u32 value; 2533 2534 if (amdgpu_sriov_vf(adev)) 2535 return -EINVAL; 2536 2537 err = kstrtou32(buf, 10, &value); 2538 if (err) 2539 return err; 2540 2541 value = value / 1000000; /* convert from microwatts to watts */ 2542 2543 2544 err = pm_runtime_get_sync(adev->ddev->dev); 2545 if (err < 0) 2546 return err; 2547 2548 if (is_support_sw_smu(adev)) 2549 err = smu_set_power_limit(&adev->smu, value); 2550 else if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->set_power_limit) 2551 err = adev->powerplay.pp_funcs->set_power_limit(adev->powerplay.pp_handle, value); 2552 else 2553 err = -EINVAL; 2554 2555 pm_runtime_mark_last_busy(adev->ddev->dev); 2556 pm_runtime_put_autosuspend(adev->ddev->dev); 2557 2558 if (err) 2559 return err; 2560 2561 return count; 2562} 2563 2564static ssize_t amdgpu_hwmon_show_sclk(struct device *dev, 2565 struct device_attribute *attr, 2566 char *buf) 2567{ 2568 struct amdgpu_device *adev = dev_get_drvdata(dev); 2569 uint32_t sclk; 2570 int r, size = sizeof(sclk); 2571 2572 r = pm_runtime_get_sync(adev->ddev->dev); 2573 if (r < 0) 2574 return r; 2575 2576 /* get the sclk */ 2577 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, 2578 (void *)&sclk, &size); 2579 2580 pm_runtime_mark_last_busy(adev->ddev->dev); 2581 pm_runtime_put_autosuspend(adev->ddev->dev); 2582 2583 if (r) 2584 return r; 2585 2586 return snprintf(buf, PAGE_SIZE, "%d\n", sclk * 10 * 1000); 2587} 2588 2589static ssize_t amdgpu_hwmon_show_sclk_label(struct device *dev, 2590 struct device_attribute *attr, 2591 char *buf) 2592{ 2593 return snprintf(buf, PAGE_SIZE, "sclk\n"); 2594} 2595 2596static ssize_t amdgpu_hwmon_show_mclk(struct device *dev, 2597 struct device_attribute *attr, 2598 char *buf) 2599{ 2600 struct amdgpu_device *adev = dev_get_drvdata(dev); 2601 uint32_t mclk; 2602 int r, size = sizeof(mclk); 2603 2604 r = pm_runtime_get_sync(adev->ddev->dev); 2605 if (r < 0) 2606 return r; 2607 2608 /* get the mclk */ 2609 r = amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, 2610 (void *)&mclk, &size); 2611 2612 pm_runtime_mark_last_busy(adev->ddev->dev); 2613 pm_runtime_put_autosuspend(adev->ddev->dev); 2614 2615 if (r) 2616 return r; 2617 2618 return snprintf(buf, PAGE_SIZE, "%d\n", mclk * 10 * 1000); 2619} 2620 2621static ssize_t amdgpu_hwmon_show_mclk_label(struct device *dev, 2622 struct device_attribute *attr, 2623 char *buf) 2624{ 2625 return snprintf(buf, PAGE_SIZE, "mclk\n"); 2626} 2627 2628/** 2629 * DOC: hwmon 2630 * 2631 * The amdgpu driver exposes the following sensor interfaces: 2632 * 2633 * - GPU temperature (via the on-die sensor) 2634 * 2635 * - GPU voltage 2636 * 2637 * - Northbridge voltage (APUs only) 2638 * 2639 * - GPU power 2640 * 2641 * - GPU fan 2642 * 2643 * - GPU gfx/compute engine clock 2644 * 2645 * - GPU memory clock (dGPU only) 2646 * 2647 * hwmon interfaces for GPU temperature: 2648 * 2649 * - temp[1-3]_input: the on-die GPU temperature in millidegrees Celsius 2650 * - temp2_input and temp3_input are supported on SOC15 dGPUs only 2651 * 2652 * - temp[1-3]_label: temperature channel label 2653 * - temp2_label and temp3_label are supported on SOC15 dGPUs only 2654 * 2655 * - temp[1-3]_crit: temperature critical max value in millidegrees Celsius 2656 * - temp2_crit and temp3_crit are supported on SOC15 dGPUs only 2657 * 2658 * - temp[1-3]_crit_hyst: temperature hysteresis for critical limit in millidegrees Celsius 2659 * - temp2_crit_hyst and temp3_crit_hyst are supported on
SOC15 dGPUs only 2660 * 2661 * - temp[1-3]_emergency: temperature emergency max value (asic shutdown) in millidegrees Celsius 2662 * - these are supported on SOC15 dGPUs only 2663 * 2664 * hwmon interfaces for GPU voltage: 2665 * 2666 * - in0_input: the voltage on the GPU in millivolts 2667 * 2668 * - in1_input: the voltage on the Northbridge in millivolts 2669 * 2670 * hwmon interfaces for GPU power: 2671 * 2672 * - power1_average: average power used by the GPU in microWatts 2673 * 2674 * - power1_cap_min: minimum cap supported in microWatts 2675 * 2676 * - power1_cap_max: maximum cap supported in microWatts 2677 * 2678 * - power1_cap: selected power cap in microWatts 2679 * 2680 * hwmon interfaces for GPU fan: 2681 * 2682 * - pwm1: pulse width modulation fan level (0-255) 2683 * 2684 * - pwm1_enable: pulse width modulation fan control method (0: no fan speed control, 1: manual fan speed control using pwm interface, 2: automatic fan speed control) 2685 * 2686 * - pwm1_min: pulse width modulation fan control minimum level (0) 2687 * 2688 * - pwm1_max: pulse width modulation fan control maximum level (255) 2689 * 2690 * - fan1_min: minimum fan speed. Unit: revolutions/minute (RPM) 2691 * 2692 * - fan1_max: maximum fan speed. Unit: revolutions/minute (RPM) 2693 * 2694 * - fan1_input: fan speed in RPM 2695 * 2696 * - fan[1-\*]_target: desired fan speed. Unit: revolutions/minute (RPM) 2697 * 2698 * - fan[1-\*]_enable: enable or disable the sensors (1: enable, 0: disable) 2699 * 2700 * hwmon interfaces for GPU clocks: 2701 * 2702 * - freq1_input: the gfx/compute clock in hertz 2703 * 2704 * - freq2_input: the memory clock in hertz 2705 * 2706 * You can use hwmon tools like sensors to view this information on your system. A minimal userspace read of these files is sketched in a comment at the end of this file. 2707 * 2708 */ 2709 2710static SENSOR_DEVICE_ATTR(temp1_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_EDGE); 2711static SENSOR_DEVICE_ATTR(temp1_crit, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 0); 2712static SENSOR_DEVICE_ATTR(temp1_crit_hyst, S_IRUGO, amdgpu_hwmon_show_temp_thresh, NULL, 1); 2713static SENSOR_DEVICE_ATTR(temp1_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_EDGE); 2714static SENSOR_DEVICE_ATTR(temp2_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_JUNCTION); 2715static SENSOR_DEVICE_ATTR(temp2_crit, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 0); 2716static SENSOR_DEVICE_ATTR(temp2_crit_hyst, S_IRUGO, amdgpu_hwmon_show_hotspot_temp_thresh, NULL, 1); 2717static SENSOR_DEVICE_ATTR(temp2_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_JUNCTION); 2718static SENSOR_DEVICE_ATTR(temp3_input, S_IRUGO, amdgpu_hwmon_show_temp, NULL, PP_TEMP_MEM); 2719static SENSOR_DEVICE_ATTR(temp3_crit, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 0); 2720static SENSOR_DEVICE_ATTR(temp3_crit_hyst, S_IRUGO, amdgpu_hwmon_show_mem_temp_thresh, NULL, 1); 2721static SENSOR_DEVICE_ATTR(temp3_emergency, S_IRUGO, amdgpu_hwmon_show_temp_emergency, NULL, PP_TEMP_MEM); 2722static SENSOR_DEVICE_ATTR(temp1_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_EDGE); 2723static SENSOR_DEVICE_ATTR(temp2_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_JUNCTION); 2724static SENSOR_DEVICE_ATTR(temp3_label, S_IRUGO, amdgpu_hwmon_show_temp_label, NULL, PP_TEMP_MEM); 2725static SENSOR_DEVICE_ATTR(pwm1, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1, amdgpu_hwmon_set_pwm1, 0); 2726static SENSOR_DEVICE_ATTR(pwm1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_pwm1_enable, amdgpu_hwmon_set_pwm1_enable, 0); 2727static SENSOR_DEVICE_ATTR(pwm1_min,
S_IRUGO, amdgpu_hwmon_get_pwm1_min, NULL, 0); 2728static SENSOR_DEVICE_ATTR(pwm1_max, S_IRUGO, amdgpu_hwmon_get_pwm1_max, NULL, 0); 2729static SENSOR_DEVICE_ATTR(fan1_input, S_IRUGO, amdgpu_hwmon_get_fan1_input, NULL, 0); 2730static SENSOR_DEVICE_ATTR(fan1_min, S_IRUGO, amdgpu_hwmon_get_fan1_min, NULL, 0); 2731static SENSOR_DEVICE_ATTR(fan1_max, S_IRUGO, amdgpu_hwmon_get_fan1_max, NULL, 0); 2732static SENSOR_DEVICE_ATTR(fan1_target, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_target, amdgpu_hwmon_set_fan1_target, 0); 2733static SENSOR_DEVICE_ATTR(fan1_enable, S_IRUGO | S_IWUSR, amdgpu_hwmon_get_fan1_enable, amdgpu_hwmon_set_fan1_enable, 0); 2734static SENSOR_DEVICE_ATTR(in0_input, S_IRUGO, amdgpu_hwmon_show_vddgfx, NULL, 0); 2735static SENSOR_DEVICE_ATTR(in0_label, S_IRUGO, amdgpu_hwmon_show_vddgfx_label, NULL, 0); 2736static SENSOR_DEVICE_ATTR(in1_input, S_IRUGO, amdgpu_hwmon_show_vddnb, NULL, 0); 2737static SENSOR_DEVICE_ATTR(in1_label, S_IRUGO, amdgpu_hwmon_show_vddnb_label, NULL, 0); 2738static SENSOR_DEVICE_ATTR(power1_average, S_IRUGO, amdgpu_hwmon_show_power_avg, NULL, 0); 2739static SENSOR_DEVICE_ATTR(power1_cap_max, S_IRUGO, amdgpu_hwmon_show_power_cap_max, NULL, 0); 2740static SENSOR_DEVICE_ATTR(power1_cap_min, S_IRUGO, amdgpu_hwmon_show_power_cap_min, NULL, 0); 2741static SENSOR_DEVICE_ATTR(power1_cap, S_IRUGO | S_IWUSR, amdgpu_hwmon_show_power_cap, amdgpu_hwmon_set_power_cap, 0); 2742static SENSOR_DEVICE_ATTR(freq1_input, S_IRUGO, amdgpu_hwmon_show_sclk, NULL, 0); 2743static SENSOR_DEVICE_ATTR(freq1_label, S_IRUGO, amdgpu_hwmon_show_sclk_label, NULL, 0); 2744static SENSOR_DEVICE_ATTR(freq2_input, S_IRUGO, amdgpu_hwmon_show_mclk, NULL, 0); 2745static SENSOR_DEVICE_ATTR(freq2_label, S_IRUGO, amdgpu_hwmon_show_mclk_label, NULL, 0); 2746 2747static struct attribute *hwmon_attributes[] = { 2748 &sensor_dev_attr_temp1_input.dev_attr.attr, 2749 &sensor_dev_attr_temp1_crit.dev_attr.attr, 2750 &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr, 2751 &sensor_dev_attr_temp2_input.dev_attr.attr, 2752 &sensor_dev_attr_temp2_crit.dev_attr.attr, 2753 &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr, 2754 &sensor_dev_attr_temp3_input.dev_attr.attr, 2755 &sensor_dev_attr_temp3_crit.dev_attr.attr, 2756 &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr, 2757 &sensor_dev_attr_temp1_emergency.dev_attr.attr, 2758 &sensor_dev_attr_temp2_emergency.dev_attr.attr, 2759 &sensor_dev_attr_temp3_emergency.dev_attr.attr, 2760 &sensor_dev_attr_temp1_label.dev_attr.attr, 2761 &sensor_dev_attr_temp2_label.dev_attr.attr, 2762 &sensor_dev_attr_temp3_label.dev_attr.attr, 2763 &sensor_dev_attr_pwm1.dev_attr.attr, 2764 &sensor_dev_attr_pwm1_enable.dev_attr.attr, 2765 &sensor_dev_attr_pwm1_min.dev_attr.attr, 2766 &sensor_dev_attr_pwm1_max.dev_attr.attr, 2767 &sensor_dev_attr_fan1_input.dev_attr.attr, 2768 &sensor_dev_attr_fan1_min.dev_attr.attr, 2769 &sensor_dev_attr_fan1_max.dev_attr.attr, 2770 &sensor_dev_attr_fan1_target.dev_attr.attr, 2771 &sensor_dev_attr_fan1_enable.dev_attr.attr, 2772 &sensor_dev_attr_in0_input.dev_attr.attr, 2773 &sensor_dev_attr_in0_label.dev_attr.attr, 2774 &sensor_dev_attr_in1_input.dev_attr.attr, 2775 &sensor_dev_attr_in1_label.dev_attr.attr, 2776 &sensor_dev_attr_power1_average.dev_attr.attr, 2777 &sensor_dev_attr_power1_cap_max.dev_attr.attr, 2778 &sensor_dev_attr_power1_cap_min.dev_attr.attr, 2779 &sensor_dev_attr_power1_cap.dev_attr.attr, 2780 &sensor_dev_attr_freq1_input.dev_attr.attr, 2781 &sensor_dev_attr_freq1_label.dev_attr.attr, 2782 &sensor_dev_attr_freq2_input.dev_attr.attr, 2783 
&sensor_dev_attr_freq2_label.dev_attr.attr, 2784 NULL 2785}; 2786 2787static umode_t hwmon_attributes_visible(struct kobject *kobj, 2788 struct attribute *attr, int index) 2789{ 2790 struct device *dev = kobj_to_dev(kobj); 2791 struct amdgpu_device *adev = dev_get_drvdata(dev); 2792 umode_t effective_mode = attr->mode; 2793 2794 /* under multi-vf mode, the hwmon attributes are all not supported */ 2795 if (amdgpu_sriov_vf(adev) && !amdgpu_sriov_is_pp_one_vf(adev)) 2796 return 0; 2797 2798 /* there is no fan under pp one vf mode */ 2799 if (amdgpu_sriov_is_pp_one_vf(adev) && 2800 (attr == &sensor_dev_attr_pwm1.dev_attr.attr || 2801 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 2802 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 2803 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || 2804 attr == &sensor_dev_attr_fan1_input.dev_attr.attr || 2805 attr == &sensor_dev_attr_fan1_min.dev_attr.attr || 2806 attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 2807 attr == &sensor_dev_attr_fan1_target.dev_attr.attr || 2808 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) 2809 return 0; 2810 2811 /* Skip fan attributes if fan is not present */ 2812 if (adev->pm.no_fan && (attr == &sensor_dev_attr_pwm1.dev_attr.attr || 2813 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 2814 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 2815 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || 2816 attr == &sensor_dev_attr_fan1_input.dev_attr.attr || 2817 attr == &sensor_dev_attr_fan1_min.dev_attr.attr || 2818 attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 2819 attr == &sensor_dev_attr_fan1_target.dev_attr.attr || 2820 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) 2821 return 0; 2822 2823 /* Skip fan attributes on APU */ 2824 if ((adev->flags & AMD_IS_APU) && 2825 (attr == &sensor_dev_attr_pwm1.dev_attr.attr || 2826 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 2827 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 2828 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || 2829 attr == &sensor_dev_attr_fan1_input.dev_attr.attr || 2830 attr == &sensor_dev_attr_fan1_min.dev_attr.attr || 2831 attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 2832 attr == &sensor_dev_attr_fan1_target.dev_attr.attr || 2833 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) 2834 return 0; 2835 2836 /* Skip limit attributes if DPM is not enabled */ 2837 if (!adev->pm.dpm_enabled && 2838 (attr == &sensor_dev_attr_temp1_crit.dev_attr.attr || 2839 attr == &sensor_dev_attr_temp1_crit_hyst.dev_attr.attr || 2840 attr == &sensor_dev_attr_pwm1.dev_attr.attr || 2841 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr || 2842 attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 2843 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr || 2844 attr == &sensor_dev_attr_fan1_input.dev_attr.attr || 2845 attr == &sensor_dev_attr_fan1_min.dev_attr.attr || 2846 attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 2847 attr == &sensor_dev_attr_fan1_target.dev_attr.attr || 2848 attr == &sensor_dev_attr_fan1_enable.dev_attr.attr)) 2849 return 0; 2850 2851 if (!is_support_sw_smu(adev)) { 2852 /* mask fan attributes if we have no bindings for this asic to expose */ 2853 if ((!adev->powerplay.pp_funcs->get_fan_speed_percent && 2854 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't query fan */ 2855 (!adev->powerplay.pp_funcs->get_fan_control_mode && 2856 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't query state */ 2857 effective_mode &= ~S_IRUGO; 2858 2859 if 
((!adev->powerplay.pp_funcs->set_fan_speed_percent && 2860 attr == &sensor_dev_attr_pwm1.dev_attr.attr) || /* can't manage fan */ 2861 (!adev->powerplay.pp_funcs->set_fan_control_mode && 2862 attr == &sensor_dev_attr_pwm1_enable.dev_attr.attr)) /* can't manage state */ 2863 effective_mode &= ~S_IWUSR; 2864 } 2865 2866 if (((adev->flags & AMD_IS_APU) || 2867 adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */ 2868 adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */ 2869 (attr == &sensor_dev_attr_power1_average.dev_attr.attr || 2870 attr == &sensor_dev_attr_power1_cap_max.dev_attr.attr || 2871 attr == &sensor_dev_attr_power1_cap_min.dev_attr.attr|| 2872 attr == &sensor_dev_attr_power1_cap.dev_attr.attr)) 2873 return 0; 2874 2875 if (!is_support_sw_smu(adev)) { 2876 /* hide max/min values if we can't both query and manage the fan */ 2877 if ((!adev->powerplay.pp_funcs->set_fan_speed_percent && 2878 !adev->powerplay.pp_funcs->get_fan_speed_percent) && 2879 (!adev->powerplay.pp_funcs->set_fan_speed_rpm && 2880 !adev->powerplay.pp_funcs->get_fan_speed_rpm) && 2881 (attr == &sensor_dev_attr_pwm1_max.dev_attr.attr || 2882 attr == &sensor_dev_attr_pwm1_min.dev_attr.attr)) 2883 return 0; 2884 2885 if ((!adev->powerplay.pp_funcs->set_fan_speed_rpm && 2886 !adev->powerplay.pp_funcs->get_fan_speed_rpm) && 2887 (attr == &sensor_dev_attr_fan1_max.dev_attr.attr || 2888 attr == &sensor_dev_attr_fan1_min.dev_attr.attr)) 2889 return 0; 2890 } 2891 2892 if ((adev->family == AMDGPU_FAMILY_SI || /* not implemented yet */ 2893 adev->family == AMDGPU_FAMILY_KV) && /* not implemented yet */ 2894 (attr == &sensor_dev_attr_in0_input.dev_attr.attr || 2895 attr == &sensor_dev_attr_in0_label.dev_attr.attr)) 2896 return 0; 2897 2898 /* only APUs have vddnb */ 2899 if (!(adev->flags & AMD_IS_APU) && 2900 (attr == &sensor_dev_attr_in1_input.dev_attr.attr || 2901 attr == &sensor_dev_attr_in1_label.dev_attr.attr)) 2902 return 0; 2903 2904 /* no mclk on APUs */ 2905 if ((adev->flags & AMD_IS_APU) && 2906 (attr == &sensor_dev_attr_freq2_input.dev_attr.attr || 2907 attr == &sensor_dev_attr_freq2_label.dev_attr.attr)) 2908 return 0; 2909 2910 /* only SOC15 dGPUs support hotspot and mem temperatures */ 2911 if (((adev->flags & AMD_IS_APU) || 2912 adev->asic_type < CHIP_VEGA10) && 2913 (attr == &sensor_dev_attr_temp2_crit.dev_attr.attr || 2914 attr == &sensor_dev_attr_temp2_crit_hyst.dev_attr.attr || 2915 attr == &sensor_dev_attr_temp3_crit.dev_attr.attr || 2916 attr == &sensor_dev_attr_temp3_crit_hyst.dev_attr.attr || 2917 attr == &sensor_dev_attr_temp1_emergency.dev_attr.attr || 2918 attr == &sensor_dev_attr_temp2_emergency.dev_attr.attr || 2919 attr == &sensor_dev_attr_temp3_emergency.dev_attr.attr || 2920 attr == &sensor_dev_attr_temp2_input.dev_attr.attr || 2921 attr == &sensor_dev_attr_temp3_input.dev_attr.attr || 2922 attr == &sensor_dev_attr_temp2_label.dev_attr.attr || 2923 attr == &sensor_dev_attr_temp3_label.dev_attr.attr)) 2924 return 0; 2925 2926 return effective_mode; 2927} 2928 2929static const struct attribute_group hwmon_attrgroup = { 2930 .attrs = hwmon_attributes, 2931 .is_visible = hwmon_attributes_visible, 2932}; 2933 2934static const struct attribute_group *hwmon_groups[] = { 2935 &hwmon_attrgroup, 2936 NULL 2937}; 2938 2939#endif /* __NetBSD__ */ 2940 2941void amdgpu_dpm_thermal_work_handler(struct work_struct *work) 2942{ 2943 struct amdgpu_device *adev = 2944 container_of(work, struct amdgpu_device, 2945 pm.dpm.thermal.work); 2946 /* switch to the thermal state */ 2947 enum 
amd_pm_state_type dpm_state = POWER_STATE_TYPE_INTERNAL_THERMAL; 2948 int temp, size = sizeof(temp); 2949 2950 if (!adev->pm.dpm_enabled) 2951 return; 2952 2953 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, 2954 (void *)&temp, &size)) { 2955 if (temp < adev->pm.dpm.thermal.min_temp) 2956 /* switch back the user state */ 2957 dpm_state = adev->pm.dpm.user_state; 2958 } else { 2959 if (adev->pm.dpm.thermal.high_to_low) 2960 /* switch back the user state */ 2961 dpm_state = adev->pm.dpm.user_state; 2962 } 2963 mutex_lock(&adev->pm.mutex); 2964 if (dpm_state == POWER_STATE_TYPE_INTERNAL_THERMAL) 2965 adev->pm.dpm.thermal_active = true; 2966 else 2967 adev->pm.dpm.thermal_active = false; 2968 adev->pm.dpm.state = dpm_state; 2969 mutex_unlock(&adev->pm.mutex); 2970 2971 amdgpu_pm_compute_clocks(adev); 2972} 2973 2974static struct amdgpu_ps *amdgpu_dpm_pick_power_state(struct amdgpu_device *adev, 2975 enum amd_pm_state_type dpm_state) 2976{ 2977 int i; 2978 struct amdgpu_ps *ps; 2979 u32 ui_class; 2980 bool single_display = (adev->pm.dpm.new_active_crtc_count < 2) ? 2981 true : false; 2982 2983 /* check if the vblank period is too short to adjust the mclk */ 2984 if (single_display && adev->powerplay.pp_funcs->vblank_too_short) { 2985 if (amdgpu_dpm_vblank_too_short(adev)) 2986 single_display = false; 2987 } 2988 2989 /* certain older asics have a separare 3D performance state, 2990 * so try that first if the user selected performance 2991 */ 2992 if (dpm_state == POWER_STATE_TYPE_PERFORMANCE) 2993 dpm_state = POWER_STATE_TYPE_INTERNAL_3DPERF; 2994 /* balanced states don't exist at the moment */ 2995 if (dpm_state == POWER_STATE_TYPE_BALANCED) 2996 dpm_state = POWER_STATE_TYPE_PERFORMANCE; 2997 2998restart_search: 2999 /* Pick the best power state based on current conditions */ 3000 for (i = 0; i < adev->pm.dpm.num_ps; i++) { 3001 ps = &adev->pm.dpm.ps[i]; 3002 ui_class = ps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK; 3003 switch (dpm_state) { 3004 /* user states */ 3005 case POWER_STATE_TYPE_BATTERY: 3006 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BATTERY) { 3007 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { 3008 if (single_display) 3009 return ps; 3010 } else 3011 return ps; 3012 } 3013 break; 3014 case POWER_STATE_TYPE_BALANCED: 3015 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_BALANCED) { 3016 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { 3017 if (single_display) 3018 return ps; 3019 } else 3020 return ps; 3021 } 3022 break; 3023 case POWER_STATE_TYPE_PERFORMANCE: 3024 if (ui_class == ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) { 3025 if (ps->caps & ATOM_PPLIB_SINGLE_DISPLAY_ONLY) { 3026 if (single_display) 3027 return ps; 3028 } else 3029 return ps; 3030 } 3031 break; 3032 /* internal states */ 3033 case POWER_STATE_TYPE_INTERNAL_UVD: 3034 if (adev->pm.dpm.uvd_ps) 3035 return adev->pm.dpm.uvd_ps; 3036 else 3037 break; 3038 case POWER_STATE_TYPE_INTERNAL_UVD_SD: 3039 if (ps->class & ATOM_PPLIB_CLASSIFICATION_SDSTATE) 3040 return ps; 3041 break; 3042 case POWER_STATE_TYPE_INTERNAL_UVD_HD: 3043 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HDSTATE) 3044 return ps; 3045 break; 3046 case POWER_STATE_TYPE_INTERNAL_UVD_HD2: 3047 if (ps->class & ATOM_PPLIB_CLASSIFICATION_HD2STATE) 3048 return ps; 3049 break; 3050 case POWER_STATE_TYPE_INTERNAL_UVD_MVC: 3051 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_MVC) 3052 return ps; 3053 break; 3054 case POWER_STATE_TYPE_INTERNAL_BOOT: 3055 return adev->pm.dpm.boot_ps; 3056 case POWER_STATE_TYPE_INTERNAL_THERMAL: 3057 if (ps->class & 
ATOM_PPLIB_CLASSIFICATION_THERMAL) 3058 return ps; 3059 break; 3060 case POWER_STATE_TYPE_INTERNAL_ACPI: 3061 if (ps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) 3062 return ps; 3063 break; 3064 case POWER_STATE_TYPE_INTERNAL_ULV: 3065 if (ps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) 3066 return ps; 3067 break; 3068 case POWER_STATE_TYPE_INTERNAL_3DPERF: 3069 if (ps->class & ATOM_PPLIB_CLASSIFICATION_3DPERFORMANCE) 3070 return ps; 3071 break; 3072 default: 3073 break; 3074 } 3075 } 3076 /* use a fallback state if we didn't match */ 3077 switch (dpm_state) { 3078 case POWER_STATE_TYPE_INTERNAL_UVD_SD: 3079 dpm_state = POWER_STATE_TYPE_INTERNAL_UVD_HD; 3080 goto restart_search; 3081 case POWER_STATE_TYPE_INTERNAL_UVD_HD: 3082 case POWER_STATE_TYPE_INTERNAL_UVD_HD2: 3083 case POWER_STATE_TYPE_INTERNAL_UVD_MVC: 3084 if (adev->pm.dpm.uvd_ps) { 3085 return adev->pm.dpm.uvd_ps; 3086 } else { 3087 dpm_state = POWER_STATE_TYPE_PERFORMANCE; 3088 goto restart_search; 3089 } 3090 case POWER_STATE_TYPE_INTERNAL_THERMAL: 3091 dpm_state = POWER_STATE_TYPE_INTERNAL_ACPI; 3092 goto restart_search; 3093 case POWER_STATE_TYPE_INTERNAL_ACPI: 3094 dpm_state = POWER_STATE_TYPE_BATTERY; 3095 goto restart_search; 3096 case POWER_STATE_TYPE_BATTERY: 3097 case POWER_STATE_TYPE_BALANCED: 3098 case POWER_STATE_TYPE_INTERNAL_3DPERF: 3099 dpm_state = POWER_STATE_TYPE_PERFORMANCE; 3100 goto restart_search; 3101 default: 3102 break; 3103 } 3104 3105 return NULL; 3106} 3107 3108static void amdgpu_dpm_change_power_state_locked(struct amdgpu_device *adev) 3109{ 3110 struct amdgpu_ps *ps; 3111 enum amd_pm_state_type dpm_state; 3112 int ret; 3113 bool equal = false; 3114 3115 /* if dpm init failed */ 3116 if (!adev->pm.dpm_enabled) 3117 return; 3118 3119 if (adev->pm.dpm.user_state != adev->pm.dpm.state) { 3120 /* add other state override checks here */ 3121 if ((!adev->pm.dpm.thermal_active) && 3122 (!adev->pm.dpm.uvd_active)) 3123 adev->pm.dpm.state = adev->pm.dpm.user_state; 3124 } 3125 dpm_state = adev->pm.dpm.state; 3126 3127 ps = amdgpu_dpm_pick_power_state(adev, dpm_state); 3128 if (ps) 3129 adev->pm.dpm.requested_ps = ps; 3130 else 3131 return; 3132 3133 if (amdgpu_dpm == 1 && adev->powerplay.pp_funcs->print_power_state) { 3134 printk("switching from power state:\n"); 3135 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.current_ps); 3136 printk("switching to power state:\n"); 3137 amdgpu_dpm_print_power_state(adev, adev->pm.dpm.requested_ps); 3138 } 3139 3140 /* update whether vce is active */ 3141 ps->vce_active = adev->pm.dpm.vce_active; 3142 if (adev->powerplay.pp_funcs->display_configuration_changed) 3143 amdgpu_dpm_display_configuration_changed(adev); 3144 3145 ret = amdgpu_dpm_pre_set_power_state(adev); 3146 if (ret) 3147 return; 3148 3149 if (adev->powerplay.pp_funcs->check_state_equal) { 3150 if (0 != amdgpu_dpm_check_state_equal(adev, adev->pm.dpm.current_ps, adev->pm.dpm.requested_ps, &equal)) 3151 equal = false; 3152 } 3153 3154 if (equal) 3155 return; 3156 3157 amdgpu_dpm_set_power_state(adev); 3158 amdgpu_dpm_post_set_power_state(adev); 3159 3160 adev->pm.dpm.current_active_crtcs = adev->pm.dpm.new_active_crtcs; 3161 adev->pm.dpm.current_active_crtc_count = adev->pm.dpm.new_active_crtc_count; 3162 3163 if (adev->powerplay.pp_funcs->force_performance_level) { 3164 if (adev->pm.dpm.thermal_active) { 3165 enum amd_dpm_forced_level level = adev->pm.dpm.forced_level; 3166 /* force low perf level for thermal */ 3167 amdgpu_dpm_force_performance_level(adev, AMD_DPM_FORCED_LEVEL_LOW); 3168 /* save the user's level */ 
3169 adev->pm.dpm.forced_level = level; 3170 } else { 3171 /* otherwise, user selected level */ 3172 amdgpu_dpm_force_performance_level(adev, adev->pm.dpm.forced_level); 3173 } 3174 } 3175} 3176 3177void amdgpu_dpm_enable_uvd(struct amdgpu_device *adev, bool enable) 3178{ 3179 int ret = 0; 3180 3181 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_UVD, !enable); 3182 if (ret) 3183 DRM_ERROR("Dpm %s uvd failed, ret = %d. \n", 3184 enable ? "enable" : "disable", ret); 3185 3186 /* enable/disable Low Memory PState for UVD (4k videos) */ 3187 if (adev->asic_type == CHIP_STONEY && 3188 adev->uvd.decode_image_width >= WIDTH_4K) { 3189 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 3190 3191 if (hwmgr && hwmgr->hwmgr_func && 3192 hwmgr->hwmgr_func->update_nbdpm_pstate) 3193 hwmgr->hwmgr_func->update_nbdpm_pstate(hwmgr, 3194 !enable, 3195 true); 3196 } 3197} 3198 3199void amdgpu_dpm_enable_vce(struct amdgpu_device *adev, bool enable) 3200{ 3201 int ret = 0; 3202 3203 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_VCE, !enable); 3204 if (ret) 3205 DRM_ERROR("Dpm %s vce failed, ret = %d. \n", 3206 enable ? "enable" : "disable", ret); 3207} 3208 3209void amdgpu_pm_print_power_states(struct amdgpu_device *adev) 3210{ 3211 int i; 3212 3213 if (adev->powerplay.pp_funcs->print_power_state == NULL) 3214 return; 3215 3216 for (i = 0; i < adev->pm.dpm.num_ps; i++) 3217 amdgpu_dpm_print_power_state(adev, &adev->pm.dpm.ps[i]); 3218 3219} 3220 3221void amdgpu_dpm_enable_jpeg(struct amdgpu_device *adev, bool enable) 3222{ 3223 int ret = 0; 3224 3225 ret = amdgpu_dpm_set_powergating_by_smu(adev, AMD_IP_BLOCK_TYPE_JPEG, !enable); 3226 if (ret) 3227 DRM_ERROR("Dpm %s jpeg failed, ret = %d. \n", 3228 enable ? "enable" : "disable", ret); 3229} 3230 3231int amdgpu_pm_load_smu_firmware(struct amdgpu_device *adev, uint32_t *smu_version) 3232{ 3233 int r; 3234 3235 if (adev->powerplay.pp_funcs && adev->powerplay.pp_funcs->load_firmware) { 3236 r = adev->powerplay.pp_funcs->load_firmware(adev->powerplay.pp_handle); 3237 if (r) { 3238 pr_err("smu firmware loading failed\n"); 3239 return r; 3240 } 3241 *smu_version = adev->pm.fw_version; 3242 } 3243 return 0; 3244} 3245 3246int amdgpu_pm_sysfs_init(struct amdgpu_device *adev) 3247{ 3248#ifdef __NetBSD__ /* XXX sysfs */ 3249 return 0; 3250#else 3251 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 3252 int ret; 3253 3254 if (adev->pm.sysfs_initialized) 3255 return 0; 3256 3257 if (adev->pm.dpm_enabled == 0) 3258 return 0; 3259 3260 adev->pm.int_hwmon_dev = hwmon_device_register_with_groups(adev->dev, 3261 DRIVER_NAME, adev, 3262 hwmon_groups); 3263 if (IS_ERR(adev->pm.int_hwmon_dev)) { 3264 ret = PTR_ERR(adev->pm.int_hwmon_dev); 3265 dev_err(adev->dev, 3266 "Unable to register hwmon device: %d\n", ret); 3267 return ret; 3268 } 3269 3270 ret = device_create_file(adev->dev, &dev_attr_power_dpm_state); 3271 if (ret) { 3272 DRM_ERROR("failed to create device file for dpm state\n"); 3273 return ret; 3274 } 3275 ret = device_create_file(adev->dev, &dev_attr_power_dpm_force_performance_level); 3276 if (ret) { 3277 DRM_ERROR("failed to create device file for dpm state\n"); 3278 return ret; 3279 } 3280 3281 3282 ret = device_create_file(adev->dev, &dev_attr_pp_num_states); 3283 if (ret) { 3284 DRM_ERROR("failed to create device file pp_num_states\n"); 3285 return ret; 3286 } 3287 ret = device_create_file(adev->dev, &dev_attr_pp_cur_state); 3288 if (ret) { 3289 DRM_ERROR("failed to create device file pp_cur_state\n"); 3290 return ret; 3291 } 
3292 ret = device_create_file(adev->dev, &dev_attr_pp_force_state); 3293 if (ret) { 3294 DRM_ERROR("failed to create device file pp_force_state\n"); 3295 return ret; 3296 } 3297 ret = device_create_file(adev->dev, &dev_attr_pp_table); 3298 if (ret) { 3299 DRM_ERROR("failed to create device file pp_table\n"); 3300 return ret; 3301 } 3302 3303 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_sclk); 3304 if (ret) { 3305 DRM_ERROR("failed to create device file pp_dpm_sclk\n"); 3306 return ret; 3307 } 3308 3309 /* Arcturus does not support standalone mclk/socclk/fclk level setting */ 3310 if (adev->asic_type == CHIP_ARCTURUS) { 3311 dev_attr_pp_dpm_mclk.attr.mode &= ~S_IWUGO; 3312 dev_attr_pp_dpm_mclk.store = NULL; 3313 3314 dev_attr_pp_dpm_socclk.attr.mode &= ~S_IWUGO; 3315 dev_attr_pp_dpm_socclk.store = NULL; 3316 3317 dev_attr_pp_dpm_fclk.attr.mode &= ~S_IWUGO; 3318 dev_attr_pp_dpm_fclk.store = NULL; 3319 } 3320 3321 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_mclk); 3322 if (ret) { 3323 DRM_ERROR("failed to create device file pp_dpm_mclk\n"); 3324 return ret; 3325 } 3326 if (adev->asic_type >= CHIP_VEGA10) { 3327 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_socclk); 3328 if (ret) { 3329 DRM_ERROR("failed to create device file pp_dpm_socclk\n"); 3330 return ret; 3331 } 3332 if (adev->asic_type != CHIP_ARCTURUS) { 3333 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_dcefclk); 3334 if (ret) { 3335 DRM_ERROR("failed to create device file pp_dpm_dcefclk\n"); 3336 return ret; 3337 } 3338 } 3339 } 3340 if (adev->asic_type >= CHIP_VEGA20) { 3341 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_fclk); 3342 if (ret) { 3343 DRM_ERROR("failed to create device file pp_dpm_fclk\n"); 3344 return ret; 3345 } 3346 } 3347 if (adev->asic_type != CHIP_ARCTURUS) { 3348 ret = device_create_file(adev->dev, &dev_attr_pp_dpm_pcie); 3349 if (ret) { 3350 DRM_ERROR("failed to create device file pp_dpm_pcie\n"); 3351 return ret; 3352 } 3353 } 3354 ret = device_create_file(adev->dev, &dev_attr_pp_sclk_od); 3355 if (ret) { 3356 DRM_ERROR("failed to create device file pp_sclk_od\n"); 3357 return ret; 3358 } 3359 ret = device_create_file(adev->dev, &dev_attr_pp_mclk_od); 3360 if (ret) { 3361 DRM_ERROR("failed to create device file pp_mclk_od\n"); 3362 return ret; 3363 } 3364 ret = device_create_file(adev->dev, 3365 &dev_attr_pp_power_profile_mode); 3366 if (ret) { 3367 DRM_ERROR("failed to create device file " 3368 "pp_power_profile_mode\n"); 3369 return ret; 3370 } 3371 if ((is_support_sw_smu(adev) && adev->smu.od_enabled) || 3372 (!is_support_sw_smu(adev) && hwmgr->od_enabled)) { 3373 ret = device_create_file(adev->dev, 3374 &dev_attr_pp_od_clk_voltage); 3375 if (ret) { 3376 DRM_ERROR("failed to create device file " 3377 "pp_od_clk_voltage\n"); 3378 return ret; 3379 } 3380 } 3381 ret = device_create_file(adev->dev, 3382 &dev_attr_gpu_busy_percent); 3383 if (ret) { 3384 DRM_ERROR("failed to create device file " 3385 "gpu_busy_level\n"); 3386 return ret; 3387 } 3388 /* APU does not have its own dedicated memory */ 3389 if (!(adev->flags & AMD_IS_APU) && 3390 (adev->asic_type != CHIP_VEGA10)) { 3391 ret = device_create_file(adev->dev, 3392 &dev_attr_mem_busy_percent); 3393 if (ret) { 3394 DRM_ERROR("failed to create device file " 3395 "mem_busy_percent\n"); 3396 return ret; 3397 } 3398 } 3399 /* PCIe Perf counters won't work on APU nodes */ 3400 if (!(adev->flags & AMD_IS_APU)) { 3401 ret = device_create_file(adev->dev, &dev_attr_pcie_bw); 3402 if (ret) { 3403 DRM_ERROR("failed to create device 
file pcie_bw\n"); 3404 return ret; 3405 } 3406 } 3407 if (adev->unique_id) 3408 ret = device_create_file(adev->dev, &dev_attr_unique_id); 3409 if (ret) { 3410 DRM_ERROR("failed to create device file unique_id\n"); 3411 return ret; 3412 } 3413 ret = amdgpu_debugfs_pm_init(adev); 3414 if (ret) { 3415 DRM_ERROR("Failed to register debugfs file for dpm!\n"); 3416 return ret; 3417 } 3418 3419 if ((adev->asic_type >= CHIP_VEGA10) && 3420 !(adev->flags & AMD_IS_APU)) { 3421 ret = device_create_file(adev->dev, 3422 &dev_attr_pp_features); 3423 if (ret) { 3424 DRM_ERROR("failed to create device file " 3425 "pp_features\n"); 3426 return ret; 3427 } 3428 } 3429 3430 adev->pm.sysfs_initialized = true; 3431 3432 return 0; 3433#endif 3434} 3435 3436void amdgpu_pm_sysfs_fini(struct amdgpu_device *adev) 3437{ 3438#ifndef __NetBSD__ 3439 struct pp_hwmgr *hwmgr = adev->powerplay.pp_handle; 3440 3441 if (adev->pm.dpm_enabled == 0) 3442 return; 3443 3444 if (adev->pm.int_hwmon_dev) 3445 hwmon_device_unregister(adev->pm.int_hwmon_dev); 3446 device_remove_file(adev->dev, &dev_attr_power_dpm_state); 3447 device_remove_file(adev->dev, &dev_attr_power_dpm_force_performance_level); 3448 3449 device_remove_file(adev->dev, &dev_attr_pp_num_states); 3450 device_remove_file(adev->dev, &dev_attr_pp_cur_state); 3451 device_remove_file(adev->dev, &dev_attr_pp_force_state); 3452 device_remove_file(adev->dev, &dev_attr_pp_table); 3453 3454 device_remove_file(adev->dev, &dev_attr_pp_dpm_sclk); 3455 device_remove_file(adev->dev, &dev_attr_pp_dpm_mclk); 3456 if (adev->asic_type >= CHIP_VEGA10) { 3457 device_remove_file(adev->dev, &dev_attr_pp_dpm_socclk); 3458 if (adev->asic_type != CHIP_ARCTURUS) 3459 device_remove_file(adev->dev, &dev_attr_pp_dpm_dcefclk); 3460 } 3461 if (adev->asic_type != CHIP_ARCTURUS) 3462 device_remove_file(adev->dev, &dev_attr_pp_dpm_pcie); 3463 if (adev->asic_type >= CHIP_VEGA20) 3464 device_remove_file(adev->dev, &dev_attr_pp_dpm_fclk); 3465 device_remove_file(adev->dev, &dev_attr_pp_sclk_od); 3466 device_remove_file(adev->dev, &dev_attr_pp_mclk_od); 3467 device_remove_file(adev->dev, 3468 &dev_attr_pp_power_profile_mode); 3469 if ((is_support_sw_smu(adev) && adev->smu.od_enabled) || 3470 (!is_support_sw_smu(adev) && hwmgr->od_enabled)) 3471 device_remove_file(adev->dev, 3472 &dev_attr_pp_od_clk_voltage); 3473 device_remove_file(adev->dev, &dev_attr_gpu_busy_percent); 3474 if (!(adev->flags & AMD_IS_APU) && 3475 (adev->asic_type != CHIP_VEGA10)) 3476 device_remove_file(adev->dev, &dev_attr_mem_busy_percent); 3477 if (!(adev->flags & AMD_IS_APU)) 3478 device_remove_file(adev->dev, &dev_attr_pcie_bw); 3479 if (adev->unique_id) 3480 device_remove_file(adev->dev, &dev_attr_unique_id); 3481 if ((adev->asic_type >= CHIP_VEGA10) && 3482 !(adev->flags & AMD_IS_APU)) 3483 device_remove_file(adev->dev, &dev_attr_pp_features); 3484#endif 3485} 3486 3487void amdgpu_pm_compute_clocks(struct amdgpu_device *adev) 3488{ 3489 int i = 0; 3490 3491 if (!adev->pm.dpm_enabled) 3492 return; 3493 3494 if (adev->mode_info.num_crtc) 3495 amdgpu_display_bandwidth_update(adev); 3496 3497 for (i = 0; i < AMDGPU_MAX_RINGS; i++) { 3498 struct amdgpu_ring *ring = adev->rings[i]; 3499 if (ring && ring->sched.ready) 3500 amdgpu_fence_wait_empty(ring); 3501 } 3502 3503 if (is_support_sw_smu(adev)) { 3504 struct smu_dpm_context *smu_dpm = &adev->smu.smu_dpm; 3505 smu_handle_task(&adev->smu, 3506 smu_dpm->dpm_level, 3507 AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, 3508 true); 3509 } else { 3510 if (adev->powerplay.pp_funcs->dispatch_tasks) { 
3511 if (!amdgpu_device_has_dc_support(adev)) { 3512 mutex_lock(&adev->pm.mutex); 3513 amdgpu_dpm_get_active_displays(adev); 3514 adev->pm.pm_display_cfg.num_display = adev->pm.dpm.new_active_crtc_count; 3515 adev->pm.pm_display_cfg.vrefresh = amdgpu_dpm_get_vrefresh(adev); 3516 adev->pm.pm_display_cfg.min_vblank_time = amdgpu_dpm_get_vblank_time(adev); 3517 /* we have issues with mclk switching with refresh rates over 120 hz on the non-DC code. */ 3518 if (adev->pm.pm_display_cfg.vrefresh > 120) 3519 adev->pm.pm_display_cfg.min_vblank_time = 0; 3520 if (adev->powerplay.pp_funcs->display_configuration_change) 3521 adev->powerplay.pp_funcs->display_configuration_change( 3522 adev->powerplay.pp_handle, 3523 &adev->pm.pm_display_cfg); 3524 mutex_unlock(&adev->pm.mutex); 3525 } 3526 amdgpu_dpm_dispatch_task(adev, AMD_PP_TASK_DISPLAY_CONFIG_CHANGE, NULL); 3527 } else { 3528 mutex_lock(&adev->pm.mutex); 3529 amdgpu_dpm_get_active_displays(adev); 3530 amdgpu_dpm_change_power_state_locked(adev); 3531 mutex_unlock(&adev->pm.mutex); 3532 } 3533 } 3534} 3535 3536/* 3537 * Debugfs info 3538 */ 3539#if defined(CONFIG_DEBUG_FS) 3540 3541static int amdgpu_debugfs_pm_info_pp(struct seq_file *m, struct amdgpu_device *adev) 3542{ 3543 uint32_t value; 3544 uint64_t value64; 3545 uint32_t query = 0; 3546 int size; 3547 3548 /* GPU Clocks */ 3549 size = sizeof(value); 3550 seq_printf(m, "GFX Clocks and Power:\n"); 3551 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_MCLK, (void *)&value, &size)) 3552 seq_printf(m, "\t%u MHz (MCLK)\n", value/100); 3553 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GFX_SCLK, (void *)&value, &size)) 3554 seq_printf(m, "\t%u MHz (SCLK)\n", value/100); 3555 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_SCLK, (void *)&value, &size)) 3556 seq_printf(m, "\t%u MHz (PSTATE_SCLK)\n", value/100); 3557 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_STABLE_PSTATE_MCLK, (void *)&value, &size)) 3558 seq_printf(m, "\t%u MHz (PSTATE_MCLK)\n", value/100); 3559 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDGFX, (void *)&value, &size)) 3560 seq_printf(m, "\t%u mV (VDDGFX)\n", value); 3561 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VDDNB, (void *)&value, &size)) 3562 seq_printf(m, "\t%u mV (VDDNB)\n", value); 3563 size = sizeof(uint32_t); 3564 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_POWER, (void *)&query, &size)) 3565 seq_printf(m, "\t%u.%u W (average GPU)\n", query >> 8, query & 0xff); 3566 size = sizeof(value); 3567 seq_printf(m, "\n"); 3568 3569 /* GPU Temp */ 3570 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_TEMP, (void *)&value, &size)) 3571 seq_printf(m, "GPU Temperature: %u C\n", value/1000); 3572 3573 /* GPU Load */ 3574 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_GPU_LOAD, (void *)&value, &size)) 3575 seq_printf(m, "GPU Load: %u %%\n", value); 3576 /* MEM Load */ 3577 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_MEM_LOAD, (void *)&value, &size)) 3578 seq_printf(m, "MEM Load: %u %%\n", value); 3579 3580 seq_printf(m, "\n"); 3581 3582 /* SMC feature mask */ 3583 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_ENABLED_SMC_FEATURES_MASK, (void *)&value64, &size)) 3584 seq_printf(m, "SMC Feature Mask: 0x%016llx\n", value64); 3585 3586 if (adev->asic_type > CHIP_VEGA20) { 3587 /* VCN clocks */ 3588 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCN_POWER_STATE, (void *)&value, &size)) { 3589 if (!value) { 3590 seq_printf(m, "VCN: Disabled\n"); 3591 } else { 3592 seq_printf(m, "VCN: Enabled\n"); 3593 
if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) 3594 seq_printf(m, "\t%u MHz (DCLK)\n", value/100); 3595 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) 3596 seq_printf(m, "\t%u MHz (VCLK)\n", value/100); 3597 } 3598 } 3599 seq_printf(m, "\n"); 3600 } else { 3601 /* UVD clocks */ 3602 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_POWER, (void *)&value, &size)) { 3603 if (!value) { 3604 seq_printf(m, "UVD: Disabled\n"); 3605 } else { 3606 seq_printf(m, "UVD: Enabled\n"); 3607 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_DCLK, (void *)&value, &size)) 3608 seq_printf(m, "\t%u MHz (DCLK)\n", value/100); 3609 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_UVD_VCLK, (void *)&value, &size)) 3610 seq_printf(m, "\t%u MHz (VCLK)\n", value/100); 3611 } 3612 } 3613 seq_printf(m, "\n"); 3614 3615 /* VCE clocks */ 3616 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_POWER, (void *)&value, &size)) { 3617 if (!value) { 3618 seq_printf(m, "VCE: Disabled\n"); 3619 } else { 3620 seq_printf(m, "VCE: Enabled\n"); 3621 if (!amdgpu_dpm_read_sensor(adev, AMDGPU_PP_SENSOR_VCE_ECCLK, (void *)&value, &size)) 3622 seq_printf(m, "\t%u MHz (ECCLK)\n", value/100); 3623 } 3624 } 3625 } 3626 3627 return 0; 3628} 3629 3630static void amdgpu_parse_cg_state(struct seq_file *m, u32 flags) 3631{ 3632 int i; 3633 3634 for (i = 0; clocks[i].flag; i++) 3635 seq_printf(m, "\t%s: %s\n", clocks[i].name, 3636 (flags & clocks[i].flag) ? "On" : "Off"); 3637} 3638 3639static int amdgpu_debugfs_pm_info(struct seq_file *m, void *data) 3640{ 3641 struct drm_info_node *node = (struct drm_info_node *) m->private; 3642 struct drm_device *dev = node->minor->dev; 3643 struct amdgpu_device *adev = dev->dev_private; 3644 u32 flags = 0; 3645 int r; 3646 3647 r = pm_runtime_get_sync(dev->dev); 3648 if (r < 0) 3649 return r; 3650 3651 amdgpu_device_ip_get_clockgating_state(adev, &flags); 3652 seq_printf(m, "Clock Gating Flags Mask: 0x%x\n", flags); 3653 amdgpu_parse_cg_state(m, flags); 3654 seq_printf(m, "\n"); 3655 3656 if (!adev->pm.dpm_enabled) { 3657 seq_printf(m, "dpm not enabled\n"); 3658 pm_runtime_mark_last_busy(dev->dev); 3659 pm_runtime_put_autosuspend(dev->dev); 3660 return 0; 3661 } 3662 3663 if (!is_support_sw_smu(adev) && 3664 adev->powerplay.pp_funcs->debugfs_print_current_performance_level) { 3665 mutex_lock(&adev->pm.mutex); 3666 if (adev->powerplay.pp_funcs->debugfs_print_current_performance_level) 3667 adev->powerplay.pp_funcs->debugfs_print_current_performance_level(adev, m); 3668 else 3669 seq_printf(m, "Debugfs support not implemented for this asic\n"); 3670 mutex_unlock(&adev->pm.mutex); 3671 r = 0; 3672 } else { 3673 r = amdgpu_debugfs_pm_info_pp(m, adev); 3674 } 3675 3676 pm_runtime_mark_last_busy(dev->dev); 3677 pm_runtime_put_autosuspend(dev->dev); 3678 3679 return r; 3680} 3681 3682static const struct drm_info_list amdgpu_pm_info_list[] = { 3683 {"amdgpu_pm_info", amdgpu_debugfs_pm_info, 0, NULL}, 3684}; 3685#endif 3686 3687#ifndef __NetBSD__ /* XXX sysfs */ 3688static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev) 3689{ 3690#if defined(CONFIG_DEBUG_FS) 3691 return amdgpu_debugfs_add_files(adev, amdgpu_pm_info_list, ARRAY_SIZE(amdgpu_pm_info_list)); 3692#else 3693 return 0; 3694#endif 3695} 3696#endif 3697
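/*
 * Illustrative userspace sketch (not part of the driver) showing how the
 * hwmon files documented in the DOC: hwmon section above can be read and
 * converted: temp1_input is reported in millidegrees Celsius and
 * power1_average in microwatts.  The hwmon index ("hwmon0") is an
 * assumption; the real index depends on probe order.
 */
#if 0
#include <stdio.h>

int main(void)
{
	FILE *f;
	long temp, power;

	/* edge temperature, millidegrees Celsius */
	f = fopen("/sys/class/hwmon/hwmon0/temp1_input", "r");
	if (f != NULL) {
		if (fscanf(f, "%ld", &temp) == 1)
			printf("edge temp: %ld.%03ld C\n",
			    temp / 1000, temp % 1000);
		fclose(f);
	}

	/* average GPU power, microwatts */
	f = fopen("/sys/class/hwmon/hwmon0/power1_average", "r");
	if (f != NULL) {
		if (fscanf(f, "%ld", &power) == 1)
			printf("average power: %ld.%06ld W\n",
			    power / 1000000, power % 1000000);
		fclose(f);
	}

	return 0;
}
#endif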