/*	$NetBSD: amdgpu_nbio_v7_4.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $	*/

/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#include <sys/cdefs.h>
__KERNEL_RCSID(0, "$NetBSD: amdgpu_nbio_v7_4.c,v 1.2 2021/12/18 23:44:58 riastradh Exp $");

#include "amdgpu.h"
#include "amdgpu_atombios.h"
#include "nbio_v7_4.h"
#include "amdgpu_ras.h"

#include "nbio/nbio_7_4_offset.h"
#include "nbio/nbio_7_4_sh_mask.h"
#include "nbio/nbio_7_4_0_smn.h"
#include "ivsrcid/nbio/irqsrcs_nbif_7_4.h"
#include <uapi/linux/kfd_ioctl.h>

#define smnNBIF_MGCG_CTRL_LCLK	0x1013a21c

/*
 * These are nbio v7_4_1 register masks. Temporarily define them here, since
 * the nbio v7_4_1 header is incomplete.
 */
#define GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK	0x00001000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK	0x00002000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK	0x00004000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK	0x00008000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK	0x00010000L
#define GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK	0x00020000L
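/*
 * Note (inferred from this file itself, not from a released register
 * header): the reserved flush-done bits above are repurposed as the
 * SDMA2..7 flush-done bits in the nbio_v7_4_hdp_flush_reg table further
 * down.
 */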

#define mmBIF_MMSCH1_DOORBELL_RANGE			0x01dc
#define mmBIF_MMSCH1_DOORBELL_RANGE_BASE_IDX		2
//BIF_MMSCH1_DOORBELL_RANGE
#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET__SHIFT	0x2
#define BIF_MMSCH1_DOORBELL_RANGE__SIZE__SHIFT		0x10
#define BIF_MMSCH1_DOORBELL_RANGE__OFFSET_MASK		0x00000FFCL
#define BIF_MMSCH1_DOORBELL_RANGE__SIZE_MASK		0x001F0000L

static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
					void *ras_error_status);

static void nbio_v7_4_remap_hdp_registers(struct amdgpu_device *adev)
{
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_MEM_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL);
	WREG32_SOC15(NBIO, 0, mmREMAP_HDP_REG_FLUSH_CNTL,
		adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_REG_FLUSH_CNTL);
}

static u32 nbio_v7_4_get_rev_id(struct amdgpu_device *adev)
{
	u32 tmp = RREG32_SOC15(NBIO, 0, mmRCC_DEV0_EPF0_STRAP0);

	tmp &= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0_MASK;
	tmp >>= RCC_DEV0_EPF0_STRAP0__STRAP_ATI_REV_ID_DEV0_F0__SHIFT;

	return tmp;
}

static void nbio_v7_4_mc_access_enable(struct amdgpu_device *adev, bool enable)
{
	if (enable)
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN,
			BIF_FB_EN__FB_READ_EN_MASK | BIF_FB_EN__FB_WRITE_EN_MASK);
	else
		WREG32_SOC15(NBIO, 0, mmBIF_FB_EN, 0);
}

static void nbio_v7_4_hdp_flush(struct amdgpu_device *adev,
				struct amdgpu_ring *ring)
{
	if (!ring || !ring->funcs->emit_wreg)
		WREG32_NO_KIQ((adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
	else
		amdgpu_ring_emit_wreg(ring, (adev->rmmio_remap.reg_offset + KFD_MMIO_REMAP_HDP_MEM_FLUSH_CNTL) >> 2, 0);
}

static u32 nbio_v7_4_get_memsize(struct amdgpu_device *adev)
{
	return RREG32_SOC15(NBIO, 0, mmRCC_CONFIG_MEMSIZE);
}

static void nbio_v7_4_sdma_doorbell_range(struct amdgpu_device *adev, int instance,
			bool use_doorbell, int doorbell_index, int doorbell_size)
{
	u32 reg, doorbell_range;

	if (instance < 2)
		reg = instance +
			SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE);
	else
		/*
		 * The register addresses of SDMA2~7 are not consecutive
		 * with those of SDMA0~1; an extra offset of 4 dwords is
		 * needed.
		 *
		 * BIF_SDMA0_DOORBELL_RANGE:  0x3bc0
		 * BIF_SDMA1_DOORBELL_RANGE:  0x3bc4
		 * BIF_SDMA2_DOORBELL_RANGE:  0x3bd8
		 */
		reg = instance + 0x4 +
			SOC15_REG_OFFSET(NBIO, 0, mmBIF_SDMA0_DOORBELL_RANGE);

	doorbell_range = RREG32(reg);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, OFFSET, doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, doorbell_size);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range, BIF_SDMA0_DOORBELL_RANGE, SIZE, 0);

	WREG32(reg, doorbell_range);
}
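/*
 * Worked example of the offset arithmetic above (derived from the byte
 * addresses quoted in the comment; SOC15_REG_OFFSET yields dword
 * offsets, i.e. byte address >> 2):
 *
 *	instance 0: base + 0      -> byte address 0x3bc0
 *	instance 1: base + 1      -> byte address 0x3bc4
 *	instance 2: base + 2 + 4  -> byte address 0x3bd8
 *	instance 7: base + 7 + 4  -> byte address 0x3bec
 */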

static void nbio_v7_4_vcn_doorbell_range(struct amdgpu_device *adev, bool use_doorbell,
					 int doorbell_index, int instance)
{
	u32 reg;
	u32 doorbell_range;

	if (instance)
		reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH1_DOORBELL_RANGE);
	else
		reg = SOC15_REG_OFFSET(NBIO, 0, mmBIF_MMSCH0_DOORBELL_RANGE);

	doorbell_range = RREG32(reg);

	if (use_doorbell) {
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, OFFSET,
					       doorbell_index);
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, SIZE, 8);
	} else
		doorbell_range = REG_SET_FIELD(doorbell_range,
					       BIF_MMSCH0_DOORBELL_RANGE, SIZE, 0);

	WREG32(reg, doorbell_range);
}

static void nbio_v7_4_enable_doorbell_aperture(struct amdgpu_device *adev,
					       bool enable)
{
	WREG32_FIELD15(NBIO, 0, RCC_DOORBELL_APER_EN, BIF_DOORBELL_APER_EN, enable ? 1 : 0);
}

static void nbio_v7_4_enable_doorbell_selfring_aperture(struct amdgpu_device *adev,
							bool enable)
{
	u32 tmp = 0;

	if (enable) {
		tmp = REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_EN, 1) |
		      REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_MODE, 1) |
		      REG_SET_FIELD(tmp, DOORBELL_SELFRING_GPA_APER_CNTL, DOORBELL_SELFRING_GPA_APER_SIZE, 0);

		WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_LOW,
			     lower_32_bits(adev->doorbell.base));
		WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_BASE_HIGH,
			     upper_32_bits(adev->doorbell.base));
	}

	WREG32_SOC15(NBIO, 0, mmDOORBELL_SELFRING_GPA_APER_CNTL, tmp);
}

static void nbio_v7_4_ih_doorbell_range(struct amdgpu_device *adev,
					bool use_doorbell, int doorbell_index)
{
	u32 ih_doorbell_range = RREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE);

	if (use_doorbell) {
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, OFFSET, doorbell_index);
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 2);
	} else
		ih_doorbell_range = REG_SET_FIELD(ih_doorbell_range, BIF_IH_DOORBELL_RANGE, SIZE, 0);

	WREG32_SOC15(NBIO, 0, mmBIF_IH_DOORBELL_RANGE, ih_doorbell_range);
}


static void nbio_v7_4_update_medium_grain_clock_gating(struct amdgpu_device *adev,
						       bool enable)
{
	//TODO: Add support for v7.4
}

static void nbio_v7_4_update_medium_grain_light_sleep(struct amdgpu_device *adev,
						      bool enable)
{
	uint32_t def, data;

	def = data = RREG32_PCIE(smnPCIE_CNTL2);
	if (enable && (adev->cg_flags & AMD_CG_SUPPORT_BIF_LS)) {
		data |= (PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			 PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			 PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	} else {
		data &= ~(PCIE_CNTL2__SLV_MEM_LS_EN_MASK |
			  PCIE_CNTL2__MST_MEM_LS_EN_MASK |
			  PCIE_CNTL2__REPLAY_MEM_LS_EN_MASK);
	}

	if (def != data)
		WREG32_PCIE(smnPCIE_CNTL2, data);
}
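/*
 * A note on the def/data idiom above (an observation about this file's
 * style, not a behavioral change): the register is read once, the mask
 * bits are set or cleared in a local copy, and the write is skipped when
 * nothing changed, which avoids a redundant indirect write through the
 * PCIE index/data pair.
 */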

static void nbio_v7_4_get_clockgating_state(struct amdgpu_device *adev,
					    u32 *flags)
{
	int data;

	/* AMD_CG_SUPPORT_BIF_MGCG */
	data = RREG32_PCIE(smnCPM_CONTROL);
	if (data & CPM_CONTROL__LCLK_DYN_GATE_ENABLE_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_MGCG;

	/* AMD_CG_SUPPORT_BIF_LS */
	data = RREG32_PCIE(smnPCIE_CNTL2);
	if (data & PCIE_CNTL2__SLV_MEM_LS_EN_MASK)
		*flags |= AMD_CG_SUPPORT_BIF_LS;
}

static void nbio_v7_4_ih_control(struct amdgpu_device *adev)
{
	u32 interrupt_cntl;

	/* setup interrupt control */
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL2, adev->dummy_page_addr >> 8);
	interrupt_cntl = RREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL);
	/* INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=0 - dummy read disabled with msi, enabled without msi
	 * INTERRUPT_CNTL__IH_DUMMY_RD_OVERRIDE_MASK=1 - dummy read controlled by IH_DUMMY_RD_EN
	 */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_DUMMY_RD_OVERRIDE, 0);
	/* INTERRUPT_CNTL__IH_REQ_NONSNOOP_EN_MASK=1 if ring is in non-cacheable memory, e.g., vram */
	interrupt_cntl = REG_SET_FIELD(interrupt_cntl, INTERRUPT_CNTL, IH_REQ_NONSNOOP_EN, 0);
	WREG32_SOC15(NBIO, 0, mmINTERRUPT_CNTL, interrupt_cntl);
}

static u32 nbio_v7_4_get_hdp_flush_req_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_REQ);
}

static u32 nbio_v7_4_get_hdp_flush_done_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmGPU_HDP_FLUSH_DONE);
}

static u32 nbio_v7_4_get_pcie_index_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_INDEX2);
}

static u32 nbio_v7_4_get_pcie_data_offset(struct amdgpu_device *adev)
{
	return SOC15_REG_OFFSET(NBIO, 0, mmPCIE_DATA2);
}

const struct nbio_hdp_flush_reg nbio_v7_4_hdp_flush_reg = {
	.ref_and_mask_cp0 = GPU_HDP_FLUSH_DONE__CP0_MASK,
	.ref_and_mask_cp1 = GPU_HDP_FLUSH_DONE__CP1_MASK,
	.ref_and_mask_cp2 = GPU_HDP_FLUSH_DONE__CP2_MASK,
	.ref_and_mask_cp3 = GPU_HDP_FLUSH_DONE__CP3_MASK,
	.ref_and_mask_cp4 = GPU_HDP_FLUSH_DONE__CP4_MASK,
	.ref_and_mask_cp5 = GPU_HDP_FLUSH_DONE__CP5_MASK,
	.ref_and_mask_cp6 = GPU_HDP_FLUSH_DONE__CP6_MASK,
	.ref_and_mask_cp7 = GPU_HDP_FLUSH_DONE__CP7_MASK,
	.ref_and_mask_cp8 = GPU_HDP_FLUSH_DONE__CP8_MASK,
	.ref_and_mask_cp9 = GPU_HDP_FLUSH_DONE__CP9_MASK,
	.ref_and_mask_sdma0 = GPU_HDP_FLUSH_DONE__SDMA0_MASK,
	.ref_and_mask_sdma1 = GPU_HDP_FLUSH_DONE__SDMA1_MASK,
	.ref_and_mask_sdma2 = GPU_HDP_FLUSH_DONE__RSVD_ENG0_MASK,
	.ref_and_mask_sdma3 = GPU_HDP_FLUSH_DONE__RSVD_ENG1_MASK,
	.ref_and_mask_sdma4 = GPU_HDP_FLUSH_DONE__RSVD_ENG2_MASK,
	.ref_and_mask_sdma5 = GPU_HDP_FLUSH_DONE__RSVD_ENG3_MASK,
	.ref_and_mask_sdma6 = GPU_HDP_FLUSH_DONE__RSVD_ENG4_MASK,
	.ref_and_mask_sdma7 = GPU_HDP_FLUSH_DONE__RSVD_ENG5_MASK,
};

static void nbio_v7_4_detect_hw_virt(struct amdgpu_device *adev)
{
	uint32_t reg;

	reg = RREG32_SOC15(NBIO, 0, mmRCC_IOV_FUNC_IDENTIFIER);
	if (reg & 1)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (!reg) {
		if (is_virtual_machine())	/* passthrough mode excludes sriov mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}
}
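/*
 * Decoding of RCC_IOV_FUNC_IDENTIFIER as used above (inferred from the
 * checks themselves, not quoted from a register spec): bit 0 set means
 * this function is an SR-IOV virtual function, bit 31 set means SR-IOV
 * is enabled, and an all-zero read while running inside a virtual
 * machine is taken to mean PCI passthrough.
 */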
static void nbio_v7_4_init_registers(struct amdgpu_device *adev)
{

}

static void nbio_v7_4_handle_ras_controller_intr_no_bifring(struct amdgpu_device *adev)
{
	uint32_t bif_doorbell_intr_cntl;
	struct ras_manager *obj = amdgpu_ras_find_obj(adev, adev->nbio.ras_if);

	bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
	if (REG_GET_FIELD(bif_doorbell_intr_cntl,
		BIF_DOORBELL_INT_CNTL, RAS_CNTLR_INTERRUPT_STATUS)) {
		/* driver has to clear the interrupt status when bif ring is disabled */
		bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
						BIF_DOORBELL_INT_CNTL,
						RAS_CNTLR_INTERRUPT_CLEAR, 1);
		WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);

		/*
		 * clear error status after ras_controller_intr, per the hw
		 * team, and count the ue number for query
		 */
		nbio_v7_4_query_ras_error_count(adev, &obj->err_data);

		DRM_WARN("RAS controller interrupt triggered by NBIF error\n");

		/* ras_controller_int is dedicated to nbif ras errors,
		 * not the global interrupt for sync flood
		 */
		amdgpu_ras_reset_gpu(adev);
	}
}

static void nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring(struct amdgpu_device *adev)
{
	uint32_t bif_doorbell_intr_cntl;

	bif_doorbell_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL);
	if (REG_GET_FIELD(bif_doorbell_intr_cntl,
		BIF_DOORBELL_INT_CNTL, RAS_ATHUB_ERR_EVENT_INTERRUPT_STATUS)) {
		/* driver has to clear the interrupt status when bif ring is disabled */
		bif_doorbell_intr_cntl = REG_SET_FIELD(bif_doorbell_intr_cntl,
						BIF_DOORBELL_INT_CNTL,
						RAS_ATHUB_ERR_EVENT_INTERRUPT_CLEAR, 1);
		WREG32_SOC15(NBIO, 0, mmBIF_DOORBELL_INT_CNTL, bif_doorbell_intr_cntl);

		amdgpu_ras_global_ras_isr(adev);
	}
}


static int nbio_v7_4_set_ras_controller_irq_state(struct amdgpu_device *adev,
						  struct amdgpu_irq_src *src,
						  unsigned type,
						  enum amdgpu_interrupt_state state)
{
	/* ras_controller_irq should be enabled by the psp bl when it tries
	 * to enable the ras feature. The driver only needs to set the
	 * correct interrupt vector for the bare-metal and sriov use cases
	 * respectively.
	 */
	uint32_t bif_intr_cntl;

	bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
	if (state == AMDGPU_IRQ_STATE_ENABLE) {
		/* set interrupt vector select bit to 0 to select
		 * vector 1 for the bare-metal case */
		bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
					      BIF_INTR_CNTL,
					      RAS_INTR_VEC_SEL, 0);
		WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
	}

	return 0;
}

static int nbio_v7_4_process_ras_controller_irq(struct amdgpu_device *adev,
						struct amdgpu_irq_src *source,
						struct amdgpu_iv_entry *entry)
{
	/* By design, the ih cookie for ras_controller_irq should be written
	 * to the BIF ring instead of the general iv ring. However, due to a
	 * known bif ring hw bug, it has to be disabled. Since the process
	 * function will never be invoked, just leave it as a dummy one.
	 */
	return 0;
}

static int nbio_v7_4_set_ras_err_event_athub_irq_state(struct amdgpu_device *adev,
						       struct amdgpu_irq_src *src,
						       unsigned type,
						       enum amdgpu_interrupt_state state)
{
	/* ras_err_event_athub_irq should likewise be enabled by the psp bl
	 * when it tries to enable the ras feature. The driver only needs to
	 * set the correct interrupt vector for the bare-metal and sriov use
	 * cases respectively.
	 */
	uint32_t bif_intr_cntl;

	bif_intr_cntl = RREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL);
	if (state == AMDGPU_IRQ_STATE_ENABLE) {
		/* set interrupt vector select bit to 0 to select
		 * vector 1 for the bare-metal case */
		bif_intr_cntl = REG_SET_FIELD(bif_intr_cntl,
					      BIF_INTR_CNTL,
					      RAS_INTR_VEC_SEL, 0);
		WREG32_SOC15(NBIO, 0, mmBIF_INTR_CNTL, bif_intr_cntl);
	}

	return 0;
}
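/*
 * Summary of the RAS interrupt plumbing here (restating the comments in
 * the callbacks above and below): the psp bootloader enables both RAS
 * interrupt sources; the driver only selects the interrupt vector, and
 * the .process callbacks remain stubs because the BIF ring they were
 * designed for is disabled by a known hardware bug.
 */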

static int nbio_v7_4_process_err_event_athub_irq(struct amdgpu_device *adev,
						 struct amdgpu_irq_src *source,
						 struct amdgpu_iv_entry *entry)
{
	/* By design, the ih cookie for err_event_athub_irq should be written
	 * to the BIF ring instead of the general iv ring. However, due to a
	 * known bif ring hw bug, it has to be disabled. Since the process
	 * function will never be invoked, just leave it as a dummy one.
	 */
	return 0;
}

static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_controller_irq_funcs = {
	.set = nbio_v7_4_set_ras_controller_irq_state,
	.process = nbio_v7_4_process_ras_controller_irq,
};

static const struct amdgpu_irq_src_funcs nbio_v7_4_ras_err_event_athub_irq_funcs = {
	.set = nbio_v7_4_set_ras_err_event_athub_irq_state,
	.process = nbio_v7_4_process_err_event_athub_irq,
};

static int nbio_v7_4_init_ras_controller_interrupt(struct amdgpu_device *adev)
{
	int r;

	/* init the irq funcs */
	adev->nbio.ras_controller_irq.funcs =
		&nbio_v7_4_ras_controller_irq_funcs;
	adev->nbio.ras_controller_irq.num_types = 1;

	/* register ras controller interrupt */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
			      NBIF_7_4__SRCID__RAS_CONTROLLER_INTERRUPT,
			      &adev->nbio.ras_controller_irq);

	return r;
}

static int nbio_v7_4_init_ras_err_event_athub_interrupt(struct amdgpu_device *adev)
{
	int r;

	/* init the irq funcs */
	adev->nbio.ras_err_event_athub_irq.funcs =
		&nbio_v7_4_ras_err_event_athub_irq_funcs;
	adev->nbio.ras_err_event_athub_irq.num_types = 1;

	/* register ras err event athub interrupt */
	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF,
			      NBIF_7_4__SRCID__ERREVENT_ATHUB_INTERRUPT,
			      &adev->nbio.ras_err_event_athub_irq);

	return r;
}

#define smnPARITY_ERROR_STATUS_UNCORR_GRP2	0x13a20030

static void nbio_v7_4_query_ras_error_count(struct amdgpu_device *adev,
					void *ras_error_status)
{
	uint32_t global_sts, central_sts, int_eoi, parity_sts;
	uint32_t corr, fatal, non_fatal;
	struct ras_err_data *err_data = (struct ras_err_data *)ras_error_status;

	global_sts = RREG32_PCIE(smnRAS_GLOBAL_STATUS_LO);
	corr = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrCorr);
	fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO, ParityErrFatal);
	non_fatal = REG_GET_FIELD(global_sts, RAS_GLOBAL_STATUS_LO,
				  ParityErrNonFatal);
	parity_sts = RREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2);

	if (corr)
		err_data->ce_count++;
	if (fatal)
		err_data->ue_count++;

	if (corr || fatal || non_fatal) {
		central_sts = RREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS);
		/* clear error status register */
		WREG32_PCIE(smnRAS_GLOBAL_STATUS_LO, global_sts);

		if (fatal)
			/* clear parity fatal error indication field */
			WREG32_PCIE(smnPARITY_ERROR_STATUS_UNCORR_GRP2,
				    parity_sts);

		if (REG_GET_FIELD(central_sts, BIFL_RAS_CENTRAL_STATUS,
				  BIFL_RasContller_Intr_Recv)) {
			/* clear interrupt status register */
			WREG32_PCIE(smnBIFL_RAS_CENTRAL_STATUS, central_sts);
			int_eoi = RREG32_PCIE(smnIOHC_INTERRUPT_EOI);
			int_eoi = REG_SET_FIELD(int_eoi,
						IOHC_INTERRUPT_EOI, SMI_EOI, 1);
			WREG32_PCIE(smnIOHC_INTERRUPT_EOI, int_eoi);
		}
	}
}
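/*
 * The query sequence above, summarized (a reading of the code; the
 * write-back-to-clear behavior is implied by the comments, not by a
 * public register spec): read RAS_GLOBAL_STATUS_LO, count correctable
 * and fatal parity errors, write the captured status values back to
 * clear them, and signal an SMI EOI once the central status confirms
 * the RAS controller interrupt was received.
 */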

static void nbio_v7_4_enable_doorbell_interrupt(struct amdgpu_device *adev,
						bool enable)
{
	WREG32_FIELD15(NBIO, 0, BIF_DOORBELL_INT_CNTL,
		       DOORBELL_INTERRUPT_DISABLE, enable ? 0 : 1);
}

const struct amdgpu_nbio_funcs nbio_v7_4_funcs = {
	.get_hdp_flush_req_offset = nbio_v7_4_get_hdp_flush_req_offset,
	.get_hdp_flush_done_offset = nbio_v7_4_get_hdp_flush_done_offset,
	.get_pcie_index_offset = nbio_v7_4_get_pcie_index_offset,
	.get_pcie_data_offset = nbio_v7_4_get_pcie_data_offset,
	.get_rev_id = nbio_v7_4_get_rev_id,
	.mc_access_enable = nbio_v7_4_mc_access_enable,
	.hdp_flush = nbio_v7_4_hdp_flush,
	.get_memsize = nbio_v7_4_get_memsize,
	.sdma_doorbell_range = nbio_v7_4_sdma_doorbell_range,
	.vcn_doorbell_range = nbio_v7_4_vcn_doorbell_range,
	.enable_doorbell_aperture = nbio_v7_4_enable_doorbell_aperture,
	.enable_doorbell_selfring_aperture = nbio_v7_4_enable_doorbell_selfring_aperture,
	.ih_doorbell_range = nbio_v7_4_ih_doorbell_range,
	.enable_doorbell_interrupt = nbio_v7_4_enable_doorbell_interrupt,
	.update_medium_grain_clock_gating = nbio_v7_4_update_medium_grain_clock_gating,
	.update_medium_grain_light_sleep = nbio_v7_4_update_medium_grain_light_sleep,
	.get_clockgating_state = nbio_v7_4_get_clockgating_state,
	.ih_control = nbio_v7_4_ih_control,
	.init_registers = nbio_v7_4_init_registers,
	.detect_hw_virt = nbio_v7_4_detect_hw_virt,
	.remap_hdp_registers = nbio_v7_4_remap_hdp_registers,
	.handle_ras_controller_intr_no_bifring = nbio_v7_4_handle_ras_controller_intr_no_bifring,
	.handle_ras_err_event_athub_intr_no_bifring = nbio_v7_4_handle_ras_err_event_athub_intr_no_bifring,
	.init_ras_controller_interrupt = nbio_v7_4_init_ras_controller_interrupt,
	.init_ras_err_event_athub_interrupt = nbio_v7_4_init_ras_err_event_athub_interrupt,
	.query_ras_error_count = nbio_v7_4_query_ras_error_count,
	.ras_late_init = amdgpu_nbio_ras_late_init,
};
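/*
 * Usage sketch (hedged; the call site lives in the SoC setup code, not
 * in this file): chips carrying this NBIO block are expected to install
 * the tables roughly as
 *
 *	adev->nbio.funcs = &nbio_v7_4_funcs;
 *	adev->nbio.hdp_flush_reg = &nbio_v7_4_hdp_flush_reg;
 *
 * after which the core driver calls through these hooks.
 */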