Lines Matching refs:gfx

927 amdgpu_ucode_release(&adev->gfx.pfp_fw);
928 amdgpu_ucode_release(&adev->gfx.me_fw);
929 amdgpu_ucode_release(&adev->gfx.ce_fw);
930 amdgpu_ucode_release(&adev->gfx.rlc_fw);
931 amdgpu_ucode_release(&adev->gfx.mec_fw);
934 amdgpu_ucode_release(&adev->gfx.mec2_fw);
936 kfree(adev->gfx.rlc.register_list_format);
986 err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
989 err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
993 err = amdgpu_ucode_request(adev, &adev->gfx.pfp_fw, fw_name);
997 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.pfp_fw->data;
998 adev->gfx.pfp_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
999 adev->gfx.pfp_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1003 err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
1006 err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
1010 err = amdgpu_ucode_request(adev, &adev->gfx.me_fw, fw_name);
1014 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.me_fw->data;
1015 adev->gfx.me_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1017 adev->gfx.me_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1021 err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
1024 err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
1028 err = amdgpu_ucode_request(adev, &adev->gfx.ce_fw, fw_name);
1032 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.ce_fw->data;
1033 adev->gfx.ce_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1034 adev->gfx.ce_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1040 if (adev->gfx.ce_feature_version >= 46 &&
1041 adev->gfx.pfp_feature_version >= 46) {
1048 err = amdgpu_ucode_request(adev, &adev->gfx.rlc_fw, fw_name);
1051 rlc_hdr = (const struct rlc_firmware_header_v2_0 *)adev->gfx.rlc_fw->data;
1052 adev->gfx.rlc_fw_version = le32_to_cpu(rlc_hdr->header.ucode_version);
1053 adev->gfx.rlc_feature_version = le32_to_cpu(rlc_hdr->ucode_feature_version);
1055 adev->gfx.rlc.save_and_restore_offset =
1057 adev->gfx.rlc.clear_state_descriptor_offset =
1059 adev->gfx.rlc.avail_scratch_ram_locations =
1061 adev->gfx.rlc.reg_restore_list_size =
1063 adev->gfx.rlc.reg_list_format_start =
1065 adev->gfx.rlc.reg_list_format_separate_start =
1067 adev->gfx.rlc.starting_offsets_start =
1069 adev->gfx.rlc.reg_list_format_size_bytes =
1071 adev->gfx.rlc.reg_list_size_bytes =
1074 adev->gfx.rlc.register_list_format =
1075 kmalloc(adev->gfx.rlc.reg_list_format_size_bytes +
1076 adev->gfx.rlc.reg_list_size_bytes, GFP_KERNEL);
1078 if (!adev->gfx.rlc.register_list_format) {
1085 for (i = 0 ; i < (adev->gfx.rlc.reg_list_format_size_bytes >> 2); i++)
1086 adev->gfx.rlc.register_list_format[i] = le32_to_cpu(tmp[i]);
1088 adev->gfx.rlc.register_restore = adev->gfx.rlc.register_list_format + i;
1092 for (i = 0 ; i < (adev->gfx.rlc.reg_list_size_bytes >> 2); i++)
1093 adev->gfx.rlc.register_restore[i] = le32_to_cpu(tmp[i]);
1097 err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
1100 err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
1104 err = amdgpu_ucode_request(adev, &adev->gfx.mec_fw, fw_name);
1108 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1109 adev->gfx.mec_fw_version = le32_to_cpu(cp_hdr->header.ucode_version);
1110 adev->gfx.mec_feature_version = le32_to_cpu(cp_hdr->ucode_feature_version);
1116 err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
1119 err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
1123 err = amdgpu_ucode_request(adev, &adev->gfx.mec2_fw, fw_name);
1127 adev->gfx.mec2_fw->data;
1128 adev->gfx.mec2_fw_version =
1130 adev->gfx.mec2_feature_version =
1134 adev->gfx.mec2_fw = NULL;
1140 info->fw = adev->gfx.pfp_fw;
1147 info->fw = adev->gfx.me_fw;
1154 info->fw = adev->gfx.ce_fw;
1161 info->fw = adev->gfx.rlc_fw;
1168 info->fw = adev->gfx.mec_fw;
1174 cp_hdr = (const struct gfx_firmware_header_v1_0 *)adev->gfx.mec_fw->data;
1181 info->fw = adev->gfx.mec_fw;
1186 if (adev->gfx.mec2_fw) {
1189 info->fw = adev->gfx.mec2_fw;
1200 amdgpu_ucode_release(&adev->gfx.pfp_fw);
1201 amdgpu_ucode_release(&adev->gfx.me_fw);
1202 amdgpu_ucode_release(&adev->gfx.ce_fw);
1203 amdgpu_ucode_release(&adev->gfx.rlc_fw);
1204 amdgpu_ucode_release(&adev->gfx.mec_fw);
1205 amdgpu_ucode_release(&adev->gfx.mec2_fw);
1217 if (adev->gfx.rlc.cs_data == NULL)
1229 for (sect = adev->gfx.rlc.cs_data; sect->section != NULL; ++sect) {
1247 buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config);
1248 buffer[count++] = cpu_to_le32(adev->gfx.config.rb_config[0][0].raster_config_1);
1270 adev->gfx.rlc.cs_data = vi_cs_data;
1272 cs_data = adev->gfx.rlc.cs_data;
1283 adev->gfx.rlc.cp_table_size = ALIGN(96 * 5 * 4, 2048) + (64 * 1024); /* JT + GDS */
1290 if (adev->gfx.rlc.funcs->update_spm_vmid)
1291 adev->gfx.rlc.funcs->update_spm_vmid(adev, 0xf);
1298 amdgpu_bo_free_kernel(&adev->gfx.mec.hpd_eop_obj, NULL, NULL);
1307 bitmap_zero(adev->gfx.mec_bitmap[0].queue_bitmap, AMDGPU_MAX_COMPUTE_QUEUES);
1312 mec_hpd_size = adev->gfx.num_compute_rings * GFX8_MEC_HPD_SIZE;
1317 &adev->gfx.mec.hpd_eop_obj,
1318 &adev->gfx.mec.hpd_eop_gpu_addr,
1327 amdgpu_bo_kunmap(adev->gfx.mec.hpd_eop_obj);
1328 amdgpu_bo_unreserve(adev->gfx.mec.hpd_eop_obj);
1490 struct amdgpu_ring *ring = &adev->gfx.compute_ring[0];
1662 adev->gfx.config.max_shader_engines = 1;
1663 adev->gfx.config.max_tile_pipes = 2;
1664 adev->gfx.config.max_cu_per_sh = 6;
1665 adev->gfx.config.max_sh_per_se = 1;
1666 adev->gfx.config.max_backends_per_se = 2;
1667 adev->gfx.config.max_texture_channel_caches = 2;
1668 adev->gfx.config.max_gprs = 256;
1669 adev->gfx.config.max_gs_threads = 32;
1670 adev->gfx.config.max_hw_contexts = 8;
1672 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1673 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1674 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1675 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1679 adev->gfx.config.max_shader_engines = 4;
1680 adev->gfx.config.max_tile_pipes = 16;
1681 adev->gfx.config.max_cu_per_sh = 16;
1682 adev->gfx.config.max_sh_per_se = 1;
1683 adev->gfx.config.max_backends_per_se = 4;
1684 adev->gfx.config.max_texture_channel_caches = 16;
1685 adev->gfx.config.max_gprs = 256;
1686 adev->gfx.config.max_gs_threads = 32;
1687 adev->gfx.config.max_hw_contexts = 8;
1689 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1690 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1691 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1692 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1700 adev->gfx.config.max_gprs = 256;
1701 adev->gfx.config.max_gs_threads = 32;
1702 adev->gfx.config.max_hw_contexts = 8;
1704 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1705 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1706 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1707 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1715 adev->gfx.config.max_gprs = 256;
1716 adev->gfx.config.max_gs_threads = 32;
1717 adev->gfx.config.max_hw_contexts = 8;
1719 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1720 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1721 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1722 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1726 adev->gfx.config.max_shader_engines = 4;
1727 adev->gfx.config.max_tile_pipes = 8;
1728 adev->gfx.config.max_cu_per_sh = 8;
1729 adev->gfx.config.max_sh_per_se = 1;
1730 adev->gfx.config.max_backends_per_se = 2;
1731 adev->gfx.config.max_texture_channel_caches = 8;
1732 adev->gfx.config.max_gprs = 256;
1733 adev->gfx.config.max_gs_threads = 32;
1734 adev->gfx.config.max_hw_contexts = 8;
1736 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1737 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1738 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1739 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1743 adev->gfx.config.max_shader_engines = 1;
1744 adev->gfx.config.max_tile_pipes = 2;
1745 adev->gfx.config.max_sh_per_se = 1;
1746 adev->gfx.config.max_backends_per_se = 2;
1747 adev->gfx.config.max_cu_per_sh = 8;
1748 adev->gfx.config.max_texture_channel_caches = 2;
1749 adev->gfx.config.max_gprs = 256;
1750 adev->gfx.config.max_gs_threads = 32;
1751 adev->gfx.config.max_hw_contexts = 8;
1753 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1754 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1755 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1756 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1760 adev->gfx.config.max_shader_engines = 1;
1761 adev->gfx.config.max_tile_pipes = 2;
1762 adev->gfx.config.max_sh_per_se = 1;
1763 adev->gfx.config.max_backends_per_se = 1;
1764 adev->gfx.config.max_cu_per_sh = 3;
1765 adev->gfx.config.max_texture_channel_caches = 2;
1766 adev->gfx.config.max_gprs = 256;
1767 adev->gfx.config.max_gs_threads = 16;
1768 adev->gfx.config.max_hw_contexts = 8;
1770 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1771 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1772 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1773 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1777 adev->gfx.config.max_shader_engines = 2;
1778 adev->gfx.config.max_tile_pipes = 4;
1779 adev->gfx.config.max_cu_per_sh = 2;
1780 adev->gfx.config.max_sh_per_se = 1;
1781 adev->gfx.config.max_backends_per_se = 2;
1782 adev->gfx.config.max_texture_channel_caches = 4;
1783 adev->gfx.config.max_gprs = 256;
1784 adev->gfx.config.max_gs_threads = 32;
1785 adev->gfx.config.max_hw_contexts = 8;
1787 adev->gfx.config.sc_prim_fifo_size_frontend = 0x20;
1788 adev->gfx.config.sc_prim_fifo_size_backend = 0x100;
1789 adev->gfx.config.sc_hiz_tile_fifo_size = 0x30;
1790 adev->gfx.config.sc_earlyz_tile_fifo_size = 0x130;
1795 adev->gfx.config.mc_arb_ramcfg = RREG32(mmMC_ARB_RAMCFG);
1796 mc_arb_ramcfg = adev->gfx.config.mc_arb_ramcfg;
1798 adev->gfx.config.num_banks = REG_GET_FIELD(mc_arb_ramcfg,
1800 adev->gfx.config.num_ranks = REG_GET_FIELD(mc_arb_ramcfg,
1803 adev->gfx.config.num_tile_pipes = adev->gfx.config.max_tile_pipes;
1804 adev->gfx.config.mem_max_burst_length_bytes = 256;
1828 adev->gfx.config.mem_row_size_in_kb = 2;
1830 adev->gfx.config.mem_row_size_in_kb = 1;
1833 adev->gfx.config.mem_row_size_in_kb = (4 * (1 << (8 + tmp))) / 1024;
1834 if (adev->gfx.config.mem_row_size_in_kb > 4)
1835 adev->gfx.config.mem_row_size_in_kb = 4;
1838 adev->gfx.config.shader_engine_tile_size = 32;
1839 adev->gfx.config.num_gpus = 1;
1840 adev->gfx.config.multi_gpu_tile_size = 64;
1843 switch (adev->gfx.config.mem_row_size_in_kb) {
1855 adev->gfx.config.gb_addr_config = gb_addr_config;
1865 struct amdgpu_ring *ring = &adev->gfx.compute_ring[ring_id];
1868 ring = &adev->gfx.compute_ring[ring_id];
1878 ring->eop_gpu_addr = adev->gfx.mec.hpd_eop_gpu_addr
1883 + ((ring->me - 1) * adev->gfx.mec.num_pipe_per_mec)
1889 r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq, irq_type,
1915 adev->gfx.mec.num_mec = 2;
1920 adev->gfx.mec.num_mec = 1;
1924 adev->gfx.mec.num_pipe_per_mec = 4;
1925 adev->gfx.mec.num_queue_per_pipe = 8;
1928 r = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, VISLANDS30_IV_SRCID_CP_END_OF_PIPE, &adev->gfx.eop_irq);
1934 &adev->gfx.priv_reg_irq);
1940 &adev->gfx.priv_inst_irq);
1946 &adev->gfx.cp_ecc_error_irq);
1952 &adev->gfx.sq_irq);
1958 INIT_WORK(&adev->gfx.sq_work.work, gfx_v8_0_sq_irq_work_func);
1960 adev->gfx.gfx_current_status = AMDGPU_GFX_NORMAL_MODE;
1964 DRM_ERROR("Failed to load gfx firmware!\n");
1968 r = adev->gfx.rlc.funcs->init(adev);
1980 /* set up the gfx ring */
1981 for (i = 0; i < adev->gfx.num_gfx_rings; i++) {
1982 ring = &adev->gfx.gfx_ring[i];
1984 snprintf(ring->name, sizeof(ring->name), "gfx");
1985 /* no gfx doorbells on iceland */
1991 r = amdgpu_ring_init(adev, ring, 1024, &adev->gfx.eop_irq,
2001 for (i = 0; i < adev->gfx.mec.num_mec; ++i) {
2002 for (j = 0; j < adev->gfx.mec.num_queue_per_pipe; j++) {
2003 for (k = 0; k < adev->gfx.mec.num_pipe_per_mec; k++) {
2025 kiq = &adev->gfx.kiq[0];
2035 adev->gfx.ce_ram_size = 0x8000;
2049 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
2050 amdgpu_ring_fini(&adev->gfx.gfx_ring[i]);
2051 for (i = 0; i < adev->gfx.num_compute_rings; i++)
2052 amdgpu_ring_fini(&adev->gfx.compute_ring[i]);
2055 amdgpu_gfx_kiq_free_ring(&adev->gfx.kiq[0].ring);
2060 amdgpu_bo_free_kernel(&adev->gfx.rlc.clear_state_obj,
2061 &adev->gfx.rlc.clear_state_gpu_addr,
2062 (void **)&adev->gfx.rlc.cs_ptr);
2065 amdgpu_bo_free_kernel(&adev->gfx.rlc.cp_table_obj,
2066 &adev->gfx.rlc.cp_table_gpu_addr,
2067 (void **)&adev->gfx.rlc.cp_table_ptr);
2077 const u32 num_tile_mode_states = ARRAY_SIZE(adev->gfx.config.tile_mode_array);
2078 const u32 num_secondary_tile_mode_states = ARRAY_SIZE(adev->gfx.config.macrotile_mode_array);
2081 modearray = adev->gfx.config.tile_mode_array;
2082 mod2array = adev->gfx.config.macrotile_mode_array;
3436 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_backends_per_se /
3437 adev->gfx.config.max_sh_per_se);
3488 unsigned sh_per_se = max_t(unsigned, adev->gfx.config.max_sh_per_se, 1);
3489 unsigned num_se = max_t(unsigned, adev->gfx.config.max_shader_engines, 1);
3598 u32 rb_bitmap_width_per_sh = adev->gfx.config.max_backends_per_se /
3599 adev->gfx.config.max_sh_per_se;
3603 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3604 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3607 active_rbs |= data << ((i * adev->gfx.config.max_sh_per_se + j) *
3613 adev->gfx.config.backend_enable_mask = active_rbs;
3614 adev->gfx.config.num_rbs = hweight32(active_rbs);
3616 num_rb_pipes = min_t(unsigned, adev->gfx.config.max_backends_per_se *
3617 adev->gfx.config.max_shader_engines, 16);
3621 if (!adev->gfx.config.backend_enable_mask ||
3622 adev->gfx.config.num_rbs >= num_rb_pipes) {
3627 adev->gfx.config.backend_enable_mask,
3632 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3633 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3635 adev->gfx.config.rb_config[i][j].rb_backend_disable =
3637 adev->gfx.config.rb_config[i][j].user_rb_backend_disable =
3639 adev->gfx.config.rb_config[i][j].raster_config =
3641 adev->gfx.config.rb_config[i][j].raster_config_1 =
3706 * Initialize all compute and user-gfx VMIDs to have no GDS, GWS, or OA
3723 adev->gfx.config.double_offchip_lds_buf = 1;
3727 adev->gfx.config.double_offchip_lds_buf = 0;
3738 WREG32(mmGB_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
3739 WREG32(mmHDP_ADDR_CONFIG, adev->gfx.config.gb_addr_config);
3740 WREG32(mmDMIF_ADDR_CALC, adev->gfx.config.gb_addr_config);
3795 (adev->gfx.config.sc_prim_fifo_size_frontend <<
3797 (adev->gfx.config.sc_prim_fifo_size_backend <<
3799 (adev->gfx.config.sc_hiz_tile_fifo_size <<
3801 (adev->gfx.config.sc_earlyz_tile_fifo_size <<
3821 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
3822 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
3868 adev->gfx.rlc.funcs->get_csb_buffer(adev, adev->gfx.rlc.cs_ptr);
3871 adev->gfx.rlc.clear_state_gpu_addr >> 32);
3873 adev->gfx.rlc.clear_state_gpu_addr & 0xfffffffc);
3875 adev->gfx.rlc.clear_state_size);
3938 kmemdup(adev->gfx.rlc.register_list_format,
3939 adev->gfx.rlc.reg_list_format_size_bytes, GFP_KERNEL);
3945 adev->gfx.rlc.reg_list_format_size_bytes >> 2,
3957 for (i = 0; i < adev->gfx.rlc.reg_list_size_bytes >> 2; i++)
3958 WREG32(mmRLC_SRM_ARAM_DATA, adev->gfx.rlc.register_restore[i]);
3961 WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_list_format_start);
3962 for (i = 0; i < adev->gfx.rlc.reg_list_format_size_bytes >> 2; i++)
3965 list_size = adev->gfx.rlc.reg_list_size_bytes >> 2;
3967 WREG32(mmRLC_GPM_SCRATCH_ADDR, adev->gfx.rlc.reg_restore_list_size);
3972 adev->gfx.rlc.starting_offsets_start);
4037 WREG32(mmRLC_JUMP_TABLE_RESTORE, adev->gfx.rlc.cp_table_gpu_addr >> 8);
4039 WREG32(mmRLC_PG_ALWAYS_ON_CU_MASK, adev->gfx.cu_info.ao_cu_mask);
4086 adev->gfx.rlc.funcs->stop(adev);
4087 adev->gfx.rlc.funcs->reset(adev);
4089 adev->gfx.rlc.funcs->start(adev);
4142 struct amdgpu_ring *ring = &adev->gfx.gfx_ring[0];
4148 WREG32(mmCP_MAX_CONTEXT, adev->gfx.config.max_hw_contexts - 1);
4184 amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config);
4185 amdgpu_ring_write(ring, adev->gfx.config.rb_config[0][0].raster_config_1);
4206 /* no gfx doorbells on iceland */
4251 ring = &adev->gfx.gfx_ring[0];
4296 adev->gfx.kiq[0].ring.sched.ready = false;
4318 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
4323 if (!test_bit(i, adev->gfx.mec_bitmap[0].queue_bitmap))
4337 r = amdgpu_ring_alloc(kiq_ring, (8 * adev->gfx.num_compute_rings) + 8);
4351 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4352 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
4604 if (adev->gfx.kiq[0].mqd_backup)
4605 memcpy(mqd, adev->gfx.kiq[0].mqd_backup, sizeof(struct vi_mqd_allocation));
4628 if (adev->gfx.kiq[0].mqd_backup)
4629 memcpy(adev->gfx.kiq[0].mqd_backup, mqd, sizeof(struct vi_mqd_allocation));
4639 int mqd_idx = ring - &adev->gfx.compute_ring[0];
4651 if (adev->gfx.mec.mqd_backup[mqd_idx])
4652 memcpy(adev->gfx.mec.mqd_backup[mqd_idx], mqd, sizeof(struct vi_mqd_allocation));
4655 if (adev->gfx.mec.mqd_backup[mqd_idx])
4656 memcpy(mqd, adev->gfx.mec.mqd_backup[mqd_idx], sizeof(struct vi_mqd_allocation));
4679 ring = &adev->gfx.kiq[0].ring;
4705 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4706 ring = &adev->gfx.compute_ring[i];
4737 /* collect all the ring_tests here, gfx, kiq, compute */
4738 ring = &adev->gfx.gfx_ring[0];
4743 ring = &adev->gfx.kiq[0].ring;
4748 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4749 ring = &adev->gfx.compute_ring[i];
4798 r = adev->gfx.rlc.funcs->resume(adev);
4810 struct amdgpu_ring *kiq_ring = &adev->gfx.kiq[0].ring;
4812 r = amdgpu_ring_alloc(kiq_ring, 6 * adev->gfx.num_compute_rings);
4816 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
4817 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
4890 amdgpu_irq_put(adev, &adev->gfx.priv_reg_irq, 0);
4891 amdgpu_irq_put(adev, &adev->gfx.priv_inst_irq, 0);
4893 amdgpu_irq_put(adev, &adev->gfx.cp_ecc_error_irq, 0);
4895 amdgpu_irq_put(adev, &adev->gfx.sq_irq, 0);
4910 adev->gfx.rlc.funcs->stop(adev);
4980 adev->gfx.grbm_soft_reset = grbm_soft_reset;
4981 adev->gfx.srbm_soft_reset = srbm_soft_reset;
4984 adev->gfx.grbm_soft_reset = 0;
4985 adev->gfx.srbm_soft_reset = 0;
4995 if ((!adev->gfx.grbm_soft_reset) &&
4996 (!adev->gfx.srbm_soft_reset))
4999 grbm_soft_reset = adev->gfx.grbm_soft_reset;
5002 adev->gfx.rlc.funcs->stop(adev);
5015 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5016 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
5037 if ((!adev->gfx.grbm_soft_reset) &&
5038 (!adev->gfx.srbm_soft_reset))
5041 grbm_soft_reset = adev->gfx.grbm_soft_reset;
5042 srbm_soft_reset = adev->gfx.srbm_soft_reset;
5098 if ((!adev->gfx.grbm_soft_reset) &&
5099 (!adev->gfx.srbm_soft_reset))
5102 grbm_soft_reset = adev->gfx.grbm_soft_reset;
5110 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
5111 struct amdgpu_ring *ring = &adev->gfx.compute_ring[i];
5129 adev->gfx.rlc.funcs->start(adev);
5146 mutex_lock(&adev->gfx.gpu_clock_mutex);
5150 mutex_unlock(&adev->gfx.gpu_clock_mutex);
5265 adev->gfx.xcc_mask = 1;
5266 adev->gfx.num_gfx_rings = GFX8_NUM_GFX_RINGS;
5267 adev->gfx.num_compute_rings = min(amdgpu_gfx_get_num_kcq(adev),
5269 adev->gfx.funcs = &gfx_v8_0_gfx_funcs;
5283 r = amdgpu_irq_get(adev, &adev->gfx.priv_reg_irq, 0);
5287 r = amdgpu_irq_get(adev, &adev->gfx.priv_inst_irq, 0);
5296 r = amdgpu_irq_get(adev, &adev->gfx.cp_ecc_error_irq, 0);
5302 r = amdgpu_irq_get(adev, &adev->gfx.sq_irq, 0);
5784 /* read gfx register to wake up cgcg */
6615 amdgpu_fence_process(&adev->gfx.gfx_ring[0]);
6619 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
6620 ring = &adev->gfx.compute_ring[i];
6645 drm_sched_fault(&adev->gfx.gfx_ring[0].sched);
6649 for (i = 0; i < adev->gfx.num_compute_rings; i++) {
6650 ring = &adev->gfx.compute_ring[i];
6759 struct amdgpu_device *adev = container_of(work, struct amdgpu_device, gfx.sq_work.work);
6776 if (work_pending(&adev->gfx.sq_work.work)) {
6779 adev->gfx.sq_work.ih_data = ih_data;
6780 schedule_work(&adev->gfx.sq_work.work);
6855 * number of gfx waves. Setting 5 bit will make sure gfx only gets
6866 for (i = 0; i < adev->gfx.mec.num_pipe_per_mec; i++) {
7004 adev->gfx.kiq[0].ring.funcs = &gfx_v8_0_ring_funcs_kiq;
7006 for (i = 0; i < adev->gfx.num_gfx_rings; i++)
7007 adev->gfx.gfx_ring[i].funcs = &gfx_v8_0_ring_funcs_gfx;
7009 for (i = 0; i < adev->gfx.num_compute_rings; i++)
7010 adev->gfx.compute_ring[i].funcs = &gfx_v8_0_ring_funcs_compute;
7040 adev->gfx.eop_irq.num_types = AMDGPU_CP_IRQ_LAST;
7041 adev->gfx.eop_irq.funcs = &gfx_v8_0_eop_irq_funcs;
7043 adev->gfx.priv_reg_irq.num_types = 1;
7044 adev->gfx.priv_reg_irq.funcs = &gfx_v8_0_priv_reg_irq_funcs;
7046 adev->gfx.priv_inst_irq.num_types = 1;
7047 adev->gfx.priv_inst_irq.funcs = &gfx_v8_0_priv_inst_irq_funcs;
7049 adev->gfx.cp_ecc_error_irq.num_types = 1;
7050 adev->gfx.cp_ecc_error_irq.funcs = &gfx_v8_0_cp_ecc_error_irq_funcs;
7052 adev->gfx.sq_irq.num_types = 1;
7053 adev->gfx.sq_irq.funcs = &gfx_v8_0_sq_irq_funcs;
7058 adev->gfx.rlc.funcs = &iceland_rlc_funcs;
7091 mask = amdgpu_gfx_create_bitmask(adev->gfx.config.max_cu_per_sh);
7100 struct amdgpu_cu_info *cu_info = &adev->gfx.cu_info;
7109 ao_cu_num = adev->gfx.config.max_cu_per_sh;
7114 for (i = 0; i < adev->gfx.config.max_shader_engines; i++) {
7115 for (j = 0; j < adev->gfx.config.max_sh_per_se; j++) {
7126 for (k = 0; k < adev->gfx.config.max_cu_per_sh; k ++) {