Identifier search results for "gmu" under /linux-master/drivers/gpu/drm/msm/adreno/
a6xx_gmu.c
      20  static void a6xx_gmu_fault(struct a6xx_gmu *gmu)  (argument)
      22      struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
      27      gmu->hung = true;
      38      struct a6xx_gmu *gmu = data;  (local)
      41      status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
      42      gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);
      45      dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
      47      a6xx_gmu_fault(gmu);
      51      dev_err_ratelimited(gmu ...  (truncated in the index)
      62      struct a6xx_gmu *gmu = data;  (local)
      77  a6xx_gmu_sptprac_is_on(struct a6xx_gmu *gmu)  (argument)
      93  a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)  (argument)
     113      struct a6xx_gmu *gmu = &a6xx_gpu->gmu;  (local)
     173      struct a6xx_gmu *gmu = &a6xx_gpu->gmu;  (local)
     178  a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)  (argument)
     199  a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu)  (argument)
     204  a6xx_gmu_start(struct a6xx_gmu *gmu)  (argument)
     242  a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)  (argument)
     302  a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)  (argument)
     347  a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)  (argument)
     365  a6xx_sptprac_enable(struct a6xx_gmu *gmu)  (argument)
     387  a6xx_sptprac_disable(struct a6xx_gmu *gmu)  (argument)
     409  a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)  (argument)
     427  a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)  (argument)
     464  a6xx_rpmh_start(struct a6xx_gmu *gmu)  (argument)
     493  a6xx_rpmh_stop(struct a6xx_gmu *gmu)  (argument)
     516  a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)  (argument)
     653  a6xx_gmu_power_config(struct a6xx_gmu *gmu)  (argument)
     712  a6xx_gmu_fw_load(struct a6xx_gmu *gmu)  (argument)
     768  a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)  (argument)
     917  a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)  (argument)
     926  a6xx_gmu_rpmh_off(struct a6xx_gmu *gmu)  (argument)
     948  a6xx_gmu_force_off(struct a6xx_gmu *gmu)  (argument)
     988  a6xx_gmu_set_initial_freq(struct msm_gpu *gpu, struct a6xx_gmu *gmu)  (argument)
    1002  a6xx_gmu_set_initial_bw(struct msm_gpu *gpu, struct a6xx_gmu *gmu)  (argument)
    1019      struct a6xx_gmu *gmu = &a6xx_gpu->gmu;  (local)
    1111  a6xx_gmu_isidle(struct a6xx_gmu *gmu)  (argument)
    1127  a6xx_gmu_shutdown(struct a6xx_gmu *gmu)  (argument)
    1189      struct a6xx_gmu *gmu = &a6xx_gpu->gmu;  (local)
    1222  a6xx_gmu_memory_free(struct a6xx_gmu *gmu)  (argument)
    1235  a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo, size_t size, u64 iova, const char *name)  (argument)
    1276  a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)  (argument)
    1392  a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)  (argument)
    1442  a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)  (argument)
    1476  a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)  (argument)
    1515  a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev, const char *name, irq_handler_t handler)  (argument)
    1537      struct a6xx_gmu *gmu = &a6xx_gpu->gmu;  (local)
    1586      struct a6xx_gmu *gmu = container_of(nb, struct a6xx_gmu, pd_nb);  (local)
    1597      struct a6xx_gmu *gmu = &a6xx_gpu->gmu;  (local)
    1660      struct a6xx_gmu *gmu = &a6xx_gpu->gmu;  (local)
    ... (more matches not shown)
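Lines 41-47 above show the shape of the GMU interrupt handler: latch the AO interrupt status, acknowledge it by writing the same bits to the clear register, then dispatch on individual causes such as the watchdog bite. Below is a minimal userspace model of that latch-and-ack pattern; the register indices, the WDOG_BITE bit position, and the accessor signatures are invented for the sketch and do not reflect the real A6XX register map.

#include <stdio.h>
#include <stdint.h>

#define INT_STATUS	0		/* models REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS */
#define INT_CLR		1		/* models REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR */
#define WDOG_BITE	(1u << 0)	/* hypothetical bit position */

static uint32_t regs[2];

static uint32_t gmu_read(uint32_t offset)
{
	return regs[offset];
}

static void gmu_write(uint32_t offset, uint32_t value)
{
	/* Writing a set bit to the CLR register drops it from STATUS. */
	if (offset == INT_CLR)
		regs[INT_STATUS] &= ~value;
	else
		regs[offset] = value;
}

static void gmu_irq(void)
{
	/* Latch every pending cause, then acknowledge them all at once so a
	 * cause already read is acked exactly as seen. */
	uint32_t status = gmu_read(INT_STATUS);

	gmu_write(INT_CLR, status);

	if (status & WDOG_BITE)
		printf("GMU watchdog expired\n");
}

int main(void)
{
	regs[INT_STATUS] = WDOG_BITE;	/* pretend the watchdog fired */
	gmu_irq();
	printf("status after ack: 0x%x\n", gmu_read(INT_STATUS));
	return 0;
}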
a6xx_gmu.h
     104  static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)  (argument)
     106      return msm_readl(gmu->mmio + (offset << 2));
     109  static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value)  (argument)
     111      msm_writel(value, gmu->mmio + (offset << 2));
     115  gmu_write_bulk(struct a6xx_gmu *gmu, u32 offset, const u32 *data, u32 size)  (argument)
     117      memcpy_toio(gmu->mmio + (offset << 2), data, size);
     121  static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)  (argument)
     123      u32 val = gmu_read(gmu, reg);
     127      gmu_write(gmu, reg, val | or);
     130  static inline u64 gmu_read64(struct a6xx_gmu *gmu, u32 ...  (truncated in the index)  (argument)
     144  gmu_read_rscc(struct a6xx_gmu *gmu, u32 offset)  (argument)
     149  gmu_write_rscc(struct a6xx_gmu *gmu, u32 offset, u32 value)  (argument)
    ... (more matches not shown)
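The helpers above encode one convention worth calling out: a GMU register offset is a 32-bit word index, so "offset << 2" converts it to a byte offset into the ioremapped gmu->mmio region, and gmu_rmw() is a plain read-modify-write (clear the mask bits, then OR in the replacement). A self-contained sketch of the same arithmetic, with a byte array and fake_* names standing in for the real mapping and accessors:

#include <assert.h>
#include <stdint.h>
#include <string.h>

struct fake_gmu {
	uint8_t mmio[64];	/* stand-in for the ioremapped gmu->mmio region */
};

static uint32_t fake_read(struct fake_gmu *gmu, uint32_t offset)
{
	uint32_t val;

	/* offset is a word index; <<2 turns it into a byte offset */
	memcpy(&val, gmu->mmio + (offset << 2), sizeof(val));
	return val;
}

static void fake_write(struct fake_gmu *gmu, uint32_t offset, uint32_t value)
{
	memcpy(gmu->mmio + (offset << 2), &value, sizeof(value));
}

/* Mirrors gmu_rmw(): clear the mask bits, then OR in the new ones. */
static void fake_rmw(struct fake_gmu *gmu, uint32_t reg, uint32_t mask, uint32_t or)
{
	uint32_t val = fake_read(gmu, reg);

	val &= ~mask;
	fake_write(gmu, reg, val | or);
}

int main(void)
{
	struct fake_gmu gmu = { {0} };

	fake_write(&gmu, 3, 0xff);	/* word index 3 lands at byte 12 */
	fake_rmw(&gmu, 3, 0x0f, 0x100);	/* drop the low nibble, set bit 8 */
	assert(fake_read(&gmu, 3) == 0x1f0);
	return 0;
}

memcpy stands in for msm_readl()/msm_writel() here only to keep the model free of alignment and aliasing concerns; the real accessors perform MMIO reads and writes.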
a6xx_hfi.c
      28  static int a6xx_hfi_queue_read(struct a6xx_gmu *gmu,  (argument)
      59      if (!gmu->legacy)
      66  static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,  (argument)
      90      if (!gmu->legacy) {
      98      gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);
     102  static int a6xx_hfi_wait_for_ack(struct a6xx_gmu *gmu, u32 id, u32 seqnum,  (argument)
     105      struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
     110      ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
     114      DRM_DEV_ERROR(gmu->dev,
     121      gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ...  (truncated in the index)
     171  a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id, void *data, u32 size, u32 *payload, u32 payload_size)  (argument)
     194  a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)  (argument)
     206  a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version)  (argument)
     217  a6xx_hfi_send_perf_table_v1(struct a6xx_gmu *gmu)  (argument)
     239  a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)  (argument)
     631  a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)  (argument)
     662  a6xx_hfi_send_test(struct a6xx_gmu *gmu)  (argument)
     670  a6xx_hfi_send_start(struct a6xx_gmu *gmu)  (argument)
     678  a6xx_hfi_send_core_fw_start(struct a6xx_gmu *gmu)  (argument)
     686  a6xx_hfi_set_freq(struct a6xx_gmu *gmu, int index)  (argument)
     698  a6xx_hfi_send_prep_slumber(struct a6xx_gmu *gmu)  (argument)
     708  a6xx_hfi_start_v1(struct a6xx_gmu *gmu, int boot_state)  (argument)
     743  a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)  (argument)
     774  a6xx_hfi_stop(struct a6xx_gmu *gmu)  (argument)
     822  a6xx_hfi_init(struct a6xx_gmu *gmu)  (argument)
    ... (more matches not shown)
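a6xx_hfi_queue_write() (line 66) is a producer on a shared ring: copy the message into the queue, advance the write index, and ring a doorbell by writing REG_A6XX_GMU_HOST2GMU_INTR_SET (line 98) so the GMU firmware drains it. A rough userspace model of that submit path follows; struct fake_hfi_queue, its header layout, and the fullness check are invented for the example and are not the real HFI queue format.

#include <stdio.h>
#include <stdint.h>

#define QUEUE_DWORDS 64

struct fake_hfi_queue {
	uint32_t read_index;	/* consumer (GMU) side */
	uint32_t write_index;	/* producer (host) side */
	uint32_t data[QUEUE_DWORDS];
};

/* Models gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01) at line 98. */
static void ring_doorbell(void)
{
	printf("doorbell rung\n");
}

static int queue_write(struct fake_hfi_queue *q, const uint32_t *msg, uint32_t dwords)
{
	uint32_t idx = q->write_index;
	uint32_t used = (idx - q->read_index) % QUEUE_DWORDS;
	uint32_t i;

	/* Simplified full-check: keep one slot free so full != empty. */
	if (used + dwords >= QUEUE_DWORDS)
		return -1;

	for (i = 0; i < dwords; i++) {
		q->data[idx] = msg[i];
		idx = (idx + 1) % QUEUE_DWORDS;
	}

	q->write_index = idx;
	ring_doorbell();	/* tell the consumer new data is ready */
	return 0;
}

int main(void)
{
	struct fake_hfi_queue q = {0};
	uint32_t msg[3] = { 0x10, 0x20, 0x30 };

	return queue_write(&q, msg, 3);
}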
a6xx_gpu.h
      23      struct a6xx_gmu gmu;  (member in struct a6xx_gpu)
      86  int a6xx_gmu_wait_for_idle(struct a6xx_gmu *gmu);
      88  bool a6xx_gmu_isidle(struct a6xx_gmu *gmu);
      90  int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
      91  void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
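Note that struct a6xx_gpu embeds its GMU state by value (line 23). That is what lets code holding only a struct a6xx_gmu pointer, such as a6xx_gmu_fault() in a6xx_gmu.c above, recover the owning GPU via container_of(gmu, struct a6xx_gpu, gmu). A toy standalone illustration, with fake_gpu/fake_gmu types invented for the sketch:

#include <assert.h>
#include <stddef.h>

/* Reimplemented here so the sketch stands alone; the kernel provides it. */
#define container_of(ptr, type, member) \
	((type *)((char *)(ptr) - offsetof(type, member)))

struct fake_gmu {
	int hung;
};

struct fake_gpu {
	int id;
	struct fake_gmu gmu;	/* embedded by value, like struct a6xx_gpu */
};

int main(void)
{
	struct fake_gpu gpu = { .id = 630, .gmu = { 0 } };
	struct fake_gmu *gmu = &gpu.gmu;

	/* Recover the container from the member pointer alone. */
	struct fake_gpu *owner = container_of(gmu, struct fake_gpu, gmu);

	assert(owner == &gpu);
	assert(owner->id == 630);
	return 0;
}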
a6xx_gpu.c
      24      if (!adreno_has_gmu_wrapper(adreno_gpu) && !a6xx_gmu_isidle(&a6xx_gpu->gmu))
    1018      struct a6xx_gmu *gmu = &a6xx_gpu->gmu;  (local)
    1038      gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL,
    1040      gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL,
    1042      gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL,
    1073      gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
    1080      gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
    1704      struct a6xx_gmu *gmu = &a6xx_gpu->gmu;  (local)
    2072      struct a6xx_gmu *gmu = &a6xx_gpu->gmu;  (local)
    2614      struct a6xx_gmu *gmu = &a6xx_gpu->gmu;  (local)
    2692      struct a6xx_gmu *gmu = &a6xx_gpu->gmu;  (local)
    ... (more matches not shown)
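The two gmu_rmw() calls at lines 1073 and 1080 clear and then set bit 0 of the SPTPRAC clock-control register; given their proximity, a plausible reading (assumed here, not confirmed by this listing) is that they bracket a clock-gating reconfiguration. A sketch of that bracketing, with stand-in registers and an arbitrary value:

#include <assert.h>
#include <stdint.h>

static uint32_t sptprac_clock_control = 1;	/* stand-in register, bit 0 set */
static uint32_t clock_cntl;			/* stand-in for the guarded register */

/* Same semantics as gmu_rmw(): clear the mask bits, OR in the new ones. */
static void rmw(uint32_t *reg, uint32_t mask, uint32_t or)
{
	*reg = (*reg & ~mask) | or;
}

static void update_clock_cntl(uint32_t value)
{
	rmw(&sptprac_clock_control, 1, 0);	/* clear bit 0, as at line 1073 */
	clock_cntl = value;			/* the bracketed reconfiguration */
	rmw(&sptprac_clock_control, 0, 1);	/* set bit 0 again, as at line 1080 */
}

int main(void)
{
	update_clock_cntl(0x1234);
	assert(clock_cntl == 0x1234);
	assert(sptprac_clock_control & 1);
	return 0;
}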
a6xx_gpu_state.c
     155      if (!a6xx_gmu_sptprac_is_on(&a6xx_gpu->gmu))
    1164      struct a6xx_gmu *gmu = &a6xx_gpu->gmu;  (local)
    1184      val = gmu_read_rscc(gmu, offset);
    1186      val = gmu_read(gmu, offset);
    1213      if (!a6xx_gmu_gx_is_on(&a6xx_gpu->gmu))
    1251      struct a6xx_gmu *gmu = &a6xx_gpu->gmu;  (local)
    1254      BUILD_BUG_ON(ARRAY_SIZE(gmu->queues) != ARRAY_SIZE(a6xx_state->hfi_queue_history));
    1256      for (i = 0; i < ARRAY_SIZE(gmu ...  (truncated in the index)
    ... (more matches not shown)
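The BUILD_BUG_ON() at line 1254 turns the size match between the live gmu->queues array and the snapshot's hfi_queue_history into a compile-time guarantee, so the copy loop at line 1256 can never overrun either side. The same idea in portable C11 via static_assert, with types and sizes invented for the example:

#include <assert.h>	/* static_assert (C11) */

#define ARRAY_SIZE(a) (sizeof(a) / sizeof((a)[0]))

struct fake_gmu_state {
	unsigned int queues[3];			/* invented size */
};

struct fake_snapshot {
	unsigned int hfi_queue_history[3];	/* must stay in sync */
};

/* Fails the build, not the boot, if the arrays ever drift apart. */
static_assert(ARRAY_SIZE(((struct fake_gmu_state *)0)->queues) ==
	      ARRAY_SIZE(((struct fake_snapshot *)0)->hfi_queue_history),
	      "snapshot array must match the live queue array");

int main(void)
{
	struct fake_gmu_state gmu = { { 1, 2, 3 } };
	struct fake_snapshot snap;
	unsigned int i;

	/* The static_assert above makes this loop safe by construction. */
	for (i = 0; i < ARRAY_SIZE(gmu.queues); i++)
		snap.hfi_queue_history[i] = gmu.queues[i];

	return (int)(snap.hfi_queue_history[2] - 3);	/* 0 on success */
}

Build with a C11 compiler (for example, gcc -std=c11); change either array length and compilation fails instead of the snapshot silently truncating.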