// SPDX-License-Identifier: GPL-2.0-only
/* Copyright (c) 2016 The Linux Foundation. All rights reserved.
 */

#include <linux/pm_opp.h>
#include "a5xx_gpu.h"

/*
 * The GPMU data block is a block of shared registers that can be used to
 * communicate back and forth. These "registers" are defined by convention
 * with the GPMU firmware and are not bound to any specific hardware design.
 */

#define AGC_INIT_BASE REG_A5XX_GPMU_DATA_RAM_BASE
#define AGC_INIT_MSG_MAGIC (AGC_INIT_BASE + 5)
#define AGC_MSG_BASE (AGC_INIT_BASE + 7)

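/*
 * An AGC message is laid out in the GPMU data RAM as a state word, a
 * command word, a payload size (in bytes) and the payload itself, at the
 * offsets defined below.
 */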
#define AGC_MSG_STATE (AGC_MSG_BASE + 0)
#define AGC_MSG_COMMAND (AGC_MSG_BASE + 1)
#define AGC_MSG_PAYLOAD_SIZE (AGC_MSG_BASE + 3)
#define AGC_MSG_PAYLOAD(_o) ((AGC_MSG_BASE + 5) + (_o))

#define AGC_POWER_CONFIG_PRODUCTION_ID 1
#define AGC_INIT_MSG_VALUE 0xBABEFACE

/* AGC_LM_CONFIG (A540+) */
#define AGC_LM_CONFIG (136/4)
#define AGC_LM_CONFIG_GPU_VERSION_SHIFT 17
#define AGC_LM_CONFIG_ENABLE_GPMU_ADAPTIVE 1
#define AGC_LM_CONFIG_THROTTLE_DISABLE (2 << 8)
#define AGC_LM_CONFIG_ISENSE_ENABLE (1 << 4)
#define AGC_LM_CONFIG_ENABLE_ERROR (3 << 4)
#define AGC_LM_CONFIG_LLM_ENABLED (1 << 16)
#define AGC_LM_CONFIG_BCL_DISABLED (1 << 24)

#define AGC_LEVEL_CONFIG (140/4)

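/* Register/value pairs written to the GPMU as part of the A530 LM setup */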
static struct {
	uint32_t reg;
	uint32_t value;
} a5xx_sequence_regs[] = {
	{ 0xB9A1, 0x00010303 },
	{ 0xB9A2, 0x13000000 },
	{ 0xB9A3, 0x00460020 },
	{ 0xB9A4, 0x10000000 },
	{ 0xB9A5, 0x040A1707 },
	{ 0xB9A6, 0x00010000 },
	{ 0xB9A7, 0x0E000904 },
	{ 0xB9A8, 0x10000000 },
	{ 0xB9A9, 0x01165000 },
	{ 0xB9AA, 0x000E0002 },
	{ 0xB9AB, 0x03884141 },
	{ 0xB9AC, 0x10000840 },
	{ 0xB9AD, 0x572A5000 },
	{ 0xB9AE, 0x00000003 },
	{ 0xB9AF, 0x00000000 },
	{ 0xB9B0, 0x10000000 },
	{ 0xB828, 0x6C204010 },
	{ 0xB829, 0x6C204011 },
	{ 0xB82A, 0x6C204012 },
	{ 0xB82B, 0x6C204013 },
	{ 0xB82C, 0x6C204014 },
	{ 0xB90F, 0x00000004 },
	{ 0xB910, 0x00000002 },
	{ 0xB911, 0x00000002 },
	{ 0xB912, 0x00000002 },
	{ 0xB913, 0x00000002 },
	{ 0xB92F, 0x00000004 },
	{ 0xB930, 0x00000005 },
	{ 0xB931, 0x00000005 },
	{ 0xB932, 0x00000005 },
	{ 0xB933, 0x00000005 },
	{ 0xB96F, 0x00000001 },
	{ 0xB970, 0x00000003 },
	{ 0xB94F, 0x00000004 },
	{ 0xB950, 0x0000000B },
	{ 0xB951, 0x0000000B },
	{ 0xB952, 0x0000000B },
	{ 0xB953, 0x0000000B },
	{ 0xB907, 0x00000019 },
	{ 0xB927, 0x00000019 },
	{ 0xB947, 0x00000019 },
	{ 0xB967, 0x00000019 },
	{ 0xB987, 0x00000019 },
	{ 0xB906, 0x00220001 },
	{ 0xB926, 0x00220001 },
	{ 0xB946, 0x00220001 },
	{ 0xB966, 0x00220001 },
	{ 0xB986, 0x00300000 },
	{ 0xAC40, 0x0340FF41 },
	{ 0xAC41, 0x03BEFED0 },
	{ 0xAC42, 0x00331FED },
	{ 0xAC43, 0x021FFDD3 },
	{ 0xAC44, 0x5555AAAA },
	{ 0xAC45, 0x5555AAAA },
	{ 0xB9BA, 0x00000008 },
};

/*
 * Get the voltage (in millivolts) for the operating point at the specified
 * frequency, or 0 if no exact match is found
 */
static inline uint32_t _get_mvolts(struct msm_gpu *gpu, uint32_t freq)
{
	struct drm_device *dev = gpu->dev;
	struct msm_drm_private *priv = dev->dev_private;
	struct platform_device *pdev = priv->gpu_pdev;
	struct dev_pm_opp *opp;
	u32 ret = 0;

	opp = dev_pm_opp_find_freq_exact(&pdev->dev, freq, true);

	if (!IS_ERR(opp)) {
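		/* dev_pm_opp_get_voltage() returns microvolts - convert to mV */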
		ret = dev_pm_opp_get_voltage(opp) / 1000;
		dev_pm_opp_put(opp);
	}

	return ret;
}

/* Set up thermal limit management */
static void a530_lm_setup(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	unsigned int i;

	/* Write the block of sequence registers */
	for (i = 0; i < ARRAY_SIZE(a5xx_sequence_regs); i++)
		gpu_write(gpu, a5xx_sequence_regs[i].reg,
			a5xx_sequence_regs[i].value);

	/* Hard code the A530 GPU thermal sensor ID for the GPMU */
	gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_ID, 0x60007);
	gpu_write(gpu, REG_A5XX_GPMU_DELTA_TEMP_THRESHOLD, 0x01);
	gpu_write(gpu, REG_A5XX_GPMU_TEMP_SENSOR_CONFIG, 0x01);

	/* Until we get clock scaling, 0 is always the active power level */
	gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);

	gpu_write(gpu, REG_A5XX_GPMU_BASE_LEAKAGE, a5xx_gpu->lm_leakage);

	/* The threshold is fixed at 6000 for A530 */
	gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | 6000);

	gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
	gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x00201FF1);

	/* Write the voltage table */
	gpu_write(gpu, REG_A5XX_GPMU_BEC_ENABLE, 0x10001FFF);
	gpu_write(gpu, REG_A5XX_GDPM_CONFIG1, 0x201FF1);

	gpu_write(gpu, AGC_MSG_STATE, 1);
	gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);

	/* Write the max power - hard coded to 5448 for A530 */
	gpu_write(gpu, AGC_MSG_PAYLOAD(0), 5448);
	gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);

	/*
	 * For now just write the one voltage level - we will do more when we
	 * can do scaling
	 */
	gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate));
	gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000);

	gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE, 4 * sizeof(uint32_t));
	gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
}

#define PAYLOAD_SIZE(_size) ((_size) * sizeof(u32))
#define LM_DCVS_LIMIT 1
#define LEVEL_CONFIG ~(0x303)

static void a540_lm_setup(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	u32 config;

	/* The battery current limiter isn't enabled for A540 */
	config = AGC_LM_CONFIG_BCL_DISABLED;
	config |= adreno_patchid(adreno_gpu) << AGC_LM_CONFIG_GPU_VERSION_SHIFT;

	/* For now disable GPMU side throttling */
	config |= AGC_LM_CONFIG_THROTTLE_DISABLE;

	/* Until we get clock scaling, 0 is always the active power level */
	gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE, 0x80000000 | 0);

	/* Fixed at 6000 for now */
	gpu_write(gpu, REG_A5XX_GPMU_GPMU_PWR_THRESHOLD, 0x80000000 | 6000);

	gpu_write(gpu, AGC_MSG_STATE, 0x80000001);
	gpu_write(gpu, AGC_MSG_COMMAND, AGC_POWER_CONFIG_PRODUCTION_ID);

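	/* Write the max power - hard coded to 5448, as on A530 */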
	gpu_write(gpu, AGC_MSG_PAYLOAD(0), 5448);
	gpu_write(gpu, AGC_MSG_PAYLOAD(1), 1);

	gpu_write(gpu, AGC_MSG_PAYLOAD(2), _get_mvolts(gpu, gpu->fast_rate));
	gpu_write(gpu, AGC_MSG_PAYLOAD(3), gpu->fast_rate / 1000000);

	gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LM_CONFIG), config);
	gpu_write(gpu, AGC_MSG_PAYLOAD(AGC_LEVEL_CONFIG), LEVEL_CONFIG);
	gpu_write(gpu, AGC_MSG_PAYLOAD_SIZE,
		PAYLOAD_SIZE(AGC_LEVEL_CONFIG + 1));

	gpu_write(gpu, AGC_INIT_MSG_MAGIC, AGC_INIT_MSG_VALUE);
}

/* Enable SP/TP power collapse */
static void a5xx_pc_init(struct msm_gpu *gpu)
{
	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_CTRL, 0x7F);
	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_BINNING_CTRL, 0);
	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_INTER_FRAME_HYST, 0xA0080);
	gpu_write(gpu, REG_A5XX_GPMU_PWR_COL_STAGGER_DELAY, 0x600040);
}

/* Enable the GPMU microcontroller */
static int a5xx_gpmu_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	struct msm_ringbuffer *ring = gpu->rb[0];

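	/* Nothing to do if no GPMU microcode was loaded */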
	if (!a5xx_gpu->gpmu_dwords)
		return 0;

	/* Turn off protected mode for this operation */
	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
	OUT_RING(ring, 0);

	/* Kick off the IB to load the GPMU microcode */
	OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
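	/* 64-bit IOVA of the microcode buffer followed by its size in dwords */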
	OUT_RING(ring, lower_32_bits(a5xx_gpu->gpmu_iova));
	OUT_RING(ring, upper_32_bits(a5xx_gpu->gpmu_iova));
	OUT_RING(ring, a5xx_gpu->gpmu_dwords);

	/* Turn back on protected mode */
	OUT_PKT7(ring, CP_SET_PROTECTED_MODE, 1);
	OUT_RING(ring, 1);

	a5xx_flush(gpu, ring, true);

	if (!a5xx_idle(gpu, ring)) {
		DRM_ERROR("%s: Unable to load GPMU firmware. GPMU will not be active\n",
			gpu->name);
		return -EINVAL;
	}

	if (adreno_is_a530(adreno_gpu))
		gpu_write(gpu, REG_A5XX_GPMU_WFI_CONFIG, 0x4014);

	/* Kick off the GPMU */
	gpu_write(gpu, REG_A5XX_GPMU_CM3_SYSRESET, 0x0);

	/*
	 * Wait for the GPMU to respond. It isn't fatal if it doesn't, we just
	 * won't have advanced power collapse.
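	 * A ready GPMU writes 0xBABEFACE (the AGC init message magic) to
	 * GPMU_GENERAL_0, which is what we poll for below.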
	 */
	if (spin_usecs(gpu, 25, REG_A5XX_GPMU_GENERAL_0, 0xFFFFFFFF,
		0xBABEFACE))
		DRM_ERROR("%s: GPMU firmware initialization timed out\n",
			gpu->name);

	if (!adreno_is_a530(adreno_gpu)) {
		u32 val = gpu_read(gpu, REG_A5XX_GPMU_GENERAL_1);

		if (val)
			DRM_ERROR("%s: GPMU firmware initialization failed: %d\n",
				  gpu->name, val);
	}

	return 0;
}

/* Enable limits management */
static void a5xx_lm_enable(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);

	/* This init sequence only applies to A530 */
	if (!adreno_is_a530(adreno_gpu))
		return;

	gpu_write(gpu, REG_A5XX_GDPM_INT_MASK, 0x0);
	gpu_write(gpu, REG_A5XX_GDPM_INT_EN, 0x0A);
	gpu_write(gpu, REG_A5XX_GPMU_GPMU_VOLTAGE_INTR_EN_MASK, 0x01);
	gpu_write(gpu, REG_A5XX_GPMU_TEMP_THRESHOLD_INTR_EN_MASK, 0x50000);
	gpu_write(gpu, REG_A5XX_GPMU_THROTTLE_UNMASK_FORCE_CTRL, 0x30000);

	gpu_write(gpu, REG_A5XX_GPMU_CLOCK_THROTTLE_CTRL, 0x011);
}

int a5xx_power_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	int ret;

	/* Not all A5xx chips have a GPMU */
	if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu)))
		return 0;

	/* Set up the limits management */
	if (adreno_is_a530(adreno_gpu))
		a530_lm_setup(gpu);
	else if (adreno_is_a540(adreno_gpu))
		a540_lm_setup(gpu);

	/* Set up SP/TP power collapse */
	a5xx_pc_init(gpu);

	/* Start the GPMU */
	ret = a5xx_gpmu_init(gpu);
	if (ret)
		return ret;

	/* Start the limits management */
	a5xx_lm_enable(gpu);

	return 0;
}

void a5xx_gpmu_ucode_init(struct msm_gpu *gpu)
{
	struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
	struct a5xx_gpu *a5xx_gpu = to_a5xx_gpu(adreno_gpu);
	struct drm_device *drm = gpu->dev;
	uint32_t dwords = 0, offset = 0, bosize;
	unsigned int *data, *ptr, *cmds;
	unsigned int cmds_size;

	if (!(adreno_is_a530(adreno_gpu) || adreno_is_a540(adreno_gpu)))
		return;

	if (a5xx_gpu->gpmu_bo)
		return;

	data = (unsigned int *) adreno_gpu->fw[ADRENO_FW_GPMU]->data;

	/*
	 * The first dword is the size of the remaining data in dwords. Use it
	 * as a checksum of sorts and make sure it matches the actual size of
	 * the firmware that we read
	 */

	if (adreno_gpu->fw[ADRENO_FW_GPMU]->size < 8 ||
		(data[0] < 2) || (data[0] >=
			(adreno_gpu->fw[ADRENO_FW_GPMU]->size >> 2)))
		return;

	/* The second dword is an ID - look for 2 (GPMU_FIRMWARE_ID) */
	if (data[1] != 2)
		return;

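	/*
	 * data[2] gives the number of dwords between the three-dword header
	 * and the start of the register write commands
	 */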
	cmds = data + data[2] + 3;
	cmds_size = data[0] - data[2] - 2;

	/*
	 * A single type4 opcode can only have so many values attached so
	 * add enough opcodes to load all of the commands
	 */
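	/* One header dword per PKT4 chunk; '<< 2' converts dwords to bytes */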
	bosize = (cmds_size + (cmds_size / TYPE4_MAX_PAYLOAD) + 1) << 2;

	ptr = msm_gem_kernel_new(drm, bosize,
		MSM_BO_WC | MSM_BO_GPU_READONLY, gpu->aspace,
		&a5xx_gpu->gpmu_bo, &a5xx_gpu->gpmu_iova);
	if (IS_ERR(ptr))
		return;

	msm_gem_object_set_name(a5xx_gpu->gpmu_bo, "gpmufw");

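	/* Copy the commands in chunks of at most TYPE4_MAX_PAYLOAD dwords */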
	while (cmds_size > 0) {
		int i;
		uint32_t _size = cmds_size > TYPE4_MAX_PAYLOAD ?
			TYPE4_MAX_PAYLOAD : cmds_size;

		ptr[dwords++] = PKT4(REG_A5XX_GPMU_INST_RAM_BASE + offset,
			_size);

		for (i = 0; i < _size; i++)
			ptr[dwords++] = *cmds++;

		offset += _size;
		cmds_size -= _size;
	}

	msm_gem_put_vaddr(a5xx_gpu->gpmu_bo);
	a5xx_gpu->gpmu_dwords = dwords;
}