1/*	$NetBSD: amdgpu_si_dpm.c,v 1.3 2021/12/19 12:21:29 riastradh Exp $	*/
2
3/*
4 * Copyright 2013 Advanced Micro Devices, Inc.
5 *
6 * Permission is hereby granted, free of charge, to any person obtaining a
7 * copy of this software and associated documentation files (the "Software"),
8 * to deal in the Software without restriction, including without limitation
9 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
10 * and/or sell copies of the Software, and to permit persons to whom the
11 * Software is furnished to do so, subject to the following conditions:
12 *
13 * The above copyright notice and this permission notice shall be included in
14 * all copies or substantial portions of the Software.
15 *
16 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
17 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
18 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.  IN NO EVENT SHALL
19 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
20 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
21 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
22 * OTHER DEALINGS IN THE SOFTWARE.
23 *
24 */
25
26#include <sys/cdefs.h>
27__KERNEL_RCSID(0, "$NetBSD: amdgpu_si_dpm.c,v 1.3 2021/12/19 12:21:29 riastradh Exp $");
28
29#include <linux/module.h>
30#include <linux/pci.h>
31
32#include "amdgpu.h"
33#include "amdgpu_pm.h"
34#include "amdgpu_dpm.h"
35#include "amdgpu_atombios.h"
36#include "amd_pcie.h"
37#include "sid.h"
38#include "r600_dpm.h"
39#include "si_dpm.h"
40#include "atom.h"
41#include "../include/pptable.h"
42#include <linux/math64.h>
43#include <linux/seq_file.h>
44#include <linux/firmware.h>
45
46#define MC_CG_ARB_FREQ_F0           0x0a
47#define MC_CG_ARB_FREQ_F1           0x0b
48#define MC_CG_ARB_FREQ_F2           0x0c
49#define MC_CG_ARB_FREQ_F3           0x0d
50
51#define SMC_RAM_END                 0x20000
52
53#define SCLK_MIN_DEEPSLEEP_FREQ     1350
54
55
56/* sizeof(ATOM_PPLIB_EXTENDEDHEADER) */
57#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V2 12
58#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V3 14
59#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V4 16
60#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V5 18
61#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V6 20
62#define SIZE_OF_ATOM_PPLIB_EXTENDEDHEADER_V7 22
63
64#define BIOS_SCRATCH_4                                    0x5cd
65
66MODULE_FIRMWARE("amdgpu/tahiti_smc.bin");
67MODULE_FIRMWARE("amdgpu/pitcairn_smc.bin");
68MODULE_FIRMWARE("amdgpu/pitcairn_k_smc.bin");
69MODULE_FIRMWARE("amdgpu/verde_smc.bin");
70MODULE_FIRMWARE("amdgpu/verde_k_smc.bin");
71MODULE_FIRMWARE("amdgpu/oland_smc.bin");
72MODULE_FIRMWARE("amdgpu/oland_k_smc.bin");
73MODULE_FIRMWARE("amdgpu/hainan_smc.bin");
74MODULE_FIRMWARE("amdgpu/hainan_k_smc.bin");
75MODULE_FIRMWARE("amdgpu/banks_k_2_smc.bin");
76
77static const struct amd_pm_funcs si_dpm_funcs;
78
79union power_info {
80	struct _ATOM_POWERPLAY_INFO info;
81	struct _ATOM_POWERPLAY_INFO_V2 info_2;
82	struct _ATOM_POWERPLAY_INFO_V3 info_3;
83	struct _ATOM_PPLIB_POWERPLAYTABLE pplib;
84	struct _ATOM_PPLIB_POWERPLAYTABLE2 pplib2;
85	struct _ATOM_PPLIB_POWERPLAYTABLE3 pplib3;
86	struct _ATOM_PPLIB_POWERPLAYTABLE4 pplib4;
87	struct _ATOM_PPLIB_POWERPLAYTABLE5 pplib5;
88};
89
90union fan_info {
91	struct _ATOM_PPLIB_FANTABLE fan;
92	struct _ATOM_PPLIB_FANTABLE2 fan2;
93	struct _ATOM_PPLIB_FANTABLE3 fan3;
94};
95
96union pplib_clock_info {
97	struct _ATOM_PPLIB_R600_CLOCK_INFO r600;
98	struct _ATOM_PPLIB_RS780_CLOCK_INFO rs780;
99	struct _ATOM_PPLIB_EVERGREEN_CLOCK_INFO evergreen;
100	struct _ATOM_PPLIB_SUMO_CLOCK_INFO sumo;
101	struct _ATOM_PPLIB_SI_CLOCK_INFO si;
102};
103
104static const u32 r600_utc[R600_PM_NUMBER_OF_TC] =
105{
106	R600_UTC_DFLT_00,
107	R600_UTC_DFLT_01,
108	R600_UTC_DFLT_02,
109	R600_UTC_DFLT_03,
110	R600_UTC_DFLT_04,
111	R600_UTC_DFLT_05,
112	R600_UTC_DFLT_06,
113	R600_UTC_DFLT_07,
114	R600_UTC_DFLT_08,
115	R600_UTC_DFLT_09,
116	R600_UTC_DFLT_10,
117	R600_UTC_DFLT_11,
118	R600_UTC_DFLT_12,
119	R600_UTC_DFLT_13,
120	R600_UTC_DFLT_14,
121};
122
123static const u32 r600_dtc[R600_PM_NUMBER_OF_TC] =
124{
125	R600_DTC_DFLT_00,
126	R600_DTC_DFLT_01,
127	R600_DTC_DFLT_02,
128	R600_DTC_DFLT_03,
129	R600_DTC_DFLT_04,
130	R600_DTC_DFLT_05,
131	R600_DTC_DFLT_06,
132	R600_DTC_DFLT_07,
133	R600_DTC_DFLT_08,
134	R600_DTC_DFLT_09,
135	R600_DTC_DFLT_10,
136	R600_DTC_DFLT_11,
137	R600_DTC_DFLT_12,
138	R600_DTC_DFLT_13,
139	R600_DTC_DFLT_14,
140};
141
142static const struct si_cac_config_reg cac_weights_tahiti[] =
143{
144	{ 0x0, 0x0000ffff, 0, 0xc, SISLANDS_CACCONFIG_CGIND },
145	{ 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
146	{ 0x1, 0x0000ffff, 0, 0x101, SISLANDS_CACCONFIG_CGIND },
147	{ 0x1, 0xffff0000, 16, 0xc, SISLANDS_CACCONFIG_CGIND },
148	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
149	{ 0x3, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
150	{ 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
151	{ 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
152	{ 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
153	{ 0x5, 0x0000ffff, 0, 0x8fc, SISLANDS_CACCONFIG_CGIND },
154	{ 0x5, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
155	{ 0x6, 0x0000ffff, 0, 0x95, SISLANDS_CACCONFIG_CGIND },
156	{ 0x6, 0xffff0000, 16, 0x34e, SISLANDS_CACCONFIG_CGIND },
157	{ 0x18f, 0x0000ffff, 0, 0x1a1, SISLANDS_CACCONFIG_CGIND },
158	{ 0x7, 0x0000ffff, 0, 0xda, SISLANDS_CACCONFIG_CGIND },
159	{ 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
160	{ 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
161	{ 0x8, 0xffff0000, 16, 0x46, SISLANDS_CACCONFIG_CGIND },
162	{ 0x9, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
163	{ 0xa, 0x0000ffff, 0, 0x208, SISLANDS_CACCONFIG_CGIND },
164	{ 0xb, 0x0000ffff, 0, 0xe7, SISLANDS_CACCONFIG_CGIND },
165	{ 0xb, 0xffff0000, 16, 0x948, SISLANDS_CACCONFIG_CGIND },
166	{ 0xc, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
167	{ 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
168	{ 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
169	{ 0xe, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
170	{ 0xf, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
171	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
172	{ 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
173	{ 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
174	{ 0x11, 0x0000ffff, 0, 0x167, SISLANDS_CACCONFIG_CGIND },
175	{ 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
176	{ 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
177	{ 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
178	{ 0x13, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
179	{ 0x14, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
180	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
181	{ 0x15, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
182	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
183	{ 0x16, 0x0000ffff, 0, 0x31, SISLANDS_CACCONFIG_CGIND },
184	{ 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
185	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
186	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
187	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
188	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
189	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
190	{ 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
191	{ 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
192	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
193	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
194	{ 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
195	{ 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
196	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
197	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
198	{ 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
199	{ 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
200	{ 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
201	{ 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
202	{ 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
203	{ 0x6d, 0x0000ffff, 0, 0x18e, SISLANDS_CACCONFIG_CGIND },
204	{ 0xFFFFFFFF }
205};
206
207static const struct si_cac_config_reg lcac_tahiti[] =
208{
209	{ 0x143, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
210	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
211	{ 0x146, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
212	{ 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
213	{ 0x149, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
214	{ 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
215	{ 0x14c, 0x0001fffe, 1, 0x3, SISLANDS_CACCONFIG_CGIND },
216	{ 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
217	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
218	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
219	{ 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
220	{ 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
221	{ 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
222	{ 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
223	{ 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
224	{ 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
225	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
226	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
227	{ 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
228	{ 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
229	{ 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
230	{ 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
231	{ 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
232	{ 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
233	{ 0x8c, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
234	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
235	{ 0x8f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
236	{ 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
237	{ 0x92, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
238	{ 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
239	{ 0x95, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
240	{ 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
241	{ 0x14f, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
242	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
243	{ 0x152, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
244	{ 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
245	{ 0x155, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
246	{ 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
247	{ 0x158, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
248	{ 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
249	{ 0x110, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
250	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
251	{ 0x113, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
252	{ 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
253	{ 0x116, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
254	{ 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
255	{ 0x119, 0x0001fffe, 1, 0x8, SISLANDS_CACCONFIG_CGIND },
256	{ 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
257	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
258	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
259	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
260	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
261	{ 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
262	{ 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
263	{ 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
264	{ 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
265	{ 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
266	{ 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
267	{ 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
268	{ 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
269	{ 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
270	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
271	{ 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
272	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
273	{ 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
274	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
275	{ 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
276	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
277	{ 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
278	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
279	{ 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
280	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
281	{ 0x16d, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
282	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
283	{ 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
284	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
285	{ 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
286	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
287	{ 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
288	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
289	{ 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
290	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
291	{ 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
292	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
293	{ 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
294	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
295	{ 0xFFFFFFFF }
296
297};
298
299static const struct si_cac_config_reg cac_override_tahiti[] =
300{
301	{ 0xFFFFFFFF }
302};
303
304static const struct si_powertune_data powertune_data_tahiti =
305{
306	((1 << 16) | 27027),
307	6,
308	0,
309	4,
310	95,
311	{
312		0UL,
313		0UL,
314		4521550UL,
315		309631529UL,
316		-1270850L,
317		4513710L,
318		40
319	},
320	595000000UL,
321	12,
322	{
323		0,
324		0,
325		0,
326		0,
327		0,
328		0,
329		0,
330		0
331	},
332	true
333};
334
335static const struct si_dte_data dte_data_tahiti =
336{
337	{ 1159409, 0, 0, 0, 0 },
338	{ 777, 0, 0, 0, 0 },
339	2,
340	54000,
341	127000,
342	25,
343	2,
344	10,
345	13,
346	{ 27, 31, 35, 39, 43, 47, 54, 61, 67, 74, 81, 88, 95, 0, 0, 0 },
347	{ 240888759, 221057860, 235370597, 162287531, 158510299, 131423027, 116673180, 103067515, 87941937, 76209048, 68209175, 64090048, 58301890, 0, 0, 0 },
348	{ 12024, 11189, 11451, 8411, 7939, 6666, 5681, 4905, 4241, 3720, 3354, 3122, 2890, 0, 0, 0 },
349	85,
350	false
351};
352
353#if 0
354static const struct si_dte_data dte_data_tahiti_le =
355{
356	{ 0x1E8480, 0x7A1200, 0x2160EC0, 0x3938700, 0 },
357	{ 0x7D, 0x7D, 0x4E4, 0xB00, 0 },
358	0x5,
359	0xAFC8,
360	0x64,
361	0x32,
362	1,
363	0,
364	0x10,
365	{ 0x78, 0x7C, 0x82, 0x88, 0x8E, 0x94, 0x9A, 0xA0, 0xA6, 0xAC, 0xB0, 0xB4, 0xB8, 0xBC, 0xC0, 0xC4 },
366	{ 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700, 0x3938700 },
367	{ 0x2AF8, 0x2AF8, 0x29BB, 0x27F9, 0x2637, 0x2475, 0x22B3, 0x20F1, 0x1F2F, 0x1D6D, 0x1734, 0x1414, 0x10F4, 0xDD4, 0xAB4, 0x794 },
368	85,
369	true
370};
371#endif
372
373static const struct si_dte_data dte_data_tahiti_pro =
374{
375	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
376	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
377	5,
378	45000,
379	100,
380	0xA,
381	1,
382	0,
383	0x10,
384	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
385	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
386	{ 0x7D0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
387	90,
388	true
389};
390
391static const struct si_dte_data dte_data_new_zealand =
392{
393	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0 },
394	{ 0x29B, 0x3E9, 0x537, 0x7D2, 0 },
395	0x5,
396	0xAFC8,
397	0x69,
398	0x32,
399	1,
400	0,
401	0x10,
402	{ 0x82, 0xA0, 0xB4, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE, 0xFE },
403	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
404	{ 0xDAC, 0x1388, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685, 0x685 },
405	85,
406	true
407};
408
409static const struct si_dte_data dte_data_aruba_pro =
410{
411	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
412	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
413	5,
414	45000,
415	100,
416	0xA,
417	1,
418	0,
419	0x10,
420	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
421	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
422	{ 0x1000, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
423	90,
424	true
425};
426
427static const struct si_dte_data dte_data_malta =
428{
429	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
430	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
431	5,
432	45000,
433	100,
434	0xA,
435	1,
436	0,
437	0x10,
438	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
439	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
440	{ 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
441	90,
442	true
443};
444
445static const struct si_cac_config_reg cac_weights_pitcairn[] =
446{
447	{ 0x0, 0x0000ffff, 0, 0x8a, SISLANDS_CACCONFIG_CGIND },
448	{ 0x0, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
449	{ 0x1, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
450	{ 0x1, 0xffff0000, 16, 0x24d, SISLANDS_CACCONFIG_CGIND },
451	{ 0x2, 0x0000ffff, 0, 0x19, SISLANDS_CACCONFIG_CGIND },
452	{ 0x3, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
453	{ 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
454	{ 0x4, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
455	{ 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
456	{ 0x5, 0x0000ffff, 0, 0xc11, SISLANDS_CACCONFIG_CGIND },
457	{ 0x5, 0xffff0000, 16, 0x7f3, SISLANDS_CACCONFIG_CGIND },
458	{ 0x6, 0x0000ffff, 0, 0x403, SISLANDS_CACCONFIG_CGIND },
459	{ 0x6, 0xffff0000, 16, 0x367, SISLANDS_CACCONFIG_CGIND },
460	{ 0x18f, 0x0000ffff, 0, 0x4c9, SISLANDS_CACCONFIG_CGIND },
461	{ 0x7, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
462	{ 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
463	{ 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
464	{ 0x8, 0xffff0000, 16, 0x45d, SISLANDS_CACCONFIG_CGIND },
465	{ 0x9, 0x0000ffff, 0, 0x36d, SISLANDS_CACCONFIG_CGIND },
466	{ 0xa, 0x0000ffff, 0, 0x534, SISLANDS_CACCONFIG_CGIND },
467	{ 0xb, 0x0000ffff, 0, 0x5da, SISLANDS_CACCONFIG_CGIND },
468	{ 0xb, 0xffff0000, 16, 0x880, SISLANDS_CACCONFIG_CGIND },
469	{ 0xc, 0x0000ffff, 0, 0x201, SISLANDS_CACCONFIG_CGIND },
470	{ 0xd, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
471	{ 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
472	{ 0xe, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
473	{ 0xf, 0x0000ffff, 0, 0x1f, SISLANDS_CACCONFIG_CGIND },
474	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
475	{ 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
476	{ 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
477	{ 0x11, 0x0000ffff, 0, 0x5de, SISLANDS_CACCONFIG_CGIND },
478	{ 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
479	{ 0x12, 0x0000ffff, 0, 0x7b, SISLANDS_CACCONFIG_CGIND },
480	{ 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
481	{ 0x13, 0xffff0000, 16, 0x13, SISLANDS_CACCONFIG_CGIND },
482	{ 0x14, 0x0000ffff, 0, 0xf9, SISLANDS_CACCONFIG_CGIND },
483	{ 0x15, 0x0000ffff, 0, 0x66, SISLANDS_CACCONFIG_CGIND },
484	{ 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
485	{ 0x4e, 0x0000ffff, 0, 0x13, SISLANDS_CACCONFIG_CGIND },
486	{ 0x16, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
487	{ 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
488	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
489	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
490	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
491	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
492	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
493	{ 0x1a, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
494	{ 0x1a, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
495	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
496	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
497	{ 0x1c, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
498	{ 0x1c, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
499	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
500	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
501	{ 0x1e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
502	{ 0x1e, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
503	{ 0x1f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
504	{ 0x1f, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
505	{ 0x20, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
506	{ 0x6d, 0x0000ffff, 0, 0x186, SISLANDS_CACCONFIG_CGIND },
507	{ 0xFFFFFFFF }
508};
509
510static const struct si_cac_config_reg lcac_pitcairn[] =
511{
512	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
513	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
514	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
515	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
516	{ 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
517	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
518	{ 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
519	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
520	{ 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
521	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
522	{ 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
523	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
524	{ 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
525	{ 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
526	{ 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
527	{ 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
528	{ 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
529	{ 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
530	{ 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
531	{ 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
532	{ 0x8f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
533	{ 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
534	{ 0x146, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
535	{ 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
536	{ 0x9e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
537	{ 0x9e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
538	{ 0x10a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
539	{ 0x10a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
540	{ 0x116, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
541	{ 0x116, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
542	{ 0x155, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
543	{ 0x155, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
544	{ 0x92, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
545	{ 0x92, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
546	{ 0x149, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
547	{ 0x149, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
548	{ 0x101, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
549	{ 0x101, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
550	{ 0x10d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
551	{ 0x10d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
552	{ 0x119, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
553	{ 0x119, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
554	{ 0x158, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
555	{ 0x158, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
556	{ 0x95, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
557	{ 0x95, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
558	{ 0x14c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
559	{ 0x14c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
560	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
561	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
562	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
563	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
564	{ 0x122, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
565	{ 0x122, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
566	{ 0x125, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
567	{ 0x125, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
568	{ 0x128, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
569	{ 0x128, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
570	{ 0x12b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
571	{ 0x12b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
572	{ 0x164, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
573	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
574	{ 0x167, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
575	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
576	{ 0x16a, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
577	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
578	{ 0x15e, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
579	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
580	{ 0x161, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
581	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
582	{ 0x15b, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
583	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
584	{ 0x16d, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
585	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
586	{ 0x170, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
587	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
588	{ 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
589	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
590	{ 0x176, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
591	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
592	{ 0x179, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
593	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
594	{ 0x17c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
595	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
596	{ 0x17f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
597	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
598	{ 0xFFFFFFFF }
599};
600
601static const struct si_cac_config_reg cac_override_pitcairn[] =
602{
603    { 0xFFFFFFFF }
604};
605
606static const struct si_powertune_data powertune_data_pitcairn =
607{
608	((1 << 16) | 27027),
609	5,
610	0,
611	6,
612	100,
613	{
614		51600000UL,
615		1800000UL,
616		7194395UL,
617		309631529UL,
618		-1270850L,
619		4513710L,
620		100
621	},
622	117830498UL,
623	12,
624	{
625		0,
626		0,
627		0,
628		0,
629		0,
630		0,
631		0,
632		0
633	},
634	true
635};
636
637static const struct si_dte_data dte_data_pitcairn =
638{
639	{ 0, 0, 0, 0, 0 },
640	{ 0, 0, 0, 0, 0 },
641	0,
642	0,
643	0,
644	0,
645	0,
646	0,
647	0,
648	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
649	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
650	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
651	0,
652	false
653};
654
655static const struct si_dte_data dte_data_curacao_xt =
656{
657	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
658	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
659	5,
660	45000,
661	100,
662	0xA,
663	1,
664	0,
665	0x10,
666	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
667	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
668	{ 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
669	90,
670	true
671};
672
673static const struct si_dte_data dte_data_curacao_pro =
674{
675	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
676	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
677	5,
678	45000,
679	100,
680	0xA,
681	1,
682	0,
683	0x10,
684	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
685	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
686	{ 0x1D17, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
687	90,
688	true
689};
690
691static const struct si_dte_data dte_data_neptune_xt =
692{
693	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
694	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
695	5,
696	45000,
697	100,
698	0xA,
699	1,
700	0,
701	0x10,
702	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
703	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
704	{ 0x3A2F, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
705	90,
706	true
707};
708
709static const struct si_cac_config_reg cac_weights_chelsea_pro[] =
710{
711	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
712	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
713	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
714	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
715	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
716	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
717	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
718	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
719	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
720	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
721	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
722	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
723	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
724	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
725	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
726	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
727	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
728	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
729	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
730	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
731	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
732	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
733	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
734	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
735	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
736	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
737	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
738	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
739	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
740	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
741	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
742	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
743	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
744	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
745	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
746	{ 0x14, 0x0000ffff, 0, 0x2BD, SISLANDS_CACCONFIG_CGIND },
747	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
748	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
749	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
750	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
751	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
752	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
753	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
754	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
755	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
756	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
757	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
758	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
759	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
760	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
761	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
762	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
763	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
764	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
765	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
766	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
767	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
768	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
769	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
770	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
771	{ 0xFFFFFFFF }
772};
773
774static const struct si_cac_config_reg cac_weights_chelsea_xt[] =
775{
776	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
777	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
778	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
779	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
780	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
781	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
782	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
783	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
784	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
785	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
786	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
787	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
788	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
789	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
790	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
791	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
792	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
793	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
794	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
795	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
796	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
797	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
798	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
799	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
800	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
801	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
802	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
803	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
804	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
805	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
806	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
807	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
808	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
809	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
810	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
811	{ 0x14, 0x0000ffff, 0, 0x30A, SISLANDS_CACCONFIG_CGIND },
812	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
813	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
814	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
815	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
816	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
817	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
818	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
819	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
820	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
821	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
822	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
823	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
824	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
825	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
826	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
827	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
828	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
829	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
830	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
831	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
832	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
833	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
834	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
835	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
836	{ 0xFFFFFFFF }
837};
838
839static const struct si_cac_config_reg cac_weights_heathrow[] =
840{
841	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
842	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
843	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
844	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
845	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
846	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
847	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
848	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
849	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
850	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
851	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
852	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
853	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
854	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
855	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
856	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
857	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
858	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
859	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
860	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
861	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
862	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
863	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
864	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
865	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
866	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
867	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
868	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
869	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
870	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
871	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
872	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
873	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
874	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
875	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
876	{ 0x14, 0x0000ffff, 0, 0x362, SISLANDS_CACCONFIG_CGIND },
877	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
878	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
879	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
880	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
881	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
882	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
883	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
884	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
885	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
886	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
887	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
888	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
889	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
890	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
891	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
892	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
893	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
894	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
895	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
896	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
897	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
898	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
899	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
900	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
901	{ 0xFFFFFFFF }
902};
903
904static const struct si_cac_config_reg cac_weights_cape_verde_pro[] =
905{
906	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
907	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
908	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
909	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
910	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
911	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
912	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
913	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
914	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
915	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
916	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
917	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
918	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
919	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
920	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
921	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
922	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
923	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
924	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
925	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
926	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
927	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
928	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
929	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
930	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
931	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
932	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
933	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
934	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
935	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
936	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
937	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
938	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
939	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
940	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
941	{ 0x14, 0x0000ffff, 0, 0x315, SISLANDS_CACCONFIG_CGIND },
942	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
943	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
944	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
945	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
946	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
947	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
948	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
949	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
950	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
951	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
952	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
953	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
954	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
955	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
956	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
957	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
958	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
959	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
960	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
961	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
962	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
963	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
964	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
965	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
966	{ 0xFFFFFFFF }
967};
968
969static const struct si_cac_config_reg cac_weights_cape_verde[] =
970{
971	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
972	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
973	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
974	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
975	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
976	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
977	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
978	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
979	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
980	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
981	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
982	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
983	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
984	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
985	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
986	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
987	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
988	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
989	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
990	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
991	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
992	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
993	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
994	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
995	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
996	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
997	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
998	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
999	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1000	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
1001	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1002	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
1003	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
1004	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
1005	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
1006	{ 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
1007	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1008	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
1009	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1010	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
1011	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
1012	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1013	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1014	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1015	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1016	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1017	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1018	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1019	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1020	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1021	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1022	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1023	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1024	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1025	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1026	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1027	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1028	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1029	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1030	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
1031	{ 0xFFFFFFFF }
1032};
1033
1034static const struct si_cac_config_reg lcac_cape_verde[] =
1035{
1036	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1037	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1038	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1039	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1040	{ 0x110, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
1041	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1042	{ 0x14f, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
1043	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1044	{ 0x8c, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
1045	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1046	{ 0x143, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1047	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1048	{ 0x9b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1049	{ 0x9b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1050	{ 0x107, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1051	{ 0x107, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1052	{ 0x113, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
1053	{ 0x113, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1054	{ 0x152, 0x0001fffe, 1, 0x5, SISLANDS_CACCONFIG_CGIND },
1055	{ 0x152, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1056	{ 0x8f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1057	{ 0x8f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1058	{ 0x146, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1059	{ 0x146, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1060	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1061	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1062	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1063	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1064	{ 0x164, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1065	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1066	{ 0x167, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1067	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1068	{ 0x16a, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1069	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1070	{ 0x15e, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1071	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1072	{ 0x161, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1073	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1074	{ 0x15b, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1075	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1076	{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1077	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1078	{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1079	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1080	{ 0x173, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1081	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1082	{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1083	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1084	{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1085	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1086	{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1087	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1088	{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1089	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1090	{ 0xFFFFFFFF }
1091};
1092
1093static const struct si_cac_config_reg cac_override_cape_verde[] =
1094{
1095    { 0xFFFFFFFF }
1096};
1097
1098static const struct si_powertune_data powertune_data_cape_verde =
1099{
1100	((1 << 16) | 0x6993),
1101	5,
1102	0,
1103	7,
1104	105,
1105	{
1106		0UL,
1107		0UL,
1108		7194395UL,
1109		309631529UL,
1110		-1270850L,
1111		4513710L,
1112		100
1113	},
1114	117830498UL,
1115	12,
1116	{
1117		0,
1118		0,
1119		0,
1120		0,
1121		0,
1122		0,
1123		0,
1124		0
1125	},
1126	true
1127};
1128
1129static const struct si_dte_data dte_data_cape_verde =
1130{
1131	{ 0, 0, 0, 0, 0 },
1132	{ 0, 0, 0, 0, 0 },
1133	0,
1134	0,
1135	0,
1136	0,
1137	0,
1138	0,
1139	0,
1140	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1141	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1142	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1143	0,
1144	false
1145};
1146
1147static const struct si_dte_data dte_data_venus_xtx =
1148{
1149	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1150	{ 0x71C, 0xAAB, 0xE39, 0x11C7, 0x0 },
1151	5,
1152	55000,
1153	0x69,
1154	0xA,
1155	1,
1156	0,
1157	0x3,
1158	{ 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1159	{ 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1160	{ 0xD6D8, 0x88B8, 0x1555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1161	90,
1162	true
1163};
1164
1165static const struct si_dte_data dte_data_venus_xt =
1166{
1167	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1168	{ 0xBDA, 0x11C7, 0x17B4, 0x1DA1, 0x0 },
1169	5,
1170	55000,
1171	0x69,
1172	0xA,
1173	1,
1174	0,
1175	0x3,
1176	{ 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1177	{ 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1178	{ 0xAFC8, 0x88B8, 0x238E, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1179	90,
1180	true
1181};
1182
1183static const struct si_dte_data dte_data_venus_pro =
1184{
1185	{  0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1186	{ 0x11C7, 0x1AAB, 0x238E, 0x2C72, 0x0 },
1187	5,
1188	55000,
1189	0x69,
1190	0xA,
1191	1,
1192	0,
1193	0x3,
1194	{ 0x96, 0xB4, 0xFF, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1195	{ 0x895440, 0x3D0900, 0x989680, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1196	{ 0x88B8, 0x88B8, 0x3555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1197	90,
1198	true
1199};
1200
1201static const struct si_cac_config_reg cac_weights_oland[] =
1202{
1203	{ 0x0, 0x0000ffff, 0, 0x82, SISLANDS_CACCONFIG_CGIND },
1204	{ 0x0, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
1205	{ 0x1, 0x0000ffff, 0, 0x153, SISLANDS_CACCONFIG_CGIND },
1206	{ 0x1, 0xffff0000, 16, 0x52, SISLANDS_CACCONFIG_CGIND },
1207	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1208	{ 0x3, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
1209	{ 0x3, 0xffff0000, 16, 0x4F, SISLANDS_CACCONFIG_CGIND },
1210	{ 0x4, 0x0000ffff, 0, 0x135, SISLANDS_CACCONFIG_CGIND },
1211	{ 0x4, 0xffff0000, 16, 0xAC, SISLANDS_CACCONFIG_CGIND },
1212	{ 0x5, 0x0000ffff, 0, 0x118, SISLANDS_CACCONFIG_CGIND },
1213	{ 0x5, 0xffff0000, 16, 0xBE, SISLANDS_CACCONFIG_CGIND },
1214	{ 0x6, 0x0000ffff, 0, 0x110, SISLANDS_CACCONFIG_CGIND },
1215	{ 0x6, 0xffff0000, 16, 0x4CD, SISLANDS_CACCONFIG_CGIND },
1216	{ 0x18f, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
1217	{ 0x7, 0x0000ffff, 0, 0x37, SISLANDS_CACCONFIG_CGIND },
1218	{ 0x7, 0xffff0000, 16, 0x27, SISLANDS_CACCONFIG_CGIND },
1219	{ 0x8, 0x0000ffff, 0, 0xC3, SISLANDS_CACCONFIG_CGIND },
1220	{ 0x8, 0xffff0000, 16, 0x35, SISLANDS_CACCONFIG_CGIND },
1221	{ 0x9, 0x0000ffff, 0, 0x28, SISLANDS_CACCONFIG_CGIND },
1222	{ 0xa, 0x0000ffff, 0, 0x26C, SISLANDS_CACCONFIG_CGIND },
1223	{ 0xb, 0x0000ffff, 0, 0x3B2, SISLANDS_CACCONFIG_CGIND },
1224	{ 0xb, 0xffff0000, 16, 0x99D, SISLANDS_CACCONFIG_CGIND },
1225	{ 0xc, 0x0000ffff, 0, 0xA3F, SISLANDS_CACCONFIG_CGIND },
1226	{ 0xd, 0x0000ffff, 0, 0xA, SISLANDS_CACCONFIG_CGIND },
1227	{ 0xd, 0xffff0000, 16, 0xA, SISLANDS_CACCONFIG_CGIND },
1228	{ 0xe, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1229	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
1230	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1231	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1232	{ 0x10, 0xffff0000, 16, 0x1, SISLANDS_CACCONFIG_CGIND },
1233	{ 0x11, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1234	{ 0x11, 0xffff0000, 16, 0x15, SISLANDS_CACCONFIG_CGIND },
1235	{ 0x12, 0x0000ffff, 0, 0x34, SISLANDS_CACCONFIG_CGIND },
1236	{ 0x13, 0x0000ffff, 0, 0x4, SISLANDS_CACCONFIG_CGIND },
1237	{ 0x13, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
1238	{ 0x14, 0x0000ffff, 0, 0x3BA, SISLANDS_CACCONFIG_CGIND },
1239	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1240	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
1241	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1242	{ 0x16, 0x0000ffff, 0, 0x30, SISLANDS_CACCONFIG_CGIND },
1243	{ 0x16, 0xffff0000, 16, 0x7A, SISLANDS_CACCONFIG_CGIND },
1244	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1245	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1246	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1247	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1248	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1249	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1250	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1251	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1252	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1253	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1254	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1255	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1256	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1257	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1258	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1259	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1260	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1261	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1262	{ 0x6d, 0x0000ffff, 0, 0x100, SISLANDS_CACCONFIG_CGIND },
1263	{ 0xFFFFFFFF }
1264};
1265
1266static const struct si_cac_config_reg cac_weights_mars_pro[] =
1267{
1268	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
1269	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1270	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
1271	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
1272	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1273	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1274	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1275	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1276	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
1277	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
1278	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
1279	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
1280	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
1281	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
1282	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
1283	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
1284	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
1285	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
1286	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
1287	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
1288	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
1289	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
1290	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
1291	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1292	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
1293	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
1294	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
1295	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
1296	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1297	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
1298	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
1299	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1300	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
1301	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
1302	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
1303	{ 0x14, 0x0000ffff, 0, 0x2, SISLANDS_CACCONFIG_CGIND },
1304	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1305	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
1306	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1307	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
1308	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
1309	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1310	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1311	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1312	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1313	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1314	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
1315	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1316	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1317	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1318	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
1319	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
1320	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1321	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1322	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1323	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1324	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1325	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1326	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1327	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
1328	{ 0xFFFFFFFF }
1329};
1330
1331static const struct si_cac_config_reg cac_weights_mars_xt[] =
1332{
1333	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
1334	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1335	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
1336	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
1337	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1338	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1339	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1340	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1341	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
1342	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
1343	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
1344	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
1345	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
1346	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
1347	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
1348	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
1349	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
1350	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
1351	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
1352	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
1353	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
1354	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
1355	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
1356	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1357	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
1358	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
1359	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
1360	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
1361	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1362	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
1363	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
1364	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1365	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
1366	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
1367	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
1368	{ 0x14, 0x0000ffff, 0, 0x60, SISLANDS_CACCONFIG_CGIND },
1369	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1370	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
1371	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1372	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
1373	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
1374	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1375	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1376	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1377	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1378	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1379	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
1380	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1381	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1382	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1383	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
1384	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
1385	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1386	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1387	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1388	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1389	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1390	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1391	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1392	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
1393	{ 0xFFFFFFFF }
1394};
1395
1396static const struct si_cac_config_reg cac_weights_oland_pro[] =
1397{
1398	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
1399	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1400	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
1401	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
1402	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1403	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1404	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1405	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1406	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
1407	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
1408	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
1409	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
1410	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
1411	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
1412	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
1413	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
1414	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
1415	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
1416	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
1417	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
1418	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
1419	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
1420	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
1421	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1422	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
1423	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
1424	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
1425	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
1426	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1427	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
1428	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
1429	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1430	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
1431	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
1432	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
1433	{ 0x14, 0x0000ffff, 0, 0x90, SISLANDS_CACCONFIG_CGIND },
1434	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1435	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
1436	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1437	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
1438	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
1439	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1440	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1441	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1442	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1443	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1444	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
1445	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1446	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1447	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1448	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
1449	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
1450	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1451	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1452	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1453	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1454	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1455	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1456	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1457	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
1458	{ 0xFFFFFFFF }
1459};
1460
1461static const struct si_cac_config_reg cac_weights_oland_xt[] =
1462{
1463	{ 0x0, 0x0000ffff, 0, 0x43, SISLANDS_CACCONFIG_CGIND },
1464	{ 0x0, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1465	{ 0x1, 0x0000ffff, 0, 0xAF, SISLANDS_CACCONFIG_CGIND },
1466	{ 0x1, 0xffff0000, 16, 0x2A, SISLANDS_CACCONFIG_CGIND },
1467	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1468	{ 0x3, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1469	{ 0x3, 0xffff0000, 16, 0x29, SISLANDS_CACCONFIG_CGIND },
1470	{ 0x4, 0x0000ffff, 0, 0xA0, SISLANDS_CACCONFIG_CGIND },
1471	{ 0x4, 0xffff0000, 16, 0x59, SISLANDS_CACCONFIG_CGIND },
1472	{ 0x5, 0x0000ffff, 0, 0x1A5, SISLANDS_CACCONFIG_CGIND },
1473	{ 0x5, 0xffff0000, 16, 0x1D6, SISLANDS_CACCONFIG_CGIND },
1474	{ 0x6, 0x0000ffff, 0, 0x2A3, SISLANDS_CACCONFIG_CGIND },
1475	{ 0x6, 0xffff0000, 16, 0x8FD, SISLANDS_CACCONFIG_CGIND },
1476	{ 0x18f, 0x0000ffff, 0, 0x76, SISLANDS_CACCONFIG_CGIND },
1477	{ 0x7, 0x0000ffff, 0, 0x8A, SISLANDS_CACCONFIG_CGIND },
1478	{ 0x7, 0xffff0000, 16, 0xA3, SISLANDS_CACCONFIG_CGIND },
1479	{ 0x8, 0x0000ffff, 0, 0x71, SISLANDS_CACCONFIG_CGIND },
1480	{ 0x8, 0xffff0000, 16, 0x36, SISLANDS_CACCONFIG_CGIND },
1481	{ 0x9, 0x0000ffff, 0, 0xA6, SISLANDS_CACCONFIG_CGIND },
1482	{ 0xa, 0x0000ffff, 0, 0x81, SISLANDS_CACCONFIG_CGIND },
1483	{ 0xb, 0x0000ffff, 0, 0x3D2, SISLANDS_CACCONFIG_CGIND },
1484	{ 0xb, 0xffff0000, 16, 0x27C, SISLANDS_CACCONFIG_CGIND },
1485	{ 0xc, 0x0000ffff, 0, 0xA96, SISLANDS_CACCONFIG_CGIND },
1486	{ 0xd, 0x0000ffff, 0, 0x5, SISLANDS_CACCONFIG_CGIND },
1487	{ 0xd, 0xffff0000, 16, 0x5, SISLANDS_CACCONFIG_CGIND },
1488	{ 0xe, 0x0000ffff, 0, 0xB, SISLANDS_CACCONFIG_CGIND },
1489	{ 0xf, 0x0000ffff, 0, 0x3, SISLANDS_CACCONFIG_CGIND },
1490	{ 0xf, 0xffff0000, 16, 0x2, SISLANDS_CACCONFIG_CGIND },
1491	{ 0x10, 0x0000ffff, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1492	{ 0x10, 0xffff0000, 16, 0x4, SISLANDS_CACCONFIG_CGIND },
1493	{ 0x11, 0x0000ffff, 0, 0x15, SISLANDS_CACCONFIG_CGIND },
1494	{ 0x11, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1495	{ 0x12, 0x0000ffff, 0, 0x36, SISLANDS_CACCONFIG_CGIND },
1496	{ 0x13, 0x0000ffff, 0, 0x10, SISLANDS_CACCONFIG_CGIND },
1497	{ 0x13, 0xffff0000, 16, 0x10, SISLANDS_CACCONFIG_CGIND },
1498	{ 0x14, 0x0000ffff, 0, 0x120, SISLANDS_CACCONFIG_CGIND },
1499	{ 0x15, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1500	{ 0x15, 0xffff0000, 16, 0x6, SISLANDS_CACCONFIG_CGIND },
1501	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1502	{ 0x16, 0x0000ffff, 0, 0x32, SISLANDS_CACCONFIG_CGIND },
1503	{ 0x16, 0xffff0000, 16, 0x7E, SISLANDS_CACCONFIG_CGIND },
1504	{ 0x17, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1505	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1506	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1507	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1508	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1509	{ 0x1a, 0x0000ffff, 0, 0x280, SISLANDS_CACCONFIG_CGIND },
1510	{ 0x1a, 0xffff0000, 16, 0x7, SISLANDS_CACCONFIG_CGIND },
1511	{ 0x1b, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1512	{ 0x1b, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1513	{ 0x1c, 0x0000ffff, 0, 0x3C, SISLANDS_CACCONFIG_CGIND },
1514	{ 0x1c, 0xffff0000, 16, 0x203, SISLANDS_CACCONFIG_CGIND },
1515	{ 0x1d, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1516	{ 0x1d, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1517	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1518	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1519	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1520	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1521	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1522	{ 0x6d, 0x0000ffff, 0, 0xB4, SISLANDS_CACCONFIG_CGIND },
1523	{ 0xFFFFFFFF }
1524};
1525
1526static const struct si_cac_config_reg lcac_oland[] =
1527{
1528	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1529	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1530	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1531	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1532	{ 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
1533	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1534	{ 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
1535	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1536	{ 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
1537	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1538	{ 0x143, 0x0001fffe, 1, 0x4, SISLANDS_CACCONFIG_CGIND },
1539	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1540	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1541	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1542	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1543	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1544	{ 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1545	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1546	{ 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1547	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1548	{ 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1549	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1550	{ 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1551	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1552	{ 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1553	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1554	{ 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1555	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1556	{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1557	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1558	{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1559	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1560	{ 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1561	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1562	{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1563	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1564	{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1565	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1566	{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1567	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1568	{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1569	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1570	{ 0xFFFFFFFF }
1571};
1572
1573static const struct si_cac_config_reg lcac_mars_pro[] =
1574{
1575	{ 0x98, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1576	{ 0x98, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1577	{ 0x104, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1578	{ 0x104, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1579	{ 0x110, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
1580	{ 0x110, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1581	{ 0x14f, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
1582	{ 0x14f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1583	{ 0x8c, 0x0001fffe, 1, 0x6, SISLANDS_CACCONFIG_CGIND },
1584	{ 0x8c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1585	{ 0x143, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1586	{ 0x143, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1587	{ 0x11c, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1588	{ 0x11c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1589	{ 0x11f, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1590	{ 0x11f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1591	{ 0x164, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1592	{ 0x164, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1593	{ 0x167, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1594	{ 0x167, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1595	{ 0x16a, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1596	{ 0x16a, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1597	{ 0x15e, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1598	{ 0x15e, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1599	{ 0x161, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1600	{ 0x161, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1601	{ 0x15b, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1602	{ 0x15b, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1603	{ 0x16d, 0x0001fffe, 1, 0x2, SISLANDS_CACCONFIG_CGIND },
1604	{ 0x16d, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1605	{ 0x170, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1606	{ 0x170, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1607	{ 0x173, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1608	{ 0x173, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1609	{ 0x176, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1610	{ 0x176, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1611	{ 0x179, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1612	{ 0x179, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1613	{ 0x17c, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1614	{ 0x17c, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1615	{ 0x17f, 0x0001fffe, 1, 0x1, SISLANDS_CACCONFIG_CGIND },
1616	{ 0x17f, 0x00000001, 0, 0x1, SISLANDS_CACCONFIG_CGIND },
1617	{ 0xFFFFFFFF }
1618};
1619
1620static const struct si_cac_config_reg cac_override_oland[] =
1621{
1622	{ 0xFFFFFFFF }
1623};
1624
1625static const struct si_powertune_data powertune_data_oland =
1626{
1627	((1 << 16) | 0x6993),
1628	5,
1629	0,
1630	7,
1631	105,
1632	{
1633		0UL,
1634		0UL,
1635		7194395UL,
1636		309631529UL,
1637		-1270850L,
1638		4513710L,
1639		100
1640	},
1641	117830498UL,
1642	12,
1643	{
1644		0,
1645		0,
1646		0,
1647		0,
1648		0,
1649		0,
1650		0,
1651		0
1652	},
1653	true
1654};
1655
1656static const struct si_powertune_data powertune_data_mars_pro =
1657{
1658	((1 << 16) | 0x6993),
1659	5,
1660	0,
1661	7,
1662	105,
1663	{
1664		0UL,
1665		0UL,
1666		7194395UL,
1667		309631529UL,
1668		-1270850L,
1669		4513710L,
1670		100
1671	},
1672	117830498UL,
1673	12,
1674	{
1675		0,
1676		0,
1677		0,
1678		0,
1679		0,
1680		0,
1681		0,
1682		0
1683	},
1684	true
1685};
1686
1687static const struct si_dte_data dte_data_oland =
1688{
1689	{ 0, 0, 0, 0, 0 },
1690	{ 0, 0, 0, 0, 0 },
1691	0,
1692	0,
1693	0,
1694	0,
1695	0,
1696	0,
1697	0,
1698	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1699	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1700	{ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 },
1701	0,
1702	false
1703};
1704
1705static const struct si_dte_data dte_data_mars_pro =
1706{
1707	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1708	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
1709	5,
1710	55000,
1711	105,
1712	0xA,
1713	1,
1714	0,
1715	0x10,
1716	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
1717	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
1718	{ 0xF627, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1719	90,
1720	true
1721};
1722
1723static const struct si_dte_data dte_data_sun_xt =
1724{
1725	{ 0x1E8480, 0x3D0900, 0x989680, 0x2625A00, 0x0 },
1726	{ 0x0, 0x0, 0x0, 0x0, 0x0 },
1727	5,
1728	55000,
1729	105,
1730	0xA,
1731	1,
1732	0,
1733	0x10,
1734	{ 0x96, 0xB4, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF },
1735	{ 0x895440, 0x3D0900, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680, 0x989680 },
1736	{ 0xD555, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0, 0x0 },
1737	90,
1738	true
1739};
1740
1741
1742static const struct si_cac_config_reg cac_weights_hainan[] =
1743{
1744	{ 0x0, 0x0000ffff, 0, 0x2d9, SISLANDS_CACCONFIG_CGIND },
1745	{ 0x0, 0xffff0000, 16, 0x22b, SISLANDS_CACCONFIG_CGIND },
1746	{ 0x1, 0x0000ffff, 0, 0x21c, SISLANDS_CACCONFIG_CGIND },
1747	{ 0x1, 0xffff0000, 16, 0x1dc, SISLANDS_CACCONFIG_CGIND },
1748	{ 0x2, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1749	{ 0x3, 0x0000ffff, 0, 0x24e, SISLANDS_CACCONFIG_CGIND },
1750	{ 0x3, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1751	{ 0x4, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1752	{ 0x4, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1753	{ 0x5, 0x0000ffff, 0, 0x35e, SISLANDS_CACCONFIG_CGIND },
1754	{ 0x5, 0xffff0000, 16, 0x1143, SISLANDS_CACCONFIG_CGIND },
1755	{ 0x6, 0x0000ffff, 0, 0xe17, SISLANDS_CACCONFIG_CGIND },
1756	{ 0x6, 0xffff0000, 16, 0x441, SISLANDS_CACCONFIG_CGIND },
1757	{ 0x18f, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1758	{ 0x7, 0x0000ffff, 0, 0x28b, SISLANDS_CACCONFIG_CGIND },
1759	{ 0x7, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1760	{ 0x8, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1761	{ 0x8, 0xffff0000, 16, 0xabe, SISLANDS_CACCONFIG_CGIND },
1762	{ 0x9, 0x0000ffff, 0, 0xf11, SISLANDS_CACCONFIG_CGIND },
1763	{ 0xa, 0x0000ffff, 0, 0x907, SISLANDS_CACCONFIG_CGIND },
1764	{ 0xb, 0x0000ffff, 0, 0xb45, SISLANDS_CACCONFIG_CGIND },
1765	{ 0xb, 0xffff0000, 16, 0xd1e, SISLANDS_CACCONFIG_CGIND },
1766	{ 0xc, 0x0000ffff, 0, 0xa2c, SISLANDS_CACCONFIG_CGIND },
1767	{ 0xd, 0x0000ffff, 0, 0x62, SISLANDS_CACCONFIG_CGIND },
1768	{ 0xd, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1769	{ 0xe, 0x0000ffff, 0, 0x1f3, SISLANDS_CACCONFIG_CGIND },
1770	{ 0xf, 0x0000ffff, 0, 0x42, SISLANDS_CACCONFIG_CGIND },
1771	{ 0xf, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1772	{ 0x10, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1773	{ 0x10, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1774	{ 0x11, 0x0000ffff, 0, 0x709, SISLANDS_CACCONFIG_CGIND },
1775	{ 0x11, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1776	{ 0x12, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1777	{ 0x13, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1778	{ 0x13, 0xffff0000, 16, 0x3a, SISLANDS_CACCONFIG_CGIND },
1779	{ 0x14, 0x0000ffff, 0, 0x357, SISLANDS_CACCONFIG_CGIND },
1780	{ 0x15, 0x0000ffff, 0, 0x9f, SISLANDS_CACCONFIG_CGIND },
1781	{ 0x15, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1782	{ 0x4e, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1783	{ 0x16, 0x0000ffff, 0, 0x314, SISLANDS_CACCONFIG_CGIND },
1784	{ 0x16, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1785	{ 0x17, 0x0000ffff, 0, 0x6d, SISLANDS_CACCONFIG_CGIND },
1786	{ 0x18, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1787	{ 0x18, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1788	{ 0x19, 0x0000ffff, 0, 0x0, SISLANDS_CACCONFIG_CGIND },
1789	{ 0x19, 0xffff0000, 16, 0x0, SISLANDS_CACCONFIG_CGIND },
1790	{ 0x1a, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1791	{ 0x1a, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1792	{ 0x1b, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1793	{ 0x1b, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1794	{ 0x1c, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1795	{ 0x1c, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1796	{ 0x1d, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1797	{ 0x1d, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1798	{ 0x1e, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1799	{ 0x1e, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1800	{ 0x1f, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1801	{ 0x1f, 0xffff0000, 16, 0, SISLANDS_CACCONFIG_CGIND },
1802	{ 0x20, 0x0000ffff, 0, 0, SISLANDS_CACCONFIG_CGIND },
1803	{ 0x6d, 0x0000ffff, 0, 0x1b9, SISLANDS_CACCONFIG_CGIND },
1804	{ 0xFFFFFFFF }
1805};
1806
1807static const struct si_powertune_data powertune_data_hainan =
1808{
1809	((1 << 16) | 0x6993),
1810	5,
1811	0,
1812	9,
1813	105,
1814	{
1815		0UL,
1816		0UL,
1817		7194395UL,
1818		309631529UL,
1819		-1270850L,
1820		4513710L,
1821		100
1822	},
1823	117830498UL,
1824	12,
1825	{
1826		0,
1827		0,
1828		0,
1829		0,
1830		0,
1831		0,
1832		0,
1833		0
1834	},
1835	true
1836};
1837
1838static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev);
1839static struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev);
1840static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev);
1841static struct  si_ps *si_get_ps(struct amdgpu_ps *rps);
1842
1843static int si_populate_voltage_value(struct amdgpu_device *adev,
1844				     const struct atom_voltage_table *table,
1845				     u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage);
1846static int si_get_std_voltage_value(struct amdgpu_device *adev,
1847				    SISLANDS_SMC_VOLTAGE_VALUE *voltage,
1848				    u16 *std_voltage);
1849static int si_write_smc_soft_register(struct amdgpu_device *adev,
1850				      u16 reg_offset, u32 value);
1851static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
1852					 struct rv7xx_pl *pl,
1853					 SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level);
1854static int si_calculate_sclk_params(struct amdgpu_device *adev,
1855				    u32 engine_clock,
1856				    SISLANDS_SMC_SCLK_VALUE *sclk);
1857
1858static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev);
1859static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev);
1860static void si_dpm_set_irq_funcs(struct amdgpu_device *adev);
1861
1862static struct si_power_info *si_get_pi(struct amdgpu_device *adev)
1863{
1864	struct si_power_info *pi = adev->pm.dpm.priv;
1865	return pi;
1866}
1867
1868static void si_calculate_leakage_for_v_and_t_formula(const struct ni_leakage_coeffients *coeff,
1869						     u16 v, s32 t, u32 ileakage, u32 *leakage)
1870{
1871	s64 kt, kv, leakage_w, i_leakage, vddc;
1872	s64 temperature, t_slope, t_intercept, av, bv, t_ref;
1873	s64 tmp;
1874
1875	i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
1876	vddc = div64_s64(drm_int2fixp(v), 1000);
1877	temperature = div64_s64(drm_int2fixp(t), 1000);
1878
1879	t_slope = div64_s64(drm_int2fixp(coeff->t_slope), 100000000);
1880	t_intercept = div64_s64(drm_int2fixp(coeff->t_intercept), 100000000);
1881	av = div64_s64(drm_int2fixp(coeff->av), 100000000);
1882	bv = div64_s64(drm_int2fixp(coeff->bv), 100000000);
1883	t_ref = drm_int2fixp(coeff->t_ref);
1884
1885	tmp = drm_fixp_mul(t_slope, vddc) + t_intercept;
1886	kt = drm_fixp_exp(drm_fixp_mul(tmp, temperature));
1887	kt = drm_fixp_div(kt, drm_fixp_exp(drm_fixp_mul(tmp, t_ref)));
1888	kv = drm_fixp_mul(av, drm_fixp_exp(drm_fixp_mul(bv, vddc)));
1889
1890	leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
1891
1892	*leakage = drm_fixp2int(leakage_w * 1000);
1893}
1894
1895static void si_calculate_leakage_for_v_and_t(struct amdgpu_device *adev,
1896					     const struct ni_leakage_coeffients *coeff,
1897					     u16 v,
1898					     s32 t,
1899					     u32 i_leakage,
1900					     u32 *leakage)
1901{
1902	si_calculate_leakage_for_v_and_t_formula(coeff, v, t, i_leakage, leakage);
1903}
1904
1905static void si_calculate_leakage_for_v_formula(const struct ni_leakage_coeffients *coeff,
1906					       const u32 fixed_kt, u16 v,
1907					       u32 ileakage, u32 *leakage)
1908{
1909	s64 kt, kv, leakage_w, i_leakage, vddc;
1910
1911	i_leakage = div64_s64(drm_int2fixp(ileakage), 100);
1912	vddc = div64_s64(drm_int2fixp(v), 1000);
1913
1914	kt = div64_s64(drm_int2fixp(fixed_kt), 100000000);
1915	kv = drm_fixp_mul(div64_s64(drm_int2fixp(coeff->av), 100000000),
1916			  drm_fixp_exp(drm_fixp_mul(div64_s64(drm_int2fixp(coeff->bv), 100000000), vddc)));
1917
1918	leakage_w = drm_fixp_mul(drm_fixp_mul(drm_fixp_mul(i_leakage, kt), kv), vddc);
1919
1920	*leakage = drm_fixp2int(leakage_w * 1000);
1921}
1922
1923static void si_calculate_leakage_for_v(struct amdgpu_device *adev,
1924				       const struct ni_leakage_coeffients *coeff,
1925				       const u32 fixed_kt,
1926				       u16 v,
1927				       u32 i_leakage,
1928				       u32 *leakage)
1929{
1930	si_calculate_leakage_for_v_formula(coeff, fixed_kt, v, i_leakage, leakage);
1931}
1932
1933
1934static void si_update_dte_from_pl2(struct amdgpu_device *adev,
1935				   struct si_dte_data *dte_data)
1936{
1937	u32 p_limit1 = adev->pm.dpm.tdp_limit;
1938	u32 p_limit2 = adev->pm.dpm.near_tdp_limit;
1939	u32 k = dte_data->k;
1940	u32 t_max = dte_data->max_t;
1941	u32 t_split[5] = { 10, 15, 20, 25, 30 };
1942	u32 t_0 = dte_data->t0;
1943	u32 i;
1944
1945	if (p_limit2 != 0 && p_limit2 <= p_limit1) {
1946		dte_data->tdep_count = 3;
1947
1948		for (i = 0; i < k; i++) {
1949			dte_data->r[i] =
1950				(t_split[i] * (t_max - t_0/(u32)1000) * (1 << 14)) /
1951				(p_limit2  * (u32)100);
1952		}
1953
1954		dte_data->tdep_r[1] = dte_data->r[4] * 2;
1955
1956		for (i = 2; i < SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE; i++) {
1957			dte_data->tdep_r[i] = dte_data->r[4];
1958		}
1959	} else {
1960		DRM_ERROR("Invalid PL2! DTE will not be updated.\n");
1961	}
1962}
1963
1964static struct rv7xx_power_info *rv770_get_pi(struct amdgpu_device *adev)
1965{
1966	struct rv7xx_power_info *pi = adev->pm.dpm.priv;
1967
1968	return pi;
1969}
1970
1971static struct ni_power_info *ni_get_pi(struct amdgpu_device *adev)
1972{
1973	struct ni_power_info *pi = adev->pm.dpm.priv;
1974
1975	return pi;
1976}
1977
1978static struct si_ps *si_get_ps(struct amdgpu_ps *aps)
1979{
1980	struct  si_ps *ps = aps->ps_priv;
1981
1982	return ps;
1983}
1984
1985static void si_initialize_powertune_defaults(struct amdgpu_device *adev)
1986{
1987	struct ni_power_info *ni_pi = ni_get_pi(adev);
1988	struct si_power_info *si_pi = si_get_pi(adev);
1989	bool update_dte_from_pl2 = false;
1990
1991	if (adev->asic_type == CHIP_TAHITI) {
1992		si_pi->cac_weights = cac_weights_tahiti;
1993		si_pi->lcac_config = lcac_tahiti;
1994		si_pi->cac_override = cac_override_tahiti;
1995		si_pi->powertune_data = &powertune_data_tahiti;
1996		si_pi->dte_data = dte_data_tahiti;
1997
1998		switch (adev->pdev->device) {
1999		case 0x6798:
2000			si_pi->dte_data.enable_dte_by_default = true;
2001			break;
2002		case 0x6799:
2003			si_pi->dte_data = dte_data_new_zealand;
2004			break;
2005		case 0x6790:
2006		case 0x6791:
2007		case 0x6792:
2008		case 0x679E:
2009			si_pi->dte_data = dte_data_aruba_pro;
2010			update_dte_from_pl2 = true;
2011			break;
2012		case 0x679B:
2013			si_pi->dte_data = dte_data_malta;
2014			update_dte_from_pl2 = true;
2015			break;
2016		case 0x679A:
2017			si_pi->dte_data = dte_data_tahiti_pro;
2018			update_dte_from_pl2 = true;
2019			break;
2020		default:
2021			if (si_pi->dte_data.enable_dte_by_default == true)
2022				DRM_ERROR("DTE is not enabled!\n");
2023			break;
2024		}
2025	} else if (adev->asic_type == CHIP_PITCAIRN) {
2026		si_pi->cac_weights = cac_weights_pitcairn;
2027		si_pi->lcac_config = lcac_pitcairn;
2028		si_pi->cac_override = cac_override_pitcairn;
2029		si_pi->powertune_data = &powertune_data_pitcairn;
2030
2031		switch (adev->pdev->device) {
2032		case 0x6810:
2033		case 0x6818:
2034			si_pi->dte_data = dte_data_curacao_xt;
2035			update_dte_from_pl2 = true;
2036			break;
2037		case 0x6819:
2038		case 0x6811:
2039			si_pi->dte_data = dte_data_curacao_pro;
2040			update_dte_from_pl2 = true;
2041			break;
2042		case 0x6800:
2043		case 0x6806:
2044			si_pi->dte_data = dte_data_neptune_xt;
2045			update_dte_from_pl2 = true;
2046			break;
2047		default:
2048			si_pi->dte_data = dte_data_pitcairn;
2049			break;
2050		}
2051	} else if (adev->asic_type == CHIP_VERDE) {
2052		si_pi->lcac_config = lcac_cape_verde;
2053		si_pi->cac_override = cac_override_cape_verde;
2054		si_pi->powertune_data = &powertune_data_cape_verde;
2055
2056		switch (adev->pdev->device) {
2057		case 0x683B:
2058		case 0x683F:
2059		case 0x6829:
2060		case 0x6835:
2061			si_pi->cac_weights = cac_weights_cape_verde_pro;
2062			si_pi->dte_data = dte_data_cape_verde;
2063			break;
2064		case 0x682C:
2065			si_pi->cac_weights = cac_weights_cape_verde_pro;
2066			si_pi->dte_data = dte_data_sun_xt;
2067			update_dte_from_pl2 = true;
2068			break;
2069		case 0x6825:
2070		case 0x6827:
2071			si_pi->cac_weights = cac_weights_heathrow;
2072			si_pi->dte_data = dte_data_cape_verde;
2073			break;
2074		case 0x6824:
2075		case 0x682D:
2076			si_pi->cac_weights = cac_weights_chelsea_xt;
2077			si_pi->dte_data = dte_data_cape_verde;
2078			break;
2079		case 0x682F:
2080			si_pi->cac_weights = cac_weights_chelsea_pro;
2081			si_pi->dte_data = dte_data_cape_verde;
2082			break;
2083		case 0x6820:
2084			si_pi->cac_weights = cac_weights_heathrow;
2085			si_pi->dte_data = dte_data_venus_xtx;
2086			break;
2087		case 0x6821:
2088			si_pi->cac_weights = cac_weights_heathrow;
2089			si_pi->dte_data = dte_data_venus_xt;
2090			break;
2091		case 0x6823:
2092		case 0x682B:
2093		case 0x6822:
2094		case 0x682A:
2095			si_pi->cac_weights = cac_weights_chelsea_pro;
2096			si_pi->dte_data = dte_data_venus_pro;
2097			break;
2098		default:
2099			si_pi->cac_weights = cac_weights_cape_verde;
2100			si_pi->dte_data = dte_data_cape_verde;
2101			break;
2102		}
2103	} else if (adev->asic_type == CHIP_OLAND) {
2104		si_pi->lcac_config = lcac_mars_pro;
2105		si_pi->cac_override = cac_override_oland;
2106		si_pi->powertune_data = &powertune_data_mars_pro;
2107		si_pi->dte_data = dte_data_mars_pro;
2108
2109		switch (adev->pdev->device) {
2110		case 0x6601:
2111		case 0x6621:
2112		case 0x6603:
2113		case 0x6605:
2114			si_pi->cac_weights = cac_weights_mars_pro;
2115			update_dte_from_pl2 = true;
2116			break;
2117		case 0x6600:
2118		case 0x6606:
2119		case 0x6620:
2120		case 0x6604:
2121			si_pi->cac_weights = cac_weights_mars_xt;
2122			update_dte_from_pl2 = true;
2123			break;
2124		case 0x6611:
2125		case 0x6613:
2126		case 0x6608:
2127			si_pi->cac_weights = cac_weights_oland_pro;
2128			update_dte_from_pl2 = true;
2129			break;
2130		case 0x6610:
2131			si_pi->cac_weights = cac_weights_oland_xt;
2132			update_dte_from_pl2 = true;
2133			break;
2134		default:
2135			si_pi->cac_weights = cac_weights_oland;
2136			si_pi->lcac_config = lcac_oland;
2137			si_pi->cac_override = cac_override_oland;
2138			si_pi->powertune_data = &powertune_data_oland;
2139			si_pi->dte_data = dte_data_oland;
2140			break;
2141		}
2142	} else if (adev->asic_type == CHIP_HAINAN) {
2143		si_pi->cac_weights = cac_weights_hainan;
2144		si_pi->lcac_config = lcac_oland;
2145		si_pi->cac_override = cac_override_oland;
2146		si_pi->powertune_data = &powertune_data_hainan;
2147		si_pi->dte_data = dte_data_sun_xt;
2148		update_dte_from_pl2 = true;
2149	} else {
2150		DRM_ERROR("Unknown SI asic revision, failed to initialize PowerTune!\n");
2151		return;
2152	}
2153
2154	ni_pi->enable_power_containment = false;
2155	ni_pi->enable_cac = false;
2156	ni_pi->enable_sq_ramping = false;
2157	si_pi->enable_dte = false;
2158
2159	if (si_pi->powertune_data->enable_powertune_by_default) {
2160		ni_pi->enable_power_containment = true;
2161		ni_pi->enable_cac = true;
2162		if (si_pi->dte_data.enable_dte_by_default) {
2163			si_pi->enable_dte = true;
2164			if (update_dte_from_pl2)
2165				si_update_dte_from_pl2(adev, &si_pi->dte_data);
2166
2167		}
2168		ni_pi->enable_sq_ramping = true;
2169	}
2170
2171	ni_pi->driver_calculate_cac_leakage = true;
2172	ni_pi->cac_configuration_required = true;
2173
2174	if (ni_pi->cac_configuration_required) {
2175		ni_pi->support_cac_long_term_average = true;
2176		si_pi->dyn_powertune_data.l2_lta_window_size =
2177			si_pi->powertune_data->l2_lta_window_size_default;
2178		si_pi->dyn_powertune_data.lts_truncate =
2179			si_pi->powertune_data->lts_truncate_default;
2180	} else {
2181		ni_pi->support_cac_long_term_average = false;
2182		si_pi->dyn_powertune_data.l2_lta_window_size = 0;
2183		si_pi->dyn_powertune_data.lts_truncate = 0;
2184	}
2185
2186	si_pi->dyn_powertune_data.disable_uvd_powertune = false;
2187}
2188
2189static u32 si_get_smc_power_scaling_factor(struct amdgpu_device *adev)
2190{
2191	return 1;
2192}
2193
2194static u32 si_calculate_cac_wintime(struct amdgpu_device *adev)
2195{
2196	u32 xclk;
2197	u32 wintime;
2198	u32 cac_window;
2199	u32 cac_window_size;
2200
2201	xclk = amdgpu_asic_get_xclk(adev);
2202
2203	if (xclk == 0)
2204		return 0;
2205
2206	cac_window = RREG32(CG_CAC_CTRL) & CAC_WINDOW_MASK;
2207	cac_window_size = ((cac_window & 0xFFFF0000) >> 16) * (cac_window & 0x0000FFFF);
2208
2209	wintime = (cac_window_size * 100) / xclk;
2210
2211	return wintime;
2212}
2213
2214static u32 si_scale_power_for_smc(u32 power_in_watts, u32 scaling_factor)
2215{
2216	return power_in_watts;
2217}
2218
2219static int si_calculate_adjusted_tdp_limits(struct amdgpu_device *adev,
2220					    bool adjust_polarity,
2221					    u32 tdp_adjustment,
2222					    u32 *tdp_limit,
2223					    u32 *near_tdp_limit)
2224{
2225	u32 adjustment_delta, max_tdp_limit;
2226
2227	if (tdp_adjustment > (u32)adev->pm.dpm.tdp_od_limit)
2228		return -EINVAL;
2229
2230	max_tdp_limit = ((100 + 100) * adev->pm.dpm.tdp_limit) / 100;
2231
2232	if (adjust_polarity) {
2233		*tdp_limit = ((100 + tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
2234		*near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted + (*tdp_limit - adev->pm.dpm.tdp_limit);
2235	} else {
2236		*tdp_limit = ((100 - tdp_adjustment) * adev->pm.dpm.tdp_limit) / 100;
2237		adjustment_delta  = adev->pm.dpm.tdp_limit - *tdp_limit;
2238		if (adjustment_delta < adev->pm.dpm.near_tdp_limit_adjusted)
2239			*near_tdp_limit = adev->pm.dpm.near_tdp_limit_adjusted - adjustment_delta;
2240		else
2241			*near_tdp_limit = 0;
2242	}
2243
2244	if ((*tdp_limit <= 0) || (*tdp_limit > max_tdp_limit))
2245		return -EINVAL;
2246	if ((*near_tdp_limit <= 0) || (*near_tdp_limit > *tdp_limit))
2247		return -EINVAL;
2248
2249	return 0;
2250}
2251
2252static int si_populate_smc_tdp_limits(struct amdgpu_device *adev,
2253				      struct amdgpu_ps *amdgpu_state)
2254{
2255	struct ni_power_info *ni_pi = ni_get_pi(adev);
2256	struct si_power_info *si_pi = si_get_pi(adev);
2257
2258	if (ni_pi->enable_power_containment) {
2259		SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
2260		PP_SIslands_PAPMParameters *papm_parm;
2261		struct amdgpu_ppm_table *ppm = adev->pm.dpm.dyn_state.ppm_table;
2262		u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
2263		u32 tdp_limit;
2264		u32 near_tdp_limit;
2265		int ret;
2266
2267		if (scaling_factor == 0)
2268			return -EINVAL;
2269
2270		memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
2271
2272		ret = si_calculate_adjusted_tdp_limits(adev,
2273						       false, /* ??? */
2274						       adev->pm.dpm.tdp_adjustment,
2275						       &tdp_limit,
2276						       &near_tdp_limit);
2277		if (ret)
2278			return ret;
2279
2280		smc_table->dpm2Params.TDPLimit =
2281			cpu_to_be32(si_scale_power_for_smc(tdp_limit, scaling_factor) * 1000);
2282		smc_table->dpm2Params.NearTDPLimit =
2283			cpu_to_be32(si_scale_power_for_smc(near_tdp_limit, scaling_factor) * 1000);
2284		smc_table->dpm2Params.SafePowerLimit =
2285			cpu_to_be32(si_scale_power_for_smc((near_tdp_limit * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
2286
2287		ret = amdgpu_si_copy_bytes_to_smc(adev,
2288						  (si_pi->state_table_start + offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
2289						   offsetof(PP_SIslands_DPM2Parameters, TDPLimit)),
2290						  (u8 *)(&(smc_table->dpm2Params.TDPLimit)),
2291						  sizeof(u32) * 3,
2292						  si_pi->sram_end);
2293		if (ret)
2294			return ret;
2295
2296		if (si_pi->enable_ppm) {
2297			papm_parm = &si_pi->papm_parm;
2298			memset(papm_parm, 0, sizeof(PP_SIslands_PAPMParameters));
2299			papm_parm->NearTDPLimitTherm = cpu_to_be32(ppm->dgpu_tdp);
2300			papm_parm->dGPU_T_Limit = cpu_to_be32(ppm->tj_max);
2301			papm_parm->dGPU_T_Warning = cpu_to_be32(95);
2302			papm_parm->dGPU_T_Hysteresis = cpu_to_be32(5);
2303			papm_parm->PlatformPowerLimit = 0xffffffff;
2304			papm_parm->NearTDPLimitPAPM = 0xffffffff;
2305
2306			ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->papm_cfg_table_start,
2307							  (u8 *)papm_parm,
2308							  sizeof(PP_SIslands_PAPMParameters),
2309							  si_pi->sram_end);
2310			if (ret)
2311				return ret;
2312		}
2313	}
2314	return 0;
2315}
2316
2317static int si_populate_smc_tdp_limits_2(struct amdgpu_device *adev,
2318					struct amdgpu_ps *amdgpu_state)
2319{
2320	struct ni_power_info *ni_pi = ni_get_pi(adev);
2321	struct si_power_info *si_pi = si_get_pi(adev);
2322
2323	if (ni_pi->enable_power_containment) {
2324		SISLANDS_SMC_STATETABLE *smc_table = &si_pi->smc_statetable;
2325		u32 scaling_factor = si_get_smc_power_scaling_factor(adev);
2326		int ret;
2327
2328		memset(smc_table, 0, sizeof(SISLANDS_SMC_STATETABLE));
2329
2330		smc_table->dpm2Params.NearTDPLimit =
2331			cpu_to_be32(si_scale_power_for_smc(adev->pm.dpm.near_tdp_limit_adjusted, scaling_factor) * 1000);
2332		smc_table->dpm2Params.SafePowerLimit =
2333			cpu_to_be32(si_scale_power_for_smc((adev->pm.dpm.near_tdp_limit_adjusted * SISLANDS_DPM2_TDP_SAFE_LIMIT_PERCENT) / 100, scaling_factor) * 1000);
2334
2335		ret = amdgpu_si_copy_bytes_to_smc(adev,
2336						  (si_pi->state_table_start +
2337						   offsetof(SISLANDS_SMC_STATETABLE, dpm2Params) +
2338						   offsetof(PP_SIslands_DPM2Parameters, NearTDPLimit)),
2339						  (u8 *)(&(smc_table->dpm2Params.NearTDPLimit)),
2340						  sizeof(u32) * 2,
2341						  si_pi->sram_end);
2342		if (ret)
2343			return ret;
2344	}
2345
2346	return 0;
2347}
2348
2349static u16 si_calculate_power_efficiency_ratio(struct amdgpu_device *adev,
2350					       const u16 prev_std_vddc,
2351					       const u16 curr_std_vddc)
2352{
2353	u64 margin = (u64)SISLANDS_DPM2_PWREFFICIENCYRATIO_MARGIN;
2354	u64 prev_vddc = (u64)prev_std_vddc;
2355	u64 curr_vddc = (u64)curr_std_vddc;
2356	u64 pwr_efficiency_ratio, n, d;
2357
2358	if ((prev_vddc == 0) || (curr_vddc == 0))
2359		return 0;
2360
2361	n = div64_u64((u64)1024 * curr_vddc * curr_vddc * ((u64)1000 + margin), (u64)1000);
2362	d = prev_vddc * prev_vddc;
2363	pwr_efficiency_ratio = div64_u64(n, d);
2364
2365	if (pwr_efficiency_ratio > (u64)0xFFFF)
2366		return 0;
2367
2368	return (u16)pwr_efficiency_ratio;
2369}
2370
2371static bool si_should_disable_uvd_powertune(struct amdgpu_device *adev,
2372					    struct amdgpu_ps *amdgpu_state)
2373{
2374	struct si_power_info *si_pi = si_get_pi(adev);
2375
2376	if (si_pi->dyn_powertune_data.disable_uvd_powertune &&
2377	    amdgpu_state->vclk && amdgpu_state->dclk)
2378		return true;
2379
2380	return false;
2381}
2382
2383struct evergreen_power_info *evergreen_get_pi(struct amdgpu_device *adev)
2384{
2385	struct evergreen_power_info *pi = adev->pm.dpm.priv;
2386
2387	return pi;
2388}
2389
2390static int si_populate_power_containment_values(struct amdgpu_device *adev,
2391						struct amdgpu_ps *amdgpu_state,
2392						SISLANDS_SMC_SWSTATE *smc_state)
2393{
2394	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
2395	struct ni_power_info *ni_pi = ni_get_pi(adev);
2396	struct  si_ps *state = si_get_ps(amdgpu_state);
2397	SISLANDS_SMC_VOLTAGE_VALUE vddc;
2398	u32 prev_sclk;
2399	u32 max_sclk;
2400	u32 min_sclk;
2401	u16 prev_std_vddc;
2402	u16 curr_std_vddc;
2403	int i;
2404	u16 pwr_efficiency_ratio;
2405	u8 max_ps_percent;
2406	bool disable_uvd_power_tune;
2407	int ret;
2408
2409	if (ni_pi->enable_power_containment == false)
2410		return 0;
2411
2412	if (state->performance_level_count == 0)
2413		return -EINVAL;
2414
2415	if (smc_state->levelCount != state->performance_level_count)
2416		return -EINVAL;
2417
2418	disable_uvd_power_tune = si_should_disable_uvd_powertune(adev, amdgpu_state);
2419
2420	smc_state->levels[0].dpm2.MaxPS = 0;
2421	smc_state->levels[0].dpm2.NearTDPDec = 0;
2422	smc_state->levels[0].dpm2.AboveSafeInc = 0;
2423	smc_state->levels[0].dpm2.BelowSafeInc = 0;
2424	smc_state->levels[0].dpm2.PwrEfficiencyRatio = 0;
2425
2426	for (i = 1; i < state->performance_level_count; i++) {
2427		prev_sclk = state->performance_levels[i-1].sclk;
2428		max_sclk  = state->performance_levels[i].sclk;
2429		if (i == 1)
2430			max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_M;
2431		else
2432			max_ps_percent = SISLANDS_DPM2_MAXPS_PERCENT_H;
2433
2434		if (prev_sclk > max_sclk)
2435			return -EINVAL;
2436
2437		if ((max_ps_percent == 0) ||
2438		    (prev_sclk == max_sclk) ||
2439		    disable_uvd_power_tune)
2440			min_sclk = max_sclk;
2441		else if (i == 1)
2442			min_sclk = prev_sclk;
2443		else
2444			min_sclk = (prev_sclk * (u32)max_ps_percent) / 100;
2445
2446		if (min_sclk < state->performance_levels[0].sclk)
2447			min_sclk = state->performance_levels[0].sclk;
2448
2449		if (min_sclk == 0)
2450			return -EINVAL;
2451
2452		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
2453						state->performance_levels[i-1].vddc, &vddc);
2454		if (ret)
2455			return ret;
2456
2457		ret = si_get_std_voltage_value(adev, &vddc, &prev_std_vddc);
2458		if (ret)
2459			return ret;
2460
2461		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
2462						state->performance_levels[i].vddc, &vddc);
2463		if (ret)
2464			return ret;
2465
2466		ret = si_get_std_voltage_value(adev, &vddc, &curr_std_vddc);
2467		if (ret)
2468			return ret;
2469
2470		pwr_efficiency_ratio = si_calculate_power_efficiency_ratio(adev,
2471									   prev_std_vddc, curr_std_vddc);
2472
2473		smc_state->levels[i].dpm2.MaxPS = (u8)((SISLANDS_DPM2_MAX_PULSE_SKIP * (max_sclk - min_sclk)) / max_sclk);
2474		smc_state->levels[i].dpm2.NearTDPDec = SISLANDS_DPM2_NEAR_TDP_DEC;
2475		smc_state->levels[i].dpm2.AboveSafeInc = SISLANDS_DPM2_ABOVE_SAFE_INC;
2476		smc_state->levels[i].dpm2.BelowSafeInc = SISLANDS_DPM2_BELOW_SAFE_INC;
2477		smc_state->levels[i].dpm2.PwrEfficiencyRatio = cpu_to_be16(pwr_efficiency_ratio);
2478	}
2479
2480	return 0;
2481}
2482
2483static int si_populate_sq_ramping_values(struct amdgpu_device *adev,
2484					 struct amdgpu_ps *amdgpu_state,
2485					 SISLANDS_SMC_SWSTATE *smc_state)
2486{
2487	struct ni_power_info *ni_pi = ni_get_pi(adev);
2488	struct  si_ps *state = si_get_ps(amdgpu_state);
2489	u32 sq_power_throttle, sq_power_throttle2;
2490	bool enable_sq_ramping = ni_pi->enable_sq_ramping;
2491	int i;
2492
2493	if (state->performance_level_count == 0)
2494		return -EINVAL;
2495
2496	if (smc_state->levelCount != state->performance_level_count)
2497		return -EINVAL;
2498
2499	if (adev->pm.dpm.sq_ramping_threshold == 0)
2500		return -EINVAL;
2501
2502	if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER > (MAX_POWER_MASK >> MAX_POWER_SHIFT))
2503		enable_sq_ramping = false;
2504
2505	if (SISLANDS_DPM2_SQ_RAMP_MIN_POWER > (MIN_POWER_MASK >> MIN_POWER_SHIFT))
2506		enable_sq_ramping = false;
2507
2508	if (SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA > (MAX_POWER_DELTA_MASK >> MAX_POWER_DELTA_SHIFT))
2509		enable_sq_ramping = false;
2510
2511	if (SISLANDS_DPM2_SQ_RAMP_STI_SIZE > (STI_SIZE_MASK >> STI_SIZE_SHIFT))
2512		enable_sq_ramping = false;
2513
2514	if (SISLANDS_DPM2_SQ_RAMP_LTI_RATIO > (LTI_RATIO_MASK >> LTI_RATIO_SHIFT))
2515		enable_sq_ramping = false;
2516
2517	for (i = 0; i < state->performance_level_count; i++) {
2518		sq_power_throttle = 0;
2519		sq_power_throttle2 = 0;
2520
2521		if ((state->performance_levels[i].sclk >= adev->pm.dpm.sq_ramping_threshold) &&
2522		    enable_sq_ramping) {
2523			sq_power_throttle |= MAX_POWER(SISLANDS_DPM2_SQ_RAMP_MAX_POWER);
2524			sq_power_throttle |= MIN_POWER(SISLANDS_DPM2_SQ_RAMP_MIN_POWER);
2525			sq_power_throttle2 |= MAX_POWER_DELTA(SISLANDS_DPM2_SQ_RAMP_MAX_POWER_DELTA);
2526			sq_power_throttle2 |= STI_SIZE(SISLANDS_DPM2_SQ_RAMP_STI_SIZE);
2527			sq_power_throttle2 |= LTI_RATIO(SISLANDS_DPM2_SQ_RAMP_LTI_RATIO);
2528		} else {
2529			sq_power_throttle |= MAX_POWER_MASK | MIN_POWER_MASK;
2530			sq_power_throttle2 |= MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
2531		}
2532
2533		smc_state->levels[i].SQPowerThrottle = cpu_to_be32(sq_power_throttle);
2534		smc_state->levels[i].SQPowerThrottle_2 = cpu_to_be32(sq_power_throttle2);
2535	}
2536
2537	return 0;
2538}
2539
2540static int si_enable_power_containment(struct amdgpu_device *adev,
2541				       struct amdgpu_ps *amdgpu_new_state,
2542				       bool enable)
2543{
2544	struct ni_power_info *ni_pi = ni_get_pi(adev);
2545	PPSMC_Result smc_result;
2546	int ret = 0;
2547
2548	if (ni_pi->enable_power_containment) {
2549		if (enable) {
2550			if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
2551				smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingActive);
2552				if (smc_result != PPSMC_Result_OK) {
2553					ret = -EINVAL;
2554					ni_pi->pc_enabled = false;
2555				} else {
2556					ni_pi->pc_enabled = true;
2557				}
2558			}
2559		} else {
2560			smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_TDPClampingInactive);
2561			if (smc_result != PPSMC_Result_OK)
2562				ret = -EINVAL;
2563			ni_pi->pc_enabled = false;
2564		}
2565	}
2566
2567	return ret;
2568}
2569
2570static int si_initialize_smc_dte_tables(struct amdgpu_device *adev)
2571{
2572	struct si_power_info *si_pi = si_get_pi(adev);
2573	int ret = 0;
2574	struct si_dte_data *dte_data = &si_pi->dte_data;
2575	Smc_SIslands_DTE_Configuration *dte_tables = NULL;
2576	u32 table_size;
2577	u8 tdep_count;
2578	u32 i;
2579
2580	if (dte_data == NULL)
2581		si_pi->enable_dte = false;
2582
2583	if (si_pi->enable_dte == false)
2584		return 0;
2585
2586	if (dte_data->k <= 0)
2587		return -EINVAL;
2588
2589	dte_tables = kzalloc(sizeof(Smc_SIslands_DTE_Configuration), GFP_KERNEL);
2590	if (dte_tables == NULL) {
2591		si_pi->enable_dte = false;
2592		return -ENOMEM;
2593	}
2594
2595	table_size = dte_data->k;
2596
2597	if (table_size > SMC_SISLANDS_DTE_MAX_FILTER_STAGES)
2598		table_size = SMC_SISLANDS_DTE_MAX_FILTER_STAGES;
2599
2600	tdep_count = dte_data->tdep_count;
2601	if (tdep_count > SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE)
2602		tdep_count = SMC_SISLANDS_DTE_MAX_TEMPERATURE_DEPENDENT_ARRAY_SIZE;
2603
2604	dte_tables->K = cpu_to_be32(table_size);
2605	dte_tables->T0 = cpu_to_be32(dte_data->t0);
2606	dte_tables->MaxT = cpu_to_be32(dte_data->max_t);
2607	dte_tables->WindowSize = dte_data->window_size;
2608	dte_tables->temp_select = dte_data->temp_select;
2609	dte_tables->DTE_mode = dte_data->dte_mode;
2610	dte_tables->Tthreshold = cpu_to_be32(dte_data->t_threshold);
2611
2612	if (tdep_count > 0)
2613		table_size--;
2614
2615	for (i = 0; i < table_size; i++) {
2616		dte_tables->tau[i] = cpu_to_be32(dte_data->tau[i]);
2617		dte_tables->R[i]   = cpu_to_be32(dte_data->r[i]);
2618	}
2619
2620	dte_tables->Tdep_count = tdep_count;
2621
2622	for (i = 0; i < (u32)tdep_count; i++) {
2623		dte_tables->T_limits[i] = dte_data->t_limits[i];
2624		dte_tables->Tdep_tau[i] = cpu_to_be32(dte_data->tdep_tau[i]);
2625		dte_tables->Tdep_R[i] = cpu_to_be32(dte_data->tdep_r[i]);
2626	}
2627
2628	ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->dte_table_start,
2629					  (u8 *)dte_tables,
2630					  sizeof(Smc_SIslands_DTE_Configuration),
2631					  si_pi->sram_end);
2632	kfree(dte_tables);
2633
2634	return ret;
2635}
2636
2637static int si_get_cac_std_voltage_max_min(struct amdgpu_device *adev,
2638					  u16 *max, u16 *min)
2639{
2640	struct si_power_info *si_pi = si_get_pi(adev);
2641	struct amdgpu_cac_leakage_table *table =
2642		&adev->pm.dpm.dyn_state.cac_leakage_table;
2643	u32 i;
2644	u32 v0_loadline;
2645
2646	if (table == NULL)
2647		return -EINVAL;
2648
2649	*max = 0;
2650	*min = 0xFFFF;
2651
2652	for (i = 0; i < table->count; i++) {
2653		if (table->entries[i].vddc > *max)
2654			*max = table->entries[i].vddc;
2655		if (table->entries[i].vddc < *min)
2656			*min = table->entries[i].vddc;
2657	}
2658
2659	if (si_pi->powertune_data->lkge_lut_v0_percent > 100)
2660		return -EINVAL;
2661
2662	v0_loadline = (*min) * (100 - si_pi->powertune_data->lkge_lut_v0_percent) / 100;
2663
2664	if (v0_loadline > 0xFFFFUL)
2665		return -EINVAL;
2666
2667	*min = (u16)v0_loadline;
2668
2669	if ((*min > *max) || (*max == 0) || (*min == 0))
2670		return -EINVAL;
2671
2672	return 0;
2673}
2674
2675static u16 si_get_cac_std_voltage_step(u16 max, u16 min)
2676{
2677	return ((max - min) + (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1)) /
2678		SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES;
2679}
2680
2681static int si_init_dte_leakage_table(struct amdgpu_device *adev,
2682				     PP_SIslands_CacConfig *cac_tables,
2683				     u16 vddc_max, u16 vddc_min, u16 vddc_step,
2684				     u16 t0, u16 t_step)
2685{
2686	struct si_power_info *si_pi = si_get_pi(adev);
2687	u32 leakage;
2688	unsigned int i, j;
2689	s32 t;
2690	u32 smc_leakage;
2691	u32 scaling_factor;
2692	u16 voltage;
2693
2694	scaling_factor = si_get_smc_power_scaling_factor(adev);
2695
2696	for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES ; i++) {
2697		t = (1000 * (i * t_step + t0));
2698
2699		for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
2700			voltage = vddc_max - (vddc_step * j);
2701
2702			si_calculate_leakage_for_v_and_t(adev,
2703							 &si_pi->powertune_data->leakage_coefficients,
2704							 voltage,
2705							 t,
2706							 si_pi->dyn_powertune_data.cac_leakage,
2707							 &leakage);
2708
2709			smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;
2710
2711			if (smc_leakage > 0xFFFF)
2712				smc_leakage = 0xFFFF;
2713
2714			cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
2715				cpu_to_be16((u16)smc_leakage);
2716		}
2717	}
2718	return 0;
2719}
2720
2721static int si_init_simplified_leakage_table(struct amdgpu_device *adev,
2722					    PP_SIslands_CacConfig *cac_tables,
2723					    u16 vddc_max, u16 vddc_min, u16 vddc_step)
2724{
2725	struct si_power_info *si_pi = si_get_pi(adev);
2726	u32 leakage;
2727	unsigned int i, j;
2728	u32 smc_leakage;
2729	u32 scaling_factor;
2730	u16 voltage;
2731
2732	scaling_factor = si_get_smc_power_scaling_factor(adev);
2733
2734	for (j = 0; j < SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES; j++) {
2735		voltage = vddc_max - (vddc_step * j);
2736
2737		si_calculate_leakage_for_v(adev,
2738					   &si_pi->powertune_data->leakage_coefficients,
2739					   si_pi->powertune_data->fixed_kt,
2740					   voltage,
2741					   si_pi->dyn_powertune_data.cac_leakage,
2742					   &leakage);
2743
2744		smc_leakage = si_scale_power_for_smc(leakage, scaling_factor) / 4;
2745
2746		if (smc_leakage > 0xFFFF)
2747			smc_leakage = 0xFFFF;
2748
2749		for (i = 0; i < SMC_SISLANDS_LKGE_LUT_NUM_OF_TEMP_ENTRIES ; i++)
2750			cac_tables->cac_lkge_lut[i][SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES-1-j] =
2751				cpu_to_be16((u16)smc_leakage);
2752	}
2753	return 0;
2754}
2755
2756static int si_initialize_smc_cac_tables(struct amdgpu_device *adev)
2757{
2758	struct ni_power_info *ni_pi = ni_get_pi(adev);
2759	struct si_power_info *si_pi = si_get_pi(adev);
2760	PP_SIslands_CacConfig *cac_tables = NULL;
2761	u16 vddc_max, vddc_min, vddc_step;
2762	u16 t0, t_step;
2763	u32 load_line_slope, reg;
2764	int ret = 0;
2765	u32 ticks_per_us = amdgpu_asic_get_xclk(adev) / 100;
2766
2767	if (ni_pi->enable_cac == false)
2768		return 0;
2769
2770	cac_tables = kzalloc(sizeof(PP_SIslands_CacConfig), GFP_KERNEL);
2771	if (!cac_tables)
2772		return -ENOMEM;
2773
2774	reg = RREG32(CG_CAC_CTRL) & ~CAC_WINDOW_MASK;
2775	reg |= CAC_WINDOW(si_pi->powertune_data->cac_window);
2776	WREG32(CG_CAC_CTRL, reg);
2777
2778	si_pi->dyn_powertune_data.cac_leakage = adev->pm.dpm.cac_leakage;
2779	si_pi->dyn_powertune_data.dc_pwr_value =
2780		si_pi->powertune_data->dc_cac[NISLANDS_DCCAC_LEVEL_0];
2781	si_pi->dyn_powertune_data.wintime = si_calculate_cac_wintime(adev);
2782	si_pi->dyn_powertune_data.shift_n = si_pi->powertune_data->shift_n_default;
2783
2784	si_pi->dyn_powertune_data.leakage_minimum_temperature = 80 * 1000;
2785
2786	ret = si_get_cac_std_voltage_max_min(adev, &vddc_max, &vddc_min);
2787	if (ret)
2788		goto done_free;
2789
2790	vddc_step = si_get_cac_std_voltage_step(vddc_max, vddc_min);
2791	vddc_min = vddc_max - (vddc_step * (SMC_SISLANDS_LKGE_LUT_NUM_OF_VOLT_ENTRIES - 1));
2792	t_step = 4;
2793	t0 = 60;
2794
2795	if (si_pi->enable_dte || ni_pi->driver_calculate_cac_leakage)
2796		ret = si_init_dte_leakage_table(adev, cac_tables,
2797						vddc_max, vddc_min, vddc_step,
2798						t0, t_step);
2799	else
2800		ret = si_init_simplified_leakage_table(adev, cac_tables,
2801						       vddc_max, vddc_min, vddc_step);
2802	if (ret)
2803		goto done_free;
2804
2805	load_line_slope = ((u32)adev->pm.dpm.load_line_slope << SMC_SISLANDS_SCALE_R) / 100;
2806
2807	cac_tables->l2numWin_TDP = cpu_to_be32(si_pi->dyn_powertune_data.l2_lta_window_size);
2808	cac_tables->lts_truncate_n = si_pi->dyn_powertune_data.lts_truncate;
2809	cac_tables->SHIFT_N = si_pi->dyn_powertune_data.shift_n;
2810	cac_tables->lkge_lut_V0 = cpu_to_be32((u32)vddc_min);
2811	cac_tables->lkge_lut_Vstep = cpu_to_be32((u32)vddc_step);
2812	cac_tables->R_LL = cpu_to_be32(load_line_slope);
2813	cac_tables->WinTime = cpu_to_be32(si_pi->dyn_powertune_data.wintime);
2814	cac_tables->calculation_repeats = cpu_to_be32(2);
2815	cac_tables->dc_cac = cpu_to_be32(0);
2816	cac_tables->log2_PG_LKG_SCALE = 12;
2817	cac_tables->cac_temp = si_pi->powertune_data->operating_temp;
2818	cac_tables->lkge_lut_T0 = cpu_to_be32((u32)t0);
2819	cac_tables->lkge_lut_Tstep = cpu_to_be32((u32)t_step);
2820
2821	ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->cac_table_start,
2822					  (u8 *)cac_tables,
2823					  sizeof(PP_SIslands_CacConfig),
2824					  si_pi->sram_end);
2825
2826	if (ret)
2827		goto done_free;
2828
2829	ret = si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ticks_per_us, ticks_per_us);
2830
2831done_free:
2832	if (ret) {
2833		ni_pi->enable_cac = false;
2834		ni_pi->enable_power_containment = false;
2835	}
2836
2837	kfree(cac_tables);
2838
2839	return ret;
2840}
2841
2842static int si_program_cac_config_registers(struct amdgpu_device *adev,
2843					   const struct si_cac_config_reg *cac_config_regs)
2844{
2845	const struct si_cac_config_reg *config_regs = cac_config_regs;
2846	u32 data = 0, offset;
2847
2848	if (!config_regs)
2849		return -EINVAL;
2850
2851	while (config_regs->offset != 0xFFFFFFFF) {
2852		switch (config_regs->type) {
2853		case SISLANDS_CACCONFIG_CGIND:
2854			offset = SMC_CG_IND_START + config_regs->offset;
2855			if (offset < SMC_CG_IND_END)
2856				data = RREG32_SMC(offset);
2857			break;
2858		default:
2859			data = RREG32(config_regs->offset);
2860			break;
2861		}
2862
2863		data &= ~config_regs->mask;
2864		data |= ((config_regs->value << config_regs->shift) & config_regs->mask);
2865
2866		switch (config_regs->type) {
2867		case SISLANDS_CACCONFIG_CGIND:
2868			offset = SMC_CG_IND_START + config_regs->offset;
2869			if (offset < SMC_CG_IND_END)
2870				WREG32_SMC(offset, data);
2871			break;
2872		default:
2873			WREG32(config_regs->offset, data);
2874			break;
2875		}
2876		config_regs++;
2877	}
2878	return 0;
2879}
2880
2881static int si_initialize_hardware_cac_manager(struct amdgpu_device *adev)
2882{
2883	struct ni_power_info *ni_pi = ni_get_pi(adev);
2884	struct si_power_info *si_pi = si_get_pi(adev);
2885	int ret;
2886
2887	if ((ni_pi->enable_cac == false) ||
2888	    (ni_pi->cac_configuration_required == false))
2889		return 0;
2890
2891	ret = si_program_cac_config_registers(adev, si_pi->lcac_config);
2892	if (ret)
2893		return ret;
2894	ret = si_program_cac_config_registers(adev, si_pi->cac_override);
2895	if (ret)
2896		return ret;
2897	ret = si_program_cac_config_registers(adev, si_pi->cac_weights);
2898	if (ret)
2899		return ret;
2900
2901	return 0;
2902}
2903
2904static int si_enable_smc_cac(struct amdgpu_device *adev,
2905			     struct amdgpu_ps *amdgpu_new_state,
2906			     bool enable)
2907{
2908	struct ni_power_info *ni_pi = ni_get_pi(adev);
2909	struct si_power_info *si_pi = si_get_pi(adev);
2910	PPSMC_Result smc_result;
2911	int ret = 0;
2912
2913	if (ni_pi->enable_cac) {
2914		if (enable) {
2915			if (!si_should_disable_uvd_powertune(adev, amdgpu_new_state)) {
2916				if (ni_pi->support_cac_long_term_average) {
2917					smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgEnable);
2918					if (smc_result != PPSMC_Result_OK)
2919						ni_pi->support_cac_long_term_average = false;
2920				}
2921
2922				smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableCac);
2923				if (smc_result != PPSMC_Result_OK) {
2924					ret = -EINVAL;
2925					ni_pi->cac_enabled = false;
2926				} else {
2927					ni_pi->cac_enabled = true;
2928				}
2929
2930				if (si_pi->enable_dte) {
2931					smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableDTE);
2932					if (smc_result != PPSMC_Result_OK)
2933						ret = -EINVAL;
2934				}
2935			}
2936		} else if (ni_pi->cac_enabled) {
2937			if (si_pi->enable_dte)
2938				smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableDTE);
2939
2940			smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableCac);
2941
2942			ni_pi->cac_enabled = false;
2943
2944			if (ni_pi->support_cac_long_term_average)
2945				smc_result = amdgpu_si_send_msg_to_smc(adev, PPSMC_CACLongTermAvgDisable);
2946		}
2947	}
2948	return ret;
2949}
2950
2951static int si_init_smc_spll_table(struct amdgpu_device *adev)
2952{
2953	struct ni_power_info *ni_pi = ni_get_pi(adev);
2954	struct si_power_info *si_pi = si_get_pi(adev);
2955	SMC_SISLANDS_SPLL_DIV_TABLE *spll_table;
2956	SISLANDS_SMC_SCLK_VALUE sclk_params;
2957	u32 fb_div, p_div;
2958	u32 clk_s, clk_v;
2959	u32 sclk = 0;
2960	int ret = 0;
2961	u32 tmp;
2962	int i;
2963
2964	if (si_pi->spll_table_start == 0)
2965		return -EINVAL;
2966
2967	spll_table = kzalloc(sizeof(SMC_SISLANDS_SPLL_DIV_TABLE), GFP_KERNEL);
2968	if (spll_table == NULL)
2969		return -ENOMEM;
2970
2971	for (i = 0; i < 256; i++) {
2972		ret = si_calculate_sclk_params(adev, sclk, &sclk_params);
2973		if (ret)
2974			break;
2975		p_div = (sclk_params.vCG_SPLL_FUNC_CNTL & SPLL_PDIV_A_MASK) >> SPLL_PDIV_A_SHIFT;
2976		fb_div = (sclk_params.vCG_SPLL_FUNC_CNTL_3 & SPLL_FB_DIV_MASK) >> SPLL_FB_DIV_SHIFT;
2977		clk_s = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM & CLK_S_MASK) >> CLK_S_SHIFT;
2978		clk_v = (sclk_params.vCG_SPLL_SPREAD_SPECTRUM_2 & CLK_V_MASK) >> CLK_V_SHIFT;
2979
2980		fb_div &= ~0x00001FFF;
2981		fb_div >>= 1;
2982		clk_v >>= 6;
2983
2984		if (p_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT))
2985			ret = -EINVAL;
2986		if (fb_div & ~(SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT))
2987			ret = -EINVAL;
2988		if (clk_s & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT))
2989			ret = -EINVAL;
2990		if (clk_v & ~(SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK >> SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT))
2991			ret = -EINVAL;
2992
2993		if (ret)
2994			break;
2995
2996		tmp = ((fb_div << SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_FBDIV_MASK) |
2997			((p_div << SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_PDIV_MASK);
2998		spll_table->freq[i] = cpu_to_be32(tmp);
2999
3000		tmp = ((clk_v << SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKV_MASK) |
3001			((clk_s << SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_SHIFT) & SMC_SISLANDS_SPLL_DIV_TABLE_CLKS_MASK);
3002		spll_table->ss[i] = cpu_to_be32(tmp);
3003
3004		sclk += 512;
3005	}
3006
3007
3008	if (!ret)
3009		ret = amdgpu_si_copy_bytes_to_smc(adev, si_pi->spll_table_start,
3010						  (u8 *)spll_table,
3011						  sizeof(SMC_SISLANDS_SPLL_DIV_TABLE),
3012						  si_pi->sram_end);
3013
3014	if (ret)
3015		ni_pi->enable_power_containment = false;
3016
3017	kfree(spll_table);
3018
3019	return ret;
3020}
3021
3022static u16 si_get_lower_of_leakage_and_vce_voltage(struct amdgpu_device *adev,
3023						   u16 vce_voltage)
3024{
3025	u16 highest_leakage = 0;
3026	struct si_power_info *si_pi = si_get_pi(adev);
3027	int i;
3028
3029	for (i = 0; i < si_pi->leakage_voltage.count; i++){
3030		if (highest_leakage < si_pi->leakage_voltage.entries[i].voltage)
3031			highest_leakage = si_pi->leakage_voltage.entries[i].voltage;
3032	}
3033
3034	if (si_pi->leakage_voltage.count && (highest_leakage < vce_voltage))
3035		return highest_leakage;
3036
3037	return vce_voltage;
3038}
3039
3040static int si_get_vce_clock_voltage(struct amdgpu_device *adev,
3041				    u32 evclk, u32 ecclk, u16 *voltage)
3042{
3043	u32 i;
3044	int ret = -EINVAL;
3045	struct amdgpu_vce_clock_voltage_dependency_table *table =
3046		&adev->pm.dpm.dyn_state.vce_clock_voltage_dependency_table;
3047
3048	if (((evclk == 0) && (ecclk == 0)) ||
3049	    (table && (table->count == 0))) {
3050		*voltage = 0;
3051		return 0;
3052	}
3053
3054	for (i = 0; i < table->count; i++) {
3055		if ((evclk <= table->entries[i].evclk) &&
3056		    (ecclk <= table->entries[i].ecclk)) {
3057			*voltage = table->entries[i].v;
3058			ret = 0;
3059			break;
3060		}
3061	}
3062
3063	/* if no match return the highest voltage */
3064	if (ret)
3065		*voltage = table->entries[table->count - 1].v;
3066
3067	*voltage = si_get_lower_of_leakage_and_vce_voltage(adev, *voltage);
3068
3069	return ret;
3070}
3071
3072static bool si_dpm_vblank_too_short(void *handle)
3073{
3074	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3075	u32 vblank_time = amdgpu_dpm_get_vblank_time(adev);
3076	/* we never hit the non-gddr5 limit so disable it */
3077	u32 switch_limit = adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5 ? 450 : 0;
3078
3079	if (vblank_time < switch_limit)
3080		return true;
3081	else
3082		return false;
3083
3084}
3085
3086static int ni_copy_and_switch_arb_sets(struct amdgpu_device *adev,
3087				u32 arb_freq_src, u32 arb_freq_dest)
3088{
3089	u32 mc_arb_dram_timing;
3090	u32 mc_arb_dram_timing2;
3091	u32 burst_time;
3092	u32 mc_cg_config;
3093
3094	switch (arb_freq_src) {
3095	case MC_CG_ARB_FREQ_F0:
3096		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
3097		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
3098		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE0_MASK) >> STATE0_SHIFT;
3099		break;
3100	case MC_CG_ARB_FREQ_F1:
3101		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_1);
3102		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_1);
3103		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE1_MASK) >> STATE1_SHIFT;
3104		break;
3105	case MC_CG_ARB_FREQ_F2:
3106		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_2);
3107		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_2);
3108		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE2_MASK) >> STATE2_SHIFT;
3109		break;
3110	case MC_CG_ARB_FREQ_F3:
3111		mc_arb_dram_timing  = RREG32(MC_ARB_DRAM_TIMING_3);
3112		mc_arb_dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2_3);
3113		burst_time = (RREG32(MC_ARB_BURST_TIME) & STATE3_MASK) >> STATE3_SHIFT;
3114		break;
3115	default:
3116		return -EINVAL;
3117	}
3118
3119	switch (arb_freq_dest) {
3120	case MC_CG_ARB_FREQ_F0:
3121		WREG32(MC_ARB_DRAM_TIMING, mc_arb_dram_timing);
3122		WREG32(MC_ARB_DRAM_TIMING2, mc_arb_dram_timing2);
3123		WREG32_P(MC_ARB_BURST_TIME, STATE0(burst_time), ~STATE0_MASK);
3124		break;
3125	case MC_CG_ARB_FREQ_F1:
3126		WREG32(MC_ARB_DRAM_TIMING_1, mc_arb_dram_timing);
3127		WREG32(MC_ARB_DRAM_TIMING2_1, mc_arb_dram_timing2);
3128		WREG32_P(MC_ARB_BURST_TIME, STATE1(burst_time), ~STATE1_MASK);
3129		break;
3130	case MC_CG_ARB_FREQ_F2:
3131		WREG32(MC_ARB_DRAM_TIMING_2, mc_arb_dram_timing);
3132		WREG32(MC_ARB_DRAM_TIMING2_2, mc_arb_dram_timing2);
3133		WREG32_P(MC_ARB_BURST_TIME, STATE2(burst_time), ~STATE2_MASK);
3134		break;
3135	case MC_CG_ARB_FREQ_F3:
3136		WREG32(MC_ARB_DRAM_TIMING_3, mc_arb_dram_timing);
3137		WREG32(MC_ARB_DRAM_TIMING2_3, mc_arb_dram_timing2);
3138		WREG32_P(MC_ARB_BURST_TIME, STATE3(burst_time), ~STATE3_MASK);
3139		break;
3140	default:
3141		return -EINVAL;
3142	}
3143
3144	mc_cg_config = RREG32(MC_CG_CONFIG) | 0x0000000F;
3145	WREG32(MC_CG_CONFIG, mc_cg_config);
3146	WREG32_P(MC_ARB_CG, CG_ARB_REQ(arb_freq_dest), ~CG_ARB_REQ_MASK);
3147
3148	return 0;
3149}
3150
3151static void ni_update_current_ps(struct amdgpu_device *adev,
3152			  struct amdgpu_ps *rps)
3153{
3154	struct si_ps *new_ps = si_get_ps(rps);
3155	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
3156	struct ni_power_info *ni_pi = ni_get_pi(adev);
3157
3158	eg_pi->current_rps = *rps;
3159	ni_pi->current_ps = *new_ps;
3160	eg_pi->current_rps.ps_priv = &ni_pi->current_ps;
3161	adev->pm.dpm.current_ps = &eg_pi->current_rps;
3162}
3163
3164static void ni_update_requested_ps(struct amdgpu_device *adev,
3165			    struct amdgpu_ps *rps)
3166{
3167	struct si_ps *new_ps = si_get_ps(rps);
3168	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
3169	struct ni_power_info *ni_pi = ni_get_pi(adev);
3170
3171	eg_pi->requested_rps = *rps;
3172	ni_pi->requested_ps = *new_ps;
3173	eg_pi->requested_rps.ps_priv = &ni_pi->requested_ps;
3174	adev->pm.dpm.requested_ps = &eg_pi->requested_rps;
3175}
3176
3177static void ni_set_uvd_clock_before_set_eng_clock(struct amdgpu_device *adev,
3178					   struct amdgpu_ps *new_ps,
3179					   struct amdgpu_ps *old_ps)
3180{
3181	struct si_ps *new_state = si_get_ps(new_ps);
3182	struct si_ps *current_state = si_get_ps(old_ps);
3183
3184	if ((new_ps->vclk == old_ps->vclk) &&
3185	    (new_ps->dclk == old_ps->dclk))
3186		return;
3187
3188	if (new_state->performance_levels[new_state->performance_level_count - 1].sclk >=
3189	    current_state->performance_levels[current_state->performance_level_count - 1].sclk)
3190		return;
3191
3192	amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
3193}
3194
3195static void ni_set_uvd_clock_after_set_eng_clock(struct amdgpu_device *adev,
3196					  struct amdgpu_ps *new_ps,
3197					  struct amdgpu_ps *old_ps)
3198{
3199	struct si_ps *new_state = si_get_ps(new_ps);
3200	struct si_ps *current_state = si_get_ps(old_ps);
3201
3202	if ((new_ps->vclk == old_ps->vclk) &&
3203	    (new_ps->dclk == old_ps->dclk))
3204		return;
3205
3206	if (new_state->performance_levels[new_state->performance_level_count - 1].sclk <
3207	    current_state->performance_levels[current_state->performance_level_count - 1].sclk)
3208		return;
3209
3210	amdgpu_asic_set_uvd_clocks(adev, new_ps->vclk, new_ps->dclk);
3211}
3212
3213static u16 btc_find_voltage(struct atom_voltage_table *table, u16 voltage)
3214{
3215	unsigned int i;
3216
3217	for (i = 0; i < table->count; i++)
3218		if (voltage <= table->entries[i].value)
3219			return table->entries[i].value;
3220
3221	return table->entries[table->count - 1].value;
3222}
3223
3224static u32 btc_find_valid_clock(struct amdgpu_clock_array *clocks,
3225		                u32 max_clock, u32 requested_clock)
3226{
3227	unsigned int i;
3228
3229	if ((clocks == NULL) || (clocks->count == 0))
3230		return (requested_clock < max_clock) ? requested_clock : max_clock;
3231
3232	for (i = 0; i < clocks->count; i++) {
3233		if (clocks->values[i] >= requested_clock)
3234			return (clocks->values[i] < max_clock) ? clocks->values[i] : max_clock;
3235	}
3236
3237	return (clocks->values[clocks->count - 1] < max_clock) ?
3238		clocks->values[clocks->count - 1] : max_clock;
3239}
3240
3241static u32 btc_get_valid_mclk(struct amdgpu_device *adev,
3242			      u32 max_mclk, u32 requested_mclk)
3243{
3244	return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_mclk_values,
3245				    max_mclk, requested_mclk);
3246}
3247
3248static u32 btc_get_valid_sclk(struct amdgpu_device *adev,
3249		              u32 max_sclk, u32 requested_sclk)
3250{
3251	return btc_find_valid_clock(&adev->pm.dpm.dyn_state.valid_sclk_values,
3252				    max_sclk, requested_sclk);
3253}
3254
3255static void btc_get_max_clock_from_voltage_dependency_table(struct amdgpu_clock_voltage_dependency_table *table,
3256							    u32 *max_clock)
3257{
3258	u32 i, clock = 0;
3259
3260	if ((table == NULL) || (table->count == 0)) {
3261		*max_clock = clock;
3262		return;
3263	}
3264
3265	for (i = 0; i < table->count; i++) {
3266		if (clock < table->entries[i].clk)
3267			clock = table->entries[i].clk;
3268	}
3269	*max_clock = clock;
3270}
3271
3272static void btc_apply_voltage_dependency_rules(struct amdgpu_clock_voltage_dependency_table *table,
3273					       u32 clock, u16 max_voltage, u16 *voltage)
3274{
3275	u32 i;
3276
3277	if ((table == NULL) || (table->count == 0))
3278		return;
3279
3280	for (i= 0; i < table->count; i++) {
3281		if (clock <= table->entries[i].clk) {
3282			if (*voltage < table->entries[i].v)
3283				*voltage = (u16)((table->entries[i].v < max_voltage) ?
3284					   table->entries[i].v : max_voltage);
3285			return;
3286		}
3287	}
3288
3289	*voltage = (*voltage > max_voltage) ? *voltage : max_voltage;
3290}
3291
3292static void btc_adjust_clock_combinations(struct amdgpu_device *adev,
3293					  const struct amdgpu_clock_and_voltage_limits *max_limits,
3294					  struct rv7xx_pl *pl)
3295{
3296
3297	if ((pl->mclk == 0) || (pl->sclk == 0))
3298		return;
3299
3300	if (pl->mclk == pl->sclk)
3301		return;
3302
3303	if (pl->mclk > pl->sclk) {
3304		if (((pl->mclk + (pl->sclk - 1)) / pl->sclk) > adev->pm.dpm.dyn_state.mclk_sclk_ratio)
3305			pl->sclk = btc_get_valid_sclk(adev,
3306						      max_limits->sclk,
3307						      (pl->mclk +
3308						      (adev->pm.dpm.dyn_state.mclk_sclk_ratio - 1)) /
3309						      adev->pm.dpm.dyn_state.mclk_sclk_ratio);
3310	} else {
3311		if ((pl->sclk - pl->mclk) > adev->pm.dpm.dyn_state.sclk_mclk_delta)
3312			pl->mclk = btc_get_valid_mclk(adev,
3313						      max_limits->mclk,
3314						      pl->sclk -
3315						      adev->pm.dpm.dyn_state.sclk_mclk_delta);
3316	}
3317}
3318
3319static void btc_apply_voltage_delta_rules(struct amdgpu_device *adev,
3320					  u16 max_vddc, u16 max_vddci,
3321					  u16 *vddc, u16 *vddci)
3322{
3323	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
3324	u16 new_voltage;
3325
3326	if ((0 == *vddc) || (0 == *vddci))
3327		return;
3328
3329	if (*vddc > *vddci) {
3330		if ((*vddc - *vddci) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
3331			new_voltage = btc_find_voltage(&eg_pi->vddci_voltage_table,
3332						       (*vddc - adev->pm.dpm.dyn_state.vddc_vddci_delta));
3333			*vddci = (new_voltage < max_vddci) ? new_voltage : max_vddci;
3334		}
3335	} else {
3336		if ((*vddci - *vddc) > adev->pm.dpm.dyn_state.vddc_vddci_delta) {
3337			new_voltage = btc_find_voltage(&eg_pi->vddc_voltage_table,
3338						       (*vddci - adev->pm.dpm.dyn_state.vddc_vddci_delta));
3339			*vddc = (new_voltage < max_vddc) ? new_voltage : max_vddc;
3340		}
3341	}
3342}
3343
3344static void r600_calculate_u_and_p(u32 i, u32 r_c, u32 p_b,
3345			    u32 *p, u32 *u)
3346{
3347	u32 b_c = 0;
3348	u32 i_c;
3349	u32 tmp;
3350
3351	i_c = (i * r_c) / 100;
3352	tmp = i_c >> p_b;
3353
3354	while (tmp) {
3355		b_c++;
3356		tmp >>= 1;
3357	}
3358
3359	*u = (b_c + 1) / 2;
3360	*p = i_c / (1 << (2 * (*u)));
3361}
3362
3363static int r600_calculate_at(u32 t, u32 h, u32 fh, u32 fl, u32 *tl, u32 *th)
3364{
3365	u32 k, a, ah, al;
3366	u32 t1;
3367
3368	if ((fl == 0) || (fh == 0) || (fl > fh))
3369		return -EINVAL;
3370
3371	k = (100 * fh) / fl;
3372	t1 = (t * (k - 100));
3373	a = (1000 * (100 * h + t1)) / (10000 + (t1 / 100));
3374	a = (a + 5) / 10;
3375	ah = ((a * t) + 5000) / 10000;
3376	al = a - ah;
3377
3378	*th = t - ah;
3379	*tl = t + al;
3380
3381	return 0;
3382}
3383
3384static bool r600_is_uvd_state(u32 class, u32 class2)
3385{
3386	if (class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
3387		return true;
3388	if (class & ATOM_PPLIB_CLASSIFICATION_HD2STATE)
3389		return true;
3390	if (class & ATOM_PPLIB_CLASSIFICATION_HDSTATE)
3391		return true;
3392	if (class & ATOM_PPLIB_CLASSIFICATION_SDSTATE)
3393		return true;
3394	if (class2 & ATOM_PPLIB_CLASSIFICATION2_MVC)
3395		return true;
3396	return false;
3397}
3398
3399static u8 rv770_get_memory_module_index(struct amdgpu_device *adev)
3400{
3401	return (u8) ((RREG32(BIOS_SCRATCH_4) >> 16) & 0xff);
3402}
3403
3404static void rv770_get_max_vddc(struct amdgpu_device *adev)
3405{
3406	struct rv7xx_power_info *pi = rv770_get_pi(adev);
3407	u16 vddc;
3408
3409	if (amdgpu_atombios_get_max_vddc(adev, 0, 0, &vddc))
3410		pi->max_vddc = 0;
3411	else
3412		pi->max_vddc = vddc;
3413}
3414
3415static void rv770_get_engine_memory_ss(struct amdgpu_device *adev)
3416{
3417	struct rv7xx_power_info *pi = rv770_get_pi(adev);
3418	struct amdgpu_atom_ss ss;
3419
3420	pi->sclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss,
3421						       ASIC_INTERNAL_ENGINE_SS, 0);
3422	pi->mclk_ss = amdgpu_atombios_get_asic_ss_info(adev, &ss,
3423						       ASIC_INTERNAL_MEMORY_SS, 0);
3424
3425	if (pi->sclk_ss || pi->mclk_ss)
3426		pi->dynamic_ss = true;
3427	else
3428		pi->dynamic_ss = false;
3429}
3430
3431
3432static void si_apply_state_adjust_rules(struct amdgpu_device *adev,
3433					struct amdgpu_ps *rps)
3434{
3435	struct  si_ps *ps = si_get_ps(rps);
3436	struct amdgpu_clock_and_voltage_limits *max_limits;
3437	bool disable_mclk_switching = false;
3438	bool disable_sclk_switching = false;
3439	u32 mclk, sclk;
3440	u16 vddc, vddci, min_vce_voltage = 0;
3441	u32 max_sclk_vddc, max_mclk_vddci, max_mclk_vddc;
3442	u32 max_sclk = 0, max_mclk = 0;
3443	int i;
3444
3445	if (adev->asic_type == CHIP_HAINAN) {
3446		if ((adev->pdev->revision == 0x81) ||
3447		    (adev->pdev->revision == 0x83) ||
3448		    (adev->pdev->revision == 0xC3) ||
3449		    (adev->pdev->device == 0x6664) ||
3450		    (adev->pdev->device == 0x6665) ||
3451		    (adev->pdev->device == 0x6667)) {
3452			max_sclk = 75000;
3453		}
3454		if ((adev->pdev->revision == 0xC3) ||
3455		    (adev->pdev->device == 0x6665)) {
3456			max_sclk = 60000;
3457			max_mclk = 80000;
3458		}
3459	} else if (adev->asic_type == CHIP_OLAND) {
3460		if ((adev->pdev->revision == 0xC7) ||
3461		    (adev->pdev->revision == 0x80) ||
3462		    (adev->pdev->revision == 0x81) ||
3463		    (adev->pdev->revision == 0x83) ||
3464		    (adev->pdev->revision == 0x87) ||
3465		    (adev->pdev->device == 0x6604) ||
3466		    (adev->pdev->device == 0x6605)) {
3467			max_sclk = 75000;
3468		}
3469	}
3470
3471	if (rps->vce_active) {
3472		rps->evclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].evclk;
3473		rps->ecclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].ecclk;
3474		si_get_vce_clock_voltage(adev, rps->evclk, rps->ecclk,
3475					 &min_vce_voltage);
3476	} else {
3477		rps->evclk = 0;
3478		rps->ecclk = 0;
3479	}
3480
3481	if ((adev->pm.dpm.new_active_crtc_count > 1) ||
3482	    si_dpm_vblank_too_short(adev))
3483		disable_mclk_switching = true;
3484
3485	if (rps->vclk || rps->dclk) {
3486		disable_mclk_switching = true;
3487		disable_sclk_switching = true;
3488	}
3489
3490	if (adev->pm.ac_power)
3491		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
3492	else
3493		max_limits = &adev->pm.dpm.dyn_state.max_clock_voltage_on_dc;
3494
3495	for (i = ps->performance_level_count - 2; i >= 0; i--) {
3496		if (ps->performance_levels[i].vddc > ps->performance_levels[i+1].vddc)
3497			ps->performance_levels[i].vddc = ps->performance_levels[i+1].vddc;
3498	}
3499	if (adev->pm.ac_power == false) {
3500		for (i = 0; i < ps->performance_level_count; i++) {
3501			if (ps->performance_levels[i].mclk > max_limits->mclk)
3502				ps->performance_levels[i].mclk = max_limits->mclk;
3503			if (ps->performance_levels[i].sclk > max_limits->sclk)
3504				ps->performance_levels[i].sclk = max_limits->sclk;
3505			if (ps->performance_levels[i].vddc > max_limits->vddc)
3506				ps->performance_levels[i].vddc = max_limits->vddc;
3507			if (ps->performance_levels[i].vddci > max_limits->vddci)
3508				ps->performance_levels[i].vddci = max_limits->vddci;
3509		}
3510	}
3511
3512	/* limit clocks to max supported clocks based on voltage dependency tables */
3513	btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
3514							&max_sclk_vddc);
3515	btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
3516							&max_mclk_vddci);
3517	btc_get_max_clock_from_voltage_dependency_table(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
3518							&max_mclk_vddc);
3519
3520	for (i = 0; i < ps->performance_level_count; i++) {
3521		if (max_sclk_vddc) {
3522			if (ps->performance_levels[i].sclk > max_sclk_vddc)
3523				ps->performance_levels[i].sclk = max_sclk_vddc;
3524		}
3525		if (max_mclk_vddci) {
3526			if (ps->performance_levels[i].mclk > max_mclk_vddci)
3527				ps->performance_levels[i].mclk = max_mclk_vddci;
3528		}
3529		if (max_mclk_vddc) {
3530			if (ps->performance_levels[i].mclk > max_mclk_vddc)
3531				ps->performance_levels[i].mclk = max_mclk_vddc;
3532		}
3533		if (max_mclk) {
3534			if (ps->performance_levels[i].mclk > max_mclk)
3535				ps->performance_levels[i].mclk = max_mclk;
3536		}
3537		if (max_sclk) {
3538			if (ps->performance_levels[i].sclk > max_sclk)
3539				ps->performance_levels[i].sclk = max_sclk;
3540		}
3541	}
3542
3543	/* XXX validate the min clocks required for display */
3544
3545	if (disable_mclk_switching) {
3546		mclk  = ps->performance_levels[ps->performance_level_count - 1].mclk;
3547		vddci = ps->performance_levels[ps->performance_level_count - 1].vddci;
3548	} else {
3549		mclk = ps->performance_levels[0].mclk;
3550		vddci = ps->performance_levels[0].vddci;
3551	}
3552
3553	if (disable_sclk_switching) {
3554		sclk = ps->performance_levels[ps->performance_level_count - 1].sclk;
3555		vddc = ps->performance_levels[ps->performance_level_count - 1].vddc;
3556	} else {
3557		sclk = ps->performance_levels[0].sclk;
3558		vddc = ps->performance_levels[0].vddc;
3559	}
3560
3561	if (rps->vce_active) {
3562		if (sclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk)
3563			sclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].sclk;
3564		if (mclk < adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk)
3565			mclk = adev->pm.dpm.vce_states[adev->pm.dpm.vce_level].mclk;
3566	}
3567
3568	/* adjusted low state */
3569	ps->performance_levels[0].sclk = sclk;
3570	ps->performance_levels[0].mclk = mclk;
3571	ps->performance_levels[0].vddc = vddc;
3572	ps->performance_levels[0].vddci = vddci;
3573
3574	if (disable_sclk_switching) {
3575		sclk = ps->performance_levels[0].sclk;
3576		for (i = 1; i < ps->performance_level_count; i++) {
3577			if (sclk < ps->performance_levels[i].sclk)
3578				sclk = ps->performance_levels[i].sclk;
3579		}
3580		for (i = 0; i < ps->performance_level_count; i++) {
3581			ps->performance_levels[i].sclk = sclk;
3582			ps->performance_levels[i].vddc = vddc;
3583		}
3584	} else {
3585		for (i = 1; i < ps->performance_level_count; i++) {
3586			if (ps->performance_levels[i].sclk < ps->performance_levels[i - 1].sclk)
3587				ps->performance_levels[i].sclk = ps->performance_levels[i - 1].sclk;
3588			if (ps->performance_levels[i].vddc < ps->performance_levels[i - 1].vddc)
3589				ps->performance_levels[i].vddc = ps->performance_levels[i - 1].vddc;
3590		}
3591	}
3592
3593	if (disable_mclk_switching) {
3594		mclk = ps->performance_levels[0].mclk;
3595		for (i = 1; i < ps->performance_level_count; i++) {
3596			if (mclk < ps->performance_levels[i].mclk)
3597				mclk = ps->performance_levels[i].mclk;
3598		}
3599		for (i = 0; i < ps->performance_level_count; i++) {
3600			ps->performance_levels[i].mclk = mclk;
3601			ps->performance_levels[i].vddci = vddci;
3602		}
3603	} else {
3604		for (i = 1; i < ps->performance_level_count; i++) {
3605			if (ps->performance_levels[i].mclk < ps->performance_levels[i - 1].mclk)
3606				ps->performance_levels[i].mclk = ps->performance_levels[i - 1].mclk;
3607			if (ps->performance_levels[i].vddci < ps->performance_levels[i - 1].vddci)
3608				ps->performance_levels[i].vddci = ps->performance_levels[i - 1].vddci;
3609		}
3610	}
3611
3612	for (i = 0; i < ps->performance_level_count; i++)
3613		btc_adjust_clock_combinations(adev, max_limits,
3614					      &ps->performance_levels[i]);
3615
3616	for (i = 0; i < ps->performance_level_count; i++) {
3617		if (ps->performance_levels[i].vddc < min_vce_voltage)
3618			ps->performance_levels[i].vddc = min_vce_voltage;
3619		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk,
3620						   ps->performance_levels[i].sclk,
3621						   max_limits->vddc,  &ps->performance_levels[i].vddc);
3622		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
3623						   ps->performance_levels[i].mclk,
3624						   max_limits->vddci, &ps->performance_levels[i].vddci);
3625		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
3626						   ps->performance_levels[i].mclk,
3627						   max_limits->vddc,  &ps->performance_levels[i].vddc);
3628		btc_apply_voltage_dependency_rules(&adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk,
3629						   adev->clock.current_dispclk,
3630						   max_limits->vddc,  &ps->performance_levels[i].vddc);
3631	}
3632
3633	for (i = 0; i < ps->performance_level_count; i++) {
3634		btc_apply_voltage_delta_rules(adev,
3635					      max_limits->vddc, max_limits->vddci,
3636					      &ps->performance_levels[i].vddc,
3637					      &ps->performance_levels[i].vddci);
3638	}
3639
3640	ps->dc_compatible = true;
3641	for (i = 0; i < ps->performance_level_count; i++) {
3642		if (ps->performance_levels[i].vddc > adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.vddc)
3643			ps->dc_compatible = false;
3644	}
3645}
3646
3647#if 0
3648static int si_read_smc_soft_register(struct amdgpu_device *adev,
3649				     u16 reg_offset, u32 *value)
3650{
3651	struct si_power_info *si_pi = si_get_pi(adev);
3652
3653	return amdgpu_si_read_smc_sram_dword(adev,
3654					     si_pi->soft_regs_start + reg_offset, value,
3655					     si_pi->sram_end);
3656}
3657#endif
3658
3659static int si_write_smc_soft_register(struct amdgpu_device *adev,
3660				      u16 reg_offset, u32 value)
3661{
3662	struct si_power_info *si_pi = si_get_pi(adev);
3663
3664	return amdgpu_si_write_smc_sram_dword(adev,
3665					      si_pi->soft_regs_start + reg_offset,
3666					      value, si_pi->sram_end);
3667}
3668
3669static bool si_is_special_1gb_platform(struct amdgpu_device *adev)
3670{
3671	bool ret = false;
3672	u32 tmp, width, row, column, bank, density;
3673	bool is_memory_gddr5, is_special;
3674
3675	tmp = RREG32(MC_SEQ_MISC0);
3676	is_memory_gddr5 = (MC_SEQ_MISC0_GDDR5_VALUE == ((tmp & MC_SEQ_MISC0_GDDR5_MASK) >> MC_SEQ_MISC0_GDDR5_SHIFT));
3677	is_special = (MC_SEQ_MISC0_REV_ID_VALUE == ((tmp & MC_SEQ_MISC0_REV_ID_MASK) >> MC_SEQ_MISC0_REV_ID_SHIFT))
3678		& (MC_SEQ_MISC0_VEN_ID_VALUE == ((tmp & MC_SEQ_MISC0_VEN_ID_MASK) >> MC_SEQ_MISC0_VEN_ID_SHIFT));
3679
3680	WREG32(MC_SEQ_IO_DEBUG_INDEX, 0xb);
3681	width = ((RREG32(MC_SEQ_IO_DEBUG_DATA) >> 1) & 1) ? 16 : 32;
3682
3683	tmp = RREG32(MC_ARB_RAMCFG);
3684	row = ((tmp & NOOFROWS_MASK) >> NOOFROWS_SHIFT) + 10;
3685	column = ((tmp & NOOFCOLS_MASK) >> NOOFCOLS_SHIFT) + 8;
3686	bank = ((tmp & NOOFBANK_MASK) >> NOOFBANK_SHIFT) + 2;
3687
3688	density = (1 << (row + column - 20 + bank)) * width;
3689
3690	if ((adev->pdev->device == 0x6819) &&
3691	    is_memory_gddr5 && is_special && (density == 0x400))
3692		ret = true;
3693
3694	return ret;
3695}
3696
3697static void si_get_leakage_vddc(struct amdgpu_device *adev)
3698{
3699	struct si_power_info *si_pi = si_get_pi(adev);
3700	u16 vddc, count = 0;
3701	int i, ret;
3702
3703	for (i = 0; i < SISLANDS_MAX_LEAKAGE_COUNT; i++) {
3704		ret = amdgpu_atombios_get_leakage_vddc_based_on_leakage_idx(adev, &vddc, SISLANDS_LEAKAGE_INDEX0 + i);
3705
3706		if (!ret && (vddc > 0) && (vddc != (SISLANDS_LEAKAGE_INDEX0 + i))) {
3707			si_pi->leakage_voltage.entries[count].voltage = vddc;
3708			si_pi->leakage_voltage.entries[count].leakage_index =
3709				SISLANDS_LEAKAGE_INDEX0 + i;
3710			count++;
3711		}
3712	}
3713	si_pi->leakage_voltage.count = count;
3714}
3715
3716static int si_get_leakage_voltage_from_leakage_index(struct amdgpu_device *adev,
3717						     u32 index, u16 *leakage_voltage)
3718{
3719	struct si_power_info *si_pi = si_get_pi(adev);
3720	int i;
3721
3722	if (leakage_voltage == NULL)
3723		return -EINVAL;
3724
3725	if ((index & 0xff00) != 0xff00)
3726		return -EINVAL;
3727
3728	if ((index & 0xff) > SISLANDS_MAX_LEAKAGE_COUNT + 1)
3729		return -EINVAL;
3730
3731	if (index < SISLANDS_LEAKAGE_INDEX0)
3732		return -EINVAL;
3733
3734	for (i = 0; i < si_pi->leakage_voltage.count; i++) {
3735		if (si_pi->leakage_voltage.entries[i].leakage_index == index) {
3736			*leakage_voltage = si_pi->leakage_voltage.entries[i].voltage;
3737			return 0;
3738		}
3739	}
3740	return -EAGAIN;
3741}
3742
3743static void si_set_dpm_event_sources(struct amdgpu_device *adev, u32 sources)
3744{
3745	struct rv7xx_power_info *pi = rv770_get_pi(adev);
3746	bool want_thermal_protection;
3747	enum amdgpu_dpm_event_src dpm_event_src;
3748
3749	switch (sources) {
3750	case 0:
3751	default:
3752		want_thermal_protection = false;
3753		break;
3754	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL):
3755		want_thermal_protection = true;
3756		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGITAL;
3757		break;
3758	case (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL):
3759		want_thermal_protection = true;
3760		dpm_event_src = AMDGPU_DPM_EVENT_SRC_EXTERNAL;
3761		break;
3762	case ((1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_EXTERNAL) |
3763	      (1 << AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL)):
3764		want_thermal_protection = true;
3765		dpm_event_src = AMDGPU_DPM_EVENT_SRC_DIGIAL_OR_EXTERNAL;
3766		break;
3767	}
3768
3769	if (want_thermal_protection) {
3770		WREG32_P(CG_THERMAL_CTRL, DPM_EVENT_SRC(dpm_event_src), ~DPM_EVENT_SRC_MASK);
3771		if (pi->thermal_protection)
3772			WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
3773	} else {
3774		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
3775	}
3776}
3777
3778static void si_enable_auto_throttle_source(struct amdgpu_device *adev,
3779					   enum amdgpu_dpm_auto_throttle_src source,
3780					   bool enable)
3781{
3782	struct rv7xx_power_info *pi = rv770_get_pi(adev);
3783
3784	if (enable) {
3785		if (!(pi->active_auto_throttle_sources & (1 << source))) {
3786			pi->active_auto_throttle_sources |= 1 << source;
3787			si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
3788		}
3789	} else {
3790		if (pi->active_auto_throttle_sources & (1 << source)) {
3791			pi->active_auto_throttle_sources &= ~(1 << source);
3792			si_set_dpm_event_sources(adev, pi->active_auto_throttle_sources);
3793		}
3794	}
3795}
3796
3797static void si_start_dpm(struct amdgpu_device *adev)
3798{
3799	WREG32_P(GENERAL_PWRMGT, GLOBAL_PWRMGT_EN, ~GLOBAL_PWRMGT_EN);
3800}
3801
3802static void si_stop_dpm(struct amdgpu_device *adev)
3803{
3804	WREG32_P(GENERAL_PWRMGT, 0, ~GLOBAL_PWRMGT_EN);
3805}
3806
3807static void si_enable_sclk_control(struct amdgpu_device *adev, bool enable)
3808{
3809	if (enable)
3810		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~SCLK_PWRMGT_OFF);
3811	else
3812		WREG32_P(SCLK_PWRMGT_CNTL, SCLK_PWRMGT_OFF, ~SCLK_PWRMGT_OFF);
3813
3814}
3815
3816#if 0
3817static int si_notify_hardware_of_thermal_state(struct amdgpu_device *adev,
3818					       u32 thermal_level)
3819{
3820	PPSMC_Result ret;
3821
3822	if (thermal_level == 0) {
3823		ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
3824		if (ret == PPSMC_Result_OK)
3825			return 0;
3826		else
3827			return -EINVAL;
3828	}
3829	return 0;
3830}
3831
3832static void si_notify_hardware_vpu_recovery_event(struct amdgpu_device *adev)
3833{
3834	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_tdr_is_about_to_happen, true);
3835}
3836#endif
3837
3838#if 0
3839static int si_notify_hw_of_powersource(struct amdgpu_device *adev, bool ac_power)
3840{
3841	if (ac_power)
3842		return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_RunningOnAC) == PPSMC_Result_OK) ?
3843			0 : -EINVAL;
3844
3845	return 0;
3846}
3847#endif
3848
3849static PPSMC_Result si_send_msg_to_smc_with_parameter(struct amdgpu_device *adev,
3850						      PPSMC_Msg msg, u32 parameter)
3851{
3852	WREG32(SMC_SCRATCH0, parameter);
3853	return amdgpu_si_send_msg_to_smc(adev, msg);
3854}
3855
3856static int si_restrict_performance_levels_before_switch(struct amdgpu_device *adev)
3857{
3858	if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_NoForcedLevel) != PPSMC_Result_OK)
3859		return -EINVAL;
3860
3861	return (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) == PPSMC_Result_OK) ?
3862		0 : -EINVAL;
3863}
3864
3865static int si_dpm_force_performance_level(void *handle,
3866				   enum amd_dpm_forced_level level)
3867{
3868	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
3869	struct amdgpu_ps *rps = adev->pm.dpm.current_ps;
3870	struct  si_ps *ps = si_get_ps(rps);
3871	u32 levels = ps->performance_level_count;
3872
3873	if (level == AMD_DPM_FORCED_LEVEL_HIGH) {
3874		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
3875			return -EINVAL;
3876
3877		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 1) != PPSMC_Result_OK)
3878			return -EINVAL;
3879	} else if (level == AMD_DPM_FORCED_LEVEL_LOW) {
3880		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
3881			return -EINVAL;
3882
3883		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, 1) != PPSMC_Result_OK)
3884			return -EINVAL;
3885	} else if (level == AMD_DPM_FORCED_LEVEL_AUTO) {
3886		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetForcedLevels, 0) != PPSMC_Result_OK)
3887			return -EINVAL;
3888
3889		if (si_send_msg_to_smc_with_parameter(adev, PPSMC_MSG_SetEnabledLevels, levels) != PPSMC_Result_OK)
3890			return -EINVAL;
3891	}
3892
3893	adev->pm.dpm.forced_level = level;
3894
3895	return 0;
3896}
3897
3898#if 0
3899static int si_set_boot_state(struct amdgpu_device *adev)
3900{
3901	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToInitialState) == PPSMC_Result_OK) ?
3902		0 : -EINVAL;
3903}
3904#endif
3905
3906static int si_set_sw_state(struct amdgpu_device *adev)
3907{
3908	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_SwitchToSwState) == PPSMC_Result_OK) ?
3909		0 : -EINVAL;
3910}
3911
3912static int si_halt_smc(struct amdgpu_device *adev)
3913{
3914	if (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Halt) != PPSMC_Result_OK)
3915		return -EINVAL;
3916
3917	return (amdgpu_si_wait_for_smc_inactive(adev) == PPSMC_Result_OK) ?
3918		0 : -EINVAL;
3919}
3920
3921static int si_resume_smc(struct amdgpu_device *adev)
3922{
3923	if (amdgpu_si_send_msg_to_smc(adev, PPSMC_FlushDataCache) != PPSMC_Result_OK)
3924		return -EINVAL;
3925
3926	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_Resume) == PPSMC_Result_OK) ?
3927		0 : -EINVAL;
3928}
3929
3930static void si_dpm_start_smc(struct amdgpu_device *adev)
3931{
3932	amdgpu_si_program_jump_on_start(adev);
3933	amdgpu_si_start_smc(adev);
3934	amdgpu_si_smc_clock(adev, true);
3935}
3936
3937static void si_dpm_stop_smc(struct amdgpu_device *adev)
3938{
3939	amdgpu_si_reset_smc(adev);
3940	amdgpu_si_smc_clock(adev, false);
3941}
3942
3943static int si_process_firmware_header(struct amdgpu_device *adev)
3944{
3945	struct si_power_info *si_pi = si_get_pi(adev);
3946	u32 tmp;
3947	int ret;
3948
3949	ret = amdgpu_si_read_smc_sram_dword(adev,
3950					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3951					    SISLANDS_SMC_FIRMWARE_HEADER_stateTable,
3952					    &tmp, si_pi->sram_end);
3953	if (ret)
3954		return ret;
3955
3956	si_pi->state_table_start = tmp;
3957
3958	ret = amdgpu_si_read_smc_sram_dword(adev,
3959					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3960					    SISLANDS_SMC_FIRMWARE_HEADER_softRegisters,
3961					    &tmp, si_pi->sram_end);
3962	if (ret)
3963		return ret;
3964
3965	si_pi->soft_regs_start = tmp;
3966
3967	ret = amdgpu_si_read_smc_sram_dword(adev,
3968					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3969					    SISLANDS_SMC_FIRMWARE_HEADER_mcRegisterTable,
3970					    &tmp, si_pi->sram_end);
3971	if (ret)
3972		return ret;
3973
3974	si_pi->mc_reg_table_start = tmp;
3975
3976	ret = amdgpu_si_read_smc_sram_dword(adev,
3977					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3978					    SISLANDS_SMC_FIRMWARE_HEADER_fanTable,
3979					    &tmp, si_pi->sram_end);
3980	if (ret)
3981		return ret;
3982
3983	si_pi->fan_table_start = tmp;
3984
3985	ret = amdgpu_si_read_smc_sram_dword(adev,
3986					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3987					    SISLANDS_SMC_FIRMWARE_HEADER_mcArbDramAutoRefreshTable,
3988					    &tmp, si_pi->sram_end);
3989	if (ret)
3990		return ret;
3991
3992	si_pi->arb_table_start = tmp;
3993
3994	ret = amdgpu_si_read_smc_sram_dword(adev,
3995					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
3996					    SISLANDS_SMC_FIRMWARE_HEADER_CacConfigTable,
3997					    &tmp, si_pi->sram_end);
3998	if (ret)
3999		return ret;
4000
4001	si_pi->cac_table_start = tmp;
4002
4003	ret = amdgpu_si_read_smc_sram_dword(adev,
4004					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
4005					    SISLANDS_SMC_FIRMWARE_HEADER_DteConfiguration,
4006					    &tmp, si_pi->sram_end);
4007	if (ret)
4008		return ret;
4009
4010	si_pi->dte_table_start = tmp;
4011
4012	ret = amdgpu_si_read_smc_sram_dword(adev,
4013					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
4014					    SISLANDS_SMC_FIRMWARE_HEADER_spllTable,
4015					    &tmp, si_pi->sram_end);
4016	if (ret)
4017		return ret;
4018
4019	si_pi->spll_table_start = tmp;
4020
4021	ret = amdgpu_si_read_smc_sram_dword(adev,
4022					    SISLANDS_SMC_FIRMWARE_HEADER_LOCATION +
4023					    SISLANDS_SMC_FIRMWARE_HEADER_PAPMParameters,
4024					    &tmp, si_pi->sram_end);
4025	if (ret)
4026		return ret;
4027
4028	si_pi->papm_cfg_table_start = tmp;
4029
4030	return ret;
4031}
4032
4033static void si_read_clock_registers(struct amdgpu_device *adev)
4034{
4035	struct si_power_info *si_pi = si_get_pi(adev);
4036
4037	si_pi->clock_registers.cg_spll_func_cntl = RREG32(CG_SPLL_FUNC_CNTL);
4038	si_pi->clock_registers.cg_spll_func_cntl_2 = RREG32(CG_SPLL_FUNC_CNTL_2);
4039	si_pi->clock_registers.cg_spll_func_cntl_3 = RREG32(CG_SPLL_FUNC_CNTL_3);
4040	si_pi->clock_registers.cg_spll_func_cntl_4 = RREG32(CG_SPLL_FUNC_CNTL_4);
4041	si_pi->clock_registers.cg_spll_spread_spectrum = RREG32(CG_SPLL_SPREAD_SPECTRUM);
4042	si_pi->clock_registers.cg_spll_spread_spectrum_2 = RREG32(CG_SPLL_SPREAD_SPECTRUM_2);
4043	si_pi->clock_registers.dll_cntl = RREG32(DLL_CNTL);
4044	si_pi->clock_registers.mclk_pwrmgt_cntl = RREG32(MCLK_PWRMGT_CNTL);
4045	si_pi->clock_registers.mpll_ad_func_cntl = RREG32(MPLL_AD_FUNC_CNTL);
4046	si_pi->clock_registers.mpll_dq_func_cntl = RREG32(MPLL_DQ_FUNC_CNTL);
4047	si_pi->clock_registers.mpll_func_cntl = RREG32(MPLL_FUNC_CNTL);
4048	si_pi->clock_registers.mpll_func_cntl_1 = RREG32(MPLL_FUNC_CNTL_1);
4049	si_pi->clock_registers.mpll_func_cntl_2 = RREG32(MPLL_FUNC_CNTL_2);
4050	si_pi->clock_registers.mpll_ss1 = RREG32(MPLL_SS1);
4051	si_pi->clock_registers.mpll_ss2 = RREG32(MPLL_SS2);
4052}
4053
4054static void si_enable_thermal_protection(struct amdgpu_device *adev,
4055					  bool enable)
4056{
4057	if (enable)
4058		WREG32_P(GENERAL_PWRMGT, 0, ~THERMAL_PROTECTION_DIS);
4059	else
4060		WREG32_P(GENERAL_PWRMGT, THERMAL_PROTECTION_DIS, ~THERMAL_PROTECTION_DIS);
4061}
4062
4063static void si_enable_acpi_power_management(struct amdgpu_device *adev)
4064{
4065	WREG32_P(GENERAL_PWRMGT, STATIC_PM_EN, ~STATIC_PM_EN);
4066}
4067
4068#if 0
4069static int si_enter_ulp_state(struct amdgpu_device *adev)
4070{
4071	WREG32(SMC_MESSAGE_0, PPSMC_MSG_SwitchToMinimumPower);
4072
4073	udelay(25000);
4074
4075	return 0;
4076}
4077
4078static int si_exit_ulp_state(struct amdgpu_device *adev)
4079{
4080	int i;
4081
4082	WREG32(SMC_MESSAGE_0, PPSMC_MSG_ResumeFromMinimumPower);
4083
4084	udelay(7000);
4085
4086	for (i = 0; i < adev->usec_timeout; i++) {
4087		if (RREG32(SMC_RESP_0) == 1)
4088			break;
4089		udelay(1000);
4090	}
4091
4092	return 0;
4093}
4094#endif
4095
4096static int si_notify_smc_display_change(struct amdgpu_device *adev,
4097				     bool has_display)
4098{
4099	PPSMC_Msg msg = has_display ?
4100		PPSMC_MSG_HasDisplay : PPSMC_MSG_NoDisplay;
4101
4102	return (amdgpu_si_send_msg_to_smc(adev, msg) == PPSMC_Result_OK) ?
4103		0 : -EINVAL;
4104}
4105
4106static void si_program_response_times(struct amdgpu_device *adev)
4107{
4108	u32 voltage_response_time, acpi_delay_time, vbi_time_out;
4109	u32 vddc_dly, acpi_dly, vbi_dly;
4110	u32 reference_clock;
4111
4112	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mvdd_chg_time, 1);
4113
4114	voltage_response_time = (u32)adev->pm.dpm.voltage_response_time;
4115
4116	if (voltage_response_time == 0)
4117		voltage_response_time = 1000;
4118
4119	acpi_delay_time = 15000;
4120	vbi_time_out = 100000;
4121
4122	reference_clock = amdgpu_asic_get_xclk(adev);
4123
4124	vddc_dly = (voltage_response_time  * reference_clock) / 100;
4125	acpi_dly = (acpi_delay_time * reference_clock) / 100;
4126	vbi_dly  = (vbi_time_out * reference_clock) / 100;
4127
4128	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_vreg,  vddc_dly);
4129	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_delay_acpi,  acpi_dly);
4130	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mclk_chg_timeout, vbi_dly);
4131	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_mc_block_delay, 0xAA);
4132}
4133
4134static void si_program_ds_registers(struct amdgpu_device *adev)
4135{
4136	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
4137	u32 tmp;
4138
4139	/* DEEP_SLEEP_CLK_SEL field should be 0x10 on tahiti A0 */
4140	if (adev->asic_type == CHIP_TAHITI && adev->rev_id == 0x0)
4141		tmp = 0x10;
4142	else
4143		tmp = 0x1;
4144
4145	if (eg_pi->sclk_deep_sleep) {
4146		WREG32_P(MISC_CLK_CNTL, DEEP_SLEEP_CLK_SEL(tmp), ~DEEP_SLEEP_CLK_SEL_MASK);
4147		WREG32_P(CG_SPLL_AUTOSCALE_CNTL, AUTOSCALE_ON_SS_CLEAR,
4148			 ~AUTOSCALE_ON_SS_CLEAR);
4149	}
4150}
4151
4152static void si_program_display_gap(struct amdgpu_device *adev)
4153{
4154	u32 tmp, pipe;
4155	int i;
4156
4157	tmp = RREG32(CG_DISPLAY_GAP_CNTL) & ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
4158	if (adev->pm.dpm.new_active_crtc_count > 0)
4159		tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
4160	else
4161		tmp |= DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE);
4162
4163	if (adev->pm.dpm.new_active_crtc_count > 1)
4164		tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_VBLANK_OR_WM);
4165	else
4166		tmp |= DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE);
4167
4168	WREG32(CG_DISPLAY_GAP_CNTL, tmp);
4169
4170	tmp = RREG32(DCCG_DISP_SLOW_SELECT_REG);
4171	pipe = (tmp & DCCG_DISP1_SLOW_SELECT_MASK) >> DCCG_DISP1_SLOW_SELECT_SHIFT;
4172
4173	if ((adev->pm.dpm.new_active_crtc_count > 0) &&
4174	    (!(adev->pm.dpm.new_active_crtcs & (1 << pipe)))) {
4175		/* find the first active crtc */
4176		for (i = 0; i < adev->mode_info.num_crtc; i++) {
4177			if (adev->pm.dpm.new_active_crtcs & (1 << i))
4178				break;
4179		}
4180		if (i == adev->mode_info.num_crtc)
4181			pipe = 0;
4182		else
4183			pipe = i;
4184
4185		tmp &= ~DCCG_DISP1_SLOW_SELECT_MASK;
4186		tmp |= DCCG_DISP1_SLOW_SELECT(pipe);
4187		WREG32(DCCG_DISP_SLOW_SELECT_REG, tmp);
4188	}
4189
4190	/* Setting this to false forces the performance state to low if the crtcs are disabled.
4191	 * This can be a problem on PowerXpress systems or if you want to use the card
4192	 * for offscreen rendering or compute if there are no crtcs enabled.
4193	 */
4194	si_notify_smc_display_change(adev, adev->pm.dpm.new_active_crtc_count > 0);
4195}
4196
4197static void si_enable_spread_spectrum(struct amdgpu_device *adev, bool enable)
4198{
4199	struct rv7xx_power_info *pi = rv770_get_pi(adev);
4200
4201	if (enable) {
4202		if (pi->sclk_ss)
4203			WREG32_P(GENERAL_PWRMGT, DYN_SPREAD_SPECTRUM_EN, ~DYN_SPREAD_SPECTRUM_EN);
4204	} else {
4205		WREG32_P(CG_SPLL_SPREAD_SPECTRUM, 0, ~SSEN);
4206		WREG32_P(GENERAL_PWRMGT, 0, ~DYN_SPREAD_SPECTRUM_EN);
4207	}
4208}
4209
4210static void si_setup_bsp(struct amdgpu_device *adev)
4211{
4212	struct rv7xx_power_info *pi = rv770_get_pi(adev);
4213	u32 xclk = amdgpu_asic_get_xclk(adev);
4214
4215	r600_calculate_u_and_p(pi->asi,
4216			       xclk,
4217			       16,
4218			       &pi->bsp,
4219			       &pi->bsu);
4220
4221	r600_calculate_u_and_p(pi->pasi,
4222			       xclk,
4223			       16,
4224			       &pi->pbsp,
4225			       &pi->pbsu);
4226
4227
4228        pi->dsp = BSP(pi->bsp) | BSU(pi->bsu);
4229	pi->psp = BSP(pi->pbsp) | BSU(pi->pbsu);
4230
4231	WREG32(CG_BSP, pi->dsp);
4232}
4233
4234static void si_program_git(struct amdgpu_device *adev)
4235{
4236	WREG32_P(CG_GIT, CG_GICST(R600_GICST_DFLT), ~CG_GICST_MASK);
4237}
4238
4239static void si_program_tp(struct amdgpu_device *adev)
4240{
4241	int i;
4242	enum r600_td td = R600_TD_DFLT;
4243
4244	for (i = 0; i < R600_PM_NUMBER_OF_TC; i++)
4245		WREG32(CG_FFCT_0 + i, (UTC_0(r600_utc[i]) | DTC_0(r600_dtc[i])));
4246
4247	if (td == R600_TD_AUTO)
4248		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_FORCE_TREND_SEL);
4249	else
4250		WREG32_P(SCLK_PWRMGT_CNTL, FIR_FORCE_TREND_SEL, ~FIR_FORCE_TREND_SEL);
4251
4252	if (td == R600_TD_UP)
4253		WREG32_P(SCLK_PWRMGT_CNTL, 0, ~FIR_TREND_MODE);
4254
4255	if (td == R600_TD_DOWN)
4256		WREG32_P(SCLK_PWRMGT_CNTL, FIR_TREND_MODE, ~FIR_TREND_MODE);
4257}
4258
4259static void si_program_tpp(struct amdgpu_device *adev)
4260{
4261	WREG32(CG_TPC, R600_TPC_DFLT);
4262}
4263
4264static void si_program_sstp(struct amdgpu_device *adev)
4265{
4266	WREG32(CG_SSP, (SSTU(R600_SSTU_DFLT) | SST(R600_SST_DFLT)));
4267}
4268
4269static void si_enable_display_gap(struct amdgpu_device *adev)
4270{
4271	u32 tmp = RREG32(CG_DISPLAY_GAP_CNTL);
4272
4273	tmp &= ~(DISP1_GAP_MASK | DISP2_GAP_MASK);
4274	tmp |= (DISP1_GAP(R600_PM_DISPLAY_GAP_IGNORE) |
4275		DISP2_GAP(R600_PM_DISPLAY_GAP_IGNORE));
4276
4277	tmp &= ~(DISP1_GAP_MCHG_MASK | DISP2_GAP_MCHG_MASK);
4278	tmp |= (DISP1_GAP_MCHG(R600_PM_DISPLAY_GAP_VBLANK) |
4279		DISP2_GAP_MCHG(R600_PM_DISPLAY_GAP_IGNORE));
4280	WREG32(CG_DISPLAY_GAP_CNTL, tmp);
4281}
4282
4283static void si_program_vc(struct amdgpu_device *adev)
4284{
4285	struct rv7xx_power_info *pi = rv770_get_pi(adev);
4286
4287	WREG32(CG_FTV, pi->vrc);
4288}
4289
4290static void si_clear_vc(struct amdgpu_device *adev)
4291{
4292	WREG32(CG_FTV, 0);
4293}
4294
4295static u8 si_get_ddr3_mclk_frequency_ratio(u32 memory_clock)
4296{
4297	u8 mc_para_index;
4298
4299	if (memory_clock < 10000)
4300		mc_para_index = 0;
4301	else if (memory_clock >= 80000)
4302		mc_para_index = 0x0f;
4303	else
4304		mc_para_index = (u8)((memory_clock - 10000) / 5000 + 1);
4305	return mc_para_index;
4306}
4307
4308static u8 si_get_mclk_frequency_ratio(u32 memory_clock, bool strobe_mode)
4309{
4310	u8 mc_para_index;
4311
4312	if (strobe_mode) {
4313		if (memory_clock < 12500)
4314			mc_para_index = 0x00;
4315		else if (memory_clock > 47500)
4316			mc_para_index = 0x0f;
4317		else
4318			mc_para_index = (u8)((memory_clock - 10000) / 2500);
4319	} else {
4320		if (memory_clock < 65000)
4321			mc_para_index = 0x00;
4322		else if (memory_clock > 135000)
4323			mc_para_index = 0x0f;
4324		else
4325			mc_para_index = (u8)((memory_clock - 60000) / 5000);
4326	}
4327	return mc_para_index;
4328}
4329
4330static u8 si_get_strobe_mode_settings(struct amdgpu_device *adev, u32 mclk)
4331{
4332	struct rv7xx_power_info *pi = rv770_get_pi(adev);
4333	bool strobe_mode = false;
4334	u8 result = 0;
4335
4336	if (mclk <= pi->mclk_strobe_mode_threshold)
4337		strobe_mode = true;
4338
4339	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
4340		result = si_get_mclk_frequency_ratio(mclk, strobe_mode);
4341	else
4342		result = si_get_ddr3_mclk_frequency_ratio(mclk);
4343
4344	if (strobe_mode)
4345		result |= SISLANDS_SMC_STROBE_ENABLE;
4346
4347	return result;
4348}
4349
4350static int si_upload_firmware(struct amdgpu_device *adev)
4351{
4352	struct si_power_info *si_pi = si_get_pi(adev);
4353
4354	amdgpu_si_reset_smc(adev);
4355	amdgpu_si_smc_clock(adev, false);
4356
4357	return amdgpu_si_load_smc_ucode(adev, si_pi->sram_end);
4358}
4359
4360static bool si_validate_phase_shedding_tables(struct amdgpu_device *adev,
4361					      const struct atom_voltage_table *table,
4362					      const struct amdgpu_phase_shedding_limits_table *limits)
4363{
4364	u32 data, num_bits, num_levels;
4365
4366	if ((table == NULL) || (limits == NULL))
4367		return false;
4368
4369	data = table->mask_low;
4370
4371	num_bits = hweight32(data);
4372
4373	if (num_bits == 0)
4374		return false;
4375
4376	num_levels = (1 << num_bits);
4377
4378	if (table->count != num_levels)
4379		return false;
4380
4381	if (limits->count != (num_levels - 1))
4382		return false;
4383
4384	return true;
4385}
4386
4387static void si_trim_voltage_table_to_fit_state_table(struct amdgpu_device *adev,
4388					      u32 max_voltage_steps,
4389					      struct atom_voltage_table *voltage_table)
4390{
4391	unsigned int i, diff;
4392
4393	if (voltage_table->count <= max_voltage_steps)
4394		return;
4395
4396	diff = voltage_table->count - max_voltage_steps;
4397
4398	for (i= 0; i < max_voltage_steps; i++)
4399		voltage_table->entries[i] = voltage_table->entries[i + diff];
4400
4401	voltage_table->count = max_voltage_steps;
4402}
4403
4404static int si_get_svi2_voltage_table(struct amdgpu_device *adev,
4405				     struct amdgpu_clock_voltage_dependency_table *voltage_dependency_table,
4406				     struct atom_voltage_table *voltage_table)
4407{
4408	u32 i;
4409
4410	if (voltage_dependency_table == NULL)
4411		return -EINVAL;
4412
4413	voltage_table->mask_low = 0;
4414	voltage_table->phase_delay = 0;
4415
4416	voltage_table->count = voltage_dependency_table->count;
4417	for (i = 0; i < voltage_table->count; i++) {
4418		voltage_table->entries[i].value = voltage_dependency_table->entries[i].v;
4419		voltage_table->entries[i].smio_low = 0;
4420	}
4421
4422	return 0;
4423}
4424
4425static int si_construct_voltage_tables(struct amdgpu_device *adev)
4426{
4427	struct rv7xx_power_info *pi = rv770_get_pi(adev);
4428	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
4429	struct si_power_info *si_pi = si_get_pi(adev);
4430	int ret;
4431
4432	if (pi->voltage_control) {
4433		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
4434						    VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddc_voltage_table);
4435		if (ret)
4436			return ret;
4437
4438		if (eg_pi->vddc_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
4439			si_trim_voltage_table_to_fit_state_table(adev,
4440								 SISLANDS_MAX_NO_VREG_STEPS,
4441								 &eg_pi->vddc_voltage_table);
4442	} else if (si_pi->voltage_control_svi2) {
4443		ret = si_get_svi2_voltage_table(adev,
4444						&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk,
4445						&eg_pi->vddc_voltage_table);
4446		if (ret)
4447			return ret;
4448	} else {
4449		return -EINVAL;
4450	}
4451
4452	if (eg_pi->vddci_control) {
4453		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDCI,
4454						    VOLTAGE_OBJ_GPIO_LUT, &eg_pi->vddci_voltage_table);
4455		if (ret)
4456			return ret;
4457
4458		if (eg_pi->vddci_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
4459			si_trim_voltage_table_to_fit_state_table(adev,
4460								 SISLANDS_MAX_NO_VREG_STEPS,
4461								 &eg_pi->vddci_voltage_table);
4462	}
4463	if (si_pi->vddci_control_svi2) {
4464		ret = si_get_svi2_voltage_table(adev,
4465						&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk,
4466						&eg_pi->vddci_voltage_table);
4467		if (ret)
4468			return ret;
4469	}
4470
4471	if (pi->mvdd_control) {
4472		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_MVDDC,
4473						    VOLTAGE_OBJ_GPIO_LUT, &si_pi->mvdd_voltage_table);
4474
4475		if (ret) {
4476			pi->mvdd_control = false;
4477			return ret;
4478		}
4479
4480		if (si_pi->mvdd_voltage_table.count == 0) {
4481			pi->mvdd_control = false;
4482			return -EINVAL;
4483		}
4484
4485		if (si_pi->mvdd_voltage_table.count > SISLANDS_MAX_NO_VREG_STEPS)
4486			si_trim_voltage_table_to_fit_state_table(adev,
4487								 SISLANDS_MAX_NO_VREG_STEPS,
4488								 &si_pi->mvdd_voltage_table);
4489	}
4490
4491	if (si_pi->vddc_phase_shed_control) {
4492		ret = amdgpu_atombios_get_voltage_table(adev, VOLTAGE_TYPE_VDDC,
4493						    VOLTAGE_OBJ_PHASE_LUT, &si_pi->vddc_phase_shed_table);
4494		if (ret)
4495			si_pi->vddc_phase_shed_control = false;
4496
4497		if ((si_pi->vddc_phase_shed_table.count == 0) ||
4498		    (si_pi->vddc_phase_shed_table.count > SISLANDS_MAX_NO_VREG_STEPS))
4499			si_pi->vddc_phase_shed_control = false;
4500	}
4501
4502	return 0;
4503}
4504
4505static void si_populate_smc_voltage_table(struct amdgpu_device *adev,
4506					  const struct atom_voltage_table *voltage_table,
4507					  SISLANDS_SMC_STATETABLE *table)
4508{
4509	unsigned int i;
4510
4511	for (i = 0; i < voltage_table->count; i++)
4512		table->lowSMIO[i] |= cpu_to_be32(voltage_table->entries[i].smio_low);
4513}
4514
4515static int si_populate_smc_voltage_tables(struct amdgpu_device *adev,
4516					  SISLANDS_SMC_STATETABLE *table)
4517{
4518	struct rv7xx_power_info *pi = rv770_get_pi(adev);
4519	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
4520	struct si_power_info *si_pi = si_get_pi(adev);
4521	u8 i;
4522
4523	if (si_pi->voltage_control_svi2) {
4524		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svc,
4525			si_pi->svc_gpio_id);
4526		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_gpio_id_svd,
4527			si_pi->svd_gpio_id);
4528		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_svi_rework_plat_type,
4529					   2);
4530	} else {
4531		if (eg_pi->vddc_voltage_table.count) {
4532			si_populate_smc_voltage_table(adev, &eg_pi->vddc_voltage_table, table);
4533			table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC] =
4534				cpu_to_be32(eg_pi->vddc_voltage_table.mask_low);
4535
4536			for (i = 0; i < eg_pi->vddc_voltage_table.count; i++) {
4537				if (pi->max_vddc_in_table <= eg_pi->vddc_voltage_table.entries[i].value) {
4538					table->maxVDDCIndexInPPTable = i;
4539					break;
4540				}
4541			}
4542		}
4543
4544		if (eg_pi->vddci_voltage_table.count) {
4545			si_populate_smc_voltage_table(adev, &eg_pi->vddci_voltage_table, table);
4546
4547			table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDCI] =
4548				cpu_to_be32(eg_pi->vddci_voltage_table.mask_low);
4549		}
4550
4551
4552		if (si_pi->mvdd_voltage_table.count) {
4553			si_populate_smc_voltage_table(adev, &si_pi->mvdd_voltage_table, table);
4554
4555			table->voltageMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_MVDD] =
4556				cpu_to_be32(si_pi->mvdd_voltage_table.mask_low);
4557		}
4558
4559		if (si_pi->vddc_phase_shed_control) {
4560			if (si_validate_phase_shedding_tables(adev, &si_pi->vddc_phase_shed_table,
4561							      &adev->pm.dpm.dyn_state.phase_shedding_limits_table)) {
4562				si_populate_smc_voltage_table(adev, &si_pi->vddc_phase_shed_table, table);
4563
4564				table->phaseMaskTable.lowMask[SISLANDS_SMC_VOLTAGEMASK_VDDC_PHASE_SHEDDING] =
4565					cpu_to_be32(si_pi->vddc_phase_shed_table.mask_low);
4566
4567				si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_phase_shedding_delay,
4568							   (u32)si_pi->vddc_phase_shed_table.phase_delay);
4569			} else {
4570				si_pi->vddc_phase_shed_control = false;
4571			}
4572		}
4573	}
4574
4575	return 0;
4576}
4577
4578static int si_populate_voltage_value(struct amdgpu_device *adev,
4579				     const struct atom_voltage_table *table,
4580				     u16 value, SISLANDS_SMC_VOLTAGE_VALUE *voltage)
4581{
4582	unsigned int i;
4583
4584	for (i = 0; i < table->count; i++) {
4585		if (value <= table->entries[i].value) {
4586			voltage->index = (u8)i;
4587			voltage->value = cpu_to_be16(table->entries[i].value);
4588			break;
4589		}
4590	}
4591
4592	if (i >= table->count)
4593		return -EINVAL;
4594
4595	return 0;
4596}
4597
4598static int si_populate_mvdd_value(struct amdgpu_device *adev, u32 mclk,
4599				  SISLANDS_SMC_VOLTAGE_VALUE *voltage)
4600{
4601	struct rv7xx_power_info *pi = rv770_get_pi(adev);
4602	struct si_power_info *si_pi = si_get_pi(adev);
4603
4604	if (pi->mvdd_control) {
4605		if (mclk <= pi->mvdd_split_frequency)
4606			voltage->index = 0;
4607		else
4608			voltage->index = (u8)(si_pi->mvdd_voltage_table.count) - 1;
4609
4610		voltage->value = cpu_to_be16(si_pi->mvdd_voltage_table.entries[voltage->index].value);
4611	}
4612	return 0;
4613}
4614
4615static int si_get_std_voltage_value(struct amdgpu_device *adev,
4616				    SISLANDS_SMC_VOLTAGE_VALUE *voltage,
4617				    u16 *std_voltage)
4618{
4619	u16 v_index;
4620	bool voltage_found = false;
4621	*std_voltage = be16_to_cpu(voltage->value);
4622
4623	if (adev->pm.dpm.dyn_state.cac_leakage_table.entries) {
4624		if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_NEW_CAC_VOLTAGE) {
4625			if (adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries == NULL)
4626				return -EINVAL;
4627
4628			for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
4629				if (be16_to_cpu(voltage->value) ==
4630				    (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
4631					voltage_found = true;
4632					if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
4633						*std_voltage =
4634							adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
4635					else
4636						*std_voltage =
4637							adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
4638					break;
4639				}
4640			}
4641
4642			if (!voltage_found) {
4643				for (v_index = 0; (u32)v_index < adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.count; v_index++) {
4644					if (be16_to_cpu(voltage->value) <=
4645					    (u16)adev->pm.dpm.dyn_state.vddc_dependency_on_sclk.entries[v_index].v) {
4646						voltage_found = true;
4647						if ((u32)v_index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
4648							*std_voltage =
4649								adev->pm.dpm.dyn_state.cac_leakage_table.entries[v_index].vddc;
4650						else
4651							*std_voltage =
4652								adev->pm.dpm.dyn_state.cac_leakage_table.entries[adev->pm.dpm.dyn_state.cac_leakage_table.count-1].vddc;
4653						break;
4654					}
4655				}
4656			}
4657		} else {
4658			if ((u32)voltage->index < adev->pm.dpm.dyn_state.cac_leakage_table.count)
4659				*std_voltage = adev->pm.dpm.dyn_state.cac_leakage_table.entries[voltage->index].vddc;
4660		}
4661	}
4662
4663	return 0;
4664}
4665
4666static int si_populate_std_voltage_value(struct amdgpu_device *adev,
4667					 u16 value, u8 index,
4668					 SISLANDS_SMC_VOLTAGE_VALUE *voltage)
4669{
4670	voltage->index = index;
4671	voltage->value = cpu_to_be16(value);
4672
4673	return 0;
4674}
4675
4676static int si_populate_phase_shedding_value(struct amdgpu_device *adev,
4677					    const struct amdgpu_phase_shedding_limits_table *limits,
4678					    u16 voltage, u32 sclk, u32 mclk,
4679					    SISLANDS_SMC_VOLTAGE_VALUE *smc_voltage)
4680{
4681	unsigned int i;
4682
4683	for (i = 0; i < limits->count; i++) {
4684		if ((voltage <= limits->entries[i].voltage) &&
4685		    (sclk <= limits->entries[i].sclk) &&
4686		    (mclk <= limits->entries[i].mclk))
4687			break;
4688	}
4689
4690	smc_voltage->phase_settings = (u8)i;
4691
4692	return 0;
4693}
4694
4695static int si_init_arb_table_index(struct amdgpu_device *adev)
4696{
4697	struct si_power_info *si_pi = si_get_pi(adev);
4698	u32 tmp;
4699	int ret;
4700
4701	ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
4702					    &tmp, si_pi->sram_end);
4703	if (ret)
4704		return ret;
4705
4706	tmp &= 0x00FFFFFF;
4707	tmp |= MC_CG_ARB_FREQ_F1 << 24;
4708
4709	return amdgpu_si_write_smc_sram_dword(adev, si_pi->arb_table_start,
4710					      tmp, si_pi->sram_end);
4711}
4712
4713static int si_initial_switch_from_arb_f0_to_f1(struct amdgpu_device *adev)
4714{
4715	return ni_copy_and_switch_arb_sets(adev, MC_CG_ARB_FREQ_F0, MC_CG_ARB_FREQ_F1);
4716}
4717
4718static int si_reset_to_default(struct amdgpu_device *adev)
4719{
4720	return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ResetToDefaults) == PPSMC_Result_OK) ?
4721		0 : -EINVAL;
4722}
4723
4724static int si_force_switch_to_arb_f0(struct amdgpu_device *adev)
4725{
4726	struct si_power_info *si_pi = si_get_pi(adev);
4727	u32 tmp;
4728	int ret;
4729
4730	ret = amdgpu_si_read_smc_sram_dword(adev, si_pi->arb_table_start,
4731					    &tmp, si_pi->sram_end);
4732	if (ret)
4733		return ret;
4734
4735	tmp = (tmp >> 24) & 0xff;
4736
4737	if (tmp == MC_CG_ARB_FREQ_F0)
4738		return 0;
4739
4740	return ni_copy_and_switch_arb_sets(adev, tmp, MC_CG_ARB_FREQ_F0);
4741}
4742
4743static u32 si_calculate_memory_refresh_rate(struct amdgpu_device *adev,
4744					    u32 engine_clock)
4745{
4746	u32 dram_rows;
4747	u32 dram_refresh_rate;
4748	u32 mc_arb_rfsh_rate;
4749	u32 tmp = (RREG32(MC_ARB_RAMCFG) & NOOFROWS_MASK) >> NOOFROWS_SHIFT;
4750
4751	if (tmp >= 4)
4752		dram_rows = 16384;
4753	else
4754		dram_rows = 1 << (tmp + 10);
4755
4756	dram_refresh_rate = 1 << ((RREG32(MC_SEQ_MISC0) & 0x3) + 3);
4757	mc_arb_rfsh_rate = ((engine_clock * 10) * dram_refresh_rate / dram_rows - 32) / 64;
4758
4759	return mc_arb_rfsh_rate;
4760}
4761
4762static int si_populate_memory_timing_parameters(struct amdgpu_device *adev,
4763						struct rv7xx_pl *pl,
4764						SMC_SIslands_MCArbDramTimingRegisterSet *arb_regs)
4765{
4766	u32 dram_timing;
4767	u32 dram_timing2;
4768	u32 burst_time;
4769
4770	arb_regs->mc_arb_rfsh_rate =
4771		(u8)si_calculate_memory_refresh_rate(adev, pl->sclk);
4772
4773	amdgpu_atombios_set_engine_dram_timings(adev,
4774					    pl->sclk,
4775		                            pl->mclk);
4776
4777	dram_timing  = RREG32(MC_ARB_DRAM_TIMING);
4778	dram_timing2 = RREG32(MC_ARB_DRAM_TIMING2);
4779	burst_time = RREG32(MC_ARB_BURST_TIME) & STATE0_MASK;
4780
4781	arb_regs->mc_arb_dram_timing  = cpu_to_be32(dram_timing);
4782	arb_regs->mc_arb_dram_timing2 = cpu_to_be32(dram_timing2);
4783	arb_regs->mc_arb_burst_time = (u8)burst_time;
4784
4785	return 0;
4786}
4787
4788static int si_do_program_memory_timing_parameters(struct amdgpu_device *adev,
4789						  struct amdgpu_ps *amdgpu_state,
4790						  unsigned int first_arb_set)
4791{
4792	struct si_power_info *si_pi = si_get_pi(adev);
4793	struct  si_ps *state = si_get_ps(amdgpu_state);
4794	SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
4795	int i, ret = 0;
4796
4797	for (i = 0; i < state->performance_level_count; i++) {
4798		ret = si_populate_memory_timing_parameters(adev, &state->performance_levels[i], &arb_regs);
4799		if (ret)
4800			break;
4801		ret = amdgpu_si_copy_bytes_to_smc(adev,
4802						  si_pi->arb_table_start +
4803						  offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
4804						  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * (first_arb_set + i),
4805						  (u8 *)&arb_regs,
4806						  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
4807						  si_pi->sram_end);
4808		if (ret)
4809			break;
4810	}
4811
4812	return ret;
4813}
4814
4815static int si_program_memory_timing_parameters(struct amdgpu_device *adev,
4816					       struct amdgpu_ps *amdgpu_new_state)
4817{
4818	return si_do_program_memory_timing_parameters(adev, amdgpu_new_state,
4819						      SISLANDS_DRIVER_STATE_ARB_INDEX);
4820}
4821
4822static int si_populate_initial_mvdd_value(struct amdgpu_device *adev,
4823					  struct SISLANDS_SMC_VOLTAGE_VALUE *voltage)
4824{
4825	struct rv7xx_power_info *pi = rv770_get_pi(adev);
4826	struct si_power_info *si_pi = si_get_pi(adev);
4827
4828	if (pi->mvdd_control)
4829		return si_populate_voltage_value(adev, &si_pi->mvdd_voltage_table,
4830						 si_pi->mvdd_bootup_value, voltage);
4831
4832	return 0;
4833}
4834
4835static int si_populate_smc_initial_state(struct amdgpu_device *adev,
4836					 struct amdgpu_ps *amdgpu_initial_state,
4837					 SISLANDS_SMC_STATETABLE *table)
4838{
4839	struct  si_ps *initial_state = si_get_ps(amdgpu_initial_state);
4840	struct rv7xx_power_info *pi = rv770_get_pi(adev);
4841	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
4842	struct si_power_info *si_pi = si_get_pi(adev);
4843	u32 reg;
4844	int ret;
4845
4846	table->initialState.levels[0].mclk.vDLL_CNTL =
4847		cpu_to_be32(si_pi->clock_registers.dll_cntl);
4848	table->initialState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
4849		cpu_to_be32(si_pi->clock_registers.mclk_pwrmgt_cntl);
4850	table->initialState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
4851		cpu_to_be32(si_pi->clock_registers.mpll_ad_func_cntl);
4852	table->initialState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
4853		cpu_to_be32(si_pi->clock_registers.mpll_dq_func_cntl);
4854	table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL =
4855		cpu_to_be32(si_pi->clock_registers.mpll_func_cntl);
4856	table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
4857		cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_1);
4858	table->initialState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
4859		cpu_to_be32(si_pi->clock_registers.mpll_func_cntl_2);
4860	table->initialState.levels[0].mclk.vMPLL_SS =
4861		cpu_to_be32(si_pi->clock_registers.mpll_ss1);
4862	table->initialState.levels[0].mclk.vMPLL_SS2 =
4863		cpu_to_be32(si_pi->clock_registers.mpll_ss2);
4864
4865	table->initialState.levels[0].mclk.mclk_value =
4866		cpu_to_be32(initial_state->performance_levels[0].mclk);
4867
4868	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
4869		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl);
4870	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
4871		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_2);
4872	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
4873		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_3);
4874	table->initialState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
4875		cpu_to_be32(si_pi->clock_registers.cg_spll_func_cntl_4);
4876	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM =
4877		cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum);
4878	table->initialState.levels[0].sclk.vCG_SPLL_SPREAD_SPECTRUM_2  =
4879		cpu_to_be32(si_pi->clock_registers.cg_spll_spread_spectrum_2);
4880
4881	table->initialState.levels[0].sclk.sclk_value =
4882		cpu_to_be32(initial_state->performance_levels[0].sclk);
4883
4884	table->initialState.levels[0].arbRefreshState =
4885		SISLANDS_INITIAL_STATE_ARB_INDEX;
4886
4887	table->initialState.levels[0].ACIndex = 0;
4888
4889	ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
4890					initial_state->performance_levels[0].vddc,
4891					&table->initialState.levels[0].vddc);
4892
4893	if (!ret) {
4894		u16 std_vddc;
4895
4896		ret = si_get_std_voltage_value(adev,
4897					       &table->initialState.levels[0].vddc,
4898					       &std_vddc);
4899		if (!ret)
4900			si_populate_std_voltage_value(adev, std_vddc,
4901						      table->initialState.levels[0].vddc.index,
4902						      &table->initialState.levels[0].std_vddc);
4903	}
4904
4905	if (eg_pi->vddci_control)
4906		si_populate_voltage_value(adev,
4907					  &eg_pi->vddci_voltage_table,
4908					  initial_state->performance_levels[0].vddci,
4909					  &table->initialState.levels[0].vddci);
4910
4911	if (si_pi->vddc_phase_shed_control)
4912		si_populate_phase_shedding_value(adev,
4913						 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
4914						 initial_state->performance_levels[0].vddc,
4915						 initial_state->performance_levels[0].sclk,
4916						 initial_state->performance_levels[0].mclk,
4917						 &table->initialState.levels[0].vddc);
4918
4919	si_populate_initial_mvdd_value(adev, &table->initialState.levels[0].mvdd);
4920
4921	reg = CG_R(0xffff) | CG_L(0);
4922	table->initialState.levels[0].aT = cpu_to_be32(reg);
4923	table->initialState.levels[0].bSP = cpu_to_be32(pi->dsp);
4924	table->initialState.levels[0].gen2PCIE = (u8)si_pi->boot_pcie_gen;
4925
4926	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
4927		table->initialState.levels[0].strobeMode =
4928			si_get_strobe_mode_settings(adev,
4929						    initial_state->performance_levels[0].mclk);
4930
4931		if (initial_state->performance_levels[0].mclk > pi->mclk_edc_enable_threshold)
4932			table->initialState.levels[0].mcFlags = SISLANDS_SMC_MC_EDC_RD_FLAG | SISLANDS_SMC_MC_EDC_WR_FLAG;
4933		else
4934			table->initialState.levels[0].mcFlags =  0;
4935	}
4936
4937	table->initialState.levelCount = 1;
4938
4939	table->initialState.flags |= PPSMC_SWSTATE_FLAG_DC;
4940
4941	table->initialState.levels[0].dpm2.MaxPS = 0;
4942	table->initialState.levels[0].dpm2.NearTDPDec = 0;
4943	table->initialState.levels[0].dpm2.AboveSafeInc = 0;
4944	table->initialState.levels[0].dpm2.BelowSafeInc = 0;
4945	table->initialState.levels[0].dpm2.PwrEfficiencyRatio = 0;
4946
4947	reg = MIN_POWER_MASK | MAX_POWER_MASK;
4948	table->initialState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
4949
4950	reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
4951	table->initialState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
4952
4953	return 0;
4954}
4955
4956static int si_populate_smc_acpi_state(struct amdgpu_device *adev,
4957				      SISLANDS_SMC_STATETABLE *table)
4958{
4959	struct rv7xx_power_info *pi = rv770_get_pi(adev);
4960	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
4961	struct si_power_info *si_pi = si_get_pi(adev);
4962	u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
4963	u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
4964	u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
4965	u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
4966	u32 dll_cntl = si_pi->clock_registers.dll_cntl;
4967	u32 mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
4968	u32 mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
4969	u32 mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
4970	u32 mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
4971	u32 mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
4972	u32 mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
4973	u32 reg;
4974	int ret;
4975
4976	table->ACPIState = table->initialState;
4977
4978	table->ACPIState.flags &= ~PPSMC_SWSTATE_FLAG_DC;
4979
4980	if (pi->acpi_vddc) {
4981		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
4982						pi->acpi_vddc, &table->ACPIState.levels[0].vddc);
4983		if (!ret) {
4984			u16 std_vddc;
4985
4986			ret = si_get_std_voltage_value(adev,
4987						       &table->ACPIState.levels[0].vddc, &std_vddc);
4988			if (!ret)
4989				si_populate_std_voltage_value(adev, std_vddc,
4990							      table->ACPIState.levels[0].vddc.index,
4991							      &table->ACPIState.levels[0].std_vddc);
4992		}
4993		table->ACPIState.levels[0].gen2PCIE = si_pi->acpi_pcie_gen;
4994
4995		if (si_pi->vddc_phase_shed_control) {
4996			si_populate_phase_shedding_value(adev,
4997							 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
4998							 pi->acpi_vddc,
4999							 0,
5000							 0,
5001							 &table->ACPIState.levels[0].vddc);
5002		}
5003	} else {
5004		ret = si_populate_voltage_value(adev, &eg_pi->vddc_voltage_table,
5005						pi->min_vddc_in_table, &table->ACPIState.levels[0].vddc);
5006		if (!ret) {
5007			u16 std_vddc;
5008
5009			ret = si_get_std_voltage_value(adev,
5010						       &table->ACPIState.levels[0].vddc, &std_vddc);
5011
5012			if (!ret)
5013				si_populate_std_voltage_value(adev, std_vddc,
5014							      table->ACPIState.levels[0].vddc.index,
5015							      &table->ACPIState.levels[0].std_vddc);
5016		}
5017		table->ACPIState.levels[0].gen2PCIE =
5018			(u8)amdgpu_get_pcie_gen_support(adev,
5019							si_pi->sys_pcie_mask,
5020							si_pi->boot_pcie_gen,
5021							AMDGPU_PCIE_GEN1);
5022
5023		if (si_pi->vddc_phase_shed_control)
5024			si_populate_phase_shedding_value(adev,
5025							 &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
5026							 pi->min_vddc_in_table,
5027							 0,
5028							 0,
5029							 &table->ACPIState.levels[0].vddc);
5030	}
5031
5032	if (pi->acpi_vddc) {
5033		if (eg_pi->acpi_vddci)
5034			si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
5035						  eg_pi->acpi_vddci,
5036						  &table->ACPIState.levels[0].vddci);
5037	}
5038
5039	mclk_pwrmgt_cntl |= MRDCK0_RESET | MRDCK1_RESET;
5040	mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
5041
5042	dll_cntl &= ~(MRDCK0_BYPASS | MRDCK1_BYPASS);
5043
5044	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
5045	spll_func_cntl_2 |= SCLK_MUX_SEL(4);
5046
5047	table->ACPIState.levels[0].mclk.vDLL_CNTL =
5048		cpu_to_be32(dll_cntl);
5049	table->ACPIState.levels[0].mclk.vMCLK_PWRMGT_CNTL =
5050		cpu_to_be32(mclk_pwrmgt_cntl);
5051	table->ACPIState.levels[0].mclk.vMPLL_AD_FUNC_CNTL =
5052		cpu_to_be32(mpll_ad_func_cntl);
5053	table->ACPIState.levels[0].mclk.vMPLL_DQ_FUNC_CNTL =
5054		cpu_to_be32(mpll_dq_func_cntl);
5055	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL =
5056		cpu_to_be32(mpll_func_cntl);
5057	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_1 =
5058		cpu_to_be32(mpll_func_cntl_1);
5059	table->ACPIState.levels[0].mclk.vMPLL_FUNC_CNTL_2 =
5060		cpu_to_be32(mpll_func_cntl_2);
5061	table->ACPIState.levels[0].mclk.vMPLL_SS =
5062		cpu_to_be32(si_pi->clock_registers.mpll_ss1);
5063	table->ACPIState.levels[0].mclk.vMPLL_SS2 =
5064		cpu_to_be32(si_pi->clock_registers.mpll_ss2);
5065
5066	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL =
5067		cpu_to_be32(spll_func_cntl);
5068	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_2 =
5069		cpu_to_be32(spll_func_cntl_2);
5070	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_3 =
5071		cpu_to_be32(spll_func_cntl_3);
5072	table->ACPIState.levels[0].sclk.vCG_SPLL_FUNC_CNTL_4 =
5073		cpu_to_be32(spll_func_cntl_4);
5074
5075	table->ACPIState.levels[0].mclk.mclk_value = 0;
5076	table->ACPIState.levels[0].sclk.sclk_value = 0;
5077
5078	si_populate_mvdd_value(adev, 0, &table->ACPIState.levels[0].mvdd);
5079
5080	if (eg_pi->dynamic_ac_timing)
5081		table->ACPIState.levels[0].ACIndex = 0;
5082
5083	table->ACPIState.levels[0].dpm2.MaxPS = 0;
5084	table->ACPIState.levels[0].dpm2.NearTDPDec = 0;
5085	table->ACPIState.levels[0].dpm2.AboveSafeInc = 0;
5086	table->ACPIState.levels[0].dpm2.BelowSafeInc = 0;
5087	table->ACPIState.levels[0].dpm2.PwrEfficiencyRatio = 0;
5088
5089	reg = MIN_POWER_MASK | MAX_POWER_MASK;
5090	table->ACPIState.levels[0].SQPowerThrottle = cpu_to_be32(reg);
5091
5092	reg = MAX_POWER_DELTA_MASK | STI_SIZE_MASK | LTI_RATIO_MASK;
5093	table->ACPIState.levels[0].SQPowerThrottle_2 = cpu_to_be32(reg);
5094
5095	return 0;
5096}
5097
5098static int si_populate_ulv_state(struct amdgpu_device *adev,
5099				 SISLANDS_SMC_SWSTATE *state)
5100{
5101	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
5102	struct si_power_info *si_pi = si_get_pi(adev);
5103	struct si_ulv_param *ulv = &si_pi->ulv;
5104	u32 sclk_in_sr = 1350; /* ??? */
5105	int ret;
5106
5107	ret = si_convert_power_level_to_smc(adev, &ulv->pl,
5108					    &state->levels[0]);
5109	if (!ret) {
5110		if (eg_pi->sclk_deep_sleep) {
5111			if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
5112				state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
5113			else
5114				state->levels[0].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
5115		}
5116		if (ulv->one_pcie_lane_in_ulv)
5117			state->flags |= PPSMC_SWSTATE_FLAG_PCIE_X1;
5118		state->levels[0].arbRefreshState = (u8)(SISLANDS_ULV_STATE_ARB_INDEX);
5119		state->levels[0].ACIndex = 1;
5120		state->levels[0].std_vddc = state->levels[0].vddc;
5121		state->levelCount = 1;
5122
5123		state->flags |= PPSMC_SWSTATE_FLAG_DC;
5124	}
5125
5126	return ret;
5127}
5128
5129static int si_program_ulv_memory_timing_parameters(struct amdgpu_device *adev)
5130{
5131	struct si_power_info *si_pi = si_get_pi(adev);
5132	struct si_ulv_param *ulv = &si_pi->ulv;
5133	SMC_SIslands_MCArbDramTimingRegisterSet arb_regs = { 0 };
5134	int ret;
5135
5136	ret = si_populate_memory_timing_parameters(adev, &ulv->pl,
5137						   &arb_regs);
5138	if (ret)
5139		return ret;
5140
5141	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_ulv_volt_change_delay,
5142				   ulv->volt_change_delay);
5143
5144	ret = amdgpu_si_copy_bytes_to_smc(adev,
5145					  si_pi->arb_table_start +
5146					  offsetof(SMC_SIslands_MCArbDramTimingRegisters, data) +
5147					  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet) * SISLANDS_ULV_STATE_ARB_INDEX,
5148					  (u8 *)&arb_regs,
5149					  sizeof(SMC_SIslands_MCArbDramTimingRegisterSet),
5150					  si_pi->sram_end);
5151
5152	return ret;
5153}
5154
5155static void si_get_mvdd_configuration(struct amdgpu_device *adev)
5156{
5157	struct rv7xx_power_info *pi = rv770_get_pi(adev);
5158
5159	pi->mvdd_split_frequency = 30000;
5160}
5161
5162static int si_init_smc_table(struct amdgpu_device *adev)
5163{
5164	struct si_power_info *si_pi = si_get_pi(adev);
5165	struct amdgpu_ps *amdgpu_boot_state = adev->pm.dpm.boot_ps;
5166	const struct si_ulv_param *ulv = &si_pi->ulv;
5167	SISLANDS_SMC_STATETABLE  *table = &si_pi->smc_statetable;
5168	int ret;
5169	u32 lane_width;
5170	u32 vr_hot_gpio;
5171
5172	si_populate_smc_voltage_tables(adev, table);
5173
5174	switch (adev->pm.int_thermal_type) {
5175	case THERMAL_TYPE_SI:
5176	case THERMAL_TYPE_EMC2103_WITH_INTERNAL:
5177		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_INTERNAL;
5178		break;
5179	case THERMAL_TYPE_NONE:
5180		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_NONE;
5181		break;
5182	default:
5183		table->thermalProtectType = PPSMC_THERMAL_PROTECT_TYPE_EXTERNAL;
5184		break;
5185	}
5186
5187	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_HARDWAREDC)
5188		table->systemFlags |= PPSMC_SYSTEMFLAG_GPIO_DC;
5189
5190	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REGULATOR_HOT) {
5191		if ((adev->pdev->device != 0x6818) && (adev->pdev->device != 0x6819))
5192			table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT;
5193	}
5194
5195	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_STEPVDDC)
5196		table->systemFlags |= PPSMC_SYSTEMFLAG_STEPVDDC;
5197
5198	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
5199		table->systemFlags |= PPSMC_SYSTEMFLAG_GDDR5;
5200
5201	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_REVERT_GPIO5_POLARITY)
5202		table->extraFlags |= PPSMC_EXTRAFLAGS_AC2DC_GPIO5_POLARITY_HIGH;
5203
5204	if (adev->pm.dpm.platform_caps & ATOM_PP_PLATFORM_CAP_VRHOT_GPIO_CONFIGURABLE) {
5205		table->systemFlags |= PPSMC_SYSTEMFLAG_REGULATOR_HOT_PROG_GPIO;
5206		vr_hot_gpio = adev->pm.dpm.backbias_response_time;
5207		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_vr_hot_gpio,
5208					   vr_hot_gpio);
5209	}
5210
5211	ret = si_populate_smc_initial_state(adev, amdgpu_boot_state, table);
5212	if (ret)
5213		return ret;
5214
5215	ret = si_populate_smc_acpi_state(adev, table);
5216	if (ret)
5217		return ret;
5218
5219	table->driverState = table->initialState;
5220
5221	ret = si_do_program_memory_timing_parameters(adev, amdgpu_boot_state,
5222						     SISLANDS_INITIAL_STATE_ARB_INDEX);
5223	if (ret)
5224		return ret;
5225
5226	if (ulv->supported && ulv->pl.vddc) {
5227		ret = si_populate_ulv_state(adev, &table->ULVState);
5228		if (ret)
5229			return ret;
5230
5231		ret = si_program_ulv_memory_timing_parameters(adev);
5232		if (ret)
5233			return ret;
5234
5235		WREG32(CG_ULV_CONTROL, ulv->cg_ulv_control);
5236		WREG32(CG_ULV_PARAMETER, ulv->cg_ulv_parameter);
5237
5238		lane_width = amdgpu_get_pcie_lanes(adev);
5239		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
5240	} else {
5241		table->ULVState = table->initialState;
5242	}
5243
5244	return amdgpu_si_copy_bytes_to_smc(adev, si_pi->state_table_start,
5245					   (u8 *)table, sizeof(SISLANDS_SMC_STATETABLE),
5246					   si_pi->sram_end);
5247}
5248
5249static int si_calculate_sclk_params(struct amdgpu_device *adev,
5250				    u32 engine_clock,
5251				    SISLANDS_SMC_SCLK_VALUE *sclk)
5252{
5253	struct rv7xx_power_info *pi = rv770_get_pi(adev);
5254	struct si_power_info *si_pi = si_get_pi(adev);
5255	struct atom_clock_dividers dividers;
5256	u32 spll_func_cntl = si_pi->clock_registers.cg_spll_func_cntl;
5257	u32 spll_func_cntl_2 = si_pi->clock_registers.cg_spll_func_cntl_2;
5258	u32 spll_func_cntl_3 = si_pi->clock_registers.cg_spll_func_cntl_3;
5259	u32 spll_func_cntl_4 = si_pi->clock_registers.cg_spll_func_cntl_4;
5260	u32 cg_spll_spread_spectrum = si_pi->clock_registers.cg_spll_spread_spectrum;
5261	u32 cg_spll_spread_spectrum_2 = si_pi->clock_registers.cg_spll_spread_spectrum_2;
5262	u64 tmp;
5263	u32 reference_clock = adev->clock.spll.reference_freq;
5264	u32 reference_divider;
5265	u32 fbdiv;
5266	int ret;
5267
5268	ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
5269					     engine_clock, false, &dividers);
5270	if (ret)
5271		return ret;
5272
5273	reference_divider = 1 + dividers.ref_div;
5274
5275	tmp = (u64) engine_clock * reference_divider * dividers.post_div * 16384;
5276	do_div(tmp, reference_clock);
5277	fbdiv = (u32) tmp;
5278
5279	spll_func_cntl &= ~(SPLL_PDIV_A_MASK | SPLL_REF_DIV_MASK);
5280	spll_func_cntl |= SPLL_REF_DIV(dividers.ref_div);
5281	spll_func_cntl |= SPLL_PDIV_A(dividers.post_div);
5282
5283	spll_func_cntl_2 &= ~SCLK_MUX_SEL_MASK;
5284	spll_func_cntl_2 |= SCLK_MUX_SEL(2);
5285
5286	spll_func_cntl_3 &= ~SPLL_FB_DIV_MASK;
5287	spll_func_cntl_3 |= SPLL_FB_DIV(fbdiv);
5288	spll_func_cntl_3 |= SPLL_DITHEN;
5289
5290	if (pi->sclk_ss) {
5291		struct amdgpu_atom_ss ss;
5292		u32 vco_freq = engine_clock * dividers.post_div;
5293
5294		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
5295						     ASIC_INTERNAL_ENGINE_SS, vco_freq)) {
5296			u32 clk_s = reference_clock * 5 / (reference_divider * ss.rate);
5297			u32 clk_v = 4 * ss.percentage * fbdiv / (clk_s * 10000);
5298
5299			cg_spll_spread_spectrum &= ~CLK_S_MASK;
5300			cg_spll_spread_spectrum |= CLK_S(clk_s);
5301			cg_spll_spread_spectrum |= SSEN;
5302
5303			cg_spll_spread_spectrum_2 &= ~CLK_V_MASK;
5304			cg_spll_spread_spectrum_2 |= CLK_V(clk_v);
5305		}
5306	}
5307
5308	sclk->sclk_value = engine_clock;
5309	sclk->vCG_SPLL_FUNC_CNTL = spll_func_cntl;
5310	sclk->vCG_SPLL_FUNC_CNTL_2 = spll_func_cntl_2;
5311	sclk->vCG_SPLL_FUNC_CNTL_3 = spll_func_cntl_3;
5312	sclk->vCG_SPLL_FUNC_CNTL_4 = spll_func_cntl_4;
5313	sclk->vCG_SPLL_SPREAD_SPECTRUM = cg_spll_spread_spectrum;
5314	sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cg_spll_spread_spectrum_2;
5315
5316	return 0;
5317}
5318
5319static int si_populate_sclk_value(struct amdgpu_device *adev,
5320				  u32 engine_clock,
5321				  SISLANDS_SMC_SCLK_VALUE *sclk)
5322{
5323	SISLANDS_SMC_SCLK_VALUE sclk_tmp;
5324	int ret;
5325
5326	ret = si_calculate_sclk_params(adev, engine_clock, &sclk_tmp);
5327	if (!ret) {
5328		sclk->sclk_value = cpu_to_be32(sclk_tmp.sclk_value);
5329		sclk->vCG_SPLL_FUNC_CNTL = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL);
5330		sclk->vCG_SPLL_FUNC_CNTL_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_2);
5331		sclk->vCG_SPLL_FUNC_CNTL_3 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_3);
5332		sclk->vCG_SPLL_FUNC_CNTL_4 = cpu_to_be32(sclk_tmp.vCG_SPLL_FUNC_CNTL_4);
5333		sclk->vCG_SPLL_SPREAD_SPECTRUM = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM);
5334		sclk->vCG_SPLL_SPREAD_SPECTRUM_2 = cpu_to_be32(sclk_tmp.vCG_SPLL_SPREAD_SPECTRUM_2);
5335	}
5336
5337	return ret;
5338}
5339
5340static int si_populate_mclk_value(struct amdgpu_device *adev,
5341				  u32 engine_clock,
5342				  u32 memory_clock,
5343				  SISLANDS_SMC_MCLK_VALUE *mclk,
5344				  bool strobe_mode,
5345				  bool dll_state_on)
5346{
5347	struct rv7xx_power_info *pi = rv770_get_pi(adev);
5348	struct si_power_info *si_pi = si_get_pi(adev);
5349	u32  dll_cntl = si_pi->clock_registers.dll_cntl;
5350	u32  mclk_pwrmgt_cntl = si_pi->clock_registers.mclk_pwrmgt_cntl;
5351	u32  mpll_ad_func_cntl = si_pi->clock_registers.mpll_ad_func_cntl;
5352	u32  mpll_dq_func_cntl = si_pi->clock_registers.mpll_dq_func_cntl;
5353	u32  mpll_func_cntl = si_pi->clock_registers.mpll_func_cntl;
5354	u32  mpll_func_cntl_1 = si_pi->clock_registers.mpll_func_cntl_1;
5355	u32  mpll_func_cntl_2 = si_pi->clock_registers.mpll_func_cntl_2;
5356	u32  mpll_ss1 = si_pi->clock_registers.mpll_ss1;
5357	u32  mpll_ss2 = si_pi->clock_registers.mpll_ss2;
5358	struct atom_mpll_param mpll_param;
5359	int ret;
5360
5361	ret = amdgpu_atombios_get_memory_pll_dividers(adev, memory_clock, strobe_mode, &mpll_param);
5362	if (ret)
5363		return ret;
5364
5365	mpll_func_cntl &= ~BWCTRL_MASK;
5366	mpll_func_cntl |= BWCTRL(mpll_param.bwcntl);
5367
5368	mpll_func_cntl_1 &= ~(CLKF_MASK | CLKFRAC_MASK | VCO_MODE_MASK);
5369	mpll_func_cntl_1 |= CLKF(mpll_param.clkf) |
5370		CLKFRAC(mpll_param.clkfrac) | VCO_MODE(mpll_param.vco_mode);
5371
5372	mpll_ad_func_cntl &= ~YCLK_POST_DIV_MASK;
5373	mpll_ad_func_cntl |= YCLK_POST_DIV(mpll_param.post_div);
5374
5375	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
5376		mpll_dq_func_cntl &= ~(YCLK_SEL_MASK | YCLK_POST_DIV_MASK);
5377		mpll_dq_func_cntl |= YCLK_SEL(mpll_param.yclk_sel) |
5378			YCLK_POST_DIV(mpll_param.post_div);
5379	}
5380
5381	if (pi->mclk_ss) {
5382		struct amdgpu_atom_ss ss;
5383		u32 freq_nom;
5384		u32 tmp;
5385		u32 reference_clock = adev->clock.mpll.reference_freq;
5386
5387		if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5)
5388			freq_nom = memory_clock * 4;
5389		else
5390			freq_nom = memory_clock * 2;
5391
5392		tmp = freq_nom / reference_clock;
5393		tmp = tmp * tmp;
5394		if (amdgpu_atombios_get_asic_ss_info(adev, &ss,
5395		                                     ASIC_INTERNAL_MEMORY_SS, freq_nom)) {
5396			u32 clks = reference_clock * 5 / ss.rate;
5397			u32 clkv = (u32)((((131 * ss.percentage * ss.rate) / 100) * tmp) / freq_nom);
5398
5399		        mpll_ss1 &= ~CLKV_MASK;
5400		        mpll_ss1 |= CLKV(clkv);
5401
5402		        mpll_ss2 &= ~CLKS_MASK;
5403		        mpll_ss2 |= CLKS(clks);
5404		}
5405	}
5406
5407	mclk_pwrmgt_cntl &= ~DLL_SPEED_MASK;
5408	mclk_pwrmgt_cntl |= DLL_SPEED(mpll_param.dll_speed);
5409
5410	if (dll_state_on)
5411		mclk_pwrmgt_cntl |= MRDCK0_PDNB | MRDCK1_PDNB;
5412	else
5413		mclk_pwrmgt_cntl &= ~(MRDCK0_PDNB | MRDCK1_PDNB);
5414
5415	mclk->mclk_value = cpu_to_be32(memory_clock);
5416	mclk->vMPLL_FUNC_CNTL = cpu_to_be32(mpll_func_cntl);
5417	mclk->vMPLL_FUNC_CNTL_1 = cpu_to_be32(mpll_func_cntl_1);
5418	mclk->vMPLL_FUNC_CNTL_2 = cpu_to_be32(mpll_func_cntl_2);
5419	mclk->vMPLL_AD_FUNC_CNTL = cpu_to_be32(mpll_ad_func_cntl);
5420	mclk->vMPLL_DQ_FUNC_CNTL = cpu_to_be32(mpll_dq_func_cntl);
5421	mclk->vMCLK_PWRMGT_CNTL = cpu_to_be32(mclk_pwrmgt_cntl);
5422	mclk->vDLL_CNTL = cpu_to_be32(dll_cntl);
5423	mclk->vMPLL_SS = cpu_to_be32(mpll_ss1);
5424	mclk->vMPLL_SS2 = cpu_to_be32(mpll_ss2);
5425
5426	return 0;
5427}
5428
5429static void si_populate_smc_sp(struct amdgpu_device *adev,
5430			       struct amdgpu_ps *amdgpu_state,
5431			       SISLANDS_SMC_SWSTATE *smc_state)
5432{
5433	struct  si_ps *ps = si_get_ps(amdgpu_state);
5434	struct rv7xx_power_info *pi = rv770_get_pi(adev);
5435	int i;
5436
5437	for (i = 0; i < ps->performance_level_count - 1; i++)
5438		smc_state->levels[i].bSP = cpu_to_be32(pi->dsp);
5439
5440	smc_state->levels[ps->performance_level_count - 1].bSP =
5441		cpu_to_be32(pi->psp);
5442}
5443
5444static int si_convert_power_level_to_smc(struct amdgpu_device *adev,
5445					 struct rv7xx_pl *pl,
5446					 SISLANDS_SMC_HW_PERFORMANCE_LEVEL *level)
5447{
5448	struct rv7xx_power_info *pi = rv770_get_pi(adev);
5449	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
5450	struct si_power_info *si_pi = si_get_pi(adev);
5451	int ret;
5452	bool dll_state_on;
5453	u16 std_vddc;
5454	bool gmc_pg = false;
5455
5456	if (eg_pi->pcie_performance_request &&
5457	    (si_pi->force_pcie_gen != AMDGPU_PCIE_GEN_INVALID))
5458		level->gen2PCIE = (u8)si_pi->force_pcie_gen;
5459	else
5460		level->gen2PCIE = (u8)pl->pcie_gen;
5461
5462	ret = si_populate_sclk_value(adev, pl->sclk, &level->sclk);
5463	if (ret)
5464		return ret;
5465
5466	level->mcFlags =  0;
5467
5468	if (pi->mclk_stutter_mode_threshold &&
5469	    (pl->mclk <= pi->mclk_stutter_mode_threshold) &&
5470	    !eg_pi->uvd_enabled &&
5471	    (RREG32(DPG_PIPE_STUTTER_CONTROL) & STUTTER_ENABLE) &&
5472	    (adev->pm.dpm.new_active_crtc_count <= 2)) {
5473		level->mcFlags |= SISLANDS_SMC_MC_STUTTER_EN;
5474
5475		if (gmc_pg)
5476			level->mcFlags |= SISLANDS_SMC_MC_PG_EN;
5477	}
5478
5479	if (adev->gmc.vram_type == AMDGPU_VRAM_TYPE_GDDR5) {
5480		if (pl->mclk > pi->mclk_edc_enable_threshold)
5481			level->mcFlags |= SISLANDS_SMC_MC_EDC_RD_FLAG;
5482
5483		if (pl->mclk > eg_pi->mclk_edc_wr_enable_threshold)
5484			level->mcFlags |= SISLANDS_SMC_MC_EDC_WR_FLAG;
5485
5486		level->strobeMode = si_get_strobe_mode_settings(adev, pl->mclk);
5487
5488		if (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) {
5489			if (si_get_mclk_frequency_ratio(pl->mclk, true) >=
5490			    ((RREG32(MC_SEQ_MISC7) >> 16) & 0xf))
5491				dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
5492			else
5493				dll_state_on = ((RREG32(MC_SEQ_MISC6) >> 1) & 0x1) ? true : false;
5494		} else {
5495			dll_state_on = false;
5496		}
5497	} else {
5498		level->strobeMode = si_get_strobe_mode_settings(adev,
5499								pl->mclk);
5500
5501		dll_state_on = ((RREG32(MC_SEQ_MISC5) >> 1) & 0x1) ? true : false;
5502	}
5503
5504	ret = si_populate_mclk_value(adev,
5505				     pl->sclk,
5506				     pl->mclk,
5507				     &level->mclk,
5508				     (level->strobeMode & SISLANDS_SMC_STROBE_ENABLE) != 0, dll_state_on);
5509	if (ret)
5510		return ret;
5511
5512	ret = si_populate_voltage_value(adev,
5513					&eg_pi->vddc_voltage_table,
5514					pl->vddc, &level->vddc);
5515	if (ret)
5516		return ret;
5517
5518
5519	ret = si_get_std_voltage_value(adev, &level->vddc, &std_vddc);
5520	if (ret)
5521		return ret;
5522
5523	ret = si_populate_std_voltage_value(adev, std_vddc,
5524					    level->vddc.index, &level->std_vddc);
5525	if (ret)
5526		return ret;
5527
5528	if (eg_pi->vddci_control) {
5529		ret = si_populate_voltage_value(adev, &eg_pi->vddci_voltage_table,
5530						pl->vddci, &level->vddci);
5531		if (ret)
5532			return ret;
5533	}
5534
5535	if (si_pi->vddc_phase_shed_control) {
5536		ret = si_populate_phase_shedding_value(adev,
5537						       &adev->pm.dpm.dyn_state.phase_shedding_limits_table,
5538						       pl->vddc,
5539						       pl->sclk,
5540						       pl->mclk,
5541						       &level->vddc);
5542		if (ret)
5543			return ret;
5544	}
5545
5546	level->MaxPoweredUpCU = si_pi->max_cu;
5547
5548	ret = si_populate_mvdd_value(adev, pl->mclk, &level->mvdd);
5549
5550	return ret;
5551}
5552
5553static int si_populate_smc_t(struct amdgpu_device *adev,
5554			     struct amdgpu_ps *amdgpu_state,
5555			     SISLANDS_SMC_SWSTATE *smc_state)
5556{
5557	struct rv7xx_power_info *pi = rv770_get_pi(adev);
5558	struct  si_ps *state = si_get_ps(amdgpu_state);
5559	u32 a_t;
5560	u32 t_l, t_h;
5561	u32 high_bsp;
5562	int i, ret;
5563
5564	if (state->performance_level_count >= 9)
5565		return -EINVAL;
5566
5567	if (state->performance_level_count < 2) {
5568		a_t = CG_R(0xffff) | CG_L(0);
5569		smc_state->levels[0].aT = cpu_to_be32(a_t);
5570		return 0;
5571	}
5572
5573	smc_state->levels[0].aT = cpu_to_be32(0);
5574
5575	for (i = 0; i <= state->performance_level_count - 2; i++) {
5576		ret = r600_calculate_at(
5577			(50 / SISLANDS_MAX_HARDWARE_POWERLEVELS) * 100 * (i + 1),
5578			100 * R600_AH_DFLT,
5579			state->performance_levels[i + 1].sclk,
5580			state->performance_levels[i].sclk,
5581			&t_l,
5582			&t_h);
5583
5584		if (ret) {
5585			t_h = (i + 1) * 1000 - 50 * R600_AH_DFLT;
5586			t_l = (i + 1) * 1000 + 50 * R600_AH_DFLT;
5587		}
5588
5589		a_t = be32_to_cpu(smc_state->levels[i].aT) & ~CG_R_MASK;
5590		a_t |= CG_R(t_l * pi->bsp / 20000);
5591		smc_state->levels[i].aT = cpu_to_be32(a_t);
5592
5593		high_bsp = (i == state->performance_level_count - 2) ?
5594			pi->pbsp : pi->bsp;
5595		a_t = CG_R(0xffff) | CG_L(t_h * high_bsp / 20000);
5596		smc_state->levels[i + 1].aT = cpu_to_be32(a_t);
5597	}
5598
5599	return 0;
5600}
5601
5602static int si_disable_ulv(struct amdgpu_device *adev)
5603{
5604	struct si_power_info *si_pi = si_get_pi(adev);
5605	struct si_ulv_param *ulv = &si_pi->ulv;
5606
5607	if (ulv->supported)
5608		return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_DisableULV) == PPSMC_Result_OK) ?
5609			0 : -EINVAL;
5610
5611	return 0;
5612}
5613
5614static bool si_is_state_ulv_compatible(struct amdgpu_device *adev,
5615				       struct amdgpu_ps *amdgpu_state)
5616{
5617	const struct si_power_info *si_pi = si_get_pi(adev);
5618	const struct si_ulv_param *ulv = &si_pi->ulv;
5619	const struct  si_ps *state = si_get_ps(amdgpu_state);
5620	int i;
5621
5622	if (state->performance_levels[0].mclk != ulv->pl.mclk)
5623		return false;
5624
5625	/* XXX validate against display requirements! */
5626
5627	for (i = 0; i < adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count; i++) {
5628		if (adev->clock.current_dispclk <=
5629		    adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].clk) {
5630			if (ulv->pl.vddc <
5631			    adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[i].v)
5632				return false;
5633		}
5634	}
5635
5636	if ((amdgpu_state->vclk != 0) || (amdgpu_state->dclk != 0))
5637		return false;
5638
5639	return true;
5640}
5641
5642static int si_set_power_state_conditionally_enable_ulv(struct amdgpu_device *adev,
5643						       struct amdgpu_ps *amdgpu_new_state)
5644{
5645	const struct si_power_info *si_pi = si_get_pi(adev);
5646	const struct si_ulv_param *ulv = &si_pi->ulv;
5647
5648	if (ulv->supported) {
5649		if (si_is_state_ulv_compatible(adev, amdgpu_new_state))
5650			return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableULV) == PPSMC_Result_OK) ?
5651				0 : -EINVAL;
5652	}
5653	return 0;
5654}
5655
5656static int si_convert_power_state_to_smc(struct amdgpu_device *adev,
5657					 struct amdgpu_ps *amdgpu_state,
5658					 SISLANDS_SMC_SWSTATE *smc_state)
5659{
5660	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
5661	struct ni_power_info *ni_pi = ni_get_pi(adev);
5662	struct si_power_info *si_pi = si_get_pi(adev);
5663	struct  si_ps *state = si_get_ps(amdgpu_state);
5664	int i, ret;
5665	u32 threshold;
5666	u32 sclk_in_sr = 1350; /* ??? */
5667
5668	if (state->performance_level_count > SISLANDS_MAX_HARDWARE_POWERLEVELS)
5669		return -EINVAL;
5670
5671	threshold = state->performance_levels[state->performance_level_count-1].sclk * 100 / 100;
5672
5673	if (amdgpu_state->vclk && amdgpu_state->dclk) {
5674		eg_pi->uvd_enabled = true;
5675		if (eg_pi->smu_uvd_hs)
5676			smc_state->flags |= PPSMC_SWSTATE_FLAG_UVD;
5677	} else {
5678		eg_pi->uvd_enabled = false;
5679	}
5680
5681	if (state->dc_compatible)
5682		smc_state->flags |= PPSMC_SWSTATE_FLAG_DC;
5683
5684	smc_state->levelCount = 0;
5685	for (i = 0; i < state->performance_level_count; i++) {
5686		if (eg_pi->sclk_deep_sleep) {
5687			if ((i == 0) || si_pi->sclk_deep_sleep_above_low) {
5688				if (sclk_in_sr <= SCLK_MIN_DEEPSLEEP_FREQ)
5689					smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_BYPASS;
5690				else
5691					smc_state->levels[i].stateFlags |= PPSMC_STATEFLAG_DEEPSLEEP_THROTTLE;
5692			}
5693		}
5694
5695		ret = si_convert_power_level_to_smc(adev, &state->performance_levels[i],
5696						    &smc_state->levels[i]);
5697		smc_state->levels[i].arbRefreshState =
5698			(u8)(SISLANDS_DRIVER_STATE_ARB_INDEX + i);
5699
5700		if (ret)
5701			return ret;
5702
5703		if (ni_pi->enable_power_containment)
5704			smc_state->levels[i].displayWatermark =
5705				(state->performance_levels[i].sclk < threshold) ?
5706				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
5707		else
5708			smc_state->levels[i].displayWatermark = (i < 2) ?
5709				PPSMC_DISPLAY_WATERMARK_LOW : PPSMC_DISPLAY_WATERMARK_HIGH;
5710
5711		if (eg_pi->dynamic_ac_timing)
5712			smc_state->levels[i].ACIndex = SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i;
5713		else
5714			smc_state->levels[i].ACIndex = 0;
5715
5716		smc_state->levelCount++;
5717	}
5718
5719	si_write_smc_soft_register(adev,
5720				   SI_SMC_SOFT_REGISTER_watermark_threshold,
5721				   threshold / 512);
5722
5723	si_populate_smc_sp(adev, amdgpu_state, smc_state);
5724
5725	ret = si_populate_power_containment_values(adev, amdgpu_state, smc_state);
5726	if (ret)
5727		ni_pi->enable_power_containment = false;
5728
5729	ret = si_populate_sq_ramping_values(adev, amdgpu_state, smc_state);
5730	if (ret)
5731		ni_pi->enable_sq_ramping = false;
5732
5733	return si_populate_smc_t(adev, amdgpu_state, smc_state);
5734}
5735
5736static int si_upload_sw_state(struct amdgpu_device *adev,
5737			      struct amdgpu_ps *amdgpu_new_state)
5738{
5739	struct si_power_info *si_pi = si_get_pi(adev);
5740	struct  si_ps *new_state = si_get_ps(amdgpu_new_state);
5741	int ret;
5742	u32 address = si_pi->state_table_start +
5743		offsetof(SISLANDS_SMC_STATETABLE, driverState);
5744	u32 state_size = sizeof(SISLANDS_SMC_SWSTATE) +
5745		((new_state->performance_level_count - 1) *
5746		 sizeof(SISLANDS_SMC_HW_PERFORMANCE_LEVEL));
5747	SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.driverState;
5748
5749	memset(smc_state, 0, state_size);
5750
5751	ret = si_convert_power_state_to_smc(adev, amdgpu_new_state, smc_state);
5752	if (ret)
5753		return ret;
5754
5755	return amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
5756					   state_size, si_pi->sram_end);
5757}
5758
5759static int si_upload_ulv_state(struct amdgpu_device *adev)
5760{
5761	struct si_power_info *si_pi = si_get_pi(adev);
5762	struct si_ulv_param *ulv = &si_pi->ulv;
5763	int ret = 0;
5764
5765	if (ulv->supported && ulv->pl.vddc) {
5766		u32 address = si_pi->state_table_start +
5767			offsetof(SISLANDS_SMC_STATETABLE, ULVState);
5768		SISLANDS_SMC_SWSTATE *smc_state = &si_pi->smc_statetable.ULVState;
5769		u32 state_size = sizeof(SISLANDS_SMC_SWSTATE);
5770
5771		memset(smc_state, 0, state_size);
5772
5773		ret = si_populate_ulv_state(adev, smc_state);
5774		if (!ret)
5775			ret = amdgpu_si_copy_bytes_to_smc(adev, address, (u8 *)smc_state,
5776							  state_size, si_pi->sram_end);
5777	}
5778
5779	return ret;
5780}
5781
5782static int si_upload_smc_data(struct amdgpu_device *adev)
5783{
5784	struct amdgpu_crtc *amdgpu_crtc = NULL;
5785	int i;
5786
5787	if (adev->pm.dpm.new_active_crtc_count == 0)
5788		return 0;
5789
5790	for (i = 0; i < adev->mode_info.num_crtc; i++) {
5791		if (adev->pm.dpm.new_active_crtcs & (1 << i)) {
5792			amdgpu_crtc = adev->mode_info.crtcs[i];
5793			break;
5794		}
5795	}
5796
5797	if (amdgpu_crtc == NULL)
5798		return 0;
5799
5800	if (amdgpu_crtc->line_time <= 0)
5801		return 0;
5802
5803	if (si_write_smc_soft_register(adev,
5804				       SI_SMC_SOFT_REGISTER_crtc_index,
5805				       amdgpu_crtc->crtc_id) != PPSMC_Result_OK)
5806		return 0;
5807
5808	if (si_write_smc_soft_register(adev,
5809				       SI_SMC_SOFT_REGISTER_mclk_change_block_cp_min,
5810				       amdgpu_crtc->wm_high / amdgpu_crtc->line_time) != PPSMC_Result_OK)
5811		return 0;
5812
5813	if (si_write_smc_soft_register(adev,
5814				       SI_SMC_SOFT_REGISTER_mclk_change_block_cp_max,
5815				       amdgpu_crtc->wm_low / amdgpu_crtc->line_time) != PPSMC_Result_OK)
5816		return 0;
5817
5818	return 0;
5819}
5820
5821static int si_set_mc_special_registers(struct amdgpu_device *adev,
5822				       struct si_mc_reg_table *table)
5823{
5824	u8 i, j, k;
5825	u32 temp_reg;
5826
5827	for (i = 0, j = table->last; i < table->last; i++) {
5828		if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5829			return -EINVAL;
5830		switch (table->mc_reg_address[i].s1) {
5831		case MC_SEQ_MISC1:
5832			temp_reg = RREG32(MC_PMG_CMD_EMRS);
5833			table->mc_reg_address[j].s1 = MC_PMG_CMD_EMRS;
5834			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_EMRS_LP;
5835			for (k = 0; k < table->num_entries; k++)
5836				table->mc_reg_table_entry[k].mc_data[j] =
5837					((temp_reg & 0xffff0000)) |
5838					((table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16);
5839			j++;
5840
5841			if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5842				return -EINVAL;
5843			temp_reg = RREG32(MC_PMG_CMD_MRS);
5844			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS;
5845			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS_LP;
5846			for (k = 0; k < table->num_entries; k++) {
5847				table->mc_reg_table_entry[k].mc_data[j] =
5848					(temp_reg & 0xffff0000) |
5849					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
5850				if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5)
5851					table->mc_reg_table_entry[k].mc_data[j] |= 0x100;
5852			}
5853			j++;
5854
5855			if (adev->gmc.vram_type != AMDGPU_VRAM_TYPE_GDDR5) {
5856				if (j >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5857					return -EINVAL;
5858				table->mc_reg_address[j].s1 = MC_PMG_AUTO_CMD;
5859				table->mc_reg_address[j].s0 = MC_PMG_AUTO_CMD;
5860				for (k = 0; k < table->num_entries; k++)
5861					table->mc_reg_table_entry[k].mc_data[j] =
5862						(table->mc_reg_table_entry[k].mc_data[i] & 0xffff0000) >> 16;
5863				j++;
5864			}
5865			break;
5866		case MC_SEQ_RESERVE_M:
5867			temp_reg = RREG32(MC_PMG_CMD_MRS1);
5868			table->mc_reg_address[j].s1 = MC_PMG_CMD_MRS1;
5869			table->mc_reg_address[j].s0 = MC_SEQ_PMG_CMD_MRS1_LP;
5870			for(k = 0; k < table->num_entries; k++)
5871				table->mc_reg_table_entry[k].mc_data[j] =
5872					(temp_reg & 0xffff0000) |
5873					(table->mc_reg_table_entry[k].mc_data[i] & 0x0000ffff);
5874			j++;
5875			break;
5876		default:
5877			break;
5878		}
5879	}
5880
5881	table->last = j;
5882
5883	return 0;
5884}
5885
5886static bool si_check_s0_mc_reg_index(u16 in_reg, u16 *out_reg)
5887{
5888	bool result = true;
5889	switch (in_reg) {
5890	case  MC_SEQ_RAS_TIMING:
5891		*out_reg = MC_SEQ_RAS_TIMING_LP;
5892		break;
5893	case MC_SEQ_CAS_TIMING:
5894		*out_reg = MC_SEQ_CAS_TIMING_LP;
5895		break;
5896	case MC_SEQ_MISC_TIMING:
5897		*out_reg = MC_SEQ_MISC_TIMING_LP;
5898		break;
5899	case MC_SEQ_MISC_TIMING2:
5900		*out_reg = MC_SEQ_MISC_TIMING2_LP;
5901		break;
5902	case MC_SEQ_RD_CTL_D0:
5903		*out_reg = MC_SEQ_RD_CTL_D0_LP;
5904		break;
5905	case MC_SEQ_RD_CTL_D1:
5906		*out_reg = MC_SEQ_RD_CTL_D1_LP;
5907		break;
5908	case MC_SEQ_WR_CTL_D0:
5909		*out_reg = MC_SEQ_WR_CTL_D0_LP;
5910		break;
5911	case MC_SEQ_WR_CTL_D1:
5912		*out_reg = MC_SEQ_WR_CTL_D1_LP;
5913		break;
5914	case MC_PMG_CMD_EMRS:
5915		*out_reg = MC_SEQ_PMG_CMD_EMRS_LP;
5916		break;
5917	case MC_PMG_CMD_MRS:
5918		*out_reg = MC_SEQ_PMG_CMD_MRS_LP;
5919		break;
5920	case MC_PMG_CMD_MRS1:
5921		*out_reg = MC_SEQ_PMG_CMD_MRS1_LP;
5922		break;
5923	case MC_SEQ_PMG_TIMING:
5924		*out_reg = MC_SEQ_PMG_TIMING_LP;
5925		break;
5926	case MC_PMG_CMD_MRS2:
5927		*out_reg = MC_SEQ_PMG_CMD_MRS2_LP;
5928		break;
5929	case MC_SEQ_WR_CTL_2:
5930		*out_reg = MC_SEQ_WR_CTL_2_LP;
5931		break;
5932	default:
5933		result = false;
5934		break;
5935	}
5936
5937	return result;
5938}
5939
5940static void si_set_valid_flag(struct si_mc_reg_table *table)
5941{
5942	u8 i, j;
5943
5944	for (i = 0; i < table->last; i++) {
5945		for (j = 1; j < table->num_entries; j++) {
5946			if (table->mc_reg_table_entry[j-1].mc_data[i] != table->mc_reg_table_entry[j].mc_data[i]) {
5947				table->valid_flag |= 1 << i;
5948				break;
5949			}
5950		}
5951	}
5952}
5953
5954static void si_set_s0_mc_reg_index(struct si_mc_reg_table *table)
5955{
5956	u32 i;
5957	u16 address;
5958
5959	for (i = 0; i < table->last; i++)
5960		table->mc_reg_address[i].s0 = si_check_s0_mc_reg_index(table->mc_reg_address[i].s1, &address) ?
5961			address : table->mc_reg_address[i].s1;
5962
5963}
5964
5965static int si_copy_vbios_mc_reg_table(struct atom_mc_reg_table *table,
5966				      struct si_mc_reg_table *si_table)
5967{
5968	u8 i, j;
5969
5970	if (table->last > SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
5971		return -EINVAL;
5972	if (table->num_entries > MAX_AC_TIMING_ENTRIES)
5973		return -EINVAL;
5974
5975	for (i = 0; i < table->last; i++)
5976		si_table->mc_reg_address[i].s1 = table->mc_reg_address[i].s1;
5977	si_table->last = table->last;
5978
5979	for (i = 0; i < table->num_entries; i++) {
5980		si_table->mc_reg_table_entry[i].mclk_max =
5981			table->mc_reg_table_entry[i].mclk_max;
5982		for (j = 0; j < table->last; j++) {
5983			si_table->mc_reg_table_entry[i].mc_data[j] =
5984				table->mc_reg_table_entry[i].mc_data[j];
5985		}
5986	}
5987	si_table->num_entries = table->num_entries;
5988
5989	return 0;
5990}
5991
5992static int si_initialize_mc_reg_table(struct amdgpu_device *adev)
5993{
5994	struct si_power_info *si_pi = si_get_pi(adev);
5995	struct atom_mc_reg_table *table;
5996	struct si_mc_reg_table *si_table = &si_pi->mc_reg_table;
5997	u8 module_index = rv770_get_memory_module_index(adev);
5998	int ret;
5999
6000	table = kzalloc(sizeof(struct atom_mc_reg_table), GFP_KERNEL);
6001	if (!table)
6002		return -ENOMEM;
6003
6004	WREG32(MC_SEQ_RAS_TIMING_LP, RREG32(MC_SEQ_RAS_TIMING));
6005	WREG32(MC_SEQ_CAS_TIMING_LP, RREG32(MC_SEQ_CAS_TIMING));
6006	WREG32(MC_SEQ_MISC_TIMING_LP, RREG32(MC_SEQ_MISC_TIMING));
6007	WREG32(MC_SEQ_MISC_TIMING2_LP, RREG32(MC_SEQ_MISC_TIMING2));
6008	WREG32(MC_SEQ_PMG_CMD_EMRS_LP, RREG32(MC_PMG_CMD_EMRS));
6009	WREG32(MC_SEQ_PMG_CMD_MRS_LP, RREG32(MC_PMG_CMD_MRS));
6010	WREG32(MC_SEQ_PMG_CMD_MRS1_LP, RREG32(MC_PMG_CMD_MRS1));
6011	WREG32(MC_SEQ_WR_CTL_D0_LP, RREG32(MC_SEQ_WR_CTL_D0));
6012	WREG32(MC_SEQ_WR_CTL_D1_LP, RREG32(MC_SEQ_WR_CTL_D1));
6013	WREG32(MC_SEQ_RD_CTL_D0_LP, RREG32(MC_SEQ_RD_CTL_D0));
6014	WREG32(MC_SEQ_RD_CTL_D1_LP, RREG32(MC_SEQ_RD_CTL_D1));
6015	WREG32(MC_SEQ_PMG_TIMING_LP, RREG32(MC_SEQ_PMG_TIMING));
6016	WREG32(MC_SEQ_PMG_CMD_MRS2_LP, RREG32(MC_PMG_CMD_MRS2));
6017	WREG32(MC_SEQ_WR_CTL_2_LP, RREG32(MC_SEQ_WR_CTL_2));
6018
6019	ret = amdgpu_atombios_init_mc_reg_table(adev, module_index, table);
6020	if (ret)
6021		goto init_mc_done;
6022
6023	ret = si_copy_vbios_mc_reg_table(table, si_table);
6024	if (ret)
6025		goto init_mc_done;
6026
6027	si_set_s0_mc_reg_index(si_table);
6028
6029	ret = si_set_mc_special_registers(adev, si_table);
6030	if (ret)
6031		goto init_mc_done;
6032
6033	si_set_valid_flag(si_table);
6034
6035init_mc_done:
6036	kfree(table);
6037
6038	return ret;
6039
6040}
6041
6042static void si_populate_mc_reg_addresses(struct amdgpu_device *adev,
6043					 SMC_SIslands_MCRegisters *mc_reg_table)
6044{
6045	struct si_power_info *si_pi = si_get_pi(adev);
6046	u32 i, j;
6047
6048	for (i = 0, j = 0; j < si_pi->mc_reg_table.last; j++) {
6049		if (si_pi->mc_reg_table.valid_flag & (1 << j)) {
6050			if (i >= SMC_SISLANDS_MC_REGISTER_ARRAY_SIZE)
6051				break;
6052			mc_reg_table->address[i].s0 =
6053				cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s0);
6054			mc_reg_table->address[i].s1 =
6055				cpu_to_be16(si_pi->mc_reg_table.mc_reg_address[j].s1);
6056			i++;
6057		}
6058	}
6059	mc_reg_table->last = (u8)i;
6060}
6061
6062static void si_convert_mc_registers(const struct si_mc_reg_entry *entry,
6063				    SMC_SIslands_MCRegisterSet *data,
6064				    u32 num_entries, u32 valid_flag)
6065{
6066	u32 i, j;
6067
6068	for(i = 0, j = 0; j < num_entries; j++) {
6069		if (valid_flag & (1 << j)) {
6070			data->value[i] = cpu_to_be32(entry->mc_data[j]);
6071			i++;
6072		}
6073	}
6074}
6075
6076static void si_convert_mc_reg_table_entry_to_smc(struct amdgpu_device *adev,
6077						 struct rv7xx_pl *pl,
6078						 SMC_SIslands_MCRegisterSet *mc_reg_table_data)
6079{
6080	struct si_power_info *si_pi = si_get_pi(adev);
6081	u32 i = 0;
6082
6083	for (i = 0; i < si_pi->mc_reg_table.num_entries; i++) {
6084		if (pl->mclk <= si_pi->mc_reg_table.mc_reg_table_entry[i].mclk_max)
6085			break;
6086	}
6087
6088	if ((i == si_pi->mc_reg_table.num_entries) && (i > 0))
6089		--i;
6090
6091	si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[i],
6092				mc_reg_table_data, si_pi->mc_reg_table.last,
6093				si_pi->mc_reg_table.valid_flag);
6094}
6095
6096static void si_convert_mc_reg_table_to_smc(struct amdgpu_device *adev,
6097					   struct amdgpu_ps *amdgpu_state,
6098					   SMC_SIslands_MCRegisters *mc_reg_table)
6099{
6100	struct si_ps *state = si_get_ps(amdgpu_state);
6101	int i;
6102
6103	for (i = 0; i < state->performance_level_count; i++) {
6104		si_convert_mc_reg_table_entry_to_smc(adev,
6105						     &state->performance_levels[i],
6106						     &mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT + i]);
6107	}
6108}
6109
6110static int si_populate_mc_reg_table(struct amdgpu_device *adev,
6111				    struct amdgpu_ps *amdgpu_boot_state)
6112{
6113	struct  si_ps *boot_state = si_get_ps(amdgpu_boot_state);
6114	struct si_power_info *si_pi = si_get_pi(adev);
6115	struct si_ulv_param *ulv = &si_pi->ulv;
6116	SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
6117
6118	memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
6119
6120	si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_seq_index, 1);
6121
6122	si_populate_mc_reg_addresses(adev, smc_mc_reg_table);
6123
6124	si_convert_mc_reg_table_entry_to_smc(adev, &boot_state->performance_levels[0],
6125					     &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_INITIAL_SLOT]);
6126
6127	si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
6128				&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ACPI_SLOT],
6129				si_pi->mc_reg_table.last,
6130				si_pi->mc_reg_table.valid_flag);
6131
6132	if (ulv->supported && ulv->pl.vddc != 0)
6133		si_convert_mc_reg_table_entry_to_smc(adev, &ulv->pl,
6134						     &smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT]);
6135	else
6136		si_convert_mc_registers(&si_pi->mc_reg_table.mc_reg_table_entry[0],
6137					&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_ULV_SLOT],
6138					si_pi->mc_reg_table.last,
6139					si_pi->mc_reg_table.valid_flag);
6140
6141	si_convert_mc_reg_table_to_smc(adev, amdgpu_boot_state, smc_mc_reg_table);
6142
6143	return amdgpu_si_copy_bytes_to_smc(adev, si_pi->mc_reg_table_start,
6144					   (u8 *)smc_mc_reg_table,
6145					   sizeof(SMC_SIslands_MCRegisters), si_pi->sram_end);
6146}
6147
6148static int si_upload_mc_reg_table(struct amdgpu_device *adev,
6149				  struct amdgpu_ps *amdgpu_new_state)
6150{
6151	struct si_ps *new_state = si_get_ps(amdgpu_new_state);
6152	struct si_power_info *si_pi = si_get_pi(adev);
6153	u32 address = si_pi->mc_reg_table_start +
6154		offsetof(SMC_SIslands_MCRegisters,
6155			 data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT]);
6156	SMC_SIslands_MCRegisters *smc_mc_reg_table = &si_pi->smc_mc_reg_table;
6157
6158	memset(smc_mc_reg_table, 0, sizeof(SMC_SIslands_MCRegisters));
6159
6160	si_convert_mc_reg_table_to_smc(adev, amdgpu_new_state, smc_mc_reg_table);
6161
6162	return amdgpu_si_copy_bytes_to_smc(adev, address,
6163					   (u8 *)&smc_mc_reg_table->data[SISLANDS_MCREGISTERTABLE_FIRST_DRIVERSTATE_SLOT],
6164					   sizeof(SMC_SIslands_MCRegisterSet) * new_state->performance_level_count,
6165					   si_pi->sram_end);
6166}
6167
6168static void si_enable_voltage_control(struct amdgpu_device *adev, bool enable)
6169{
6170	if (enable)
6171		WREG32_P(GENERAL_PWRMGT, VOLT_PWRMGT_EN, ~VOLT_PWRMGT_EN);
6172	else
6173		WREG32_P(GENERAL_PWRMGT, 0, ~VOLT_PWRMGT_EN);
6174}
6175
6176static enum amdgpu_pcie_gen si_get_maximum_link_speed(struct amdgpu_device *adev,
6177						      struct amdgpu_ps *amdgpu_state)
6178{
6179	struct si_ps *state = si_get_ps(amdgpu_state);
6180	int i;
6181	u16 pcie_speed, max_speed = 0;
6182
6183	for (i = 0; i < state->performance_level_count; i++) {
6184		pcie_speed = state->performance_levels[i].pcie_gen;
6185		if (max_speed < pcie_speed)
6186			max_speed = pcie_speed;
6187	}
6188	return max_speed;
6189}
6190
6191static u16 si_get_current_pcie_speed(struct amdgpu_device *adev)
6192{
6193	u32 speed_cntl;
6194
6195	speed_cntl = RREG32_PCIE_PORT(PCIE_LC_SPEED_CNTL) & LC_CURRENT_DATA_RATE_MASK;
6196	speed_cntl >>= LC_CURRENT_DATA_RATE_SHIFT;
6197
6198	return (u16)speed_cntl;
6199}
6200
6201static void si_request_link_speed_change_before_state_change(struct amdgpu_device *adev,
6202							     struct amdgpu_ps *amdgpu_new_state,
6203							     struct amdgpu_ps *amdgpu_current_state)
6204{
6205	struct si_power_info *si_pi = si_get_pi(adev);
6206	enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
6207	enum amdgpu_pcie_gen current_link_speed;
6208
6209	if (si_pi->force_pcie_gen == AMDGPU_PCIE_GEN_INVALID)
6210		current_link_speed = si_get_maximum_link_speed(adev, amdgpu_current_state);
6211	else
6212		current_link_speed = si_pi->force_pcie_gen;
6213
6214	si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
6215	si_pi->pspp_notify_required = false;
6216	if (target_link_speed > current_link_speed) {
6217		switch (target_link_speed) {
6218#if defined(CONFIG_ACPI)
6219		case AMDGPU_PCIE_GEN3:
6220			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN3, false) == 0)
6221				break;
6222			si_pi->force_pcie_gen = AMDGPU_PCIE_GEN2;
6223			if (current_link_speed == AMDGPU_PCIE_GEN2)
6224				break;
6225			/* fall through */
6226		case AMDGPU_PCIE_GEN2:
6227			if (amdgpu_acpi_pcie_performance_request(adev, PCIE_PERF_REQ_PECI_GEN2, false) == 0)
6228				break;
6229#endif
6230			/* fall through */
6231		default:
6232			si_pi->force_pcie_gen = si_get_current_pcie_speed(adev);
6233			break;
6234		}
6235	} else {
6236		if (target_link_speed < current_link_speed)
6237			si_pi->pspp_notify_required = true;
6238	}
6239}
6240
6241static void si_notify_link_speed_change_after_state_change(struct amdgpu_device *adev,
6242							   struct amdgpu_ps *amdgpu_new_state,
6243							   struct amdgpu_ps *amdgpu_current_state)
6244{
6245	struct si_power_info *si_pi = si_get_pi(adev);
6246	enum amdgpu_pcie_gen target_link_speed = si_get_maximum_link_speed(adev, amdgpu_new_state);
6247	u8 request;
6248
6249	if (si_pi->pspp_notify_required) {
6250		if (target_link_speed == AMDGPU_PCIE_GEN3)
6251			request = PCIE_PERF_REQ_PECI_GEN3;
6252		else if (target_link_speed == AMDGPU_PCIE_GEN2)
6253			request = PCIE_PERF_REQ_PECI_GEN2;
6254		else
6255			request = PCIE_PERF_REQ_PECI_GEN1;
6256
6257		if ((request == PCIE_PERF_REQ_PECI_GEN1) &&
6258		    (si_get_current_pcie_speed(adev) > 0))
6259			return;
6260
6261#if defined(CONFIG_ACPI)
6262		amdgpu_acpi_pcie_performance_request(adev, request, false);
6263#endif
6264	}
6265}
6266
6267#if 0
6268static int si_ds_request(struct amdgpu_device *adev,
6269			 bool ds_status_on, u32 count_write)
6270{
6271	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
6272
6273	if (eg_pi->sclk_deep_sleep) {
6274		if (ds_status_on)
6275			return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_CancelThrottleOVRDSCLKDS) ==
6276				PPSMC_Result_OK) ?
6277				0 : -EINVAL;
6278		else
6279			return (amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_ThrottleOVRDSCLKDS) ==
6280				PPSMC_Result_OK) ? 0 : -EINVAL;
6281	}
6282	return 0;
6283}
6284#endif
6285
6286static void si_set_max_cu_value(struct amdgpu_device *adev)
6287{
6288	struct si_power_info *si_pi = si_get_pi(adev);
6289
6290	if (adev->asic_type == CHIP_VERDE) {
6291		switch (adev->pdev->device) {
6292		case 0x6820:
6293		case 0x6825:
6294		case 0x6821:
6295		case 0x6823:
6296		case 0x6827:
6297			si_pi->max_cu = 10;
6298			break;
6299		case 0x682D:
6300		case 0x6824:
6301		case 0x682F:
6302		case 0x6826:
6303			si_pi->max_cu = 8;
6304			break;
6305		case 0x6828:
6306		case 0x6830:
6307		case 0x6831:
6308		case 0x6838:
6309		case 0x6839:
6310		case 0x683D:
6311			si_pi->max_cu = 10;
6312			break;
6313		case 0x683B:
6314		case 0x683F:
6315		case 0x6829:
6316			si_pi->max_cu = 8;
6317			break;
6318		default:
6319			si_pi->max_cu = 0;
6320			break;
6321		}
6322	} else {
6323		si_pi->max_cu = 0;
6324	}
6325}
6326
6327static int si_patch_single_dependency_table_based_on_leakage(struct amdgpu_device *adev,
6328							     struct amdgpu_clock_voltage_dependency_table *table)
6329{
6330	u32 i;
6331	int j;
6332	u16 leakage_voltage;
6333
6334	if (table) {
6335		for (i = 0; i < table->count; i++) {
6336			switch (si_get_leakage_voltage_from_leakage_index(adev,
6337									  table->entries[i].v,
6338									  &leakage_voltage)) {
6339			case 0:
6340				table->entries[i].v = leakage_voltage;
6341				break;
6342			case -EAGAIN:
6343				return -EINVAL;
6344			case -EINVAL:
6345			default:
6346				break;
6347			}
6348		}
6349
6350		for (j = (table->count - 2); j >= 0; j--) {
6351			table->entries[j].v = (table->entries[j].v <= table->entries[j + 1].v) ?
6352				table->entries[j].v : table->entries[j + 1].v;
6353		}
6354	}
6355	return 0;
6356}
6357
6358static int si_patch_dependency_tables_based_on_leakage(struct amdgpu_device *adev)
6359{
6360	int ret = 0;
6361
6362	ret = si_patch_single_dependency_table_based_on_leakage(adev,
6363								&adev->pm.dpm.dyn_state.vddc_dependency_on_sclk);
6364	if (ret)
6365		DRM_ERROR("Could not patch vddc_on_sclk leakage table\n");
6366	ret = si_patch_single_dependency_table_based_on_leakage(adev,
6367								&adev->pm.dpm.dyn_state.vddc_dependency_on_mclk);
6368	if (ret)
6369		DRM_ERROR("Could not patch vddc_on_mclk leakage table\n");
6370	ret = si_patch_single_dependency_table_based_on_leakage(adev,
6371								&adev->pm.dpm.dyn_state.vddci_dependency_on_mclk);
6372	if (ret)
6373		DRM_ERROR("Could not patch vddci_on_mclk leakage table\n");
6374	return ret;
6375}
6376
6377static void si_set_pcie_lane_width_in_smc(struct amdgpu_device *adev,
6378					  struct amdgpu_ps *amdgpu_new_state,
6379					  struct amdgpu_ps *amdgpu_current_state)
6380{
6381	u32 lane_width;
6382	u32 new_lane_width =
6383		((amdgpu_new_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
6384	u32 current_lane_width =
6385		((amdgpu_current_state->caps & ATOM_PPLIB_PCIE_LINK_WIDTH_MASK) >> ATOM_PPLIB_PCIE_LINK_WIDTH_SHIFT) + 1;
6386
6387	if (new_lane_width != current_lane_width) {
6388		amdgpu_set_pcie_lanes(adev, new_lane_width);
6389		lane_width = amdgpu_get_pcie_lanes(adev);
6390		si_write_smc_soft_register(adev, SI_SMC_SOFT_REGISTER_non_ulv_pcie_link_width, lane_width);
6391	}
6392}
6393
6394static void si_dpm_setup_asic(struct amdgpu_device *adev)
6395{
6396	si_read_clock_registers(adev);
6397	si_enable_acpi_power_management(adev);
6398}
6399
6400static int si_thermal_enable_alert(struct amdgpu_device *adev,
6401				   bool enable)
6402{
6403	u32 thermal_int = RREG32(CG_THERMAL_INT);
6404
6405	if (enable) {
6406		PPSMC_Result result;
6407
6408		thermal_int &= ~(THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW);
6409		WREG32(CG_THERMAL_INT, thermal_int);
6410		result = amdgpu_si_send_msg_to_smc(adev, PPSMC_MSG_EnableThermalInterrupt);
6411		if (result != PPSMC_Result_OK) {
6412			DRM_DEBUG_KMS("Could not enable thermal interrupts.\n");
6413			return -EINVAL;
6414		}
6415	} else {
6416		thermal_int |= THERM_INT_MASK_HIGH | THERM_INT_MASK_LOW;
6417		WREG32(CG_THERMAL_INT, thermal_int);
6418	}
6419
6420	return 0;
6421}
6422
6423static int si_thermal_set_temperature_range(struct amdgpu_device *adev,
6424					    int min_temp, int max_temp)
6425{
6426	int low_temp = 0 * 1000;
6427	int high_temp = 255 * 1000;
6428
6429	if (low_temp < min_temp)
6430		low_temp = min_temp;
6431	if (high_temp > max_temp)
6432		high_temp = max_temp;
6433	if (high_temp < low_temp) {
6434		DRM_ERROR("invalid thermal range: %d - %d\n", low_temp, high_temp);
6435		return -EINVAL;
6436	}
6437
6438	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTH(high_temp / 1000), ~DIG_THERM_INTH_MASK);
6439	WREG32_P(CG_THERMAL_INT, DIG_THERM_INTL(low_temp / 1000), ~DIG_THERM_INTL_MASK);
6440	WREG32_P(CG_THERMAL_CTRL, DIG_THERM_DPM(high_temp / 1000), ~DIG_THERM_DPM_MASK);
6441
6442	adev->pm.dpm.thermal.min_temp = low_temp;
6443	adev->pm.dpm.thermal.max_temp = high_temp;
6444
6445	return 0;
6446}
6447
6448static void si_fan_ctrl_set_static_mode(struct amdgpu_device *adev, u32 mode)
6449{
6450	struct si_power_info *si_pi = si_get_pi(adev);
6451	u32 tmp;
6452
6453	if (si_pi->fan_ctrl_is_in_default_mode) {
6454		tmp = (RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK) >> FDO_PWM_MODE_SHIFT;
6455		si_pi->fan_ctrl_default_mode = tmp;
6456		tmp = (RREG32(CG_FDO_CTRL2) & TMIN_MASK) >> TMIN_SHIFT;
6457		si_pi->t_min = tmp;
6458		si_pi->fan_ctrl_is_in_default_mode = false;
6459	}
6460
6461	tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
6462	tmp |= TMIN(0);
6463	WREG32(CG_FDO_CTRL2, tmp);
6464
6465	tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
6466	tmp |= FDO_PWM_MODE(mode);
6467	WREG32(CG_FDO_CTRL2, tmp);
6468}
6469
6470static int si_thermal_setup_fan_table(struct amdgpu_device *adev)
6471{
6472	struct si_power_info *si_pi = si_get_pi(adev);
6473	PP_SIslands_FanTable fan_table = { FDO_MODE_HARDWARE };
6474	u32 duty100;
6475	u32 t_diff1, t_diff2, pwm_diff1, pwm_diff2;
6476	u16 fdo_min, slope1, slope2;
6477	u32 reference_clock, tmp;
6478	int ret;
6479	u64 tmp64;
6480
6481	if (!si_pi->fan_table_start) {
6482		adev->pm.dpm.fan.ucode_fan_control = false;
6483		return 0;
6484	}
6485
6486	duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
6487
6488	if (duty100 == 0) {
6489		adev->pm.dpm.fan.ucode_fan_control = false;
6490		return 0;
6491	}
6492
6493	tmp64 = (u64)adev->pm.dpm.fan.pwm_min * duty100;
6494	do_div(tmp64, 10000);
6495	fdo_min = (u16)tmp64;
6496
6497	t_diff1 = adev->pm.dpm.fan.t_med - adev->pm.dpm.fan.t_min;
6498	t_diff2 = adev->pm.dpm.fan.t_high - adev->pm.dpm.fan.t_med;
6499
6500	pwm_diff1 = adev->pm.dpm.fan.pwm_med - adev->pm.dpm.fan.pwm_min;
6501	pwm_diff2 = adev->pm.dpm.fan.pwm_high - adev->pm.dpm.fan.pwm_med;
6502
6503	slope1 = (u16)((50 + ((16 * duty100 * pwm_diff1) / t_diff1)) / 100);
6504	slope2 = (u16)((50 + ((16 * duty100 * pwm_diff2) / t_diff2)) / 100);
6505
6506	fan_table.temp_min = cpu_to_be16((50 + adev->pm.dpm.fan.t_min) / 100);
6507	fan_table.temp_med = cpu_to_be16((50 + adev->pm.dpm.fan.t_med) / 100);
6508	fan_table.temp_max = cpu_to_be16((50 + adev->pm.dpm.fan.t_max) / 100);
6509	fan_table.slope1 = cpu_to_be16(slope1);
6510	fan_table.slope2 = cpu_to_be16(slope2);
6511	fan_table.fdo_min = cpu_to_be16(fdo_min);
6512	fan_table.hys_down = cpu_to_be16(adev->pm.dpm.fan.t_hyst);
6513	fan_table.hys_up = cpu_to_be16(1);
6514	fan_table.hys_slope = cpu_to_be16(1);
6515	fan_table.temp_resp_lim = cpu_to_be16(5);
6516	reference_clock = amdgpu_asic_get_xclk(adev);
6517
6518	fan_table.refresh_period = cpu_to_be32((adev->pm.dpm.fan.cycle_delay *
6519						reference_clock) / 1600);
6520	fan_table.fdo_max = cpu_to_be16((u16)duty100);
6521
6522	tmp = (RREG32(CG_MULT_THERMAL_CTRL) & TEMP_SEL_MASK) >> TEMP_SEL_SHIFT;
6523	fan_table.temp_src = (uint8_t)tmp;
6524
6525	ret = amdgpu_si_copy_bytes_to_smc(adev,
6526					  si_pi->fan_table_start,
6527					  (u8 *)(&fan_table),
6528					  sizeof(fan_table),
6529					  si_pi->sram_end);
6530
6531	if (ret) {
6532		DRM_ERROR("Failed to load fan table to the SMC.");
6533		adev->pm.dpm.fan.ucode_fan_control = false;
6534	}
6535
6536	return ret;
6537}
6538
6539static int si_fan_ctrl_start_smc_fan_control(struct amdgpu_device *adev)
6540{
6541	struct si_power_info *si_pi = si_get_pi(adev);
6542	PPSMC_Result ret;
6543
6544	ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StartFanControl);
6545	if (ret == PPSMC_Result_OK) {
6546		si_pi->fan_is_controlled_by_smc = true;
6547		return 0;
6548	} else {
6549		return -EINVAL;
6550	}
6551}
6552
6553static int si_fan_ctrl_stop_smc_fan_control(struct amdgpu_device *adev)
6554{
6555	struct si_power_info *si_pi = si_get_pi(adev);
6556	PPSMC_Result ret;
6557
6558	ret = amdgpu_si_send_msg_to_smc(adev, PPSMC_StopFanControl);
6559
6560	if (ret == PPSMC_Result_OK) {
6561		si_pi->fan_is_controlled_by_smc = false;
6562		return 0;
6563	} else {
6564		return -EINVAL;
6565	}
6566}
6567
6568static int si_dpm_get_fan_speed_percent(void *handle,
6569				      u32 *speed)
6570{
6571	u32 duty, duty100;
6572	u64 tmp64;
6573	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6574
6575	if (adev->pm.no_fan)
6576		return -ENOENT;
6577
6578	duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
6579	duty = (RREG32(CG_THERMAL_STATUS) & FDO_PWM_DUTY_MASK) >> FDO_PWM_DUTY_SHIFT;
6580
6581	if (duty100 == 0)
6582		return -EINVAL;
6583
6584	tmp64 = (u64)duty * 100;
6585	do_div(tmp64, duty100);
6586	*speed = (u32)tmp64;
6587
6588	if (*speed > 100)
6589		*speed = 100;
6590
6591	return 0;
6592}
6593
6594static int si_dpm_set_fan_speed_percent(void *handle,
6595				      u32 speed)
6596{
6597	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6598	struct si_power_info *si_pi = si_get_pi(adev);
6599	u32 tmp;
6600	u32 duty, duty100;
6601	u64 tmp64;
6602
6603	if (adev->pm.no_fan)
6604		return -ENOENT;
6605
6606	if (si_pi->fan_is_controlled_by_smc)
6607		return -EINVAL;
6608
6609	if (speed > 100)
6610		return -EINVAL;
6611
6612	duty100 = (RREG32(CG_FDO_CTRL1) & FMAX_DUTY100_MASK) >> FMAX_DUTY100_SHIFT;
6613
6614	if (duty100 == 0)
6615		return -EINVAL;
6616
6617	tmp64 = (u64)speed * duty100;
6618	do_div(tmp64, 100);
6619	duty = (u32)tmp64;
6620
6621	tmp = RREG32(CG_FDO_CTRL0) & ~FDO_STATIC_DUTY_MASK;
6622	tmp |= FDO_STATIC_DUTY(duty);
6623	WREG32(CG_FDO_CTRL0, tmp);
6624
6625	return 0;
6626}
6627
6628static void si_dpm_set_fan_control_mode(void *handle, u32 mode)
6629{
6630	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6631
6632	if (mode) {
6633		/* stop auto-manage */
6634		if (adev->pm.dpm.fan.ucode_fan_control)
6635			si_fan_ctrl_stop_smc_fan_control(adev);
6636		si_fan_ctrl_set_static_mode(adev, mode);
6637	} else {
6638		/* restart auto-manage */
6639		if (adev->pm.dpm.fan.ucode_fan_control)
6640			si_thermal_start_smc_fan_control(adev);
6641		else
6642			si_fan_ctrl_set_default_mode(adev);
6643	}
6644}
6645
6646static u32 si_dpm_get_fan_control_mode(void *handle)
6647{
6648	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6649	struct si_power_info *si_pi = si_get_pi(adev);
6650	u32 tmp;
6651
6652	if (si_pi->fan_is_controlled_by_smc)
6653		return 0;
6654
6655	tmp = RREG32(CG_FDO_CTRL2) & FDO_PWM_MODE_MASK;
6656	return (tmp >> FDO_PWM_MODE_SHIFT);
6657}
6658
6659#if 0
6660static int si_fan_ctrl_get_fan_speed_rpm(struct amdgpu_device *adev,
6661					 u32 *speed)
6662{
6663	u32 tach_period;
6664	u32 xclk = amdgpu_asic_get_xclk(adev);
6665
6666	if (adev->pm.no_fan)
6667		return -ENOENT;
6668
6669	if (adev->pm.fan_pulses_per_revolution == 0)
6670		return -ENOENT;
6671
6672	tach_period = (RREG32(CG_TACH_STATUS) & TACH_PERIOD_MASK) >> TACH_PERIOD_SHIFT;
6673	if (tach_period == 0)
6674		return -ENOENT;
6675
6676	*speed = 60 * xclk * 10000 / tach_period;
6677
6678	return 0;
6679}
6680
6681static int si_fan_ctrl_set_fan_speed_rpm(struct amdgpu_device *adev,
6682					 u32 speed)
6683{
6684	u32 tach_period, tmp;
6685	u32 xclk = amdgpu_asic_get_xclk(adev);
6686
6687	if (adev->pm.no_fan)
6688		return -ENOENT;
6689
6690	if (adev->pm.fan_pulses_per_revolution == 0)
6691		return -ENOENT;
6692
6693	if ((speed < adev->pm.fan_min_rpm) ||
6694	    (speed > adev->pm.fan_max_rpm))
6695		return -EINVAL;
6696
6697	if (adev->pm.dpm.fan.ucode_fan_control)
6698		si_fan_ctrl_stop_smc_fan_control(adev);
6699
6700	tach_period = 60 * xclk * 10000 / (8 * speed);
6701	tmp = RREG32(CG_TACH_CTRL) & ~TARGET_PERIOD_MASK;
6702	tmp |= TARGET_PERIOD(tach_period);
6703	WREG32(CG_TACH_CTRL, tmp);
6704
6705	si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC_RPM);
6706
6707	return 0;
6708}
6709#endif
6710
6711static void si_fan_ctrl_set_default_mode(struct amdgpu_device *adev)
6712{
6713	struct si_power_info *si_pi = si_get_pi(adev);
6714	u32 tmp;
6715
6716	if (!si_pi->fan_ctrl_is_in_default_mode) {
6717		tmp = RREG32(CG_FDO_CTRL2) & ~FDO_PWM_MODE_MASK;
6718		tmp |= FDO_PWM_MODE(si_pi->fan_ctrl_default_mode);
6719		WREG32(CG_FDO_CTRL2, tmp);
6720
6721		tmp = RREG32(CG_FDO_CTRL2) & ~TMIN_MASK;
6722		tmp |= TMIN(si_pi->t_min);
6723		WREG32(CG_FDO_CTRL2, tmp);
6724		si_pi->fan_ctrl_is_in_default_mode = true;
6725	}
6726}
6727
6728static void si_thermal_start_smc_fan_control(struct amdgpu_device *adev)
6729{
6730	if (adev->pm.dpm.fan.ucode_fan_control) {
6731		si_fan_ctrl_start_smc_fan_control(adev);
6732		si_fan_ctrl_set_static_mode(adev, FDO_PWM_MODE_STATIC);
6733	}
6734}
6735
6736static void si_thermal_initialize(struct amdgpu_device *adev)
6737{
6738	u32 tmp;
6739
6740	if (adev->pm.fan_pulses_per_revolution) {
6741		tmp = RREG32(CG_TACH_CTRL) & ~EDGE_PER_REV_MASK;
6742		tmp |= EDGE_PER_REV(adev->pm.fan_pulses_per_revolution -1);
6743		WREG32(CG_TACH_CTRL, tmp);
6744	}
6745
6746	tmp = RREG32(CG_FDO_CTRL2) & ~TACH_PWM_RESP_RATE_MASK;
6747	tmp |= TACH_PWM_RESP_RATE(0x28);
6748	WREG32(CG_FDO_CTRL2, tmp);
6749}
6750
6751static int si_thermal_start_thermal_controller(struct amdgpu_device *adev)
6752{
6753	int ret;
6754
6755	si_thermal_initialize(adev);
6756	ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
6757	if (ret)
6758		return ret;
6759	ret = si_thermal_enable_alert(adev, true);
6760	if (ret)
6761		return ret;
6762	if (adev->pm.dpm.fan.ucode_fan_control) {
6763		ret = si_halt_smc(adev);
6764		if (ret)
6765			return ret;
6766		ret = si_thermal_setup_fan_table(adev);
6767		if (ret)
6768			return ret;
6769		ret = si_resume_smc(adev);
6770		if (ret)
6771			return ret;
6772		si_thermal_start_smc_fan_control(adev);
6773	}
6774
6775	return 0;
6776}
6777
6778static void si_thermal_stop_thermal_controller(struct amdgpu_device *adev)
6779{
6780	if (!adev->pm.no_fan) {
6781		si_fan_ctrl_set_default_mode(adev);
6782		si_fan_ctrl_stop_smc_fan_control(adev);
6783	}
6784}
6785
6786static int si_dpm_enable(struct amdgpu_device *adev)
6787{
6788	struct rv7xx_power_info *pi = rv770_get_pi(adev);
6789	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
6790	struct si_power_info *si_pi = si_get_pi(adev);
6791	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
6792	int ret;
6793
6794	if (amdgpu_si_is_smc_running(adev))
6795		return -EINVAL;
6796	if (pi->voltage_control || si_pi->voltage_control_svi2)
6797		si_enable_voltage_control(adev, true);
6798	if (pi->mvdd_control)
6799		si_get_mvdd_configuration(adev);
6800	if (pi->voltage_control || si_pi->voltage_control_svi2) {
6801		ret = si_construct_voltage_tables(adev);
6802		if (ret) {
6803			DRM_ERROR("si_construct_voltage_tables failed\n");
6804			return ret;
6805		}
6806	}
6807	if (eg_pi->dynamic_ac_timing) {
6808		ret = si_initialize_mc_reg_table(adev);
6809		if (ret)
6810			eg_pi->dynamic_ac_timing = false;
6811	}
6812	if (pi->dynamic_ss)
6813		si_enable_spread_spectrum(adev, true);
6814	if (pi->thermal_protection)
6815		si_enable_thermal_protection(adev, true);
6816	si_setup_bsp(adev);
6817	si_program_git(adev);
6818	si_program_tp(adev);
6819	si_program_tpp(adev);
6820	si_program_sstp(adev);
6821	si_enable_display_gap(adev);
6822	si_program_vc(adev);
6823	ret = si_upload_firmware(adev);
6824	if (ret) {
6825		DRM_ERROR("si_upload_firmware failed\n");
6826		return ret;
6827	}
6828	ret = si_process_firmware_header(adev);
6829	if (ret) {
6830		DRM_ERROR("si_process_firmware_header failed\n");
6831		return ret;
6832	}
6833	ret = si_initial_switch_from_arb_f0_to_f1(adev);
6834	if (ret) {
6835		DRM_ERROR("si_initial_switch_from_arb_f0_to_f1 failed\n");
6836		return ret;
6837	}
6838	ret = si_init_smc_table(adev);
6839	if (ret) {
6840		DRM_ERROR("si_init_smc_table failed\n");
6841		return ret;
6842	}
6843	ret = si_init_smc_spll_table(adev);
6844	if (ret) {
6845		DRM_ERROR("si_init_smc_spll_table failed\n");
6846		return ret;
6847	}
6848	ret = si_init_arb_table_index(adev);
6849	if (ret) {
6850		DRM_ERROR("si_init_arb_table_index failed\n");
6851		return ret;
6852	}
6853	if (eg_pi->dynamic_ac_timing) {
6854		ret = si_populate_mc_reg_table(adev, boot_ps);
6855		if (ret) {
6856			DRM_ERROR("si_populate_mc_reg_table failed\n");
6857			return ret;
6858		}
6859	}
6860	ret = si_initialize_smc_cac_tables(adev);
6861	if (ret) {
6862		DRM_ERROR("si_initialize_smc_cac_tables failed\n");
6863		return ret;
6864	}
6865	ret = si_initialize_hardware_cac_manager(adev);
6866	if (ret) {
6867		DRM_ERROR("si_initialize_hardware_cac_manager failed\n");
6868		return ret;
6869	}
6870	ret = si_initialize_smc_dte_tables(adev);
6871	if (ret) {
6872		DRM_ERROR("si_initialize_smc_dte_tables failed\n");
6873		return ret;
6874	}
6875	ret = si_populate_smc_tdp_limits(adev, boot_ps);
6876	if (ret) {
6877		DRM_ERROR("si_populate_smc_tdp_limits failed\n");
6878		return ret;
6879	}
6880	ret = si_populate_smc_tdp_limits_2(adev, boot_ps);
6881	if (ret) {
6882		DRM_ERROR("si_populate_smc_tdp_limits_2 failed\n");
6883		return ret;
6884	}
6885	si_program_response_times(adev);
6886	si_program_ds_registers(adev);
6887	si_dpm_start_smc(adev);
6888	ret = si_notify_smc_display_change(adev, false);
6889	if (ret) {
6890		DRM_ERROR("si_notify_smc_display_change failed\n");
6891		return ret;
6892	}
6893	si_enable_sclk_control(adev, true);
6894	si_start_dpm(adev);
6895
6896	si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
6897	si_thermal_start_thermal_controller(adev);
6898
6899	return 0;
6900}
6901
6902static int si_set_temperature_range(struct amdgpu_device *adev)
6903{
6904	int ret;
6905
6906	ret = si_thermal_enable_alert(adev, false);
6907	if (ret)
6908		return ret;
6909	ret = si_thermal_set_temperature_range(adev, R600_TEMP_RANGE_MIN, R600_TEMP_RANGE_MAX);
6910	if (ret)
6911		return ret;
6912	ret = si_thermal_enable_alert(adev, true);
6913	if (ret)
6914		return ret;
6915
6916	return ret;
6917}
6918
6919static void si_dpm_disable(struct amdgpu_device *adev)
6920{
6921	struct rv7xx_power_info *pi = rv770_get_pi(adev);
6922	struct amdgpu_ps *boot_ps = adev->pm.dpm.boot_ps;
6923
6924	if (!amdgpu_si_is_smc_running(adev))
6925		return;
6926	si_thermal_stop_thermal_controller(adev);
6927	si_disable_ulv(adev);
6928	si_clear_vc(adev);
6929	if (pi->thermal_protection)
6930		si_enable_thermal_protection(adev, false);
6931	si_enable_power_containment(adev, boot_ps, false);
6932	si_enable_smc_cac(adev, boot_ps, false);
6933	si_enable_spread_spectrum(adev, false);
6934	si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, false);
6935	si_stop_dpm(adev);
6936	si_reset_to_default(adev);
6937	si_dpm_stop_smc(adev);
6938	si_force_switch_to_arb_f0(adev);
6939
6940	ni_update_current_ps(adev, boot_ps);
6941}
6942
6943static int si_dpm_pre_set_power_state(void *handle)
6944{
6945	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6946	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
6947	struct amdgpu_ps requested_ps = *adev->pm.dpm.requested_ps;
6948	struct amdgpu_ps *new_ps = &requested_ps;
6949
6950	ni_update_requested_ps(adev, new_ps);
6951	si_apply_state_adjust_rules(adev, &eg_pi->requested_rps);
6952
6953	return 0;
6954}
6955
6956static int si_power_control_set_level(struct amdgpu_device *adev)
6957{
6958	struct amdgpu_ps *new_ps = adev->pm.dpm.requested_ps;
6959	int ret;
6960
6961	ret = si_restrict_performance_levels_before_switch(adev);
6962	if (ret)
6963		return ret;
6964	ret = si_halt_smc(adev);
6965	if (ret)
6966		return ret;
6967	ret = si_populate_smc_tdp_limits(adev, new_ps);
6968	if (ret)
6969		return ret;
6970	ret = si_populate_smc_tdp_limits_2(adev, new_ps);
6971	if (ret)
6972		return ret;
6973	ret = si_resume_smc(adev);
6974	if (ret)
6975		return ret;
6976	ret = si_set_sw_state(adev);
6977	if (ret)
6978		return ret;
6979	return 0;
6980}
6981
6982static int si_dpm_set_power_state(void *handle)
6983{
6984	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
6985	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
6986	struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
6987	struct amdgpu_ps *old_ps = &eg_pi->current_rps;
6988	int ret;
6989
6990	ret = si_disable_ulv(adev);
6991	if (ret) {
6992		DRM_ERROR("si_disable_ulv failed\n");
6993		return ret;
6994	}
6995	ret = si_restrict_performance_levels_before_switch(adev);
6996	if (ret) {
6997		DRM_ERROR("si_restrict_performance_levels_before_switch failed\n");
6998		return ret;
6999	}
7000	if (eg_pi->pcie_performance_request)
7001		si_request_link_speed_change_before_state_change(adev, new_ps, old_ps);
7002	ni_set_uvd_clock_before_set_eng_clock(adev, new_ps, old_ps);
7003	ret = si_enable_power_containment(adev, new_ps, false);
7004	if (ret) {
7005		DRM_ERROR("si_enable_power_containment failed\n");
7006		return ret;
7007	}
7008	ret = si_enable_smc_cac(adev, new_ps, false);
7009	if (ret) {
7010		DRM_ERROR("si_enable_smc_cac failed\n");
7011		return ret;
7012	}
7013	ret = si_halt_smc(adev);
7014	if (ret) {
7015		DRM_ERROR("si_halt_smc failed\n");
7016		return ret;
7017	}
7018	ret = si_upload_sw_state(adev, new_ps);
7019	if (ret) {
7020		DRM_ERROR("si_upload_sw_state failed\n");
7021		return ret;
7022	}
7023	ret = si_upload_smc_data(adev);
7024	if (ret) {
7025		DRM_ERROR("si_upload_smc_data failed\n");
7026		return ret;
7027	}
7028	ret = si_upload_ulv_state(adev);
7029	if (ret) {
7030		DRM_ERROR("si_upload_ulv_state failed\n");
7031		return ret;
7032	}
7033	if (eg_pi->dynamic_ac_timing) {
7034		ret = si_upload_mc_reg_table(adev, new_ps);
7035		if (ret) {
7036			DRM_ERROR("si_upload_mc_reg_table failed\n");
7037			return ret;
7038		}
7039	}
7040	ret = si_program_memory_timing_parameters(adev, new_ps);
7041	if (ret) {
7042		DRM_ERROR("si_program_memory_timing_parameters failed\n");
7043		return ret;
7044	}
7045	si_set_pcie_lane_width_in_smc(adev, new_ps, old_ps);
7046
7047	ret = si_resume_smc(adev);
7048	if (ret) {
7049		DRM_ERROR("si_resume_smc failed\n");
7050		return ret;
7051	}
7052	ret = si_set_sw_state(adev);
7053	if (ret) {
7054		DRM_ERROR("si_set_sw_state failed\n");
7055		return ret;
7056	}
7057	ni_set_uvd_clock_after_set_eng_clock(adev, new_ps, old_ps);
7058	if (eg_pi->pcie_performance_request)
7059		si_notify_link_speed_change_after_state_change(adev, new_ps, old_ps);
7060	ret = si_set_power_state_conditionally_enable_ulv(adev, new_ps);
7061	if (ret) {
7062		DRM_ERROR("si_set_power_state_conditionally_enable_ulv failed\n");
7063		return ret;
7064	}
7065	ret = si_enable_smc_cac(adev, new_ps, true);
7066	if (ret) {
7067		DRM_ERROR("si_enable_smc_cac failed\n");
7068		return ret;
7069	}
7070	ret = si_enable_power_containment(adev, new_ps, true);
7071	if (ret) {
7072		DRM_ERROR("si_enable_power_containment failed\n");
7073		return ret;
7074	}
7075
7076	ret = si_power_control_set_level(adev);
7077	if (ret) {
7078		DRM_ERROR("si_power_control_set_level failed\n");
7079		return ret;
7080	}
7081
7082	return 0;
7083}
7084
7085static void si_dpm_post_set_power_state(void *handle)
7086{
7087	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7088	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
7089	struct amdgpu_ps *new_ps = &eg_pi->requested_rps;
7090
7091	ni_update_current_ps(adev, new_ps);
7092}
7093
7094#if 0
7095void si_dpm_reset_asic(struct amdgpu_device *adev)
7096{
7097	si_restrict_performance_levels_before_switch(adev);
7098	si_disable_ulv(adev);
7099	si_set_boot_state(adev);
7100}
7101#endif
7102
7103static void si_dpm_display_configuration_changed(void *handle)
7104{
7105	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7106
7107	si_program_display_gap(adev);
7108}
7109
7110
7111static void si_parse_pplib_non_clock_info(struct amdgpu_device *adev,
7112					  struct amdgpu_ps *rps,
7113					  struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info,
7114					  u8 table_rev)
7115{
7116	rps->caps = le32_to_cpu(non_clock_info->ulCapsAndSettings);
7117	rps->class = le16_to_cpu(non_clock_info->usClassification);
7118	rps->class2 = le16_to_cpu(non_clock_info->usClassification2);
7119
7120	if (ATOM_PPLIB_NONCLOCKINFO_VER1 < table_rev) {
7121		rps->vclk = le32_to_cpu(non_clock_info->ulVCLK);
7122		rps->dclk = le32_to_cpu(non_clock_info->ulDCLK);
7123	} else if (r600_is_uvd_state(rps->class, rps->class2)) {
7124		rps->vclk = RV770_DEFAULT_VCLK_FREQ;
7125		rps->dclk = RV770_DEFAULT_DCLK_FREQ;
7126	} else {
7127		rps->vclk = 0;
7128		rps->dclk = 0;
7129	}
7130
7131	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT)
7132		adev->pm.dpm.boot_ps = rps;
7133	if (rps->class & ATOM_PPLIB_CLASSIFICATION_UVDSTATE)
7134		adev->pm.dpm.uvd_ps = rps;
7135}
7136
7137static void si_parse_pplib_clock_info(struct amdgpu_device *adev,
7138				      struct amdgpu_ps *rps, int index,
7139				      union pplib_clock_info *clock_info)
7140{
7141	struct rv7xx_power_info *pi = rv770_get_pi(adev);
7142	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
7143	struct si_power_info *si_pi = si_get_pi(adev);
7144	struct  si_ps *ps = si_get_ps(rps);
7145	u16 leakage_voltage;
7146	struct rv7xx_pl *pl = &ps->performance_levels[index];
7147	int ret;
7148
7149	ps->performance_level_count = index + 1;
7150
7151	pl->sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
7152	pl->sclk |= clock_info->si.ucEngineClockHigh << 16;
7153	pl->mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
7154	pl->mclk |= clock_info->si.ucMemoryClockHigh << 16;
7155
7156	pl->vddc = le16_to_cpu(clock_info->si.usVDDC);
7157	pl->vddci = le16_to_cpu(clock_info->si.usVDDCI);
7158	pl->flags = le32_to_cpu(clock_info->si.ulFlags);
7159	pl->pcie_gen = amdgpu_get_pcie_gen_support(adev,
7160						   si_pi->sys_pcie_mask,
7161						   si_pi->boot_pcie_gen,
7162						   clock_info->si.ucPCIEGen);
7163
7164	/* patch up vddc if necessary */
7165	ret = si_get_leakage_voltage_from_leakage_index(adev, pl->vddc,
7166							&leakage_voltage);
7167	if (ret == 0)
7168		pl->vddc = leakage_voltage;
7169
7170	if (rps->class & ATOM_PPLIB_CLASSIFICATION_ACPI) {
7171		pi->acpi_vddc = pl->vddc;
7172		eg_pi->acpi_vddci = pl->vddci;
7173		si_pi->acpi_pcie_gen = pl->pcie_gen;
7174	}
7175
7176	if ((rps->class2 & ATOM_PPLIB_CLASSIFICATION2_ULV) &&
7177	    index == 0) {
7178		/* XXX disable for A0 tahiti */
7179		si_pi->ulv.supported = false;
7180		si_pi->ulv.pl = *pl;
7181		si_pi->ulv.one_pcie_lane_in_ulv = false;
7182		si_pi->ulv.volt_change_delay = SISLANDS_ULVVOLTAGECHANGEDELAY_DFLT;
7183		si_pi->ulv.cg_ulv_parameter = SISLANDS_CGULVPARAMETER_DFLT;
7184		si_pi->ulv.cg_ulv_control = SISLANDS_CGULVCONTROL_DFLT;
7185	}
7186
7187	if (pi->min_vddc_in_table > pl->vddc)
7188		pi->min_vddc_in_table = pl->vddc;
7189
7190	if (pi->max_vddc_in_table < pl->vddc)
7191		pi->max_vddc_in_table = pl->vddc;
7192
7193	/* patch up boot state */
7194	if (rps->class & ATOM_PPLIB_CLASSIFICATION_BOOT) {
7195		u16 vddc, vddci, mvdd;
7196		amdgpu_atombios_get_default_voltages(adev, &vddc, &vddci, &mvdd);
7197		pl->mclk = adev->clock.default_mclk;
7198		pl->sclk = adev->clock.default_sclk;
7199		pl->vddc = vddc;
7200		pl->vddci = vddci;
7201		si_pi->mvdd_bootup_value = mvdd;
7202	}
7203
7204	if ((rps->class & ATOM_PPLIB_CLASSIFICATION_UI_MASK) ==
7205	    ATOM_PPLIB_CLASSIFICATION_UI_PERFORMANCE) {
7206		adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.sclk = pl->sclk;
7207		adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.mclk = pl->mclk;
7208		adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddc = pl->vddc;
7209		adev->pm.dpm.dyn_state.max_clock_voltage_on_ac.vddci = pl->vddci;
7210	}
7211}
7212
7213union pplib_power_state {
7214	struct _ATOM_PPLIB_STATE v1;
7215	struct _ATOM_PPLIB_STATE_V2 v2;
7216};
7217
7218static int si_parse_power_table(struct amdgpu_device *adev)
7219{
7220	struct amdgpu_mode_info *mode_info = &adev->mode_info;
7221	struct _ATOM_PPLIB_NONCLOCK_INFO *non_clock_info;
7222	union pplib_power_state *power_state;
7223	int i, j, k, non_clock_array_index, clock_array_index;
7224	union pplib_clock_info *clock_info;
7225	struct _StateArray *state_array;
7226	struct _ClockInfoArray *clock_info_array;
7227	struct _NonClockInfoArray *non_clock_info_array;
7228	union power_info *power_info;
7229	int index = GetIndexIntoMasterTable(DATA, PowerPlayInfo);
7230	u16 data_offset;
7231	u8 frev, crev;
7232	u8 *power_state_offset;
7233	struct  si_ps *ps;
7234
7235	if (!amdgpu_atom_parse_data_header(mode_info->atom_context, index, NULL,
7236				   &frev, &crev, &data_offset))
7237		return -EINVAL;
7238	power_info = (union power_info *)(mode_info->atom_context->bios + data_offset);
7239
7240	amdgpu_add_thermal_controller(adev);
7241
7242	state_array = (struct _StateArray *)
7243		(mode_info->atom_context->bios + data_offset +
7244		 le16_to_cpu(power_info->pplib.usStateArrayOffset));
7245	clock_info_array = (struct _ClockInfoArray *)
7246		(mode_info->atom_context->bios + data_offset +
7247		 le16_to_cpu(power_info->pplib.usClockInfoArrayOffset));
7248	non_clock_info_array = (struct _NonClockInfoArray *)
7249		(mode_info->atom_context->bios + data_offset +
7250		 le16_to_cpu(power_info->pplib.usNonClockInfoArrayOffset));
7251
7252	adev->pm.dpm.ps = kcalloc(state_array->ucNumEntries,
7253				  sizeof(struct amdgpu_ps),
7254				  GFP_KERNEL);
7255	if (!adev->pm.dpm.ps)
7256		return -ENOMEM;
7257	power_state_offset = (u8 *)state_array->states;
7258	for (i = 0; i < state_array->ucNumEntries; i++) {
7259		u8 *idx;
7260		power_state = (union pplib_power_state *)power_state_offset;
7261		non_clock_array_index = power_state->v2.nonClockInfoIndex;
7262		non_clock_info = (struct _ATOM_PPLIB_NONCLOCK_INFO *)
7263			&non_clock_info_array->nonClockInfo[non_clock_array_index];
7264		ps = kzalloc(sizeof(struct  si_ps), GFP_KERNEL);
7265		if (ps == NULL) {
7266			kfree(adev->pm.dpm.ps);
7267			return -ENOMEM;
7268		}
7269		adev->pm.dpm.ps[i].ps_priv = ps;
7270		si_parse_pplib_non_clock_info(adev, &adev->pm.dpm.ps[i],
7271					      non_clock_info,
7272					      non_clock_info_array->ucEntrySize);
7273		k = 0;
7274		idx = (u8 *)&power_state->v2.clockInfoIndex[0];
7275		for (j = 0; j < power_state->v2.ucNumDPMLevels; j++) {
7276			clock_array_index = idx[j];
7277			if (clock_array_index >= clock_info_array->ucNumEntries)
7278				continue;
7279			if (k >= SISLANDS_MAX_HARDWARE_POWERLEVELS)
7280				break;
7281			clock_info = (union pplib_clock_info *)
7282				((u8 *)&clock_info_array->clockInfo[0] +
7283				 (clock_array_index * clock_info_array->ucEntrySize));
7284			si_parse_pplib_clock_info(adev,
7285						  &adev->pm.dpm.ps[i], k,
7286						  clock_info);
7287			k++;
7288		}
7289		power_state_offset += 2 + power_state->v2.ucNumDPMLevels;
7290	}
7291	adev->pm.dpm.num_ps = state_array->ucNumEntries;
7292
7293	/* fill in the vce power states */
7294	for (i = 0; i < adev->pm.dpm.num_of_vce_states; i++) {
7295		u32 sclk, mclk;
7296		clock_array_index = adev->pm.dpm.vce_states[i].clk_idx;
7297		clock_info = (union pplib_clock_info *)
7298			&clock_info_array->clockInfo[clock_array_index * clock_info_array->ucEntrySize];
7299		sclk = le16_to_cpu(clock_info->si.usEngineClockLow);
7300		sclk |= clock_info->si.ucEngineClockHigh << 16;
7301		mclk = le16_to_cpu(clock_info->si.usMemoryClockLow);
7302		mclk |= clock_info->si.ucMemoryClockHigh << 16;
7303		adev->pm.dpm.vce_states[i].sclk = sclk;
7304		adev->pm.dpm.vce_states[i].mclk = mclk;
7305	}
7306
7307	return 0;
7308}
7309
7310static int si_dpm_init(struct amdgpu_device *adev)
7311{
7312	struct rv7xx_power_info *pi;
7313	struct evergreen_power_info *eg_pi;
7314	struct ni_power_info *ni_pi;
7315	struct si_power_info *si_pi;
7316	struct atom_clock_dividers dividers;
7317	int ret;
7318
7319	si_pi = kzalloc(sizeof(struct si_power_info), GFP_KERNEL);
7320	if (si_pi == NULL)
7321		return -ENOMEM;
7322	adev->pm.dpm.priv = si_pi;
7323	ni_pi = &si_pi->ni;
7324	eg_pi = &ni_pi->eg;
7325	pi = &eg_pi->rv7xx;
7326
7327	si_pi->sys_pcie_mask =
7328		adev->pm.pcie_gen_mask & CAIL_PCIE_LINK_SPEED_SUPPORT_MASK;
7329	si_pi->force_pcie_gen = AMDGPU_PCIE_GEN_INVALID;
7330	si_pi->boot_pcie_gen = si_get_current_pcie_speed(adev);
7331
7332	si_set_max_cu_value(adev);
7333
7334	rv770_get_max_vddc(adev);
7335	si_get_leakage_vddc(adev);
7336	si_patch_dependency_tables_based_on_leakage(adev);
7337
7338	pi->acpi_vddc = 0;
7339	eg_pi->acpi_vddci = 0;
7340	pi->min_vddc_in_table = 0;
7341	pi->max_vddc_in_table = 0;
7342
7343	ret = amdgpu_get_platform_caps(adev);
7344	if (ret)
7345		return ret;
7346
7347	ret = amdgpu_parse_extended_power_table(adev);
7348	if (ret)
7349		return ret;
7350
7351	ret = si_parse_power_table(adev);
7352	if (ret)
7353		return ret;
7354
7355	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries =
7356		kcalloc(4,
7357			sizeof(struct amdgpu_clock_voltage_dependency_entry),
7358			GFP_KERNEL);
7359	if (!adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries) {
7360		amdgpu_free_extended_power_table(adev);
7361		return -ENOMEM;
7362	}
7363	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.count = 4;
7364	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].clk = 0;
7365	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[0].v = 0;
7366	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].clk = 36000;
7367	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[1].v = 720;
7368	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].clk = 54000;
7369	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[2].v = 810;
7370	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].clk = 72000;
7371	adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries[3].v = 900;
7372
7373	if (adev->pm.dpm.voltage_response_time == 0)
7374		adev->pm.dpm.voltage_response_time = R600_VOLTAGERESPONSETIME_DFLT;
7375	if (adev->pm.dpm.backbias_response_time == 0)
7376		adev->pm.dpm.backbias_response_time = R600_BACKBIASRESPONSETIME_DFLT;
7377
7378	ret = amdgpu_atombios_get_clock_dividers(adev, COMPUTE_ENGINE_PLL_PARAM,
7379					     0, false, &dividers);
7380	if (ret)
7381		pi->ref_div = dividers.ref_div + 1;
7382	else
7383		pi->ref_div = R600_REFERENCEDIVIDER_DFLT;
7384
7385	eg_pi->smu_uvd_hs = false;
7386
7387	pi->mclk_strobe_mode_threshold = 40000;
7388	if (si_is_special_1gb_platform(adev))
7389		pi->mclk_stutter_mode_threshold = 0;
7390	else
7391		pi->mclk_stutter_mode_threshold = pi->mclk_strobe_mode_threshold;
7392	pi->mclk_edc_enable_threshold = 40000;
7393	eg_pi->mclk_edc_wr_enable_threshold = 40000;
7394
7395	ni_pi->mclk_rtt_mode_threshold = eg_pi->mclk_edc_wr_enable_threshold;
7396
7397	pi->voltage_control =
7398		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
7399					    VOLTAGE_OBJ_GPIO_LUT);
7400	if (!pi->voltage_control) {
7401		si_pi->voltage_control_svi2 =
7402			amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
7403						    VOLTAGE_OBJ_SVID2);
7404		if (si_pi->voltage_control_svi2)
7405			amdgpu_atombios_get_svi2_info(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
7406						  &si_pi->svd_gpio_id, &si_pi->svc_gpio_id);
7407	}
7408
7409	pi->mvdd_control =
7410		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_MVDDC,
7411					    VOLTAGE_OBJ_GPIO_LUT);
7412
7413	eg_pi->vddci_control =
7414		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
7415					    VOLTAGE_OBJ_GPIO_LUT);
7416	if (!eg_pi->vddci_control)
7417		si_pi->vddci_control_svi2 =
7418			amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDCI,
7419						    VOLTAGE_OBJ_SVID2);
7420
7421	si_pi->vddc_phase_shed_control =
7422		amdgpu_atombios_is_voltage_gpio(adev, SET_VOLTAGE_TYPE_ASIC_VDDC,
7423					    VOLTAGE_OBJ_PHASE_LUT);
7424
7425	rv770_get_engine_memory_ss(adev);
7426
7427	pi->asi = RV770_ASI_DFLT;
7428	pi->pasi = CYPRESS_HASI_DFLT;
7429	pi->vrc = SISLANDS_VRC_DFLT;
7430
7431	pi->gfx_clock_gating = true;
7432
7433	eg_pi->sclk_deep_sleep = true;
7434	si_pi->sclk_deep_sleep_above_low = false;
7435
7436	if (adev->pm.int_thermal_type != THERMAL_TYPE_NONE)
7437		pi->thermal_protection = true;
7438	else
7439		pi->thermal_protection = false;
7440
7441	eg_pi->dynamic_ac_timing = true;
7442
7443	eg_pi->light_sleep = true;
7444#if defined(CONFIG_ACPI)
7445	eg_pi->pcie_performance_request =
7446		amdgpu_acpi_is_pcie_performance_request_supported(adev);
7447#else
7448	eg_pi->pcie_performance_request = false;
7449#endif
7450
7451	si_pi->sram_end = SMC_RAM_END;
7452
7453	adev->pm.dpm.dyn_state.mclk_sclk_ratio = 4;
7454	adev->pm.dpm.dyn_state.sclk_mclk_delta = 15000;
7455	adev->pm.dpm.dyn_state.vddc_vddci_delta = 200;
7456	adev->pm.dpm.dyn_state.valid_sclk_values.count = 0;
7457	adev->pm.dpm.dyn_state.valid_sclk_values.values = NULL;
7458	adev->pm.dpm.dyn_state.valid_mclk_values.count = 0;
7459	adev->pm.dpm.dyn_state.valid_mclk_values.values = NULL;
7460
7461	si_initialize_powertune_defaults(adev);
7462
7463	/* make sure dc limits are valid */
7464	if ((adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.sclk == 0) ||
7465	    (adev->pm.dpm.dyn_state.max_clock_voltage_on_dc.mclk == 0))
7466		adev->pm.dpm.dyn_state.max_clock_voltage_on_dc =
7467			adev->pm.dpm.dyn_state.max_clock_voltage_on_ac;
7468
7469	si_pi->fan_ctrl_is_in_default_mode = true;
7470
7471	return 0;
7472}
7473
7474static void si_dpm_fini(struct amdgpu_device *adev)
7475{
7476	int i;
7477
7478	if (adev->pm.dpm.ps)
7479		for (i = 0; i < adev->pm.dpm.num_ps; i++)
7480			kfree(adev->pm.dpm.ps[i].ps_priv);
7481	kfree(adev->pm.dpm.ps);
7482	kfree(adev->pm.dpm.priv);
7483	kfree(adev->pm.dpm.dyn_state.vddc_dependency_on_dispclk.entries);
7484	amdgpu_free_extended_power_table(adev);
7485}
7486
7487static void si_dpm_debugfs_print_current_performance_level(void *handle,
7488						    struct seq_file *m)
7489{
7490#ifndef __NetBSD__		/* XXX amdgpu debugfs */
7491	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7492	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
7493	struct amdgpu_ps *rps = &eg_pi->current_rps;
7494	struct  si_ps *ps = si_get_ps(rps);
7495	struct rv7xx_pl *pl;
7496	u32 current_index =
7497		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
7498		CURRENT_STATE_INDEX_SHIFT;
7499
7500	if (current_index >= ps->performance_level_count) {
7501		seq_printf(m, "invalid dpm profile %d\n", current_index);
7502	} else {
7503		pl = &ps->performance_levels[current_index];
7504		seq_printf(m, "uvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
7505		seq_printf(m, "power level %d    sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
7506			   current_index, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
7507	}
7508#endif
7509}
7510
7511static int si_dpm_set_interrupt_state(struct amdgpu_device *adev,
7512				      struct amdgpu_irq_src *source,
7513				      unsigned type,
7514				      enum amdgpu_interrupt_state state)
7515{
7516	u32 cg_thermal_int;
7517
7518	switch (type) {
7519	case AMDGPU_THERMAL_IRQ_LOW_TO_HIGH:
7520		switch (state) {
7521		case AMDGPU_IRQ_STATE_DISABLE:
7522			cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
7523			cg_thermal_int |= THERM_INT_MASK_HIGH;
7524			WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
7525			break;
7526		case AMDGPU_IRQ_STATE_ENABLE:
7527			cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
7528			cg_thermal_int &= ~THERM_INT_MASK_HIGH;
7529			WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
7530			break;
7531		default:
7532			break;
7533		}
7534		break;
7535
7536	case AMDGPU_THERMAL_IRQ_HIGH_TO_LOW:
7537		switch (state) {
7538		case AMDGPU_IRQ_STATE_DISABLE:
7539			cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
7540			cg_thermal_int |= THERM_INT_MASK_LOW;
7541			WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
7542			break;
7543		case AMDGPU_IRQ_STATE_ENABLE:
7544			cg_thermal_int = RREG32_SMC(CG_THERMAL_INT);
7545			cg_thermal_int &= ~THERM_INT_MASK_LOW;
7546			WREG32_SMC(CG_THERMAL_INT, cg_thermal_int);
7547			break;
7548		default:
7549			break;
7550		}
7551		break;
7552
7553	default:
7554		break;
7555	}
7556	return 0;
7557}
7558
7559static int si_dpm_process_interrupt(struct amdgpu_device *adev,
7560				    struct amdgpu_irq_src *source,
7561				    struct amdgpu_iv_entry *entry)
7562{
7563	bool queue_thermal = false;
7564
7565	if (entry == NULL)
7566		return -EINVAL;
7567
7568	switch (entry->src_id) {
7569	case 230: /* thermal low to high */
7570		DRM_DEBUG("IH: thermal low to high\n");
7571		adev->pm.dpm.thermal.high_to_low = false;
7572		queue_thermal = true;
7573		break;
7574	case 231: /* thermal high to low */
7575		DRM_DEBUG("IH: thermal high to low\n");
7576		adev->pm.dpm.thermal.high_to_low = true;
7577		queue_thermal = true;
7578		break;
7579	default:
7580		break;
7581	}
7582
7583	if (queue_thermal)
7584		schedule_work(&adev->pm.dpm.thermal.work);
7585
7586	return 0;
7587}
7588
7589static int si_dpm_late_init(void *handle)
7590{
7591	int ret;
7592	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7593
7594	if (!adev->pm.dpm_enabled)
7595		return 0;
7596
7597	ret = si_set_temperature_range(adev);
7598	if (ret)
7599		return ret;
7600#if 0 //TODO ?
7601	si_dpm_powergate_uvd(adev, true);
7602#endif
7603	return 0;
7604}
7605
7606/**
7607 * si_dpm_init_microcode - load ucode images from disk
7608 *
7609 * @adev: amdgpu_device pointer
7610 *
7611 * Use the firmware interface to load the ucode images into
7612 * the driver (not loaded into hw).
7613 * Returns 0 on success, error on failure.
7614 */
7615static int si_dpm_init_microcode(struct amdgpu_device *adev)
7616{
7617	const char *chip_name;
7618	char fw_name[30];
7619	int err;
7620
7621	DRM_DEBUG("\n");
7622	switch (adev->asic_type) {
7623	case CHIP_TAHITI:
7624		chip_name = "tahiti";
7625		break;
7626	case CHIP_PITCAIRN:
7627		if ((adev->pdev->revision == 0x81) &&
7628		    ((adev->pdev->device == 0x6810) ||
7629		    (adev->pdev->device == 0x6811)))
7630			chip_name = "pitcairn_k";
7631		else
7632			chip_name = "pitcairn";
7633		break;
7634	case CHIP_VERDE:
7635		if (((adev->pdev->device == 0x6820) &&
7636			((adev->pdev->revision == 0x81) ||
7637			(adev->pdev->revision == 0x83))) ||
7638		    ((adev->pdev->device == 0x6821) &&
7639			((adev->pdev->revision == 0x83) ||
7640			(adev->pdev->revision == 0x87))) ||
7641		    ((adev->pdev->revision == 0x87) &&
7642			((adev->pdev->device == 0x6823) ||
7643			(adev->pdev->device == 0x682b))))
7644			chip_name = "verde_k";
7645		else
7646			chip_name = "verde";
7647		break;
7648	case CHIP_OLAND:
7649		if (((adev->pdev->revision == 0x81) &&
7650			((adev->pdev->device == 0x6600) ||
7651			(adev->pdev->device == 0x6604) ||
7652			(adev->pdev->device == 0x6605) ||
7653			(adev->pdev->device == 0x6610))) ||
7654		    ((adev->pdev->revision == 0x83) &&
7655			(adev->pdev->device == 0x6610)))
7656			chip_name = "oland_k";
7657		else
7658			chip_name = "oland";
7659		break;
7660	case CHIP_HAINAN:
7661		if (((adev->pdev->revision == 0x81) &&
7662			(adev->pdev->device == 0x6660)) ||
7663		    ((adev->pdev->revision == 0x83) &&
7664			((adev->pdev->device == 0x6660) ||
7665			(adev->pdev->device == 0x6663) ||
7666			(adev->pdev->device == 0x6665) ||
7667			 (adev->pdev->device == 0x6667))))
7668			chip_name = "hainan_k";
7669		else if ((adev->pdev->revision == 0xc3) &&
7670			 (adev->pdev->device == 0x6665))
7671			chip_name = "banks_k_2";
7672		else
7673			chip_name = "hainan";
7674		break;
7675	default: BUG();
7676	}
7677
7678	snprintf(fw_name, sizeof(fw_name), "amdgpu/%s_smc.bin", chip_name);
7679	err = request_firmware(&adev->pm.fw, fw_name, adev->dev);
7680	if (err)
7681		goto out;
7682	err = amdgpu_ucode_validate(adev->pm.fw);
7683
7684out:
7685	if (err) {
7686		DRM_ERROR("si_smc: Failed to load firmware. err = %d\"%s\"\n",
7687			  err, fw_name);
7688		release_firmware(adev->pm.fw);
7689		adev->pm.fw = NULL;
7690	}
7691	return err;
7692
7693}
7694
7695static int si_dpm_sw_init(void *handle)
7696{
7697	int ret;
7698	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7699
7700	ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 230, &adev->pm.dpm.thermal.irq);
7701	if (ret)
7702		return ret;
7703
7704	ret = amdgpu_irq_add_id(adev, AMDGPU_IRQ_CLIENTID_LEGACY, 231, &adev->pm.dpm.thermal.irq);
7705	if (ret)
7706		return ret;
7707
7708	/* default to balanced state */
7709	adev->pm.dpm.state = POWER_STATE_TYPE_BALANCED;
7710	adev->pm.dpm.user_state = POWER_STATE_TYPE_BALANCED;
7711	adev->pm.dpm.forced_level = AMD_DPM_FORCED_LEVEL_AUTO;
7712	adev->pm.default_sclk = adev->clock.default_sclk;
7713	adev->pm.default_mclk = adev->clock.default_mclk;
7714	adev->pm.current_sclk = adev->clock.default_sclk;
7715	adev->pm.current_mclk = adev->clock.default_mclk;
7716	adev->pm.int_thermal_type = THERMAL_TYPE_NONE;
7717
7718	if (amdgpu_dpm == 0)
7719		return 0;
7720
7721	ret = si_dpm_init_microcode(adev);
7722	if (ret)
7723		return ret;
7724
7725	INIT_WORK(&adev->pm.dpm.thermal.work, amdgpu_dpm_thermal_work_handler);
7726	mutex_lock(&adev->pm.mutex);
7727	ret = si_dpm_init(adev);
7728	if (ret)
7729		goto dpm_failed;
7730	adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
7731	if (amdgpu_dpm == 1)
7732		amdgpu_pm_print_power_states(adev);
7733	mutex_unlock(&adev->pm.mutex);
7734	DRM_INFO("amdgpu: dpm initialized\n");
7735
7736	return 0;
7737
7738dpm_failed:
7739	si_dpm_fini(adev);
7740	mutex_unlock(&adev->pm.mutex);
7741	DRM_ERROR("amdgpu: dpm initialization failed\n");
7742	return ret;
7743}
7744
7745static int si_dpm_sw_fini(void *handle)
7746{
7747	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7748
7749	flush_work(&adev->pm.dpm.thermal.work);
7750
7751	mutex_lock(&adev->pm.mutex);
7752	si_dpm_fini(adev);
7753	mutex_unlock(&adev->pm.mutex);
7754
7755	return 0;
7756}
7757
7758static int si_dpm_hw_init(void *handle)
7759{
7760	int ret;
7761
7762	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7763
7764	if (!amdgpu_dpm)
7765		return 0;
7766
7767	mutex_lock(&adev->pm.mutex);
7768	si_dpm_setup_asic(adev);
7769	ret = si_dpm_enable(adev);
7770	if (ret)
7771		adev->pm.dpm_enabled = false;
7772	else
7773		adev->pm.dpm_enabled = true;
7774	mutex_unlock(&adev->pm.mutex);
7775	amdgpu_pm_compute_clocks(adev);
7776	return ret;
7777}
7778
7779static int si_dpm_hw_fini(void *handle)
7780{
7781	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7782
7783	if (adev->pm.dpm_enabled) {
7784		mutex_lock(&adev->pm.mutex);
7785		si_dpm_disable(adev);
7786		mutex_unlock(&adev->pm.mutex);
7787	}
7788
7789	return 0;
7790}
7791
7792static int si_dpm_suspend(void *handle)
7793{
7794	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7795
7796	if (adev->pm.dpm_enabled) {
7797		mutex_lock(&adev->pm.mutex);
7798		/* disable dpm */
7799		si_dpm_disable(adev);
7800		/* reset the power state */
7801		adev->pm.dpm.current_ps = adev->pm.dpm.requested_ps = adev->pm.dpm.boot_ps;
7802		mutex_unlock(&adev->pm.mutex);
7803	}
7804	return 0;
7805}
7806
7807static int si_dpm_resume(void *handle)
7808{
7809	int ret;
7810	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7811
7812	if (adev->pm.dpm_enabled) {
7813		/* asic init will reset to the boot state */
7814		mutex_lock(&adev->pm.mutex);
7815		si_dpm_setup_asic(adev);
7816		ret = si_dpm_enable(adev);
7817		if (ret)
7818			adev->pm.dpm_enabled = false;
7819		else
7820			adev->pm.dpm_enabled = true;
7821		mutex_unlock(&adev->pm.mutex);
7822		if (adev->pm.dpm_enabled)
7823			amdgpu_pm_compute_clocks(adev);
7824	}
7825	return 0;
7826}
7827
7828static bool si_dpm_is_idle(void *handle)
7829{
7830	/* XXX */
7831	return true;
7832}
7833
7834static int si_dpm_wait_for_idle(void *handle)
7835{
7836	/* XXX */
7837	return 0;
7838}
7839
7840static int si_dpm_soft_reset(void *handle)
7841{
7842	return 0;
7843}
7844
7845static int si_dpm_set_clockgating_state(void *handle,
7846					enum amd_clockgating_state state)
7847{
7848	return 0;
7849}
7850
7851static int si_dpm_set_powergating_state(void *handle,
7852					enum amd_powergating_state state)
7853{
7854	return 0;
7855}
7856
7857/* get temperature in millidegrees */
7858static int si_dpm_get_temp(void *handle)
7859{
7860	u32 temp;
7861	int actual_temp = 0;
7862	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7863
7864	temp = (RREG32(CG_MULT_THERMAL_STATUS) & CTF_TEMP_MASK) >>
7865		CTF_TEMP_SHIFT;
7866
7867	if (temp & 0x200)
7868		actual_temp = 255;
7869	else
7870		actual_temp = temp & 0x1ff;
7871
7872	actual_temp = (actual_temp * 1000);
7873
7874	return actual_temp;
7875}
7876
7877static u32 si_dpm_get_sclk(void *handle, bool low)
7878{
7879	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7880	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
7881	struct  si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
7882
7883	if (low)
7884		return requested_state->performance_levels[0].sclk;
7885	else
7886		return requested_state->performance_levels[requested_state->performance_level_count - 1].sclk;
7887}
7888
7889static u32 si_dpm_get_mclk(void *handle, bool low)
7890{
7891	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7892	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
7893	struct  si_ps *requested_state = si_get_ps(&eg_pi->requested_rps);
7894
7895	if (low)
7896		return requested_state->performance_levels[0].mclk;
7897	else
7898		return requested_state->performance_levels[requested_state->performance_level_count - 1].mclk;
7899}
7900
7901static void si_dpm_print_power_state(void *handle,
7902				     void *current_ps)
7903{
7904	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7905	struct amdgpu_ps *rps = (struct amdgpu_ps *)current_ps;
7906	struct  si_ps *ps = si_get_ps(rps);
7907	struct rv7xx_pl *pl;
7908	int i;
7909
7910	amdgpu_dpm_print_class_info(rps->class, rps->class2);
7911	amdgpu_dpm_print_cap_info(rps->caps);
7912	DRM_INFO("\tuvd    vclk: %d dclk: %d\n", rps->vclk, rps->dclk);
7913	for (i = 0; i < ps->performance_level_count; i++) {
7914		pl = &ps->performance_levels[i];
7915		if (adev->asic_type >= CHIP_TAHITI)
7916			DRM_INFO("\t\tpower level %d    sclk: %u mclk: %u vddc: %u vddci: %u pcie gen: %u\n",
7917				 i, pl->sclk, pl->mclk, pl->vddc, pl->vddci, pl->pcie_gen + 1);
7918		else
7919			DRM_INFO("\t\tpower level %d    sclk: %u mclk: %u vddc: %u vddci: %u\n",
7920				 i, pl->sclk, pl->mclk, pl->vddc, pl->vddci);
7921	}
7922	amdgpu_dpm_print_ps_status(adev, rps);
7923}
7924
7925static int si_dpm_early_init(void *handle)
7926{
7927
7928	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7929
7930	adev->powerplay.pp_funcs = &si_dpm_funcs;
7931	adev->powerplay.pp_handle = adev;
7932	si_dpm_set_irq_funcs(adev);
7933	return 0;
7934}
7935
7936static inline bool si_are_power_levels_equal(const struct rv7xx_pl  *si_cpl1,
7937						const struct rv7xx_pl *si_cpl2)
7938{
7939	return ((si_cpl1->mclk == si_cpl2->mclk) &&
7940		  (si_cpl1->sclk == si_cpl2->sclk) &&
7941		  (si_cpl1->pcie_gen == si_cpl2->pcie_gen) &&
7942		  (si_cpl1->vddc == si_cpl2->vddc) &&
7943		  (si_cpl1->vddci == si_cpl2->vddci));
7944}
7945
7946static int si_check_state_equal(void *handle,
7947				void *current_ps,
7948				void *request_ps,
7949				bool *equal)
7950{
7951	struct si_ps *si_cps;
7952	struct si_ps *si_rps;
7953	int i;
7954	struct amdgpu_ps *cps = (struct amdgpu_ps *)current_ps;
7955	struct amdgpu_ps *rps = (struct amdgpu_ps *)request_ps;
7956	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7957
7958	if (adev == NULL || cps == NULL || rps == NULL || equal == NULL)
7959		return -EINVAL;
7960
7961	si_cps = si_get_ps((struct amdgpu_ps *)cps);
7962	si_rps = si_get_ps((struct amdgpu_ps *)rps);
7963
7964	if (si_cps == NULL) {
7965		printk("si_cps is NULL\n");
7966		*equal = false;
7967		return 0;
7968	}
7969
7970	if (si_cps->performance_level_count != si_rps->performance_level_count) {
7971		*equal = false;
7972		return 0;
7973	}
7974
7975	for (i = 0; i < si_cps->performance_level_count; i++) {
7976		if (!si_are_power_levels_equal(&(si_cps->performance_levels[i]),
7977					&(si_rps->performance_levels[i]))) {
7978			*equal = false;
7979			return 0;
7980		}
7981	}
7982
7983	/* If all performance levels are the same try to use the UVD clocks to break the tie.*/
7984	*equal = ((cps->vclk == rps->vclk) && (cps->dclk == rps->dclk));
7985	*equal &= ((cps->evclk == rps->evclk) && (cps->ecclk == rps->ecclk));
7986
7987	return 0;
7988}
7989
7990static int si_dpm_read_sensor(void *handle, int idx,
7991			      void *value, int *size)
7992{
7993	struct amdgpu_device *adev = (struct amdgpu_device *)handle;
7994	struct evergreen_power_info *eg_pi = evergreen_get_pi(adev);
7995	struct amdgpu_ps *rps = &eg_pi->current_rps;
7996	struct  si_ps *ps = si_get_ps(rps);
7997	uint32_t sclk, mclk;
7998	u32 pl_index =
7999		(RREG32(TARGET_AND_CURRENT_PROFILE_INDEX) & CURRENT_STATE_INDEX_MASK) >>
8000		CURRENT_STATE_INDEX_SHIFT;
8001
8002	/* size must be at least 4 bytes for all sensors */
8003	if (*size < 4)
8004		return -EINVAL;
8005
8006	switch (idx) {
8007	case AMDGPU_PP_SENSOR_GFX_SCLK:
8008		if (pl_index < ps->performance_level_count) {
8009			sclk = ps->performance_levels[pl_index].sclk;
8010			*((uint32_t *)value) = sclk;
8011			*size = 4;
8012			return 0;
8013		}
8014		return -EINVAL;
8015	case AMDGPU_PP_SENSOR_GFX_MCLK:
8016		if (pl_index < ps->performance_level_count) {
8017			mclk = ps->performance_levels[pl_index].mclk;
8018			*((uint32_t *)value) = mclk;
8019			*size = 4;
8020			return 0;
8021		}
8022		return -EINVAL;
8023	case AMDGPU_PP_SENSOR_GPU_TEMP:
8024		*((uint32_t *)value) = si_dpm_get_temp(adev);
8025		*size = 4;
8026		return 0;
8027	default:
8028		return -EINVAL;
8029	}
8030}
8031
8032static const struct amd_ip_funcs si_dpm_ip_funcs = {
8033	.name = "si_dpm",
8034	.early_init = si_dpm_early_init,
8035	.late_init = si_dpm_late_init,
8036	.sw_init = si_dpm_sw_init,
8037	.sw_fini = si_dpm_sw_fini,
8038	.hw_init = si_dpm_hw_init,
8039	.hw_fini = si_dpm_hw_fini,
8040	.suspend = si_dpm_suspend,
8041	.resume = si_dpm_resume,
8042	.is_idle = si_dpm_is_idle,
8043	.wait_for_idle = si_dpm_wait_for_idle,
8044	.soft_reset = si_dpm_soft_reset,
8045	.set_clockgating_state = si_dpm_set_clockgating_state,
8046	.set_powergating_state = si_dpm_set_powergating_state,
8047};
8048
8049const struct amdgpu_ip_block_version si_smu_ip_block =
8050{
8051	.type = AMD_IP_BLOCK_TYPE_SMC,
8052	.major = 6,
8053	.minor = 0,
8054	.rev = 0,
8055	.funcs = &si_dpm_ip_funcs,
8056};
8057
8058static const struct amd_pm_funcs si_dpm_funcs = {
8059	.pre_set_power_state = &si_dpm_pre_set_power_state,
8060	.set_power_state = &si_dpm_set_power_state,
8061	.post_set_power_state = &si_dpm_post_set_power_state,
8062	.display_configuration_changed = &si_dpm_display_configuration_changed,
8063	.get_sclk = &si_dpm_get_sclk,
8064	.get_mclk = &si_dpm_get_mclk,
8065	.print_power_state = &si_dpm_print_power_state,
8066	.debugfs_print_current_performance_level = &si_dpm_debugfs_print_current_performance_level,
8067	.force_performance_level = &si_dpm_force_performance_level,
8068	.vblank_too_short = &si_dpm_vblank_too_short,
8069	.set_fan_control_mode = &si_dpm_set_fan_control_mode,
8070	.get_fan_control_mode = &si_dpm_get_fan_control_mode,
8071	.set_fan_speed_percent = &si_dpm_set_fan_speed_percent,
8072	.get_fan_speed_percent = &si_dpm_get_fan_speed_percent,
8073	.check_state_equal = &si_check_state_equal,
8074	.get_vce_clock_state = amdgpu_get_vce_clock_state,
8075	.read_sensor = &si_dpm_read_sensor,
8076};
8077
8078static const struct amdgpu_irq_src_funcs si_dpm_irq_funcs = {
8079	.set = si_dpm_set_interrupt_state,
8080	.process = si_dpm_process_interrupt,
8081};
8082
8083static void si_dpm_set_irq_funcs(struct amdgpu_device *adev)
8084{
8085	adev->pm.dpm.thermal.irq.num_types = AMDGPU_THERMAL_IRQ_LAST;
8086	adev->pm.dpm.thermal.irq.funcs = &si_dpm_irq_funcs;
8087}
8088
8089