1// SPDX-License-Identifier: GPL-2.0
2/*
3 * Copyright (C) Marvell International Ltd. and its affiliates
4 */
5#include "ddr_ml_wrapper.h"
6#include "mv_ddr_plat.h"
7
8#include "mv_ddr_topology.h"
9#include "mv_ddr_common.h"
10#include "mv_ddr_spd.h"
11#include "ddr_topology_def.h"
12#include "ddr3_training_ip_db.h"
13#include "ddr3_training_ip.h"
14#include "mv_ddr_training_db.h"
15
/*
 * Compute the DDR4 CAS latency (CL) for a given clock period.
 * taa_min: minimum CAS latency time (tAAmin), same time unit as tclk.
 * tclk:    clock period.
 * Returns a CL value supported by the DIMM per its SPD data.
 */
unsigned int mv_ddr_cl_calc(unsigned int taa_min, unsigned int tclk)
{
	/* round the required latency up to whole clocks, then snap it
	 * to a CL the module actually advertises in its SPD
	 */
	return mv_ddr_spd_supported_cl_get(ceil_div(taa_min, tclk));
}
23
/*
 * Look up the DDR4 CAS write latency (CWL) for a given clock period.
 * tclk: clock period (larger value means slower clock).
 * Returns the CWL in clock cycles, or 0 if tclk is below the supported
 * range (clock too fast).
 */
unsigned int mv_ddr_cwl_calc(unsigned int tclk)
{
	/* first entry whose minimum period is met wins; entries are
	 * ordered from slowest to fastest clock
	 */
	static const struct {
		unsigned int tclk_min;
		unsigned int cwl;
	} cwl_map[] = {
		{1250, 9},
		{1071, 10},
		{938, 11},
		{833, 12},
		{750, 14},
		{625, 16},
	};
	unsigned int i;

	for (i = 0; i < sizeof(cwl_map) / sizeof(cwl_map[0]); i++) {
		if (tclk >= cwl_map[i].tclk_min)
			return cwl_map[i].cwl;
	}

	return 0; /* unsupported clock period */
}
45
/*
 * Update the topology map prior to training.
 *
 * For SPD-based configuration (MV_DDR_CFG_SPD): validates the dram
 * device and module type, fills in timing data, device width, die
 * capacity, bus/cs/mirror bit masks, and computes the CAS write
 * latency (CWL) and CAS latency (CL) for the target clock.
 * For default configuration (MV_DDR_CFG_DEFAULT): derives CL/CWL from
 * the speed bin when unset, and adjusts width/size for a twin-die
 * combined device.
 *
 * Returns 0 on success, -1 on any unsupported parameter.
 */
int mv_ddr_topology_map_update(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	struct if_params *iface_params = &(tm->interface_params[0]);
	unsigned int octets_per_if_num = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
	enum mv_ddr_speed_bin speed_bin_index;
	enum mv_ddr_freq freq = MV_DDR_FREQ_LAST;
	unsigned int tclk;
	unsigned char val = 0;
	int i;

	/* resolve actual frequency from sample-at-reset if requested */
	if (iface_params->memory_freq == MV_DDR_FREQ_SAR)
		iface_params->memory_freq = mv_ddr_init_freq_get();

	if (tm->cfg_src == MV_DDR_CFG_SPD) {
		/* check dram device type; only ddr4 is supported here */
		val = mv_ddr_spd_dev_type_get(&tm->spd_data);
		if (val != MV_DDR_SPD_DEV_TYPE_DDR4) {
			printf("mv_ddr: unsupported dram device type found\n");
			return -1;
		}

		/* update topology map with timing data */
		if (mv_ddr_spd_timing_calc(&tm->spd_data, tm->timing_data) > 0) {
			printf("mv_ddr: negative timing data found\n");
			return -1;
		}

		/* update device width in topology map */
		iface_params->bus_width = mv_ddr_spd_dev_width_get(&tm->spd_data);

		/* update die capacity in topology map */
		iface_params->memory_size = mv_ddr_spd_die_capacity_get(&tm->spd_data);

		/* update bus bit mask in topology map */
		tm->bus_act_mask = mv_ddr_bus_bit_mask_get();

		/* update cs bit mask in topology map; same mask is applied
		 * to every active subphy
		 */
		val = mv_ddr_spd_cs_bit_mask_get(&tm->spd_data);
		for (i = 0; i < octets_per_if_num; i++)
			iface_params->as_bus_params[i].cs_bitmask = val;

		/* check dram module type; only unbuffered variants pass */
		val = mv_ddr_spd_module_type_get(&tm->spd_data);
		switch (val) {
		case MV_DDR_SPD_MODULE_TYPE_UDIMM:
		case MV_DDR_SPD_MODULE_TYPE_SO_DIMM:
		case MV_DDR_SPD_MODULE_TYPE_MINI_UDIMM:
		case MV_DDR_SPD_MODULE_TYPE_72BIT_SO_UDIMM:
		case MV_DDR_SPD_MODULE_TYPE_16BIT_SO_DIMM:
		case MV_DDR_SPD_MODULE_TYPE_32BIT_SO_DIMM:
			break;
		default:
			printf("mv_ddr: unsupported dram module type found\n");
			return -1;
		}

		/* update mirror bit mask in topology map;
		 * NOTE(review): the shift left by one presumably places the
		 * mirror flag at the cs1 bit position — confirm against the
		 * mirror_enable_bitmask consumer
		 */
		val = mv_ddr_spd_mem_mirror_get(&tm->spd_data);
		for (i = 0; i < octets_per_if_num; i++)
			iface_params->as_bus_params[i].mirror_enable_bitmask = val << 1;

		/* clock period; assumes mv_ddr_freq_get() reports MHz so
		 * tclk is in picoseconds — TODO confirm units
		 */
		tclk = 1000000 / mv_ddr_freq_get(iface_params->memory_freq);
		/* update cas write latency (cwl); 0 means unsupported */
		val = mv_ddr_cwl_calc(tclk);
		if (val == 0) {
			printf("mv_ddr: unsupported cas write latency value found\n");
			return -1;
		}
		iface_params->cas_wl = val;

		/* update cas latency (cl) from tAAmin, limited to the CLs
		 * the module supports; 0 means unsupported
		 */
		mv_ddr_spd_supported_cls_calc(&tm->spd_data);
		val = mv_ddr_cl_calc(tm->timing_data[MV_DDR_TAA_MIN], tclk);
		if (val == 0) {
			printf("mv_ddr: unsupported cas latency value found\n");
			return -1;
		}
		iface_params->cas_l = val;
	} else if (tm->cfg_src == MV_DDR_CFG_DEFAULT) {
		/* set cas and cas-write latencies per speed bin, if they unset */
		speed_bin_index = iface_params->speed_bin_index;
		freq = iface_params->memory_freq;

		/* twin-die combined package: halve device width to x8 and
		 * drop one capacity step (memory_size is an enum index)
		 */
		if (tm->twin_die_combined == COMBINED) {
			iface_params->bus_width = MV_DDR_DEV_WIDTH_8BIT;
			iface_params->memory_size -= 1;
		}

		if (iface_params->cas_l == 0)
			iface_params->cas_l = mv_ddr_cl_val_get(speed_bin_index, freq);

		if (iface_params->cas_wl == 0)
			iface_params->cas_wl = mv_ddr_cwl_val_get(speed_bin_index, freq);
	}

	return 0;
}
144
145unsigned short mv_ddr_bus_bit_mask_get(void)
146{
147	unsigned short pri_and_ext_bus_width = 0x0;
148	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
149	unsigned int octets_per_if_num = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
150
151	if (tm->cfg_src == MV_DDR_CFG_SPD) {
152		if (tm->bus_act_mask == MV_DDR_32BIT_ECC_PUP8_BUS_MASK)
153			tm->spd_data.byte_fields.byte_13.bit_fields.primary_bus_width = MV_DDR_PRI_BUS_WIDTH_32;
154
155		enum mv_ddr_pri_bus_width pri_bus_width = mv_ddr_spd_pri_bus_width_get(&tm->spd_data);
156		enum mv_ddr_bus_width_ext bus_width_ext = mv_ddr_spd_bus_width_ext_get(&tm->spd_data);
157
158		switch (pri_bus_width) {
159		case MV_DDR_PRI_BUS_WIDTH_16:
160			pri_and_ext_bus_width = BUS_MASK_16BIT;
161			break;
162		case MV_DDR_PRI_BUS_WIDTH_32: /*each bit represents byte, so 0xf-is means 4 bytes-32 bit*/
163			pri_and_ext_bus_width = BUS_MASK_32BIT;
164			break;
165		case MV_DDR_PRI_BUS_WIDTH_64:
166			pri_and_ext_bus_width = MV_DDR_64BIT_BUS_MASK;
167			break;
168		default:
169			pri_and_ext_bus_width = 0x0;
170		}
171
172		if (bus_width_ext == MV_DDR_BUS_WIDTH_EXT_8)
173			pri_and_ext_bus_width |= 1 << (octets_per_if_num - 1);
174	}
175
176	return pri_and_ext_bus_width;
177}
178
179unsigned int mv_ddr_if_bus_width_get(void)
180{
181	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
182	unsigned int bus_width;
183
184	switch (tm->bus_act_mask) {
185	case BUS_MASK_16BIT:
186	case BUS_MASK_16BIT_ECC:
187	case BUS_MASK_16BIT_ECC_PUP3:
188		bus_width = 16;
189		break;
190	case BUS_MASK_32BIT:
191	case BUS_MASK_32BIT_ECC:
192	case MV_DDR_32BIT_ECC_PUP8_BUS_MASK:
193		bus_width = 32;
194		break;
195	case MV_DDR_64BIT_BUS_MASK:
196	case MV_DDR_64BIT_ECC_PUP8_BUS_MASK:
197		bus_width = 64;
198		break;
199	default:
200		printf("mv_ddr: unsupported bus active mask parameter found\n");
201		bus_width = 0;
202	}
203
204	return bus_width;
205}
206
/*
 * Count the number of active chip-selects (ranks).
 *
 * Locates the first active subphy in the bus activation mask, then
 * counts the bits set in that subphy's cs_bitmask. Only the first
 * active subphy is inspected; presumably all active subphys carry the
 * same chip-select bitmask — TODO confirm.
 *
 * NOTE(review): VALIDATE_BUS_ACTIVE / VALIDATE_ACTIVE appear to be
 * macros that 'continue' the enclosing loop when the indexed bit is
 * inactive — confirm against their definitions.
 */
unsigned int mv_ddr_cs_num_get(void)
{
	unsigned int cs_num = 0;
	unsigned int cs, sphy;
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	struct if_params *iface_params = &(tm->interface_params[0]);
	unsigned int sphy_max = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);

	/* stop at the first active subphy; sphy keeps its index */
	for (sphy = 0; sphy < sphy_max; sphy++) {
		VALIDATE_BUS_ACTIVE(tm->bus_act_mask, sphy);
		break;
	}

	/* popcount of that subphy's chip-select bitmask */
	for (cs = 0; cs < MAX_CS_NUM; cs++) {
		VALIDATE_ACTIVE(iface_params->as_bus_params[sphy].cs_bitmask, cs);
		cs_num++;
	}

	return cs_num;
}
227
228int mv_ddr_is_ecc_ena(void)
229{
230	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
231
232	if (DDR3_IS_ECC_PUP4_MODE(tm->bus_act_mask) ||
233	    DDR3_IS_ECC_PUP3_MODE(tm->bus_act_mask) ||
234	    DDR3_IS_ECC_PUP8_MODE(tm->bus_act_mask))
235		return 1;
236	else
237		return 0;
238}
239
240int mv_ddr_ck_delay_get(void)
241{
242	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
243
244	if (tm->ck_delay)
245		return tm->ck_delay;
246
247	return -1;
248}
249
/* translate topology map definition to real memory size in bits;
 * indexed by the if_params memory_size (die capacity) field.
 * NOTE(review): entry order must match the die-capacity enum used for
 * memory_size — confirm when extending.
 */
static unsigned int mem_size[] = {
	ADDR_SIZE_512MB,
	ADDR_SIZE_1GB,
	ADDR_SIZE_2GB,
	ADDR_SIZE_4GB,
	ADDR_SIZE_8GB,
	ADDR_SIZE_16GB
	/* TODO: add capacity up to 256GB */
};
260
/*
 * Compute dram size per chip-select in bits:
 * die capacity * active non-ecc subphys / subphys per ddr device
 * (one subphy for x8 devices, two for x16).
 * Returns 0 and logs an error for an unsupported device width.
 */
unsigned long long mv_ddr_mem_sz_per_cs_get(void)
{
	unsigned long long mem_sz_per_cs;
	unsigned int i, sphys, sphys_per_dunit;
	unsigned int sphy_max = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	struct if_params *iface_params = &(tm->interface_params[0]);

	/* calc number of active subphys excl. ecc one (last index);
	 * NOTE(review): VALIDATE_BUS_ACTIVE presumably 'continue's the
	 * loop for inactive subphys — confirm against macro definition
	 */
	for (i = 0, sphys = 0; i < sphy_max - 1; i++) {
		VALIDATE_BUS_ACTIVE(tm->bus_act_mask, i);
		sphys++;
	}

	/* calc number of subphys per ddr unit (device) */
	if (iface_params->bus_width == MV_DDR_DEV_WIDTH_8BIT)
		sphys_per_dunit = MV_DDR_ONE_SPHY_PER_DUNIT;
	else if (iface_params->bus_width == MV_DDR_DEV_WIDTH_16BIT)
		sphys_per_dunit = MV_DDR_TWO_SPHY_PER_DUNIT;
	else {
		printf("mv_ddr: unsupported bus width type found\n");
		return 0;
	}

	/* calc dram size per cs; memory_size indexes the mem_size table */
	mem_sz_per_cs = (unsigned long long)mem_size[iface_params->memory_size] *
			(unsigned long long)sphys /
			(unsigned long long)sphys_per_dunit;
	return mem_sz_per_cs;
}
291
/*
 * Compute total dram size in bits: per-chip-select size multiplied by
 * the number of active chip-selects.
 */
unsigned long long mv_ddr_mem_sz_get(void)
{
	unsigned long long cs_count = mv_ddr_cs_num_get();

	return cs_count * mv_ddr_mem_sz_per_cs_get();
}
303
304unsigned int mv_ddr_rtt_nom_get(void)
305{
306	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
307	unsigned int rtt_nom = tm->edata.mem_edata.rtt_nom;
308
309	if (rtt_nom >= MV_DDR_RTT_NOM_PARK_RZQ_LAST) {
310		printf("error: %s: unsupported rtt_nom parameter found\n", __func__);
311		rtt_nom = PARAM_UNDEFINED;
312	}
313
314	return rtt_nom;
315}
316
317unsigned int mv_ddr_rtt_park_get(void)
318{
319	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
320	unsigned int cs_num = mv_ddr_cs_num_get();
321	unsigned int rtt_park = MV_DDR_RTT_NOM_PARK_RZQ_LAST;
322
323	if (cs_num > 0 && cs_num <= MAX_CS_NUM)
324		rtt_park = tm->edata.mem_edata.rtt_park[cs_num - 1];
325
326	if (rtt_park >= MV_DDR_RTT_NOM_PARK_RZQ_LAST) {
327		printf("error: %s: unsupported rtt_park parameter found\n", __func__);
328		rtt_park = PARAM_UNDEFINED;
329	}
330
331	return rtt_park;
332}
333
334unsigned int mv_ddr_rtt_wr_get(void)
335{
336	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
337	unsigned int cs_num = mv_ddr_cs_num_get();
338	unsigned int rtt_wr = MV_DDR_RTT_WR_RZQ_LAST;
339
340	if (cs_num > 0 && cs_num <= MAX_CS_NUM)
341		rtt_wr = tm->edata.mem_edata.rtt_wr[cs_num - 1];
342
343	if (rtt_wr >= MV_DDR_RTT_WR_RZQ_LAST) {
344		printf("error: %s: unsupported rtt_wr parameter found\n", __func__);
345		rtt_wr = PARAM_UNDEFINED;
346	}
347
348	return rtt_wr;
349}
350
351unsigned int mv_ddr_dic_get(void)
352{
353	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
354	unsigned int dic = tm->edata.mem_edata.dic;
355
356	if (dic >= MV_DDR_DIC_RZQ_LAST) {
357		printf("error: %s: unsupported dic parameter found\n", __func__);
358		dic = PARAM_UNDEFINED;
359	}
360
361	return dic;
362}
363