// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Marvell International Ltd. and its affiliates
 */

#if defined(CONFIG_DDR4)

/* DDR4 MPR/PDA Interface */
#include "ddr3_init.h"
#include "mv_ddr4_mpr_pda_if.h"
#include "mv_ddr4_training.h"
#include "mv_ddr_training_db.h"
#include "mv_ddr_common.h"
#include "mv_ddr_regs.h"

static u8 dram_to_mc_dq_map[MAX_BUS_NUM][BUS_WIDTH_IN_BITS];
static int dq_map_enable;

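/*
 * helper for the write vref calculation below: returns the effective
 * termination (used as rodt, i.e., "effective rtt") as the parallel
 * combination odt * rtt / (odt + rtt), where odt starts at 0xffff ohm
 * (practically open) and rtt is derived from rtt_nom or rtt_wr per the
 * odt configuration; if neither is enabled, 0xffff is returned.
 */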
static u32 mv_ddr4_tx_odt_get(void)
{
	u16 odt = 0xffff, rtt = 0xffff;

	if (g_odt_config & 0xe0000)
		rtt = mv_ddr4_rtt_nom_to_odt(g_rtt_nom);
	else if (g_odt_config & 0x10000)
		rtt = mv_ddr4_rtt_wr_to_odt(g_rtt_wr);
	else
		return odt;

	return (odt * rtt) / (odt + rtt);
}

/*
 * mode registers initialization function
 * replaces all MR writes in DDR3 init function
 */
int mv_ddr4_mode_regs_init(u8 dev_num)
{
	int status;
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	enum hws_access_type access_type = ACCESS_TYPE_UNICAST;
	u32 if_id;
	u32 cl, cwl;
	u32 val, mask;
	u32 t_wr, t_ckclk;
	/* design GL params to be set outside */
	u32 dic = 0;
	u32 ron = 30; /* znri */
	u32 rodt = mv_ddr4_tx_odt_get(); /* effective rtt */
	/* vref percentage presented as 100 x percentage value (e.g., 6000 = 100 x 60%) */
	u32 vref = ((ron + rodt / 2) * 10000) / (ron + rodt);
	u32 range = (vref >= 6000) ? 0 : 1; /* if vref is >= 60%, use upper range */
	u32 tap;
	u32 refresh_mode;

	if (range == 0)
		tap = (vref - 6000) / 65;
	else
		tap = (vref - 4500) / 65;
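	/*
	 * note on the calculation above: with pod termination, the low level
	 * on a dq is vol = vddq * ron / (ron + rodt) while the high level is
	 * vddq, so the ideal receive vref is (vddq + vol) / 2, i.e.,
	 * vref = (ron + rodt / 2) / (ron + rodt) as a fraction of vddq;
	 * mr6 encodes it as a range base (60.00% when range = 0, 45.00% when
	 * range = 1) plus 0.65% steps, hence tap = (vref - base) / 65.
	 * example: ron = 30 ohm, rodt = 60 ohm -> vref = 6666 (66.66%) ->
	 * range 0, tap = (6666 - 6000) / 65 = 10.
	 */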

	for (if_id = 0; if_id < MAX_INTERFACE_NUM; if_id++) {
		VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);
		cl = tm->interface_params[if_id].cas_l;
		cwl = tm->interface_params[if_id].cas_wl;
		t_ckclk = MEGA / mv_ddr_freq_get(tm->interface_params[if_id].memory_freq);
		t_wr = time_to_nclk(mv_ddr_speed_bin_timing_get(tm->interface_params[if_id].speed_bin_index,
					    SPEED_BIN_TWR), t_ckclk) - 1;

		/* TODO: replace hard-coded values with appropriate defines */
		/* DDR4 MR0 */
		/*
		 * [6:4,2] bits to be taken from S@R frequency and speed bin
		 * rtt_nom to be taken from the algorithm definition
		 * dic to be taken from the algorithm definition -
		 * set to 0x1 (for driver rzq/5 = 48 ohm) or
		 * set to 0x0 (for driver rzq/7 = 34 ohm)
		 */
		/* set dll reset, 0x1900[8] to 0x1 */
		/* set tm, 0x1900[7] to 0x0 */
		/* set rbt, 0x1900[3] to 0x0 */
		/* set bl, 0x1900[1:0] to 0x0 */
		val = ((cl_mask_table[cl] & 0x1) << 2) |
		      (((cl_mask_table[cl] & 0xe) >> 1) << 4) |
		      (twr_mask_table[t_wr + 1] << 9) |
		      (0x1 << 8) | (0x0 << 7) | (0x0 << 3) | 0x0;
		mask = (0x1 << 2) | (0x7 << 4) | (0x7 << 9) |
		       (0x1 << 8) | (0x1 << 7) | (0x1 << 3) | 0x3;
		status = ddr3_tip_if_write(dev_num, access_type, if_id, DDR4_MR0_REG,
					   val, mask);
		if (status != MV_OK)
			return status;

		/* DDR4 MR1 */
		/* set rtt nom to 0 if rtt park is activated (not zero) */
		if ((g_rtt_park >> 6) != 0x0)
			g_rtt_nom = 0;
		/* set tdqs, 0x1904[11] to 0x0 */
		/* set al, 0x1904[4:3] to 0x0 */
		/* dic, 0x1904[2:1] */
		/* dll enable */
		val = g_rtt_nom | (0x0 << 11) | (0x0 << 3) | (dic << 1) | 0x1;
		mask = (0x7 << 8) | (0x1 << 11) | (0x3 << 3) | (0x3 << 1) | 0x1;
		status = ddr3_tip_if_write(dev_num, access_type, if_id, DDR4_MR1_REG,
					   val, mask);
		if (status != MV_OK)
			return status;

		/* DDR4 MR2 */
		/* set rtt wr, 0x1908[10,9] to 0x0 */
		/* set wr crc, 0x1908[12] to 0x0 */
		/* cwl */
		val = g_rtt_wr | (0x0 << 12) | (cwl_mask_table[cwl] << 3);
		mask = (0x3 << 9) | (0x1 << 12) | (0x7 << 3);
		status = ddr3_tip_if_write(dev_num, access_type, if_id, DDR4_MR2_REG,
					   val, mask);
		if (status != MV_OK)
			return status;

		/* DDR4 MR3 */
		/* set fgrm, 0x190C[8:6] to 0x0 (or 0x1, fixed 2x refresh, at high temperature) */
		/* set gd, 0x190C[3] to 0x0 */
		refresh_mode = (tm->interface_params[if_id].interface_temp == MV_DDR_TEMP_HIGH) ? 1 : 0;

		val = (refresh_mode << 6) | (0x0 << 3);
		mask = (0x7 << 6) | (0x1 << 3);
		status = ddr3_tip_if_write(dev_num, access_type, if_id, DDR4_MR3_REG,
					   val, mask);
		if (status != MV_OK)
			return status;

		/* DDR4 MR4 */
		/*
		 * set wp, 0x1910[12] to 0x0
		 * set rp, 0x1910[11] to 0x1 (2 tck read preamble)
		 * set rp training, 0x1910[10] to 0x0
		 * set sra, 0x1910[9] to 0x0
		 * set cs2cmd, 0x1910[8:6] to 0x0
		 * set mpd, 0x1910[1] to 0x0
		 */
		mask = (0x1 << 12) | (0x1 << 11) | (0x1 << 10) | (0x1 << 9) | (0x7 << 6) | (0x1 << 1);
		val = (0x0 << 12) | (0x1 << 11) | (0x0 << 10) | (0x0 << 9) | (0x0 << 6) | (0x0 << 1);

		status = ddr3_tip_if_write(dev_num, access_type, if_id, DDR4_MR4_REG,
					   val, mask);
		if (status != MV_OK)
			return status;

		/* DDR4 MR5 */
		/*
		 * set rdbi, 0x1914[12] to 0x0 during init sequence (may be enabled with
		 * op cmd mrs - bug in z1, to be fixed in a0)
		 * set wdbi, 0x1914[11] to 0x0
		 * set dm, 0x1914[10] to 0x1
		 * set rtt park, 0x1914[8:6] per g_rtt_park
		 * set ca_pl, 0x1914[2:0] to 0x0
		 * set odt input buffer during power down mode, 0x1914[5] to 0x1
		 */
		mask = (0x1 << 12) | (0x1 << 11) | (0x1 << 10) | (0x7 << 6) | (0x1 << 5) | 0x7;
		val = (0x0 << 12) | (0x0 << 11) | (0x1 << 10) | g_rtt_park | (0x1 << 5) | 0x0;
		status = ddr3_tip_if_write(dev_num, access_type, if_id, DDR4_MR5_REG,
					   val, mask);
		if (status != MV_OK)
			return status;

		/* DDR4 MR6 */
		/*
		 * set t_ccd_l, 0x1918[12:10] to 0x0, 0x2, or 0x4 (z1 supports only even
		 * values, to be fixed in a0)
		 * set vdq te, 0x1918[7] to 0x0
		 * set vdq tv, 0x1918[5:0] to vref training value
		 */
		mask = (0x7 << 10) | (0x1 << 7) | (0x1 << 6) | 0x3f;
		val = (0x2 << 10) | (0x0 << 7) | (range << 6) | tap;
		status = ddr3_tip_if_write(dev_num, access_type, if_id, DDR4_MR6_REG,
					   val, mask);
		if (status != MV_OK)
			return status;
	}

	return MV_OK;
}

/* enter mpr read mode */
static int mv_ddr4_mpr_read_mode_enable(u8 dev_num, u32 mpr_num, u32 page_num,
				 enum mv_ddr4_mpr_read_format read_format)
{
	/*
	 * select the mpr page (MR3 [1:0]) and enable mpr operation (MR3 [2])
	 * read_format: 0 for serial, 1 for parallel, and 2 for staggered
	 * TODO: add support for cs, multicast or unicast, and if id
	 */
	int status;
	u32 val, mask, if_id = 0;

	if (page_num != 0) {
		/* serial is the only read format if the page is other than 0 */
		read_format = MV_DDR4_MPR_READ_SERIAL;
	}

	val = (page_num << 0) | (0x1 << 2) | (read_format << 11);
	mask = (0x3 << 0) | (0x1 << 2) | (0x3 << 11);

	/* cs0 */
	status = ddr3_tip_if_write(dev_num, ACCESS_TYPE_UNICAST, if_id, DDR4_MR3_REG, val, mask);
	if (status != MV_OK)
		return status;

	/* op cmd: cs0, cs1 are on, cs2, cs3 are off */
	status = ddr3_tip_if_write(dev_num, ACCESS_TYPE_UNICAST, if_id, SDRAM_OP_REG,
				   (0x9 | (0xc << 8)), (0x1f | (0xf << 8)));
	if (status != MV_OK)
		return status;

	if (ddr3_tip_if_polling(dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 0x1f, SDRAM_OP_REG,
				MAX_POLLING_ITERATIONS) != MV_OK) {
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, ("mv_ddr4_mpr_read_mode_enable: DDR3 poll failed(MPR3)\n"));
	}

	return MV_OK;
}

/* exit mpr read or write mode */
static int mv_ddr4_mpr_mode_disable(u8 dev_num)
{
	/* TODO: add support for cs, multicast or unicast, and if id */
	int status;
	u32 val, mask, if_id = 0;

	/* exit mpr */
	val = 0x0 << 2;
	mask = 0x1 << 2;
	/* cs0 */
	status = ddr3_tip_if_write(dev_num, ACCESS_TYPE_UNICAST, if_id, DDR4_MR3_REG, val, mask);
	if (status != MV_OK)
		return status;

	/* op cmd: cs0, cs1 are on, cs2, cs3 are off */
	status = ddr3_tip_if_write(dev_num, ACCESS_TYPE_UNICAST, if_id, SDRAM_OP_REG,
				   (0x9 | (0xc << 8)), (0x1f | (0xf << 8)));
	if (status != MV_OK)
		return status;

	if (ddr3_tip_if_polling(dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 0x1f, SDRAM_OP_REG,
				MAX_POLLING_ITERATIONS) != MV_OK) {
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, ("mv_ddr4_mpr_mode_disable: DDR3 poll failed(MPR3)\n"));
	}

	return MV_OK;
}

/* translate dq read value per dram dq pin */
static int mv_ddr4_dq_decode(u8 dev_num, u32 *data)
{
	u32 subphy_num, dq_num;
	u32 dq_val = 0, raw_data, idx;
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	u32 subphy_max = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);

	/* suppose the third word is stable */
	raw_data = data[2];

	/* skip ecc subphy; TODO: check to add support for ecc */
	if (subphy_max % 2)
		subphy_max -= 1;

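	/*
	 * dram_to_mc_dq_map[subphy][dram dq] holds the mc-side dq bit (within
	 * the subphy) that the given dram dq pin is wired to; the loop below
	 * picks each dram dq's value from its mc bit position in raw_data and
	 * places it at the dram-ordered bit position of the decoded word.
	 */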
	for (subphy_num = 0; subphy_num < subphy_max; subphy_num++) {
		VALIDATE_BUS_ACTIVE(tm->bus_act_mask, subphy_num);
		for (dq_num = 0; dq_num < BUS_WIDTH_IN_BITS; dq_num++) {
			idx = (dram_to_mc_dq_map[subphy_num][dq_num] + (subphy_num * BUS_WIDTH_IN_BITS));
			dq_val |= (((raw_data & (1 << idx)) >> idx) << ((subphy_num * BUS_WIDTH_IN_BITS) + dq_num));
		}
	}

	/* update burst words[0..7] with correct mapping */
	for (idx = 0; idx < EXT_ACCESS_BURST_LENGTH; idx++)
		data[idx] = dq_val;

	return MV_OK;
}

/*
 * read mpr value per requested format and type
 * note: for parallel decoded read, data is presented as stored in mpr on dram side,
 *	for all others, data is presented "as is" (i.e. per dq order from high to low
 *	and bus pins connectivity).
 */
int mv_ddr4_mpr_read(u8 dev_num, u32 mpr_num, u32 page_num,
		      enum mv_ddr4_mpr_read_format read_format,
		      enum mv_ddr4_mpr_read_type read_type,
		      u32 *data)
{
	/* TODO: add support for multiple if_id, dev num, and cs */
	u32 word_idx, if_id = 0;
	volatile unsigned long *addr = NULL;

	/* enter mpr read mode */
	mv_ddr4_mpr_read_mode_enable(dev_num, mpr_num, page_num, read_format);

	/* set pattern type */
	ddr3_tip_if_write(dev_num, ACCESS_TYPE_UNICAST, if_id, DDR4_MPR_WR_REG,
			  mpr_num << 8, 0x3 << 8);

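	/*
	 * while mpr read mode is enabled, array reads return the mpr contents
	 * regardless of the address accessed, so the burst below is simply
	 * fetched starting at dram address 0x0.
	 */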
	for (word_idx = 0; word_idx < EXT_ACCESS_BURST_LENGTH; word_idx++) {
		data[word_idx] = *addr;
		DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("mv_ddr4_mpr_read: addr 0x%08lx, data 0x%08x\n",
						     (unsigned long)addr, data[word_idx]));
		addr++;
	}

	/* exit mpr read mode */
	mv_ddr4_mpr_mode_disable(dev_num);

	/* decode mpr read value (only parallel mode supported) */
	if ((read_type == MV_DDR4_MPR_READ_DECODED) && (read_format == MV_DDR4_MPR_READ_PARALLEL)) {
		if (dq_map_enable == 1) {
			mv_ddr4_dq_decode(dev_num, data);
		} else {
			DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, ("mv_ddr4_mpr_read: run mv_ddr4_dq_pins_mapping()\n"));
			return MV_FAIL;
		}
	}

	return MV_OK;
}

/* enter mpr write mode */
static int mv_ddr4_mpr_write_mode_enable(u8 dev_num, u32 mpr_location, u32 page_num, u32 data)
{
	/*
	 * select the mpr page (MR3 [1:0]) and enable mpr operation (MR3 [2])
	 * TODO: add support for cs, multicast or unicast, and if id
	 */
	int status;
	u32 if_id = 0, val = 0, mask;

	val = (page_num << 0) | (0x1 << 2);
	mask = (0x3 << 0) | (0x1 << 2);
	/* cs0 */
	status = ddr3_tip_if_write(dev_num, ACCESS_TYPE_UNICAST, if_id, DDR4_MR3_REG, val, mask);
	if (status != MV_OK)
		return status;

	/* cs0 */
	status = ddr3_tip_if_write(dev_num, ACCESS_TYPE_UNICAST, if_id, DDR4_MPR_WR_REG,
				   (mpr_location << 8) | data, 0x3ff);
	if (status != MV_OK)
		return status;

	/* op cmd: cs0, cs1 are on, cs2, cs3 are off */
	status = ddr3_tip_if_write(dev_num, ACCESS_TYPE_UNICAST, if_id, SDRAM_OP_REG,
				   (0x13 | (0xc << 8)), (0x1f | (0xf << 8)));
	if (status != MV_OK)
		return status;

	if (ddr3_tip_if_polling(dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 0x1f, SDRAM_OP_REG,
				MAX_POLLING_ITERATIONS) != MV_OK) {
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, ("mv_ddr4_mpr_write_mode_enable: DDR3 poll failed(MPR3)\n"));
	}

	return MV_OK;
}

/* write mpr value */
int mv_ddr4_mpr_write(u8 dev_num, u32 mpr_location, u32 mpr_num, u32 page_num, u32 data)
{
	/* enter mpr write mode */
	mv_ddr4_mpr_write_mode_enable(dev_num, mpr_location, page_num, data);

	/* TODO: implement this function */

	/* TODO: exit mpr write mode */

	return MV_OK;
}

/*
 * map physical on-board connection of dram dq pins to ddr4 controller pins
 * note: supports only 32b width
 * TODO: add support for 64-bit bus width and ecc subphy
 */
int mv_ddr4_dq_pins_mapping(u8 dev_num)
{
	static int run_once;
	u8 dq_val[MAX_BUS_NUM][BUS_WIDTH_IN_BITS] = { {0} };
	u32 mpr_pattern[MV_DDR4_MPR_READ_PATTERN_NUM][EXT_ACCESS_BURST_LENGTH] = { {0} };
	u32 subphy_num, dq_num, mpr_type;
	u8 subphy_pattern[3];
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	u32 subphy_max = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);

	if (run_once)
		return MV_OK;
	else
		run_once++;

	/* clear dq mapping */
	memset(dram_to_mc_dq_map, 0, sizeof(dram_to_mc_dq_map));

	/* stage 1: read page 0 mpr0..2 raw patterns */
	for (mpr_type = 0; mpr_type < MV_DDR4_MPR_READ_PATTERN_NUM; mpr_type++)
		mv_ddr4_mpr_read(dev_num, mpr_type, 0, MV_DDR4_MPR_READ_PARALLEL,
				 MV_DDR4_MPR_READ_RAW, mpr_pattern[mpr_type]);

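	/*
	 * page 0 mpr0..2 power up to the jedec default patterns 0x55, 0x33
	 * and 0x0f, so the three pattern bits observed on any single dq line
	 * form a 3-bit code that is unique per dq position; stages 2 and 3
	 * below use this code to recover the dram-to-mc dq wiring.
	 */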
	/* stage 2: map every dq for each subphy to 3-bit value, create local database */
	/* skip ecc subphy; TODO: check to add support for ecc */
	if (subphy_max % 2)
		subphy_max -= 1;

	for (subphy_num = 0; subphy_num < subphy_max; subphy_num++) {
		VALIDATE_BUS_ACTIVE(tm->bus_act_mask, subphy_num);
		/* extract pattern for each subphy */
		for (mpr_type = 0; mpr_type < MV_DDR4_MPR_READ_PATTERN_NUM; mpr_type++)
			subphy_pattern[mpr_type] = ((mpr_pattern[mpr_type][2] >> (subphy_num * 8)) & 0xff);

		for (dq_num = 0; dq_num < BUS_WIDTH_IN_BITS; dq_num++)
			for (mpr_type = 0; mpr_type < MV_DDR4_MPR_READ_PATTERN_NUM; mpr_type++)
				dq_val[subphy_num][dq_num] += (((subphy_pattern[mpr_type] >> dq_num) & 1) *
							       (1 << mpr_type));
	}

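	/*
	 * stage 3 relies on the fact that, with the default patterns, the code
	 * seen on mc dq n equals 7 minus the index of the dram dq pin driving
	 * it, hence the (7 - dq_val) index below.
	 */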
	/* stage 3: map dram dq to mc dq and update database */
	for (subphy_num = 0; subphy_num < subphy_max; subphy_num++) {
		VALIDATE_BUS_ACTIVE(tm->bus_act_mask, subphy_num);
		for (dq_num = 0; dq_num < BUS_WIDTH_IN_BITS; dq_num++)
			dram_to_mc_dq_map[subphy_num][7 - dq_val[subphy_num][dq_num]] = dq_num;
	}

	/* set dq_map_enable */
	dq_map_enable = 1;

	return MV_OK;
}

/* enter to or exit from dram vref training mode */
int mv_ddr4_vref_training_mode_ctrl(u8 dev_num, u8 if_id, enum hws_access_type access_type, int enable)
{
	int status;
	u32 val, mask;

	/* DDR4 MR6 */
	/*
	 * set t_ccd_l, 0x1918[12:10] to 0x0, 0x2, or 0x4 (z1 supports only even
	 * values, to be fixed in a0)
	 * set vdq te, 0x1918[7] to 0x0 to exit or 0x1 to enter vref training mode
	 * set vdq tv, 0x1918[5:0] to vref training value
	 */

	val = (((enable == 1) ? 1 : 0) << 7);
	mask = (0x1 << 7);
	status = ddr3_tip_if_write(dev_num, access_type, if_id, DDR4_MR6_REG, val, mask);
	if (status != MV_OK)
		return status;

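	/*
	 * as the "cs0, cs1 are on" comments elsewhere in this file indicate,
	 * the cs field in SDRAM_OP_REG [11:8] is active low, so 0xe targets
	 * cs0 only and 0xd targets cs1 only.
	 */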
	/* write DDR4 MR6 cs configuration; only cs0, cs1 supported */
	if (effective_cs == 0)
		val = 0xe;
	else
		val = 0xd;
	val <<= 8;
	/* write DDR4 MR6 command */
	val |= 0x12;
	mask = (0xf << 8) | 0x1f;
	status = ddr3_tip_if_write(dev_num, access_type, if_id, SDRAM_OP_REG, val, mask);
	if (status != MV_OK)
		return status;

	if (ddr3_tip_if_polling(dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 0x1f, SDRAM_OP_REG,
				MAX_POLLING_ITERATIONS) != MV_OK) {
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, ("mv_ddr4_vref_training_mode_ctrl: Polling command failed\n"));
	}

	return MV_OK;
}

/* set dram vref tap value */
int mv_ddr4_vref_tap_set(u8 dev_num, u8 if_id, enum hws_access_type access_type,
			 u32 taps_num, enum mv_ddr4_vref_tap_state state)
{
	int status;
	u32 range, vdq_tv;

	/* disable and then enable the training with a new range */
	if ((state == MV_DDR4_VREF_TAP_BUSY) && ((taps_num + MV_DDR4_VREF_STEP_SIZE) >= 23) &&
	    (taps_num < 23))
		state = MV_DDR4_VREF_TAP_FLIP;

	if (taps_num < 23) {
		range = 1;
		vdq_tv = taps_num;
	} else {
		range = 0;
		vdq_tv = taps_num - 23;
	}
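	/*
	 * taps_num is a single index across both vref ranges: taps 0..22 use
	 * the lower range (mr6 range bit = 1, 45.00% base) and taps 23 and up
	 * continue in the upper range (range bit = 0, 60.00% base), so 23 is
	 * the crossover at which training is restarted with the new range
	 * (the MV_DDR4_VREF_TAP_FLIP case).
	 */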

	if ((state == MV_DDR4_VREF_TAP_FLIP) || (state == MV_DDR4_VREF_TAP_START)) {
		/* 0 to disable */
		status = mv_ddr4_vref_set(dev_num, if_id, access_type, range, vdq_tv, 0);
		if (status != MV_OK)
			return status;
		/* 1 to enable */
		status = mv_ddr4_vref_set(dev_num, if_id, access_type, range, vdq_tv, 1);
		if (status != MV_OK)
			return status;
	} else if (state == MV_DDR4_VREF_TAP_END) {
		/* 1 to enable */
		status = mv_ddr4_vref_set(dev_num, if_id, access_type, range, vdq_tv, 1);
		if (status != MV_OK)
			return status;
		/* 0 to disable */
		status = mv_ddr4_vref_set(dev_num, if_id, access_type, range, vdq_tv, 0);
		if (status != MV_OK)
			return status;
	} else {
		/* 1 to enable */
		status = mv_ddr4_vref_set(dev_num, if_id, access_type, range, vdq_tv, 1);
		if (status != MV_OK)
			return status;
	}

	return MV_OK;
}

/* set dram vref value */
int mv_ddr4_vref_set(u8 dev_num, u8 if_id, enum hws_access_type access_type,
		     u32 range, u32 vdq_tv, u8 vdq_training_ena)
{
	int status;
	u32 read_data;
	u32 val, mask;

	DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("mv_ddr4_vref_set: range %d, vdq_tv %d\n", range, vdq_tv));

	/* DDR4 MR6 */
	/*
	 * set t_ccd_l, 0x1918[12:10] to 0x0, 0x2, or 0x4 (z1 supports only even
	 * values, to be fixed in a0)
	 * set vdq te (training enable), 0x1918[7] to 0x0 to disable or 0x1 to enable
	 * set vdq tr (training range), 0x1918[6] per the requested range
	 * set vdq tv (training value), 0x1918[5:0] to vref training value
	 */
	val = (vdq_training_ena << 7) | (range << 6) | vdq_tv;
	mask = (0x1 << 7) | (0x1 << 6) | 0x3f;

	status = ddr3_tip_if_write(dev_num, access_type, if_id, DDR4_MR6_REG, val, mask);
	if (status != MV_OK)
		return status;

	ddr3_tip_if_read(dev_num, access_type, if_id, DDR4_MR6_REG, &read_data, 0xffffffff);
	DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO, ("mv_ddr4_vref_set: MR6 = 0x%x\n", read_data));

	/* write DDR4 MR6 cs configuration; only cs0, cs1 supported */
	if (effective_cs == 0)
		val = 0xe;
	else
		val = 0xd;
	val <<= 8;
	/* write DDR4 MR6 command */
	val |= 0x12;
	mask = (0xf << 8) | 0x1f;
	status = ddr3_tip_if_write(dev_num, access_type, if_id, SDRAM_OP_REG, val, mask);
	if (status != MV_OK)
		return status;

	if (ddr3_tip_if_polling(dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 0x1f, SDRAM_OP_REG,
				MAX_POLLING_ITERATIONS) != MV_OK) {
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, ("mv_ddr4_vref_set: Polling command failed\n"));
	}

	return MV_OK;
}

/* pda - load pattern to odpg */
int mv_ddr4_pda_pattern_odpg_load(u32 dev_num, enum hws_access_type access_type,
				  u32 if_id, u32 subphy_mask, u32 cs_num)
{
	int status;
	u32 pattern_len_count = 0;
	u32 data_low[KILLER_PATTERN_LENGTH] = {0};
	u32 data_high[KILLER_PATTERN_LENGTH] = {0};
	u32 val, mask, subphy_num;

	/*
	 * set 0x1630[10:5] bits to 0x3 (0x1 for 16-bit bus width)
	 * set 0x1630[14:11] bits to 0x3 (0x1 for 16-bit bus width)
	 */
	val = (cs_num << 26) | (0x1 << 25) | (0x3 << 11) | (0x3 << 5) | 0x1;
	mask = (0x3 << 26) | (0x1 << 25) | (0x3f << 11) | (0x3f << 5) | 0x1;
	status = ddr3_tip_if_write(dev_num, access_type, if_id, ODPG_DATA_CTRL_REG, val, mask);
	if (status != MV_OK)
		return status;

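	/*
	 * in pda mode a dram device latches an mrs command only while its dq0
	 * is driven low, so subphys excluded from subphy_mask are given an
	 * all-ones data pattern (dq lines kept high) and ignore the command,
	 * while the selected subphys drive zeros.
	 */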
	if (subphy_mask != 0xf) {
		for (subphy_num = 0; subphy_num < 4; subphy_num++)
			if (((subphy_mask >> subphy_num) & 0x1) == 0)
				data_low[0] = (data_low[0] | (0xff << (subphy_num * 8)));
	} else {
		data_low[0] = 0;
	}

	for (pattern_len_count = 0; pattern_len_count < 4; pattern_len_count++) {
		data_low[pattern_len_count] = data_low[0];
		data_high[pattern_len_count] = data_low[0];
	}

	for (pattern_len_count = 0; pattern_len_count < 4; pattern_len_count++) {
		status = ddr3_tip_if_write(dev_num, access_type, if_id, ODPG_DATA_WR_DATA_LOW_REG,
					   data_low[pattern_len_count], MASK_ALL_BITS);
		if (status != MV_OK)
			return status;

		status = ddr3_tip_if_write(dev_num, access_type, if_id, ODPG_DATA_WR_DATA_HIGH_REG,
					   data_high[pattern_len_count], MASK_ALL_BITS);
		if (status != MV_OK)
			return status;

		status = ddr3_tip_if_write(dev_num, access_type, if_id, ODPG_DATA_WR_ADDR_REG,
					   pattern_len_count, MASK_ALL_BITS);
		if (status != MV_OK)
			return status;
	}

	status = ddr3_tip_if_write(dev_num, access_type, if_id, ODPG_DATA_BUFFER_OFFS_REG,
				   0x0, MASK_ALL_BITS);
	if (status != MV_OK)
		return status;

	return MV_OK;
}

/* enable or disable pda */
int mv_ddr4_pda_ctrl(u8 dev_num, u8 if_id, u8 cs_num, int enable)
{
	/*
	 * if enable is 0, pda mode is exited
	 * the mrs is to be directed to all dram devices
	 * the calling function is responsible for changing the odpg back to 0x0
	 */
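	/*
	 * mr3 bit [4] is the per dram addressability (pda) enable; while it is
	 * set, mrs commands are accepted only by devices whose dq0 is held low
	 * (see mv_ddr4_pda_pattern_odpg_load() above).
	 */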

	int status;
	enum hws_access_type access_type = ACCESS_TYPE_UNICAST;
	u32 val, mask;

	/* per dram addressability enable */
	val = ((enable == 1) ? 1 : 0);
	val <<= 4;
	mask = 0x1 << 4;
	status = ddr3_tip_if_write(dev_num, access_type, if_id, DDR4_MR3_REG, val, mask);
	if (status != MV_OK)
		return status;

	/* write DDR4 MR3 cs configuration; only cs0, cs1 supported */
	if (cs_num == 0)
		val = 0xe;
	else
		val = 0xd;
	val <<= 8;
	/* write DDR4 MR3 command */
	val |= 0x9;
	mask = (0xf << 8) | 0x1f;
	status = ddr3_tip_if_write(dev_num, access_type, if_id, SDRAM_OP_REG, val, mask);
	if (status != MV_OK)
		return status;

	if (enable == 0) {
		/* check odpg access is done */
		if (mv_ddr_is_odpg_done(MAX_POLLING_ITERATIONS) != MV_OK)
			return MV_FAIL;
	}

	if (ddr3_tip_if_polling(dev_num, ACCESS_TYPE_UNICAST, if_id, 0, 0x1f, SDRAM_OP_REG,
				MAX_POLLING_ITERATIONS) != MV_OK)
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, ("mv_ddr4_pda_ctrl: Polling command failed\n"));

	return MV_OK;
}
#endif /* CONFIG_DDR4 */