// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Marvell International Ltd. and its affiliates
 */

#include "ddr3_init.h"
#include "mv_ddr_common.h"
#include "xor_regs.h"

/* defines */
#ifdef MV_DEBUG
#define DB(x)	x
#else
#define DB(x)
#endif

static u32 ui_xor_regs_ctrl_backup;
static u32 ui_xor_regs_base_backup[MAX_CS_NUM + 1];
static u32 ui_xor_regs_mask_backup[MAX_CS_NUM + 1];

void mv_sys_xor_init(u32 num_of_cs, u32 cs_ena, uint64_t cs_size, u32 base_delta)
{
	u32 reg, ui, cs_count;
	uint64_t base, size_mask;

	ui_xor_regs_ctrl_backup = reg_read(XOR_WINDOW_CTRL_REG(0, 0));
	for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
		ui_xor_regs_base_backup[ui] =
			reg_read(XOR_BASE_ADDR_REG(0, ui));
	for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
		ui_xor_regs_mask_backup[ui] =
			reg_read(XOR_SIZE_MASK_REG(0, ui));

	reg = 0;
	for (ui = 0, cs_count = 0;
	     (cs_count < num_of_cs) && (ui < 8);
	     ui++, cs_count++) {
		if (cs_ena & (1 << ui)) {
			/* Enable Window x for each CS */
			reg |= (0x1 << (ui));
			/* Set full (read/write) access for Window x */
			reg |= (0x3 << ((ui * 2) + 16));
		}
	}

	reg_write(XOR_WINDOW_CTRL_REG(0, 0), reg);

	for (ui = 0, cs_count = 0;
	     (cs_count < num_of_cs) && (ui < 8);
	     ui++, cs_count++) {
		if (cs_ena & (1 << ui)) {
			/*
			 * window x - Base - 0x00000000,
			 * Attribute 0x0e - DRAM
			 */
			base = cs_size * ui + base_delta;
			/* fixed size 2GB for each CS */
			size_mask = 0x7FFF0000;
			switch (ui) {
			case 0:
				base |= 0xe00;
				break;
			case 1:
				base |= 0xd00;
				break;
			case 2:
				base |= 0xb00;
				break;
			case 3:
				base |= 0x700;
				break;
			case 4: /* SRAM */
				base = 0x40000000;
				/* configure as shared transaction */
				base |= 0x1F00;
				size_mask = 0xF0000;
				break;
			}

			reg_write(XOR_BASE_ADDR_REG(0, ui), (u32)base);
			size_mask = (cs_size / _64K) - 1;
			size_mask = (size_mask << XESMRX_SIZE_MASK_OFFS) &
				    XESMRX_SIZE_MASK_MASK;
			/* window x - Size */
			reg_write(XOR_SIZE_MASK_REG(0, ui), (u32)size_mask);
		}
	}

	mv_xor_hal_init(1);
}
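
/*
 * Example (illustrative only, not compiled): map two 1 GB chip selects for
 * XOR engine 0, as the ECC scrub flow below does. The per-window size mask
 * is derived from the chip-select size, e.g. for a 1 GB CS:
 * (0x40000000 / _64K) - 1 = 0x3fff, shifted into the XESMRX size-mask field.
 *
 *	mv_sys_xor_init(2, 0x3, 0x40000000ULL, 0);
 *	...			(use the XOR engine)
 *	mv_sys_xor_finish();	(restore the saved window registers)
 */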

void mv_sys_xor_finish(void)
{
	u32 ui;

	reg_write(XOR_WINDOW_CTRL_REG(0, 0), ui_xor_regs_ctrl_backup);
	for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
		reg_write(XOR_BASE_ADDR_REG(0, ui),
			  ui_xor_regs_base_backup[ui]);
	for (ui = 0; ui < MAX_CS_NUM + 1; ui++)
		reg_write(XOR_SIZE_MASK_REG(0, ui),
			  ui_xor_regs_mask_backup[ui]);

	reg_write(XOR_ADDR_OVRD_REG(0, 0), 0);
}

/*
 * mv_xor_hal_init - Initialize the XOR engine
 *
 * DESCRIPTION:
 *       This function aborts any activity on the given XOR channels and
 *       programs their default configuration.
 *
 * INPUT:
 *       xor_chan_num - number of XOR channels to initialize.
 *
 * OUTPUT:
 *       None.
 *
 * RETURN:
 *       None.
 */
void mv_xor_hal_init(u32 xor_chan_num)
{
	u32 i;

	/* Abort any XOR activity & set default configuration */
	for (i = 0; i < xor_chan_num; i++) {
		mv_xor_command_set(i, MV_STOP);
		mv_xor_ctrl_set(i, (1 << XEXCR_REG_ACC_PROTECT_OFFS) |
				(4 << XEXCR_DST_BURST_LIMIT_OFFS) |
				(4 << XEXCR_SRC_BURST_LIMIT_OFFS));
	}
}

/*
 * mv_xor_ctrl_set - Set XOR channel control register
 *
 * DESCRIPTION:
 *       This function writes the given control value to the channel's
 *       configuration register.
 *
 * INPUT:
 *       chan     - the channel number
 *       xor_ctrl - the new control value
 *
 * OUTPUT:
 *       None.
 *
 * RETURN:
 *       MV_OK.
 * NOTE:
 *  This function does not modify the Operation_mode field of the control
 *  register.
 */
int mv_xor_ctrl_set(u32 chan, u32 xor_ctrl)
{
	u32 old_value;

	/* update the XOR Engine [0..1] Configuration Registers (XEx_c_r) */
	old_value = reg_read(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan))) &
		XEXCR_OPERATION_MODE_MASK;
	xor_ctrl &= ~XEXCR_OPERATION_MODE_MASK;
	xor_ctrl |= old_value;
	reg_write(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)), xor_ctrl);

	return MV_OK;
}

int mv_xor_mem_init(u32 chan, u32 start_ptr, unsigned long long block_size,
		    u32 init_val_high, u32 init_val_low)
{
	u32 temp;

	if (block_size == _4G)
		block_size -= 1;

	/* Parameter checking */
	if (chan >= MV_XOR_MAX_CHAN)
		return MV_BAD_PARAM;

	if (MV_ACTIVE == mv_xor_state_get(chan))
		return MV_BUSY;

	if ((block_size < XEXBSR_BLOCK_SIZE_MIN_VALUE) ||
	    (block_size > XEXBSR_BLOCK_SIZE_MAX_VALUE))
		return MV_BAD_PARAM;

	/* set the operation mode to Memory Init */
	temp = reg_read(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)));
	temp &= ~XEXCR_OPERATION_MODE_MASK;
	temp |= XEXCR_OPERATION_MODE_MEM_INIT;
	reg_write(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)), temp);

	/*
	 * update the start_ptr field in XOR Engine [0..1] Destination Pointer
	 * Register
	 */
	reg_write(XOR_DST_PTR_REG(XOR_UNIT(chan), XOR_CHAN(chan)), start_ptr);

	/*
	 * update the Block_size field in the XOR Engine[0..1] Block Size
	 * Registers
	 */
	reg_write(XOR_BLOCK_SIZE_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
		  block_size);

	/*
	 * update the field Init_val_l in the XOR Engine Initial Value Register
	 * Low (XEIVRL)
	 */
	reg_write(XOR_INIT_VAL_LOW_REG(XOR_UNIT(chan)), init_val_low);

	/*
	 * update the field Init_val_h in the XOR Engine Initial Value Register
	 * High (XEIVRH)
	 */
	reg_write(XOR_INIT_VAL_HIGH_REG(XOR_UNIT(chan)), init_val_high);

	/* start transfer */
	reg_bit_set(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
		    XEXACTR_XESTART_MASK);

	return MV_OK;
}
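
/*
 * Example (illustrative only, not compiled): fill the first 1 MB of DRAM
 * with the 64-bit pattern 0xdeadbeefdeadbeef on channel 0 and poll for
 * completion, mirroring the ECC scrub flow below.
 *
 *	if (mv_xor_mem_init(0, 0, _1M, 0xdeadbeef, 0xdeadbeef) == MV_OK)
 *		while (mv_xor_state_get(0) != MV_IDLE)
 *			;
 */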

/*
 * mv_xor_state_get - Get XOR channel state.
 *
 * DESCRIPTION:
 *       XOR channel activity state can be active, idle or paused.
 *       This function returns the channel activity state.
 *
 * INPUT:
 *       chan     - the channel number
 *
 * OUTPUT:
 *       None.
 *
 * RETURN:
 *       MV_IDLE            - If the engine is idle.
 *       MV_ACTIVE          - If the engine is busy.
 *       MV_PAUSED          - If the engine is paused.
 *       MV_UNDEFINED_STATE - If the engine state is undefined or there is no
 *                            such engine.
 */
enum mv_state mv_xor_state_get(u32 chan)
{
	u32 state;

	/* Parameter checking */
	if (chan >= MV_XOR_MAX_CHAN) {
		DB(printf("%s: ERR. Invalid chan num %d\n", __func__, chan));
		return MV_UNDEFINED_STATE;
	}

	/* read the current state */
	state = reg_read(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)));
	state &= XEXACTR_XESTATUS_MASK;

	/* return the state */
	switch (state) {
	case XEXACTR_XESTATUS_IDLE:
		return MV_IDLE;
	case XEXACTR_XESTATUS_ACTIVE:
		return MV_ACTIVE;
	case XEXACTR_XESTATUS_PAUSED:
		return MV_PAUSED;
	}

	return MV_UNDEFINED_STATE;
}
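
/*
 * Example (illustrative only, not compiled): bounded wait for channel 0 to
 * become idle, instead of the open-ended polling used by the scrub flow
 * below; the retry count is an arbitrary illustration.
 *
 *	u32 retries = 1000000;
 *
 *	while ((mv_xor_state_get(0) != MV_IDLE) && (--retries != 0))
 *		;
 */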

/*
 * mv_xor_command_set - Set command of XOR channel
 *
 * DESCRIPTION:
 *       XOR channel can be started, stopped, paused and restarted.
 *       Pause can be set only if the channel is active.
 *       Start can be set only if the channel is idle.
 *       Restart can be set only if the channel is paused.
 *       Stop can be set only if the channel is active.
 *
 * INPUT:
 *       chan     - The channel number
 *       command  - The command type (start, stop, restart, pause)
 *
 * OUTPUT:
 *       None.
 *
 * RETURN:
 *       MV_OK on success, MV_BAD_PARAM on an erroneous parameter or an
 *       illegal command/state combination.
 */
int mv_xor_command_set(u32 chan, enum mv_command command)
{
	enum mv_state state;

	/* Parameter checking */
	if (chan >= MV_XOR_MAX_CHAN) {
		DB(printf("%s: ERR. Invalid chan num %d\n", __func__, chan));
		return MV_BAD_PARAM;
	}

	/* get the current state */
	state = mv_xor_state_get(chan);

	if ((command == MV_START) && (state == MV_IDLE)) {
		/* command is start and current state is idle */
		reg_bit_set(XOR_ACTIVATION_REG
			    (XOR_UNIT(chan), XOR_CHAN(chan)),
			    XEXACTR_XESTART_MASK);
		return MV_OK;
	} else if ((command == MV_STOP) && (state == MV_ACTIVE)) {
		/* command is stop and current state is active */
		reg_bit_set(XOR_ACTIVATION_REG
			    (XOR_UNIT(chan), XOR_CHAN(chan)),
			    XEXACTR_XESTOP_MASK);
		return MV_OK;
	} else if (((enum mv_state)command == MV_PAUSED) &&
		   (state == MV_ACTIVE)) {
		/* command is pause and current state is active */
		reg_bit_set(XOR_ACTIVATION_REG
			    (XOR_UNIT(chan), XOR_CHAN(chan)),
			    XEXACTR_XEPAUSE_MASK);
		return MV_OK;
	} else if ((command == MV_RESTART) && (state == MV_PAUSED)) {
		/* command is restart and current state is paused */
		reg_bit_set(XOR_ACTIVATION_REG
			    (XOR_UNIT(chan), XOR_CHAN(chan)),
			    XEXACTR_XERESTART_MASK);
		return MV_OK;
	} else if ((command == MV_STOP) && (state == MV_IDLE)) {
		/* command is stop and current state is already idle */
		return MV_OK;
	}

	/* illegal command */
	DB(printf("%s: ERR. Illegal command\n", __func__));

	return MV_BAD_PARAM;
}
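
/*
 * Example (illustrative only, not compiled): stop channel 0 if it is active,
 * then start it again after reprogramming; mv_xor_command_set() rejects
 * command/state combinations other than those listed above.
 *
 *	if (mv_xor_state_get(0) == MV_ACTIVE)
 *		mv_xor_command_set(0, MV_STOP);
 *	...					(reprogram the channel)
 *	mv_xor_command_set(0, MV_START);	(accepted only when idle)
 */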

void ddr3_new_tip_ecc_scrub(void)
{
	u32 cs_c, max_cs;
	u32 cs_ena = 0;
	uint64_t total_mem_size, cs_mem_size_mb = 0, cs_mem_size = 0;

	printf("DDR Training Sequence - Start scrubbing\n");
	max_cs = mv_ddr_cs_num_get();
	for (cs_c = 0; cs_c < max_cs; cs_c++)
		cs_ena |= 1 << cs_c;

	/* all chip selects are of the same size */
	ddr3_calc_mem_cs_size(0, &cs_mem_size_mb);
	cs_mem_size = cs_mem_size_mb * _1M;
	mv_sys_xor_init(max_cs, cs_ena, cs_mem_size, 0);
	total_mem_size = max_cs * cs_mem_size;
	mv_xor_mem_init(0, 0, total_mem_size, 0xdeadbeef, 0xdeadbeef);
	/* wait for the scrub transfer to complete */
	while (mv_xor_state_get(0) != MV_IDLE)
		;
	/* restore the XOR windows and configuration */
	mv_sys_xor_finish();

	printf("DDR3 Training Sequence - End scrubbing\n");
}

/*
 * mv_xor_transfer - Transfer data from source to destination in one of
 *		     three modes: XOR, CRC32 or DMA
 *
 * DESCRIPTION:
 *	This function initiates an XOR channel, according to the function
 *	parameters, in order to perform an XOR, CRC32 or DMA transaction.
 *	To gain maximum performance the caller must observe the following
 *	restrictions:
 *	1) The selected engine is available (not busy).
 *	2) This module does not take CPU MMU issues into consideration.
 *	   In order for the XOR engine to access the appropriate source
 *	   and destination, address parameters must be given as system
 *	   physical addresses.
 *	3) This API does not take care of cache coherency issues. The source,
 *	   destination and, in case of chain, the descriptor list are assumed
 *	   to be cache coherent.
 *	4) Parameters must be valid.
 *
 * INPUT:
 *	chan		- XOR channel number.
 *	type		- one of three: XOR, CRC32 or DMA operation.
 *	xor_chain_ptr	- address of the chain pointer.
 *
 * OUTPUT:
 *	None.
 *
 * RETURN:
 *	MV_BAD_PARAM on an invalid parameter, MV_BUSY if the channel is
 *	already active, MV_OK otherwise.
 */
int mv_xor_transfer(u32 chan, enum xor_type type, u32 xor_chain_ptr)
{
	u32 temp;

	/* Parameter checking */
	if (chan >= MV_XOR_MAX_CHAN) {
		DB(printf("%s: ERR. Invalid chan num %d\n", __func__, chan));
		return MV_BAD_PARAM;
	}
	if (mv_xor_state_get(chan) == MV_ACTIVE) {
		DB(printf("%s: ERR. Channel is already active\n", __func__));
		return MV_BUSY;
	}
	if (xor_chain_ptr == 0x0) {
		DB(printf("%s: ERR. xor_chain_ptr is NULL pointer\n",
			  __func__));
		return MV_BAD_PARAM;
	}

	/* read configuration register and mask the operation mode field */
	temp = reg_read(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)));
	temp &= ~XEXCR_OPERATION_MODE_MASK;

	switch (type) {
	case MV_XOR:
		if ((xor_chain_ptr & XEXDPR_DST_PTR_XOR_MASK) != 0) {
			DB(printf("%s: ERR. Invalid chain pointer (bits [5:0] must be cleared)\n",
				  __func__));
			return MV_BAD_PARAM;
		}
		/* set the operation mode to XOR */
		temp |= XEXCR_OPERATION_MODE_XOR;
		break;
	case MV_DMA:
		if ((xor_chain_ptr & XEXDPR_DST_PTR_DMA_MASK) != 0) {
			DB(printf("%s: ERR. Invalid chain pointer (bits [4:0] must be cleared)\n",
				  __func__));
			return MV_BAD_PARAM;
		}
		/* set the operation mode to DMA */
		temp |= XEXCR_OPERATION_MODE_DMA;
		break;
	case MV_CRC32:
		if ((xor_chain_ptr & XEXDPR_DST_PTR_CRC_MASK) != 0) {
			DB(printf("%s: ERR. Invalid chain pointer (bits [4:0] must be cleared)\n",
				  __func__));
			return MV_BAD_PARAM;
		}
		/* set the operation mode to CRC32 */
		temp |= XEXCR_OPERATION_MODE_CRC;
		break;
	default:
		return MV_BAD_PARAM;
	}

	/* write the operation mode to the register */
	reg_write(XOR_CONFIG_REG(XOR_UNIT(chan), XOR_CHAN(chan)), temp);
	/*
	 * update the NextDescPtr field in the XOR Engine [0..1] Next
	 * Descriptor Pointer Register (XExNDPR)
	 */
	reg_write(XOR_NEXT_DESC_PTR_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
		  xor_chain_ptr);

	/* start transfer */
	reg_bit_set(XOR_ACTIVATION_REG(XOR_UNIT(chan), XOR_CHAN(chan)),
		    XEXACTR_XESTART_MASK);

	return MV_OK;
}
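
/*
 * Example (illustrative only, not compiled): start a DMA transfer described
 * by a previously prepared descriptor chain and wait for it to finish.
 * 'desc_phys' is a hypothetical physical address of the first descriptor;
 * per the checks above, bits [4:0] must be zero for MV_DMA/MV_CRC32 and
 * bits [5:0] for MV_XOR.
 *
 *	if (mv_xor_transfer(0, MV_DMA, desc_phys) == MV_OK)
 *		while (mv_xor_state_get(0) != MV_IDLE)
 *			;
 */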