// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright 2008, 2010-2016 Freescale Semiconductor, Inc.
 * Copyright 2017-2018 NXP Semiconductor
 */

#include <common.h>
#include <env.h>
#include <hwconfig.h>
#include <fsl_ddr_sdram.h>
#include <log.h>

#include <fsl_ddr.h>
#if defined(CONFIG_FSL_LSCH2) || defined(CONFIG_FSL_LSCH3) || \
	defined(CONFIG_ARM)
#include <asm/arch/clock.h>
#endif

/*
 * Use our own stack-based buffer before relocation so that longer hwconfig
 * strings in the environment can be accessed before we have relocated.
 * This is fragile with respect to both stack usage and buffer size, but
 * env_get_f() will at least warn us if the buffer is too small.
 */

/* Board-specific functions defined in each board's ddr.c */
void __weak fsl_ddr_board_options(memctl_options_t *popts,
				  dimm_params_t *pdimm,
				  unsigned int ctrl_num)
{
	return;
}

struct dynamic_odt {
	unsigned int odt_rd_cfg;
	unsigned int odt_wr_cfg;
	unsigned int odt_rtt_norm;
	unsigned int odt_rtt_wr;
};

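/*
 * Dynamic ODT tables, indexed by chip select (CS0..CS3).  The table names
 * encode the DIMM population: "single"/"dual" is the number of DIMM slots
 * on the controller, and the suffix gives the rank count per slot
 * (S = single-rank, D = dual-rank, Q = quad-rank, 0 = empty slot).  For
 * example, dual_DS describes two slots with a dual-rank DIMM in slot 0 and
 * a single-rank DIMM in slot 1.
 */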
#ifdef CONFIG_SYS_FSL_DDR4
/* Quad rank is not verified yet due to limited availability.
 * Replacing 20 OHM with 34 OHM since DDR4 doesn't have a 20 OHM option.
 */
static __maybe_unused const struct dynamic_odt single_Q[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
		DDR4_RTT_34_OHM,	/* unverified */
		DDR4_RTT_120_OHM
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR4_RTT_OFF,
		DDR4_RTT_120_OHM
	},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
		DDR4_RTT_34_OHM,
		DDR4_RTT_120_OHM
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,	/* tied high */
		DDR4_RTT_OFF,
		DDR4_RTT_120_OHM
	}
};

static __maybe_unused const struct dynamic_odt single_D[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_ALL,
		DDR4_RTT_40_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR4_RTT_OFF,
		DDR4_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt single_S[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_ALL,
		DDR4_RTT_40_OHM,
		DDR4_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
};

static __maybe_unused const struct dynamic_odt dual_DD[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR4_RTT_120_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR4_RTT_34_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR4_RTT_120_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR4_RTT_34_OHM,
		DDR4_RTT_OFF
	}
};

static __maybe_unused const struct dynamic_odt dual_DS[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR4_RTT_120_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR4_RTT_34_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs2 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_ALL,
		DDR4_RTT_34_OHM,
		DDR4_RTT_120_OHM
	},
	{0, 0, 0, 0}
};
static __maybe_unused const struct dynamic_odt dual_SD[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_ALL,
		DDR4_RTT_34_OHM,
		DDR4_RTT_120_OHM
	},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR4_RTT_120_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR4_RTT_34_OHM,
		DDR4_RTT_OFF
	}
};

static __maybe_unused const struct dynamic_odt dual_SS[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_ALL,
		DDR4_RTT_34_OHM,
		DDR4_RTT_120_OHM
	},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_ALL,
		DDR4_RTT_34_OHM,
		DDR4_RTT_120_OHM
	},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_D0[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR4_RTT_40_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR4_RTT_OFF,
		DDR4_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_0D[4] = {
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR4_RTT_40_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR4_RTT_OFF,
		DDR4_RTT_OFF
	}
};

static __maybe_unused const struct dynamic_odt dual_S0[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR4_RTT_40_OHM,
		DDR4_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{0, 0, 0, 0}

};

static __maybe_unused const struct dynamic_odt dual_0S[4] = {
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR4_RTT_40_OHM,
		DDR4_RTT_OFF
	},
	{0, 0, 0, 0}

};

static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR4_RTT_120_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR4_RTT_120_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR4_RTT_120_OHM,
		DDR4_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR4_RTT_120_OHM,
		DDR4_RTT_OFF
	}
};
#elif defined(CONFIG_SYS_FSL_DDR3)
static __maybe_unused const struct dynamic_odt single_Q[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
		DDR3_RTT_20_OHM,
		DDR3_RTT_120_OHM
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,	/* tied high */
		DDR3_RTT_OFF,
		DDR3_RTT_120_OHM
	},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS_AND_OTHER_DIMM,
		DDR3_RTT_20_OHM,
		DDR3_RTT_120_OHM
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,	/* tied high */
		DDR3_RTT_OFF,
		DDR3_RTT_120_OHM
	}
};

static __maybe_unused const struct dynamic_odt single_D[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_ALL,
		DDR3_RTT_40_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR3_RTT_OFF,
		DDR3_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt single_S[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_ALL,
		DDR3_RTT_40_OHM,
		DDR3_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
};

static __maybe_unused const struct dynamic_odt dual_DD[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR3_RTT_120_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR3_RTT_30_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR3_RTT_120_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR3_RTT_30_OHM,
		DDR3_RTT_OFF
	}
};

static __maybe_unused const struct dynamic_odt dual_DS[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR3_RTT_120_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR3_RTT_30_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs2 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_ALL,
		DDR3_RTT_20_OHM,
		DDR3_RTT_120_OHM
	},
	{0, 0, 0, 0}
};
static __maybe_unused const struct dynamic_odt dual_SD[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_ALL,
		DDR3_RTT_20_OHM,
		DDR3_RTT_120_OHM
	},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR3_RTT_120_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR3_RTT_20_OHM,
		DDR3_RTT_OFF
	}
};

static __maybe_unused const struct dynamic_odt dual_SS[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_ALL,
		DDR3_RTT_30_OHM,
		DDR3_RTT_120_OHM
	},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_ALL,
		DDR3_RTT_30_OHM,
		DDR3_RTT_120_OHM
	},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_D0[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR3_RTT_40_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR3_RTT_OFF,
		DDR3_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_0D[4] = {
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_SAME_DIMM,
		DDR3_RTT_40_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR3_RTT_OFF,
		DDR3_RTT_OFF
	}
};

static __maybe_unused const struct dynamic_odt dual_S0[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR3_RTT_40_OHM,
		DDR3_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{0, 0, 0, 0}

};

static __maybe_unused const struct dynamic_odt dual_0S[4] = {
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR3_RTT_40_OHM,
		DDR3_RTT_OFF
	},
	{0, 0, 0, 0}

};

static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR3_RTT_120_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR3_RTT_120_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR3_RTT_120_OHM,
		DDR3_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR3_RTT_120_OHM,
		DDR3_RTT_OFF
	}
};
#else	/* CONFIG_SYS_FSL_DDR3 */
static __maybe_unused const struct dynamic_odt single_Q[4] = {
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt single_D[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_ALL,
		DDR2_RTT_150_OHM,
		DDR2_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR2_RTT_OFF,
		DDR2_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt single_S[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_ALL,
		DDR2_RTT_150_OHM,
		DDR2_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
};

static __maybe_unused const struct dynamic_odt dual_DD[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR2_RTT_OFF,
		DDR2_RTT_OFF
	},
	{	/* cs2 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR2_RTT_OFF,
		DDR2_RTT_OFF
	}
};

static __maybe_unused const struct dynamic_odt dual_DS[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR2_RTT_OFF,
		DDR2_RTT_OFF
	},
	{	/* cs2 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_SD[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR2_RTT_OFF,
		DDR2_RTT_OFF
	}
};

static __maybe_unused const struct dynamic_odt dual_SS[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_OTHER_DIMM,
		FSL_DDR_ODT_OTHER_DIMM,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_D0[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_ALL,
		DDR2_RTT_150_OHM,
		DDR2_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR2_RTT_OFF,
		DDR2_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0}
};

static __maybe_unused const struct dynamic_odt dual_0D[4] = {
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_ALL,
		DDR2_RTT_150_OHM,
		DDR2_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR2_RTT_OFF,
		DDR2_RTT_OFF
	}
};

static __maybe_unused const struct dynamic_odt dual_S0[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR2_RTT_150_OHM,
		DDR2_RTT_OFF
	},
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{0, 0, 0, 0}

};

static __maybe_unused const struct dynamic_odt dual_0S[4] = {
	{0, 0, 0, 0},
	{0, 0, 0, 0},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR2_RTT_150_OHM,
		DDR2_RTT_OFF
	},
	{0, 0, 0, 0}

};

static __maybe_unused const struct dynamic_odt odt_unknown[4] = {
	{	/* cs0 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{	/* cs1 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR2_RTT_OFF,
		DDR2_RTT_OFF
	},
	{	/* cs2 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_CS,
		DDR2_RTT_75_OHM,
		DDR2_RTT_OFF
	},
	{	/* cs3 */
		FSL_DDR_ODT_NEVER,
		FSL_DDR_ODT_NEVER,
		DDR2_RTT_OFF,
		DDR2_RTT_OFF
	}
};
#endif

/*
 * Automatically select a bank interleaving mode based on the DIMMs present,
 * trying in this order: cs0_cs1_cs2_cs3, cs0_cs1, null.
 * This function only deals with one or two slots per controller.
 */
static inline unsigned int auto_bank_intlv(dimm_params_t *pdimm)
{
#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
	if (pdimm[0].n_ranks == 4)
		return FSL_DDR_CS0_CS1_CS2_CS3;
	else if (pdimm[0].n_ranks == 2)
		return FSL_DDR_CS0_CS1;
#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
#ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
	if (pdimm[0].n_ranks == 4)
		return FSL_DDR_CS0_CS1_CS2_CS3;
#endif
	if (pdimm[0].n_ranks == 2) {
		if (pdimm[1].n_ranks == 2)
			return FSL_DDR_CS0_CS1_CS2_CS3;
		else
			return FSL_DDR_CS0_CS1;
	}
#endif
	return 0;
}

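/*
 * Fill in the memory controller options structure with defaults derived from
 * the DIMM parameters, the hwconfig environment variable and the build-time
 * configuration.  Board code gets a final chance to adjust the result via
 * fsl_ddr_board_options().
 */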
unsigned int populate_memctl_options(const common_timing_params_t *common_dimm,
			memctl_options_t *popts,
			dimm_params_t *pdimm,
			unsigned int ctrl_num)
{
	unsigned int i;
	char buf[HWCONFIG_BUFFER_SIZE];
#if defined(CONFIG_SYS_FSL_DDR3) || \
	defined(CONFIG_SYS_FSL_DDR2) || \
	defined(CONFIG_SYS_FSL_DDR4)
	const struct dynamic_odt *pdodt = odt_unknown;
#endif
#if (CFG_FSL_SDRAM_TYPE != SDRAM_TYPE_DDR4)
	ulong ddr_freq;
#endif

	/*
	 * Extract hwconfig from the environment: the environment is not fully
	 * set up yet, but we need it for the DDR configuration parameters.
	 */
#if CONFIG_IS_ENABLED(ENV_SUPPORT)
	if (env_get_f("hwconfig", buf, sizeof(buf)) < 0)
#endif
		buf[0] = '\0';

#if defined(CONFIG_SYS_FSL_DDR3) || \
	defined(CONFIG_SYS_FSL_DDR2) || \
	defined(CONFIG_SYS_FSL_DDR4)
	/* Chip select options. */
#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
	switch (pdimm[0].n_ranks) {
	case 1:
		pdodt = single_S;
		break;
	case 2:
		pdodt = single_D;
		break;
	case 4:
		pdodt = single_Q;
		break;
	}
#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
	switch (pdimm[0].n_ranks) {
#ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
	case 4:
		pdodt = single_Q;
		if (pdimm[1].n_ranks)
			printf("Error: Quad- and Dual-rank DIMMs cannot be used together\n");
		break;
#endif
	case 2:
		switch (pdimm[1].n_ranks) {
		case 2:
			pdodt = dual_DD;
			break;
		case 1:
			pdodt = dual_DS;
			break;
		case 0:
			pdodt = dual_D0;
			break;
		}
		break;
	case 1:
		switch (pdimm[1].n_ranks) {
		case 2:
			pdodt = dual_SD;
			break;
		case 1:
			pdodt = dual_SS;
			break;
		case 0:
			pdodt = dual_S0;
			break;
		}
		break;
	case 0:
		switch (pdimm[1].n_ranks) {
		case 2:
			pdodt = dual_0D;
			break;
		case 1:
			pdodt = dual_0S;
			break;
		}
		break;
	}
#endif	/* CONFIG_DIMM_SLOTS_PER_CTLR */
#endif	/* CONFIG_SYS_FSL_DDR2, 3, 4 */

	/* Pick chip-select local options. */
	for (i = 0; i < CONFIG_CHIP_SELECTS_PER_CTRL; i++) {
#if defined(CONFIG_SYS_FSL_DDR3) || \
	defined(CONFIG_SYS_FSL_DDR2) || \
	defined(CONFIG_SYS_FSL_DDR4)
		popts->cs_local_opts[i].odt_rd_cfg = pdodt[i].odt_rd_cfg;
		popts->cs_local_opts[i].odt_wr_cfg = pdodt[i].odt_wr_cfg;
		popts->cs_local_opts[i].odt_rtt_norm = pdodt[i].odt_rtt_norm;
		popts->cs_local_opts[i].odt_rtt_wr = pdodt[i].odt_rtt_wr;
#else
		popts->cs_local_opts[i].odt_rd_cfg = FSL_DDR_ODT_NEVER;
		popts->cs_local_opts[i].odt_wr_cfg = FSL_DDR_ODT_CS;
#endif
		popts->cs_local_opts[i].auto_precharge = 0;
	}

	/* Pick interleaving mode. */

	/*
	 * 0 = no interleaving
	 * 1 = interleaving between 2 controllers
	 */
	popts->memctl_interleaving = 0;

	/*
	 * 0 = cacheline
	 * 1 = page
	 * 2 = (logical) bank
	 * 3 = superbank (only if CS interleaving is enabled)
	 */
	popts->memctl_interleaving_mode = 0;

	/*
	 * 0: cacheline: bit 30 of the 36-bit physical addr selects the memctl
	 * 1: page:      bit to the left of the column bits selects the memctl
	 * 2: bank:      bit to the left of the bank bits selects the memctl
	 * 3: superbank: bit to the left of the chip select selects the memctl
	 *
	 * NOTE: ba_intlv (rank interleaving) is independent of memory
	 * controller interleaving; it is only within a memory controller.
	 * Must use superbank interleaving if rank interleaving is used and
	 * memory controller interleaving is enabled.
	 */

	/*
	 * 0 = no
	 * 0x40 = CS0,CS1
	 * 0x20 = CS2,CS3
	 * 0x60 = CS0,CS1 + CS2,CS3
	 * 0x04 = CS0,CS1,CS2,CS3
	 */
	popts->ba_intlv_ctl = 0;

	/* Memory Organization Parameters */
	popts->registered_dimm_en = common_dimm->all_dimms_registered;

	/* Operational Mode Parameters */

	/* Pick ECC modes */
	popts->ecc_mode = 0;		  /* 0 = disabled, 1 = enabled */
#ifdef CONFIG_DDR_ECC
	if (hwconfig_sub_f("fsl_ddr", "ecc", buf)) {
		if (hwconfig_subarg_cmp_f("fsl_ddr", "ecc", "on", buf))
			popts->ecc_mode = 1;
	} else
		popts->ecc_mode = 1;
#endif
	/* 1 = use memory controller to init data */
	popts->ecc_init_using_memctl = popts->ecc_mode ? 1 : 0;

	/*
	 * Choose DQS config
	 * 0 for DDR1
	 * 1 for DDR2
	 */
#if defined(CONFIG_SYS_FSL_DDR1)
	popts->dqs_config = 0;
#elif defined(CONFIG_SYS_FSL_DDR2) || defined(CONFIG_SYS_FSL_DDR3)
	popts->dqs_config = 1;
#endif

	/* Choose self-refresh during sleep. */
	popts->self_refresh_in_sleep = 1;

	/* Choose dynamic power management mode. */
	popts->dynamic_power = 0;

	/*
	 * Check the first DIMM for the primary SDRAM width, assuming all
	 * DIMMs are similar.
	 * 0 = 64-bit, 1 = 32-bit, 2 = 16-bit
	 */
#if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
	if (pdimm[0].n_ranks != 0) {
		if ((pdimm[0].data_width >= 64) && \
			(pdimm[0].data_width <= 72))
			popts->data_bus_width = 0;
		else if ((pdimm[0].data_width >= 32) && \
			(pdimm[0].data_width <= 40))
			popts->data_bus_width = 1;
		else {
			panic("Error: data width %u is invalid!\n",
				pdimm[0].data_width);
		}
	}
#else
	if (pdimm[0].n_ranks != 0) {
		if (pdimm[0].primary_sdram_width == 64)
			popts->data_bus_width = 0;
		else if (pdimm[0].primary_sdram_width == 32)
			popts->data_bus_width = 1;
		else if (pdimm[0].primary_sdram_width == 16)
			popts->data_bus_width = 2;
		else {
			panic("Error: primary sdram width %u is invalid!\n",
				pdimm[0].primary_sdram_width);
		}
	}
#endif

	popts->x4_en = (pdimm[0].device_width == 4) ? 1 : 0;

	/* Choose burst length. */
#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
#if defined(CONFIG_E500MC)
	popts->otf_burst_chop_en = 0;	/* on-the-fly burst chop disable */
	popts->burst_length = DDR_BL8;	/* Fixed 8-beat burst len */
#else
	if ((popts->data_bus_width == 1) || (popts->data_bus_width == 2)) {
		/* 32-bit or 16-bit bus */
		popts->otf_burst_chop_en = 0;
		popts->burst_length = DDR_BL8;
	} else {
		popts->otf_burst_chop_en = 1;	/* on-the-fly burst chop */
		popts->burst_length = DDR_OTF;	/* on-the-fly BC4 and BL8 */
	}
#endif
#else
	popts->burst_length = DDR_BL4;	/* has to be 4 for DDR2 */
#endif

	/* Choose ddr controller address mirror mode */
#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
	for (i = 0; i < CONFIG_DIMM_SLOTS_PER_CTLR; i++) {
		if (pdimm[i].n_ranks) {
			popts->mirrored_dimm = pdimm[i].mirrored_dimm;
			break;
		}
	}
#endif

	/* Global Timing Parameters. */
	debug("mclk_ps = %u ps\n", get_memory_clk_period_ps(ctrl_num));

	/* Pick a caslat override. */
	popts->cas_latency_override = 0;
	popts->cas_latency_override_value = 3;
	if (popts->cas_latency_override) {
		debug("using caslat override value = %u\n",
		       popts->cas_latency_override_value);
	}

	/* Decide whether to use the computed derated latency */
	popts->use_derated_caslat = 0;

	/* Choose an additive latency. */
	popts->additive_latency_override = 0;
	popts->additive_latency_override_value = 3;
	if (popts->additive_latency_override) {
		debug("using additive latency override value = %u\n",
		       popts->additive_latency_override_value);
	}

	/*
	 * 2T_EN setting
	 *
	 * Factors to consider for 2T_EN:
	 *	- number of DIMMs installed
	 *	- number of components, number of active ranks
	 *	- how much time you want to spend playing around
	 */
	popts->twot_en = 0;
	popts->threet_en = 0;

	/* for RDIMM and DDR4 UDIMM/discrete memory, address parity enable */
	if (popts->registered_dimm_en)
		popts->ap_en = 1; /* 0 = disable,  1 = enable */
	else
		popts->ap_en = 0; /* disabled for DDR4 UDIMM/discrete default */

	if (hwconfig_sub_f("fsl_ddr", "parity", buf)) {
		if (hwconfig_subarg_cmp_f("fsl_ddr", "parity", "on", buf)) {
			if (popts->registered_dimm_en ||
			    (CFG_FSL_SDRAM_TYPE == SDRAM_TYPE_DDR4))
				popts->ap_en = 1;
		}
	}

	/*
	 * BSTTOPRE precharge interval
	 *
	 * Set this to 0 for global auto precharge.
	 * The value 0x100 has historically been used for DDR1, DDR2 and DDR3
	 * and is not wrong; any value works, and the best choice depends on
	 * the application.  A reasonable default is 1/4 of the refresh
	 * interval (refint).
	 */
	popts->bstopre = picos_to_mclk(ctrl_num, common_dimm->refresh_rate_ps)
			 >> 2;
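	/*
	 * For example (illustrative numbers only): a 7800000 ps (7.8 us)
	 * refresh interval with a 1250 ps MCLK period (DDR-1600) gives
	 * picos_to_mclk() = 6240 cycles, so bstopre = 6240 >> 2 = 1560.
	 */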

	/*
	 * Window for four activates -- tFAW
	 *
	 * FIXME: UM: applies only to DDR2/DDR3 with eight logical banks only
	 * FIXME: varies depending upon number of column addresses or data
	 * FIXME: width, was considering looking at pdimm->primary_sdram_width
	 */
#if defined(CONFIG_SYS_FSL_DDR1)
	popts->tfaw_window_four_activates_ps = mclk_to_picos(ctrl_num, 1);

#elif defined(CONFIG_SYS_FSL_DDR2)
	/*
	 * x4/x8;  some datasheets have 35000
	 * x16 wide columns only?  Use 50000?
	 */
	popts->tfaw_window_four_activates_ps = 37500;

#else
	popts->tfaw_window_four_activates_ps = pdimm[0].tfaw_ps;
#endif
	popts->zq_en = 0;
	popts->wrlvl_en = 0;
#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
	/*
	 * Because DDR3/DDR4 DIMMs use a fly-by topology, we suggest enabling
	 * write leveling to meet tQDSS under different loading.
	 */
	popts->wrlvl_en = 1;
	popts->zq_en = 1;
	popts->wrlvl_override = 0;
#endif

	/*
	 * Check the interleaving configuration from the environment.
	 * Please refer to doc/README.fsl-ddr for the details.
	 *
	 * If memory controller interleaving is enabled, the data bus widths
	 * must be programmed identically for all memory controllers.
	 *
	 * Attempt to set all controllers to the same chip-select interleaving
	 * mode.  A best effort is made to interleave the requested ranks
	 * together, so the result should be a subset of the requested
	 * configuration.
	 *
	 * If CONFIG_SYS_FSL_DDR_INTLV_256B is defined, 256-byte interleaving
	 * is mandatory.
	 */
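	/*
	 * For example (illustrative), a setting such as
	 * "hwconfig=fsl_ddr:ctlr_intlv=cacheline,bank_intlv=cs0_cs1" selects
	 * cache-line interleaving between controllers and CS0+CS1 rank
	 * interleaving within each controller.
	 */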
#if (CONFIG_SYS_NUM_DDR_CTLRS > 1)
	if (!hwconfig_sub_f("fsl_ddr", "ctlr_intlv", buf))
#ifdef CONFIG_SYS_FSL_DDR_INTLV_256B
		;
#else
		goto done;
#endif
	if (pdimm[0].n_ranks == 0) {
		printf("There is no rank on CS0 for controller %d.\n", ctrl_num);
		popts->memctl_interleaving = 0;
		goto done;
	}
	popts->memctl_interleaving = 1;
#ifdef CONFIG_SYS_FSL_DDR_INTLV_256B
	popts->memctl_interleaving_mode = FSL_DDR_256B_INTERLEAVING;
	popts->memctl_interleaving = 1;
	debug("256 Byte interleaving\n");
#else
	/*
	 * Test "null" first: if CONFIG_HWCONFIG is not defined,
	 * hwconfig_subarg_cmp_f() returns non-zero.
	 */
	if (hwconfig_subarg_cmp_f("fsl_ddr", "ctlr_intlv",
				    "null", buf)) {
		popts->memctl_interleaving = 0;
		debug("memory controller interleaving disabled.\n");
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					"ctlr_intlv",
					"cacheline", buf)) {
		popts->memctl_interleaving_mode =
			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
			0 : FSL_DDR_CACHE_LINE_INTERLEAVING;
		popts->memctl_interleaving =
			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
			0 : 1;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					"ctlr_intlv",
					"page", buf)) {
		popts->memctl_interleaving_mode =
			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
			0 : FSL_DDR_PAGE_INTERLEAVING;
		popts->memctl_interleaving =
			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
			0 : 1;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					"ctlr_intlv",
					"bank", buf)) {
		popts->memctl_interleaving_mode =
			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
			0 : FSL_DDR_BANK_INTERLEAVING;
		popts->memctl_interleaving =
			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
			0 : 1;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					"ctlr_intlv",
					"superbank", buf)) {
		popts->memctl_interleaving_mode =
			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
			0 : FSL_DDR_SUPERBANK_INTERLEAVING;
		popts->memctl_interleaving =
			((CONFIG_SYS_NUM_DDR_CTLRS == 3) && ctrl_num == 2) ?
			0 : 1;
#if (CONFIG_SYS_NUM_DDR_CTLRS == 3)
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					"ctlr_intlv",
					"3way_1KB", buf)) {
		popts->memctl_interleaving_mode =
			FSL_DDR_3WAY_1KB_INTERLEAVING;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					"ctlr_intlv",
					"3way_4KB", buf)) {
		popts->memctl_interleaving_mode =
			FSL_DDR_3WAY_4KB_INTERLEAVING;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					"ctlr_intlv",
					"3way_8KB", buf)) {
		popts->memctl_interleaving_mode =
			FSL_DDR_3WAY_8KB_INTERLEAVING;
#elif (CONFIG_SYS_NUM_DDR_CTLRS == 4)
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					"ctlr_intlv",
					"4way_1KB", buf)) {
		popts->memctl_interleaving_mode =
			FSL_DDR_4WAY_1KB_INTERLEAVING;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					"ctlr_intlv",
					"4way_4KB", buf)) {
		popts->memctl_interleaving_mode =
			FSL_DDR_4WAY_4KB_INTERLEAVING;
	} else if (hwconfig_subarg_cmp_f("fsl_ddr",
					"ctlr_intlv",
					"4way_8KB", buf)) {
		popts->memctl_interleaving_mode =
			FSL_DDR_4WAY_8KB_INTERLEAVING;
#endif
	} else {
		popts->memctl_interleaving = 0;
		printf("hwconfig has unrecognized parameter for ctlr_intlv.\n");
	}
#endif	/* CONFIG_SYS_FSL_DDR_INTLV_256B */
done:
#endif /* CONFIG_SYS_NUM_DDR_CTLRS > 1 */
	if ((hwconfig_sub_f("fsl_ddr", "bank_intlv", buf)) &&
		(CONFIG_CHIP_SELECTS_PER_CTRL > 1)) {
		/*
		 * Test "null" first: if CONFIG_HWCONFIG is not defined,
		 * hwconfig_subarg_cmp_f() returns non-zero.
		 */
		if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
					    "null", buf))
			debug("bank interleaving disabled.\n");
		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
						 "cs0_cs1", buf))
			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1;
		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
						 "cs2_cs3", buf))
			popts->ba_intlv_ctl = FSL_DDR_CS2_CS3;
		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
						 "cs0_cs1_and_cs2_cs3", buf))
			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_AND_CS2_CS3;
		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
						 "cs0_cs1_cs2_cs3", buf))
			popts->ba_intlv_ctl = FSL_DDR_CS0_CS1_CS2_CS3;
		else if (hwconfig_subarg_cmp_f("fsl_ddr", "bank_intlv",
						"auto", buf))
			popts->ba_intlv_ctl = auto_bank_intlv(pdimm);
		else
			printf("hwconfig has unrecognized parameter for bank_intlv.\n");
		switch (popts->ba_intlv_ctl & FSL_DDR_CS0_CS1_CS2_CS3) {
		case FSL_DDR_CS0_CS1_CS2_CS3:
#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
			if (pdimm[0].n_ranks < 4) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(chip-select) for "
					"CS0+CS1+CS2+CS3 on controller %d, "
					"interleaving disabled!\n", ctrl_num);
			}
#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
#ifdef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
			if (pdimm[0].n_ranks == 4)
				break;
#endif
			if ((pdimm[0].n_ranks < 2) && (pdimm[1].n_ranks < 2)) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(chip-select) for "
					"CS0+CS1+CS2+CS3 on controller %d, "
					"interleaving disabled!\n", ctrl_num);
			}
			if (pdimm[0].capacity != pdimm[1].capacity) {
				popts->ba_intlv_ctl = 0;
				printf("Not identical DIMM size for "
					"CS0+CS1+CS2+CS3 on controller %d, "
					"interleaving disabled!\n", ctrl_num);
			}
#endif
			break;
		case FSL_DDR_CS0_CS1:
			if (pdimm[0].n_ranks < 2) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(chip-select) for "
					"CS0+CS1 on controller %d, "
					"interleaving disabled!\n", ctrl_num);
			}
			break;
		case FSL_DDR_CS2_CS3:
#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
			if (pdimm[0].n_ranks < 4) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(chip-select) for CS2+CS3 "
					"on controller %d, interleaving disabled!\n", ctrl_num);
			}
#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
			if (pdimm[1].n_ranks < 2) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(chip-select) for CS2+CS3 "
					"on controller %d, interleaving disabled!\n", ctrl_num);
			}
#endif
			break;
		case FSL_DDR_CS0_CS1_AND_CS2_CS3:
#if (CONFIG_DIMM_SLOTS_PER_CTLR == 1)
			if (pdimm[0].n_ranks < 4) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(CS) for CS0+CS1 and "
					"CS2+CS3 on controller %d, "
					"interleaving disabled!\n", ctrl_num);
			}
#elif (CONFIG_DIMM_SLOTS_PER_CTLR == 2)
			if ((pdimm[0].n_ranks < 2) || (pdimm[1].n_ranks < 2)) {
				popts->ba_intlv_ctl = 0;
				printf("Not enough bank(CS) for CS0+CS1 and "
					"CS2+CS3 on controller %d, "
					"interleaving disabled!\n", ctrl_num);
			}
#endif
			break;
		default:
			popts->ba_intlv_ctl = 0;
			break;
		}
	}

	if (hwconfig_sub_f("fsl_ddr", "addr_hash", buf)) {
		if (hwconfig_subarg_cmp_f("fsl_ddr", "addr_hash", "null", buf))
			popts->addr_hash = 0;
		else if (hwconfig_subarg_cmp_f("fsl_ddr", "addr_hash",
					       "true", buf))
			popts->addr_hash = 1;
	}

	if (pdimm[0].n_ranks == 4)
		popts->quad_rank_present = 1;

	popts->package_3ds = pdimm->package_3ds;

#if (CFG_FSL_SDRAM_TYPE != SDRAM_TYPE_DDR4)
	ddr_freq = get_ddr_freq(ctrl_num) / 1000000;
	if (popts->registered_dimm_en) {
		popts->rcw_override = 1;
		popts->rcw_1 = 0x000a5a00;
		if (ddr_freq <= 800)
			popts->rcw_2 = 0x00000000;
		else if (ddr_freq <= 1066)
			popts->rcw_2 = 0x00100000;
		else if (ddr_freq <= 1333)
			popts->rcw_2 = 0x00200000;
		else
			popts->rcw_2 = 0x00300000;
	}
#endif

	fsl_ddr_board_options(popts, pdimm, ctrl_num);

	return 0;
}

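/*
 * Verify that the controllers selected for memory controller interleaving
 * use compatible DIMMs (same rank count, rank density and row/column
 * addressing) and the same interleaving mode.  If they do not, interleaving
 * is disabled on all controllers.
 */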
void check_interleaving_options(fsl_ddr_info_t *pinfo)
{
	int i, j, k, check_n_ranks, intlv_invalid = 0;
	unsigned int check_intlv, check_n_row_addr, check_n_col_addr;
	unsigned long long check_rank_density;
	struct dimm_params_s *dimm;
	int first_ctrl = pinfo->first_ctrl;
	int last_ctrl = first_ctrl + pinfo->num_ctrls - 1;

	/*
	 * Check if all controllers are configured for memory
	 * controller interleaving. Identical dimms are recommended. At least
	 * the size, row and col address should be checked.
	 */
	j = 0;
	check_n_ranks = pinfo->dimm_params[first_ctrl][0].n_ranks;
	check_rank_density = pinfo->dimm_params[first_ctrl][0].rank_density;
	check_n_row_addr =  pinfo->dimm_params[first_ctrl][0].n_row_addr;
	check_n_col_addr = pinfo->dimm_params[first_ctrl][0].n_col_addr;
	check_intlv = pinfo->memctl_opts[first_ctrl].memctl_interleaving_mode;
	for (i = first_ctrl; i <= last_ctrl; i++) {
		dimm = &pinfo->dimm_params[i][0];
		if (!pinfo->memctl_opts[i].memctl_interleaving) {
			continue;
		} else if (((check_rank_density != dimm->rank_density) ||
		     (check_n_ranks != dimm->n_ranks) ||
		     (check_n_row_addr != dimm->n_row_addr) ||
		     (check_n_col_addr != dimm->n_col_addr) ||
		     (check_intlv !=
			pinfo->memctl_opts[i].memctl_interleaving_mode))){
			intlv_invalid = 1;
			break;
		} else {
			j++;
		}

	}
	if (intlv_invalid) {
		for (i = first_ctrl; i <= last_ctrl; i++)
			pinfo->memctl_opts[i].memctl_interleaving = 0;
		printf("Not all DIMMs are identical. "
			"Memory controller interleaving disabled.\n");
	} else {
		switch (check_intlv) {
		case FSL_DDR_256B_INTERLEAVING:
		case FSL_DDR_CACHE_LINE_INTERLEAVING:
		case FSL_DDR_PAGE_INTERLEAVING:
		case FSL_DDR_BANK_INTERLEAVING:
		case FSL_DDR_SUPERBANK_INTERLEAVING:
#if (3 == CONFIG_SYS_NUM_DDR_CTLRS)
				k = 2;
#else
				k = CONFIG_SYS_NUM_DDR_CTLRS;
#endif
			break;
		case FSL_DDR_3WAY_1KB_INTERLEAVING:
		case FSL_DDR_3WAY_4KB_INTERLEAVING:
		case FSL_DDR_3WAY_8KB_INTERLEAVING:
		case FSL_DDR_4WAY_1KB_INTERLEAVING:
		case FSL_DDR_4WAY_4KB_INTERLEAVING:
		case FSL_DDR_4WAY_8KB_INTERLEAVING:
		default:
			k = CONFIG_SYS_NUM_DDR_CTLRS;
			break;
		}
		debug("%d of %d controllers are interleaving.\n", j, k);
		if (j && (j != k)) {
			for (i = first_ctrl; i <= last_ctrl; i++)
				pinfo->memctl_opts[i].memctl_interleaving = 0;
			if ((last_ctrl - first_ctrl) > 1)
				puts("Not all controllers have compatible interleaving mode. All disabled.\n");
		}
	}
	debug("Checking interleaving options completed\n");
}

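/*
 * Decide whether DDR configuration should be read from SPD.  Returns 1 (use
 * SPD) unless hwconfig contains "fsl_ddr:sdram=fixed" or CONFIG_DDR_SPD is
 * not enabled at all, in which case 0 is returned.
 */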
int fsl_use_spd(void)
{
	int use_spd = 0;

#ifdef CONFIG_DDR_SPD
	char buf[HWCONFIG_BUFFER_SIZE];

	/*
	 * Extract hwconfig from the environment: the environment is not fully
	 * set up yet, but we need it for the DDR configuration parameters.
	 */
#if CONFIG_IS_ENABLED(ENV_SUPPORT)
	if (env_get_f("hwconfig", buf, sizeof(buf)) < 0)
#endif
		buf[0] = '\0';

	/* if hwconfig is not enabled, or "sdram" is not defined, use spd */
	if (hwconfig_sub_f("fsl_ddr", "sdram", buf)) {
		if (hwconfig_subarg_cmp_f("fsl_ddr", "sdram", "spd", buf))
			use_spd = 1;
		else if (hwconfig_subarg_cmp_f("fsl_ddr", "sdram",
					       "fixed", buf))
			use_spd = 0;
		else
			use_spd = 1;
	} else
		use_spd = 1;
#endif

	return use_spd;
}