// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Marvell International Ltd. and its affiliates
 */

#include "ddr3_init.h"
#include "mv_ddr_common.h"
#include "mv_ddr_training_db.h"
#include "mv_ddr_regs.h"
#include "mv_ddr_sys_env_lib.h"

#define DDR_INTERFACES_NUM		1
#define DDR_INTERFACE_OCTETS_NUM	5

/*
 * 1. L2 filter should be set at binary header to 0xD000000,
 *    to avoid conflict with internal register IO.
 * 2. U-Boot modifies internal registers base to 0xf100000,
 *    and then should update L2 filter accordingly to 0xf000000 (3.75 GB)
 */
#define L2_FILTER_FOR_MAX_MEMORY_SIZE	0xC0000000 /* temporarily limit L2 filter to 3 GB (LSP issue) */
#define ADDRESS_FILTERING_END_REGISTER	0x8c04

#define DYNAMIC_CS_SIZE_CONFIG
#define DISABLE_L2_FILTERING_DURING_DDR_TRAINING

/* Thermal Sensor Registers */
#define TSEN_CONTROL_LSB_REG		0xE4070
#define TSEN_CONTROL_LSB_TC_TRIM_OFFSET	0
#define TSEN_CONTROL_LSB_TC_TRIM_MASK	(0x7 << TSEN_CONTROL_LSB_TC_TRIM_OFFSET)
#define TSEN_CONTROL_MSB_REG		0xE4074
#define TSEN_CONTROL_MSB_RST_OFFSET	8
#define TSEN_CONTROL_MSB_RST_MASK	(0x1 << TSEN_CONTROL_MSB_RST_OFFSET)
#define TSEN_STATUS_REG			0xe4078
#define TSEN_STATUS_READOUT_VALID_OFFSET	10
#define TSEN_STATUS_READOUT_VALID_MASK	(0x1 << \
					 TSEN_STATUS_READOUT_VALID_OFFSET)
#define TSEN_STATUS_TEMP_OUT_OFFSET	0
#define TSEN_STATUS_TEMP_OUT_MASK	(0x3ff << TSEN_STATUS_TEMP_OUT_OFFSET)

#if defined(CONFIG_DDR4)
static struct dlb_config ddr3_dlb_config_table[] = {
	{DLB_CTRL_REG, 0x2000005f},
	{DLB_BUS_OPT_WT_REG, 0x00880000},
	{DLB_AGING_REG, 0x3f7f007f},
	{DLB_EVICTION_CTRL_REG, 0x0000129f},
	{DLB_EVICTION_TIMERS_REG, 0x00ff0000},
	{DLB_WTS_DIFF_CS_REG, 0x04030803},
	{DLB_WTS_DIFF_BG_REG, 0x00000a02},
	{DLB_WTS_SAME_BG_REG, 0x08000901},
	{DLB_WTS_CMDS_REG, 0x00020005},
	{DLB_WTS_ATTR_PRIO_REG, 0x00060f10},
	{DLB_QUEUE_MAP_REG, 0x00000543},
	{DLB_SPLIT_REG, 0x0000000f},
	{DLB_USER_CMD_REG, 0x00000000},
	{0x0, 0x0}
};
#else /* !CONFIG_DDR4 */
static struct dlb_config ddr3_dlb_config_table[] = {
	{DLB_CTRL_REG, 0x2000005c},
	{DLB_BUS_OPT_WT_REG, 0x00880000},
	{DLB_AGING_REG, 0x0f7f007f},
	{DLB_EVICTION_CTRL_REG, 0x0000129f},
	{DLB_EVICTION_TIMERS_REG, 0x00ff0000},
	{DLB_WTS_DIFF_CS_REG, 0x04030802},
	{DLB_WTS_DIFF_BG_REG, 0x00000a02},
	{DLB_WTS_SAME_BG_REG, 0x09000a01},
	{DLB_WTS_CMDS_REG, 0x00020005},
	{DLB_WTS_ATTR_PRIO_REG, 0x00060f10},
	{DLB_QUEUE_MAP_REG, 0x00000543},
	{DLB_SPLIT_REG, 0x00000000},
	{DLB_USER_CMD_REG, 0x00000000},
	{0x0, 0x0}
};
#endif /* CONFIG_DDR4 */

static struct dlb_config *sys_env_dlb_config_ptr_get(void)
{
	return &ddr3_dlb_config_table[0];
}

static u8 a38x_bw_per_freq[MV_DDR_FREQ_LAST] = {
	0x3,	/* MV_DDR_FREQ_100 */
#if !defined(CONFIG_DDR4)
	0x4,	/* MV_DDR_FREQ_400 */
	0x4,	/* MV_DDR_FREQ_533 */
#endif /* CONFIG_DDR4 */
	0x5,	/* MV_DDR_FREQ_667 */
	0x5,	/* MV_DDR_FREQ_800 */
	0x5,	/* MV_DDR_FREQ_933 */
	0x5,	/* MV_DDR_FREQ_1066 */
#if defined(CONFIG_DDR4)
	0x5,	/* MV_DDR_FREQ_900 */
	0x5,	/* MV_DDR_FREQ_1000 */
#else /* CONFIG_DDR4 */
	0x3,	/* MV_DDR_FREQ_311 */
	0x3,	/* MV_DDR_FREQ_333 */
	0x4,	/* MV_DDR_FREQ_467 */
	0x5,	/* MV_DDR_FREQ_850 */
	0x5,	/* MV_DDR_FREQ_600 */
	0x3,	/* MV_DDR_FREQ_300 */
	0x5,	/* MV_DDR_FREQ_900 */
	0x3,	/* MV_DDR_FREQ_360 */
	0x5	/* MV_DDR_FREQ_1000 */
#endif /* CONFIG_DDR4 */
};

static u8 a38x_rate_per_freq[MV_DDR_FREQ_LAST] = {
	0x1,	/* MV_DDR_FREQ_100 */
#if !defined(CONFIG_DDR4)
	0x2,	/* MV_DDR_FREQ_400 */
	0x2,	/* MV_DDR_FREQ_533 */
#endif /* CONFIG_DDR4 */
	0x2,	/* MV_DDR_FREQ_667 */
	0x2,	/* MV_DDR_FREQ_800 */
	0x3,	/* MV_DDR_FREQ_933 */
	0x3,	/* MV_DDR_FREQ_1066 */
#ifdef CONFIG_DDR4
	0x2,	/* MV_DDR_FREQ_900 */
	0x2,	/* MV_DDR_FREQ_1000 */
#else /* CONFIG_DDR4 */
	0x1,	/* MV_DDR_FREQ_311 */
	0x1,	/* MV_DDR_FREQ_333 */
	0x2,	/* MV_DDR_FREQ_467 */
	0x2,	/* MV_DDR_FREQ_850 */
	0x2,	/* MV_DDR_FREQ_600 */
	0x1,	/* MV_DDR_FREQ_300 */
	0x2,	/* MV_DDR_FREQ_900 */
	0x1,	/* MV_DDR_FREQ_360 */
	0x2	/* MV_DDR_FREQ_1000 */
#endif /* CONFIG_DDR4 */
};

static u16 a38x_vco_freq_per_sar_ref_clk_25_mhz[] = {
	666,	/* 0 */
	1332,
	800,
	1600,
	1066,
	2132,
	1200,
	2400,
	1332,
	1332,
	1500,
	1500,
	1600,	/* 12 */
	1600,
	1700,
	1700,
	1866,
	1866,
	1800,	/* 18 */
	2000,
	2000,
	4000,
	2132,
	2132,
	2300,
	2300,
	2400,
	2400,
	2500,
	2500,
	800
};

static u16 a38x_vco_freq_per_sar_ref_clk_40_mhz[] = {
	666,	/* 0 */
	1332,
	800,
	800,	/* 0x3 */
	1066,
	1066,	/* 0x5 */
	1200,
	2400,
	1332,
	1332,
	1500,	/* 10 */
	1600,	/* 0xB */
	1600,
	1600,
	1700,
	1560,	/* 0xF */
	1866,
	1866,
	1800,
	2000,
	2000,	/* 20 */
	4000,
	2132,
	2132,
	2300,
	2300,
	2400,
	2400,
	2500,
	2500,
	1800	/* 30 - 0x1E */
};

#if defined(CONFIG_DDR4)
u16 odt_slope[] = {
	21443,
	1452,
	482,
	240,
	141,
	90,
	67,
	52
};

u16 odt_intercept[] = {
	1517,
	328,
	186,
	131,
	100,
	80,
	69,
	61
};

/* Map of scratch PHY registers used to store stability value */
u32 dmin_phy_reg_table[MAX_BUS_NUM * MAX_CS_NUM][2] = {
	/* subphy, addr */
	{0, 0xc0},	/* cs 0, subphy 0 */
	{0, 0xc1},	/* cs 0, subphy 1 */
	{0, 0xc2},	/* cs 0, subphy 2 */
	{0, 0xc3},	/* cs 0, subphy 3 */
	{0, 0xc4},	/* cs 0, subphy 4 */
	{1, 0xc0},	/* cs 1, subphy 0 */
	{1, 0xc1},	/* cs 1, subphy 1 */
	{1, 0xc2},	/* cs 1, subphy 2 */
	{1, 0xc3},	/* cs 1, subphy 3 */
	{1, 0xc4},	/* cs 1, subphy 4 */
	{2, 0xc0},	/* cs 2, subphy 0 */
	{2, 0xc1},	/* cs 2, subphy 1 */
	{2, 0xc2},	/* cs 2, subphy 2 */
	{2, 0xc3},	/* cs 2, subphy 3 */
	{2, 0xc4},	/* cs 2, subphy 4 */
	{0, 0xc5},	/* cs 3, subphy 0 */
	{1, 0xc5},	/* cs 3, subphy 1 */
	{2, 0xc5},	/* cs 3, subphy 2 */
	{0, 0xc6},	/* cs 3, subphy 3 */
	{1, 0xc6}	/* cs 3, subphy 4 */
};
#endif /* CONFIG_DDR4 */

static u32 dq_bit_map_2_phy_pin[] = {
	1, 0, 2, 6, 9, 8, 3, 7,	/* 0 */
	8, 9, 1, 7, 2, 6, 3, 0,	/* 1 */
	3, 9, 7, 8, 1, 0, 2, 6,	/* 2 */
	1, 0, 6, 2, 8, 3, 7, 9,	/* 3 */
	0, 1, 2, 9, 7, 8, 3, 6,	/* 4 */
};

void mv_ddr_mem_scrubbing(void)
{
	ddr3_new_tip_ecc_scrub();
}

static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
				     enum mv_ddr_freq freq);

/*
 * Read temperature TJ value
 */
static u32 ddr3_ctrl_get_junc_temp(u8 dev_num)
{
	int reg = 0;

	/* Initiates TSEN hardware reset once */
	if ((reg_read(TSEN_CONTROL_MSB_REG) & TSEN_CONTROL_MSB_RST_MASK) == 0) {
		reg_bit_set(TSEN_CONTROL_MSB_REG, TSEN_CONTROL_MSB_RST_MASK);
		/* set Tsen Tc Trim to correct default value (errata #132698) */
		reg = reg_read(TSEN_CONTROL_LSB_REG);
		reg &= ~TSEN_CONTROL_LSB_TC_TRIM_MASK;
		reg |= 0x3 << TSEN_CONTROL_LSB_TC_TRIM_OFFSET;
		reg_write(TSEN_CONTROL_LSB_REG, reg);
	}
	mdelay(10);

	/* Check if the readout field is valid */
	if ((reg_read(TSEN_STATUS_REG) & TSEN_STATUS_READOUT_VALID_MASK) == 0) {
		printf("%s: TSEN not ready\n", __func__);
		return 0;
	}

	reg = reg_read(TSEN_STATUS_REG);
	reg = (reg & TSEN_STATUS_TEMP_OUT_MASK) >> TSEN_STATUS_TEMP_OUT_OFFSET;

	/*
	 * Convert the raw readout to degrees Celsius:
	 * T = reg * 10000 / 21445 - 272.674; the expression below keeps
	 * everything in integer millidegrees to avoid losing precision.
	 */
	return ((((10000 * reg) / 21445) * 1000) - 272674) / 1000;
}

/*
 * Name:	ddr3_tip_a38x_get_freq_config.
 * Desc:	returns the bus width and rate settings for a given frequency.
 * Args:
 * Notes:
 * Returns:	MV_OK if success, other error code if fail.
 */
static int ddr3_tip_a38x_get_freq_config(u8 dev_num, enum mv_ddr_freq freq,
					 struct hws_tip_freq_config_info
					 *freq_config_info)
{
	if (a38x_bw_per_freq[freq] == 0xff)
		return MV_NOT_SUPPORTED;

	if (freq_config_info == NULL)
		return MV_BAD_PARAM;

	freq_config_info->bw_per_freq = a38x_bw_per_freq[freq];
	freq_config_info->rate_per_freq = a38x_rate_per_freq[freq];
	freq_config_info->is_supported = 1;

	return MV_OK;
}

static void dunit_read(u32 addr, u32 mask, u32 *data)
{
	*data = reg_read(addr) & mask;
}

static void dunit_write(u32 addr, u32 mask, u32 data)
{
	u32 reg_val = data;

	if (mask != MASK_ALL_BITS) {
		/* read-modify-write: touch only the masked bits */
		dunit_read(addr, MASK_ALL_BITS, &reg_val);
		reg_val &= (~mask);
		reg_val |= (data & mask);
	}

	reg_write(addr, reg_val);
}

#define ODPG_ENABLE_REG	0x186d4
#define ODPG_EN_OFFS	0
#define ODPG_EN_MASK	0x1
#define ODPG_EN_ENA	1
#define ODPG_EN_DONE	0
#define ODPG_DIS_OFFS	8
#define ODPG_DIS_MASK	0x1
#define ODPG_DIS_DIS	1
void mv_ddr_odpg_enable(void)
{
	dunit_write(ODPG_ENABLE_REG,
		    ODPG_EN_MASK << ODPG_EN_OFFS,
		    ODPG_EN_ENA << ODPG_EN_OFFS);
}

void mv_ddr_odpg_disable(void)
{
	dunit_write(ODPG_ENABLE_REG,
		    ODPG_DIS_MASK << ODPG_DIS_OFFS,
		    ODPG_DIS_DIS << ODPG_DIS_OFFS);
}

void mv_ddr_odpg_done_clr(void)
{
	return;
}

int mv_ddr_is_odpg_done(u32 count)
{
	u32 i, data;

	for (i = 0; i < count; i++) {
		dunit_read(ODPG_ENABLE_REG, MASK_ALL_BITS, &data);
		if (((data >> ODPG_EN_OFFS) & ODPG_EN_MASK) ==
		    ODPG_EN_DONE)
			break;
	}

	if (i >= count) {
		printf("%s: timeout\n", __func__);
		return MV_FAIL;
	}

	return MV_OK;
}

void mv_ddr_training_enable(void)
{
	dunit_write(GLOB_CTRL_STATUS_REG,
		    TRAINING_TRIGGER_MASK << TRAINING_TRIGGER_OFFS,
		    TRAINING_TRIGGER_ENA << TRAINING_TRIGGER_OFFS);
}

#define DRAM_INIT_CTRL_STATUS_REG	0x18488
#define TRAINING_TRIGGER_OFFS		0
#define TRAINING_TRIGGER_MASK		0x1
#define TRAINING_TRIGGER_ENA		1
#define TRAINING_DONE_OFFS		1
#define TRAINING_DONE_MASK		0x1
#define TRAINING_DONE_DONE		1
#define TRAINING_DONE_NOT_DONE		0
#define TRAINING_RESULT_OFFS		2
#define TRAINING_RESULT_MASK		0x1
#define TRAINING_RESULT_PASS		0
#define TRAINING_RESULT_FAIL		1
int mv_ddr_is_training_done(u32 count, u32 *result)
{
	u32 i, data;

	if (result == NULL) {
		printf("%s: NULL result pointer found\n", __func__);
		return MV_FAIL;
	}

	for (i = 0; i < count; i++) {
		dunit_read(DRAM_INIT_CTRL_STATUS_REG, MASK_ALL_BITS, &data);
		if (((data >> TRAINING_DONE_OFFS) & TRAINING_DONE_MASK) ==
		    TRAINING_DONE_DONE)
			break;
	}

	if (i >= count) {
		printf("%s: timeout\n", __func__);
		return MV_FAIL;
	}

	*result = (data >> TRAINING_RESULT_OFFS) & TRAINING_RESULT_MASK;

	return MV_OK;
}

#define DM_PAD	10
u32 mv_ddr_dm_pad_get(void)
{
	return DM_PAD;
}

/*
 * Name:	ddr3_tip_a38x_select_ddr_controller.
 * Desc:	Enable/Disable access to Marvell's server.
 * Args:	dev_num - device number
 *		enable - whether to enable or disable the server
 * Notes:
 * Returns:	MV_OK if success, other error code if fail.
 */
static int ddr3_tip_a38x_select_ddr_controller(u8 dev_num, int enable)
{
	u32 reg;

	reg = reg_read(DUAL_DUNIT_CFG_REG);

	if (enable)
		reg |= (1 << 6);
	else
		reg &= ~(1 << 6);

	reg_write(DUAL_DUNIT_CFG_REG, reg);

	return MV_OK;
}

static u8 ddr3_tip_clock_mode(u32 frequency)
{
	if ((frequency == MV_DDR_FREQ_LOW_FREQ) || (mv_ddr_freq_get(frequency) <= 400))
		return 1;

	return 2;
}

static int mv_ddr_sar_freq_get(int dev_num, enum mv_ddr_freq *freq)
{
	u32 reg, ref_clk_satr;

	/* Read sample at reset setting */
	reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
	       RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
	      RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;

	ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
	if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
	    DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ) {
		switch (reg) {
#if !defined(CONFIG_DDR4)
		case 0x1:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 333Mhz configured(%d)\n",
					       reg));
			/* fallthrough */
		case 0x0:
			*freq = MV_DDR_FREQ_333;
			break;
		case 0x3:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 400Mhz configured(%d)\n",
					       reg));
			/* fallthrough */
		case 0x2:
			*freq = MV_DDR_FREQ_400;
			break;
		case 0xd:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 533Mhz configured(%d)\n",
					       reg));
			/* fallthrough */
		case 0x4:
			*freq = MV_DDR_FREQ_533;
			break;
		case 0x6:
			*freq = MV_DDR_FREQ_600;
			break;
#endif /* CONFIG_DDR4 */
		case 0x11:
		case 0x14:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 667Mhz configured(%d)\n",
					       reg));
			/* fallthrough */
		case 0x8:
			*freq = MV_DDR_FREQ_667;
			break;
		case 0x15:
		case 0x1b:
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 800Mhz configured(%d)\n",
					       reg));
			/* fallthrough */
		case 0xc:
			*freq = MV_DDR_FREQ_800;
			break;
		case 0x10:
			*freq = MV_DDR_FREQ_933;
			break;
		case 0x12:
			*freq = MV_DDR_FREQ_900;
			break;
#if defined(CONFIG_DDR4)
		case 0x13:
			*freq = MV_DDR_FREQ_1000;
			DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
					      ("Warning: Unsupported freq mode for 1000Mhz configured(%d)\n",
					       reg));
			break;
#else /* CONFIG_DDR4 */
		case 0x13:
			*freq = MV_DDR_FREQ_933;
			break;
#endif /* CONFIG_DDR4 */
		default:
			*freq = 0;
			return MV_NOT_SUPPORTED;
		}
	} else { /* REFCLK 40MHz case */
		switch (reg) {
#if !defined(CONFIG_DDR4)
		case 0x3:
			*freq = MV_DDR_FREQ_400;
			break;
		case 0x5:
			*freq = MV_DDR_FREQ_533;
			break;
#endif /* CONFIG_DDR4 */
		case 0xb:
			*freq = MV_DDR_FREQ_800;
			break;
		case 0x1e:
			*freq = MV_DDR_FREQ_900;
			break;
		default:
			*freq = 0;
			return MV_NOT_SUPPORTED;
		}
	}

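	/*
	 * Illustrative note (an inference from this file, not a spec): the
	 * SAR codes above track the a38x_vco_freq_per_sar_ref_clk_25_mhz[]
	 * table, assuming the usual 2:1 VCO-to-DDR ratio in sync mode, e.g.
	 * code 0x8 -> VCO 1332 MHz -> DDR ~667 MHz, and
	 * code 0xc -> VCO 1600 MHz -> DDR 800 MHz.
	 */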
	return MV_OK;
}

#if !defined(CONFIG_DDR4)
static int ddr3_tip_a38x_get_medium_freq(int dev_num, enum mv_ddr_freq *freq)
{
	u32 reg, ref_clk_satr;

	/* Read sample at reset setting */
	reg = (reg_read(REG_DEVICE_SAR1_ADDR) >>
	       RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
	      RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;

	ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
	if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
	    DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ) {
		switch (reg) {
		case 0x0:
		case 0x1:
			/* Medium is same as TF to run PBS in this freq */
			*freq = MV_DDR_FREQ_333;
			break;
		case 0x2:
		case 0x3:
			/* Medium is same as TF to run PBS in this freq */
			*freq = MV_DDR_FREQ_400;
			break;
		case 0x4:
		case 0xd:
			/* Medium is same as TF to run PBS in this freq */
			*freq = MV_DDR_FREQ_533;
			break;
		case 0x8:
		case 0x10:
		case 0x11:
		case 0x14:
			*freq = MV_DDR_FREQ_333;
			break;
		case 0xc:
		case 0x15:
		case 0x1b:
			*freq = MV_DDR_FREQ_400;
			break;
		case 0x6:
			*freq = MV_DDR_FREQ_300;
			break;
		case 0x12:
			*freq = MV_DDR_FREQ_360;
			break;
		case 0x13:
			*freq = MV_DDR_FREQ_400;
			break;
		default:
			*freq = 0;
			return MV_NOT_SUPPORTED;
		}
	} else { /* REFCLK 40MHz case */
		switch (reg) {
		case 0x3:
			/* Medium is same as TF to run PBS in this freq */
			*freq = MV_DDR_FREQ_400;
			break;
		case 0x5:
			/* Medium is same as TF to run PBS in this freq */
			*freq = MV_DDR_FREQ_533;
			break;
		case 0xb:
			*freq = MV_DDR_FREQ_400;
			break;
		case 0x1e:
			*freq = MV_DDR_FREQ_360;
			break;
		default:
			*freq = 0;
			return MV_NOT_SUPPORTED;
		}
	}

	return MV_OK;
}
#endif /* CONFIG_DDR4 */

static int ddr3_tip_a38x_get_device_info(u8 dev_num, struct ddr3_device_info *info_ptr)
{
	info_ptr->device_id = 0x6800;
	info_ptr->ck_delay = ck_delay;

	return MV_OK;
}

/* check indirect access to phy register file completed */
static int is_prfa_done(void)
{
	u32 reg_val;
	u32 iter = 0;

	do {
		if (iter++ > MAX_POLLING_ITERATIONS) {
			printf("error: %s: polling timeout\n", __func__);
			return MV_FAIL;
		}
		dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, &reg_val);
		reg_val >>= PRFA_REQ_OFFS;
		reg_val &= PRFA_REQ_MASK;
	} while (reg_val == PRFA_REQ_ENA); /* request pending */

	return MV_OK;
}

/* write to phy register through indirect access */
static int prfa_write(enum hws_access_type phy_access, u32 phy,
		      enum hws_ddr_phy phy_type, u32 addr,
		      u32 data, enum hws_operation op_type)
{
	u32 reg_val = ((data & PRFA_DATA_MASK) << PRFA_DATA_OFFS) |
		      ((addr & PRFA_REG_NUM_MASK) << PRFA_REG_NUM_OFFS) |
		      ((phy & PRFA_PUP_NUM_MASK) << PRFA_PUP_NUM_OFFS) |
		      ((phy_type & PRFA_PUP_CTRL_DATA_MASK) << PRFA_PUP_CTRL_DATA_OFFS) |
		      ((phy_access & PRFA_PUP_BCAST_WR_ENA_MASK) << PRFA_PUP_BCAST_WR_ENA_OFFS) |
		      (((addr >> 6) & PRFA_REG_NUM_HI_MASK) << PRFA_REG_NUM_HI_OFFS) |
		      ((op_type & PRFA_TYPE_MASK) << PRFA_TYPE_OFFS);
	dunit_write(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, reg_val);
	reg_val |= (PRFA_REQ_ENA << PRFA_REQ_OFFS);
	dunit_write(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, reg_val);

	/* polling for prfa request completion */
	if (is_prfa_done() != MV_OK)
		return MV_FAIL;

	return MV_OK;
}

/* read from phy register through indirect access */
static int prfa_read(enum hws_access_type phy_access, u32 phy,
		     enum hws_ddr_phy phy_type, u32 addr, u32 *data)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	u32 max_phy = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
	u32 i, reg_val;

	if (phy_access == ACCESS_TYPE_MULTICAST) {
		for (i = 0; i < max_phy; i++) {
			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, i);
			if (prfa_write(ACCESS_TYPE_UNICAST, i, phy_type, addr, 0, OPERATION_READ) != MV_OK)
				return MV_FAIL;
			dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, &reg_val);
			data[i] = (reg_val >> PRFA_DATA_OFFS) & PRFA_DATA_MASK;
		}
	} else {
		if (prfa_write(phy_access, phy, phy_type, addr, 0, OPERATION_READ) != MV_OK)
			return MV_FAIL;
		dunit_read(PHY_REG_FILE_ACCESS_REG, MASK_ALL_BITS, &reg_val);
		*data = (reg_val >> PRFA_DATA_OFFS) & PRFA_DATA_MASK;
	}

	return MV_OK;
}

static int mv_ddr_sw_db_init(u32 dev_num, u32 board_id)
{
	struct hws_tip_config_func_db config_func;

	/* new read leveling version */
	config_func.mv_ddr_dunit_read = dunit_read;
	config_func.mv_ddr_dunit_write = dunit_write;
	config_func.tip_dunit_mux_select_func =
		ddr3_tip_a38x_select_ddr_controller;
	config_func.tip_get_freq_config_info_func =
		ddr3_tip_a38x_get_freq_config;
	config_func.tip_set_freq_divider_func = ddr3_tip_a38x_set_divider;
	config_func.tip_get_device_info_func = ddr3_tip_a38x_get_device_info;
	config_func.tip_get_temperature = ddr3_ctrl_get_junc_temp;
	config_func.tip_get_clock_ratio = ddr3_tip_clock_mode;
	config_func.tip_external_read = ddr3_tip_ext_read;
	config_func.tip_external_write = ddr3_tip_ext_write;
	config_func.mv_ddr_phy_read = prfa_read;
	config_func.mv_ddr_phy_write = prfa_write;

	ddr3_tip_init_config_func(dev_num, &config_func);

	ddr3_tip_register_dq_table(dev_num, dq_bit_map_2_phy_pin);

	/* set device attributes */
	ddr3_tip_dev_attr_init(dev_num);
	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_TIP_REV, MV_TIP_REV_4);
	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_PHY_EDGE, MV_DDR_PHY_EDGE_POSITIVE);
	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_OCTET_PER_INTERFACE, DDR_INTERFACE_OCTETS_NUM);
	ddr3_tip_dev_attr_set(dev_num, MV_ATTR_INTERLEAVE_WA, 0);

	ca_delay = 0;
	delay_enable = 1;
	dfs_low_freq = DFS_LOW_FREQ_VALUE;
	calibration_update_control = 1;

#if !defined(CONFIG_DDR4)
	ddr3_tip_a38x_get_medium_freq(dev_num, &medium_freq);
#endif /* CONFIG_DDR4 */

	return MV_OK;
}

static int mv_ddr_training_mask_set(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
#if defined(CONFIG_DDR4)
	mask_tune_func = (SET_LOW_FREQ_MASK_BIT |
			  LOAD_PATTERN_MASK_BIT |
			  SET_TARGET_FREQ_MASK_BIT |
			  WRITE_LEVELING_TF_MASK_BIT |
			  READ_LEVELING_TF_MASK_BIT |
			  RECEIVER_CALIBRATION_MASK_BIT |
			  WL_PHASE_CORRECTION_MASK_BIT |
			  DQ_VREF_CALIBRATION_MASK_BIT);
	/* Temporarily disable the DQ_MAPPING stage */
	/* DQ_MAPPING_MASK_BIT */
	rl_mid_freq_wa = 0;

	/* In case of A382, the Vref calibration workaround isn't required */
	if (((reg_read(DEV_ID_REG) & 0xFFFF0000) >> 16) == 0x6811) {
		printf("vref_calibration_wa is disabled\n");
		vref_calibration_wa = 0;
	}

	if (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask) == 1)
		mask_tune_func &= ~WL_PHASE_CORRECTION_MASK_BIT;

#else /* CONFIG_DDR4 */
	enum mv_ddr_freq ddr_freq = tm->interface_params[0].memory_freq;

	mask_tune_func = (SET_LOW_FREQ_MASK_BIT |
			  LOAD_PATTERN_MASK_BIT |
			  SET_MEDIUM_FREQ_MASK_BIT | WRITE_LEVELING_MASK_BIT |
			  WRITE_LEVELING_SUPP_MASK_BIT |
			  READ_LEVELING_MASK_BIT |
			  PBS_RX_MASK_BIT |
			  PBS_TX_MASK_BIT |
			  SET_TARGET_FREQ_MASK_BIT |
			  WRITE_LEVELING_TF_MASK_BIT |
			  WRITE_LEVELING_SUPP_TF_MASK_BIT |
			  READ_LEVELING_TF_MASK_BIT |
			  CENTRALIZATION_RX_MASK_BIT |
			  CENTRALIZATION_TX_MASK_BIT);
	rl_mid_freq_wa = 1;

	if ((ddr_freq == MV_DDR_FREQ_333) || (ddr_freq == MV_DDR_FREQ_400)) {
		mask_tune_func = (WRITE_LEVELING_MASK_BIT |
				  LOAD_PATTERN_2_MASK_BIT |
				  WRITE_LEVELING_SUPP_MASK_BIT |
				  READ_LEVELING_MASK_BIT |
				  PBS_RX_MASK_BIT |
				  PBS_TX_MASK_BIT |
				  CENTRALIZATION_RX_MASK_BIT |
				  CENTRALIZATION_TX_MASK_BIT);
		rl_mid_freq_wa = 0; /* WA not needed if 333/400 is TF */
	}

	/* Supplementary not supported for ECC modes */
	if (mv_ddr_is_ecc_ena()) {
		mask_tune_func &= ~WRITE_LEVELING_SUPP_TF_MASK_BIT;
		mask_tune_func &= ~WRITE_LEVELING_SUPP_MASK_BIT;
		mask_tune_func &= ~PBS_TX_MASK_BIT;
		mask_tune_func &= ~PBS_RX_MASK_BIT;
	}
#endif /* CONFIG_DDR4 */

	return MV_OK;
}

/* function: mv_ddr_set_calib_controller
 * this function sets the controller which will control
 * the calibration cycle at the end of the training:
 * 1 - internal controller
 * 2 - external controller
 */
void mv_ddr_set_calib_controller(void)
{
	calibration_update_control = CAL_UPDATE_CTRL_INT;
}

static int ddr3_tip_a38x_set_divider(u8 dev_num, u32 if_id,
				     enum mv_ddr_freq frequency)
{
	u32 divider = 0;
	u32 sar_val, ref_clk_satr;
	u32 async_val;
	u32 cpu_freq;
	u32 ddr_freq = mv_ddr_freq_get(frequency);

	if (if_id != 0) {
		DEBUG_TRAINING_ACCESS(DEBUG_LEVEL_ERROR,
				      ("A38x does not support interface 0x%x\n",
				       if_id));
		return MV_BAD_PARAM;
	}

	/* get VCO freq index */
	sar_val = (reg_read(REG_DEVICE_SAR1_ADDR) >>
		   RST2_CPU_DDR_CLOCK_SELECT_IN_OFFSET) &
		  RST2_CPU_DDR_CLOCK_SELECT_IN_MASK;

	ref_clk_satr = reg_read(DEVICE_SAMPLE_AT_RESET2_REG);
	if (((ref_clk_satr >> DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_OFFSET) & 0x1) ==
	    DEVICE_SAMPLE_AT_RESET2_REG_REFCLK_25MHZ)
		cpu_freq = a38x_vco_freq_per_sar_ref_clk_25_mhz[sar_val];
	else
		cpu_freq = a38x_vco_freq_per_sar_ref_clk_40_mhz[sar_val];

	divider = cpu_freq / ddr_freq;

	if (((cpu_freq % ddr_freq != 0) || (divider != 2 && divider != 3)) &&
	    (ddr_freq > 400)) {
		/* Set async mode */
		dunit_write(0x20220, 0x1000, 0x1000);
		dunit_write(0xe42f4, 0x200, 0x200);

		/* Wait for async mode setup */
		mdelay(5);

		/* Set KNL values */
		switch (frequency) {
#ifndef CONFIG_DDR4 /* CONFIG_DDR3 */
		case MV_DDR_FREQ_467:
			async_val = 0x806f012;
			break;
		case MV_DDR_FREQ_533:
			async_val = 0x807f012;
			break;
		case MV_DDR_FREQ_600:
			async_val = 0x805f00a;
			break;
#endif
		case MV_DDR_FREQ_667:
			async_val = 0x809f012;
			break;
		case MV_DDR_FREQ_800:
			async_val = 0x807f00a;
			break;
#ifndef CONFIG_DDR4 /* CONFIG_DDR3 */
		case MV_DDR_FREQ_850:
			async_val = 0x80cb012;
			break;
#endif
		case MV_DDR_FREQ_900:
			async_val = 0x80d7012;
			break;
		case MV_DDR_FREQ_933:
			async_val = 0x80df012;
			break;
		case MV_DDR_FREQ_1000:
			async_val = 0x80ef012;
			break;
		case MV_DDR_FREQ_1066:
			async_val = 0x80ff012;
			break;
		default:
			/* set MV_DDR_FREQ_667 as default */
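			/*
			 * No dedicated setting for this frequency, so fall
			 * back to the MV_DDR_FREQ_667 value; the async_val
			 * constants written to 0xe42f0 are fixed per-frequency
			 * settings taken from this table, not derived at
			 * run time.
			 */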
			async_val = 0x809f012;
		}
		dunit_write(0xe42f0, 0xffffffff, async_val);
	} else {
		/* Set sync mode */
		dunit_write(0x20220, 0x1000, 0x0);
		dunit_write(0xe42f4, 0x200, 0x0);

		/* cpupll_clkdiv_reset_mask */
		dunit_write(0xe4264, 0xff, 0x1f);

		/* cpupll_clkdiv_reload_smooth */
		dunit_write(0xe4260, (0xff << 8), (0x2 << 8));

		/* cpupll_clkdiv_relax_en */
		dunit_write(0xe4260, (0xff << 24), (0x2 << 24));

		/* write the divider */
		dunit_write(0xe4268, (0x3f << 8), (divider << 8));

		/* set cpupll_clkdiv_reload_ratio */
		dunit_write(0xe4264, (1 << 8), (1 << 8));

		/* unset cpupll_clkdiv_reload_ratio */
		dunit_write(0xe4264, (1 << 8), 0x0);

		/* clear cpupll_clkdiv_reload_force */
		dunit_write(0xe4260, (0xff << 8), 0x0);

		/* clear cpupll_clkdiv_relax_en */
		dunit_write(0xe4260, (0xff << 24), 0x0);

		/* clear cpupll_clkdiv_reset_mask */
		dunit_write(0xe4264, 0xff, 0x0);
	}

	/* Dunit training clock + 1:1/2:1 mode */
	dunit_write(0x18488, (1 << 16), ((ddr3_tip_clock_mode(frequency) & 0x1) << 16));
	dunit_write(0x1524, (1 << 15), ((ddr3_tip_clock_mode(frequency) - 1) << 15));

	return MV_OK;
}

/*
 * external read from memory
 */
int ddr3_tip_ext_read(u32 dev_num, u32 if_id, u32 reg_addr,
		      u32 num_of_bursts, u32 *data)
{
	u32 burst_num;

	for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
		data[burst_num] = readl(reg_addr + 4 * burst_num);

	return MV_OK;
}

/*
 * external write to memory
 */
int ddr3_tip_ext_write(u32 dev_num, u32 if_id, u32 reg_addr,
		       u32 num_of_bursts, u32 *data) {
	u32 burst_num;

	for (burst_num = 0; burst_num < num_of_bursts * 8; burst_num++)
		writel(data[burst_num], reg_addr + 4 * burst_num);

	return MV_OK;
}

int mv_ddr_early_init(void)
{
	/* FIXME: change this configuration per ddr type
	 * configure a380 and a390 to work with receiver odt timing
	 * the odt_config is defined:
	 * '1' in ddr4
	 * '0' in ddr3
	 * here the parameter is overridden to '1' for both ddr4 and ddr3
	 * (in ddr4 the default is '1') to configure the odt to work with
	 * timing restrictions
	 */

	mv_ddr_sw_db_init(0, 0);

	return MV_OK;
}

int mv_ddr_early_init2(void)
{
	mv_ddr_training_mask_set();

	return MV_OK;
}

int mv_ddr_pre_training_fixup(void)
{
	return 0;
}

int mv_ddr_post_training_fixup(void)
{
	return 0;
}

int ddr3_post_run_alg(void)
{
	return MV_OK;
}

int ddr3_silicon_post_init(void)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	/* Set half bus width */
	if (DDR3_IS_16BIT_DRAM_MODE(tm->bus_act_mask)) {
		CHECK_STATUS(ddr3_tip_if_write
			     (0, ACCESS_TYPE_UNICAST, PARAM_NOT_CARE,
			      SDRAM_CFG_REG, 0x0, 0x8000));
	}

	return MV_OK;
}

u32 mv_ddr_init_freq_get(void)
{
	enum mv_ddr_freq freq;

	mv_ddr_sar_freq_get(0, &freq);

	return freq;
}

static u32 ddr3_get_bus_width(void)
{
	u32 bus_width;

	bus_width = (reg_read(SDRAM_CFG_REG) & 0x8000) >>
		    BUS_IN_USE_OFFS;

	return (bus_width == 0) ? 16 : 32;
}

static u32 ddr3_get_device_width(u32 cs)
{
	u32 device_width;

	device_width = (reg_read(SDRAM_ADDR_CTRL_REG) &
			(CS_STRUCT_MASK << CS_STRUCT_OFFS(cs))) >>
		       CS_STRUCT_OFFS(cs);

	return (device_width == 0) ? 8 : 16;
}

static u32 ddr3_get_device_size(u32 cs)
{
	u32 device_size_low, device_size_high, device_size;
	u32 data, cs_low_offset, cs_high_offset;

	cs_low_offset = CS_SIZE_OFFS(cs);
	cs_high_offset = CS_SIZE_HIGH_OFFS(cs);

	data = reg_read(SDRAM_ADDR_CTRL_REG);
	device_size_low = (data >> cs_low_offset) & 0x3;
	device_size_high = (data >> cs_high_offset) & 0x1;

	device_size = device_size_low | (device_size_high << 2);

	switch (device_size) {
	case 0:
		return 2048;
	case 2:
		return 512;
	case 3:
		return 1024;
	case 4:
		return 4096;
	case 5:
		return 8192;
	case 1:
	default:
		DEBUG_INIT_C("Error: Wrong device size of Cs: ", cs, 1);
		/* zeroes mem size in ddr3_calc_mem_cs_size */
		return 0;
	}
}

int ddr3_calc_mem_cs_size(u32 cs, uint64_t *cs_size)
{
	u32 cs_mem_size;

	/* Calculate in MiB */
	cs_mem_size = ((ddr3_get_bus_width() / ddr3_get_device_width(cs)) *
		       ddr3_get_device_size(cs)) / 8;

	/*
	 * Multiply by the controller bus width: 2x for 64 bit
	 * (the SoC controller may be 32 or 64 bit, so bit 15 in
	 * register 0x1400, which tells whether the whole bus or
	 * only half of it is used, has a different meaning)
	 */
	cs_mem_size *= DDR_CONTROLLER_BUS_WIDTH_MULTIPLIER;

	if ((cs_mem_size < 128) || (cs_mem_size > 4096)) {
		DEBUG_INIT_C("Error: Wrong Memory size of Cs: ", cs, 1);
		return MV_BAD_VALUE;
	}

	*cs_size = cs_mem_size;

	return MV_OK;
}

static int ddr3_fast_path_dynamic_cs_size_config(u32 cs_ena)
{
	u32 reg, cs;
	uint64_t mem_total_size = 0;
	uint64_t cs_mem_size_mb = 0;
	uint64_t cs_mem_size = 0;
	uint64_t mem_total_size_c, cs_mem_size_c;

#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
	u32 physical_mem_size;
	u32 max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE;
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
#endif

	/* Open fast path windows */
	for (cs = 0; cs < MAX_CS_NUM; cs++) {
		if (cs_ena & (1 << cs)) {
			/* get CS size */
			if (ddr3_calc_mem_cs_size(cs, &cs_mem_size_mb) != MV_OK)
				return MV_FAIL;
			cs_mem_size = cs_mem_size_mb * _1M;

#ifdef DEVICE_MAX_DRAM_ADDRESS_SIZE
			/*
			 * if the number of address pins doesn't allow using
			 * the max mem size that is defined in topology,
			 * mem size is defined by DEVICE_MAX_DRAM_ADDRESS_SIZE
			 */
			physical_mem_size = mem_size
				[tm->interface_params[0].memory_size];

			if (ddr3_get_device_width(cs) == 16) {
				/*
				 * 16bit mem device can be twice more - no need
				 * in less significant pin
				 */
				max_mem_size = DEVICE_MAX_DRAM_ADDRESS_SIZE * 2;
			}

			if (physical_mem_size > max_mem_size) {
				cs_mem_size = max_mem_size *
					(ddr3_get_bus_width() /
					 ddr3_get_device_width(cs));
				printf("Updated Physical Mem size is from 0x%x to 0x%x\n",
				       physical_mem_size,
				       DEVICE_MAX_DRAM_ADDRESS_SIZE);
			}
#endif

			/* set fast path window control for the cs */
			reg = 0xffffe1;
			reg |= (cs << 2);
			reg |= (cs_mem_size - 1) & 0xffff0000;
			/* open fast path window */
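			/*
			 * Control word layout, as implied by the code above:
			 * bit 0 enables the window, bits [4:2] select the
			 * target CS, and bits [31:16] hold the (size - 1)
			 * mask in 64 KiB granularity.
			 */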
			reg_write(REG_FASTPATH_WIN_CTRL_ADDR(cs), reg);

			/* Set fast path window base address for the cs */
			reg = ((cs_mem_size) * cs) & 0xffff0000;
			/* Set base address */
			reg_write(REG_FASTPATH_WIN_BASE_ADDR(cs), reg);

			/*
			 * Since memory size may be bigger than 4G, the sum
			 * may not fit in a 32-bit word, so to estimate the
			 * result divide mem_total_size and cs_mem_size by
			 * 0x10000 (which is equal to >> 16)
			 */
			mem_total_size_c = (mem_total_size >> 16) & 0xffffffffffff;
			cs_mem_size_c = (cs_mem_size >> 16) & 0xffffffffffff;

			/* if the sum fits in 32 bits, accumulate it */
			if (mem_total_size_c + cs_mem_size_c < 0x10000)
				mem_total_size += cs_mem_size;
			else /* put max possible size */
				mem_total_size = L2_FILTER_FOR_MAX_MEMORY_SIZE;
		}
	}

	/* Set L2 filtering to Max Memory size */
	reg_write(ADDRESS_FILTERING_END_REGISTER, mem_total_size);

	return MV_OK;
}

static int ddr3_restore_and_set_final_windows(u32 *win, const char *ddr_type)
{
	u32 win_ctrl_reg, num_of_win_regs;
	u32 cs_ena = mv_ddr_sys_env_get_cs_ena_from_reg();
	u32 ui;

	win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR;
	num_of_win_regs = 16;

	/* Return XBAR windows 4-7 or 16-19 init configuration */
	for (ui = 0; ui < num_of_win_regs; ui++)
		reg_write((win_ctrl_reg + 0x4 * ui), win[ui]);

	printf("%s Training Sequence - Switching XBAR Window to FastPath Window\n",
	       ddr_type);

#if defined DYNAMIC_CS_SIZE_CONFIG
	if (ddr3_fast_path_dynamic_cs_size_config(cs_ena) != MV_OK)
		printf("ddr3_fast_path_dynamic_cs_size_config FAILED\n");
#else
	u32 reg, cs;
	reg = 0x1fffffe1;
	for (cs = 0; cs < MAX_CS_NUM; cs++) {
		if (cs_ena & (1 << cs)) {
			reg |= (cs << 2);
			break;
		}
	}
	/* Open fast path window to 0.5 GB */
	reg_write(REG_FASTPATH_WIN_CTRL_ADDR(0), reg);
#endif

	return MV_OK;
}

static int ddr3_save_and_set_training_windows(u32 *win)
{
	u32 cs_ena;
	u32 reg, tmp_count, cs, ui;
	u32 win_ctrl_reg, win_base_reg, win_remap_reg;
	u32 num_of_win_regs, win_jump_index;
	win_ctrl_reg = REG_XBAR_WIN_4_CTRL_ADDR;
	win_base_reg = REG_XBAR_WIN_4_BASE_ADDR;
	win_remap_reg = REG_XBAR_WIN_4_REMAP_ADDR;
	win_jump_index = 0x10;
	num_of_win_regs = 16;
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

#ifdef DISABLE_L2_FILTERING_DURING_DDR_TRAINING
	/*
	 * Disable L2 filtering during DDR training
	 * (when Cross Bar window is open)
	 */
	reg_write(ADDRESS_FILTERING_END_REGISTER, 0);
#endif

	cs_ena = tm->interface_params[0].as_bus_params[0].cs_bitmask;

	/* Close XBAR Window 19 - Not needed */
	/* {0x000200e8} - Open Mbus Window - 2G */
	reg_write(REG_XBAR_WIN_19_CTRL_ADDR, 0);

	/* Save XBAR Windows 4-19 init configurations */
	for (ui = 0; ui < num_of_win_regs; ui++)
		win[ui] = reg_read(win_ctrl_reg + 0x4 * ui);

	/* Open XBAR Windows 4-7 or 16-19 for other CS */
	reg = 0;
	tmp_count = 0;
	for (cs = 0; cs < MAX_CS_NUM; cs++) {
		if (cs_ena & (1 << cs)) {
			switch (cs) {
			case 0:
				reg = 0x0e00;
				break;
			case 1:
				reg = 0x0d00;
				break;
			case 2:
				reg = 0x0b00;
				break;
			case 3:
				reg = 0x0700;
				break;
			}
			reg |= (1 << 0);
			reg |= (SDRAM_CS_SIZE & 0xffff0000);

			reg_write(win_ctrl_reg + win_jump_index * tmp_count,
				  reg);
			reg = (((SDRAM_CS_SIZE + 1) * (tmp_count)) &
			       0xffff0000);
			reg_write(win_base_reg + win_jump_index * tmp_count,
				  reg);

			if (win_remap_reg <= REG_XBAR_WIN_7_REMAP_ADDR)
				reg_write(win_remap_reg +
					  win_jump_index * tmp_count, 0);

			tmp_count++;
		}
	}

	return MV_OK;
}

static u32 win[16];

int mv_ddr_pre_training_soc_config(const char *ddr_type)
{
	u32 soc_num;
	u32 reg_val;

	/* Switching CPU to MRVL ID */
	soc_num = (reg_read(REG_SAMPLE_RESET_HIGH_ADDR) & SAR1_CPU_CORE_MASK) >>
		  SAR1_CPU_CORE_OFFSET;
	switch (soc_num) {
	case 0x3:
		reg_bit_set(CPU_CONFIGURATION_REG(3), CPU_MRVL_ID_OFFSET);
		reg_bit_set(CPU_CONFIGURATION_REG(2), CPU_MRVL_ID_OFFSET);
		/* fallthrough */
	case 0x1:
		reg_bit_set(CPU_CONFIGURATION_REG(1), CPU_MRVL_ID_OFFSET);
		/* fallthrough */
	case 0x0:
		reg_bit_set(CPU_CONFIGURATION_REG(0), CPU_MRVL_ID_OFFSET);
		/* fallthrough */
	default:
		break;
	}

	/*
	 * Set DRAM Reset Mask in case detected GPIO indication of wakeup from
	 * suspend, i.e. the DRAM values will not be overwritten / reset when
	 * waking from suspend
	 */
	if (mv_ddr_sys_env_suspend_wakeup_check() ==
	    SUSPEND_WAKEUP_ENABLED_GPIO_DETECTED) {
		reg_bit_set(SDRAM_INIT_CTRL_REG,
			    DRAM_RESET_MASK_MASKED << DRAM_RESET_MASK_OFFS);
	}

	/* Fix read ready phases for all SOC in reg 0x15c8 */
	reg_val = reg_read(TRAINING_DBG_3_REG);

	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(0));
	reg_val |= (0x4 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(0));	/* phase 0 */

	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(1));
	reg_val |= (0x4 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(1));	/* phase 1 */

	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(3));
	reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(3));	/* phase 3 */

	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(4));
	reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(4));	/* phase 4 */

	reg_val &= ~(TRN_DBG_RDY_INC_PH_2TO1_MASK << TRN_DBG_RDY_INC_PH_2TO1_OFFS(5));
	reg_val |= (0x6 << TRN_DBG_RDY_INC_PH_2TO1_OFFS(5));	/* phase 5 */

	reg_write(TRAINING_DBG_3_REG, reg_val);

	/*
	 * Axi_bresp_mode[8] = Compliant,
	 * Axi_addr_decode_cntrl[11] = Internal,
	 * Axi_data_bus_width[0] = 128bit
	 */
	/* 0x14a8 - AXI Control Register */
	reg_write(AXI_CTRL_REG, 0);

	/*
	 * Stage 2 - Training Values Setup
	 */
	/* Set X-BAR windows for the training sequence */
	ddr3_save_and_set_training_windows(win);

	return MV_OK;
}

static int ddr3_new_tip_dlb_config(void)
{
	u32 reg, i = 0;
	struct dlb_config *config_table_ptr = sys_env_dlb_config_ptr_get();

	/* Write the configuration */
	while (config_table_ptr[i].reg_addr != 0) {
		reg_write(config_table_ptr[i].reg_addr,
			  config_table_ptr[i].reg_data);
		i++;
	}

#if defined(CONFIG_DDR4)
	reg = reg_read(DUNIT_CTRL_HIGH_REG);
	reg &= ~(CPU_INTERJECTION_ENA_MASK << CPU_INTERJECTION_ENA_OFFS);
	reg |= CPU_INTERJECTION_ENA_SPLIT_DIS << CPU_INTERJECTION_ENA_OFFS;
	reg_write(DUNIT_CTRL_HIGH_REG, reg);
#endif /* CONFIG_DDR4 */

	/* Enable DLB */
	reg = reg_read(DLB_CTRL_REG);
	reg &= ~(DLB_EN_MASK << DLB_EN_OFFS) &
	       ~(WR_COALESCE_EN_MASK << WR_COALESCE_EN_OFFS) &
	       ~(AXI_PREFETCH_EN_MASK << AXI_PREFETCH_EN_OFFS) &
	       ~(MBUS_PREFETCH_EN_MASK << MBUS_PREFETCH_EN_OFFS) &
	       ~(PREFETCH_NXT_LN_SZ_TRIG_MASK << PREFETCH_NXT_LN_SZ_TRIG_OFFS);

	reg |= (DLB_EN_ENA << DLB_EN_OFFS) |
	       (WR_COALESCE_EN_ENA << WR_COALESCE_EN_OFFS) |
	       (AXI_PREFETCH_EN_ENA << AXI_PREFETCH_EN_OFFS) |
	       (MBUS_PREFETCH_EN_ENA << MBUS_PREFETCH_EN_OFFS) |
	       (PREFETCH_NXT_LN_SZ_TRIG_ENA << PREFETCH_NXT_LN_SZ_TRIG_OFFS);

	reg_write(DLB_CTRL_REG, reg);

	return MV_OK;
}

int mv_ddr_post_training_soc_config(const char *ddr_type)
{
	u32 reg_val;

	/* Restore and set windows */
	ddr3_restore_and_set_final_windows(win, ddr_type);

	/* Update DRAM init indication in bootROM register */
	reg_val = reg_read(REG_BOOTROM_ROUTINE_ADDR);
	reg_write(REG_BOOTROM_ROUTINE_ADDR,
		  reg_val | (1 << REG_BOOTROM_ROUTINE_DRAM_INIT_OFFS));

	/* DLB config */
	ddr3_new_tip_dlb_config();

	return MV_OK;
}

void mv_ddr_mc_config(void)
{
	/* Memory controller initializations */
	struct init_cntr_param init_param;
	int status;

	init_param.do_mrs_phy = 1;
	init_param.is_ctrl64_bit = 0;
	init_param.init_phy = 1;
	init_param.msys_init = 1;
	status = hws_ddr3_tip_init_controller(0, &init_param);
	if (status != MV_OK)
		printf("DDR3 init controller - FAILED 0x%x\n", status);

	status = mv_ddr_mc_init();
	if (status != MV_OK)
		printf("DDR3 init_sequence - FAILED 0x%x\n", status);
}

/* function: mv_ddr_mc_init
 * this function enables the dunit after init controller configuration
 */
int mv_ddr_mc_init(void)
{
	CHECK_STATUS(ddr3_tip_enable_init_sequence(0));

	return MV_OK;
}

/* function: ddr3_tip_configure_phy
 * configures phy and electrical parameters
 */
int ddr3_tip_configure_phy(u32 dev_num)
{
	u32 if_id, phy_id;
	u32 octets_per_if_num = ddr3_tip_dev_attr_get(dev_num, MV_ATTR_OCTET_PER_INTERFACE);
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      PAD_ZRI_CAL_PHY_REG,
		      ((0x7f & g_zpri_data) << 7 | (0x7f & g_znri_data))));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
		      PAD_ZRI_CAL_PHY_REG,
		      ((0x7f & g_zpri_ctrl) << 7 | (0x7f & g_znri_ctrl))));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      PAD_ODT_CAL_PHY_REG,
		      ((0x3f & g_zpodt_data) << 6 | (0x3f & g_znodt_data))));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
		      PAD_ODT_CAL_PHY_REG,
		      ((0x3f & g_zpodt_ctrl) << 6 | (0x3f & g_znodt_ctrl))));

	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      PAD_PRE_DISABLE_PHY_REG, 0));
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_DATA,
		      CMOS_CONFIG_PHY_REG, 0));
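	/* mirror the same CMOS pad configuration on the control PHY */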
	CHECK_STATUS(ddr3_tip_bus_write
		     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
		      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, DDR_PHY_CONTROL,
		      CMOS_CONFIG_PHY_REG, 0));

	for (if_id = 0; if_id <= MAX_INTERFACE_NUM - 1; if_id++) {
		/* check if the interface is enabled */
		VALIDATE_IF_ACTIVE(tm->if_act_mask, if_id);

		for (phy_id = 0;
		     phy_id < octets_per_if_num;
		     phy_id++) {
			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, phy_id);
			/* Vref & clamp */
			CHECK_STATUS(ddr3_tip_bus_read_modify_write
				     (dev_num, ACCESS_TYPE_UNICAST,
				      if_id, phy_id, DDR_PHY_DATA,
				      PAD_CFG_PHY_REG,
				      ((clamp_tbl[if_id] << 4) | vref_init_val),
				      ((0x7 << 4) | 0x7)));
			/* clamp not relevant for control */
			CHECK_STATUS(ddr3_tip_bus_read_modify_write
				     (dev_num, ACCESS_TYPE_UNICAST,
				      if_id, phy_id, DDR_PHY_CONTROL,
				      PAD_CFG_PHY_REG, 0x4, 0x7));
		}
	}

	if (ddr3_tip_dev_attr_get(dev_num, MV_ATTR_PHY_EDGE) ==
	    MV_DDR_PHY_EDGE_POSITIVE)
		CHECK_STATUS(ddr3_tip_bus_write
			     (dev_num, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE,
			      DDR_PHY_DATA, 0x90, 0x6002));

#if defined(CONFIG_DDR4)
	mv_ddr4_phy_config(dev_num);
#endif /* CONFIG_DDR4 */

	return MV_OK;
}

#if defined(CONFIG_DDR4)
/* function: mv_ddr4_calibration_validate
 * this function validates the calibration values;
 * it is implemented per SoC because, due to process differences,
 * the valid calibration ranges differ between SoCs
 */
int mv_ddr4_calibration_validate(u32 dev_num)
{
	int status = MV_OK;
	u8 if_id = 0;
	u32 read_data[MAX_INTERFACE_NUM];
	u32 cal_n = 0, cal_p = 0;

	/*
	 * Pad calibration control enable: during training, set the
	 * calibration to be internal; at the end of the training it should
	 * be fixed to external, to be configured by the mc6
	 * FIXME: set the calibration to external at the end of the training
	 */

	/* pad calibration control enable */
	CHECK_STATUS(ddr3_tip_if_write
		     (0, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, MAIN_PADS_CAL_MACH_CTRL_REG,
		      DYN_PADS_CAL_ENABLE_ENA << DYN_PADS_CAL_ENABLE_OFFS |
		      CAL_UPDATE_CTRL_INT << CAL_UPDATE_CTRL_OFFS,
		      DYN_PADS_CAL_ENABLE_MASK << DYN_PADS_CAL_ENABLE_OFFS |
		      CAL_UPDATE_CTRL_MASK << CAL_UPDATE_CTRL_OFFS));

	/* Polling initial calibration is done */
	if (ddr3_tip_if_polling(dev_num, ACCESS_TYPE_UNICAST, if_id,
				CAL_MACH_RDY << CAL_MACH_STATUS_OFFS,
				CAL_MACH_STATUS_MASK << CAL_MACH_STATUS_OFFS,
				MAIN_PADS_CAL_MACH_CTRL_REG, MAX_POLLING_ITERATIONS) != MV_OK)
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, ("mv_ddr4_calibration_validate: DDR4 calibration poll failed(0)\n"));

	/* Polling that calibration propagated to io */
	if (ddr3_tip_if_polling(dev_num, ACCESS_TYPE_UNICAST, if_id, 0x3FFFFFF, 0x3FFFFFF, PHY_LOCK_STATUS_REG,
				MAX_POLLING_ITERATIONS) != MV_OK)
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, ("mv_ddr4_calibration_validate: DDR4 calibration poll failed(1)\n"));

	/* TODO - debug why polling is not enough */
	mdelay(10);

	/* pad calibration control disable */
	CHECK_STATUS(ddr3_tip_if_write
		     (0, ACCESS_TYPE_MULTICAST, PARAM_NOT_CARE, MAIN_PADS_CAL_MACH_CTRL_REG,
		      DYN_PADS_CAL_ENABLE_DIS << DYN_PADS_CAL_ENABLE_OFFS |
		      CAL_UPDATE_CTRL_INT << CAL_UPDATE_CTRL_OFFS,
		      DYN_PADS_CAL_ENABLE_MASK << DYN_PADS_CAL_ENABLE_OFFS |
		      CAL_UPDATE_CTRL_MASK << CAL_UPDATE_CTRL_OFFS));

	/* Polling initial calibration is done */
	if (ddr3_tip_if_polling(dev_num, ACCESS_TYPE_UNICAST, if_id,
				CAL_MACH_RDY << CAL_MACH_STATUS_OFFS,
				CAL_MACH_STATUS_MASK << CAL_MACH_STATUS_OFFS,
				MAIN_PADS_CAL_MACH_CTRL_REG, MAX_POLLING_ITERATIONS) != MV_OK)
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, ("mv_ddr4_calibration_validate: DDR4 calibration poll failed(0)\n"));

	/* Polling that calibration propagated to io */
	if (ddr3_tip_if_polling(dev_num, ACCESS_TYPE_UNICAST, if_id, 0x3FFFFFF, 0x3FFFFFF, PHY_LOCK_STATUS_REG,
				MAX_POLLING_ITERATIONS) != MV_OK)
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR, ("mv_ddr4_calibration_validate: DDR4 calibration poll failed(1)\n"));

	/* TODO - debug why polling is not enough */
	mdelay(10);

	/* Read Cal value and set to manual val */
	CHECK_STATUS(ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id, 0x1DC8, read_data, MASK_ALL_BITS));
	cal_n = (read_data[if_id] & ((0x3F) << 10)) >> 10;
	cal_p = (read_data[if_id] & ((0x3F) << 4)) >> 4;
	DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
			  ("mv_ddr4_calibration_validate: DDR4 SSTL calib val - Pcal = 0x%x, Ncal = 0x%x\n",
			   cal_p, cal_n));
	if ((cal_n >= 56) || (cal_n <= 6) || (cal_p >= 59) || (cal_p <= 7)) {
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
				  ("%s: Error: DDR4 SSTL calib val - Pcal = 0x%x, Ncal = 0x%x are out of range\n",
				   __func__, cal_p, cal_n));
		status = MV_FAIL;
	}

	/* 14C8 - Vertical */
	CHECK_STATUS(ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id, 0x14C8, read_data, MASK_ALL_BITS));
	cal_n = (read_data[if_id] & ((0x3F) << 10)) >> 10;
	cal_p = (read_data[if_id] & ((0x3F) << 4)) >> 4;
	DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
			  ("mv_ddr4_calibration_validate: DDR4 POD-V calib val - Pcal = 0x%x, Ncal = 0x%x\n",
			   cal_p, cal_n));
	if ((cal_n >= 56) || (cal_n <= 6) || (cal_p >= 59) || (cal_p <= 7)) {
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
				  ("%s: Error: DDR4 POD-V calib val - Pcal = 0x%x, Ncal = 0x%x are out of range\n",
				   __func__, cal_p, cal_n));
		status = MV_FAIL;
	}

	/* 17C8 - Horizontal */
	CHECK_STATUS(ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id, 0x17C8, read_data, MASK_ALL_BITS));
	cal_n = (read_data[if_id] & ((0x3F) << 10)) >> 10;
	cal_p = (read_data[if_id] & ((0x3F) << 4)) >> 4;
	DEBUG_TRAINING_IP(DEBUG_LEVEL_INFO,
			  ("mv_ddr4_calibration_validate: DDR4 POD-H calib val - Pcal = 0x%x, Ncal = 0x%x\n",
			   cal_p, cal_n));
	if ((cal_n >= 56) || (cal_n <= 6) || (cal_p >= 59) || (cal_p <= 7)) {
		DEBUG_TRAINING_IP(DEBUG_LEVEL_ERROR,
				  ("%s: Error: DDR4 POD-H calib val - Pcal = 0x%x, Ncal = 0x%x are out of range\n",
				   __func__, cal_p, cal_n));
		status = MV_FAIL;
	}

	return status;
}
#endif /* CONFIG_DDR4 */

int mv_ddr_manual_cal_do(void)
{
	return 0;
}
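
/*
 * Rough call order of the platform hooks above, as suggested by their
 * names (a sketch, not a verified trace of the generic training code;
 * the "ddr3" label passed for ddr_type is a hypothetical example):
 *
 *	mv_ddr_early_init();                      - sw db + config functions
 *	mv_ddr_early_init2();                     - training stage mask
 *	mv_ddr_pre_training_soc_config("ddr3");   - CPU ID, XBAR training windows
 *	(training sequence runs)
 *	mv_ddr_post_training_soc_config("ddr3");  - restore windows, DLB, bootROM flag
 */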