// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) Marvell International Ltd. and its affiliates
 */

#include "ddr3_init.h"
#include "mv_ddr_regs.h"

static u32 bist_offset = 32;
enum hws_pattern sweep_pattern = PATTERN_KILLER_DQ0;

static int ddr3_tip_bist_operation(u32 dev_num,
				   enum hws_access_type access_type,
				   u32 if_id,
				   enum hws_bist_operation oper_type);

/*
 * BIST activate
 */
int ddr3_tip_bist_activate(u32 dev_num, enum hws_pattern pattern,
			   enum hws_access_type access_type, u32 if_num,
			   enum hws_dir dir,
			   enum hws_stress_jump addr_stress_jump,
			   enum hws_pattern_duration duration,
			   enum hws_bist_operation oper_type,
			   u32 offset, u32 cs_num, u32 pattern_addr_length)
{
	u32 tx_burst_size;
	u32 delay_between_burst;
	u32 rd_mode;
	struct pattern_info *pattern_table = ddr3_tip_get_pattern_table();

	/* odpg bist write enable */
	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG,
			  (ODPG_WRBUF_WR_CTRL_ENA << ODPG_WRBUF_WR_CTRL_OFFS),
			  (ODPG_WRBUF_WR_CTRL_MASK << ODPG_WRBUF_WR_CTRL_OFFS));

	/* odpg bist read enable/disable */
	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG,
			  (dir == OPER_READ) ?
			  (ODPG_WRBUF_RD_CTRL_ENA << ODPG_WRBUF_RD_CTRL_OFFS) :
			  (ODPG_WRBUF_RD_CTRL_DIS << ODPG_WRBUF_RD_CTRL_OFFS),
			  (ODPG_WRBUF_RD_CTRL_MASK << ODPG_WRBUF_RD_CTRL_OFFS));

	ddr3_tip_load_pattern_to_odpg(0, access_type, 0, pattern, offset);

	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_BUFFER_SIZE_REG,
			  pattern_addr_length, MASK_ALL_BITS);
	tx_burst_size = (dir == OPER_WRITE) ?
			pattern_table[pattern].tx_burst_size : 0;
	delay_between_burst = (dir == OPER_WRITE) ? 2 : 0;
	rd_mode = (dir == OPER_WRITE) ? 1 : 0;
	ddr3_tip_configure_odpg(0, access_type, 0, dir,
				pattern_table[pattern].num_of_phases_tx,
				tx_burst_size,
				pattern_table[pattern].num_of_phases_rx,
				delay_between_burst,
				rd_mode, cs_num, addr_stress_jump, duration);
	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_BUFFER_OFFS_REG,
			  offset, MASK_ALL_BITS);

	if (oper_type == BIST_STOP) {
		ddr3_tip_bist_operation(0, access_type, 0, BIST_STOP);
	} else {
		ddr3_tip_bist_operation(0, access_type, 0, BIST_START);
		if (mv_ddr_is_odpg_done(MAX_POLLING_ITERATIONS) != MV_OK)
			return MV_FAIL;
		ddr3_tip_bist_operation(0, access_type, 0, BIST_STOP);
	}
	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG, 0,
			  MASK_ALL_BITS);

	return MV_OK;
}
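/*
 * Illustrative call (a sketch, not part of the driver): a single
 * write-direction BIST burst on interface 0, chip-select 0, could be
 * triggered as
 *
 *	ddr3_tip_bist_activate(0, PATTERN_KILLER_DQ0, ACCESS_TYPE_UNICAST,
 *			       0, OPER_WRITE, STRESS_NONE, DURATION_SINGLE,
 *			       BIST_START, bist_offset, 0, 15);
 *
 * hws_ddr3_run_bist() below wraps exactly this write-then-read sequence
 * and collects the per-interface error counters.
 */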

/*
 * BIST read result
 */
int ddr3_tip_bist_read_result(u32 dev_num, u32 if_id,
			      struct bist_result *pst_bist_result)
{
	int ret;
	u32 read_data[MAX_INTERFACE_NUM];
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	if (IS_IF_ACTIVE(tm->if_act_mask, if_id) == 0)
		return MV_NOT_SUPPORTED;

	DEBUG_TRAINING_BIST_ENGINE(DEBUG_LEVEL_TRACE,
				   ("ddr3_tip_bist_read_result if_id %d\n",
				    if_id));

	ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
			       ODPG_DATA_RX_WORD_ERR_DATA_HIGH_REG, read_data,
			       MASK_ALL_BITS);
	if (ret != MV_OK)
		return ret;
	pst_bist_result->bist_fail_high = read_data[if_id];

	ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
			       ODPG_DATA_RX_WORD_ERR_DATA_LOW_REG, read_data,
			       MASK_ALL_BITS);
	if (ret != MV_OK)
		return ret;
	pst_bist_result->bist_fail_low = read_data[if_id];

	ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
			       ODPG_DATA_RX_WORD_ERR_ADDR_REG, read_data,
			       MASK_ALL_BITS);
	if (ret != MV_OK)
		return ret;
	pst_bist_result->bist_last_fail_addr = read_data[if_id];

	ret = ddr3_tip_if_read(dev_num, ACCESS_TYPE_UNICAST, if_id,
			       ODPG_DATA_RX_WORD_ERR_CNTR_REG, read_data,
			       MASK_ALL_BITS);
	if (ret != MV_OK)
		return ret;
	pst_bist_result->bist_error_cnt = read_data[if_id];

	return MV_OK;
}

/*
 * BIST flow - Activate & read result
 */
int hws_ddr3_run_bist(u32 dev_num, enum hws_pattern pattern, u32 *result,
		      u32 cs_num)
{
	int ret;
	u32 i = 0;
	u32 win_base;
	struct bist_result st_bist_result;
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	for (i = 0; i < MAX_INTERFACE_NUM; i++) {
		VALIDATE_IF_ACTIVE(tm->if_act_mask, i);
		hws_ddr3_cs_base_adr_calc(i, cs_num, &win_base);
		ret = ddr3_tip_bist_activate(dev_num, pattern,
					     ACCESS_TYPE_UNICAST,
					     i, OPER_WRITE, STRESS_NONE,
					     DURATION_SINGLE, BIST_START,
					     bist_offset + win_base,
					     cs_num, 15);
		if (ret != MV_OK) {
			printf("ddr3_tip_bist_activate failed (0x%x)\n", ret);
			return ret;
		}

		ret = ddr3_tip_bist_activate(dev_num, pattern,
					     ACCESS_TYPE_UNICAST,
					     i, OPER_READ, STRESS_NONE,
					     DURATION_SINGLE, BIST_START,
					     bist_offset + win_base,
					     cs_num, 15);
		if (ret != MV_OK) {
			printf("ddr3_tip_bist_activate failed (0x%x)\n", ret);
			return ret;
		}

		ret = ddr3_tip_bist_read_result(dev_num, i, &st_bist_result);
		if (ret != MV_OK) {
			printf("ddr3_tip_bist_read_result failed\n");
			return ret;
		}
		result[i] = st_bist_result.bist_error_cnt;
	}

	return MV_OK;
}
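/*
 * Illustrative usage (a sketch; error handling elided): collect the
 * per-interface error counters for one pattern on chip-select 0:
 *
 *	u32 res[MAX_INTERFACE_NUM];
 *
 *	if (hws_ddr3_run_bist(0, PATTERN_KILLER_DQ0, res, 0) == MV_OK)
 *		printf("interface 0: %u bist errors\n", res[0]);
 */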

/*
 * Set BIST Operation
 */
static int ddr3_tip_bist_operation(u32 dev_num,
				   enum hws_access_type access_type,
				   u32 if_id, enum hws_bist_operation oper_type)
{
	if (oper_type == BIST_STOP)
		mv_ddr_odpg_disable();
	else
		mv_ddr_odpg_enable();

	return MV_OK;
}

/*
 * Print BIST result
 */
void ddr3_tip_print_bist_res(void)
{
	u32 dev_num = 0;
	u32 i;
	struct bist_result st_bist_result[MAX_INTERFACE_NUM];
	int res;
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();

	for (i = 0; i < MAX_INTERFACE_NUM; i++) {
		VALIDATE_IF_ACTIVE(tm->if_act_mask, i);

		res = ddr3_tip_bist_read_result(dev_num, i, &st_bist_result[i]);
		if (res != MV_OK) {
			DEBUG_TRAINING_BIST_ENGINE(
				DEBUG_LEVEL_ERROR,
				("ddr3_tip_bist_read_result failed\n"));
			return;
		}
	}

	DEBUG_TRAINING_BIST_ENGINE(
		DEBUG_LEVEL_INFO,
		("interface | error_cnt | fail_low | fail_high | fail_addr\n"));

	for (i = 0; i < MAX_INTERFACE_NUM; i++) {
		VALIDATE_IF_ACTIVE(tm->if_act_mask, i);

		DEBUG_TRAINING_BIST_ENGINE(
			DEBUG_LEVEL_INFO,
			("%d | 0x%08x | 0x%08x | 0x%08x | 0x%08x\n",
			 i, st_bist_result[i].bist_error_cnt,
			 st_bist_result[i].bist_fail_low,
			 st_bist_result[i].bist_fail_high,
			 st_bist_result[i].bist_last_fail_addr));
	}
}

enum {
	PASS,
	FAIL
};

#define TIP_ITERATION_NUM	31
static int mv_ddr_tip_bist(enum hws_dir dir, u32 val, enum hws_pattern pattern,
			   u32 cs, u32 *result)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	enum hws_training_ip_stat training_result;
	u16 *reg_map = ddr3_tip_get_mask_results_pup_reg_map();
	u32 max_subphy = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
	u32 subphy, read_data;

	ddr3_tip_ip_training(0, ACCESS_TYPE_MULTICAST, 0, ACCESS_TYPE_MULTICAST,
			     PARAM_NOT_CARE, RESULT_PER_BYTE,
			     HWS_CONTROL_ELEMENT_ADLL, HWS_LOW2HIGH, dir,
			     tm->if_act_mask, val, TIP_ITERATION_NUM, pattern,
			     EDGE_FP, CS_SINGLE, cs, &training_result);

	for (subphy = 0; subphy < max_subphy; subphy++) {
		ddr3_tip_if_read(0, ACCESS_TYPE_UNICAST, 0, reg_map[subphy],
				 &read_data, MASK_ALL_BITS);
		if (((read_data >> BLOCK_STATUS_OFFS) & BLOCK_STATUS_MASK) ==
		    BLOCK_STATUS_NOT_LOCKED)
			*result |= (FAIL << subphy);
	}

	return MV_OK;
}

struct interval {
	u8 *vector;
	u8 lendpnt;		/* interval's left endpoint */
	u8 rendpnt;		/* interval's right endpoint */
	u8 size;		/* interval's size */
	u8 lmarker;		/* left marker */
	u8 rmarker;		/* right marker */
	u8 pass_lendpnt;	/* left endpoint of internal pass interval */
	u8 pass_rendpnt;	/* right endpoint of internal pass interval */
};

static int interval_init(u8 *vector, u8 lendpnt, u8 rendpnt,
			 u8 lmarker, u8 rmarker, struct interval *intrvl)
{
	if (intrvl == NULL) {
		printf("%s: NULL intrvl pointer found\n", __func__);
		return MV_FAIL;
	}

	if (vector == NULL) {
		printf("%s: NULL vector pointer found\n", __func__);
		return MV_FAIL;
	}
	intrvl->vector = vector;

	if (lendpnt >= rendpnt) {
		printf("%s: incorrect lendpnt and/or rendpnt parameters found\n",
		       __func__);
		return MV_FAIL;
	}
	intrvl->lendpnt = lendpnt;
	intrvl->rendpnt = rendpnt;
	intrvl->size = rendpnt - lendpnt + 1;

	if ((lmarker < lendpnt) || (lmarker > rendpnt)) {
		printf("%s: incorrect lmarker parameter found\n", __func__);
		return MV_FAIL;
	}
	intrvl->lmarker = lmarker;

	if ((rmarker < lmarker) || (rmarker > (intrvl->rendpnt + intrvl->size))) {
		printf("%s: incorrect rmarker parameter found\n", __func__);
		return MV_FAIL;
	}
	intrvl->rmarker = rmarker;

	return MV_OK;
}

static int interval_set(u8 pass_lendpnt, u8 pass_rendpnt, struct interval *intrvl)
{
	if (intrvl == NULL) {
		printf("%s: NULL intrvl pointer found\n", __func__);
		return MV_FAIL;
	}

	intrvl->pass_lendpnt = pass_lendpnt;
	intrvl->pass_rendpnt = pass_rendpnt;

	return MV_OK;
}
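/*
 * Note on the circular representation used below (an explanatory example,
 * not code from the driver): the vector is treated as a ring of 'size'
 * entries, so a marker may lie past rendpnt to express wrap-around, and
 * indexing is always done modulo size.  E.g. with lendpnt = 0,
 * rendpnt = 63 (size = 64), rmarker = 70 refers to vector[70 % 64],
 * i.e. vector[6].
 */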
static int interval_proc(struct interval *intrvl)
{
	int curr;
	int pass_lendpnt, pass_rendpnt;
	int lmt;
	int fcnt = 0, pcnt = 0;

	if (intrvl == NULL) {
		printf("%s: NULL intrvl pointer found\n", __func__);
		return MV_FAIL;
	}

	/* count fails and passes */
	curr = intrvl->lendpnt;
	while (curr <= intrvl->rendpnt) {
		if (intrvl->vector[curr] == PASS)
			pcnt++;
		else
			fcnt++;
		curr++;
	}

	/* check for all fail */
	if (fcnt == intrvl->size) {
		printf("%s: no pass found\n", __func__);
		return MV_FAIL;
	}

	/* check for all pass */
	if (pcnt == intrvl->size) {
		if (interval_set(intrvl->lendpnt, intrvl->rendpnt, intrvl) != MV_OK)
			return MV_FAIL;
		return MV_OK;
	}

	/* proceed with rmarker */
	curr = intrvl->rmarker;
	if (intrvl->vector[curr % intrvl->size] == PASS) { /* pass at rmarker */
		/* search for fail on right */
		if (intrvl->rmarker > intrvl->rendpnt)
			lmt = intrvl->rendpnt + intrvl->size;
		else
			lmt = intrvl->rmarker + intrvl->size - 1;
		while ((curr <= lmt) &&
		       (intrvl->vector[curr % intrvl->size] == PASS))
			curr++;
		if (curr > lmt) { /* fail not found */
			printf("%s: rmarker: fail following pass not found\n",
			       __func__);
			return MV_FAIL;
		}
		/* fail found */
		pass_rendpnt = curr - 1;
	} else { /* fail at rmarker */
		/* search for pass on left */
		if (intrvl->rmarker > intrvl->rendpnt)
			lmt = intrvl->rmarker - intrvl->size + 1;
		else
			lmt = intrvl->lendpnt;
		while ((curr >= lmt) &&
		       (intrvl->vector[curr % intrvl->size] == FAIL))
			curr--;
		if (curr < lmt) { /* pass not found */
			printf("%s: rmarker: pass preceding fail not found\n",
			       __func__);
			return MV_FAIL;
		}
		/* pass found */
		pass_rendpnt = curr;
	}

	/* search for fail on left of the pass sub-interval's right endpoint */
	curr = pass_rendpnt;
	if (pass_rendpnt > intrvl->rendpnt)
		lmt = pass_rendpnt - intrvl->size + 1;
	else
		lmt = intrvl->lendpnt;
	while ((curr >= lmt) &&
	       (intrvl->vector[curr % intrvl->size] == PASS))
		curr--;
	if (curr < lmt) { /* fail not found */
		printf("%s: fail preceding pass not found\n", __func__);
		return MV_FAIL;
	}
	/* fail found */
	pass_lendpnt = curr + 1;
	if (interval_set(pass_lendpnt, pass_rendpnt, intrvl) != MV_OK)
		return MV_FAIL;

	return MV_OK;
}
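/*
 * Worked example (illustrative only): for an 8-entry interval
 * [lendpnt..rendpnt] = [0..7] with vector F F P P P F F F (P = PASS,
 * F = FAIL) and rmarker = 4 (a PASS), the right scan stops at the fail
 * at index 5, so pass_rendpnt = 4; the left scan from there stops at
 * the fail at index 1, so pass_lendpnt = 2.  The pass sub-interval is
 * [2..4].
 */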

#define ADLL_TAPS_PER_PERIOD	64
int mv_ddr_dm_to_dq_diff_get(u8 vw_sphy_hi_lmt, u8 vw_sphy_lo_lmt,
			     u8 *vw_vector, int *vw_sphy_hi_diff,
			     int *vw_sphy_lo_diff)
{
	struct interval intrvl;

	/* init interval structure */
	if (interval_init(vw_vector, 0, ADLL_TAPS_PER_PERIOD - 1,
			  vw_sphy_lo_lmt, vw_sphy_hi_lmt, &intrvl) != MV_OK)
		return MV_FAIL;

	/* find pass sub-interval */
	if (interval_proc(&intrvl) != MV_OK)
		return MV_FAIL;

	/* check for all pass */
	if ((intrvl.pass_rendpnt == intrvl.rendpnt) &&
	    (intrvl.pass_lendpnt == intrvl.lendpnt)) {
		printf("%s: no fail found\n", __func__);
		return MV_FAIL;
	}

	*vw_sphy_hi_diff = intrvl.pass_rendpnt - vw_sphy_hi_lmt;
	*vw_sphy_lo_diff = vw_sphy_lo_lmt - intrvl.pass_lendpnt;

	return MV_OK;
}
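/*
 * Numeric example for mv_ddr_dm_to_dq_diff_get() above (illustrative
 * only): if interval_proc() finds the pass sub-interval
 * [pass_lendpnt..pass_rendpnt] = [10..50] and the caller's limits are
 * vw_sphy_lo_lmt = 20, vw_sphy_hi_lmt = 40, then
 * *vw_sphy_hi_diff = 50 - 40 = 10 and *vw_sphy_lo_diff = 20 - 10 = 10;
 * in this illustration the pass window extends ten taps beyond the
 * supplied limits on each side.  Either diff may come out negative.
 */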

static int mv_ddr_bist_tx(enum hws_access_type access_type)
{
	mv_ddr_odpg_done_clr();

	ddr3_tip_bist_operation(0, access_type, 0, BIST_START);

	if (mv_ddr_is_odpg_done(MAX_POLLING_ITERATIONS) != MV_OK)
		return MV_FAIL;

	ddr3_tip_bist_operation(0, access_type, 0, BIST_STOP);

	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG, 0,
			  MASK_ALL_BITS);

	return MV_OK;
}

/* prepare odpg for bist operation */
#define WR_OP_ODPG_DATA_CMD_BURST_DLY	2
static int mv_ddr_odpg_bist_prepare(enum hws_pattern pattern,
				    enum hws_access_type access_type,
				    enum hws_dir dir,
				    enum hws_stress_jump stress_jump_addr,
				    enum hws_pattern_duration duration,
				    u32 offset, u32 cs,
				    u32 pattern_addr_len,
				    enum dm_direction dm_dir)
{
	struct pattern_info *pattern_table = ddr3_tip_get_pattern_table();
	u32 tx_burst_size;
	u32 burst_delay;
	u32 rd_mode;

	/* odpg bist write enable */
	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG,
			  (ODPG_WRBUF_WR_CTRL_ENA << ODPG_WRBUF_WR_CTRL_OFFS),
			  (ODPG_WRBUF_WR_CTRL_MASK << ODPG_WRBUF_WR_CTRL_OFFS));

	/* odpg bist read enable/disable */
	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_CTRL_REG,
			  (dir == OPER_READ) ?
			  (ODPG_WRBUF_RD_CTRL_ENA << ODPG_WRBUF_RD_CTRL_OFFS) :
			  (ODPG_WRBUF_RD_CTRL_DIS << ODPG_WRBUF_RD_CTRL_OFFS),
			  (ODPG_WRBUF_RD_CTRL_MASK << ODPG_WRBUF_RD_CTRL_OFFS));

#if defined(CONFIG_DDR4)
	if (pattern == PATTERN_ZERO || pattern == PATTERN_ONE)
#else
	if (pattern == PATTERN_00 || pattern == PATTERN_FF)
#endif
		ddr3_tip_load_pattern_to_odpg(0, access_type, 0, pattern,
					      offset);
	else
		mv_ddr_load_dm_pattern_to_odpg(access_type, pattern, dm_dir);

	ddr3_tip_if_write(0, access_type, 0, ODPG_DATA_BUFFER_SIZE_REG,
			  pattern_addr_len, MASK_ALL_BITS);
	if (dir == OPER_WRITE) {
		tx_burst_size = pattern_table[pattern].tx_burst_size;
		burst_delay = WR_OP_ODPG_DATA_CMD_BURST_DLY;
		rd_mode = ODPG_MODE_TX;
	} else {
		tx_burst_size = 0;
		burst_delay = 0;
		rd_mode = ODPG_MODE_RX;
	}
	ddr3_tip_configure_odpg(0, access_type, 0, dir,
				pattern_table[pattern].num_of_phases_tx,
				tx_burst_size,
				pattern_table[pattern].num_of_phases_rx,
				burst_delay, rd_mode, cs, stress_jump_addr,
				duration);

	return MV_OK;
}

#define BYTES_PER_BURST_64BIT	0x20
#define BYTES_PER_BURST_32BIT	0x10
/*
 * Sweep the tx adll over a full period while rewriting memory through
 * the odpg, then read back and record, per subphy and tap, whether the
 * data matched (one entry per tap in vw_vector).
 */
int mv_ddr_dm_vw_get(enum hws_pattern pattern, u32 cs, u8 *vw_vector)
{
	struct mv_ddr_topology_map *tm = mv_ddr_topology_map_get();
	struct pattern_info *pattern_table = ddr3_tip_get_pattern_table();
	u32 adll_tap;
	u32 wr_ctrl_adll[MAX_BUS_NUM] = {0};
	u32 rd_ctrl_adll[MAX_BUS_NUM] = {0};
	u32 subphy;
	u32 subphy_max = ddr3_tip_dev_attr_get(0, MV_ATTR_OCTET_PER_INTERFACE);
	u32 odpg_addr = 0x0;
	u32 result;
	u32 idx;
	/* burst length in bytes */
	u32 burst_len = (MV_DDR_IS_64BIT_DRAM_MODE(tm->bus_act_mask) ?
			 BYTES_PER_BURST_64BIT : BYTES_PER_BURST_32BIT);

	/* save dqs values to restore after algorithm's run */
	ddr3_tip_read_adll_value(0, wr_ctrl_adll, CTX_PHY_REG(cs), MASK_ALL_BITS);
	ddr3_tip_read_adll_value(0, rd_ctrl_adll, CRX_PHY_REG(cs), MASK_ALL_BITS);

	/* fill memory with base pattern */
	ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG, 0,
			  MASK_ALL_BITS);
	mv_ddr_odpg_bist_prepare(pattern, ACCESS_TYPE_UNICAST, OPER_WRITE,
				 STRESS_NONE, DURATION_SINGLE, bist_offset,
				 cs, pattern_table[pattern].num_of_phases_tx,
#if defined(CONFIG_DDR4)
				 (pattern == PATTERN_ZERO) ?
				 DM_DIR_DIRECT : DM_DIR_INVERSE);
#else
				 (pattern == PATTERN_00) ?
				 DM_DIR_DIRECT : DM_DIR_INVERSE);
#endif

	for (adll_tap = 0; adll_tap < ADLL_TAPS_PER_PERIOD; adll_tap++) {
		/* change target odpg address */
		odpg_addr = adll_tap * burst_len;
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0,
				  ODPG_DATA_BUFFER_OFFS_REG,
				  odpg_addr, MASK_ALL_BITS);

		ddr3_tip_configure_odpg(0, ACCESS_TYPE_UNICAST, 0, OPER_WRITE,
					pattern_table[pattern].num_of_phases_tx,
					pattern_table[pattern].tx_burst_size,
					pattern_table[pattern].num_of_phases_rx,
					WR_OP_ODPG_DATA_CMD_BURST_DLY,
					ODPG_MODE_TX, cs, STRESS_NONE,
					DURATION_SINGLE);

		/* odpg bist write enable */
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG,
				  (ODPG_WRBUF_WR_CTRL_ENA << ODPG_WRBUF_WR_CTRL_OFFS),
				  (ODPG_WRBUF_WR_CTRL_MASK << ODPG_WRBUF_WR_CTRL_OFFS));

		/* odpg bist read disable */
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG,
				  (ODPG_WRBUF_RD_CTRL_DIS << ODPG_WRBUF_RD_CTRL_OFFS),
				  (ODPG_WRBUF_RD_CTRL_MASK << ODPG_WRBUF_RD_CTRL_OFFS));

		/* trigger odpg */
		mv_ddr_bist_tx(ACCESS_TYPE_MULTICAST);
	}

	/* fill memory with vref pattern to increment addr using odpg bist */
	mv_ddr_odpg_bist_prepare(PATTERN_VREF, ACCESS_TYPE_UNICAST, OPER_WRITE,
				 STRESS_NONE, DURATION_SINGLE, bist_offset,
				 cs, pattern_table[pattern].num_of_phases_tx,
#if defined(CONFIG_DDR4)
				 (pattern == PATTERN_ZERO) ?
				 DM_DIR_DIRECT : DM_DIR_INVERSE);
#else
				 (pattern == PATTERN_00) ?
				 DM_DIR_DIRECT : DM_DIR_INVERSE);
#endif

	for (adll_tap = 0; adll_tap < ADLL_TAPS_PER_PERIOD; adll_tap++) {
		ddr3_tip_bus_write(0, ACCESS_TYPE_UNICAST, 0,
				   ACCESS_TYPE_MULTICAST, 0, DDR_PHY_DATA,
				   CTX_PHY_REG(cs), adll_tap);
		/* change target odpg address */
		odpg_addr = adll_tap * burst_len;
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0,
				  ODPG_DATA_BUFFER_OFFS_REG,
				  odpg_addr, MASK_ALL_BITS);
		ddr3_tip_configure_odpg(0, ACCESS_TYPE_UNICAST, 0, OPER_WRITE,
					pattern_table[pattern].num_of_phases_tx,
					pattern_table[pattern].tx_burst_size,
					pattern_table[pattern].num_of_phases_rx,
					WR_OP_ODPG_DATA_CMD_BURST_DLY,
					ODPG_MODE_TX, cs, STRESS_NONE,
					DURATION_SINGLE);

		/* odpg bist write enable */
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG,
				  (ODPG_WRBUF_WR_CTRL_ENA << ODPG_WRBUF_WR_CTRL_OFFS),
				  (ODPG_WRBUF_WR_CTRL_MASK << ODPG_WRBUF_WR_CTRL_OFFS));

		/* odpg bist read disable */
		ddr3_tip_if_write(0, ACCESS_TYPE_UNICAST, 0, ODPG_DATA_CTRL_REG,
				  (ODPG_WRBUF_RD_CTRL_DIS << ODPG_WRBUF_RD_CTRL_OFFS),
				  (ODPG_WRBUF_RD_CTRL_MASK << ODPG_WRBUF_RD_CTRL_OFFS));

		/* trigger odpg */
		mv_ddr_bist_tx(ACCESS_TYPE_MULTICAST);
	}

	/* restore subphy's tx adll_tap to its position */
	for (subphy = 0; subphy < subphy_max; subphy++) {
		VALIDATE_BUS_ACTIVE(tm->bus_act_mask, subphy);
		ddr3_tip_bus_write(0, ACCESS_TYPE_UNICAST, 0,
				   ACCESS_TYPE_UNICAST, subphy, DDR_PHY_DATA,
				   CTX_PHY_REG(cs), wr_ctrl_adll[subphy]);
	}
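	/*
	 * vw_vector layout note (explanatory): results are stored one
	 * entry per (subphy, tap) pair at index
	 * ADLL_TAPS_PER_PERIOD * subphy + adll_tap, so subphy 1, tap 5
	 * lands at 64 * 1 + 5 = 69.  A non-zero entry means at least
	 * one failing read at that tap.
	 */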
	/* read and validate bist (comparing with the base pattern) */
	for (adll_tap = 0; adll_tap < ADLL_TAPS_PER_PERIOD; adll_tap++) {
		result = 0;
		odpg_addr = adll_tap * burst_len;
		/* change addr to fit write */
		mv_ddr_pattern_start_addr_set(pattern_table, pattern, odpg_addr);
		mv_ddr_tip_bist(OPER_READ, 0, pattern, 0, &result);
		for (subphy = 0; subphy < subphy_max; subphy++) {
			VALIDATE_BUS_ACTIVE(tm->bus_act_mask, subphy);
			idx = ADLL_TAPS_PER_PERIOD * subphy + adll_tap;
			vw_vector[idx] |= ((result >> subphy) & 0x1);
		}
	}

	/* restore subphy's rx adll_tap to its position */
	for (subphy = 0; subphy < subphy_max; subphy++) {
		VALIDATE_BUS_ACTIVE(tm->bus_act_mask, subphy);
		ddr3_tip_bus_write(0, ACCESS_TYPE_UNICAST, 0,
				   ACCESS_TYPE_UNICAST, subphy, DDR_PHY_DATA,
				   CRX_PHY_REG(cs), rd_ctrl_adll[subphy]);
	}

	return MV_OK;
}