// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright 2008-2016 Freescale Semiconductor, Inc.
 * Copyright 2017-2021 NXP Semiconductor
 */

#include <common.h>
#include <fsl_ddr_sdram.h>
#include <log.h>
#include <asm/bitops.h>

#include <fsl_ddr.h>

#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
/*
 * compute_cas_latency() - DDR3/DDR4 variant
 *
 * Pick the lowest CAS latency that is supported by every populated DIMM
 * and that satisfies tAAmin at the current memory clock, then store it in
 * outpdimm->lowest_common_spd_caslat.  Warns (but does not fail) when the
 * chosen latency exceeds tAAmax or when the clock is outside the DIMMs'
 * supported tCK range.  Always returns 0.
 */
static unsigned int
compute_cas_latency(const unsigned int ctrl_num,
		    const dimm_params_t *dimm_params,
		    common_timing_params_t *outpdimm,
		    unsigned int number_of_dimms)
{
	unsigned int i;
	unsigned int common_caslat;
	unsigned int caslat_actual;
	unsigned int retry = 16;
	unsigned int tmp = ~0;
	unsigned int mclk_ps = get_memory_clk_period_ps(ctrl_num);
#ifdef CONFIG_SYS_FSL_DDR3
	/* tAAmax is 20 ns for all DDR3 speed grades */
	const unsigned int taamax = 20000;
#else
	/* tAAmax is 18 ns for all DDR4 speed grades */
	const unsigned int taamax = 18000;
#endif

	/*
	 * Compute the common CAS latency supported between slots:
	 * intersect the per-DIMM CAS-latency bitmasks of populated slots.
	 */
	for (i = 0; i < number_of_dimms; i++) {
		if (dimm_params[i].n_ranks)
			tmp &= dimm_params[i].caslat_x;
	}
	common_caslat = tmp;

	/* Guard against a zero clock period to avoid dividing by zero below */
	if (!mclk_ps) {
		printf("DDR clock (MCLK cycle was 0 ps), So setting it to slowest DIMM(s) (tCKmin %u ps).\n",
		       outpdimm->tckmin_x_ps);
		mclk_ps = outpdimm->tckmin_x_ps;
	}

	/* validate if the memory clk is in the range of dimms */
	if (mclk_ps < outpdimm->tckmin_x_ps) {
		printf("DDR clock (MCLK cycle %u ps) is faster than "
		       "the slowest DIMM(s) (tCKmin %u ps) can support.\n",
		       mclk_ps, outpdimm->tckmin_x_ps);
	}
#ifdef CONFIG_SYS_FSL_DDR4
	if (mclk_ps > outpdimm->tckmax_ps) {
		printf("DDR clock (MCLK cycle %u ps) is slower than DIMM(s) (tCKmax %u ps) can support.\n",
		       mclk_ps, outpdimm->tckmax_ps);
	}
#endif
	/* determine the actual cas latency: ceil(tAAmin / tCK) */
	caslat_actual = (outpdimm->taamin_ps + mclk_ps - 1) / mclk_ps;
	/*
	 * Check if the DIMMs support the CAS latency; if not, bump it
	 * upwards (bounded by 16 retries) until a commonly supported
	 * value is found.
	 */
	while (!(common_caslat & (1 << caslat_actual)) && retry > 0) {
		caslat_actual++;
		retry--;
	}
	/* once the calculation of caslat_actual is completed
	 * we must verify that this CAS latency value does not
	 * exceed tAAmax, which is 20 ns for all DDR3 speed grades,
	 * 18ns for all DDR4 speed grades.
	 */
	if (caslat_actual * mclk_ps > taamax) {
		printf("The chosen cas latency %d is too large\n",
		       caslat_actual);
	}
	outpdimm->lowest_common_spd_caslat = caslat_actual;
	debug("lowest_common_spd_caslat is 0x%x\n", caslat_actual);

	return 0;
}
#else /* for DDR1 and DDR2 */
/*
 * compute_cas_latency() - DDR1/DDR2 variant
 *
 * Determine the lowest CAS latency that every populated DIMM declares in
 * its SPD (at CL, CL-1 or CL-2) and that works at the current memory
 * clock, and the highest common de-rated CAS latency.  Results are stored
 * in outpdimm->lowest_common_spd_caslat and
 * outpdimm->highest_common_derated_caslat.  Always returns 0.
 */
static unsigned int
compute_cas_latency(const unsigned int ctrl_num,
		    const dimm_params_t *dimm_params,
		    common_timing_params_t *outpdimm,
		    unsigned int number_of_dimms)
{
	int i;
	const unsigned int mclk_ps = get_memory_clk_period_ps(ctrl_num);
	unsigned int lowest_good_caslat;
	unsigned int not_ok;
	unsigned int temp1, temp2;

	debug("using mclk_ps = %u\n", mclk_ps);
	if (mclk_ps > outpdimm->tckmax_ps) {
		printf("Warning: DDR clock (%u ps) is slower than DIMM(s) (tCKmax %u ps)\n",
		       mclk_ps, outpdimm->tckmax_ps);
	}

	/*
	 * Compute a CAS latency suitable for all DIMMs
	 *
	 * Strategy for SPD-defined latencies: compute only
	 * CAS latency defined by all DIMMs.
	 */

	/*
	 * Step 1: find CAS latency common to all DIMMs using bitwise
	 * operation.
	 */
	temp1 = 0xFF;
	for (i = 0; i < number_of_dimms; i++) {
		if (dimm_params[i].n_ranks) {
			temp2 = 0;
			temp2 |= 1 << dimm_params[i].caslat_x;
			temp2 |= 1 << dimm_params[i].caslat_x_minus_1;
			temp2 |= 1 << dimm_params[i].caslat_x_minus_2;
			/*
			 * If there was no entry for X-2 (X-1) in
			 * the SPD, then caslat_x_minus_2
			 * (caslat_x_minus_1) contains either 255 or
			 * 0xFFFFFFFF because that's what the glorious
			 * __ilog2 function returns for an input of 0.
			 * On 32-bit PowerPC, left shift counts with bit
			 * 26 set (that the value of 255 or 0xFFFFFFFF
			 * will have), cause the destination register to
			 * be 0. That is why this works.
			 */
			temp1 &= temp2;
		}
	}

	/*
	 * Step 2: check each common CAS latency against tCK of each
	 * DIMM's SPD.  Walk the common-latency bitmask from the highest
	 * set bit downwards; the last candidate that every DIMM accepts
	 * is the lowest good CAS latency.
	 */
	lowest_good_caslat = 0;
	temp2 = 0;
	while (temp1) {
		not_ok = 0;
		temp2 = __ilog2(temp1);
		debug("checking common caslat = %u\n", temp2);

		/* Check if this CAS latency will work on all DIMMs at tCK. */
		for (i = 0; i < number_of_dimms; i++) {
			if (!dimm_params[i].n_ranks)
				continue;

			if (dimm_params[i].caslat_x == temp2) {
				if (mclk_ps >= dimm_params[i].tckmin_x_ps) {
					debug("CL = %u ok on DIMM %u at tCK=%u ps with tCKmin_X_ps of %u\n",
					      temp2, i, mclk_ps,
					      dimm_params[i].tckmin_x_ps);
					continue;
				} else {
					not_ok++;
				}
			}

			if (dimm_params[i].caslat_x_minus_1 == temp2) {
				unsigned int tckmin_x_minus_1_ps
					= dimm_params[i].tckmin_x_minus_1_ps;
				if (mclk_ps >= tckmin_x_minus_1_ps) {
					debug("CL = %u ok on DIMM %u at tCK=%u ps with tckmin_x_minus_1_ps of %u\n",
					      temp2, i, mclk_ps,
					      tckmin_x_minus_1_ps);
					continue;
				} else {
					not_ok++;
				}
			}

			if (dimm_params[i].caslat_x_minus_2 == temp2) {
				unsigned int tckmin_x_minus_2_ps
					= dimm_params[i].tckmin_x_minus_2_ps;
				if (mclk_ps >= tckmin_x_minus_2_ps) {
					debug("CL = %u ok on DIMM %u at tCK=%u ps with tckmin_x_minus_2_ps of %u\n",
					      temp2, i, mclk_ps,
					      tckmin_x_minus_2_ps);
					continue;
				} else {
					not_ok++;
				}
			}
		}

		if (!not_ok)
			lowest_good_caslat = temp2;

		/* Clear the candidate just examined and try the next lower one */
		temp1 &= ~(1 << temp2);
	}

	debug("lowest common SPD-defined CAS latency = %u\n",
	      lowest_good_caslat);
	outpdimm->lowest_common_spd_caslat = lowest_good_caslat;

	/*
	 * Compute a common 'de-rated' CAS latency.
	 *
	 * The strategy here is to find the *highest* derated cas latency
	 * with the assumption that all of the DIMMs will support a derated
	 * CAS latency higher than or equal to their lowest derated value.
	 */
	temp1 = 0;
	for (i = 0; i < number_of_dimms; i++)
		temp1 = max(temp1, dimm_params[i].caslat_lowest_derated);

	outpdimm->highest_common_derated_caslat = temp1;
	debug("highest common dereated CAS latency = %u\n", temp1);

	return 0;
}
#endif

/*
 * compute_lowest_common_dimm_parameters()
 *
 * Determine the worst-case DIMM timing parameters from the set of DIMMs
 * whose parameters have been computed into the array pointed to
 * by dimm_params.
 */
unsigned int
compute_lowest_common_dimm_parameters(const unsigned int ctrl_num,
				      const dimm_params_t *dimm_params,
				      common_timing_params_t *outpdimm,
				      const unsigned int number_of_dimms)
{
	unsigned int i, j;

	unsigned int tckmin_x_ps = 0;
	unsigned int tckmax_ps = 0xFFFFFFFF;
	unsigned int trcd_ps = 0;
	unsigned int trp_ps = 0;
	unsigned int tras_ps = 0;
#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
	unsigned int taamin_ps = 0;
#endif
#ifdef CONFIG_SYS_FSL_DDR4
	/* DDR4 write recovery time floor: 15 ns */
	unsigned int twr_ps = 15000;
	unsigned int trfc1_ps = 0;
	unsigned int trfc2_ps = 0;
	unsigned int trfc4_ps = 0;
	unsigned int trrds_ps = 0;
	unsigned int trrdl_ps = 0;
	unsigned int tccdl_ps = 0;
	unsigned int trfc_slr_ps = 0;
#else
	unsigned int twr_ps = 0;
	unsigned int twtr_ps = 0;
	unsigned int trfc_ps = 0;
	unsigned int trrd_ps = 0;
	unsigned int trtp_ps = 0;
#endif
	unsigned int trc_ps = 0;
	unsigned int refresh_rate_ps = 0;
	unsigned int extended_op_srt = 1;
#if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
	unsigned int tis_ps = 0;
	unsigned int tih_ps = 0;
	unsigned int tds_ps = 0;
	unsigned int tdh_ps = 0;
	unsigned int tdqsq_max_ps = 0;
	unsigned int tqhs_ps = 0;
#endif
	unsigned int temp1, temp2;
	unsigned int additive_latency = 0;

	/* temp1 counts DIMMs that are absent or rejected */
	temp1 = 0;
	for (i = 0; i < number_of_dimms; i++) {
		/*
		 * If there are no ranks on this DIMM,
		 * it probably doesn't exist, so skip it.
		 */
		if (dimm_params[i].n_ranks == 0) {
			temp1++;
			continue;
		}
		if (dimm_params[i].n_ranks == 4 && i != 0) {
			printf("Found Quad-rank DIMM in wrong bank, ignored."
				" Software may not run as expected.\n");
			temp1++;
			continue;
		}

		/*
		 * Check if a quad-rank DIMM is plugged when
		 * CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE is not defined.
		 * Only a board with the proper design is capable.
		 */
#ifndef CONFIG_FSL_DDR_FIRST_SLOT_QUAD_CAPABLE
		if (dimm_params[i].n_ranks == 4 && \
		    CONFIG_CHIP_SELECTS_PER_CTRL/CONFIG_DIMM_SLOTS_PER_CTLR < 4) {
			printf("Found Quad-rank DIMM, not able to support.");
			temp1++;
			continue;
		}
#endif
		/*
		 * Find minimum tckmax_ps to find fastest slow speed,
		 * i.e., this is the slowest the whole system can go.
		 */
		tckmax_ps = min(tckmax_ps,
				(unsigned int)dimm_params[i].tckmax_ps);
#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
		taamin_ps = max(taamin_ps,
				(unsigned int)dimm_params[i].taa_ps);
#endif
		/* All remaining parameters take the worst (largest) value */
		tckmin_x_ps = max(tckmin_x_ps,
				  (unsigned int)dimm_params[i].tckmin_x_ps);
		trcd_ps = max(trcd_ps, (unsigned int)dimm_params[i].trcd_ps);
		trp_ps = max(trp_ps, (unsigned int)dimm_params[i].trp_ps);
		tras_ps = max(tras_ps, (unsigned int)dimm_params[i].tras_ps);
#ifdef CONFIG_SYS_FSL_DDR4
		trfc1_ps = max(trfc1_ps,
			       (unsigned int)dimm_params[i].trfc1_ps);
		trfc2_ps = max(trfc2_ps,
			       (unsigned int)dimm_params[i].trfc2_ps);
		trfc4_ps = max(trfc4_ps,
			       (unsigned int)dimm_params[i].trfc4_ps);
		trrds_ps = max(trrds_ps,
			       (unsigned int)dimm_params[i].trrds_ps);
		trrdl_ps = max(trrdl_ps,
			       (unsigned int)dimm_params[i].trrdl_ps);
		tccdl_ps = max(tccdl_ps,
			       (unsigned int)dimm_params[i].tccdl_ps);
		trfc_slr_ps = max(trfc_slr_ps,
				  (unsigned int)dimm_params[i].trfc_slr_ps);
#else
		twr_ps = max(twr_ps, (unsigned int)dimm_params[i].twr_ps);
		twtr_ps = max(twtr_ps, (unsigned int)dimm_params[i].twtr_ps);
		trfc_ps = max(trfc_ps, (unsigned int)dimm_params[i].trfc_ps);
		trrd_ps = max(trrd_ps, (unsigned int)dimm_params[i].trrd_ps);
		trtp_ps = max(trtp_ps, (unsigned int)dimm_params[i].trtp_ps);
#endif
		trc_ps = max(trc_ps, (unsigned int)dimm_params[i].trc_ps);
#if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
		tis_ps = max(tis_ps, (unsigned int)dimm_params[i].tis_ps);
		tih_ps = max(tih_ps, (unsigned int)dimm_params[i].tih_ps);
		tds_ps = max(tds_ps, (unsigned int)dimm_params[i].tds_ps);
		tdh_ps = max(tdh_ps, (unsigned int)dimm_params[i].tdh_ps);
		tqhs_ps = max(tqhs_ps, (unsigned int)dimm_params[i].tqhs_ps);
		/*
		 * Find maximum tdqsq_max_ps to find slowest.
		 *
		 * FIXME: is finding the slowest value the correct
		 * strategy for this parameter?
		 */
		tdqsq_max_ps = max(tdqsq_max_ps,
				   (unsigned int)dimm_params[i].tdqsq_max_ps);
#endif
		refresh_rate_ps = max(refresh_rate_ps,
				      (unsigned int)dimm_params[i].refresh_rate_ps);
		/* extended_op_srt is either 0 or 1, 0 having priority */
		extended_op_srt = min(extended_op_srt,
				      (unsigned int)dimm_params[i].extended_op_srt);
	}

	outpdimm->ndimms_present = number_of_dimms - temp1;

	if (temp1 == number_of_dimms) {
		debug("no dimms this memory controller\n");
		return 0;
	}

	/* Publish the worst-case timings computed above */
	outpdimm->tckmin_x_ps = tckmin_x_ps;
	outpdimm->tckmax_ps = tckmax_ps;
#if defined(CONFIG_SYS_FSL_DDR3) || defined(CONFIG_SYS_FSL_DDR4)
	outpdimm->taamin_ps = taamin_ps;
#endif
	outpdimm->trcd_ps = trcd_ps;
	outpdimm->trp_ps = trp_ps;
	outpdimm->tras_ps = tras_ps;
#ifdef CONFIG_SYS_FSL_DDR4
	outpdimm->trfc1_ps = trfc1_ps;
	outpdimm->trfc2_ps = trfc2_ps;
	outpdimm->trfc4_ps = trfc4_ps;
	outpdimm->trrds_ps = trrds_ps;
	outpdimm->trrdl_ps = trrdl_ps;
	outpdimm->tccdl_ps = tccdl_ps;
	outpdimm->trfc_slr_ps = trfc_slr_ps;
#else
	outpdimm->twtr_ps = twtr_ps;
	outpdimm->trfc_ps = trfc_ps;
	outpdimm->trrd_ps = trrd_ps;
	outpdimm->trtp_ps = trtp_ps;
#endif
	outpdimm->twr_ps = twr_ps;
	outpdimm->trc_ps = trc_ps;
	outpdimm->refresh_rate_ps = refresh_rate_ps;
	outpdimm->extended_op_srt = extended_op_srt;
#if defined(CONFIG_SYS_FSL_DDR1) || defined(CONFIG_SYS_FSL_DDR2)
	outpdimm->tis_ps = tis_ps;
	outpdimm->tih_ps = tih_ps;
	outpdimm->tds_ps = tds_ps;
	outpdimm->tdh_ps = tdh_ps;
	outpdimm->tdqsq_max_ps = tdqsq_max_ps;
	outpdimm->tqhs_ps = tqhs_ps;
#endif

	/* Determine common burst length for all DIMMs. */
	temp1 = 0xff;
	for (i = 0; i < number_of_dimms; i++) {
		if (dimm_params[i].n_ranks) {
			temp1 &= dimm_params[i].burst_lengths_bitmask;
		}
	}
	outpdimm->all_dimms_burst_lengths_bitmask = temp1;

	/*
	 * Determine whether all DIMMs are registered (temp1) or
	 * unbuffered (temp2); mixing the two is an error.
	 */
	temp1 = temp2 = 0;
	for (i = 0; i < number_of_dimms; i++) {
		if (dimm_params[i].n_ranks) {
			if (dimm_params[i].registered_dimm) {
				temp1 = 1;
#ifndef CONFIG_SPL_BUILD
				printf("Detected RDIMM %s\n",
					dimm_params[i].mpart);
#endif
			} else {
				temp2 = 1;
#ifndef CONFIG_SPL_BUILD
				printf("Detected UDIMM %s\n",
					dimm_params[i].mpart);
#endif
			}
#ifndef CONFIG_SPL_BUILD
			puts("       ");
#endif
		}
	}

	outpdimm->all_dimms_registered = 0;
	outpdimm->all_dimms_unbuffered = 0;
	if (temp1 && !temp2) {
		outpdimm->all_dimms_registered = 1;
	} else if (!temp1 && temp2) {
		outpdimm->all_dimms_unbuffered = 1;
	} else {
		printf("ERROR:  Mix of registered buffered and unbuffered "
			"DIMMs detected!\n");
	}

	/*
	 * For registered DIMMs, adopt slot 0's register control words and
	 * verify every other populated DIMM programs the same values.
	 */
	temp1 = 0;
	if (outpdimm->all_dimms_registered)
		for (j = 0; j < 16; j++) {
			outpdimm->rcw[j] = dimm_params[0].rcw[j];
			for (i = 1; i < number_of_dimms; i++) {
				if (!dimm_params[i].n_ranks)
					continue;
				if (dimm_params[i].rcw[j] != dimm_params[0].rcw[j]) {
					temp1 = 1;
					break;
				}
			}
		}

	if (temp1 != 0)
		printf("ERROR: Mix different RDIMM detected!\n");

	/* calculate cas latency for all DDR types */
	if (compute_cas_latency(ctrl_num, dimm_params,
				outpdimm, number_of_dimms))
		return 1;

	/* Determine if all DIMMs ECC capable. */
	temp1 = 1;
	for (i = 0; i < number_of_dimms; i++) {
		if (dimm_params[i].n_ranks &&
			!(dimm_params[i].edc_config & EDC_ECC)) {
			temp1 = 0;
			break;
		}
	}
	if (temp1) {
		debug("all DIMMs ECC capable\n");
	} else {
		debug("Warning: not all DIMMs ECC capable, cant enable ECC\n");
	}
	outpdimm->all_dimms_ecc_capable = temp1;

	/*
	 * Compute additive latency.
	 *
	 * For DDR1, additive latency should be 0.
	 *
	 * For DDR2, with ODT enabled, use "a value" less than ACTTORW,
	 *	which comes from Trcd, and also note that:
	 *	    add_lat + caslat must be >= 4
	 *
	 * For DDR3, we use the AL=0
	 *
	 * When to use additive latency for DDR2:
	 *
	 * I. Because you are using CL=3 and need to do ODT on writes and
	 *    want functionality.
	 *    1. Are you going to use ODT? (Does your board not have
	 *       additional termination circuitry for DQ, DQS, DQS_,
	 *       DM, RDQS, RDQS_ for x4/x8 configs?)
	 *    2. If so, is your lowest supported CL going to be 3?
	 *    3. If so, then you must set AL=1 because
	 *
	 *       WL >= 3 for ODT on writes
	 *       RL = AL + CL
	 *       WL = RL - 1
	 *       ->
	 *       WL = AL + CL - 1
	 *       AL + CL - 1 >= 3
	 *       AL + CL >= 4
	 *       QED
	 *
	 *       RL >= 3 for ODT on reads
	 *       RL = AL + CL
	 *
	 *       Since CL aren't usually less than 2, AL=0 is a minimum,
	 *       so the WL-derived AL should be the  -- FIXME?
	 *
	 * II. Because you are using auto-precharge globally and want to
	 *     use additive latency (posted CAS) to get more bandwidth.
	 *     1. Are you going to use auto-precharge mode globally?
	 *
	 *        Use additive latency and compute AL to be 1 cycle less than
	 *        tRCD, i.e. the READ or WRITE command is in the cycle
	 *        immediately following the ACTIVATE command.
	 *
	 * III. Because you feel like it or want to do some sort of
	 *      degraded-performance experiment.
	 *     1. Do you just want to use additive latency because you feel
	 *        like it?
	 *
	 * Validation: AL is less than tRCD, and within the other
	 * read-to-precharge constraints.
	 */

	additive_latency = 0;

#if defined(CONFIG_SYS_FSL_DDR2)
	if ((outpdimm->lowest_common_spd_caslat < 4) &&
	    (picos_to_mclk(ctrl_num, trcd_ps) >
	     outpdimm->lowest_common_spd_caslat)) {
		additive_latency = picos_to_mclk(ctrl_num, trcd_ps) -
			outpdimm->lowest_common_spd_caslat;
		if (mclk_to_picos(ctrl_num, additive_latency) > trcd_ps) {
			additive_latency = picos_to_mclk(ctrl_num, trcd_ps);
			debug("setting additive_latency to %u because it was "
			      " greater than tRCD_ps\n", additive_latency);
		}
	}
#endif

	/*
	 * Validate additive latency
	 *
	 * AL <= tRCD(min)
	 */
	if (mclk_to_picos(ctrl_num, additive_latency) > trcd_ps) {
		printf("Error: invalid additive latency exceeds tRCD(min).\n");
		return 1;
	}

	/*
	 * RL = CL + AL;  RL >= 3 for ODT_RD_CFG to be enabled
	 * WL = RL - 1;  WL >= 3 for ODT_WL_CFG to be enabled
	 * ADD_LAT (the register) must be set to a value less
	 * than ACTTORW if WL = 1, then AL must be set to 1
	 * RD_TO_PRE (the register) must be set to a minimum
	 * tRTP + AL if AL is nonzero
	 */

	/*
	 * Additive latency will be applied only if the memctl option to
	 * use it.
	 */
	outpdimm->additive_latency = additive_latency;

	debug("tCKmin_ps = %u\n", outpdimm->tckmin_x_ps);
	debug("trcd_ps   = %u\n", outpdimm->trcd_ps);
	debug("trp_ps    = %u\n", outpdimm->trp_ps);
	debug("tras_ps   = %u\n", outpdimm->tras_ps);
#ifdef CONFIG_SYS_FSL_DDR4
	debug("trfc1_ps = %u\n", trfc1_ps);
	debug("trfc2_ps = %u\n", trfc2_ps);
	debug("trfc4_ps = %u\n", trfc4_ps);
	debug("trrds_ps = %u\n", trrds_ps);
	debug("trrdl_ps = %u\n", trrdl_ps);
	debug("tccdl_ps = %u\n", tccdl_ps);
	debug("trfc_slr_ps = %u\n", trfc_slr_ps);
#else
	debug("twtr_ps   = %u\n", outpdimm->twtr_ps);
	debug("trfc_ps   = %u\n", outpdimm->trfc_ps);
	debug("trrd_ps   = %u\n", outpdimm->trrd_ps);
#endif
	debug("twr_ps    = %u\n", outpdimm->twr_ps);
	debug("trc_ps    = %u\n", outpdimm->trc_ps);

	return 0;
}