// SPDX-License-Identifier: GPL-2.0+
/*
 * Freescale i.MX28 NAND flash driver
 *
 * Copyright (C) 2011 Marek Vasut <marek.vasut@gmail.com>
 * on behalf of DENX Software Engineering GmbH
 *
 * Based on code from LTIB:
 * Freescale GPMI NFC NAND Flash Driver
 *
 * Copyright (C) 2010 Freescale Semiconductor, Inc.
 * Copyright (C) 2008 Embedded Alley Solutions, Inc.
 * Copyright 2017-2019 NXP
 */

#include <common.h>
#include <clk.h>
#include <cpu_func.h>
#include <dm.h>
#include <dm/device_compat.h>
#include <malloc.h>
#include <mxs_nand.h>
#include <asm/arch/clock.h>
#include <asm/arch/imx-regs.h>
#include <asm/arch/sys_proto.h>
#include <asm/cache.h>
#include <asm/io.h>
#include <asm/mach-imx/regs-bch.h>
#include <asm/mach-imx/regs-gpmi.h>
#include <linux/delay.h>
#include <linux/errno.h>
#include <linux/mtd/rawnand.h>
#include <linux/sizes.h>
#include <linux/time.h>
#include <linux/types.h>
#include <linux/math64.h>

/* Fixed pool size of chained DMA descriptors per controller instance. */
#define MXS_NAND_DMA_DESCRIPTOR_COUNT		4

#if defined(CONFIG_MX6) || defined(CONFIG_MX7) || defined(CONFIG_IMX8) || \
	defined(CONFIG_IMX8M)
#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT	2
#else
#define MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT	0
#endif
/* Bytes of per-page metadata stored ahead of the first ECC chunk. */
#define MXS_NAND_METADATA_SIZE			10
#define MXS_NAND_BITS_PER_ECC_LEVEL		13

/* The command buffer must be at least one cache line to be safely flushed. */
#if !defined(CONFIG_SYS_CACHELINE_SIZE) || CONFIG_SYS_CACHELINE_SIZE < 32
#define MXS_NAND_COMMAND_BUFFER_SIZE		32
#else
#define MXS_NAND_COMMAND_BUFFER_SIZE		CONFIG_SYS_CACHELINE_SIZE
#endif

/* Timeout (in mxs_wait_mask_set() units) for the BCH complete IRQ. */
#define MXS_NAND_BCH_TIMEOUT			10000

#define TO_CYCLES(duration, period)	DIV_ROUND_UP_ULL(duration, period)

struct nand_ecclayout fake_ecc_layout;

/*
 * Cache management functions
 */
#if !CONFIG_IS_ENABLED(SYS_DCACHE_OFF)
/* Write back the DMA data buffer so the GPMI/BCH engines see current data. */
static void mxs_nand_flush_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->data_buf;

	flush_dcache_range(addr, addr + info->data_buf_size);
}

/* Discard stale cached copies of the DMA data buffer before reading results. */
static void mxs_nand_inval_data_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->data_buf;

	invalidate_dcache_range(addr, addr + info->data_buf_size);
}

/* Write back the command queue buffer before a command DMA transfer. */
static void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info)
{
	uint32_t addr = (uintptr_t)info->cmd_buf;

	flush_dcache_range(addr, addr + MXS_NAND_COMMAND_BUFFER_SIZE);
}
#else
/* No-op stubs when the data cache is disabled. */
static inline void mxs_nand_flush_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_inval_data_buf(struct mxs_nand_info *info) {}
static inline void mxs_nand_flush_cmd_buf(struct mxs_nand_info *info) {}
#endif

/*
 * Hand out the next free DMA descriptor from the fixed pool.
 *
 * Returns NULL when all MXS_NAND_DMA_DESCRIPTOR_COUNT descriptors are
 * already in use; callers build short chains and are expected to stay
 * within the pool size.
 */
static struct mxs_dma_desc *mxs_nand_get_dma_desc(struct mxs_nand_info *info)
{
	struct mxs_dma_desc *desc;

	if (info->desc_index >= MXS_NAND_DMA_DESCRIPTOR_COUNT) {
		printf("MXS NAND: Too many DMA descriptors requested\n");
		return NULL;
	}

	desc = info->desc[info->desc_index];
	info->desc_index++;

	return desc;
}

/* Clear and return every handed-out DMA descriptor back to the pool. */
static void mxs_nand_return_dma_descs(struct mxs_nand_info *info)
{
	int i;
	struct mxs_dma_desc *desc;

	for (i = 0; i < info->desc_index; i++) {
		desc = info->desc[i];
		memset(desc, 0, sizeof(struct mxs_dma_desc));
		desc->address = (dma_addr_t)desc;
	}

	info->desc_index = 0;
}

/*
 * Offset of the per-chunk BCH status bytes within the auxiliary buffer:
 * the metadata area rounded up to the next 4-byte boundary.
 */
static uint32_t mxs_nand_aux_status_offset(void)
{
	return (MXS_NAND_METADATA_SIZE + 0x3) & ~0x3;
}

/*
 * Check whether, for the current geometry, the factory bad block marker
 * falls inside a data chunk (rather than inside ECC parity bytes).
 *
 * On success *chunk_num is set to the chunk holding the marker and true
 * is returned; false means the marker would land in parity data (or the
 * geometry is invalid).
 */
static inline bool mxs_nand_bbm_in_data_chunk(struct bch_geometry *geo,
					      struct mtd_info *mtd,
					      unsigned int *chunk_num)
{
	unsigned int i, j;

	if (geo->ecc_chunk0_size != geo->ecc_chunkn_size) {
		dev_err(mtd->dev, "The size of chunk0 must equal to chunkn\n");
		return false;
	}

	/* Number of whole (data + parity) groups before the marker bit. */
	i = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) /
		(geo->gf_len * geo->ecc_strength +
		 geo->ecc_chunkn_size * 8);

	/* Remaining bits past those groups. */
	j = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8) -
		(geo->gf_len * geo->ecc_strength +
		 geo->ecc_chunkn_size * 8) * i;

	if (j < geo->ecc_chunkn_size * 8) {
		*chunk_num = i + 1;
		dev_dbg(mtd->dev, "Set ecc to %d and bbm in chunk %d\n",
			geo->ecc_strength, *chunk_num);
		return true;
	}

	return false;
}

/*
 * Compute the BCH geometry from an explicitly requested ECC strength and
 * step (chunk) size, e.g. the chip's ONFI-advertised minimum.
 *
 * Returns 0 on success, -EINVAL if the step size is unsupported or the
 * resulting geometry exceeds the controller's capabilities.
 */
static inline int mxs_nand_calc_ecc_layout_by_info(struct bch_geometry *geo,
						   struct mtd_info *mtd,
						   unsigned int ecc_strength,
						   unsigned int ecc_step)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;

	switch (ecc_step) {
	case SZ_512:
		geo->gf_len = 13;
		break;
	case SZ_1K:
		geo->gf_len = 14;
		break;
	default:
		return -EINVAL;
	}

	geo->ecc_chunk0_size = ecc_step;
	geo->ecc_chunkn_size = ecc_step;
	/* The BCH engine only supports even ECC strengths. */
	geo->ecc_strength = round_up(ecc_strength, 2);

	/* Keep the C >= O */
	if (geo->ecc_chunkn_size < mtd->oobsize)
		return -EINVAL;

	if (geo->ecc_strength > nand_info->max_ecc_strength_supported)
		return -EINVAL;

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;

	/* For bit swap. */
	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
				+ MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}

/*
 * Compute the legacy (historical) BCH geometry: derive the strongest ECC
 * that fits into the OOB area, using 512-byte chunks (1024 when the OOB
 * is larger than the chunk).
 *
 * Returns 0 on success, -EINVAL when the derived strength is weaker than
 * what the chip's datasheet requires.
 */
static inline int mxs_nand_legacy_calc_ecc_layout(struct bch_geometry *geo,
						  struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;
	int corr, ds_corr;

	/* The default for the length of Galois Field. */
	geo->gf_len = 13;

	/* The default for chunk size. */
	geo->ecc_chunk0_size = 512;
	geo->ecc_chunkn_size = 512;

	if (geo->ecc_chunkn_size < mtd->oobsize) {
		geo->gf_len = 14;
		geo->ecc_chunk0_size *= 2;
		geo->ecc_chunkn_size *= 2;
	}

	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;

	/*
	 * Determine the ECC layout with the formula:
	 *	ECC bits per chunk = (total page spare data bits) /
	 *		(bits per ECC level) / (chunks per page)
	 * where:
	 *	total page spare data bits =
	 *		(page oob size - meta data size) * (bits per byte)
	 */
	geo->ecc_strength = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
			    / (geo->gf_len * geo->ecc_chunk_count);

	geo->ecc_strength = min(round_down(geo->ecc_strength, 2),
				nand_info->max_ecc_strength_supported);

	/* check ecc strength, same as nand_ecc_is_strong_enough() did*/
	if (chip->ecc_step_ds) {
		corr = mtd->writesize * geo->ecc_strength /
		       geo->ecc_chunkn_size;
		ds_corr = mtd->writesize * chip->ecc_strength_ds /
			  chip->ecc_step_ds;
		if (corr < ds_corr ||
		    geo->ecc_strength < chip->ecc_strength_ds)
			return -EINVAL;
	}

	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - 1)
				+ MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}

/*
 * Compute a BCH geometry for chips with a large (> 1 KiB) OOB area:
 * prefer an ECC strength that keeps the bad block marker inside a data
 * chunk; otherwise fall back to the chip's minimum strength with an
 * extra ECC chunk dedicated to the metadata.
 *
 * Returns 0 on success, -EINVAL when the chip does not advertise a
 * minimum ECC requirement or no layout fits in the OOB.
 */
static inline int mxs_nand_calc_ecc_for_large_oob(struct bch_geometry *geo,
						  struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	unsigned int block_mark_bit_offset;
	unsigned int max_ecc;
	unsigned int bbm_chunk;
	unsigned int i;

	/* sanity check for the minimum ecc nand required */
	if (!(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0))
		return -EINVAL;
	geo->ecc_strength = chip->ecc_strength_ds;

	/* calculate the maximum ecc platform can support*/
	geo->gf_len = 14;
	geo->ecc_chunk0_size = 1024;
	geo->ecc_chunkn_size = 1024;
	geo->ecc_chunk_count = mtd->writesize / geo->ecc_chunkn_size;
	max_ecc = ((mtd->oobsize - MXS_NAND_METADATA_SIZE) * 8)
		  / (geo->gf_len * geo->ecc_chunk_count);
	max_ecc = min(round_down(max_ecc, 2),
		      nand_info->max_ecc_strength_supported);

	/* search a supported ecc strength that makes bbm */
	/* located in data chunk */
	geo->ecc_strength = chip->ecc_strength_ds;
	while (!(geo->ecc_strength > max_ecc)) {
		if (mxs_nand_bbm_in_data_chunk(geo, mtd, &bbm_chunk))
			break;
		geo->ecc_strength += 2;
	}

	/* if none of them works, keep using the minimum ecc */
	/* nand required but changing ecc page layout  */
	if (geo->ecc_strength > max_ecc) {
		geo->ecc_strength = chip->ecc_strength_ds;
		/* add extra ecc for meta data */
		geo->ecc_chunk0_size = 0;
		geo->ecc_chunk_count = (mtd->writesize / geo->ecc_chunkn_size) + 1;
		geo->ecc_for_meta = 1;
		/* check if oob can afford this extra ecc chunk */
		if (mtd->oobsize * 8 < MXS_NAND_METADATA_SIZE * 8 +
		    geo->gf_len * geo->ecc_strength
		    * geo->ecc_chunk_count) {
			printf("unsupported NAND chip with new layout\n");
			return -EINVAL;
		}

		/* calculate in which chunk bbm located */
		bbm_chunk = (mtd->writesize * 8 - MXS_NAND_METADATA_SIZE * 8 -
			     geo->gf_len * geo->ecc_strength) /
			     (geo->gf_len * geo->ecc_strength +
			     geo->ecc_chunkn_size * 8) + 1;
	}

	/* calculate the number of ecc chunk behind the bbm */
	i = (mtd->writesize / geo->ecc_chunkn_size) - bbm_chunk + 1;

	block_mark_bit_offset = mtd->writesize * 8 -
		(geo->ecc_strength * geo->gf_len * (geo->ecc_chunk_count - i)
				+ MXS_NAND_METADATA_SIZE * 8);

	geo->block_mark_byte_offset = block_mark_bit_offset / 8;
	geo->block_mark_bit_offset = block_mark_bit_offset % 8;

	return 0;
}

/*
 * Wait for BCH
 * complete IRQ and clear the IRQ
 */
static int mxs_nand_wait_for_bch_complete(struct mxs_nand_info *nand_info)
{
	int timeout = MXS_NAND_BCH_TIMEOUT;
	int ret;

	ret = mxs_wait_mask_set(&nand_info->bch_regs->hw_bch_ctrl_reg,
				BCH_CTRL_COMPLETE_IRQ, timeout);

	/* Acknowledge the IRQ unconditionally, even after a timeout. */
	writel(BCH_CTRL_COMPLETE_IRQ, &nand_info->bch_regs->hw_bch_ctrl_clr);

	return ret;
}

/*
 * This is the function that we install in the cmd_ctrl function pointer of the
 * owning struct nand_chip. The only functions in the reference implementation
 * that use these functions pointers are cmdfunc and select_chip.
 *
 * In this driver, we implement our own select_chip, so this function will only
 * be called by the reference implementation's cmdfunc. For this reason, we can
 * ignore the chip enable bit and concentrate only on sending bytes to the NAND
 * Flash.
 */
static void mxs_nand_cmd_ctrl(struct mtd_info *mtd, int data, unsigned int ctrl)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct mxs_dma_desc *d;
	uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip;
	int ret;

	/*
	 * If this condition is true, something is _VERY_ wrong in MTD
	 * subsystem!
	 */
	if (nand_info->cmd_queue_len == MXS_NAND_COMMAND_BUFFER_SIZE) {
		printf("MXS NAND: Command queue too long\n");
		return;
	}

	/*
	 * Every operation begins with a command byte and a series of zero or
	 * more address bytes. These are distinguished by either the Address
	 * Latch Enable (ALE) or Command Latch Enable (CLE) signals being
	 * asserted. When MTD is ready to execute the command, it will
	 * deassert both latch enables.
	 *
	 * Rather than run a separate DMA operation for every single byte, we
	 * queue them up and run a single DMA operation for the entire series
	 * of command and data bytes.
	 */
	if (ctrl & (NAND_ALE | NAND_CLE)) {
		if (data != NAND_CMD_NONE)
			nand_info->cmd_buf[nand_info->cmd_queue_len++] = data;
		return;
	}

	/*
	 * If control arrives here, MTD has deasserted both the ALE and CLE,
	 * which means it's ready to run an operation. Check if we have any
	 * bytes to send.
	 */
	if (nand_info->cmd_queue_len == 0)
		return;

	/* Compile the DMA descriptor -- a descriptor that sends command. */
	d = mxs_nand_get_dma_desc(nand_info);
	d->cmd.data =
		MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ |
		MXS_DMA_DESC_CHAIN | MXS_DMA_DESC_DEC_SEM |
		MXS_DMA_DESC_WAIT4END | (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET) |
		(nand_info->cmd_queue_len << MXS_DMA_DESC_BYTES_OFFSET);

	d->cmd.address = (dma_addr_t)nand_info->cmd_buf;

	d->cmd.pio_words[0] =
		GPMI_CTRL0_COMMAND_MODE_WRITE |
		GPMI_CTRL0_WORD_LENGTH |
		(nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) |
		GPMI_CTRL0_ADDRESS_NAND_CLE |
		GPMI_CTRL0_ADDRESS_INCREMENT |
		nand_info->cmd_queue_len;

	mxs_dma_desc_append(channel, d);

	/* Flush caches */
	mxs_nand_flush_cmd_buf(nand_info);

	/* Execute the DMA chain. */
	ret = mxs_dma_go(channel);
	if (ret)
		printf("MXS NAND: Error sending command\n");

	mxs_nand_return_dma_descs(nand_info);

	/* Reset the command queue. */
	nand_info->cmd_queue_len = 0;
}

/*
 * Test if the NAND flash is ready.
 */
static int mxs_nand_device_ready(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	uint32_t tmp;

	/* Each chip select has its own ready/busy bit in the GPMI status. */
	tmp = readl(&nand_info->gpmi_regs->hw_gpmi_stat);
	tmp >>= (GPMI_STAT_READY_BUSY_OFFSET + nand_info->cur_chip);

	return tmp & 1;
}

/*
 * Select the NAND chip.
 */
static void mxs_nand_select_chip(struct mtd_info *mtd, int chip)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->cur_chip = chip;
}

/*
 * Handle block mark swapping.
 *
 * Note that, when this function is called, it doesn't know whether it's
 * swapping the block mark, or swapping it *back* -- but it doesn't matter
 * because the operation is the same.
 */
static void mxs_nand_swap_block_mark(struct bch_geometry *geo,
				     uint8_t *data_buf, uint8_t *oob_buf)
{
	uint32_t bit_offset = geo->block_mark_bit_offset;
	uint32_t buf_offset = geo->block_mark_byte_offset;

	uint32_t src;
	uint32_t dst;

	/*
	 * Get the byte from the data area that overlays the block mark. Since
	 * the ECC engine applies its own view to the bits in the page, the
	 * physical block mark won't (in general) appear on a byte boundary in
	 * the data.
	 */
	src = data_buf[buf_offset] >> bit_offset;
	src |= data_buf[buf_offset + 1] << (8 - bit_offset);

	dst = oob_buf[0];

	oob_buf[0] = src;

	/* Clear the overlapping bits, then splice the OOB byte in. */
	data_buf[buf_offset] &= ~(0xff << bit_offset);
	data_buf[buf_offset + 1] &= 0xff << bit_offset;

	data_buf[buf_offset] |= dst << bit_offset;
	data_buf[buf_offset + 1] |= dst >> (8 - bit_offset);
}

/*
 * Read data from NAND.
493 */ 494static void mxs_nand_read_buf(struct mtd_info *mtd, uint8_t *buf, int length) 495{ 496 struct nand_chip *nand = mtd_to_nand(mtd); 497 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 498 struct mxs_dma_desc *d; 499 uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip; 500 int ret; 501 502 if (length > NAND_MAX_PAGESIZE) { 503 printf("MXS NAND: DMA buffer too big\n"); 504 return; 505 } 506 507 if (!buf) { 508 printf("MXS NAND: DMA buffer is NULL\n"); 509 return; 510 } 511 512 /* Compile the DMA descriptor - a descriptor that reads data. */ 513 d = mxs_nand_get_dma_desc(nand_info); 514 d->cmd.data = 515 MXS_DMA_DESC_COMMAND_DMA_WRITE | MXS_DMA_DESC_IRQ | 516 MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END | 517 (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) | 518 (length << MXS_DMA_DESC_BYTES_OFFSET); 519 520 d->cmd.address = (dma_addr_t)nand_info->data_buf; 521 522 d->cmd.pio_words[0] = 523 GPMI_CTRL0_COMMAND_MODE_READ | 524 GPMI_CTRL0_WORD_LENGTH | 525 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | 526 GPMI_CTRL0_ADDRESS_NAND_DATA | 527 length; 528 529 mxs_dma_desc_append(channel, d); 530 531 /* 532 * A DMA descriptor that waits for the command to end and the chip to 533 * become ready. 534 * 535 * I think we actually should *not* be waiting for the chip to become 536 * ready because, after all, we don't care. I think the original code 537 * did that and no one has re-thought it yet. 
538 */ 539 d = mxs_nand_get_dma_desc(nand_info); 540 d->cmd.data = 541 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ | 542 MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_DEC_SEM | 543 MXS_DMA_DESC_WAIT4END | (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET); 544 545 d->cmd.address = 0; 546 547 d->cmd.pio_words[0] = 548 GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY | 549 GPMI_CTRL0_WORD_LENGTH | 550 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | 551 GPMI_CTRL0_ADDRESS_NAND_DATA; 552 553 mxs_dma_desc_append(channel, d); 554 555 /* Invalidate caches */ 556 mxs_nand_inval_data_buf(nand_info); 557 558 /* Execute the DMA chain. */ 559 ret = mxs_dma_go(channel); 560 if (ret) { 561 printf("MXS NAND: DMA read error\n"); 562 goto rtn; 563 } 564 565 /* Invalidate caches */ 566 mxs_nand_inval_data_buf(nand_info); 567 568 memcpy(buf, nand_info->data_buf, length); 569 570rtn: 571 mxs_nand_return_dma_descs(nand_info); 572} 573 574/* 575 * Write data to NAND. 576 */ 577static void mxs_nand_write_buf(struct mtd_info *mtd, const uint8_t *buf, 578 int length) 579{ 580 struct nand_chip *nand = mtd_to_nand(mtd); 581 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 582 struct mxs_dma_desc *d; 583 uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip; 584 int ret; 585 586 if (length > NAND_MAX_PAGESIZE) { 587 printf("MXS NAND: DMA buffer too big\n"); 588 return; 589 } 590 591 if (!buf) { 592 printf("MXS NAND: DMA buffer is NULL\n"); 593 return; 594 } 595 596 memcpy(nand_info->data_buf, buf, length); 597 598 /* Compile the DMA descriptor - a descriptor that writes data. 
*/ 599 d = mxs_nand_get_dma_desc(nand_info); 600 d->cmd.data = 601 MXS_DMA_DESC_COMMAND_DMA_READ | MXS_DMA_DESC_IRQ | 602 MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END | 603 (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET) | 604 (length << MXS_DMA_DESC_BYTES_OFFSET); 605 606 d->cmd.address = (dma_addr_t)nand_info->data_buf; 607 608 d->cmd.pio_words[0] = 609 GPMI_CTRL0_COMMAND_MODE_WRITE | 610 GPMI_CTRL0_WORD_LENGTH | 611 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | 612 GPMI_CTRL0_ADDRESS_NAND_DATA | 613 length; 614 615 mxs_dma_desc_append(channel, d); 616 617 /* Flush caches */ 618 mxs_nand_flush_data_buf(nand_info); 619 620 /* Execute the DMA chain. */ 621 ret = mxs_dma_go(channel); 622 if (ret) 623 printf("MXS NAND: DMA write error\n"); 624 625 mxs_nand_return_dma_descs(nand_info); 626} 627 628/* 629 * Read a single byte from NAND. 630 */ 631static uint8_t mxs_nand_read_byte(struct mtd_info *mtd) 632{ 633 uint8_t buf; 634 mxs_nand_read_buf(mtd, &buf, 1); 635 return buf; 636} 637 638static bool mxs_nand_erased_page(struct mtd_info *mtd, struct nand_chip *nand, 639 u8 *buf, int chunk, int page) 640{ 641 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 642 struct bch_geometry *geo = &nand_info->bch_geometry; 643 unsigned int flip_bits = 0, flip_bits_noecc = 0; 644 unsigned int threshold; 645 unsigned int base = geo->ecc_chunkn_size * chunk; 646 u32 *dma_buf = (u32 *)buf; 647 int i; 648 649 threshold = geo->gf_len / 2; 650 if (threshold > geo->ecc_strength) 651 threshold = geo->ecc_strength; 652 653 for (i = 0; i < geo->ecc_chunkn_size; i++) { 654 flip_bits += hweight8(~buf[base + i]); 655 if (flip_bits > threshold) 656 return false; 657 } 658 659 nand->cmdfunc(mtd, NAND_CMD_READ0, 0, page); 660 nand->read_buf(mtd, buf, mtd->writesize); 661 662 for (i = 0; i < mtd->writesize / 4; i++) { 663 flip_bits_noecc += hweight32(~dma_buf[i]); 664 if (flip_bits_noecc > threshold) 665 return false; 666 } 667 668 mtd->ecc_stats.corrected += flip_bits; 669 670 memset(buf, 
0xff, mtd->writesize); 671 672 printf("The page(%d) is an erased page(%d,%d,%d,%d).\n", page, chunk, threshold, flip_bits, flip_bits_noecc); 673 674 return true; 675} 676 677/* 678 * Read a page from NAND. 679 */ 680static int mxs_nand_ecc_read_page(struct mtd_info *mtd, struct nand_chip *nand, 681 uint8_t *buf, int oob_required, 682 int page) 683{ 684 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 685 struct bch_geometry *geo = &nand_info->bch_geometry; 686 struct mxs_bch_regs *bch_regs = nand_info->bch_regs; 687 struct mxs_dma_desc *d; 688 uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip; 689 uint32_t corrected = 0, failed = 0; 690 uint8_t *status; 691 int i, ret; 692 int flag = 0; 693 694 /* Compile the DMA descriptor - wait for ready. */ 695 d = mxs_nand_get_dma_desc(nand_info); 696 d->cmd.data = 697 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN | 698 MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END | 699 (1 << MXS_DMA_DESC_PIO_WORDS_OFFSET); 700 701 d->cmd.address = 0; 702 703 d->cmd.pio_words[0] = 704 GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY | 705 GPMI_CTRL0_WORD_LENGTH | 706 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | 707 GPMI_CTRL0_ADDRESS_NAND_DATA; 708 709 mxs_dma_desc_append(channel, d); 710 711 /* Compile the DMA descriptor - enable the BCH block and read. 
*/ 712 d = mxs_nand_get_dma_desc(nand_info); 713 d->cmd.data = 714 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN | 715 MXS_DMA_DESC_WAIT4END | (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET); 716 717 d->cmd.address = 0; 718 719 d->cmd.pio_words[0] = 720 GPMI_CTRL0_COMMAND_MODE_READ | 721 GPMI_CTRL0_WORD_LENGTH | 722 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | 723 GPMI_CTRL0_ADDRESS_NAND_DATA | 724 (mtd->writesize + mtd->oobsize); 725 d->cmd.pio_words[1] = 0; 726 d->cmd.pio_words[2] = 727 GPMI_ECCCTRL_ENABLE_ECC | 728 GPMI_ECCCTRL_ECC_CMD_DECODE | 729 GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE; 730 d->cmd.pio_words[3] = mtd->writesize + mtd->oobsize; 731 d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf; 732 d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf; 733 734 if (nand_info->en_randomizer) { 735 d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE | 736 GPMI_ECCCTRL_RANDOMIZER_TYPE2; 737 d->cmd.pio_words[3] |= (page % 256) << 16; 738 } 739 740 mxs_dma_desc_append(channel, d); 741 742 /* Compile the DMA descriptor - disable the BCH block. */ 743 d = mxs_nand_get_dma_desc(nand_info); 744 d->cmd.data = 745 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_CHAIN | 746 MXS_DMA_DESC_NAND_WAIT_4_READY | MXS_DMA_DESC_WAIT4END | 747 (3 << MXS_DMA_DESC_PIO_WORDS_OFFSET); 748 749 d->cmd.address = 0; 750 751 d->cmd.pio_words[0] = 752 GPMI_CTRL0_COMMAND_MODE_WAIT_FOR_READY | 753 GPMI_CTRL0_WORD_LENGTH | 754 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | 755 GPMI_CTRL0_ADDRESS_NAND_DATA | 756 (mtd->writesize + mtd->oobsize); 757 d->cmd.pio_words[1] = 0; 758 d->cmd.pio_words[2] = 0; 759 760 mxs_dma_desc_append(channel, d); 761 762 /* Compile the DMA descriptor - deassert the NAND lock and interrupt. 
*/ 763 d = mxs_nand_get_dma_desc(nand_info); 764 d->cmd.data = 765 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ | 766 MXS_DMA_DESC_DEC_SEM; 767 768 d->cmd.address = 0; 769 770 mxs_dma_desc_append(channel, d); 771 772 /* Invalidate caches */ 773 mxs_nand_inval_data_buf(nand_info); 774 775 /* Execute the DMA chain. */ 776 ret = mxs_dma_go(channel); 777 if (ret) { 778 printf("MXS NAND: DMA read error\n"); 779 goto rtn; 780 } 781 782 ret = mxs_nand_wait_for_bch_complete(nand_info); 783 if (ret) { 784 printf("MXS NAND: BCH read timeout\n"); 785 goto rtn; 786 } 787 788 mxs_nand_return_dma_descs(nand_info); 789 790 /* Invalidate caches */ 791 mxs_nand_inval_data_buf(nand_info); 792 793 /* Read DMA completed, now do the mark swapping. */ 794 mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf); 795 796 /* Loop over status bytes, accumulating ECC status. */ 797 status = nand_info->oob_buf + mxs_nand_aux_status_offset(); 798 for (i = 0; i < geo->ecc_chunk_count; i++) { 799 if (status[i] == 0x00) 800 continue; 801 802 if (status[i] == 0xff) { 803 if (!nand_info->en_randomizer && 804 (is_mx6dqp() || is_mx7() || is_mx6ul() || 805 is_imx8() || is_imx8m())) 806 if (readl(&bch_regs->hw_bch_debug1)) 807 flag = 1; 808 continue; 809 } 810 811 if (status[i] == 0xfe) { 812 if (mxs_nand_erased_page(mtd, nand, 813 nand_info->data_buf, i, page)) 814 break; 815 failed++; 816 continue; 817 } 818 819 corrected += status[i]; 820 } 821 822 /* Propagate ECC status to the owning MTD. */ 823 mtd->ecc_stats.failed += failed; 824 mtd->ecc_stats.corrected += corrected; 825 826 /* 827 * It's time to deliver the OOB bytes. See mxs_nand_ecc_read_oob() for 828 * details about our policy for delivering the OOB. 829 * 830 * We fill the caller's buffer with set bits, and then copy the block 831 * mark to the caller's buffer. 
Note that, if block mark swapping was 832 * necessary, it has already been done, so we can rely on the first 833 * byte of the auxiliary buffer to contain the block mark. 834 */ 835 memset(nand->oob_poi, 0xff, mtd->oobsize); 836 837 nand->oob_poi[0] = nand_info->oob_buf[0]; 838 839 memcpy(buf, nand_info->data_buf, mtd->writesize); 840 841 if (flag) 842 memset(buf, 0xff, mtd->writesize); 843rtn: 844 mxs_nand_return_dma_descs(nand_info); 845 846 return ret; 847} 848 849/* 850 * Write a page to NAND. 851 */ 852static int mxs_nand_ecc_write_page(struct mtd_info *mtd, 853 struct nand_chip *nand, const uint8_t *buf, 854 int oob_required, int page) 855{ 856 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 857 struct bch_geometry *geo = &nand_info->bch_geometry; 858 struct mxs_dma_desc *d; 859 uint32_t channel = MXS_DMA_CHANNEL_AHB_APBH_GPMI0 + nand_info->cur_chip; 860 int ret; 861 862 memcpy(nand_info->data_buf, buf, mtd->writesize); 863 memcpy(nand_info->oob_buf, nand->oob_poi, mtd->oobsize); 864 865 /* Handle block mark swapping. */ 866 mxs_nand_swap_block_mark(geo, nand_info->data_buf, nand_info->oob_buf); 867 868 /* Compile the DMA descriptor - write data. 
*/ 869 d = mxs_nand_get_dma_desc(nand_info); 870 d->cmd.data = 871 MXS_DMA_DESC_COMMAND_NO_DMAXFER | MXS_DMA_DESC_IRQ | 872 MXS_DMA_DESC_DEC_SEM | MXS_DMA_DESC_WAIT4END | 873 (6 << MXS_DMA_DESC_PIO_WORDS_OFFSET); 874 875 d->cmd.address = 0; 876 877 d->cmd.pio_words[0] = 878 GPMI_CTRL0_COMMAND_MODE_WRITE | 879 GPMI_CTRL0_WORD_LENGTH | 880 (nand_info->cur_chip << GPMI_CTRL0_CS_OFFSET) | 881 GPMI_CTRL0_ADDRESS_NAND_DATA; 882 d->cmd.pio_words[1] = 0; 883 d->cmd.pio_words[2] = 884 GPMI_ECCCTRL_ENABLE_ECC | 885 GPMI_ECCCTRL_ECC_CMD_ENCODE | 886 GPMI_ECCCTRL_BUFFER_MASK_BCH_PAGE; 887 d->cmd.pio_words[3] = (mtd->writesize + mtd->oobsize); 888 d->cmd.pio_words[4] = (dma_addr_t)nand_info->data_buf; 889 d->cmd.pio_words[5] = (dma_addr_t)nand_info->oob_buf; 890 891 if (nand_info->en_randomizer) { 892 d->cmd.pio_words[2] |= GPMI_ECCCTRL_RANDOMIZER_ENABLE | 893 GPMI_ECCCTRL_RANDOMIZER_TYPE2; 894 /* 895 * Write NAND page number needed to be randomized 896 * to GPMI_ECCCOUNT register. 897 * 898 * The value is between 0-255. For additional details 899 * check 9.6.6.4 of i.MX7D Applications Processor reference 900 */ 901 d->cmd.pio_words[3] |= (page % 256) << 16; 902 } 903 904 mxs_dma_desc_append(channel, d); 905 906 /* Flush caches */ 907 mxs_nand_flush_data_buf(nand_info); 908 909 /* Execute the DMA chain. */ 910 ret = mxs_dma_go(channel); 911 if (ret) { 912 printf("MXS NAND: DMA write error\n"); 913 goto rtn; 914 } 915 916 ret = mxs_nand_wait_for_bch_complete(nand_info); 917 if (ret) { 918 printf("MXS NAND: BCH write timeout\n"); 919 goto rtn; 920 } 921 922rtn: 923 mxs_nand_return_dma_descs(nand_info); 924 return 0; 925} 926 927/* 928 * Read OOB from NAND. 929 * 930 * This function is a veneer that replaces the function originally installed by 931 * the NAND Flash MTD code. 
932 */ 933static int mxs_nand_hook_read_oob(struct mtd_info *mtd, loff_t from, 934 struct mtd_oob_ops *ops) 935{ 936 struct nand_chip *chip = mtd_to_nand(mtd); 937 struct mxs_nand_info *nand_info = nand_get_controller_data(chip); 938 int ret; 939 940 if (ops->mode == MTD_OPS_RAW) 941 nand_info->raw_oob_mode = 1; 942 else 943 nand_info->raw_oob_mode = 0; 944 945 ret = nand_info->hooked_read_oob(mtd, from, ops); 946 947 nand_info->raw_oob_mode = 0; 948 949 return ret; 950} 951 952/* 953 * Write OOB to NAND. 954 * 955 * This function is a veneer that replaces the function originally installed by 956 * the NAND Flash MTD code. 957 */ 958static int mxs_nand_hook_write_oob(struct mtd_info *mtd, loff_t to, 959 struct mtd_oob_ops *ops) 960{ 961 struct nand_chip *chip = mtd_to_nand(mtd); 962 struct mxs_nand_info *nand_info = nand_get_controller_data(chip); 963 int ret; 964 965 if (ops->mode == MTD_OPS_RAW) 966 nand_info->raw_oob_mode = 1; 967 else 968 nand_info->raw_oob_mode = 0; 969 970 ret = nand_info->hooked_write_oob(mtd, to, ops); 971 972 nand_info->raw_oob_mode = 0; 973 974 return ret; 975} 976 977/* 978 * Mark a block bad in NAND. 979 * 980 * This function is a veneer that replaces the function originally installed by 981 * the NAND Flash MTD code. 982 */ 983static int mxs_nand_hook_block_markbad(struct mtd_info *mtd, loff_t ofs) 984{ 985 struct nand_chip *chip = mtd_to_nand(mtd); 986 struct mxs_nand_info *nand_info = nand_get_controller_data(chip); 987 int ret; 988 989 nand_info->marking_block_bad = 1; 990 991 ret = nand_info->hooked_block_markbad(mtd, ofs); 992 993 nand_info->marking_block_bad = 0; 994 995 return ret; 996} 997 998/* 999 * There are several places in this driver where we have to handle the OOB and 1000 * block marks. This is the function where things are the most complicated, so 1001 * this is where we try to explain it all. All the other places refer back to 1002 * here. 
1003 * 1004 * These are the rules, in order of decreasing importance: 1005 * 1006 * 1) Nothing the caller does can be allowed to imperil the block mark, so all 1007 * write operations take measures to protect it. 1008 * 1009 * 2) In read operations, the first byte of the OOB we return must reflect the 1010 * true state of the block mark, no matter where that block mark appears in 1011 * the physical page. 1012 * 1013 * 3) ECC-based read operations return an OOB full of set bits (since we never 1014 * allow ECC-based writes to the OOB, it doesn't matter what ECC-based reads 1015 * return). 1016 * 1017 * 4) "Raw" read operations return a direct view of the physical bytes in the 1018 * page, using the conventional definition of which bytes are data and which 1019 * are OOB. This gives the caller a way to see the actual, physical bytes 1020 * in the page, without the distortions applied by our ECC engine. 1021 * 1022 * What we do for this specific read operation depends on whether we're doing 1023 * "raw" read, or an ECC-based read. 1024 * 1025 * It turns out that knowing whether we want an "ECC-based" or "raw" read is not 1026 * easy. When reading a page, for example, the NAND Flash MTD code calls our 1027 * ecc.read_page or ecc.read_page_raw function. Thus, the fact that MTD wants an 1028 * ECC-based or raw view of the page is implicit in which function it calls 1029 * (there is a similar pair of ECC-based/raw functions for writing). 1030 * 1031 * Since MTD assumes the OOB is not covered by ECC, there is no pair of 1032 * ECC-based/raw functions for reading or or writing the OOB. The fact that the 1033 * caller wants an ECC-based or raw view of the page is not propagated down to 1034 * this driver. 1035 * 1036 * Since our OOB *is* covered by ECC, we need this information. So, we hook the 1037 * ecc.read_oob and ecc.write_oob function pointers in the owning 1038 * struct mtd_info with our own functions. 
These hook functions set the 1039 * raw_oob_mode field so that, when control finally arrives here, we'll know 1040 * what to do. 1041 */ 1042static int mxs_nand_ecc_read_oob(struct mtd_info *mtd, struct nand_chip *nand, 1043 int page) 1044{ 1045 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 1046 1047 /* 1048 * First, fill in the OOB buffer. If we're doing a raw read, we need to 1049 * get the bytes from the physical page. If we're not doing a raw read, 1050 * we need to fill the buffer with set bits. 1051 */ 1052 if (nand_info->raw_oob_mode) { 1053 /* 1054 * If control arrives here, we're doing a "raw" read. Send the 1055 * command to read the conventional OOB and read it. 1056 */ 1057 nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page); 1058 nand->read_buf(mtd, nand->oob_poi, mtd->oobsize); 1059 } else { 1060 /* 1061 * If control arrives here, we're not doing a "raw" read. Fill 1062 * the OOB buffer with set bits and correct the block mark. 1063 */ 1064 memset(nand->oob_poi, 0xff, mtd->oobsize); 1065 1066 nand->cmdfunc(mtd, NAND_CMD_READ0, mtd->writesize, page); 1067 mxs_nand_read_buf(mtd, nand->oob_poi, 1); 1068 } 1069 1070 return 0; 1071 1072} 1073 1074/* 1075 * Write OOB data to NAND. 1076 */ 1077static int mxs_nand_ecc_write_oob(struct mtd_info *mtd, struct nand_chip *nand, 1078 int page) 1079{ 1080 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 1081 uint8_t block_mark = 0; 1082 1083 /* 1084 * There are fundamental incompatibilities between the i.MX GPMI NFC and 1085 * the NAND Flash MTD model that make it essentially impossible to write 1086 * the out-of-band bytes. 1087 * 1088 * We permit *ONE* exception. If the *intent* of writing the OOB is to 1089 * mark a block bad, we can do that. 1090 */ 1091 1092 if (!nand_info->marking_block_bad) { 1093 printf("NXS NAND: Writing OOB isn't supported\n"); 1094 return -EIO; 1095 } 1096 1097 /* Write the block mark. 
*/ 1098 nand->cmdfunc(mtd, NAND_CMD_SEQIN, mtd->writesize, page); 1099 nand->write_buf(mtd, &block_mark, 1); 1100 nand->cmdfunc(mtd, NAND_CMD_PAGEPROG, -1, -1); 1101 1102 /* Check if it worked. */ 1103 if (nand->waitfunc(mtd, nand) & NAND_STATUS_FAIL) 1104 return -EIO; 1105 1106 return 0; 1107} 1108 1109/* 1110 * Claims all blocks are good. 1111 * 1112 * In principle, this function is *only* called when the NAND Flash MTD system 1113 * isn't allowed to keep an in-memory bad block table, so it is forced to ask 1114 * the driver for bad block information. 1115 * 1116 * In fact, we permit the NAND Flash MTD system to have an in-memory BBT, so 1117 * this function is *only* called when we take it away. 1118 * 1119 * Thus, this function is only called when we want *all* blocks to look good, 1120 * so it *always* return success. 1121 */ 1122static int mxs_nand_block_bad(struct mtd_info *mtd, loff_t ofs) 1123{ 1124 return 0; 1125} 1126 1127static int mxs_nand_set_geometry(struct mtd_info *mtd, struct bch_geometry *geo) 1128{ 1129 struct nand_chip *chip = mtd_to_nand(mtd); 1130 struct nand_chip *nand = mtd_to_nand(mtd); 1131 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 1132 int err; 1133 1134 if (chip->ecc_strength_ds > nand_info->max_ecc_strength_supported) { 1135 printf("unsupported NAND chip, minimum ecc required %d\n" 1136 , chip->ecc_strength_ds); 1137 return -EINVAL; 1138 } 1139 1140 /* use the legacy bch setting by default */ 1141 if ((!nand_info->use_minimum_ecc && mtd->oobsize < 1024) || 1142 !(chip->ecc_strength_ds > 0 && chip->ecc_step_ds > 0)) { 1143 dev_dbg(mtd->dev, "use legacy bch geometry\n"); 1144 err = mxs_nand_legacy_calc_ecc_layout(geo, mtd); 1145 if (!err) 1146 return 0; 1147 } 1148 1149 /* for large oob nand */ 1150 if (mtd->oobsize > 1024) { 1151 dev_dbg(mtd->dev, "use large oob bch geometry\n"); 1152 err = mxs_nand_calc_ecc_for_large_oob(geo, mtd); 1153 if (!err) 1154 return 0; 1155 } 1156 1157 /* otherwise use the minimum ecc 
nand chips required */ 1158 dev_dbg(mtd->dev, "use minimum ecc bch geometry\n"); 1159 err = mxs_nand_calc_ecc_layout_by_info(geo, mtd, chip->ecc_strength_ds, 1160 chip->ecc_step_ds); 1161 1162 if (err) 1163 dev_err(mtd->dev, "none of the bch geometry setting works\n"); 1164 1165 return err; 1166} 1167 1168void mxs_nand_dump_geo(struct mtd_info *mtd) 1169{ 1170 struct nand_chip *nand = mtd_to_nand(mtd); 1171 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 1172 struct bch_geometry *geo = &nand_info->bch_geometry; 1173 1174 dev_dbg(mtd->dev, "BCH Geometry :\n" 1175 "GF Length\t\t: %u\n" 1176 "ECC Strength\t\t: %u\n" 1177 "ECC for Meta\t\t: %u\n" 1178 "ECC Chunk0 Size\t\t: %u\n" 1179 "ECC Chunkn Size\t\t: %u\n" 1180 "ECC Chunk Count\t\t: %u\n" 1181 "Block Mark Byte Offset\t: %u\n" 1182 "Block Mark Bit Offset\t: %u\n", 1183 geo->gf_len, 1184 geo->ecc_strength, 1185 geo->ecc_for_meta, 1186 geo->ecc_chunk0_size, 1187 geo->ecc_chunkn_size, 1188 geo->ecc_chunk_count, 1189 geo->block_mark_byte_offset, 1190 geo->block_mark_bit_offset); 1191} 1192 1193/* 1194 * At this point, the physical NAND Flash chips have been identified and 1195 * counted, so we know the physical geometry. This enables us to make some 1196 * important configuration decisions. 1197 * 1198 * The return value of this function propagates directly back to this driver's 1199 * board_nand_init(). Anything other than zero will cause this driver to 1200 * tear everything down and declare failure. 
 */
int mxs_nand_setup_ecc(struct mtd_info *mtd)
{
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);
	struct bch_geometry *geo = &nand_info->bch_geometry;
	struct mxs_bch_regs *bch_regs = nand_info->bch_regs;
	uint32_t tmp;
	int ret;

	nand_info->en_randomizer = 0;
	/* Save the normal page geometry; mxs_nand_mode_normal() restores it. */
	nand_info->oobsize = mtd->oobsize;
	nand_info->writesize = mtd->writesize;

	ret = mxs_nand_set_geometry(mtd, geo);
	if (ret)
		return ret;

	mxs_nand_dump_geo(mtd);

	/* Configure BCH and set NFC geometry */
	mxs_reset_block(&bch_regs->hw_bch_ctrl_reg);

	/* Configure layout 0 */
	tmp = (geo->ecc_chunk_count - 1) << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET;
	tmp |= MXS_NAND_METADATA_SIZE << BCH_FLASHLAYOUT0_META_SIZE_OFFSET;
	/* ECC level is programmed as strength / 2 */
	tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT0_ECC0_OFFSET;
	/* Chunk size in 4-byte units on MX6/MX7/IMX8* (shift 2), bytes otherwise */
	tmp |= geo->ecc_chunk0_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (geo->gf_len == 14 ? 1 : 0) <<
		BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout0);
	/* Keep a copy so mxs_nand_mode_normal() can restore the register. */
	nand_info->bch_flash0layout0 = tmp;

	tmp = (mtd->writesize + mtd->oobsize)
		<< BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET;
	tmp |= (geo->ecc_strength >> 1) << BCH_FLASHLAYOUT1_ECCN_OFFSET;
	tmp |= geo->ecc_chunkn_size >> MXS_NAND_CHUNK_DATA_CHUNK_SIZE_SHIFT;
	tmp |= (geo->gf_len == 14 ? 1 : 0) <<
		BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET;
	writel(tmp, &bch_regs->hw_bch_flash0layout1);
	nand_info->bch_flash0layout1 = tmp;

	/* Set erase threshold to ecc strength for mx6ul, mx6qp and mx7 */
	if (is_mx6dqp() || is_mx7() ||
	    is_mx6ul() || is_imx8() || is_imx8m())
		writel(BCH_MODE_ERASE_THRESHOLD(geo->ecc_strength),
		       &bch_regs->hw_bch_mode);

	/* Set *all* chip selects to use layout 0 */
	writel(0, &bch_regs->hw_bch_layoutselect);

	/* Enable BCH complete interrupt */
	writel(BCH_CTRL_COMPLETE_IRQ_EN, &bch_regs->hw_bch_ctrl_set);

	return 0;
}

/*
 * Allocate DMA buffers
 *
 * Allocates one cache-aligned data+OOB bounce buffer and a small command
 * buffer. Returns 0 on success, -ENOMEM on allocation failure (with the
 * partially allocated buffer freed).
 */
int mxs_nand_alloc_buffers(struct mxs_nand_info *nand_info)
{
	uint8_t *buf;
	const int size = NAND_MAX_PAGESIZE + NAND_MAX_OOBSIZE;

	/* Round up so flush/invalidate always cover whole cache lines. */
	nand_info->data_buf_size = roundup(size, MXS_DMA_ALIGNMENT);

	/* DMA buffers */
	buf = memalign(MXS_DMA_ALIGNMENT, nand_info->data_buf_size);
	if (!buf) {
		printf("MXS NAND: Error allocating DMA buffers\n");
		return -ENOMEM;
	}

	memset(buf, 0, nand_info->data_buf_size);

	/* OOB lives in the same allocation, right after the page data. */
	nand_info->data_buf = buf;
	nand_info->oob_buf = buf + NAND_MAX_PAGESIZE;
	/* Command buffers */
	nand_info->cmd_buf = memalign(MXS_DMA_ALIGNMENT,
				      MXS_NAND_COMMAND_BUFFER_SIZE);
	if (!nand_info->cmd_buf) {
		free(buf);
		printf("MXS NAND: Error allocating command buffers\n");
		return -ENOMEM;
	}
	memset(nand_info->cmd_buf, 0, MXS_NAND_COMMAND_BUFFER_SIZE);
	nand_info->cmd_queue_len = 0;

	return 0;
}

/*
 * Initializes the NFC hardware.
 *
 * Allocates the DMA descriptor pool, initializes the APBH DMA channels used
 * by the GPMI, resets the GPMI and BCH blocks, and selects BCH ECC mode.
 * Returns 0 on success, negative errno on failure (all partially acquired
 * resources are released).
 */
static int mxs_nand_init_dma(struct mxs_nand_info *info)
{
	int i = 0, j, ret = 0;

	info->desc = malloc(sizeof(struct mxs_dma_desc *) *
				MXS_NAND_DMA_DESCRIPTOR_COUNT);
	if (!info->desc) {
		ret = -ENOMEM;
		goto err1;
	}

	/* Allocate the DMA descriptors. */
	for (i = 0; i < MXS_NAND_DMA_DESCRIPTOR_COUNT; i++) {
		info->desc[i] = mxs_dma_desc_alloc();
		if (!info->desc[i]) {
			ret = -ENOMEM;
			goto err2;
		}
	}

	/* Init the DMA controller. */
	mxs_dma_init();
	for (j = MXS_DMA_CHANNEL_AHB_APBH_GPMI0;
		j <= MXS_DMA_CHANNEL_AHB_APBH_GPMI7; j++) {
		ret = mxs_dma_init_channel(j);
		if (ret)
			goto err3;
	}

	/* Reset the GPMI block. */
	mxs_reset_block(&info->gpmi_regs->hw_gpmi_ctrl0_reg);
	mxs_reset_block(&info->bch_regs->hw_bch_ctrl_reg);

	/*
	 * Choose NAND mode, set IRQ polarity, disable write protection and
	 * select BCH ECC.
	 */
	clrsetbits_le32(&info->gpmi_regs->hw_gpmi_ctrl1,
			GPMI_CTRL1_GPMI_MODE,
			GPMI_CTRL1_ATA_IRQRDY_POLARITY | GPMI_CTRL1_DEV_RESET |
			GPMI_CTRL1_BCH_MODE);

	return 0;

	/* Unwind in reverse order of acquisition. */
err3:
	for (--j; j >= MXS_DMA_CHANNEL_AHB_APBH_GPMI0; j--)
		mxs_dma_release(j);
err2:
	for (--i; i >= 0; i--)
		mxs_dma_desc_free(info->desc[i]);
	free(info->desc);
err1:
	if (ret == -ENOMEM)
		printf("MXS NAND: Unable to allocate DMA descriptors\n");
	return ret;
}

/*
 * <1> Firstly, we should know what's the GPMI-clock means.
 *     The GPMI-clock is the internal clock in the gpmi nand controller.
 *     If you set 100MHz to gpmi nand controller, the GPMI-clock's period
 *     is 10ns. Mark the GPMI-clock's period as GPMI-clock-period.
 *
 * <2> Secondly, we should know what's the frequency on the nand chip pins.
 *     The frequency on the nand chip pins is derived from the GPMI-clock.
 *     We can get it from the following equation:
 *
 *         F = G / (DS + DH)
 *
 *         F  : the frequency on the nand chip pins.
 *         G  : the GPMI clock, such as 100MHz.
 *         DS : GPMI_HW_GPMI_TIMING0:DATA_SETUP
 *         DH : GPMI_HW_GPMI_TIMING0:DATA_HOLD
 *
 * <3> Thirdly, when the frequency on the nand chip pins is above 33MHz,
 *     the nand EDO (extended Data Out) timing could be applied.
 *     The GPMI implements a feedback read strobe to sample the read data.
 *     The feedback read strobe can be delayed to support the nand EDO timing
 *     where the read strobe may deassert before the read data is valid, and
 *     the read data is valid for some time after the read strobe.
 *
 * The following figure illustrates some aspects of a NAND Flash read:
 *
 *             |<---tREA---->|
 *             |             |
 *             |             |
 *             |<--tRP-->|   |
 *             |         |   |
 *             __________|___|______________________________
 *  RDN        \________/    |
 *                           |
 *                            /---------\
 *  Read Data --------------<           >---------
 *                            \---------/
 *                           |         |
 *                           |<-D->|
 *  FeedbackRDN  ________             ____________
 *                       \___________/
 *
 * D stands for delay, set in the HW_GPMI_CTRL1:RDN_DELAY.
 *
 *
 * <4> Now, we begin to describe how to compute the right RDN_DELAY.
 *
 *     4.1) From the aspect of the nand chip pins:
 *          Delay = (tREA + C - tRP)                       {1}
 *
 *          tREA : the maximum read access time.
 *          C    : a constant to adjust the delay, default is 4000ps.
 *          tRP  : the read pulse width, which is exactly:
 *                     tRP = (GPMI-clock-period) * DATA_SETUP
 *
 *     4.2) From the aspect of the GPMI nand controller:
 *          Delay = RDN_DELAY * 0.125 * RP                 {2}
 *
 *          RP   : the DLL reference period.
 *                 if (GPMI-clock-period > DLL_THRESHOLD)
 *                     RP = GPMI-clock-period / 2;
 *                 else
 *                     RP = GPMI-clock-period;
 *
 *          Set the HW_GPMI_CTRL1:HALF_PERIOD if GPMI-clock-period is greater
 *          than DLL_THRESHOLD. In other SoCs, the DLL_THRESHOLD is 16000ps,
 *          but in mx6q, we use 12000ps.
1420 * 1421 * 4.3) since {1} equals {2}, we get: 1422 * 1423 * (tREA + 4000 - tRP) * 8 1424 * RDN_DELAY = ----------------------- {3} 1425 * RP 1426 */ 1427static void mxs_compute_timings(struct nand_chip *chip, 1428 const struct nand_sdr_timings *sdr) 1429{ 1430 struct mxs_nand_info *nand_info = nand_get_controller_data(chip); 1431 unsigned long clk_rate; 1432 unsigned int dll_wait_time_us; 1433 unsigned int dll_threshold_ps = nand_info->max_chain_delay; 1434 unsigned int period_ps, reference_period_ps; 1435 unsigned int data_setup_cycles, data_hold_cycles, addr_setup_cycles; 1436 unsigned int tRP_ps; 1437 bool use_half_period; 1438 int sample_delay_ps, sample_delay_factor; 1439 u16 busy_timeout_cycles; 1440 u8 wrn_dly_sel; 1441 u32 timing0; 1442 u32 timing1; 1443 u32 ctrl1n; 1444 1445 if (sdr->tRC_min >= 30000) { 1446 /* ONFI non-EDO modes [0-3] */ 1447 clk_rate = 22000000; 1448 wrn_dly_sel = GPMI_CTRL1_WRN_DLY_SEL_4_TO_8NS; 1449 } else if (sdr->tRC_min >= 25000) { 1450 /* ONFI EDO mode 4 */ 1451 clk_rate = 80000000; 1452 wrn_dly_sel = GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY; 1453 debug("%s, setting ONFI onfi edo 4\n", __func__); 1454 } else { 1455 /* ONFI EDO mode 5 */ 1456 clk_rate = 100000000; 1457 wrn_dly_sel = GPMI_CTRL1_WRN_DLY_SEL_NO_DELAY; 1458 debug("%s, setting ONFI onfi edo 5\n", __func__); 1459 } 1460 1461 /* SDR core timings are given in picoseconds */ 1462 period_ps = div_u64((u64)NSEC_PER_SEC * 1000, clk_rate); 1463 1464 addr_setup_cycles = TO_CYCLES(sdr->tALS_min, period_ps); 1465 data_setup_cycles = TO_CYCLES(sdr->tDS_min, period_ps); 1466 data_hold_cycles = TO_CYCLES(sdr->tDH_min, period_ps); 1467 busy_timeout_cycles = TO_CYCLES(sdr->tWB_max + sdr->tR_max, period_ps); 1468 1469 timing0 = (addr_setup_cycles << GPMI_TIMING0_ADDRESS_SETUP_OFFSET) | 1470 (data_hold_cycles << GPMI_TIMING0_DATA_HOLD_OFFSET) | 1471 (data_setup_cycles << GPMI_TIMING0_DATA_SETUP_OFFSET); 1472 timing1 = (busy_timeout_cycles * 4096) << GPMI_TIMING1_DEVICE_BUSY_TIMEOUT_OFFSET; 1473 
1474 /* 1475 * Derive NFC ideal delay from {3}: 1476 * 1477 * (tREA + 4000 - tRP) * 8 1478 * RDN_DELAY = ----------------------- 1479 * RP 1480 */ 1481 if (period_ps > dll_threshold_ps) { 1482 use_half_period = true; 1483 reference_period_ps = period_ps / 2; 1484 } else { 1485 use_half_period = false; 1486 reference_period_ps = period_ps; 1487 } 1488 1489 tRP_ps = data_setup_cycles * period_ps; 1490 sample_delay_ps = (sdr->tREA_max + 4000 - tRP_ps) * 8; 1491 if (sample_delay_ps > 0) 1492 sample_delay_factor = sample_delay_ps / reference_period_ps; 1493 else 1494 sample_delay_factor = 0; 1495 1496 ctrl1n = (wrn_dly_sel << GPMI_CTRL1_WRN_DLY_SEL_OFFSET); 1497 if (sample_delay_factor) 1498 ctrl1n |= (sample_delay_factor << GPMI_CTRL1_RDN_DELAY_OFFSET) | 1499 GPMI_CTRL1_DLL_ENABLE | 1500 (use_half_period ? GPMI_CTRL1_HALF_PERIOD : 0); 1501 1502 writel(timing0, &nand_info->gpmi_regs->hw_gpmi_timing0); 1503 writel(timing1, &nand_info->gpmi_regs->hw_gpmi_timing1); 1504 1505 /* 1506 * Clear several CTRL1 fields, DLL must be disabled when setting 1507 * RDN_DELAY or HALF_PERIOD. 1508 */ 1509 writel(GPMI_CTRL1_CLEAR_MASK, &nand_info->gpmi_regs->hw_gpmi_ctrl1_clr); 1510 writel(ctrl1n, &nand_info->gpmi_regs->hw_gpmi_ctrl1_set); 1511 1512 clk_set_rate(nand_info->gpmi_clk, clk_rate); 1513 1514 /* Wait 64 clock cycles before using the GPMI after enabling the DLL */ 1515 dll_wait_time_us = USEC_PER_SEC / clk_rate * 64; 1516 if (!dll_wait_time_us) 1517 dll_wait_time_us = 1; 1518 1519 /* Wait for the DLL to settle. 
*/ 1520 udelay(dll_wait_time_us); 1521} 1522 1523static int mxs_nand_setup_interface(struct mtd_info *mtd, int chipnr, 1524 const struct nand_data_interface *conf) 1525{ 1526 struct nand_chip *chip = mtd_to_nand(mtd); 1527 const struct nand_sdr_timings *sdr; 1528 1529 sdr = nand_get_sdr_timings(conf); 1530 if (IS_ERR(sdr)) 1531 return PTR_ERR(sdr); 1532 1533 /* Stop here if this call was just a check */ 1534 if (chipnr < 0) 1535 return 0; 1536 1537 /* Do the actual derivation of the controller timings */ 1538 mxs_compute_timings(chip, sdr); 1539 1540 return 0; 1541} 1542 1543int mxs_nand_init_spl(struct nand_chip *nand) 1544{ 1545 struct mxs_nand_info *nand_info; 1546 int err; 1547 1548 nand_info = malloc(sizeof(struct mxs_nand_info)); 1549 if (!nand_info) { 1550 printf("MXS NAND: Failed to allocate private data\n"); 1551 return -ENOMEM; 1552 } 1553 memset(nand_info, 0, sizeof(struct mxs_nand_info)); 1554 1555 nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE; 1556 nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE; 1557 1558 if (is_mx6sx() || is_mx7() || is_imx8() || is_imx8m()) 1559 nand_info->max_ecc_strength_supported = 62; 1560 else 1561 nand_info->max_ecc_strength_supported = 40; 1562 1563 if (IS_ENABLED(CONFIG_NAND_MXS_USE_MINIMUM_ECC)) 1564 nand_info->use_minimum_ecc = true; 1565 1566 err = mxs_nand_alloc_buffers(nand_info); 1567 if (err) 1568 return err; 1569 1570 err = mxs_nand_init_dma(nand_info); 1571 if (err) 1572 return err; 1573 1574 nand_set_controller_data(nand, nand_info); 1575 1576 nand->options |= NAND_NO_SUBPAGE_WRITE; 1577 1578 nand->cmd_ctrl = mxs_nand_cmd_ctrl; 1579 nand->dev_ready = mxs_nand_device_ready; 1580 nand->select_chip = mxs_nand_select_chip; 1581 1582 nand->read_byte = mxs_nand_read_byte; 1583 nand->read_buf = mxs_nand_read_buf; 1584 1585 nand->ecc.read_page = mxs_nand_ecc_read_page; 1586 1587 nand->ecc.mode = NAND_ECC_HW; 1588 1589 return 0; 1590} 1591 1592int mxs_nand_init_ctrl(struct mxs_nand_info *nand_info) 
1593{ 1594 struct mtd_info *mtd; 1595 struct nand_chip *nand; 1596 int err; 1597 1598 nand = &nand_info->chip; 1599 mtd = nand_to_mtd(nand); 1600 err = mxs_nand_alloc_buffers(nand_info); 1601 if (err) 1602 return err; 1603 1604 err = mxs_nand_init_dma(nand_info); 1605 if (err) 1606 goto err_free_buffers; 1607 1608 memset(&fake_ecc_layout, 0, sizeof(fake_ecc_layout)); 1609 1610#ifdef CONFIG_SYS_NAND_USE_FLASH_BBT 1611 nand->bbt_options |= NAND_BBT_USE_FLASH | NAND_BBT_NO_OOB; 1612#endif 1613 1614 nand_set_controller_data(nand, nand_info); 1615 nand->options |= NAND_NO_SUBPAGE_WRITE; 1616 1617 if (nand_info->dev) 1618 nand->flash_node = dev_ofnode(nand_info->dev); 1619 1620 nand->cmd_ctrl = mxs_nand_cmd_ctrl; 1621 1622 nand->dev_ready = mxs_nand_device_ready; 1623 nand->select_chip = mxs_nand_select_chip; 1624 nand->block_bad = mxs_nand_block_bad; 1625 1626 nand->read_byte = mxs_nand_read_byte; 1627 1628 nand->read_buf = mxs_nand_read_buf; 1629 nand->write_buf = mxs_nand_write_buf; 1630 1631 if (nand_info->gpmi_clk) 1632 nand->setup_data_interface = mxs_nand_setup_interface; 1633 1634 /* first scan to find the device and get the page size */ 1635 if (nand_scan_ident(mtd, CONFIG_SYS_MAX_NAND_DEVICE, NULL)) 1636 goto err_free_buffers; 1637 1638 if (mxs_nand_setup_ecc(mtd)) 1639 goto err_free_buffers; 1640 1641 nand->ecc.read_page = mxs_nand_ecc_read_page; 1642 nand->ecc.write_page = mxs_nand_ecc_write_page; 1643 nand->ecc.read_oob = mxs_nand_ecc_read_oob; 1644 nand->ecc.write_oob = mxs_nand_ecc_write_oob; 1645 1646 nand->ecc.layout = &fake_ecc_layout; 1647 nand->ecc.mode = NAND_ECC_HW; 1648 nand->ecc.size = nand_info->bch_geometry.ecc_chunkn_size; 1649 nand->ecc.strength = nand_info->bch_geometry.ecc_strength; 1650 1651 /* second phase scan */ 1652 err = nand_scan_tail(mtd); 1653 if (err) 1654 goto err_free_buffers; 1655 1656 /* Hook some operations at the MTD level. 
*/ 1657 if (mtd->_read_oob != mxs_nand_hook_read_oob) { 1658 nand_info->hooked_read_oob = mtd->_read_oob; 1659 mtd->_read_oob = mxs_nand_hook_read_oob; 1660 } 1661 1662 if (mtd->_write_oob != mxs_nand_hook_write_oob) { 1663 nand_info->hooked_write_oob = mtd->_write_oob; 1664 mtd->_write_oob = mxs_nand_hook_write_oob; 1665 } 1666 1667 if (mtd->_block_markbad != mxs_nand_hook_block_markbad) { 1668 nand_info->hooked_block_markbad = mtd->_block_markbad; 1669 mtd->_block_markbad = mxs_nand_hook_block_markbad; 1670 } 1671 1672 err = nand_register(0, mtd); 1673 if (err) 1674 goto err_free_buffers; 1675 1676 return 0; 1677 1678err_free_buffers: 1679 free(nand_info->data_buf); 1680 free(nand_info->cmd_buf); 1681 1682 return err; 1683} 1684 1685#ifndef CONFIG_NAND_MXS_DT 1686void board_nand_init(void) 1687{ 1688 struct mxs_nand_info *nand_info; 1689 1690 nand_info = malloc(sizeof(struct mxs_nand_info)); 1691 if (!nand_info) { 1692 printf("MXS NAND: Failed to allocate private data\n"); 1693 return; 1694 } 1695 memset(nand_info, 0, sizeof(struct mxs_nand_info)); 1696 1697 nand_info->gpmi_regs = (struct mxs_gpmi_regs *)MXS_GPMI_BASE; 1698 nand_info->bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE; 1699 1700 /* Refer to Chapter 17 for i.MX6DQ, Chapter 18 for i.MX6SX */ 1701 if (is_mx6sx() || is_mx7()) 1702 nand_info->max_ecc_strength_supported = 62; 1703 else 1704 nand_info->max_ecc_strength_supported = 40; 1705 1706#ifdef CONFIG_NAND_MXS_USE_MINIMUM_ECC 1707 nand_info->use_minimum_ecc = true; 1708#endif 1709 1710 if (mxs_nand_init_ctrl(nand_info) < 0) 1711 goto err; 1712 1713 return; 1714 1715err: 1716 free(nand_info); 1717} 1718#endif 1719 1720/* 1721 * Read NAND layout for FCB block generation. 
1722 */ 1723void mxs_nand_get_layout(struct mtd_info *mtd, struct mxs_nand_layout *l) 1724{ 1725 struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE; 1726 u32 tmp; 1727 1728 tmp = readl(&bch_regs->hw_bch_flash0layout0); 1729 l->nblocks = (tmp & BCH_FLASHLAYOUT0_NBLOCKS_MASK) >> 1730 BCH_FLASHLAYOUT0_NBLOCKS_OFFSET; 1731 l->meta_size = (tmp & BCH_FLASHLAYOUT0_META_SIZE_MASK) >> 1732 BCH_FLASHLAYOUT0_META_SIZE_OFFSET; 1733 1734 tmp = readl(&bch_regs->hw_bch_flash0layout1); 1735 l->data0_size = 4 * ((tmp & BCH_FLASHLAYOUT0_DATA0_SIZE_MASK) >> 1736 BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET); 1737 l->ecc0 = (tmp & BCH_FLASHLAYOUT0_ECC0_MASK) >> 1738 BCH_FLASHLAYOUT0_ECC0_OFFSET; 1739 l->datan_size = 4 * ((tmp & BCH_FLASHLAYOUT1_DATAN_SIZE_MASK) >> 1740 BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET); 1741 l->eccn = (tmp & BCH_FLASHLAYOUT1_ECCN_MASK) >> 1742 BCH_FLASHLAYOUT1_ECCN_OFFSET; 1743 l->gf_len = (tmp & BCH_FLASHLAYOUT1_GF13_0_GF14_1_MASK) >> 1744 BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET; 1745} 1746 1747/* 1748 * Set BCH to specific layout used by ROM bootloader to read FCB. 
1749 */ 1750void mxs_nand_mode_fcb_62bit(struct mtd_info *mtd) 1751{ 1752 u32 tmp; 1753 struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE; 1754 struct nand_chip *nand = mtd_to_nand(mtd); 1755 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 1756 1757 nand_info->en_randomizer = 1; 1758 1759 mtd->writesize = 1024; 1760 mtd->oobsize = 1862 - 1024; 1761 1762 /* 8 ecc_chunks_*/ 1763 tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET; 1764 /* 32 bytes for metadata */ 1765 tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET; 1766 /* using ECC62 level to be performed */ 1767 tmp |= 0x1F << BCH_FLASHLAYOUT0_ECC0_OFFSET; 1768 /* 0x20 * 4 bytes of the data0 block */ 1769 tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET; 1770 tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET; 1771 writel(tmp, &bch_regs->hw_bch_flash0layout0); 1772 1773 /* 1024 for data + 838 for OOB */ 1774 tmp = 1862 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET; 1775 /* using ECC62 level to be performed */ 1776 tmp |= 0x1F << BCH_FLASHLAYOUT1_ECCN_OFFSET; 1777 /* 0x20 * 4 bytes of the data0 block */ 1778 tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET; 1779 tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET; 1780 writel(tmp, &bch_regs->hw_bch_flash0layout1); 1781} 1782 1783/* 1784 * Set BCH to specific layout used by ROM bootloader to read FCB. 
1785 */ 1786void mxs_nand_mode_fcb_40bit(struct mtd_info *mtd) 1787{ 1788 u32 tmp; 1789 struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE; 1790 struct nand_chip *nand = mtd_to_nand(mtd); 1791 struct mxs_nand_info *nand_info = nand_get_controller_data(nand); 1792 1793 /* no randomizer in this setting*/ 1794 nand_info->en_randomizer = 0; 1795 1796 mtd->writesize = 1024; 1797 mtd->oobsize = 1576 - 1024; 1798 1799 /* 8 ecc_chunks_*/ 1800 tmp = 7 << BCH_FLASHLAYOUT0_NBLOCKS_OFFSET; 1801 /* 32 bytes for metadata */ 1802 tmp |= 32 << BCH_FLASHLAYOUT0_META_SIZE_OFFSET; 1803 /* using ECC40 level to be performed */ 1804 tmp |= 0x14 << BCH_FLASHLAYOUT0_ECC0_OFFSET; 1805 /* 0x20 * 4 bytes of the data0 block */ 1806 tmp |= 0x20 << BCH_FLASHLAYOUT0_DATA0_SIZE_OFFSET; 1807 tmp |= 0 << BCH_FLASHLAYOUT0_GF13_0_GF14_1_OFFSET; 1808 writel(tmp, &bch_regs->hw_bch_flash0layout0); 1809 1810 /* 1024 for data + 552 for OOB */ 1811 tmp = 1576 << BCH_FLASHLAYOUT1_PAGE_SIZE_OFFSET; 1812 /* using ECC40 level to be performed */ 1813 tmp |= 0x14 << BCH_FLASHLAYOUT1_ECCN_OFFSET; 1814 /* 0x20 * 4 bytes of the data0 block */ 1815 tmp |= 0x20 << BCH_FLASHLAYOUT1_DATAN_SIZE_OFFSET; 1816 tmp |= 0 << BCH_FLASHLAYOUT1_GF13_0_GF14_1_OFFSET; 1817 writel(tmp, &bch_regs->hw_bch_flash0layout1); 1818} 1819 1820/* 1821 * Restore BCH to normal settings. 
 */
void mxs_nand_mode_normal(struct mtd_info *mtd)
{
	struct mxs_bch_regs *bch_regs = (struct mxs_bch_regs *)MXS_BCH_BASE;
	struct nand_chip *nand = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(nand);

	nand_info->en_randomizer = 0;

	/* Restore the page geometry saved by mxs_nand_setup_ecc(). */
	mtd->writesize = nand_info->writesize;
	mtd->oobsize = nand_info->oobsize;

	/* Restore the BCH layout registers saved by mxs_nand_setup_ecc(). */
	writel(nand_info->bch_flash0layout0, &bch_regs->hw_bch_flash0layout0);
	writel(nand_info->bch_flash0layout1, &bch_regs->hw_bch_flash0layout1);
}

/* Byte offset of the block mark within the page, from the BCH geometry. */
uint32_t mxs_nand_mark_byte_offset(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	struct bch_geometry *geo = &nand_info->bch_geometry;

	return geo->block_mark_byte_offset;
}

/* Bit offset of the block mark within that byte, from the BCH geometry. */
uint32_t mxs_nand_mark_bit_offset(struct mtd_info *mtd)
{
	struct nand_chip *chip = mtd_to_nand(mtd);
	struct mxs_nand_info *nand_info = nand_get_controller_data(chip);
	struct bch_geometry *geo = &nand_info->bch_geometry;

	return geo->block_mark_bit_offset;
}