1/* 2 * Broadcom NAND flash controller interface 3 * 4 * Copyright (C) 2012, Broadcom Corporation. All Rights Reserved. 5 * 6 * Permission to use, copy, modify, and/or distribute this software for any 7 * purpose with or without fee is hereby granted, provided that the above 8 * copyright notice and this permission notice appear in all copies. 9 * 10 * THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES 11 * WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF 12 * MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY 13 * SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES 14 * WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN ACTION 15 * OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF OR IN 16 * CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE. 17 * 18 * $Id $ 19 */ 20 21#include <linux/version.h> 22 23#include <linux/module.h> 24#include <linux/slab.h> 25#include <linux/ioport.h> 26#include <linux/mtd/mtd.h> 27#include <linux/mtd/nand.h> 28#include <linux/mtd/nand_ecc.h> 29#include <linux/errno.h> 30#include <linux/pci.h> 31#include <linux/delay.h> 32#include <asm/io.h> 33 34#include <typedefs.h> 35#include <osl.h> 36#include <bcmutils.h> 37#include <bcmdevs.h> 38#include <bcmnvram.h> 39#include <siutils.h> 40#include <hndpci.h> 41#include <pcicfg.h> 42#include <hndsoc.h> 43#define NFLASH_SUPPORT 44#include <sbchipc.h> 45#include <nflash.h> 46 47#include "brcmnand_priv.h" 48 49spinlock_t *partitions_lock_init(void); 50#define NFLASH_LOCK(lock) if (lock) spin_lock(lock) 51#define NFLASH_UNLOCK(lock) if (lock) spin_unlock(lock) 52 53#ifdef CONFIG_MTD_PARTITIONS 54#include <linux/mtd/partitions.h> 55 56extern struct mtd_partition * init_brcmnand_mtd_partitions(struct mtd_info *mtd, size_t size); 57#endif 58 59static int nflash_lock = 0; 60 61#ifdef __mips__ 62#define PLATFORM_IOFLUSH_WAR() __sync() 63#else 64#define PLATFORM_IOFLUSH_WAR mb /* Should 
work for MIPS too */
#endif

/* Max time (ms) to wait for the controller/flash to go ready */
#define BRCMNAND_POLL_TIMEOUT	3000

/* Internal result codes used by the read/write completion helpers */
#define BRCMNAND_CORRECTABLE_ECC_ERROR		(1)
#define BRCMNAND_SUCCESS			(0)
#define BRCMNAND_UNCORRECTABLE_ECC_ERROR	(-1)
#define BRCMNAND_FLASH_STATUS_ERROR		(-2)
#define BRCMNAND_TIMED_OUT			(-3)

/* OOB shadow area stored right after the data area of a page buffer */
#define BRCMNAND_OOBBUF(pbuf) (&((pbuf)->databuf[NAND_MAX_PAGESIZE]))

/* Fill-in internal bit fields if missing */
#ifndef NAND_ALE_COL
#define NAND_ALE_COL	0x0100
#define NAND_ALE_ROW	0x0200
#endif

/*
 * Number of required ECC bytes per 512B slice, indexed by the
 * BRCMNAND_ECC_* level programmed into the controller.
 */
static const unsigned int brcmnand_eccbytes[16] = {
	[BRCMNAND_ECC_DISABLE] = 0,
	[BRCMNAND_ECC_BCH_1] = 2,
	[BRCMNAND_ECC_BCH_2] = 4,
	[BRCMNAND_ECC_BCH_3] = 5,
	[BRCMNAND_ECC_BCH_4] = 7,
	[BRCMNAND_ECC_BCH_5] = 9,
	[BRCMNAND_ECC_BCH_6] = 10,
	[BRCMNAND_ECC_BCH_7] = 12,
	[BRCMNAND_ECC_BCH_8] = 13,
	[BRCMNAND_ECC_BCH_9] = 15,
	[BRCMNAND_ECC_BCH_10] = 17,
	[BRCMNAND_ECC_BCH_11] = 18,
	[BRCMNAND_ECC_BCH_12] = 20,
	[BRCMNAND_ECC_RESVD_1] = 0,
	[BRCMNAND_ECC_RESVD_2] = 0,
	[BRCMNAND_ECC_HAMMING] = 3,
};

/* 128 bytes of 0xFF: the content of an erased page/OOB area */
static const unsigned char ffchars[] = {
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,	/* 16 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,	/* 32 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,	/* 48 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,	/* 64 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,	/* 80 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,	/* 96 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,	/* 112 */
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,
	0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff, 0xff,	/* 128 */
};

/* OOB layout for 4K-page parts (128B spare), Hamming: 3 ECC bytes/slice */
static struct nand_ecclayout brcmnand_oob_128 = {
	.eccbytes = 24,
	.eccpos =
	{
		6, 7, 8,
		22, 23, 24,
		38, 39, 40,
		54, 55, 56,
		70, 71, 72,
		86, 87, 88,
		102, 103, 104,
		118, 119, 120
	},
	.oobfree =
	{
		/* 0-1 used for BBT and/or manufacturer bad block marker,
		 * first slice loses 2 bytes for BBT
		 */
		{.offset = 2, .length = 4},
		{.offset = 9, .length = 13},
		/* First slice {9,7} 2nd slice {16,6} are combined */
		/* ST uses 6th byte (offset=5) as Bad Block Indicator,
		 * in addition to the 1st byte, and will be adjusted at run time
		 */
		{.offset = 25, .length = 13},	/* 2nd slice */
		{.offset = 41, .length = 13},	/* 3rd slice */
		{.offset = 57, .length = 13},	/* 4th slice */
		{.offset = 73, .length = 13},	/* 5th slice */
		{.offset = 89, .length = 13},	/* 6th slice */
		{.offset = 105, .length = 13},	/* 7th slice */
#if MTD_MAX_OOBFREE_ENTRIES > 8
		{.offset = 121, .length = 7},	/* 8th slice */
		{.offset = 0, .length = 0}	/* End marker */
#endif
	}
};

/* OOB layout for 2K-page parts (64B spare), Hamming: 3 ECC bytes/slice */
static struct nand_ecclayout brcmnand_oob_64 = {
	.eccbytes = 12,
	.eccpos =
	{
		6, 7, 8,
		22, 23, 24,
		38, 39, 40,
		54, 55, 56
	},
	.oobfree =
	{
		/* 0-1 used for BBT and/or manufacturer bad block marker,
		 * first slice loses 2 bytes for BBT
		 */
		{.offset = 2, .length = 4},
		{.offset = 9, .length = 13},
		/* First slice {9,7} 2nd slice {16,6} are combined */
		/* ST uses 6th byte (offset=5) as Bad Block Indicator,
		 * in addition to the 1st byte, and will be adjusted at run time
		 */
		{.offset = 25, .length = 13},	/* 2nd slice */
		{.offset = 41, .length = 13},	/* 3rd slice */
		{.offset = 57, .length = 7},	/* 4th slice */
		{.offset = 0, .length = 0}	/* End marker */
	}
};

/**
 * brcmnand_oob oob info for 512 page
 */
static struct nand_ecclayout brcmnand_oob_16 = {
	.eccbytes = 3,
	.eccpos = {6, 7, 8},
	.oobfree = {
		{.offset = 0, .length = 5},
		{.offset = 9, .length = 7},	/* Byte 5 (6th byte) used for BI */
		{.offset = 0, .length = 0}}	/* End marker */
	/* Bytes offset 4&5 are used by BBT. Actually only byte 5 is used,
	 * but in order to accommodate for 16 bit bus width, byte 4 is also not used.
	 * If we only use byte-width chip, (We did)
	 * then we can also use byte 4 as free bytes.
	 */
};

/* Small page with BCH-4: 7 ECC bytes at the end of the 16B spare */
static struct nand_ecclayout brcmnand_oob_bch4_512 = {
	.eccbytes = 7,
	.eccpos = {9, 10, 11, 12, 13, 14, 15},
	.oobfree = {
		{.offset = 0, .length = 5},
		{.offset = 7, .length = 2},	/* Byte 5 (6th byte) used for BI */
		{.offset = 0, .length = 0}}	/* End marker */
};

/*
 * 2K page SLC/MLC with BCH-4 ECC, uses 7 ECC bytes per 512B ECC step
 */
static struct nand_ecclayout brcmnand_oob_bch4_2k = {
	.eccbytes = 7 * 8,	/* 7 * 8 = 56 bytes */
	.eccpos =
	{
		9, 10, 11, 12, 13, 14, 15,
		25, 26, 27, 28, 29, 30, 31,
		41, 42, 43, 44, 45, 46, 47,
		57, 58, 59, 60, 61, 62, 63
	},
	.oobfree =
	{
		/* 0 used for BBT and/or manufacturer bad block marker,
		 * first slice loses 1 byte for BBT
		 */
		{.offset = 1, .length = 8},	/* 1st slice loses byte 0 */
		{.offset = 16, .length = 9},	/* 2nd slice */
		{.offset = 32, .length = 9},	/* 3rd slice */
		{.offset = 48, .length = 9},	/* 4th slice */
		{.offset = 0, .length = 0}	/* End marker */
	}
};


/* Page bounce buffer; NOTE(review): not referenced in this part of the file */
static void *page_buffer = NULL;

/* Private global state */
struct brcmnand_mtd brcmnand_info;

/*
 * Issue a command opcode to the NAND controller.  The read-back of
 * nand_cmd_start flushes the posted write so the command is guaranteed
 * to have reached the controller before we return.
 */
static INLINE void
brcmnand_cmd(osl_t *osh, chipcregs_t *cc, uint opcode)
{
	W_REG(osh, &cc->nand_cmd_start, opcode);
	/* read after write to flush the command */
	R_REG(osh, &cc->nand_cmd_start);
}

/*
 * Check the controller's latched ECC status after a read.
 * Only meaningful while reading (state == FL_READING).
 * Returns BRCMNAND_SUCCESS, BRCMNAND_CORRECTABLE_ECC_ERROR or
 * BRCMNAND_UNCORRECTABLE_ECC_ERROR.
 */
int brcmnand_ctrl_verify_ecc(struct nand_chip *chip, int state)
{
	si_t *sih =
brcmnand_info.sih;
	chipcregs_t *cc = brcmnand_info.cc;
	osl_t *osh;
	uint32_t addr, ext_addr;
	int err = 0;

	/* ECC status is only tracked for reads */
	if (state != FL_READING)
		return BRCMNAND_SUCCESS;
	osh = si_osh(sih);
	/* Non-zero latched address => the controller corrected at least one bit */
	addr = R_REG(osh, &cc->nand_ecc_corr_addr);
	if (addr) {
		ext_addr = R_REG(osh, &cc->nand_ecc_corr_addr_x);
		/* clear the latched address so the next access starts clean */
		W_REG(osh, &cc->nand_ecc_corr_addr, 0);
		W_REG(osh, &cc->nand_ecc_corr_addr_x, 0);
		err = BRCMNAND_CORRECTABLE_ECC_ERROR;
	}
	/* In BCH4 case, the controller will report BRCMNAND_UNCORRECTABLE_ECC_ERROR
	 * but we cannot resolve this issue in this version. In this case, if we don't
	 * check nand_ecc_unc_addr the process also work smoothly.
	 */
	if (sih->ccrev != 38) {
		addr = R_REG(osh, &cc->nand_ecc_unc_addr);
		if (addr) {
			ext_addr = R_REG(osh, &cc->nand_ecc_unc_addr_x);
			/* clear */
			W_REG(osh, &cc->nand_ecc_unc_addr, 0);
			W_REG(osh, &cc->nand_ecc_unc_addr_x, 0);
			/* If the block was just erased, and have not yet been written to,
			 * this will be flagged, so this could be a false alarm
			 */
			err = BRCMNAND_UNCORRECTABLE_ECC_ERROR;
		}
	}
	return (err);
}

/*
 * Sample the controller interface status register masked with the
 * caller-supplied bits.  Returns the masked status (0 = none set).
 */
uint32 brcmnand_poll(uint32 pollmask)
{
	si_t *sih = brcmnand_info.sih;
	chipcregs_t *cc = brcmnand_info.cc;
	osl_t *osh;
	uint32 status;

	osh = si_osh(sih);
	status = R_REG(osh, &cc->nand_intfc_status);
	status &= pollmask;

	return status;
}

/*
 * Wait (up to BRCMNAND_POLL_TIMEOUT ms) for a page-read into the controller
 * cache to complete.  Polls for controller-ready (NIST_CTRL_READY) or a
 * flash status error (bit 0).  Returns BRCMNAND_TIMED_OUT,
 * BRCMNAND_FLASH_STATUS_ERROR, or the ECC verdict on success.
 */
int brcmnand_cache_is_valid(struct mtd_info *mtd, struct nand_chip *chip, int state)
{
	uint32 pollmask = NIST_CTRL_READY | 0x1;
	unsigned long timeout = msecs_to_jiffies(BRCMNAND_POLL_TIMEOUT);
	unsigned long now = jiffies;
	uint32 status = 0;
	int ret;

	for (;;) {
		if ((status = brcmnand_poll(pollmask)) != 0) {
			break;
		}
		if (time_after(jiffies, now + timeout)) {
			/* one final sample after the deadline to avoid a false timeout */
			status = brcmnand_poll(pollmask);
			break;
		}
		udelay(1);
	}

	if (status == 0)
		ret = BRCMNAND_TIMED_OUT;
	else if (status & 0x1)
		ret = BRCMNAND_FLASH_STATUS_ERROR;
	else
		ret = brcmnand_ctrl_verify_ecc(chip, state);

	return ret;
}

/*
 * Wait for a spare-area read to complete.  Same polling scheme as
 * brcmnand_cache_is_valid() but only controller-ready is checked.
 * Returns 1 when ready, 0 on timeout.
 */
int brcmnand_spare_is_valid(struct mtd_info *mtd, struct nand_chip *chip, int state)
{
	uint32 pollmask = NIST_CTRL_READY;
	unsigned long timeout = msecs_to_jiffies(BRCMNAND_POLL_TIMEOUT);
	unsigned long now = jiffies;
	uint32 status = 0;
	int ret;

	for (;;) {
		if ((status = brcmnand_poll(pollmask)) != 0) {
			break;
		}
		if (time_after(jiffies, now + timeout)) {
			status = brcmnand_poll(pollmask);
			break;
		}
		udelay(1);
	}

	if (status == 0)
		ret = 0 /* timed out */;
	else
		ret = 1;

	return ret;
}

/**
 * nand_release_device - [GENERIC] release chip
 * @mtd: MTD device structure
 *
 * Deselect, release chip lock and wake up anyone waiting on the device
 */
static spinlock_t mtd_lock;
static void brcmnand_release_device(struct mtd_info *mtd)
{
	if (nflash_lock == 1) {
		/* last user: turn the NAND core back off */
		brcmnand_info.nflash->enable( brcmnand_info.nflash, 0);
		/* NOTE(review): taking mtd->mlock here and dropping it in
		 * brcmnand_get_device() looks inverted; presumably mlock
		 * arbitrates the flash bus with another driver while the NAND
		 * core is disabled -- confirm before changing.
		 */
		NFLASH_LOCK(mtd->mlock);
	}
	nflash_lock --;
	spin_unlock(&mtd_lock);
}

/**
 * brcmnand_get_device - [GENERIC] Get chip for selected access
 * @param chip	the nand chip descriptor
 * @param mtd	MTD device structure
 * @param new_state	the state which is requested
 *
 * Get the device and lock it for exclusive access.  mtd_lock is held
 * by the caller until the matching brcmnand_release_device().
 */
static int brcmnand_get_device( struct mtd_info *mtd)
{
	spin_lock(&mtd_lock);
	if (nflash_lock == 0) {
		/* first user: see the NOTE(review) in brcmnand_release_device() */
		NFLASH_UNLOCK(mtd->mlock);
		brcmnand_info.nflash->enable( brcmnand_info.nflash, 1);
	}
	nflash_lock ++;
	return 0;
}

/**
 * brcmnand_release_device_bcm4706 - [GENERIC] release chip
 * @mtd: MTD device structure
 *
 * Deselect, release chip lock and wake up anyone waiting on the device
 */
static void
brcmnand_release_device_bcm4706(struct mtd_info
*mtd)
{
	NFLASH_UNLOCK(mtd->mlock);
}

/**
 * brcmnand_get_device_bcm4706 - [GENERIC] Get chip for selected access
 * @param chip	the nand chip descriptor
 * @param mtd	MTD device structure
 * @param new_state	the state which is requested
 *
 * Get the device and lock it for exclusive access
 */
static int
brcmnand_get_device_bcm4706( struct mtd_info *mtd )
{
	NFLASH_LOCK(mtd->mlock);
	return 0;
}

/**
 * brcmnand_block_checkbad - [GENERIC] Check if a block is marked bad
 * @mtd: MTD device structure
 * @ofs: offset from device start
 * @getchip: 0, if the chip is already selected
 * @allowbbt: 1, if its allowed to access the bbt area
 *
 * Check, if the block is bad. Either by reading the bad block table or
 * calling of the scan function.
 */
static int brcmnand_block_checkbad(struct mtd_info *mtd, loff_t ofs, int getchip,
	int allowbbt)
{
	struct nand_chip *chip = mtd->priv;
	int ret;

	if (getchip)
		brcmnand_get_device( mtd );

	/* No BBT in RAM yet: fall back to scanning the block marker on flash */
	if (!chip->bbt)
		ret = chip->block_bad(mtd, ofs, getchip);
	else
		ret = brcmnand_isbad_bbt(mtd, ofs, allowbbt);

	if (getchip)
		brcmnand_release_device(mtd);
	return (ret);
}

/*
 * Distinguish a genuinely uncorrectable slice from an erased (all-0xFF)
 * slice, which this controller also flags as uncorrectable.
 * Returns 0 on success (buffer filled with 0xFF), -EBADMSG for a real error.
 */
static int brcmnand_handle_false_read_ecc_unc_errors(struct mtd_info *mtd,
	struct nand_chip *chip, uint8_t *buf, uint8_t *oob, uint32_t offset)
{
	static uint32_t oobbuf[4];
	uint32_t *p32 = (oob ? (uint32_t *)oob : (uint32_t *)&oobbuf[0]);
	int ret = 0;
	uint8_t *oobarea;
	int erased = 0, allFF = 0;
	int i;
	si_t *sih = brcmnand_info.sih;
	chipcregs_t *cc = brcmnand_info.cc;
	osl_t *osh;

	osh = si_osh(sih);
	oobarea = (uint8_t *)p32;
	/* Fetch the first 16 spare bytes (4 words) latched by the controller.
	 * NOTE(review): when oob is NULL the BCH branch below indexes oobarea
	 * up to mtd->oobsize-1, past the 16-byte static oobbuf, and only 16
	 * bytes are ever read from the hardware -- verify for oobsize > 16.
	 */
	for (i = 0; i < 4; i++) {
		p32[i] = R_REG(osh, (uint32_t *)((uint32_t)&cc->nand_spare_rd0 + (i * 4)));
	}
	if (brcmnand_info.level == BRCMNAND_ECC_HAMMING) {
		/* Hamming keeps its 3 ECC bytes at spare offsets 6..8 */
		erased =
			(oobarea[6] == 0xff && oobarea[7] == 0xff && oobarea[8] == 0xff);
		allFF =
			(oobarea[6] == 0x00 && oobarea[7] == 0x00 && oobarea[8] == 0x00);
	} else if (brcmnand_info.level >= BRCMNAND_ECC_BCH_1 &&
		brcmnand_info.level <= BRCMNAND_ECC_BCH_12) {
		erased = allFF = 1;
		/* For BCH-n, the ECC bytes are at the end of the OOB area */
		for (i = mtd->oobsize - chip->ecc.bytes; i < mtd->oobsize; i++) {
			erased = erased && (oobarea[i] == 0xff);
			allFF = allFF && (oobarea[i] == 0x00);
		}
	} else {
		printk("BUG: Unsupported ECC level %d\n", brcmnand_info.level );
		BUG();
	}

	if (erased || allFF) {
		/*
		 * For the first case, the slice is an erased block, and the ECC bytes
		 * are all 0xFF, for the 2nd, all bytes are 0xFF, so the Hamming Codes
		 * for it are all zeroes. The current version of the BrcmNAND
		 * controller treats these as un-correctable errors. For either case,
		 * fill data buffer with 0xff and return success. The error has
		 * already been cleared inside brcmnand_verify_ecc. Both case will be
		 * handled correctly by the BrcmNand controller in later releases.
		 */
		p32 = (uint32_t *)buf;
		for (i = 0; i < chip->ecc.size/4; i++) {
			p32[i] = 0xFFFFFFFF;
		}
		ret = 0; /* Success */
	} else {
		/* Real error: Disturb read returns uncorrectable errors */
		ret = -EBADMSG;
		printk("<-- %s: ret -EBADMSG\n", __FUNCTION__);
	}
	return ret;
}

/*
 * Read one ECC step (chip->ecc.size bytes) into the controller cache and
 * copy it (and the spare area) out.  offset must be ECC-step aligned.
 * Returns 0 on success (including corrected and erased-false-alarm cases),
 * -EBADMSG / -ETIMEDOUT / -EFAULT otherwise.
 */
static int brcmnand_posted_read_cache(struct mtd_info *mtd, struct nand_chip *chip,
	uint8_t *buf, uint8_t *oob, uint32_t offset)
{
	uint32_t mask = chip->ecc.size - 1;
	si_t *sih = brcmnand_info.sih;
	chipcregs_t *cc = brcmnand_info.cc;
	osl_t *osh;
	int valid;
	uint32_t *to;
	int ret = 0, i;

	if (offset & mask)
		return -EINVAL;

	osh = si_osh(sih);
	W_REG(osh, &cc->nand_cmd_addr, offset);
	PLATFORM_IOFLUSH_WAR();
	brcmnand_cmd(osh, cc, NCMD_PAGE_RD);
	valid = brcmnand_cache_is_valid(mtd, chip, FL_READING);

	switch (valid) {
	/* correctable errors were fixed by the controller: treat as success */
	case BRCMNAND_CORRECTABLE_ECC_ERROR:
	case BRCMNAND_SUCCESS:
		if (buf) {
			to = (uint32_t *)buf;
			PLATFORM_IOFLUSH_WAR();
			for (i = 0; i < chip->ecc.size; i += 4, to++) {
				*to = R_REG(osh, &cc->nand_cache_data);
			}
		}
		if (oob) {
			to = (uint32_t *)oob;
			PLATFORM_IOFLUSH_WAR();
			for (i = 0; i < mtd->oobsize; i += 4, to++) {
				*to = R_REG(osh, (uint32_t *)((uint32_t)&cc->nand_spare_rd0 + i));
			}
		}
		break;
	case BRCMNAND_UNCORRECTABLE_ECC_ERROR:
		/* may be an erased slice misreported as uncorrectable */
		ret = brcmnand_handle_false_read_ecc_unc_errors(mtd, chip, buf, oob, offset);
		break;
	case BRCMNAND_FLASH_STATUS_ERROR:
		ret = -EBADMSG;
		break;
	case BRCMNAND_TIMED_OUT:
		ret = -ETIMEDOUT;
		break;
	default:
		ret = -EFAULT;
		break;
	}

	return (ret);
}

/**
 * nand_read_page_hwecc - [REPLACABLE] hardware ecc based page read function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: buffer to store read data
 *
 * Not for syndrome calculating ecc controllers which need a special oob
layout
 */
static int brcmnand_read_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
	uint8_t *buf, int page)
{
	int eccsteps;
	int data_read = 0;
	int oob_read = 0;
	int corrected = 0;
	int ret = 0;
	uint32_t offset = page << chip->page_shift;
	uint8_t *oob = chip->oob_poi;

	brcmnand_get_device( mtd );
	/* read the page one ECC step at a time */
	for (eccsteps = 0; eccsteps < chip->ecc.steps; eccsteps++) {
		ret = brcmnand_posted_read_cache(mtd, chip, &buf[data_read],
			oob ? &oob[oob_read]: NULL, offset + data_read);
		/* NOTE(review): brcmnand_posted_read_cache() returns 0 for the
		 * correctable case, so this branch appears unreachable -- confirm.
		 */
		if (ret == BRCMNAND_CORRECTABLE_ECC_ERROR && !corrected) {
			mtd->ecc_stats.corrected++;
			corrected = 1;
			ret = 0;
		} else {
			if (ret < 0)
				break;
		}
		data_read += chip->ecc.size;
		oob_read += mtd->oobsize;
	}
	brcmnand_release_device(mtd);
	return (ret);
}

/**
 * brcmnand_transfer_oob - [Internal] Transfer oob to client buffer
 * @chip: nand chip structure
 * @oob: oob destination address
 * @ops: oob ops structure
 * @len: size of oob to transfer
 *
 * Returns the advanced destination pointer.
 */
static uint8_t *brcmnand_transfer_oob(struct nand_chip *chip, uint8_t *oob,
	struct mtd_oob_ops *ops, size_t len)
{
	switch (ops->mode) {

	case MTD_OOB_PLACE:
	case MTD_OOB_RAW:
		/* raw/placed: straight copy from the requested spare offset */
		memcpy(oob, chip->oob_poi + ops->ooboffs, len);
		return oob + len;

	case MTD_OOB_AUTO: {
		/* auto-placement: gather only the free (non-ECC) spare ranges */
		struct nand_oobfree *free = chip->ecc.layout->oobfree;
		uint32_t boffs = 0, roffs = ops->ooboffs;
		size_t bytes = 0;

		for (; free->length && len; free++, len -= bytes) {
			/* Read request not from offset 0 ? */
			if (unlikely(roffs)) {
				/* skip whole free ranges until roffs is consumed */
				if (roffs >= free->length) {
					roffs -= free->length;
					continue;
				}
				boffs = free->offset + roffs;
				bytes = min_t(size_t, len,
					(free->length - roffs));
				roffs = 0;
			} else {
				bytes = min_t(size_t, len, free->length);
				boffs = free->offset;
			}
			memcpy(oob, chip->oob_poi + boffs, bytes);
			oob += bytes;
		}
		return oob;
	}
	default:
		BUG();
	}
	return NULL;
}

/**
 * brcmnand_do_read_ops - [Internal] Read data with ECC
 *
 * @mtd: MTD device structure
 * @from: offset to read from
 * @ops: oob ops structure
 *
 * Internal function. Called with chip held.
 */
static int brcmnand_do_read_ops(struct mtd_info *mtd, loff_t from,
	struct mtd_oob_ops *ops)
{
	int page, realpage, col, bytes, aligned;
	struct nand_chip *chip = mtd->priv;
	struct mtd_ecc_stats stats;
	int ret = 0;
	uint32_t readlen = ops->len;
	uint32_t oobreadlen = ops->ooblen;
	uint8_t *bufpoi, *oob, *buf;

	/* snapshot the stats so we can report new errors at the end */
	stats = mtd->ecc_stats;

	realpage = (int)(from >> chip->page_shift);
	page = realpage & chip->pagemask;

	col = (int)(from & (mtd->writesize - 1));

	buf = ops->datbuf;
	oob = ops->oobbuf;

	while (1) {
		bytes = min(mtd->writesize - col, readlen);
		aligned = (bytes == mtd->writesize);

		/* Is the current page in the buffer ? */
		if (realpage != chip->pagebuf || oob) {
			/* full pages go straight to the caller, partial ones bounce */
			bufpoi = aligned ? buf : chip->buffers->databuf;
			/* NOTE(review): this stores the masked page while the
			 * cache test above compares against realpage -- confirm
			 * which one chip->pagebuf is meant to hold.
			 */
			chip->pagebuf = page;
			/* Now read the page into the buffer */
			ret = chip->ecc.read_page(mtd, chip, bufpoi, page);
			if (ret < 0)
				break;

			/* Transfer not aligned data */
			if (!aligned) {
				chip->pagebuf = realpage;
				memcpy(buf, chip->buffers->databuf + col, bytes);
			}

			buf += bytes;

			if (unlikely(oob)) {
				if (ops->mode != MTD_OOB_RAW) {
					int toread = min(oobreadlen,
						chip->ecc.layout->oobavail);
					if (toread) {
						oob = brcmnand_transfer_oob(chip,
							oob, ops, toread);
						oobreadlen -= toread;
					}
				} else
					/* raw mode: OOB is appended to the data stream */
					buf = brcmnand_transfer_oob(chip,
						buf, ops, mtd->oobsize);
			}
		} else {
			/* cached page: serve directly from the bounce buffer */
			memcpy(buf, chip->buffers->databuf + col, bytes);
			buf += bytes;
		}

		readlen -= bytes;

		if (!readlen)
			break;

		/* For subsequent reads align to page boundary. */
		col = 0;
		/* Increment page address */
		realpage++;

		page = realpage & chip->pagemask;
	}

	ops->retlen = ops->len - (size_t) readlen;
	if (oob)
		ops->oobretlen = ops->ooblen - oobreadlen;

	if (ret)
		return ret;

	if (mtd->ecc_stats.failed - stats.failed)
		return -EBADMSG;

	return mtd->ecc_stats.corrected - stats.corrected ?
-EUCLEAN : 0; 751} 752 753/** 754 * brcmnand_read - [MTD Interface] MTD compability function for nand_do_read_ecc 755 * @mtd: MTD device structure 756 * @from: offset to read from 757 * @len: number of bytes to read 758 * @retlen: pointer to variable to store the number of read bytes 759 * @buf: the databuffer to put data 760 * 761 * Get hold of the chip and call nand_do_read 762 */ 763static int 764brcmnand_read(struct mtd_info *mtd, loff_t from, size_t len, size_t *retlen, u_char *buf) 765{ 766 struct nand_chip *chip = mtd->priv; 767 int ret; 768 769 if ((from + len) > mtd->size) 770 return -EINVAL; 771 if (!len) 772 return 0; 773 774 brcmnand_get_device( mtd ); 775 chip->ops.len = len; 776 chip->ops.datbuf = buf; 777 chip->ops.oobbuf = NULL; 778 779 ret = brcmnand_do_read_ops(mtd, from, &chip->ops); 780 781 *retlen = chip->ops.retlen; 782 783 brcmnand_release_device(mtd); 784 785 return ret; 786} 787 788static int brcmnand_posted_read_oob(struct mtd_info *mtd, struct nand_chip *chip, 789 uint8_t *oob, uint32_t offset) 790{ 791 uint32_t mask = chip->ecc.size - 1; 792 si_t *sih = brcmnand_info.sih; 793 chipcregs_t *cc = brcmnand_info.cc; 794 osl_t *osh; 795 int valid; 796 uint32 *to; 797 int ret = 0, i; 798 799 if (offset & mask) 800 return -EINVAL; 801 802 osh = si_osh(sih); 803 W_REG(osh, &cc->nand_cmd_addr, offset); 804 PLATFORM_IOFLUSH_WAR(); 805 brcmnand_cmd(osh, cc, NCMD_SPARE_RD); 806 valid = brcmnand_spare_is_valid(mtd, chip, FL_READING); 807 808 switch (valid) { 809 case 1: 810 if (oob) { 811 to = (uint32 *)oob; 812 for (i = 0; i < mtd->oobsize; i += 4, to++) { 813 *to = R_REG(osh, (uint32_t *)((uint32_t)&cc->nand_spare_rd0 + i)); 814 } 815 } 816 break; 817 case 0: 818 ret = -ETIMEDOUT; 819 break; 820 default: 821 ret = -EFAULT; 822 break; 823 } 824 return (ret); 825} 826 827/** 828 * brcmnand_read_oob_hwecc - [REPLACABLE] the most common OOB data read function 829 * @mtd: mtd info structure 830 * @chip: nand chip info structure 831 * @page: page number 
to read 832 * @sndcmd: flag whether to issue read command or not 833 */ 834static int brcmnand_read_oob_hwecc(struct mtd_info *mtd, struct nand_chip *chip, 835 int page, int sndcmd) 836{ 837 int eccsteps; 838 int data_read = 0; 839 int oob_read = 0; 840 int corrected = 0; 841 int ret = 0; 842 uint32_t offset = page << chip->page_shift; 843 uint8_t *oob = chip->oob_poi; 844 845 for (eccsteps = 0; eccsteps < chip->ecc.steps; eccsteps++) { 846 ret = brcmnand_posted_read_oob(mtd, chip, &oob[oob_read], offset + data_read); 847 if (ret == BRCMNAND_CORRECTABLE_ECC_ERROR && !corrected) { 848 mtd->ecc_stats.corrected++; 849 /* Only update stats once per page */ 850 corrected = 1; 851 ret = 0; 852 } else { 853 if (ret < 0) 854 break; 855 } 856 data_read += chip->ecc.size; 857 oob_read += mtd->oobsize; 858 } 859 860 return (ret); 861} 862 863/** 864 * brcmnand_do_read_oob - [Intern] NAND read out-of-band 865 * @mtd: MTD device structure 866 * @from: offset to read from 867 * @ops: oob operations description structure 868 * 869 * NAND read out-of-band data from the spare area 870 */ 871static int brcmnand_do_read_oob(struct mtd_info *mtd, loff_t from, 872 struct mtd_oob_ops *ops) 873{ 874 int page, realpage; 875 struct nand_chip *chip = mtd->priv; 876 int readlen = ops->ooblen; 877 int len; 878 uint8_t *buf = ops->oobbuf; 879 int ret; 880 881 DEBUG(MTD_DEBUG_LEVEL3, "nand_read_oob: from = 0x%08Lx, len = %i\n", 882 (unsigned long long)from, readlen); 883 884 if (ops->mode == MTD_OOB_AUTO) 885 len = chip->ecc.layout->oobavail; 886 else 887 len = mtd->oobsize; 888 889 if (unlikely(ops->ooboffs >= len)) { 890 DEBUG(MTD_DEBUG_LEVEL0, "nand_read_oob: " 891 "Attempt to start read outside oob\n"); 892 return -EINVAL; 893 } 894 895 /* Do not allow reads past end of device */ 896 if (unlikely(from >= mtd->size || 897 ops->ooboffs + readlen > ((mtd->size >> chip->page_shift) - 898 (from >> chip->page_shift)) * len)) { 899 DEBUG(MTD_DEBUG_LEVEL0, "nand_read_oob: " 900 "Attempt read beyond 
end of device\n"); 901 return -EINVAL; 902 } 903 /* Shift to get page */ 904 realpage = (int)(from >> chip->page_shift); 905 page = realpage & chip->pagemask; 906 907 while (1) { 908 ret = chip->ecc.read_oob(mtd, chip, page, 0); 909 if (ret) 910 break; 911 len = min(len, readlen); 912 buf = brcmnand_transfer_oob(chip, buf, ops, len); 913 914 readlen -= len; 915 if (!readlen) 916 break; 917 918 /* Increment page address */ 919 realpage++; 920 921 page = realpage & chip->pagemask; 922 } 923 924 ops->oobretlen = ops->ooblen; 925 return (ret); 926} 927 928/** 929 * brcmnand_read_oob - [MTD Interface] NAND read data and/or out-of-band 930 * @mtd: MTD device structure 931 * @from: offset to read from 932 * @ops: oob operation description structure 933 * 934 * NAND read data and/or out-of-band data 935 */ 936static int brcmnand_read_oob(struct mtd_info *mtd, loff_t from, 937 struct mtd_oob_ops *ops) 938{ 939 int ret = -ENOTSUPP; 940 941 ops->retlen = 0; 942 943 /* Do not allow reads past end of device */ 944 if (ops->datbuf && (from + ops->len) > mtd->size) { 945 DEBUG(MTD_DEBUG_LEVEL0, "brcmnand_read_oob: " 946 "Attempt read beyond end of device\n"); 947 return -EINVAL; 948 } 949 950 brcmnand_get_device( mtd ); 951 952 switch (ops->mode) { 953 case MTD_OOB_PLACE: 954 case MTD_OOB_AUTO: 955 case MTD_OOB_RAW: 956 break; 957 958 default: 959 goto out; 960 } 961 962 if (!ops->datbuf) 963 ret = brcmnand_do_read_oob(mtd, from, ops); 964 else 965 ret = brcmnand_do_read_ops(mtd, from, ops); 966 967out: 968 brcmnand_release_device(mtd); 969 return ret; 970} 971 972static int brcmnand_ctrl_write_is_complete(struct mtd_info *mtd, struct nand_chip *chip, 973 int *need_bbt) 974{ 975 uint32 pollmask = NIST_CTRL_READY | 0x1; 976 unsigned long timeout = msecs_to_jiffies(BRCMNAND_POLL_TIMEOUT); 977 unsigned long now = jiffies; 978 uint32 status = 0; 979 int ret; 980 981 for (;;) { 982 if ((status = brcmnand_poll(pollmask)) != 0) { 983 break; 984 } 985 if (time_after(jiffies, now + 
timeout)) {
			status = brcmnand_poll(pollmask);
			break;
		}
		udelay(1);
	}

	*need_bbt = 0;
	if (status == 0)
		ret = 0; /* timed out */
	else {
		ret = 1;
		/* bit 0 set => flash reported a program/erase failure */
		if (status & 0x1)
			*need_bbt = 1;
	}

	return ret;
}

/**
 * brcmnand_posted_write - [BrcmNAND Interface] Write a buffer to the flash
 * cache
 * Assuming brcmnand_get_device() has been called to obtain exclusive lock
 *
 * @param mtd	MTD data structure
 * @param chip	nand chip info structure
 * @param buf	the databuffer to put/get data
 * @param oob	Spare area, pass NULL if not interested (currently unused here)
 * @param offset	offset to write to, and must be 512B aligned
 *
 */
static int brcmnand_posted_write_cache(struct mtd_info *mtd, struct nand_chip *chip,
	const uint8_t *buf, uint8_t *oob, uint32_t offset)
{
	uint32_t mask = chip->ecc.size - 1;
	si_t *sih = brcmnand_info.sih;
	chipcregs_t *cc = brcmnand_info.cc;
	osl_t *osh;
	int i, ret = 0;
	uint32_t *from;

	if (offset & mask) {
		ret = -EINVAL;
		goto out;
	}

	osh = si_osh(sih);
	/* stream one ECC step of data into the controller cache, 32 bits at a time */
	from = (uint32_t *)buf;
	for (i = 0; i < chip->ecc.size; i += 4, from++) {
		W_REG(osh, &cc->nand_cache_data, *from);
	}
out:
	return (ret);
}

/**
 * brcmnand_write_page_hwecc - [REPLACABLE] hardware ecc based page write function
 * @mtd: mtd info structure
 * @chip: nand chip info structure
 * @buf: data buffer
 *
 * Writes the page at chip->pagebuf.  On a flash program failure the block
 * is marked bad; on success any caller-supplied OOB is merged with the
 * controller-generated ECC bytes and programmed separately.
 */
static void brcmnand_write_page_hwecc(struct mtd_info *mtd, struct nand_chip *chip,
	const uint8_t *buf)
{
	int eccsize = chip->ecc.size;
	int eccsteps;
	int data_written = 0;
	int oob_written = 0;
	si_t *sih = brcmnand_info.sih;
	chipcregs_t *cc = brcmnand_info.cc;
	osl_t *osh;
	uint32_t reg;
	int ret = 0, need_bbt = 0;
	uint32_t offset = chip->pagebuf << chip->page_shift;

	uint8_t oob_buf[NAND_MAX_OOBSIZE];
	int *eccpos = chip->ecc.layout->eccpos;
	int i;
	uint8_t *oob = chip->oob_poi;

	brcmnand_get_device( mtd );
	osh = si_osh(sih);
	/* full page write */
	/* disable partial page enable */
	reg = R_REG(osh, &cc->nand_acc_control);
	reg &= ~NAC_PARTIAL_PAGE_EN;
	W_REG(osh, &cc->nand_acc_control, reg);

	/* load the whole page into the controller cache, one ECC step at a time */
	for (eccsteps = 0; eccsteps < chip->ecc.steps; eccsteps++) {
		W_REG(osh, &cc->nand_cache_addr, 0);
		W_REG(osh, &cc->nand_cmd_addr, data_written);
		ret = brcmnand_posted_write_cache(mtd, chip, &buf[data_written],
			oob ? &oob[oob_written]: NULL, offset + data_written);
		if (ret < 0) {
			goto out;
		}
		data_written += eccsize;
		oob_written += mtd->oobsize;
	}

	/* program the page; cmd_addr points at the last 512B sector */
	W_REG(osh, &cc->nand_cmd_addr, offset + mtd->writesize - NFL_SECTOR_SIZE);
	brcmnand_cmd(osh, cc, NCMD_PAGE_PROG);
	if (brcmnand_ctrl_write_is_complete(mtd, chip, &need_bbt)) {
		if (!need_bbt) {
			/* write the oob */
			if (oob) {
				/* Enable partial page program so that we can
				 * overwrite the spare area
				 */
				reg = R_REG(osh, &cc->nand_acc_control);
				reg |= NAC_PARTIAL_PAGE_EN;
				W_REG(osh, &cc->nand_acc_control, reg);

				/* NOTE(review): copies NAND_MAX_OOBSIZE bytes from
				 * oob_poi -- verify oob_poi is at least that large.
				 */
				memcpy(oob_buf, oob, NAND_MAX_OOBSIZE);
				/* read from the spare area first */
				ret = chip->ecc.read_oob(mtd, chip, chip->pagebuf, 0);
				if (ret != 0)
					goto out;
				/* merge the oob: keep the ECC bytes the controller
				 * just generated, overlay the caller's free bytes
				 */
				for (i = 0; i < chip->ecc.total; i++)
					oob_buf[eccpos[i]] = chip->oob_poi[eccpos[i]];
				memcpy(chip->oob_poi, oob_buf, NAND_MAX_OOBSIZE);
				/* write back to the spare area */
				ret = chip->ecc.write_oob(mtd, chip, chip->pagebuf);
			}
			goto out;
		} else {
			/* flash flagged a program failure: retire the block */
			ret = chip->block_markbad(mtd, offset);
			goto out;
		}
	}
	/* timed out */
	ret = -ETIMEDOUT;

out:
	if (ret != 0)
		printk(KERN_ERR "brcmnand_write_page_hwecc failed\n");
	brcmnand_release_device(mtd);
	return;
}

/*
1127 * brcmnand_posted_write_oob - [BrcmNAND Interface] Write the spare area 1128 * @mtd: MTD data structure 1129 * @chip: nand chip info structure 1130 * @oob: Spare area, pass NULL if not interested. Must be able to 1131 * hold mtd->oobsize (16) bytes. 1132 * @offset: offset to write to, and must be 512B aligned 1133 * 1134 */ 1135static int brcmnand_posted_write_oob(struct mtd_info *mtd, struct nand_chip *chip, 1136 uint8_t *oob, uint32_t offset) 1137{ 1138 uint32_t mask = chip->ecc.size - 1; 1139 si_t *sih = brcmnand_info.sih; 1140 chipcregs_t *cc = brcmnand_info.cc; 1141 osl_t *osh; 1142 int i, ret = 0, need_bbt = 0; 1143 uint32_t *from; 1144 uint32_t reg; 1145 uint8_t oob_buf0[16]; 1146 1147 if (offset & mask) { 1148 ret = -EINVAL; 1149 goto out; 1150 } 1151 1152 osh = si_osh(sih); 1153 /* Make sure we are in partial page program mode */ 1154 reg = R_REG(osh, &cc->nand_acc_control); 1155 reg |= NAC_PARTIAL_PAGE_EN; 1156 W_REG(osh, &cc->nand_acc_control, reg); 1157 1158 W_REG(osh, &cc->nand_cmd_addr, offset); 1159 if (!oob) { 1160 ret = -EINVAL; 1161 goto out; 1162 } 1163 memcpy(oob_buf0, oob, mtd->oobsize); 1164 from = (uint32_t *)oob_buf0; 1165 for (i = 0; i < mtd->oobsize; i += 4, from++) { 1166 W_REG(osh, (uint32_t *)((uint32_t)&cc->nand_spare_wr0 + i), *from); 1167 } 1168 PLATFORM_IOFLUSH_WAR(); 1169 brcmnand_cmd(osh, cc, NCMD_SPARE_PROG); 1170 if (brcmnand_ctrl_write_is_complete(mtd, chip, &need_bbt)) { 1171 if (!need_bbt) { 1172 ret = 0; 1173 goto out; 1174 } else { 1175 ret = chip->block_markbad(mtd, offset); 1176 goto out; 1177 } 1178 } 1179 /* timed out */ 1180 ret = -ETIMEDOUT; 1181out: 1182 return ret; 1183} 1184 1185 1186/** 1187 * brcmnand_write_page - [REPLACEABLE] write one page 1188 * @mtd: MTD device structure 1189 * @chip: NAND chip descriptor 1190 * @buf: the data to write 1191 * @page: page number to write 1192 * @cached: cached programming 1193 * @raw: use _raw version of write_page 1194 */ 1195static int brcmnand_write_page(struct 
mtd_info *mtd, struct nand_chip *chip, 1196 const uint8_t *buf, int page, int cached, int raw) 1197{ 1198 chip->pagebuf = page; 1199 chip->ecc.write_page(mtd, chip, buf); 1200 1201 return 0; 1202} 1203 1204/** 1205 * brcmnand_fill_oob - [Internal] Transfer client buffer to oob 1206 * @chip: nand chip structure 1207 * @oob: oob data buffer 1208 * @ops: oob ops structure 1209 */ 1210static uint8_t *brcmnand_fill_oob(struct nand_chip *chip, uint8_t *oob, 1211 struct mtd_oob_ops *ops) 1212{ 1213 size_t len = ops->ooblen; 1214 1215 switch (ops->mode) { 1216 1217 case MTD_OOB_PLACE: 1218 case MTD_OOB_RAW: 1219 memcpy(chip->oob_poi + ops->ooboffs, oob, len); 1220 return oob + len; 1221 1222 case MTD_OOB_AUTO: { 1223 struct nand_oobfree *free = chip->ecc.layout->oobfree; 1224 uint32_t boffs = 0, woffs = ops->ooboffs; 1225 size_t bytes = 0; 1226 1227 for (; free->length && len; free++, len -= bytes) { 1228 /* Write request not from offset 0 ? */ 1229 if (unlikely(woffs)) { 1230 if (woffs >= free->length) { 1231 woffs -= free->length; 1232 continue; 1233 } 1234 boffs = free->offset + woffs; 1235 bytes = min_t(size_t, len, 1236 (free->length - woffs)); 1237 woffs = 0; 1238 } else { 1239 bytes = min_t(size_t, len, free->length); 1240 boffs = free->offset; 1241 } 1242 memcpy(chip->oob_poi + boffs, oob, bytes); 1243 oob += bytes; 1244 } 1245 return oob; 1246 } 1247 default: 1248 BUG(); 1249 } 1250 return NULL; 1251} 1252 1253#define NOTALIGNED(x) (x & (chip->subpagesize - 1)) != 0 1254 1255/** 1256 * brcmnand_do_write_ops - [Internal] NAND write with ECC 1257 * @mtd: MTD device structure 1258 * @to: offset to write to 1259 * @ops: oob operations description structure 1260 * 1261 * NAND write with ECC 1262 */ 1263static int brcmnand_do_write_ops(struct mtd_info *mtd, loff_t to, 1264 struct mtd_oob_ops *ops) 1265{ 1266 int realpage, page, blockmask; 1267 struct nand_chip *chip = mtd->priv; 1268 uint32_t writelen = ops->len; 1269 uint8_t *oob = ops->oobbuf; 1270 uint8_t *buf = 
ops->datbuf; 1271 int ret; 1272 1273 ops->retlen = 0; 1274 if (!writelen) 1275 return 0; 1276 1277 /* reject writes, which are not page aligned */ 1278 if (NOTALIGNED(to) || NOTALIGNED(ops->len)) { 1279 printk(KERN_NOTICE "nand_write: " 1280 "Attempt to write not page aligned data\n"); 1281 return -EINVAL; 1282 } 1283 1284 1285 realpage = (int)(to >> chip->page_shift); 1286 page = realpage & chip->pagemask; 1287 blockmask = (1 << (chip->phys_erase_shift - chip->page_shift)) - 1; 1288 1289 /* Invalidate the page cache, when we write to the cached page */ 1290 if (to <= (chip->pagebuf << chip->page_shift) && 1291 (chip->pagebuf << chip->page_shift) < (to + ops->len)) 1292 chip->pagebuf = -1; 1293 1294 /* If we're not given explicit OOB data, let it be 0xFF */ 1295 if (likely(!oob)) 1296 memset(chip->oob_poi, 0xff, mtd->oobsize); 1297 1298 while (1) { 1299 int bytes = mtd->writesize; 1300 int cached = writelen > bytes && page != blockmask; 1301 uint8_t *wbuf = buf; 1302 1303 if (unlikely(oob)) 1304 oob = brcmnand_fill_oob(chip, oob, ops); 1305 1306 ret = chip->write_page(mtd, chip, wbuf, page, cached, 1307 (ops->mode == MTD_OOB_RAW)); 1308 if (ret) 1309 break; 1310 1311 writelen -= bytes; 1312 if (!writelen) 1313 break; 1314 1315 buf += bytes; 1316 realpage++; 1317 1318 page = realpage & chip->pagemask; 1319 } 1320 1321 ops->retlen = ops->len - writelen; 1322 if (unlikely(oob)) 1323 ops->oobretlen = ops->ooblen; 1324 return ret; 1325} 1326 1327/** 1328 * brcmnand_write - [MTD Interface] NAND write with ECC 1329 * @mtd: MTD device structure 1330 * @to: offset to write to 1331 * @len: number of bytes to write 1332 * @retlen: pointer to variable to store the number of written bytes 1333 * @buf: the data to write 1334 * 1335 * NAND write with ECC 1336 */ 1337static int brcmnand_write(struct mtd_info *mtd, loff_t to, size_t len, 1338 size_t *retlen, const uint8_t *buf) 1339{ 1340 struct nand_chip *chip = mtd->priv; 1341 int ret; 1342 1343 /* Do not allow reads past end of 
device */ 1344 if ((to + len) > mtd->size) 1345 return -EINVAL; 1346 if (!len) 1347 return 0; 1348 1349 brcmnand_get_device( mtd ); 1350 1351 chip->ops.len = len; 1352 chip->ops.datbuf = (uint8_t *)buf; 1353 chip->ops.oobbuf = NULL; 1354 1355 ret = brcmnand_do_write_ops(mtd, to, &chip->ops); 1356 1357 *retlen = chip->ops.retlen; 1358 1359 brcmnand_release_device(mtd); 1360 1361 return ret; 1362} 1363 1364/** 1365 * brcmnand_write_oob_hwecc - [INTERNAL] write one page 1366 * @mtd: MTD device structure 1367 * @chip: NAND chip descriptor. The oob_poi ptr points to the OOB buffer. 1368 * @page: page number to write 1369 */ 1370static int brcmnand_write_oob_hwecc(struct mtd_info *mtd, struct nand_chip *chip, int page) 1371{ 1372 int eccsteps; 1373 int oob_written = 0, data_written = 0; 1374 uint32_t offset = page << chip->page_shift; 1375 uint8_t *oob = chip->oob_poi; 1376 int ret = 0; 1377 1378 for (eccsteps = 0; eccsteps < chip->ecc.steps; eccsteps++) { 1379 ret = brcmnand_posted_write_oob(mtd, chip, oob + oob_written, 1380 offset + data_written); 1381 if (ret < 0) 1382 break; 1383 data_written += chip->ecc.size; 1384 oob_written += mtd->oobsize; 1385 } 1386 return (ret); 1387} 1388 1389/** 1390 * brcmnand_do_write_oob - [MTD Interface] NAND write out-of-band 1391 * @mtd: MTD device structure 1392 * @to: offset to write to 1393 * @ops: oob operation description structure 1394 * 1395 * NAND write out-of-band 1396 */ 1397static int brcmnand_do_write_oob(struct mtd_info *mtd, loff_t to, 1398 struct mtd_oob_ops *ops) 1399{ 1400 int page, status, len; 1401 struct nand_chip *chip = mtd->priv; 1402 1403 DEBUG(MTD_DEBUG_LEVEL3, "nand_write_oob: to = 0x%08x, len = %i\n", 1404 (unsigned int)to, (int)ops->ooblen); 1405 1406 if (ops->mode == MTD_OOB_AUTO) 1407 len = chip->ecc.layout->oobavail; 1408 else 1409 len = mtd->oobsize; 1410 1411 /* Do not allow write past end of page */ 1412 if ((ops->ooboffs + ops->ooblen) > len) { 1413 DEBUG(MTD_DEBUG_LEVEL0, "nand_write_oob: " 1414 
"Attempt to write past end of page\n"); 1415 return -EINVAL; 1416 } 1417 1418 if (unlikely(ops->ooboffs >= len)) { 1419 DEBUG(MTD_DEBUG_LEVEL0, "nand_write_oob: " 1420 "Attempt to start write outside oob\n"); 1421 return -EINVAL; 1422 } 1423 1424 /* Do not allow reads past end of device */ 1425 if (unlikely(to >= mtd->size || 1426 ops->ooboffs + ops->ooblen > 1427 ((mtd->size >> chip->page_shift) - 1428 (to >> chip->page_shift)) * len)) { 1429 DEBUG(MTD_DEBUG_LEVEL0, "nand_read_oob: " 1430 "Attempt write beyond end of device\n"); 1431 return -EINVAL; 1432 } 1433 1434 1435 /* Shift to get page */ 1436 page = (int)(to >> chip->page_shift); 1437 1438 /* Invalidate the page cache, if we write to the cached page */ 1439 if (page == chip->pagebuf) 1440 chip->pagebuf = -1; 1441 1442 memset(chip->oob_poi, 0xff, mtd->oobsize); 1443 brcmnand_fill_oob(chip, ops->oobbuf, ops); 1444 status = chip->ecc.write_oob(mtd, chip, page & chip->pagemask); 1445 memset(chip->oob_poi, 0xff, mtd->oobsize); 1446 1447 if (status) 1448 return status; 1449 1450 ops->oobretlen = ops->ooblen; 1451 1452 return 0; 1453} 1454 1455/** 1456 * brcmnand_write_oob - [MTD Interface] NAND write data and/or out-of-band 1457 * @mtd: MTD device structure 1458 * @to: offset to write to 1459 * @ops: oob operation description structure 1460 */ 1461static int brcmnand_write_oob(struct mtd_info *mtd, loff_t to, 1462 struct mtd_oob_ops *ops) 1463{ 1464 int ret = -ENOTSUPP; 1465 1466 ops->retlen = 0; 1467 1468 /* Do not allow writes past end of device */ 1469 if (ops->datbuf && (to + ops->len) > mtd->size) { 1470 DEBUG(MTD_DEBUG_LEVEL0, "brcmnand_write_oob: " 1471 "Attempt write beyond end of device\n"); 1472 return -EINVAL; 1473 } 1474 1475 brcmnand_get_device( mtd ); 1476 1477 switch (ops->mode) { 1478 case MTD_OOB_PLACE: 1479 case MTD_OOB_AUTO: 1480 case MTD_OOB_RAW: 1481 break; 1482 1483 default: 1484 goto out; 1485 } 1486 1487 if (!ops->datbuf) 1488 ret = brcmnand_do_write_oob(mtd, to, ops); 1489 else 1490 ret = 
brcmnand_do_write_ops(mtd, to, ops); 1491 1492out: 1493 brcmnand_release_device(mtd); 1494 return ret; 1495} 1496 1497static int brcmnand_erase_bbt(struct mtd_info *mtd, struct erase_info *instr, int allowbbt) 1498{ 1499 struct nand_chip * chip = mtd->priv; 1500 int page, len, pages_per_block, block_size; 1501 loff_t addr; 1502 int ret = 0; 1503 int need_bbt = 0; 1504 si_t *sih = brcmnand_info.sih; 1505 chipcregs_t *cc = brcmnand_info.cc; 1506 osl_t *osh; 1507 1508 DEBUG(MTD_DEBUG_LEVEL3, "nand_erase: start = 0x%08x, len = %i\n", 1509 (unsigned int)instr->addr, (unsigned int)instr->len); 1510 1511 block_size = 1 << chip->phys_erase_shift; 1512 1513 /* Start address must align on block boundary */ 1514 if (instr->addr & (block_size - 1)) { 1515 DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: Unaligned address\n"); 1516 return -EINVAL; 1517 } 1518 1519 /* Length must align on block boundary */ 1520 if (instr->len & (block_size - 1)) { 1521 DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: " 1522 "Length not block aligned\n"); 1523 return -EINVAL; 1524 } 1525 1526 /* Do not allow erase past end of device */ 1527 if ((instr->len + instr->addr) > mtd->size) { 1528 DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: " 1529 "Erase past end of device\n"); 1530 return -EINVAL; 1531 } 1532 1533 instr->fail_addr = 0xffffffff; 1534 1535 /* Grab the lock and see if the device is available */ 1536 brcmnand_get_device( mtd ); 1537 1538 /* Shift to get first page */ 1539 page = (int)(instr->addr >> chip->page_shift); 1540 1541 /* Calculate pages in each block */ 1542 pages_per_block = 1 << (chip->phys_erase_shift - chip->page_shift); 1543 1544 osh = si_osh(sih); 1545 /* Clear ECC registers */ 1546 W_REG(osh, &cc->nand_ecc_corr_addr, 0); 1547 W_REG(osh, &cc->nand_ecc_corr_addr_x, 0); 1548 W_REG(osh, &cc->nand_ecc_unc_addr, 0); 1549 W_REG(osh, &cc->nand_ecc_unc_addr_x, 0); 1550 1551 /* Loop throught the pages */ 1552 len = instr->len; 1553 addr = instr->addr; 1554 instr->state = MTD_ERASING; 1555 1556 while (len) { 1557 
/* 1558 * heck if we have a bad block, we do not erase bad blocks ! 1559 */ 1560 if (brcmnand_block_checkbad(mtd, ((loff_t) page) << 1561 chip->page_shift, 0, allowbbt)) { 1562 printk(KERN_WARNING "nand_erase: attempt to erase a " 1563 "bad block at page 0x%08x\n", page); 1564 instr->state = MTD_ERASE_FAILED; 1565 goto erase_exit; 1566 } 1567 1568 /* 1569 * Invalidate the page cache, if we erase the block which 1570 * contains the current cached page 1571 */ 1572 if (page <= chip->pagebuf && chip->pagebuf < 1573 (page + pages_per_block)) 1574 chip->pagebuf = -1; 1575 1576 W_REG(osh, &cc->nand_cmd_addr, (page << chip->page_shift)); 1577 brcmnand_cmd(osh, cc, NCMD_BLOCK_ERASE); 1578 1579 /* Wait until flash is ready */ 1580 ret = brcmnand_ctrl_write_is_complete(mtd, chip, &need_bbt); 1581 1582 if (need_bbt) { 1583 if (!allowbbt) { 1584 DEBUG(MTD_DEBUG_LEVEL0, "nand_erase: " 1585 "Failed erase, page 0x%08x\n", page); 1586 instr->state = MTD_ERASE_FAILED; 1587 instr->fail_addr = (page << chip->page_shift); 1588 chip->block_markbad(mtd, addr); 1589 goto erase_exit; 1590 } 1591 } 1592 1593 /* Increment page address and decrement length */ 1594 len -= (1 << chip->phys_erase_shift); 1595 page += pages_per_block; 1596 } 1597 instr->state = MTD_ERASE_DONE; 1598 1599erase_exit: 1600 1601 ret = instr->state == MTD_ERASE_DONE ? 
0 : -EIO; 1602 /* Do call back function */ 1603 if (!ret) 1604 mtd_erase_callback(instr); 1605 1606 /* Deselect and wake up anyone waiting on the device */ 1607 brcmnand_release_device(mtd); 1608 1609 return ret; 1610} 1611 1612static int 1613brcmnand_erase(struct mtd_info *mtd, struct erase_info *instr) 1614{ 1615 int allowbbt = 0; 1616 int ret = 0; 1617 1618 /* do not allow erase of bbt */ 1619 ret = brcmnand_erase_bbt(mtd, instr, allowbbt); 1620 1621 return ret; 1622} 1623 1624/** 1625 * brcmnand_sync - [MTD Interface] sync 1626 * @mtd: MTD device structure 1627 * 1628 * Sync is actually a wait for chip ready function 1629 */ 1630static void brcmnand_sync(struct mtd_info *mtd) 1631{ 1632 1633 DEBUG(MTD_DEBUG_LEVEL3, "nand_sync: called\n"); 1634 1635 /* Grab the lock and see if the device is available */ 1636 brcmnand_get_device( mtd ); 1637 PLATFORM_IOFLUSH_WAR(); 1638 1639 /* Release it and go back */ 1640 brcmnand_release_device(mtd); 1641} 1642 1643/** 1644 * brcmnand_block_isbad - [MTD Interface] Check if block at offset is bad 1645 * @mtd: MTD device structure 1646 * @offs: offset relative to mtd start 1647 */ 1648static int brcmnand_block_isbad(struct mtd_info *mtd, loff_t offs) 1649{ 1650 /* Check for invalid offset */ 1651 if (offs > mtd->size) 1652 return -EINVAL; 1653 1654 return brcmnand_block_checkbad(mtd, offs, 1, 0); 1655} 1656 1657/** 1658 * brcmnand_default_block_markbad - [DEFAULT] mark a block bad 1659 * @mtd: MTD device structure 1660 * @ofs: offset from device start 1661 * 1662 * This is the default implementation, which can be overridden by 1663 * a hardware specific driver. 
1664*/ 1665static int brcmnand_default_block_markbad(struct mtd_info *mtd, loff_t ofs) 1666{ 1667 struct nand_chip *chip = mtd->priv; 1668 uint8_t bbmarker[1] = {0}; 1669 uint8_t *buf = chip->oob_poi; 1670 int block, ret; 1671 int page, dir; 1672 1673 /* Get block number */ 1674 block = (int)(ofs >> chip->bbt_erase_shift); 1675 /* Get page number */ 1676 page = block << (chip->bbt_erase_shift - chip->page_shift); 1677 dir = 1; 1678 1679 if (chip->bbt) 1680 chip->bbt[block >> 2] |= 0x01 << ((block & 0x03) << 1); 1681 memcpy(buf, ffchars, NAND_MAX_OOBSIZE); 1682 memcpy(buf + chip->badblockpos, bbmarker, sizeof(bbmarker)); 1683 ret = chip->ecc.write_oob(mtd, chip, page); 1684 page += dir; 1685 ret = chip->ecc.write_oob(mtd, chip, page); 1686 1687 /* According to the HW guy, even if the write fails, the controller have 1688 * written a 0 pattern that certainly would have written a non 0xFF value 1689 * into the BI marker. 1690 * 1691 * Ignoring ret. Even if we fail to write the BI bytes, just ignore it, 1692 * and mark the block as bad in the BBT 1693 */ 1694 ret = brcmnand_update_bbt(mtd, ofs); 1695 mtd->ecc_stats.badblocks++; 1696 return ret; 1697} 1698 1699/** 1700 * brcmnand_block_markbad - [MTD Interface] Mark block at the given offset as bad 1701 * @mtd: MTD device structure 1702 * @ofs: offset relative to mtd start 1703 */ 1704static int brcmnand_block_markbad(struct mtd_info *mtd, loff_t ofs) 1705{ 1706 struct nand_chip *chip = mtd->priv; 1707 int ret; 1708 1709 if ((ret = brcmnand_block_isbad(mtd, ofs))) { 1710 /* If it was bad already, return success and do nothing. 
*/ 1711 if (ret > 0) 1712 return 0; 1713 return ret; 1714 } 1715 1716 return chip->block_markbad(mtd, ofs); 1717} 1718 1719/** 1720 * brcmnand_suspend - [MTD Interface] Suspend the NAND flash 1721 * @mtd: MTD device structure 1722 */ 1723static int brcmnand_suspend(struct mtd_info *mtd) 1724{ 1725 return brcmnand_get_device( mtd ); 1726} 1727 1728/** 1729 * brcmnand_resume - [MTD Interface] Resume the NAND flash 1730 * @mtd: MTD device structure 1731 */ 1732static void brcmnand_resume(struct mtd_info *mtd) 1733{ 1734 struct nand_chip *chip = mtd->priv; 1735 1736 if (chip->state == FL_PM_SUSPENDED) 1737 brcmnand_release_device(mtd); 1738 else 1739 printk(KERN_ERR "brcmnand_resume() called for a chip which is not " 1740 "in suspended state\n"); 1741} 1742 1743struct mtd_partition brcmnand_parts[] = { 1744 { 1745 .name = "brcmnand", 1746 .size = 0, 1747 .offset = 0 1748 }, 1749 { 1750 .name = 0, 1751 .size = 0, 1752 .offset = 0 1753 } 1754}; 1755 1756struct mtd_partition *init_brcmnand_mtd_partitions(struct mtd_info *mtd, size_t size) 1757{ 1758 brcmnand_parts[0].offset = NFL_BOOT_OS_SIZE; 1759 brcmnand_parts[0].size = size - NFL_BOOT_OS_SIZE - NFL_BBT_SIZE; 1760 1761 return brcmnand_parts; 1762} 1763 1764/** 1765 * brcmnand_check_command_done - [DEFAULT] check if command is done 1766 * @mtd: MTD device structure 1767 * 1768 * Return 0 to process next command 1769 */ 1770static int 1771brcmnand_check_command_done(void) 1772{ 1773 si_t *sih = brcmnand_info.sih; 1774 chipcregs_t *cc = brcmnand_info.cc; 1775 osl_t *osh; 1776 int count = 0; 1777 1778 osh = si_osh(sih); 1779 1780 while (R_REG(osh, &cc->nflashctrl) & NFC_START) { 1781 if (++count > BRCMNAND_POLL_TIMEOUT) { 1782 printk("brcmnand_check_command_done: command timeout\n"); 1783 return -1; 1784 } 1785 } 1786 1787 return 0; 1788} 1789 1790/** 1791 * brcmnand_hwcontrol - [DEFAULT] Issue command and address cycles to the chip 1792 * @mtd: MTD device structure 1793 * @cmd: the command to be sent 1794 * @ctrl: the 
control code to be sent 1795 * 1796 * Issue command and address cycles to the chip 1797 */ 1798static void 1799brcmnand_hwcontrol(struct mtd_info *mtd, int cmd, unsigned int ctrl) 1800{ 1801 si_t *sih = brcmnand_info.sih; 1802 chipcregs_t *cc = brcmnand_info.cc; 1803 osl_t *osh; 1804 unsigned int val = 0; 1805 1806 osh = si_osh(sih); 1807 1808 if (cmd == NAND_CMD_NONE) 1809 return; 1810 1811 if (ctrl & NAND_CLE) { 1812 val = cmd | NFC_CMD0; 1813 } 1814 else { 1815 switch (ctrl & (NAND_ALE_COL | NAND_ALE_ROW)) { 1816 case NAND_ALE_COL: 1817 W_REG(osh, &cc->nflashcoladdr, cmd); 1818 val = NFC_COL; 1819 break; 1820 case NAND_ALE_ROW: 1821 W_REG(osh, &cc->nflashrowaddr, cmd); 1822 val = NFC_ROW; 1823 break; 1824 default: 1825 BUG(); 1826 } 1827 } 1828 1829 /* nCS is not needed for reset command */ 1830 if (cmd != NAND_CMD_RESET) 1831 val |= NFC_CSA; 1832 1833 val |= NFC_START; 1834 W_REG(osh, &cc->nflashctrl, val); 1835 1836 brcmnand_check_command_done(); 1837} 1838 1839/** 1840 * brcmnand_command_lp - [DEFAULT] Send command to NAND large page device 1841 * @mtd: MTD device structure 1842 * @command: the command to be sent 1843 * @column: the column address for this command, -1 if none 1844 * @page_addr: the page address for this command, -1 if none 1845 * 1846 * Send command to NAND device. This is the version for the new large page 1847 * devices We dont have the separate regions as we have in the small page 1848 * devices. We must emulate NAND_CMD_READOOB to keep the code compatible. 
1849 */ 1850static void 1851brcmnand_command_lp(struct mtd_info *mtd, unsigned int command, int column, int page_addr) 1852{ 1853 register struct nand_chip *chip = mtd->priv; 1854 1855 /* Emulate NAND_CMD_READOOB */ 1856 if (command == NAND_CMD_READOOB) { 1857 column += mtd->writesize; 1858 command = NAND_CMD_READ0; 1859 } 1860 1861 /* Command latch cycle */ 1862 chip->cmd_ctrl(mtd, command & 0xff, NAND_NCE | NAND_CLE); 1863 1864 if (column != -1 || page_addr != -1) { 1865 int ctrl = NAND_NCE | NAND_ALE; 1866 1867 /* Serially input address */ 1868 if (column != -1) { 1869 ctrl |= NAND_ALE_COL; 1870 1871 /* Adjust columns for 16 bit buswidth */ 1872 if (chip->options & NAND_BUSWIDTH_16) 1873 column >>= 1; 1874 1875 chip->cmd_ctrl(mtd, column, ctrl); 1876 } 1877 1878 if (page_addr != -1) { 1879 ctrl &= ~NAND_ALE_COL; 1880 ctrl |= NAND_ALE_ROW; 1881 1882 chip->cmd_ctrl(mtd, page_addr, ctrl); 1883 } 1884 } 1885 1886 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE); 1887 1888 /* 1889 * program and erase have their own busy handlers 1890 * status, sequential in, and deplete1 need no delay 1891 */ 1892 switch (command) { 1893 1894 case NAND_CMD_CACHEDPROG: 1895 case NAND_CMD_PAGEPROG: 1896 case NAND_CMD_ERASE1: 1897 case NAND_CMD_ERASE2: 1898 case NAND_CMD_SEQIN: 1899 case NAND_CMD_RNDIN: 1900 case NAND_CMD_STATUS: 1901 case NAND_CMD_DEPLETE1: 1902 return; 1903 1904 /* 1905 * read error status commands require only a short delay 1906 */ 1907 case NAND_CMD_STATUS_ERROR: 1908 case NAND_CMD_STATUS_ERROR0: 1909 case NAND_CMD_STATUS_ERROR1: 1910 case NAND_CMD_STATUS_ERROR2: 1911 case NAND_CMD_STATUS_ERROR3: 1912 udelay(chip->chip_delay); 1913 return; 1914 1915 case NAND_CMD_RESET: 1916 if (chip->dev_ready) 1917 break; 1918 1919 udelay(chip->chip_delay); 1920 1921 chip->cmd_ctrl(mtd, NAND_CMD_STATUS, NAND_NCE | NAND_CLE); 1922 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE); 1923 1924 while (!(chip->read_byte(mtd) & NAND_STATUS_READY)); 1925 return; 1926 1927 case NAND_CMD_RNDOUT: 
1928 /* No ready / busy check necessary */ 1929 chip->cmd_ctrl(mtd, NAND_CMD_RNDOUTSTART, NAND_NCE | NAND_CLE); 1930 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE); 1931 return; 1932 1933 case NAND_CMD_READ0: 1934 chip->cmd_ctrl(mtd, NAND_CMD_READSTART, NAND_NCE | NAND_CLE); 1935 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE); 1936 1937 /* This applies to read commands */ 1938 default: 1939 /* 1940 * If we don't have access to the busy pin, we apply the given 1941 * command delay 1942 */ 1943 if (!chip->dev_ready) { 1944 udelay(chip->chip_delay); 1945 return; 1946 } 1947 } 1948 1949 /* Apply this short delay always to ensure that we do wait tWB in 1950 * any case on any machine. 1951 */ 1952 ndelay(100); 1953 1954 nand_wait_ready(mtd); 1955} 1956 1957/** 1958 * brcmnand_command - [DEFAULT] Send command to NAND device 1959 * @mtd: MTD device structure 1960 * @command: the command to be sent 1961 * @column: the column address for this command, -1 if none 1962 * @page_addr: the page address for this command, -1 if none 1963 * 1964 * Send command to NAND device. This function is used for small page 1965 * devices (256/512 Bytes per page) 1966 */ 1967static void 1968brcmnand_command(struct mtd_info *mtd, unsigned int command, int column, int page_addr) 1969{ 1970 register struct nand_chip *chip = mtd->priv; 1971 int ctrl = NAND_CTRL_CLE; 1972 1973 /* Invoke large page command function */ 1974 if (mtd->writesize > 512) { 1975 brcmnand_command_lp(mtd, command, column, page_addr); 1976 return; 1977 } 1978 1979 /* 1980 * Write out the command to the device. 
1981 */ 1982 if (command == NAND_CMD_SEQIN) { 1983 int readcmd; 1984 1985 if (column >= mtd->writesize) { 1986 /* OOB area */ 1987 column -= mtd->writesize; 1988 readcmd = NAND_CMD_READOOB; 1989 } else if (column < 256) { 1990 /* First 256 bytes --> READ0 */ 1991 readcmd = NAND_CMD_READ0; 1992 } else { 1993 column -= 256; 1994 readcmd = NAND_CMD_READ1; 1995 } 1996 1997 chip->cmd_ctrl(mtd, readcmd, ctrl); 1998 } 1999 2000 chip->cmd_ctrl(mtd, command, ctrl); 2001 2002 /* 2003 * Address cycle, when necessary 2004 */ 2005 ctrl = NAND_CTRL_ALE; 2006 2007 /* Serially input address */ 2008 if (column != -1) { 2009 ctrl |= NAND_ALE_COL; 2010 2011 /* Adjust columns for 16 bit buswidth */ 2012 if (chip->options & NAND_BUSWIDTH_16) 2013 column >>= 1; 2014 2015 chip->cmd_ctrl(mtd, column, ctrl); 2016 } 2017 2018 if (page_addr != -1) { 2019 ctrl &= ~NAND_ALE_COL; 2020 ctrl |= NAND_ALE_ROW; 2021 2022 chip->cmd_ctrl(mtd, page_addr, ctrl); 2023 } 2024 2025 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE); 2026 2027 /* 2028 * program and erase have their own busy handlers 2029 * status and sequential in needs no delay 2030 */ 2031 switch (command) { 2032 2033 case NAND_CMD_PAGEPROG: 2034 case NAND_CMD_ERASE1: 2035 case NAND_CMD_ERASE2: 2036 case NAND_CMD_SEQIN: 2037 case NAND_CMD_STATUS: 2038 return; 2039 2040 case NAND_CMD_RESET: 2041 if (chip->dev_ready) 2042 break; 2043 2044 udelay(chip->chip_delay); 2045 2046 chip->cmd_ctrl(mtd, NAND_CMD_STATUS, NAND_CTRL_CLE); 2047 2048 chip->cmd_ctrl(mtd, NAND_CMD_NONE, NAND_NCE); 2049 2050 while (!(chip->read_byte(mtd) & NAND_STATUS_READY)); 2051 2052 return; 2053 2054 /* This applies to read commands */ 2055 default: 2056 /* 2057 * If we don't have access to the busy pin, we apply the given 2058 * command delay 2059 */ 2060 if (!chip->dev_ready) { 2061 udelay(chip->chip_delay); 2062 return; 2063 } 2064 } 2065 2066 /* Apply this short delay always to ensure that we do wait tWB in 2067 * any case on any machine. 
2068 */ 2069 ndelay(100); 2070 2071 nand_wait_ready(mtd); 2072} 2073 2074/** 2075 * brcmnand_read_byte - [DEFAULT] read one byte from the chip 2076 * @mtd: MTD device structure 2077 * 2078 * Default read function for 8bit bus width 2079 */ 2080static uint8_t 2081brcmnand_read_byte(struct mtd_info *mtd) 2082{ 2083 si_t *sih = brcmnand_info.sih; 2084 chipcregs_t *cc = brcmnand_info.cc; 2085 osl_t *osh; 2086 register struct nand_chip *chip = mtd->priv; 2087 unsigned int val; 2088 2089 osh = si_osh(sih); 2090 2091 val = NFC_DREAD | NFC_CSA | NFC_START; 2092 W_REG(osh, &cc->nflashctrl, val); 2093 2094 brcmnand_check_command_done(); 2095 2096 return readb(chip->IO_ADDR_R); 2097} 2098 2099/** 2100 * brcmnand_write_byte - [DEFAULT] write one byte from the chip 2101 * @mtd: MTD device structure 2102 * 2103 * Default write function for 8bit bus width 2104 */ 2105static int 2106brcmnand_write_byte(struct mtd_info *mtd, u_char ch) 2107{ 2108 si_t *sih = brcmnand_info.sih; 2109 chipcregs_t *cc = brcmnand_info.cc; 2110 osl_t *osh; 2111 unsigned int val; 2112 2113 osh = si_osh(sih); 2114 2115 W_REG(osh, &cc->nflashdata, (unsigned int)ch); 2116 2117 val = NFC_DWRITE | NFC_CSA | NFC_START; 2118 W_REG(osh, &cc->nflashctrl, val); 2119 2120 brcmnand_check_command_done(); 2121 2122 return 0; 2123} 2124 2125/** 2126 * brcmnand_read_buf - [DEFAULT] read data from chip into buf 2127 * @mtd: MTD device structure 2128 * @buf: data buffer 2129 * @len: number of bytes to read 2130 * 2131 * Default read function for 8bit bus width 2132 */ 2133static void 2134brcmnand_read_buf(struct mtd_info *mtd, u_char *buf, int len) 2135{ 2136 int count = 0; 2137 2138 while (len > 0) { 2139 buf[count++] = brcmnand_read_byte(mtd); 2140 len--; 2141 } 2142} 2143 2144/** 2145 * brcmnand_write_buf - [DEFAULT] write buffer to chip 2146 * @mtd: MTD device structure 2147 * @buf: data buffer 2148 * @len: number of bytes to write 2149 * 2150 * Default write function for 8bit bus width 2151 */ 2152static void 
2153brcmnand_write_buf(struct mtd_info *mtd, const u_char *buf, int len) 2154{ 2155 int count = 0; 2156 2157 while (len > 0) { 2158 brcmnand_write_byte(mtd, buf[count++]); 2159 len--; 2160 } 2161} 2162 2163/** 2164 * nand_verify_buf - [DEFAULT] Verify chip data against buffer 2165 * @mtd: MTD device structure 2166 * @buf: buffer containing the data to compare 2167 * @len: number of bytes to compare 2168 * 2169 * Default verify function for 8bit buswith 2170 */ 2171static int 2172brcmnand_verify_buf(struct mtd_info *mtd, const uint8_t *buf, int len) 2173{ 2174 int i; 2175 struct nand_chip *chip = mtd->priv; 2176 uint8_t chbuf; 2177 2178 for (i = 0; i < len; i++) { 2179 chbuf = chip->read_byte(mtd); 2180 if (buf[i] != chbuf) { 2181 return -EFAULT; 2182 } 2183 } 2184 2185 return 0; 2186} 2187 2188/** 2189 * brcmnand_devready - [DEFAULT] Check if nand flash device is ready 2190 * @mtd: MTD device structure 2191 * 2192 * Return 0 if nand flash device is busy 2193 */ 2194static int 2195brcmnand_devready(struct mtd_info *mtd) 2196{ 2197 si_t *sih = brcmnand_info.sih; 2198 chipcregs_t *cc = brcmnand_info.cc; 2199 osl_t *osh; 2200 int status; 2201 2202 osh = si_osh(sih); 2203 2204 status = (R_REG(osh, &cc->nflashctrl) & NFC_RDYBUSY) ? 
1 : 0; 2205 2206 return status; 2207} 2208 2209/** 2210 * brcmnand_select_chip - [DEFAULT] select chip 2211 * @mtd: MTD device structure 2212 * @chip: chip to be selected 2213 * 2214 * For BCM4706 just return because of only one chip is used 2215 */ 2216static void 2217brcmnand_select_chip(struct mtd_info *mtd, int chip) 2218{ 2219 return; 2220} 2221 2222/** 2223 * brcmnand_init_nandchip - [DEFAULT] init mtd_info and nand_chip 2224 * @mtd: MTD device structure 2225 * @chip: chip to be selected 2226 * 2227 */ 2228static int 2229brcmnand_init_nandchip(struct mtd_info *mtd, struct nand_chip *chip) 2230{ 2231 chipcregs_t *cc = brcmnand_info.cc; 2232 int ret = 0; 2233 2234 chip->cmdfunc = brcmnand_command; 2235 chip->read_byte = brcmnand_read_byte; 2236 chip->write_buf = brcmnand_write_buf; 2237 chip->read_buf = brcmnand_read_buf; 2238 chip->verify_buf = brcmnand_verify_buf; 2239 chip->select_chip = brcmnand_select_chip; 2240 chip->cmd_ctrl = brcmnand_hwcontrol; 2241 chip->dev_ready = brcmnand_devready; 2242 mtd->get_device = brcmnand_get_device_bcm4706; 2243 mtd->put_device = brcmnand_release_device_bcm4706; 2244 2245 chip->numchips = 1; 2246 chip->chip_shift = 0; 2247 chip->chip_delay = 50; 2248 chip->priv = mtd; 2249 chip->options = NAND_USE_FLASH_BBT; 2250 2251 chip->controller = &chip->hwcontrol; 2252 spin_lock_init(&chip->controller->lock); 2253 init_waitqueue_head(&chip->controller->wq); 2254 2255 chip->IO_ADDR_W = (void __iomem *)&cc->nflashdata; 2256 chip->IO_ADDR_R = chip->IO_ADDR_W; 2257 2258 /* BCM4706 only support software ECC mode */ 2259 chip->ecc.mode = NAND_ECC_SOFT; 2260 chip->ecc.layout = NULL; 2261 2262 mtd->name = "brcmnand"; 2263 mtd->priv = chip; 2264 mtd->owner = THIS_MODULE; 2265 2266 mtd->mlock = partitions_lock_init(); 2267 if (!mtd->mlock) 2268 ret = -ENOMEM; 2269 2270 return ret; 2271} 2272 2273static int __init 2274brcmnand_mtd_init(void) 2275{ 2276 int ret = 0; 2277 hndnand_t *info; 2278 struct pci_dev *dev = NULL; 2279 struct nand_chip 
*chip;	/* continuation of a declaration begun above this view */
	struct mtd_info *mtd;
#ifdef CONFIG_MTD_PARTITIONS
	struct mtd_partition *parts;
	/* NOTE(review): `i` is declared only when CONFIG_MTD_PARTITIONS is set,
	 * but it is also used unconditionally in the oobavail loop below —
	 * this fails to compile when CONFIG_MTD_PARTITIONS is disabled. */
	int i;
#endif

	/* Walk bus 0 looking for the chipcommon core device. */
	list_for_each_entry(dev, &((pci_find_bus(0, 0))->devices), bus_list) {
		if ((dev != NULL) && (dev->device == CC_CORE_ID))
			break;
	}

	/* NOTE(review): after list_for_each_entry() completes without a break,
	 * `dev` points at container_of() of the list head, never NULL, so this
	 * not-found check can never trigger; a separate "found" flag (or
	 * dev = NULL before/inside the loop) is needed for the check to work. */
	if (dev == NULL) {
		printk(KERN_ERR "brcmnand: chipcommon not found\n");
		return -ENODEV;
	}

	memset(&brcmnand_info, 0, sizeof(struct brcmnand_mtd));

	/* attach to the backplane */
	if (!(brcmnand_info.sih = si_kattach(SI_OSH))) {
		printk(KERN_ERR "brcmnand: error attaching to backplane\n");
		ret = -EIO;
		goto fail;
	}

	/* Map registers and flash base */
	if (!(brcmnand_info.cc = ioremap_nocache(
		pci_resource_start(dev, 0),
		pci_resource_len(dev, 0)))) {
		printk(KERN_ERR "brcmnand: error mapping registers\n");
		ret = -EIO;
		goto fail;
	}

	/* Initialize serial flash access */
	if (!(info = hndnand_init(brcmnand_info.sih))) {
		printk(KERN_ERR "brcmnand: found no supported devices\n");
		ret = -ENODEV;
		goto fail;
	}

	/* BCM4706 uses the generic nand_scan() path rather than the
	 * hand-rolled setup below. */
	if (CHIPID(brcmnand_info.sih->chip) == BCM4706_CHIP_ID) {
		mtd = &brcmnand_info.mtd;
		chip = &brcmnand_info.chip;

		if ((ret = brcmnand_init_nandchip(mtd, chip)) != 0) {
			printk(KERN_ERR "brcmnand_mtd_init: brcmnand_init_nandchip failed\n");
			goto fail;
		}

		if ((ret = nand_scan(mtd, chip->numchips)) != 0) {
			printk(KERN_ERR "brcmnand_mtd_init: nand_scan failed\n");
			goto fail;
		}

		goto init_partitions;
	}

	page_buffer = kmalloc(sizeof(struct nand_buffers), GFP_KERNEL);
	if (!page_buffer) {
		printk(KERN_ERR "brcmnand: cannot allocate memory for page buffer\n");
		/* NOTE(review): returning here (instead of `ret = -ENOMEM;
		 * goto fail;`) leaks the ioremap mapping and the si_kattach
		 * handle released by the fail path below. */
		return -ENOMEM;
	}
	memset(page_buffer, 0, sizeof(struct nand_buffers));

	chip = &brcmnand_info.chip;
	mtd = &brcmnand_info.mtd;
	brcmnand_info.nflash = info;

	chip->ecc.mode = NAND_ECC_HW;

	chip->buffers = (struct nand_buffers *)page_buffer;
	chip->numchips = 1;
	chip->chip_shift = 0;
	chip->priv = mtd;
	chip->options |= NAND_USE_FLASH_BBT;
	/* At most 2GB is supported.
	 * NOTE(review): `(1 << 11)` compares info->size in MB against 2048,
	 * but `(1 << 31)` is signed-int overflow (undefined behavior); it
	 * should be `1ULL << 31` (or 2147483648ULL) — TODO confirm the type
	 * of chip->chipsize before changing. */
	chip->chipsize = (info->size >= (1 << 11)) ? (1 << 31) : (info->size << 20);
	brcmnand_info.level = info->ecclevel;

	/* Register with MTD */
	mtd->name = "brcmnand";
	mtd->priv = &brcmnand_info.chip;
	mtd->owner = THIS_MODULE;
	mtd->mlock = partitions_lock_init();
	spin_lock_init(&mtd_lock);
	if (!mtd->mlock) {
		ret = -ENOMEM;
		goto fail;
	}

	mtd->size = chip->chipsize;
	mtd->erasesize = info->blocksize;
	mtd->writesize = info->pagesize;
	/* 16B oob for 512B page, 64B for 2KB page, etc.. */
	mtd->oobsize = (info->pagesize >> 5);

	/* Calculate the address shift from the page size */
	chip->page_shift = ffs(mtd->writesize) - 1;
	/* Convert chipsize to number of pages per chip -1. */
	chip->pagemask = (chip->chipsize >> chip->page_shift) - 1;

	chip->bbt_erase_shift = chip->phys_erase_shift =
		ffs(mtd->erasesize) - 1;
	chip->chip_shift = ffs(chip->chipsize) - 1;

	/* Set the bad block position */
	chip->badblockpos = (mtd->writesize > 512) ?
		NAND_LARGE_BADBLOCK_POS : NAND_SMALL_BADBLOCK_POS;

	/* No external controller supplied: use the chip's own lock/waitqueue. */
	if (!chip->controller) {
		chip->controller = &chip->hwcontrol;
		spin_lock_init(&chip->controller->lock);
		init_waitqueue_head(&chip->controller->wq);
	}

	/* Preset the internal oob write buffer */
	memset(BRCMNAND_OOBBUF(chip->buffers), 0xff, mtd->oobsize);

	/* Set the internal oob buffer location, just after the page data */
	chip->oob_poi = BRCMNAND_OOBBUF(chip->buffers);

	/*
	 * If no default placement scheme is given, select an appropriate one
	 * based on oobsize and the controller's ECC level.
	 */
	if (!chip->ecc.layout) {
		switch (mtd->oobsize) {
		case 16:
			if (brcmnand_info.level == BRCMNAND_ECC_HAMMING)
				chip->ecc.layout = &brcmnand_oob_16;
			else
				chip->ecc.layout = &brcmnand_oob_bch4_512;
			break;
		case 64:
			if (brcmnand_info.level == BRCMNAND_ECC_HAMMING)
				chip->ecc.layout = &brcmnand_oob_64;
			else if (brcmnand_info.level == BRCMNAND_ECC_BCH_4) {
				if (mtd->writesize == 2048)
					chip->ecc.layout = &brcmnand_oob_bch4_2k;
				else {
					printk(KERN_ERR "Unsupported page size of %d\n",
						mtd->writesize);
					BUG();
				}
			}
			/* NOTE(review): for oobsize 64 with an ECC level other
			 * than HAMMING/BCH_4, ecc.layout stays NULL and is
			 * dereferenced below — TODO confirm reachable levels. */
			break;
		case 128:
			if (brcmnand_info.level == BRCMNAND_ECC_HAMMING)
				chip->ecc.layout = &brcmnand_oob_128;
			else {
				printk(KERN_ERR "Unsupported page size of %d\n",
					mtd->writesize);
				BUG();
			}
			break;
		default:
			printk(KERN_WARNING "No oob scheme defined for "
				"oobsize %d\n", mtd->oobsize);
			BUG();
		}
	}

	if (!chip->write_page)
		chip->write_page = brcmnand_write_page;

	/* Hook the HW-ECC page/oob accessors unless the caller overrode them. */
	switch (chip->ecc.mode) {
	case NAND_ECC_HW:
		if (!chip->ecc.read_page)
			chip->ecc.read_page = brcmnand_read_page_hwecc;
		if (!chip->ecc.write_page)
			chip->ecc.write_page = brcmnand_write_page_hwecc;
		if (!chip->ecc.read_oob)
			chip->ecc.read_oob = brcmnand_read_oob_hwecc;
		if (!chip->ecc.write_oob)
			chip->ecc.write_oob = brcmnand_write_oob_hwecc;
		break;
	case NAND_ECC_SOFT:
		break;
	case NAND_ECC_NONE:
		break;
	default:
		printk(KERN_WARNING "Invalid NAND_ECC_MODE %d\n",
			chip->ecc.mode);
		BUG();
		break;
	}

	/*
	 * The number of bytes available for a client to place data into
	 * the out of band area
	 */
	chip->ecc.layout->oobavail = 0;
	for (i = 0; chip->ecc.layout->oobfree[i].length; i++)
		chip->ecc.layout->oobavail +=
			chip->ecc.layout->oobfree[i].length;
	mtd->oobavail = chip->ecc.layout->oobavail;

	/*
	 * Set the number of read / write steps for one page
	 */
	chip->ecc.size = NFL_SECTOR_SIZE; /* Fixed for Broadcom controller. */
	/* NOTE(review): forcing oobsize to 16 here overrides the value
	 * computed from pagesize above, after the layout was already chosen
	 * from the larger oobsize — presumably intentional for the current
	 * ECC modes; verify against the controller spec. */
	mtd->oobsize = 16; /* Fixed for Hamming code or 4-bit BCH for now. */
	chip->ecc.bytes = brcmnand_eccbytes[brcmnand_info.level];
	chip->ecc.steps = mtd->writesize / chip->ecc.size;
	if (chip->ecc.steps * chip->ecc.size != mtd->writesize) {
		printk(KERN_WARNING "Invalid ecc parameters\n");
		BUG();
	}
	chip->ecc.total = chip->ecc.steps * chip->ecc.bytes;

	/*
	 * Allow subpage writes up to ecc.steps. Not possible for MLC
	 * FLASH.
	 */
	if (!(chip->options & NAND_NO_SUBPAGE_WRITE) &&
	    !(chip->cellinfo & NAND_CI_CELLTYPE_MSK)) {
		switch (chip->ecc.steps) {
		case 2:
			mtd->subpage_sft = 1;
			break;
		case 4:
			mtd->subpage_sft = 2;
			break;
		case 8:
			mtd->subpage_sft = 3;
			break;
		}
	}
	chip->subpagesize = mtd->writesize >> mtd->subpage_sft;

	/* Initialize state */
	chip->state = FL_READY;

	/* Invalidate the pagebuffer reference */
	chip->pagebuf = -1;

	/* Install default bad-block and device-access callbacks. */
	if (!chip->block_markbad)
		chip->block_markbad = brcmnand_default_block_markbad;
	if (!chip->scan_bbt)
		chip->scan_bbt = brcmnand_default_bbt;
	if (!mtd->get_device)
		mtd->get_device = brcmnand_get_device;
	if (!mtd->put_device)
		mtd->put_device = brcmnand_release_device;

	/* Fill in the mtd_info operation table. */
	mtd->type = MTD_NANDFLASH;
	mtd->flags = MTD_CAP_NANDFLASH;
	mtd->erase = brcmnand_erase;
	mtd->point = NULL;
	mtd->unpoint = NULL;
	mtd->read = brcmnand_read;
	mtd->write = brcmnand_write;
	mtd->read_oob = brcmnand_read_oob;
	mtd->write_oob = brcmnand_write_oob;
	mtd->sync = brcmnand_sync;
	mtd->lock = NULL;
	mtd->unlock = NULL;
	mtd->suspend = brcmnand_suspend;
	mtd->resume = brcmnand_resume;
	mtd->block_isbad = brcmnand_block_isbad;
	mtd->block_markbad = brcmnand_block_markbad;

	/* propagate ecc.layout to mtd_info */
	mtd->ecclayout = chip->ecc.layout;

	ret = chip->scan_bbt(mtd);
	if (ret) {
		printk(KERN_ERR "brcmnand: scan_bbt failed\n");
		goto fail;
	}

init_partitions:
#ifdef CONFIG_MTD_PARTITIONS
	parts = init_brcmnand_mtd_partitions(mtd, mtd->size);
	/* NOTE(review): on the success path `ret` may still be 0 here, so
	 * this `goto fail` can make the function return 0 despite failing;
	 * it should set ret to a negative errno first. */
	if (!parts)
		goto fail;
	/* Count partitions: parts[] is terminated by a NULL name. */
	for (i = 0; parts[i].name; i++);
	ret = add_mtd_partitions(mtd, parts, i);
	if (ret) {
		printk(KERN_ERR "brcmnand: add_mtd failed\n");
		goto fail;
	}
	brcmnand_info.parts = parts;
#endif
	return 0;

fail:
	/* Error unwind for brcmnand_mtd_init(): release whatever was acquired. */
	if (brcmnand_info.cc)
		iounmap((void *) brcmnand_info.cc);
	if (brcmnand_info.sih)
		si_detach(brcmnand_info.sih);
	/* NOTE(review): the NULL guard is redundant — kfree(NULL) is a no-op. */
	if (page_buffer)
		kfree(page_buffer);
	return ret;
}

/*
 * Module unload: tear down the MTD registration and release the
 * chipcommon register mapping and backplane attachment acquired in
 * brcmnand_mtd_init().
 *
 * NOTE(review): page_buffer allocated in brcmnand_mtd_init() is never
 * freed here, which looks like a leak on module unload — confirm
 * page_buffer's scope (its declaration is outside this view) and add a
 * kfree() if it is file-static.
 */
static void __exit
brcmnand_mtd_exit(void)
{
#ifdef CONFIG_MTD_PARTITIONS
	del_mtd_partitions(&brcmnand_info.mtd);
#else
	del_mtd_device(&brcmnand_info.mtd);
#endif
	iounmap((void *) brcmnand_info.cc);
	si_detach(brcmnand_info.sih);
}

module_init(brcmnand_mtd_init);
module_exit(brcmnand_mtd_exit);