/*
 * NorthStar NAND controller driver
 * for Linux NAND library and MTD interface
 *
 * (c) Broadcom, Inc. 2012 All Rights Reserved.
 *
 * This module interfaces the NAND controller and hardware ECC capabilities
 * to the generic NAND chip support in the NAND library.
 *
 * Notes:
 *	This driver depends on the generic NAND driver, but works at the
 *	page level for operations.
 *
 *	When a page is written, the ECC calculated also protects the OOB
 *	bytes not taken by ECC, and so the OOB must be combined with any
 *	OOB data that preceded the page-write operation in order for the
 *	ECC to be calculated correctly.
 *	Also, when the page is erased, but OOB data is not, HW ECC will
 *	indicate an error, because it checks OOB too, which calls for some
 *	help from the software in this driver.
 *
 * TBD:
 *	Block locking/unlocking support, OTP support
 */


#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/io.h>
#include <linux/ioport.h>
#include <linux/interrupt.h>
#include <linux/delay.h>
#include <linux/bug.h>
#include <linux/err.h>

#include <linux/mtd/mtd.h>
#include <linux/mtd/nand.h>
#ifdef CONFIG_MTD_PARTITIONS
#include <linux/mtd/partitions.h>
#endif

#define NANDC_MAX_CHIPS		2	/* Only 2 CSn supported in NorthStar */

#define DRV_NAME	"iproc-nand"
#define DRV_VERSION	"0.1"
#define DRV_DESC	"Northstar on-chip NAND Flash Controller driver"

/*
 * RESOURCES
 * Hard-coded physical addresses / IRQ range of the on-chip controller.
 */
static struct resource nandc_regs[] = {
	{
		.name = "nandc_regs", .flags = IORESOURCE_MEM,
		.start = 0x18028000, .end = 0x18028FFF,
	},
};

static struct resource nandc_idm_regs[] = {
	{
		.name = "nandc_idm_regs", .flags = IORESOURCE_MEM,
		.start = 0x1811a000, .end = 0x1811afff,
	},
};

static struct resource nandc_irqs[] = {
	{
		.name = "nandc_irqs", .flags = IORESOURCE_IRQ,
		.start = 96, .end = 103,
	},
};


/*
 * Driver private control structure
 */
struct nandc_ctrl {
	struct mtd_info		mtd;	/* embedded MTD device */
	struct nand_chip	nand;	/* embedded generic NAND chip */
#ifdef CONFIG_MTD_PARTITIONS
	struct mtd_partition	*parts;
#endif
	struct device		*device;

	struct completion	op_completion;	/* signalled from the ISR */

	void * __iomem		reg_base;	/* controller registers */
	void * __iomem		idm_base;	/* IDM (slave wrapper) registers */
	int			irq_base;	/* first of NANDC_IRQ_NUM irqs */
	struct nand_ecclayout	ecclayout;	/* filled at run time */
	int			cmd_ret;	/* saved error code */
	unsigned char		oob_index,
				id_byte_index,
				chip_num,	/* active chip select */
				last_cmd,	/* for waitfunc() timeouts */
				ecc_level,
				sector_size_shift,	/* log2: 9 or 10 */
				sec_per_page_shift;	/* log2 sectors/page */
};


/*
 * IRQ numbers - offset from first irq in nandc_irq resource
 */
#define NANDC_IRQ_RD_MISS		0
#define NANDC_IRQ_ERASE_COMPLETE	1
#define NANDC_IRQ_COPYBACK_COMPLETE	2
#define NANDC_IRQ_PROGRAM_COMPLETE	3
#define NANDC_IRQ_CONTROLLER_RDY	4
#define NANDC_IRQ_RDBSY_RDY		5
#define NANDC_IRQ_ECC_UNCORRECTABLE	6
#define NANDC_IRQ_ECC_CORRECTABLE	7
#define NANDC_IRQ_NUM			8

/*
 * REGISTERS
 *
 * Individual bit-fields of registers are specified here
 * for clarity, and the rest of the code will access each field
 * as if it was its own register.
 *
 * Following registers are off <reg_base>:
 */
/* (register offset, lowest bit position, field width in bits) */
#define REG_BIT_FIELD(r,p,w)	((reg_bit_field_t){(r),(p),(w)})

#define NANDC_8KB_PAGE_SUPPORT		REG_BIT_FIELD(0x0, 31, 1)
#define NANDC_REV_MAJOR			REG_BIT_FIELD(0x0, 8, 8)
#define NANDC_REV_MINOR			REG_BIT_FIELD(0x0, 0, 8)

#define NANDC_CMD_START_OPCODE		REG_BIT_FIELD(0x4, 24, 5)

#define NANDC_CMD_CS_SEL		REG_BIT_FIELD(0x8, 16, 3)
#define NANDC_CMD_EXT_ADDR		REG_BIT_FIELD(0x8, 0, 16)

#define NANDC_CMD_ADDRESS		REG_BIT_FIELD(0xc, 0, 32)
#define NANDC_CMD_END_ADDRESS		REG_BIT_FIELD(0x10, 0, 32)

#define NANDC_INT_STATUS		REG_BIT_FIELD(0x14, 0, 32)
#define NANDC_INT_STAT_CTLR_RDY		REG_BIT_FIELD(0x14, 31, 1)
#define NANDC_INT_STAT_FLASH_RDY	REG_BIT_FIELD(0x14, 30, 1)
#define NANDC_INT_STAT_CACHE_VALID	REG_BIT_FIELD(0x14, 29, 1)
#define NANDC_INT_STAT_SPARE_VALID	REG_BIT_FIELD(0x14, 28, 1)
#define NANDC_INT_STAT_ERASED		REG_BIT_FIELD(0x14, 27, 1)
#define NANDC_INT_STAT_PLANE_RDY	REG_BIT_FIELD(0x14, 26, 1)
#define NANDC_INT_STAT_FLASH_STATUS	REG_BIT_FIELD(0x14, 0, 8)

#define NANDC_CS_LOCK			REG_BIT_FIELD(0x18, 31, 1)
#define NANDC_CS_AUTO_CONFIG		REG_BIT_FIELD(0x18, 30, 1)
#define NANDC_CS_NAND_WP		REG_BIT_FIELD(0x18, 29, 1)
#define NANDC_CS_BLK0_WP		REG_BIT_FIELD(0x18, 28, 1)
#define NANDC_CS_SW_USING_CS(n)		REG_BIT_FIELD(0x18, 8+(n), 1)
#define NANDC_CS_MAP_SEL_CS(n)		REG_BIT_FIELD(0x18, 0+(n), 1)

#define NANDC_XOR_ADDR_BLK0_ONLY	REG_BIT_FIELD(0x1c, 31, 1)
#define NANDC_XOR_ADDR_CS(n)		REG_BIT_FIELD(0x1c, 0+(n), 1)

#define NANDC_LL_OP_RET_IDLE		REG_BIT_FIELD(0x20, 31, 1)
#define NANDC_LL_OP_CLE			REG_BIT_FIELD(0x20, 19, 1)
#define NANDC_LL_OP_ALE			REG_BIT_FIELD(0x20, 18, 1)
#define NANDC_LL_OP_WE			REG_BIT_FIELD(0x20, 17, 1)
#define NANDC_LL_OP_RE			REG_BIT_FIELD(0x20, 16, 1)
#define NANDC_LL_OP_DATA		REG_BIT_FIELD(0x20, 0, 16)

#define NANDC_MPLANE_ADDR_EXT		REG_BIT_FIELD(0x24, 0, 16)
#define NANDC_MPLANE_ADDR		REG_BIT_FIELD(0x28, 0, 32)

/* Per chip-select (n) access control registers, stride 16 bytes */
#define NANDC_ACC_CTRL_CS(n)		REG_BIT_FIELD(0x50+((n)<<4), 0, 32)
#define NANDC_ACC_CTRL_RD_ECC(n)	REG_BIT_FIELD(0x50+((n)<<4), 31, 1)
#define NANDC_ACC_CTRL_WR_ECC(n)	REG_BIT_FIELD(0x50+((n)<<4), 30, 1)
#define NANDC_ACC_CTRL_CE_CARE(n)	REG_BIT_FIELD(0x50+((n)<<4), 29, 1)
#define NANDC_ACC_CTRL_PGM_RDIN(n)	REG_BIT_FIELD(0x50+((n)<<4), 28, 1)
#define NANDC_ACC_CTRL_ERA_ECC_ERR(n)	REG_BIT_FIELD(0x50+((n)<<4), 27, 1)
#define NANDC_ACC_CTRL_PGM_PARTIAL(n)	REG_BIT_FIELD(0x50+((n)<<4), 26, 1)
#define NANDC_ACC_CTRL_WR_PREEMPT(n)	REG_BIT_FIELD(0x50+((n)<<4), 25, 1)
#define NANDC_ACC_CTRL_PG_HIT(n)	REG_BIT_FIELD(0x50+((n)<<4), 24, 1)
#define NANDC_ACC_CTRL_PREFETCH(n)	REG_BIT_FIELD(0x50+((n)<<4), 23, 1)
#define NANDC_ACC_CTRL_CACHE_MODE(n)	REG_BIT_FIELD(0x50+((n)<<4), 22, 1)
#define NANDC_ACC_CTRL_CACHE_LASTPG(n)	REG_BIT_FIELD(0x50+((n)<<4), 21, 1)
#define NANDC_ACC_CTRL_ECC_LEVEL(n)	REG_BIT_FIELD(0x50+((n)<<4), 16, 5)
#define NANDC_ACC_CTRL_SECTOR_1K(n)	REG_BIT_FIELD(0x50+((n)<<4), 7, 1)
#define NANDC_ACC_CTRL_SPARE_SIZE(n)	REG_BIT_FIELD(0x50+((n)<<4), 0, 7)

/* Per chip-select (n) geometry/config registers, stride 16 bytes */
#define NANDC_CONFIG_CS(n)		REG_BIT_FIELD(0x54+((n)<<4), 0, 32)
#define NANDC_CONFIG_LOCK(n)		REG_BIT_FIELD(0x54+((n)<<4), 31, 1)
#define NANDC_CONFIG_BLK_SIZE(n)	REG_BIT_FIELD(0x54+((n)<<4), 28, 3)
#define NANDC_CONFIG_CHIP_SIZE(n)	REG_BIT_FIELD(0x54+((n)<<4), 24, 4)
#define NANDC_CONFIG_CHIP_WIDTH(n)	REG_BIT_FIELD(0x54+((n)<<4), 23, 1)
#define NANDC_CONFIG_PAGE_SIZE(n)	REG_BIT_FIELD(0x54+((n)<<4), 20, 2)
#define NANDC_CONFIG_FUL_ADDR_BYTES(n)	REG_BIT_FIELD(0x54+((n)<<4), 16, 3)
#define NANDC_CONFIG_COL_ADDR_BYTES(n)	REG_BIT_FIELD(0x54+((n)<<4), 12, 3)
#define NANDC_CONFIG_BLK_ADDR_BYTES(n)	REG_BIT_FIELD(0x54+((n)<<4), 8, 3)

#define NANDC_TIMING_1_CS(n)		REG_BIT_FIELD(0x58+((n)<<4), 0, 32)
#define NANDC_TIMING_2_CS(n)		REG_BIT_FIELD(0x5c+((n)<<4), 0, 32)
	/* Individual bits for Timing registers - TBD */

#define NANDC_CORR_STAT_THRESH_CS(n)	REG_BIT_FIELD(0xc0, 6*(n), 6)

#define NANDC_BLK_WP_END_ADDR		REG_BIT_FIELD(0xc8, 0, 32)

/* Multi-plane operation opcode overrides */
#define NANDC_MPLANE_ERASE_CYC2_OPCODE	REG_BIT_FIELD(0xcc, 24, 8)
#define NANDC_MPLANE_READ_STAT_OPCODE	REG_BIT_FIELD(0xcc, 16, 8)
#define NANDC_MPLANE_PROG_ODD_OPCODE	REG_BIT_FIELD(0xcc, 8, 8)
#define NANDC_MPLANE_PROG_TRL_OPCODE	REG_BIT_FIELD(0xcc, 0, 8)

#define NANDC_MPLANE_PGCACHE_TRL_OPCODE	REG_BIT_FIELD(0xd0, 24, 8)
#define NANDC_MPLANE_READ_STAT2_OPCODE	REG_BIT_FIELD(0xd0, 16, 8)
#define NANDC_MPLANE_READ_EVEN_OPCODE	REG_BIT_FIELD(0xd0, 8, 8)
#define NANDC_MPLANE_READ_ODD__OPCODE	REG_BIT_FIELD(0xd0, 0, 8)

#define NANDC_MPLANE_CTRL_ERASE_CYC2_EN	REG_BIT_FIELD(0xd4, 31, 1)
#define NANDC_MPLANE_CTRL_RD_ADDR_SIZE	REG_BIT_FIELD(0xd4, 30, 1)
#define NANDC_MPLANE_CTRL_RD_CYC_ADDR	REG_BIT_FIELD(0xd4, 29, 1)
#define NANDC_MPLANE_CTRL_RD_COL_ADDR	REG_BIT_FIELD(0xd4, 28, 1)

/* ECC error statistics counters */
#define NANDC_UNCORR_ERR_COUNT		REG_BIT_FIELD(0xfc, 0, 32)

#define NANDC_CORR_ERR_COUNT		REG_BIT_FIELD(0x100, 0, 32)

#define NANDC_READ_CORR_BIT_COUNT	REG_BIT_FIELD(0x104, 0, 32)

#define NANDC_BLOCK_LOCK_STATUS		REG_BIT_FIELD(0x108, 0, 8)

/* Address of last correctable / uncorrectable ECC event */
#define NANDC_ECC_CORR_ADDR_CS		REG_BIT_FIELD(0x10c, 16, 3)
#define NANDC_ECC_CORR_ADDR_EXT		REG_BIT_FIELD(0x10c, 0, 16)

#define NANDC_ECC_CORR_ADDR		REG_BIT_FIELD(0x110, 0, 32)

#define NANDC_ECC_UNC_ADDR_CS		REG_BIT_FIELD(0x114, 16, 3)
#define NANDC_ECC_UNC_ADDR_EXT		REG_BIT_FIELD(0x114, 0, 16)

#define NANDC_ECC_UNC_ADDR		REG_BIT_FIELD(0x118, 0, 32)

/* Addresses of the last operation of each type */
#define NANDC_READ_ADDR_CS		REG_BIT_FIELD(0x11c, 16, 3)
#define NANDC_READ_ADDR_EXT		REG_BIT_FIELD(0x11c, 0, 16)
#define NANDC_READ_ADDR			REG_BIT_FIELD(0x120, 0, 32)

#define NANDC_PROG_ADDR_CS		REG_BIT_FIELD(0x124, 16, 3)
#define NANDC_PROG_ADDR_EXT		REG_BIT_FIELD(0x124, 0, 16)
#define NANDC_PROG_ADDR			REG_BIT_FIELD(0x128, 0, 32)

#define NANDC_CPYBK_ADDR_CS		REG_BIT_FIELD(0x12c, 16, 3)
#define NANDC_CPYBK_ADDR_EXT		REG_BIT_FIELD(0x12c, 0, 16)
#define NANDC_CPYBK_ADDR		REG_BIT_FIELD(0x130, 0, 32)

#define NANDC_ERASE_ADDR_CS		REG_BIT_FIELD(0x134, 16, 3)
#define NANDC_ERASE_ADDR_EXT		REG_BIT_FIELD(0x134, 0, 16)
#define NANDC_ERASE_ADDR		REG_BIT_FIELD(0x138, 0, 32)

#define NANDC_INV_READ_ADDR_CS		REG_BIT_FIELD(0x13c, 16, 3)
#define NANDC_INV_READ_ADDR_EXT		REG_BIT_FIELD(0x13c, 0, 16)
#define NANDC_INV_READ_ADDR		REG_BIT_FIELD(0x140, 0, 32)

/* Controller initialization / auto-detect status */
#define NANDC_INIT_STAT			REG_BIT_FIELD(0x144, 0, 32)
#define NANDC_INIT_ONFI_DONE		REG_BIT_FIELD(0x144, 31, 1)
#define NANDC_INIT_DEVID_DONE		REG_BIT_FIELD(0x144, 30, 1)
#define NANDC_INIT_SUCCESS		REG_BIT_FIELD(0x144, 29, 1)
#define NANDC_INIT_FAIL			REG_BIT_FIELD(0x144, 28, 1)
#define NANDC_INIT_BLANK		REG_BIT_FIELD(0x144, 27, 1)
#define NANDC_INIT_TIMEOUT		REG_BIT_FIELD(0x144, 26, 1)
#define NANDC_INIT_UNC_ERROR		REG_BIT_FIELD(0x144, 25, 1)
#define NANDC_INIT_CORR_ERROR		REG_BIT_FIELD(0x144, 24, 1)
#define NANDC_INIT_PARAM_RDY		REG_BIT_FIELD(0x144, 23, 1)
#define NANDC_INIT_AUTH_FAIL		REG_BIT_FIELD(0x144, 22, 1)

#define NANDC_ONFI_STAT			REG_BIT_FIELD(0x148, 0, 32)
#define NANDC_ONFI_DEBUG		REG_BIT_FIELD(0x148, 28, 4)
#define NANDC_ONFI_PRESENT		REG_BIT_FIELD(0x148, 27, 1)
#define NANDC_ONFI_BADID_PG2		REG_BIT_FIELD(0x148, 5, 1)
#define NANDC_ONFI_BADID_PG1		REG_BIT_FIELD(0x148, 4, 1)
#define NANDC_ONFI_BADID_PG0		REG_BIT_FIELD(0x148, 3, 1)
#define NANDC_ONFI_BADCRC_PG2		REG_BIT_FIELD(0x148, 2, 1)
#define NANDC_ONFI_BADCRC_PG1		REG_BIT_FIELD(0x148, 1, 1)
#define NANDC_ONFI_BADCRC_PG0		REG_BIT_FIELD(0x148, 0, 1)

#define NANDC_ONFI_DEBUG_DATA		REG_BIT_FIELD(0x14c, 0, 32)

#define NANDC_SEMAPHORE			REG_BIT_FIELD(0x150, 0, 8)

/* Device ID bytes: 4 bytes per register, big-endian within the word */
#define NANDC_DEVID_BYTE(b)		REG_BIT_FIELD(0x194+((b)&0x4), \
						24-(((b)&3)<<3), 8)

#define NANDC_LL_RDDATA			REG_BIT_FIELD(0x19c, 0, 16)

/* One write-1-to-clear interrupt register per IRQ line */
#define NANDC_INT_N_REG(n)		REG_BIT_FIELD(0xf00|((n)<<2), 0, 1)
#define NANDC_INT_DIREC_READ_MISS	REG_BIT_FIELD(0xf00, 0, 1)
#define NANDC_INT_ERASE_DONE		REG_BIT_FIELD(0xf04, 0, 1)
#define NANDC_INT_CPYBK_DONE		REG_BIT_FIELD(0xf08, 0, 1)
#define NANDC_INT_PROGRAM_DONE		REG_BIT_FIELD(0xf0c, 0, 1)
#define NANDC_INT_CONTROLLER_RDY	REG_BIT_FIELD(0xf10, 0, 1)
#define NANDC_INT_RDBSY_RDY		REG_BIT_FIELD(0xf14, 0, 1)
#define NANDC_INT_ECC_UNCORRECTABLE	REG_BIT_FIELD(0xf18, 0, 1)
#define NANDC_INT_ECC_CORRECTABLE	REG_BIT_FIELD(0xf1c, 0, 1)

/*
 * Following registers are treated as contiguous IO memory, offset is from
 * <reg_base>, and the data is in big-endian byte order
 */
#define NANDC_SPARE_AREA_READ_OFF	0x200
#define NANDC_SPARE_AREA_WRITE_OFF	0x280
#define NANDC_CACHE_OFF			0x400
#define NANDC_CACHE_SIZE		(128*4)

/*
 * Following are IDM (a.k.a. Slave Wrapper) registers, off <idm_base>:
 * NB: several macro names below carry historical misspellings
 * (CORRECABLE, PRPOGRAM); they are kept as-is because they are part
 * of the driver-internal interface.
 */
#define IDMREG_BIT_FIELD(r,p,w)	((idm_reg_bit_field_t){(r),(p),(w)})

#define NANDC_IDM_AXI_BIG_ENDIAN	IDMREG_BIT_FIELD(0x408, 28, 1)
#define NANDC_IDM_APB_LITTLE_ENDIAN	IDMREG_BIT_FIELD(0x408, 24, 1)
#define NANDC_IDM_TM			IDMREG_BIT_FIELD(0x408, 16, 5)
#define NANDC_IDM_IRQ_CORRECABLE_EN	IDMREG_BIT_FIELD(0x408, 9, 1)
#define NANDC_IDM_IRQ_UNCORRECABLE_EN	IDMREG_BIT_FIELD(0x408, 8, 1)
#define NANDC_IDM_IRQ_RDYBSY_RDY_EN	IDMREG_BIT_FIELD(0x408, 7, 1)
#define NANDC_IDM_IRQ_CONTROLLER_RDY_EN	IDMREG_BIT_FIELD(0x408, 6, 1)
#define NANDC_IDM_IRQ_PRPOGRAM_COMP_EN	IDMREG_BIT_FIELD(0x408, 5, 1)
#define NANDC_IDM_IRQ_COPYBK_COMP_EN	IDMREG_BIT_FIELD(0x408, 4, 1)
#define NANDC_IDM_IRQ_ERASE_COMP_EN	IDMREG_BIT_FIELD(0x408, 3, 1)
#define NANDC_IDM_IRQ_READ_MISS_EN	IDMREG_BIT_FIELD(0x408, 2, 1)
/* Generic per-line enable; (n) is a NANDC_IRQ_* offset */
#define NANDC_IDM_IRQ_N_EN(n)		IDMREG_BIT_FIELD(0x408, 2+(n), 1)

#define NANDC_IDM_CLOCK_EN		IDMREG_BIT_FIELD(0x408, 0, 1)

#define NANDC_IDM_IO_ECC_CORR		IDMREG_BIT_FIELD(0x500, 3, 1)
#define NANDC_IDM_IO_ECC_UNCORR		IDMREG_BIT_FIELD(0x500, 2, 1)
#define NANDC_IDM_IO_RDYBSY		IDMREG_BIT_FIELD(0x500, 1, 1)
#define NANDC_IDM_IO_CTRL_RDY		IDMREG_BIT_FIELD(0x500, 0, 1)

#define NANDC_IDM_RESET			IDMREG_BIT_FIELD(0x800, 0, 1)
	/* Remaining IDM registers do not seem to be useful, skipped */

/*
 * NAND Controller has its own command opcodes
 * different from opcodes sent to the actual flash chip
 */
#define NANDC_CMD_OPCODE_NULL		0
#define NANDC_CMD_OPCODE_PAGE_READ	1
#define NANDC_CMD_OPCODE_SPARE_READ	2
#define NANDC_CMD_OPCODE_STATUS_READ	3
#define NANDC_CMD_OPCODE_PAGE_PROG	4
#define NANDC_CMD_OPCODE_SPARE_PROG	5
#define NANDC_CMD_OPCODE_DEVID_READ	7
#define NANDC_CMD_OPCODE_BLOCK_ERASE	8
#define NANDC_CMD_OPCODE_FLASH_RESET	9

/*
 * Forward declarations
 */
static void nandc_cmdfunc( struct mtd_info *mtd,
	unsigned command,
	int column,
	int page_addr);

/*
 * NAND Controller hardware ECC data size
 *
 * The following table contains the number of bytes needed for
 * each of the ECC levels, per "sector", which is either 512 or 1024 bytes.
 * The actual layout is as follows:
 * The entire spare area is equally divided into as many sections as there
 * are sectors per page, and the ECC data is located at the end of each
 * of these sections.
 * For example, given a 2K per page and 64 bytes spare device, configured for
 * sector size 1k and ECC level of 4, the spare area will be divided into 2
 * sections 32 bytes each, and the last 14 bytes of 32 in each section will
 * be filled with ECC data.
 * Note: the name of the algorithm and the number of error bits it can correct
 * is of no consequence to this driver, therefore omitted.
 */
static const struct nandc_ecc_size_s {
	unsigned char
		sector_size_shift,	/* log2 of sector size: 9 or 10 */
		ecc_level,		/* HW ECC level setting */
		ecc_bytes_per_sec,	/* bytes of ECC per sector */
		reserved ;
} nandc_ecc_sizes [] = {
	{ 9,	0,	0 },
	{ 10,	0,	0 },
	{ 9,	1,	2 },
	{ 10,	1,	4 },
	{ 9,	2,	4 },
	{ 10,	2,	7 },
	{ 9,	3,	6 },
	{ 10,	3,	11 },
	{ 9,	4,	7 },
	{ 10,	4,	14 },
	{ 9,	5,	9 },
	{ 10,	5,	18 },
	{ 9,	6,	11 },
	{ 10,	6,	21 },
	{ 9,	7,	13 },
	{ 10,	7,	25 },
	{ 9,	8,	14 },
	{ 10,	8,	28 },

	{ 9,	9,	16 },
	{ 9,	10,	18 },
	{ 9,	11,	20 },
	{ 9,	12,	21 },

	{ 10,	9,	32 },
	{ 10,	10,	35 },
	{ 10,	11,	39 },
	{ 10,	12,	42 },
};
421 */ 422static int nandc_hw_ecc_layout( struct nandc_ctrl * ctrl ) 423{ 424 struct nand_ecclayout * layout ; 425 unsigned i, j, k; 426 unsigned ecc_per_sec, oob_per_sec ; 427 unsigned bbm_pos = ctrl->nand.badblockpos; 428 429 DEBUG(MTD_DEBUG_LEVEL1, "%s: ecc_level %d\n", 430 __func__, ctrl->ecc_level); 431 432 /* Caclculate spare area per sector size */ 433 oob_per_sec = ctrl->mtd.oobsize >> ctrl->sec_per_page_shift ; 434 435 /* Try to calculate the amount of ECC bytes per sector with a formula */ 436 if( ctrl->sector_size_shift == 9 ) 437 ecc_per_sec = ((ctrl->ecc_level * 14) + 7) >> 3 ; 438 else if( ctrl->sector_size_shift == 10 ) 439 ecc_per_sec = ((ctrl->ecc_level * 14) + 3) >> 2 ; 440 else 441 ecc_per_sec = oob_per_sec + 1 ; /* cause an error if not in table */ 442 443 DEBUG(MTD_DEBUG_LEVEL1, "%s: calc eccbytes %d\n", 444 __func__, ecc_per_sec ); 445 446 /* Now find out the answer according to the table */ 447 for(i = 0; i < ARRAY_SIZE(nandc_ecc_sizes); i++ ) { 448 if( nandc_ecc_sizes[i].ecc_level == ctrl->ecc_level && 449 nandc_ecc_sizes[i].sector_size_shift == 450 ctrl->sector_size_shift ) { 451 DEBUG(MTD_DEBUG_LEVEL1, "%s: table eccbytes %d\n", 452 __func__, 453 nandc_ecc_sizes[i].ecc_bytes_per_sec ); 454 break; 455 } 456 } 457 458 /* Table match overrides formula */ 459 if( nandc_ecc_sizes[i].ecc_level == ctrl->ecc_level && 460 nandc_ecc_sizes[i].sector_size_shift == ctrl->sector_size_shift ) 461 ecc_per_sec = nandc_ecc_sizes[i].ecc_bytes_per_sec ; 462 463 /* Return an error if calculated ECC leaves no room for OOB */ 464 if( (ctrl->sec_per_page_shift != 0 && ecc_per_sec >= oob_per_sec) || 465 (ctrl->sec_per_page_shift == 0 && ecc_per_sec >= (oob_per_sec-1))){ 466 DEBUG(MTD_DEBUG_LEVEL0, "%s: ERROR: ECC level %d too high, " 467 "leaves no room for OOB data\n", 468 __func__, ctrl->ecc_level ); 469 return -EINVAL ; 470 } 471 472 /* Fill in the needed fields */ 473 ctrl->nand.ecc.size = ctrl->mtd.writesize >> ctrl->sec_per_page_shift; 474 
ctrl->nand.ecc.bytes = ecc_per_sec ; 475 ctrl->nand.ecc.steps = 1 << ctrl->sec_per_page_shift ; 476 ctrl->nand.ecc.total = ecc_per_sec << ctrl->sec_per_page_shift ; 477 478 /* Build an ecc layout data structure */ 479 layout = & ctrl->ecclayout ; 480 memset( layout, 0, sizeof( * layout )); 481 482 /* Total number of bytes used by HW ECC */ 483 layout->eccbytes = ecc_per_sec << ctrl->sec_per_page_shift ; 484 485 /* Location for each of the HW ECC bytes */ 486 for(i = j = 0, k = 1 ; 487 i < ARRAY_SIZE(layout->eccpos) && i < layout->eccbytes; 488 i++, j++ ) { 489 /* switch sector # */ 490 if( j == ecc_per_sec ) { 491 j = 0; 492 k ++; 493 } 494 /* save position of each HW-generated ECC byte */ 495 layout->eccpos[i] = (oob_per_sec * k) - ecc_per_sec + j; 496 497 /* Check that HW ECC does not overlap bad-block marker */ 498 if( bbm_pos == layout->eccpos[i] ) { 499 DEBUG(MTD_DEBUG_LEVEL0, "%s: ERROR: ECC level %d too high, " 500 "HW ECC collides with bad-block marker position\n", 501 __func__, ctrl->ecc_level ); 502 503 return -EINVAL; 504 } 505 } 506 507 /* Location of all user-available OOB byte-ranges */ 508 for( i = 0; i < ARRAY_SIZE(layout->oobfree); i++ ) { 509 if( i >= (1 << ctrl->sec_per_page_shift )) 510 break ; 511 layout->oobfree[i].offset = oob_per_sec * i ; 512 layout->oobfree[i].length = oob_per_sec - ecc_per_sec ; 513 514 515 /* Bad-block marker must be in the first sector spare area */ 516 BUG_ON( bbm_pos >= 517 (layout->oobfree[i].offset+layout->oobfree[i].length)); 518 if( i != 0 ) 519 continue; 520 521 /* Remove bad-block marker from available byte range */ 522 if( bbm_pos == layout->oobfree[i].offset ) { 523 layout->oobfree[i].offset += 1 ; 524 layout->oobfree[i].length -= 1 ; 525 } 526 else if( bbm_pos == 527 (layout->oobfree[i].offset+layout->oobfree[i].length-1)){ 528 layout->oobfree[i].length -= 1 ; 529 } 530 else { 531 layout->oobfree[i+1].offset = bbm_pos + 1 ; 532 layout->oobfree[i+1].length = 533 layout->oobfree[i].length - bbm_pos - 1; 534 
layout->oobfree[i].length = bbm_pos ; 535 i ++ ; 536 } 537 } 538 539 layout->oobavail = ((oob_per_sec - ecc_per_sec) 540 << ctrl->sec_per_page_shift) - 1 ; 541 542 ctrl->mtd.oobavail = layout->oobavail ; 543 ctrl->nand.ecclayout = layout ; 544 ctrl->nand.ecc.layout = layout ; 545 546 /* Output layout for debugging */ 547 printk("Spare area=%d eccbytes %d, ecc bytes located at:\n", 548 ctrl->mtd.oobsize, layout->eccbytes ); 549 for(i = j = 0; i<ARRAY_SIZE(layout->eccpos)&&i<layout->eccbytes; i++ ) { 550 printk(" %d", layout->eccpos[i]); 551 } 552 printk("\nAvailable %d bytes at (off,len):\n", layout->oobavail ); 553 for(i = 0; i < ARRAY_SIZE(layout->oobfree); i++ ) { 554 printk("(%d,%d) ", 555 layout->oobfree[i].offset, 556 layout->oobfree[i].length ); 557 } 558 printk("\n"); 559 560 return 0 ; 561} 562 563/* 564 * Register bit-field manipulation routines 565 * NOTE: These compile to just a few machine instructions in-line 566 */ 567 568typedef struct {unsigned reg, pos, width;} reg_bit_field_t ; 569 570static unsigned inline _reg_read( struct nandc_ctrl *ctrl, reg_bit_field_t rbf ) 571{ 572 void * __iomem base = (void *) ctrl->reg_base; 573 unsigned val ; 574 575 val = __raw_readl( base + rbf.reg); 576 val >>= rbf.pos; 577 val &= (1 << rbf.width)-1; 578 579 return val; 580} 581 582static void inline _reg_write( struct nandc_ctrl *ctrl, 583 reg_bit_field_t rbf, unsigned newval ) 584{ 585 void * __iomem base = (void *) ctrl->reg_base; 586 unsigned val, msk ; 587 588 msk = (1 << rbf.width)-1; 589 msk <<= rbf.pos; 590 newval <<= rbf.pos; 591 newval &= msk ; 592 593 val = __raw_readl( base + rbf.reg); 594 val &= ~msk ; 595 val |= newval ; 596 __raw_writel( val, base + rbf.reg); 597} 598 599 600typedef struct {unsigned reg, pos, width;} idm_reg_bit_field_t ; 601 602static unsigned inline _idm_reg_read( struct nandc_ctrl *ctrl, 603 idm_reg_bit_field_t rbf ) 604{ 605 void * __iomem base = (void *) ctrl->idm_base; 606 unsigned val ; 607 608 val = __raw_readl( base + 
rbf.reg); 609 val >>= rbf.pos; 610 val &= (1 << rbf.width)-1; 611 612 return val; 613} 614 615static void inline _idm_reg_write( struct nandc_ctrl *ctrl, 616 idm_reg_bit_field_t rbf, unsigned newval ) 617{ 618 void * __iomem base = (void *) ctrl->idm_base; 619 unsigned val, msk ; 620 621 msk = (1 << rbf.width)-1; 622 msk <<= rbf.pos; 623 newval <<= rbf.pos; 624 newval &= msk ; 625 626 val = __raw_readl( base + rbf.reg); 627 val &= ~msk ; 628 val |= newval ; 629 __raw_writel( val, base + rbf.reg); 630} 631 632/* 633 * INTERNAL - print NAND chip options 634 * 635 * Useful for debugging 636 */ 637static void nandc_options_print( unsigned opts ) 638{ 639 unsigned bit; 640 const char * n ; 641 642 printk("Options: "); 643 for(bit = 0; bit < 32; bit ++ ) { 644 if( 0 == (opts & (1<<bit))) 645 continue; 646 switch(1<<bit){ 647 default: 648 printk("OPT_%x",1<<bit); 649 n = NULL; 650 break; 651 case NAND_NO_AUTOINCR: 652 n = "NO_AUTOINCR"; break; 653 case NAND_BUSWIDTH_16: 654 n = "BUSWIDTH_16"; break; 655 case NAND_NO_PADDING: 656 n = "NO_PADDING"; break; 657 case NAND_CACHEPRG: 658 n = "CACHEPRG"; break; 659 case NAND_COPYBACK: 660 n = "COPYBACK"; break; 661 case NAND_IS_AND: 662 n = "IS_AND"; break; 663 case NAND_4PAGE_ARRAY: 664 n = "4PAGE_ARRAY"; break; 665 case BBT_AUTO_REFRESH: 666 n = "AUTO_REFRESH"; break; 667 case NAND_NO_READRDY: 668 n = "NO_READRDY"; break; 669 case NAND_NO_SUBPAGE_WRITE: 670 n = "NO_SUBPAGE_WRITE"; break; 671 case NAND_BROKEN_XD: 672 n = "BROKEN_XD"; break; 673 case NAND_ROM: 674 n = "ROM"; break; 675 case NAND_USE_FLASH_BBT: 676 n = "USE_FLASH_BBT"; break; 677 case NAND_SKIP_BBTSCAN: 678 n = "SKIP_BBTSCAN"; break; 679 case NAND_OWN_BUFFERS: 680 n = "OWN_BUFFERS"; break; 681 case NAND_SCAN_SILENT_NODEV: 682 n = "SCAN_SILENT_NODEV"; break; 683 case NAND_CONTROLLER_ALLOC: 684 n = "SCAN_SILENT_NODEV"; break; 685 case NAND_BBT_SCAN2NDPAGE: 686 n = "BBT_SCAN2NDPAGE"; break; 687 case NAND_BBT_SCANBYTE1AND6: 688 n = "BBT_SCANBYTE1AND6"; break; 689 case 
NAND_BBT_SCANLASTPAGE: 690 n = "BBT_SCANLASTPAGE"; break; 691 } 692 printk("%s,",n); 693 } 694 printk("\n"); 695} 696 697/* 698 * NAND Interface - dev_ready 699 * 700 * Return 1 iff device is ready, 0 otherwise 701 */ 702static int nandc_dev_ready( struct mtd_info * mtd) 703{ 704 struct nand_chip * chip = mtd->priv ; 705 struct nandc_ctrl * ctrl = chip->priv ; 706 int rdy; 707 708 rdy = _idm_reg_read( ctrl, NANDC_IDM_IO_CTRL_RDY ); 709 DEBUG(MTD_DEBUG_LEVEL1, "%s: %d\n", __func__, rdy); 710 711 return rdy ; 712} 713 714/* 715 * Interrupt service routines 716 */ 717static irqreturn_t nandc_isr(int irq, void *dev_id) 718{ 719 struct nandc_ctrl *ctrl = dev_id; 720 int irq_off; 721 722 irq_off = irq - ctrl->irq_base ; 723 BUG_ON( irq_off < 0 || irq_off >= NANDC_IRQ_NUM ); 724 725 DEBUG(MTD_DEBUG_LEVEL3, "%s:start irqoff=%d irq_reg=%x en=%d cmd=%#x\n", 726 __func__, irq_off, 727 _reg_read( ctrl, NANDC_INT_N_REG(irq_off)), 728 _idm_reg_read(ctrl, NANDC_IDM_IRQ_N_EN(irq_off)), 729 ctrl->last_cmd 730 ); 731 732 if( ! 
_reg_read( ctrl, NANDC_INT_N_REG(irq_off))) 733 return IRQ_NONE; 734 735 /* Acknowledge interrupt */ 736 _reg_write( ctrl, NANDC_INT_N_REG(irq_off), 1 ); 737 738 /* Wake up task */ 739 complete(&ctrl->op_completion); 740 741 return IRQ_HANDLED; 742} 743 744static int nandc_wait_interrupt( struct nandc_ctrl * ctrl, 745 unsigned irq_off, unsigned timeout_usec ) 746{ 747 long timeout_jiffies ; 748 int ret = 0 ; 749 750 INIT_COMPLETION(ctrl->op_completion); 751 752 /* Acknowledge interrupt */ 753 _reg_write( ctrl, NANDC_INT_N_REG(irq_off), 1 ); 754 755 /* Enable IRQ to wait on */ 756 _idm_reg_write(ctrl, NANDC_IDM_IRQ_N_EN(irq_off),1); 757 758 timeout_jiffies = 1 + usecs_to_jiffies( timeout_usec ); 759 760 if( irq_off != NANDC_IRQ_CONTROLLER_RDY || 761 0 == _idm_reg_read( ctrl, NANDC_IDM_IO_CTRL_RDY)) { 762 763 DEBUG(MTD_DEBUG_LEVEL3, "%s: wait start to=%ld\n", __func__, 764 timeout_jiffies); 765 766 timeout_jiffies = wait_for_completion_interruptible_timeout( 767 &ctrl->op_completion, timeout_jiffies ); 768 769 DEBUG(MTD_DEBUG_LEVEL3, "%s: wait done to=%ld\n", __func__, 770 timeout_jiffies); 771 772 if( timeout_jiffies < 0 ) 773 ret = timeout_jiffies; 774 if( timeout_jiffies == 0 ) 775 ret = -ETIME; 776 } 777 778 /* Disable IRQ, we're done waiting */ 779 _idm_reg_write(ctrl, NANDC_IDM_IRQ_N_EN(irq_off),0); 780 781 if( _idm_reg_read( ctrl, NANDC_IDM_IO_CTRL_RDY ) ) 782 ret = 0; 783 784 if( ret < 0 ) 785 DEBUG(MTD_DEBUG_LEVEL0, "%s: to=%d, timeout!\n", 786 __func__, timeout_usec); 787 788 return ret ; 789} 790 791/* 792 * INTERNAL - wait for command completion 793 */ 794static int nandc_wait_cmd( struct nandc_ctrl * ctrl, unsigned timeout_usec ) 795{ 796 unsigned retries; 797 798 if( _reg_read( ctrl, NANDC_INT_STAT_CTLR_RDY )) 799 return 0; 800 801 /* If the timeout is long, wait for interrupt */ 802 if( timeout_usec >= jiffies_to_usecs(1) >> 4 ) 803 return nandc_wait_interrupt( 804 ctrl, NANDC_IRQ_CONTROLLER_RDY, timeout_usec ); 805 806 /* Wait for completion of the 
prior command */ 807 retries = (timeout_usec >> 3) + 1 ; 808 809 while( retries -- && 810 0 == _reg_read( ctrl, NANDC_INT_STAT_CTLR_RDY )) { 811 cpu_relax(); 812 udelay(6); 813 } 814 815 if(retries == 0 ) 816 { 817 DEBUG(MTD_DEBUG_LEVEL0, "%s: to=%d, timeout!\n", 818 __func__, timeout_usec); 819 return -ETIME ; 820 } 821 return 0; 822} 823 824 825/* 826 * NAND Interface - waitfunc 827 */ 828static int nandc_waitfunc( struct mtd_info *mtd, struct nand_chip *chip ) 829{ 830 struct nandc_ctrl * ctrl = chip->priv ; 831 unsigned to; 832 int ret ; 833 834 /* figure out timeout based on what command is on */ 835 switch( ctrl->last_cmd ) { 836 default: 837 case NAND_CMD_ERASE1: 838 case NAND_CMD_ERASE2: to = 1 << 16; break; 839 case NAND_CMD_STATUS: 840 case NAND_CMD_RESET: to = 256; break; 841 case NAND_CMD_READID: to = 1024; break; 842 case NAND_CMD_READ1: 843 case NAND_CMD_READ0: to = 2048; break; 844 case NAND_CMD_PAGEPROG: to = 4096; break; 845 case NAND_CMD_READOOB: to = 512; break; 846 } 847 848 /* deliver deferred error code if any */ 849 if( (ret = ctrl->cmd_ret) < 0 ) { 850 ctrl->cmd_ret = 0; 851 } 852 else { 853 ret = nandc_wait_cmd( ctrl, to) ; 854 } 855 856 /* Timeout */ 857 if( ret < 0 ) { 858 DEBUG(MTD_DEBUG_LEVEL0, "%s: timeout\n", __func__ ); 859 return NAND_STATUS_FAIL ; 860 } 861 862 ret = _reg_read(ctrl, NANDC_INT_STAT_FLASH_STATUS); 863 864 DEBUG(MTD_DEBUG_LEVEL3, "%s: status=%#x\n", __func__, ret); 865 866 return ret; 867} 868 869/* 870 * NAND Interface - read_oob 871 */ 872static int nandc_read_oob(struct mtd_info *mtd, struct nand_chip *chip, 873 int page, int sndcmd) 874{ 875 struct nandc_ctrl * ctrl = chip->priv ; 876 unsigned n = ctrl->chip_num ; 877 void * __iomem ctrl_spare ; 878 unsigned spare_per_sec, sector ; 879 u64 nand_addr; 880 881 DEBUG(MTD_DEBUG_LEVEL3, "%s: page=%#x\n", __func__, page); 882 883 ctrl_spare = ctrl->reg_base + NANDC_SPARE_AREA_READ_OFF; 884 885 /* Set the page address for the following commands */ 886 nand_addr = 
((u64)page << chip->page_shift); 887 _reg_write( ctrl, NANDC_CMD_EXT_ADDR, nand_addr >> 32 ); 888 889 spare_per_sec = mtd->oobsize >> ctrl->sec_per_page_shift; 890 891 /* Disable ECC validation for spare area reads */ 892 _reg_write( ctrl, NANDC_ACC_CTRL_RD_ECC(n), 0 ); 893 894 /* Loop all sectors in page */ 895 for( sector = 0 ; sector < (1<<ctrl->sec_per_page_shift); sector ++ ) { 896 unsigned col ; 897 898 col = (sector << ctrl->sector_size_shift); 899 900 /* Issue command to read partial page */ 901 _reg_write( ctrl, NANDC_CMD_ADDRESS, nand_addr + col); 902 903 _reg_write(ctrl, NANDC_CMD_START_OPCODE, 904 NANDC_CMD_OPCODE_SPARE_READ); 905 906 /* Wait for the command to complete */ 907 if( nandc_wait_cmd(ctrl, (sector==0)? 10000: 100) ) 908 return -EIO; 909 910 if( !_reg_read( ctrl, NANDC_INT_STAT_SPARE_VALID ) ) { 911 DEBUG(MTD_DEBUG_LEVEL0, "%s: data not valid\n", 912 __func__); 913 return -EIO; 914 } 915 916 /* Set controller to Little Endian mode for copying */ 917 _idm_reg_write( ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 1); 918 919 memcpy( chip->oob_poi + sector * spare_per_sec, 920 ctrl_spare, 921 spare_per_sec ); 922 923 /* Return to Big Endian mode for commands etc */ 924 _idm_reg_write( ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 0); 925 } /* for */ 926 927 return 0; 928} 929 930/* 931 * NAND Interface - write_oob 932 */ 933static int nandc_write_oob(struct mtd_info *mtd, struct nand_chip *chip, 934 int page ) 935{ 936 struct nandc_ctrl * ctrl = chip->priv ; 937 unsigned n = ctrl->chip_num ; 938 void * __iomem ctrl_spare ; 939 unsigned spare_per_sec, sector, num_sec; 940 u64 nand_addr; 941 int to, status = 0; 942 943 DEBUG(MTD_DEBUG_LEVEL3, "%s: page=%#x\n", __func__, page); 944 945 ctrl_spare = ctrl->reg_base + NANDC_SPARE_AREA_WRITE_OFF; 946 947 /* Disable ECC generation for spare area writes */ 948 _reg_write( ctrl, NANDC_ACC_CTRL_WR_ECC(n), 0 ); 949 950 spare_per_sec = mtd->oobsize >> ctrl->sec_per_page_shift; 951 952 /* Set the page address for the following 
commands */ 953 nand_addr = ((u64)page << chip->page_shift); 954 _reg_write( ctrl, NANDC_CMD_EXT_ADDR, nand_addr >> 32 ); 955 956 /* Must allow partial programming to change spare area only */ 957 _reg_write( ctrl, NANDC_ACC_CTRL_PGM_PARTIAL(n), 1 ); 958 959 num_sec = 1 << ctrl->sec_per_page_shift; 960 /* Loop all sectors in page */ 961 for( sector = 0 ; sector < num_sec; sector ++ ) { 962 unsigned col ; 963 964 /* Spare area accessed by the data sector offset */ 965 col = (sector << ctrl->sector_size_shift); 966 967 _reg_write( ctrl, NANDC_CMD_ADDRESS, nand_addr + col); 968 969 /* Set controller to Little Endian mode for copying */ 970 _idm_reg_write( ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 1); 971 972 memcpy( ctrl_spare, 973 chip->oob_poi + sector * spare_per_sec, 974 spare_per_sec ); 975 976 /* Return to Big Endian mode for commands etc */ 977 _idm_reg_write( ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 0); 978 979 /* Push spare bytes into internal buffer, last goes to flash */ 980 _reg_write(ctrl, NANDC_CMD_START_OPCODE, 981 NANDC_CMD_OPCODE_SPARE_PROG); 982 983 if(sector == (num_sec-1)) 984 to = 1 << 16; 985 else 986 to = 1 << 10; 987 988 if( nandc_wait_cmd(ctrl, to ) ) 989 return -EIO; 990 } /* for */ 991 992 /* Restore partial programming inhibition */ 993 _reg_write( ctrl, NANDC_ACC_CTRL_PGM_PARTIAL(n), 0 ); 994 995 status = nandc_waitfunc(mtd, chip); 996 return status & NAND_STATUS_FAIL ? 
-EIO : 0; 997} 998 999/* 1000 * INTERNAL - verify that a buffer is all erased 1001 */ 1002static bool _nandc_buf_erased( const void * buf, unsigned len ) 1003{ 1004 unsigned i; 1005 const u32 * p = buf ; 1006 1007 for(i=0; i < (len >> 2) ; i++ ) { 1008 if( p[i] != 0xffffffff ) 1009 return false; 1010 } 1011 return true; 1012} 1013 1014/* 1015 * INTERNAL - read a page, with or without ECC checking 1016 */ 1017static int _nandc_read_page_do(struct mtd_info *mtd, struct nand_chip *chip, 1018 uint8_t *buf, int page, bool ecc) 1019{ 1020 struct nandc_ctrl * ctrl = chip->priv ; 1021 unsigned n = ctrl->chip_num ; 1022 unsigned cache_reg_size = NANDC_CACHE_SIZE; 1023 void * __iomem ctrl_cache ; 1024 void * __iomem ctrl_spare ; 1025 unsigned data_bytes ; 1026 unsigned spare_per_sec ; 1027 unsigned sector, to = 1 << 16 ; 1028 u32 err_soft_reg, err_hard_reg; 1029 unsigned hard_err_count = 0; 1030 int ret; 1031 u64 nand_addr ; 1032 1033 DEBUG(MTD_DEBUG_LEVEL3, "%s: page=%#x\n", __func__, page); 1034 1035 ctrl_cache = ctrl->reg_base + NANDC_CACHE_OFF; 1036 ctrl_spare = ctrl->reg_base + NANDC_SPARE_AREA_READ_OFF; 1037 1038 /* Reset ECC error stats */ 1039 err_hard_reg = _reg_read(ctrl, NANDC_UNCORR_ERR_COUNT); 1040 err_soft_reg = _reg_read(ctrl, NANDC_READ_CORR_BIT_COUNT); 1041 1042 spare_per_sec = mtd->oobsize >> ctrl->sec_per_page_shift; 1043 1044 /* Set the page address for the following commands */ 1045 nand_addr = ((u64)page << chip->page_shift); 1046 _reg_write( ctrl, NANDC_CMD_EXT_ADDR, nand_addr >> 32 ); 1047 1048 /* Enable ECC validation for ecc page reads */ 1049 _reg_write( ctrl, NANDC_ACC_CTRL_RD_ECC(n), ecc ); 1050 1051 /* Loop all sectors in page */ 1052 for( sector = 0 ; sector < (1<<ctrl->sec_per_page_shift); sector ++ ) { 1053 data_bytes = 0; 1054 1055 /* Copy partial sectors sized by cache reg */ 1056 while( data_bytes < (1<<ctrl->sector_size_shift)) { 1057 unsigned col ; 1058 1059 col = data_bytes + 1060 (sector << ctrl->sector_size_shift); 1061 1062 
_reg_write( ctrl, NANDC_CMD_ADDRESS, nand_addr + col); 1063 1064 /* Issue command to read partial page */ 1065 _reg_write(ctrl, NANDC_CMD_START_OPCODE, 1066 NANDC_CMD_OPCODE_PAGE_READ); 1067 1068 /* Wait for the command to complete */ 1069 if((ret = nandc_wait_cmd(ctrl, to)) < 0 ) 1070 return ret; 1071 1072 /* Set controller to Little Endian mode for copying */ 1073 _idm_reg_write( ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 1); 1074 1075 if(data_bytes == 0 ) { 1076 memcpy( 1077 chip->oob_poi + sector * spare_per_sec, 1078 ctrl_spare, 1079 spare_per_sec ); 1080 } 1081 1082 memcpy( buf+col, ctrl_cache, cache_reg_size); 1083 data_bytes += cache_reg_size ; 1084 1085 /* Return to Big Endian mode for commands etc */ 1086 _idm_reg_write( ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 0); 1087 1088 /* Next iterations should go fast */ 1089 to = 1 << 10; 1090 1091 /* capture hard errors for each partial */ 1092 if( err_hard_reg != 1093 _reg_read(ctrl, NANDC_UNCORR_ERR_COUNT)){ 1094 int era = _reg_read(ctrl,NANDC_INT_STAT_ERASED); 1095 if( (!era) && 1096 (!_nandc_buf_erased(buf+col, cache_reg_size))) 1097 hard_err_count ++ ; 1098 1099 err_hard_reg = 1100 _reg_read(ctrl, NANDC_UNCORR_ERR_COUNT); 1101 } 1102 } /* while FlashCache buffer */ 1103 } /* for sector */ 1104 1105 if( ! 
ecc ) 1106 return 0; 1107 1108 /* Report hard ECC errors */ 1109 if( hard_err_count ) { 1110 mtd->ecc_stats.failed ++; 1111 } 1112 1113 /* Get ECC soft error stats */ 1114 mtd->ecc_stats.corrected += err_soft_reg - 1115 _reg_read(ctrl, NANDC_READ_CORR_BIT_COUNT); 1116 1117 DEBUG(MTD_DEBUG_LEVEL3, "%s: page=%#x err hard %d soft %d\n", 1118 __func__, page, 1119 mtd->ecc_stats.failed, mtd->ecc_stats.corrected); 1120 1121 return 0; 1122} 1123 1124/* 1125 * NAND Interface - read_page_ecc 1126 */ 1127static int nandc_read_page_ecc(struct mtd_info *mtd, struct nand_chip *chip, 1128 uint8_t *buf, int page) 1129{ 1130 return _nandc_read_page_do( mtd, chip, buf, page, true ); 1131} 1132 1133/* 1134 * NAND Interface - read_page_raw 1135 */ 1136static int nandc_read_page_raw(struct mtd_info *mtd, struct nand_chip *chip, 1137 uint8_t *buf, int page) 1138{ 1139 return _nandc_read_page_do( mtd, chip, buf, page, true ); 1140} 1141 1142/* 1143 * INTERNAL - do page write, with or without ECC generation enabled 1144 */ 1145static void _nandc_write_page_do( struct mtd_info *mtd, struct nand_chip *chip, 1146 const uint8_t *buf, bool ecc) 1147{ 1148 struct nandc_ctrl * ctrl = chip->priv ; 1149 const unsigned cache_reg_size = NANDC_CACHE_SIZE; 1150 unsigned n = ctrl->chip_num ; 1151 void * __iomem ctrl_cache ; 1152 void * __iomem ctrl_spare ; 1153 unsigned spare_per_sec, sector, num_sec ; 1154 unsigned data_bytes, spare_bytes ; 1155 int i, ret, to ; 1156 uint8_t tmp_poi[ NAND_MAX_OOBSIZE ]; 1157 u32 nand_addr; 1158 1159 ctrl_cache = ctrl->reg_base + NANDC_CACHE_OFF; 1160 ctrl_spare = ctrl->reg_base + NANDC_SPARE_AREA_WRITE_OFF; 1161 1162 /* Get start-of-page address */ 1163 nand_addr = _reg_read( ctrl, NANDC_CMD_ADDRESS ); 1164 1165 DEBUG(MTD_DEBUG_LEVEL3, "%s: page=%#x\n", __func__, 1166 nand_addr >> chip->page_shift ); 1167 1168 BUG_ON( mtd->oobsize > sizeof(tmp_poi)); 1169 1170 /* Retreive pre-existing OOB values */ 1171 memcpy( tmp_poi, chip->oob_poi, mtd->oobsize ); 1172 ret = 
nandc_read_oob( mtd, chip, nand_addr >> chip->page_shift, 1 );
	if( (ctrl->cmd_ret = ret) < 0 )
		return ;

	/* Apply new OOB data bytes just like they would end up on the chip:
	 * flash programming can only clear bits, so AND with existing OOB */
	for(i = 0; i < mtd->oobsize; i ++ )
		chip->oob_poi[i] &= tmp_poi[i];

	spare_per_sec = mtd->oobsize >> ctrl->sec_per_page_shift;

	/* Enable ECC generation for ecc page write, if requested */
	_reg_write( ctrl, NANDC_ACC_CTRL_WR_ECC(n), ecc );

	spare_bytes = 0;
	num_sec = 1 << ctrl->sec_per_page_shift ;

	/* Loop all sectors in page */
	for( sector = 0 ; sector < num_sec; sector ++ ) {

		data_bytes = 0;

		/* Copy partial sectors sized by cache reg */
		while( data_bytes < (1<<ctrl->sector_size_shift)) {
			unsigned col ;

			col = data_bytes +
				(sector << ctrl->sector_size_shift);

			/* Set address of 512-byte sub-page */
			_reg_write( ctrl, NANDC_CMD_ADDRESS, nand_addr + col );

			/* Set controller to Little Endian mode for copying */
			_idm_reg_write( ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 1);

			/* Spare area is written at each sector start */
			if(data_bytes == 0 ) {
				memcpy( ctrl_spare,
					chip->oob_poi + spare_bytes,
					spare_per_sec );
				spare_bytes += spare_per_sec ;
			}

			/* Copy sub-page data */
			memcpy( ctrl_cache, buf+col, cache_reg_size);
			data_bytes += cache_reg_size ;

			/* Return to Big Endian mode for commands etc */
			_idm_reg_write( ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 0);

			/* Push data into internal cache */
			_reg_write(ctrl, NANDC_CMD_START_OPCODE,
				NANDC_CMD_OPCODE_PAGE_PROG);

			/* Wait for the command to complete; the last sector
			 * triggers the actual flash program and needs a
			 * longer timeout */
			if( sector == (num_sec-1))
				to = 1 << 16;
			else
				to = 1 << 10;
			ret = nandc_wait_cmd(ctrl, to );
			if( (ctrl->cmd_ret = ret) < 0 )
				return ; /* error deferred to waitfunc */
		} /* while */
	} /* for */
}

/*
 * NAND Interface - write_page_ecc
 */
static void nandc_write_page_ecc( struct mtd_info *mtd, struct nand_chip *chip,
	const uint8_t *buf)
{
	_nandc_write_page_do( mtd, chip, buf, true );
}

/*
 * NAND Interface - write_page_raw
 */
static void nandc_write_page_raw( struct mtd_info *mtd, struct nand_chip *chip,
	const uint8_t *buf)
{
	_nandc_write_page_do( mtd, chip, buf, false );
}

/*
 * MTD Interface - read_byte
 *
 * This function emulates simple controllers behavior
 * for just a few relevant commands, based on the last
 * command latched by nandc_cmdfunc().
 */
static uint8_t nandc_read_byte( struct mtd_info *mtd )
{
	struct nand_chip * nand = mtd->priv ;
	struct nandc_ctrl * ctrl = nand->priv ;
	uint8_t b = ~0;

	switch( ctrl->last_cmd ) {
	case NAND_CMD_READID:
		/* Successive calls walk through the 8 device-ID bytes */
		if( ctrl->id_byte_index < 8) {
			b = _reg_read( ctrl, NANDC_DEVID_BYTE(
				ctrl->id_byte_index ));
			ctrl->id_byte_index ++;
		}
		break;
	case NAND_CMD_READOOB:
		/* OOB was pre-fetched into oob_poi by cmdfunc(READOOB) */
		if( ctrl->oob_index < mtd->oobsize ) {
			b = nand->oob_poi[ ctrl->oob_index ++ ] ;
		}
		break;
	case NAND_CMD_STATUS:
		b = _reg_read(ctrl, NANDC_INT_STAT_FLASH_STATUS);
		break;
	default:
		BUG_ON( 1);
	}
	DEBUG(MTD_DEBUG_LEVEL3, "%s: %#x\n", __func__, b );
	return b;
}

/*
 * MTD Interface - read_word
 *
 * Can not be tested without x16 chip, but the SoC does not support x16 i/f.
 */
static u16 nandc_read_word( struct mtd_info *mtd )
{
	u16 w = ~0;

	/* Assemble a little-endian 16-bit value from two byte reads */
	w = nandc_read_byte(mtd);
	barrier();
	w |= nandc_read_byte(mtd) << 8;

	DEBUG(MTD_DEBUG_LEVEL0, "%s: %#x\n", __func__, w );

	return w;
}

/*
 * MTD Interface - select a chip from an array
 *
 * NOTE(review): generic NAND may call this with chip == -1 to deselect;
 * that value is written to the CS_SEL register as-is - confirm the
 * controller tolerates it.
 */
static void nandc_select_chip( struct mtd_info *mtd, int chip)
{
	struct nand_chip * nand = mtd->priv ;
	struct nandc_ctrl * ctrl = nand->priv ;

	ctrl->chip_num = chip;
	_reg_write( ctrl, NANDC_CMD_CS_SEL, chip);
}

/*
 * NAND Interface - emulate low-level NAND commands
 *
 * Only a few low-level commands are really needed by generic NAND,
 * and they do not call for CMD_LL operations the controller can support.
 */
static void nandc_cmdfunc( struct mtd_info *mtd,
	unsigned command,
	int column,
	int page_addr)
{
	struct nand_chip * nand = mtd->priv ;
	struct nandc_ctrl * ctrl = nand->priv ;
	u64 nand_addr;
	unsigned to = 1;

	DEBUG(MTD_DEBUG_LEVEL3, "%s: cmd=%#x col=%#x pg=%#x\n", __func__,
		command, column, page_addr );

	/* Latch the command so read_byte() can emulate its data phase */
	ctrl->last_cmd = command ;

	/* Set address for some commands */
	switch( command ) {
	case NAND_CMD_ERASE1:
		column = 0;
		/*FALLTHROUGH*/
	case NAND_CMD_SEQIN:
	case NAND_CMD_READ0:
	case NAND_CMD_READ1:
		BUG_ON( column >= mtd->writesize );
		nand_addr = (u64) column |
			((u64)page_addr << nand->page_shift);
		_reg_write( ctrl, NANDC_CMD_EXT_ADDR, nand_addr >> 32 );
		_reg_write( ctrl, NANDC_CMD_ADDRESS, nand_addr );
		break;
	case NAND_CMD_ERASE2:
	case NAND_CMD_RESET:
	case NAND_CMD_READID:
	case NAND_CMD_READOOB:
	case NAND_CMD_PAGEPROG:
	default:
		/* Do nothing, address not used */
		break;
	}

	/* Issue appropriate command to controller; "to" is the poll/timeout
	 * budget handed to nandc_wait_cmd below */
	switch( command ) {
	case NAND_CMD_SEQIN:
		/* Only need to load command address, done */
		return ;

	case NAND_CMD_RESET:
		_reg_write(ctrl, NANDC_CMD_START_OPCODE,
			NANDC_CMD_OPCODE_FLASH_RESET);
		to = 1 << 8;
		break;

	case NAND_CMD_READID:
		_reg_write(ctrl, NANDC_CMD_START_OPCODE,
			NANDC_CMD_OPCODE_DEVID_READ);
		ctrl->id_byte_index = 0;
		to = 1 << 8;
		break;

	case NAND_CMD_READ0:
	case NAND_CMD_READ1:
		_reg_write(ctrl, NANDC_CMD_START_OPCODE,
			NANDC_CMD_OPCODE_PAGE_READ);
		to = 1 << 15;
		break;
	case NAND_CMD_STATUS:
		_reg_write(ctrl, NANDC_CMD_START_OPCODE,
			NANDC_CMD_OPCODE_STATUS_READ );
		to = 1 << 8;
		break;
	case NAND_CMD_ERASE1:
		/* Address loaded above; ERASE2 triggers the operation */
		return ;

	case NAND_CMD_ERASE2:
		_reg_write(ctrl, NANDC_CMD_START_OPCODE,
			NANDC_CMD_OPCODE_BLOCK_ERASE);
		to = 1 << 18;
		break;

	case NAND_CMD_PAGEPROG:
		/* Cmd already set from write_page */
		return;

	case NAND_CMD_READOOB:
		/* Emulate simple interface: pre-fetch OOB into oob_poi,
		 * then read_byte() serves it out */
		nandc_read_oob( mtd, nand, page_addr, 1);
		ctrl->oob_index = 0;
		return;

	default:
		BUG_ON(1);

	} /* switch */

	/* Wait for command to complete; result deferred for waitfunc */
	ctrl->cmd_ret = nandc_wait_cmd( ctrl, to) ;

}

/*
 * INTERNAL - identify the chip(s) via the generic NAND library and
 * negotiate a sector-size / ECC-level combination that fits the OOB,
 * then program the per-chip hardware configuration accordingly.
 */
static int nandc_scan( struct mtd_info *mtd )
{
	struct nand_chip * nand = mtd->priv ;
	struct nandc_ctrl * ctrl = nand->priv ;
	bool sector_1k = false;
	unsigned chip_num = 0;
	int ecc_level = 0;
	int ret;

	ret = nand_scan_ident( mtd, NANDC_MAX_CHIPS, NULL);
	if( ret )
		return ret;

	DEBUG(MTD_DEBUG_LEVEL0, "%s: scan_ident ret=%d num_chips=%d\n",
		__func__, ret, nand->numchips );

#ifdef __INTERNAL_DEBUG__
	/* For debug - change sector size, ecc level */
	_reg_write( ctrl, NANDC_ACC_CTRL_SECTOR_1K(0), 1);
	_reg_write( ctrl, NANDC_ACC_CTRL_ECC_LEVEL(0), 4);
#endif

	/* Get configuration from first chip */
	sector_1k = _reg_read( ctrl, NANDC_ACC_CTRL_SECTOR_1K(0) );
	ecc_level = _reg_read( ctrl, NANDC_ACC_CTRL_ECC_LEVEL(0) );
	mtd->writesize_shift = nand->page_shift ;

	ctrl->ecc_level = ecc_level ;
	ctrl->sector_size_shift = (sector_1k)? 10: 9;

	/* Configure spare area, tweak as needed */
	do {
		ctrl->sec_per_page_shift =
			mtd->writesize_shift - ctrl->sector_size_shift;

		/* will return -EINVAL if OOB space exhausted */
		ret = nandc_hw_ecc_layout(ctrl) ;

		/* First try to bump sector size to 1k, then decrease level */
		if( ret && nand->page_shift > 9 && ctrl->sector_size_shift < 10)
			ctrl->sector_size_shift = 10;
		else if( ret )
			ctrl->ecc_level -- ;

	} while( ret && ctrl->ecc_level > 0 );

	BUG_ON(ctrl->ecc_level == 0);

	if( (ctrl->sector_size_shift > 9 ) != (sector_1k==1) ) {
		printk(KERN_WARNING "%s: sector size adjusted to 1k\n", DRV_NAME );
		sector_1k = 1;
	}

	if( ecc_level != ctrl->ecc_level ) {
		printk(KERN_WARNING "%s: ECC level adjusted from %u to %u\n",
			DRV_NAME, ecc_level, ctrl->ecc_level );
		ecc_level = ctrl->ecc_level ;
	}

	/* handle the hardware chip config registers */
	for( chip_num = 0; chip_num < nand->numchips; chip_num ++ ) {
		_reg_write( ctrl, NANDC_ACC_CTRL_SECTOR_1K(chip_num),
			sector_1k);
		_reg_write( ctrl, NANDC_ACC_CTRL_ECC_LEVEL(chip_num),
			ecc_level);

		/* Large pages: no partial page programming */
		if( mtd->writesize > 512 ) {
			_reg_write( ctrl,
				NANDC_ACC_CTRL_PGM_RDIN(chip_num), 0 );
			_reg_write( ctrl,
				NANDC_ACC_CTRL_PGM_PARTIAL(chip_num), 0 );
		}

		/* Do not raise ECC error when reading erased pages */
		/* This bit has only partial effect, driver needs to help */
		_reg_write( ctrl, NANDC_ACC_CTRL_ERA_ECC_ERR(chip_num), 0 );

		/* Disable page-hit, prefetch and cache modes */
		_reg_write( ctrl, NANDC_ACC_CTRL_PG_HIT(chip_num), 0 );
		_reg_write( ctrl, NANDC_ACC_CTRL_PREFETCH(chip_num), 0 );
		_reg_write( ctrl, NANDC_ACC_CTRL_CACHE_MODE(chip_num), 0 );
		_reg_write( ctrl, NANDC_ACC_CTRL_CACHE_LASTPG(chip_num), 0 );

		/* TBD: consolidate or at least verify the s/w and h/w geometries agree */
	}

	/* Allow writing on device */
	if( !(nand->options & NAND_ROM) ) {
		_reg_write( ctrl, NANDC_CS_NAND_WP, 0);
	}

	DEBUG(MTD_DEBUG_LEVEL0, "%s: layout.oobavail=%d\n", __func__,
		nand->ecc.layout->oobavail );

	ret = nand_scan_tail( mtd );

	DEBUG(MTD_DEBUG_LEVEL0, "%s: scan_tail ret=%d\n", __func__, ret );

	if( nand->badblockbits == 0 )
		nand->badblockbits = 8;
	BUG_ON( (1<<nand->page_shift) != mtd->writesize );

	/* Spit out some key chip parameters as detected by nand_base */
	DEBUG(MTD_DEBUG_LEVEL0,
		"%s: erasesize=%d writesize=%d oobsize=%d "
		" page_shift=%d badblockpos=%d badblockbits=%d\n",
		__func__, mtd->erasesize, mtd->writesize, mtd->oobsize,
		nand->page_shift, nand->badblockpos, nand->badblockbits );

	nandc_options_print(nand->options);

	return ret ;
}

/*
 * Dummy function to make sure generic NAND does not call anything unexpected.
1543 */ 1544static int nandc_dummy_func( struct mtd_info * mtd ) 1545{ 1546 BUG_ON(1); 1547} 1548 1549/* 1550 * INTERNAL - main intiailization function 1551 */ 1552static int nandc_ctrl_init( struct nandc_ctrl * ctrl ) 1553{ 1554 unsigned chip; 1555 struct nand_chip * nand ; 1556 struct mtd_info * mtd ; 1557 unsigned n = 0; 1558 1559 /* Software variables init */ 1560 nand = &ctrl->nand ; 1561 mtd = &ctrl->mtd ; 1562 1563 init_completion( &ctrl->op_completion); 1564 1565 spin_lock_init( &nand->hwcontrol.lock ); 1566 init_waitqueue_head( &nand->hwcontrol.wq ); 1567 1568 mtd->priv = nand ; 1569 mtd->owner = THIS_MODULE ; 1570 mtd->name = DRV_NAME ; 1571 1572 nand->priv = ctrl ; 1573 nand->controller = & nand->hwcontrol; 1574 1575 nand->chip_delay = 5 ; /* not used */ 1576 nand->IO_ADDR_R = nand->IO_ADDR_W = (void *)~0L; 1577 1578 if( _reg_read( ctrl, NANDC_CONFIG_CHIP_WIDTH(n) )) 1579 nand->options |= NAND_BUSWIDTH_16; 1580 nand->options |= NAND_SKIP_BBTSCAN ; /* Dont need BBTs */ 1581 1582 nand->options |= NAND_NO_SUBPAGE_WRITE ; /* Subpages unsupported */ 1583 1584 nand->ecc.mode = NAND_ECC_HW; 1585 1586 nand->dev_ready = nandc_dev_ready; 1587 nand->read_byte = nandc_read_byte; 1588 nand->read_word = nandc_read_word; 1589 nand->ecc.read_page_raw = nandc_read_page_raw; 1590 nand->ecc.write_page_raw= nandc_write_page_raw; 1591 nand->ecc.read_page = nandc_read_page_ecc; 1592 nand->ecc.write_page = nandc_write_page_ecc; 1593 nand->ecc.read_oob = nandc_read_oob; 1594 nand->ecc.write_oob = nandc_write_oob; 1595 1596 nand->select_chip = nandc_select_chip ; 1597 nand->cmdfunc = nandc_cmdfunc ; 1598 nand->waitfunc = nandc_waitfunc ; 1599 nand->read_buf = (void *) nandc_dummy_func ; 1600 nand->write_buf = (void *) nandc_dummy_func ; 1601 nand->verify_buf = (void *) nandc_dummy_func ; 1602 1603 /* Set AUTO_CNFIG bit - try to auto-detect chips */ 1604 _reg_write( ctrl, NANDC_CS_AUTO_CONFIG, 1); 1605 1606 udelay(1000); 1607 1608 /* Print out current chip config */ 1609 for(chip 
= 0; chip < NANDC_MAX_CHIPS; chip ++ ) {
		printk("nandc[%d]: size=%#x block=%#x page=%#x ecc_level=%#x\n"
			,chip,
			_reg_read( ctrl, NANDC_CONFIG_CHIP_SIZE(chip)),
			_reg_read( ctrl, NANDC_CONFIG_BLK_SIZE(chip)),
			_reg_read( ctrl, NANDC_CONFIG_PAGE_SIZE(chip)),
			_reg_read( ctrl, NANDC_ACC_CTRL_ECC_LEVEL(chip))
			);
	}

	printk("%s: ready=%d\n", __FUNCTION__,
		_idm_reg_read(ctrl, NANDC_IDM_IO_CTRL_RDY) );

	if( nandc_scan( mtd ) )
		return -ENXIO ;

	return 0;
}

/*
 * INTERNAL - reset the controller through its IDM wrapper, enable its
 * clock, set Big Endian APB mode and mask all controller interrupts.
 * Returns 0 on success, -1 if the reset handshake fails or times out.
 */
static int __init nandc_idm_init( struct nandc_ctrl * ctrl )
{
	int irq_off;
	unsigned retries = 0x1000;

	if( _idm_reg_read( ctrl, NANDC_IDM_RESET) )
		printk("%s: stuck in reset ?\n", __FUNCTION__ );

	/* Assert reset and verify it took effect */
	_idm_reg_write( ctrl, NANDC_IDM_RESET, 1);
	if( !_idm_reg_read( ctrl, NANDC_IDM_RESET) ) {
		DEBUG(MTD_DEBUG_LEVEL0, "%s: reset failed\n", __func__);
		return -1;
	}

	/* De-assert reset, polling until the bit actually clears */
	while( _idm_reg_read( ctrl, NANDC_IDM_RESET) ) {
		_idm_reg_write( ctrl, NANDC_IDM_RESET, 0);
		udelay(100);
		if( !(retries--) ) {
			DEBUG(MTD_DEBUG_LEVEL0,
				"%s: reset timeout\n", __func__);
			return -1;
		}
	}

	_idm_reg_write( ctrl, NANDC_IDM_CLOCK_EN, 1);
	_idm_reg_write( ctrl, NANDC_IDM_APB_LITTLE_ENDIAN, 0);
	udelay(10);

	printk("%s: NAND Controller rev %d.%d\n", __FUNCTION__,
		_reg_read(ctrl, NANDC_REV_MAJOR),
		_reg_read(ctrl, NANDC_REV_MINOR)
		);

	udelay(250);

	/* Disable all IRQs */
	for(irq_off = 0; irq_off < NANDC_IRQ_NUM; irq_off ++ )
		_idm_reg_write(ctrl, NANDC_IDM_IRQ_N_EN(irq_off),0);

	return 0;
}


/*
 * INTERNAL - claim and map the controller and IDM wrapper register
 * windows, then acquire the controller interrupt lines.
 * On failure all resources acquired so far are released.
 */
static int __init nandc_regs_map( struct nandc_ctrl * ctrl )
{
	int res, irq ;

	res = request_resource( &iomem_resource, &nandc_regs[ 0 ] );
	if( res != 0) {
		DEBUG(MTD_DEBUG_LEVEL0, "%s: reg resource failure\n", __func__);
		return res ;
	}

	res = request_resource( &iomem_resource, &nandc_idm_regs[ 0 ] );
	if( res != 0) {
		DEBUG(MTD_DEBUG_LEVEL0, "%s: idm resource failure\n", __func__);
		return res ;
	}

	/* map controller registers in virtual space */
	ctrl->reg_base = ioremap( nandc_regs[ 0 ].start,
		resource_size( &nandc_regs[ 0 ] ));

	if( IS_ERR_OR_NULL(ctrl->reg_base) ) {
		res = PTR_ERR(ctrl->reg_base);
		DEBUG(MTD_DEBUG_LEVEL0, "%s: error mapping regs\n", __func__);
		/* Release resources */
		release_resource( &nandc_regs[ 0 ] );
		return res;
	}

	/* map wrapper registers in virtual space */
	ctrl->idm_base = ioremap( nandc_idm_regs[ 0 ].start,
		resource_size( &nandc_idm_regs[ 0 ] ));

	if( IS_ERR_OR_NULL(ctrl->idm_base) ) {
		/* Release resources */
		res = PTR_ERR(ctrl->idm_base);
		DEBUG(MTD_DEBUG_LEVEL0, "%s: error mapping wrapper\n", __func__);
		release_resource( &nandc_idm_regs[ 0 ] );
		iounmap( ctrl->reg_base );
		ctrl->reg_base = NULL;
		release_resource( &nandc_regs[ 0 ] );
		return res;
	}
1713 1714 /* Acquire all interrupt lines */ 1715 ctrl->irq_base = nandc_irqs[0].start ; 1716 for( irq = nandc_irqs[0].start ; irq <= nandc_irqs[0].end; irq ++ ) { 1717 res = request_irq( irq, nandc_isr, 0, "nandc", ctrl ); 1718 if( res < 0 ) { 1719 DEBUG(MTD_DEBUG_LEVEL0, "%s: irq %d failure\n", 1720 __func__, irq); 1721 } 1722 } 1723 1724 1725 return 0; 1726} 1727 1728#ifdef CONFIG_MTD_PARTITIONS 1729static const char *part_probes[] = { "cfenandpart", "cmdlinepart", NULL }; 1730#endif 1731 1732/* 1733 * Top-level init function 1734 */ 1735static int __devinit nandc_probe(struct platform_device *pdev ) 1736{ 1737 static struct nandc_ctrl _nand_ctrl; 1738 struct nandc_ctrl * ctrl = & _nand_ctrl ; 1739 int res ; 1740 1741 ctrl = pdev->dev.platform_data; 1742 1743 platform_set_drvdata(pdev, ctrl); 1744 1745 ctrl->device = &pdev->dev ; 1746 1747 res = nandc_regs_map( ctrl ); 1748 if( res ) { 1749 DEBUG(MTD_DEBUG_LEVEL0, "%s: regs_map failed\n", __func__); 1750 goto probe_error ; 1751 } 1752 1753 res = nandc_idm_init( ctrl ); 1754 if( res ) { 1755 DEBUG(MTD_DEBUG_LEVEL0, "%s: idm_init failed\n", __func__); 1756 goto probe_error ; 1757 } 1758 1759 res = nandc_ctrl_init( ctrl ); 1760 if( res ) { 1761 DEBUG(MTD_DEBUG_LEVEL0, "%s: ctrl_init failed\n", __func__); 1762 goto probe_error ; 1763 } 1764 1765 ctrl->mtd.dev.parent = ctrl->device; 1766 1767#ifdef CONFIG_MTD_PARTITIONS 1768 res = parse_mtd_partitions( &ctrl->mtd, part_probes, &ctrl->parts, 0 ); 1769 1770 if( res > 0 ) { 1771 DEBUG(MTD_DEBUG_LEVEL0, "%s: registering MTD partitions\n", 1772 __func__); 1773 res = add_mtd_partitions( &ctrl->mtd, ctrl->parts, res ); 1774 if( res < 0 ) { 1775 DEBUG(MTD_DEBUG_LEVEL0, 1776 "%s: failed to register partitions\n", 1777 __func__); 1778 goto probe_error ; 1779 } 1780 } 1781 else 1782#else 1783 { 1784 DEBUG(MTD_DEBUG_LEVEL0, "%s: registering the entire device MTD\n", 1785 __func__); 1786 if( (res = add_mtd_device( &ctrl->mtd ))) { 1787 DEBUG(MTD_DEBUG_LEVEL0, "%s: failed to 
register\n", __func__); 1788 goto probe_error ; 1789 } 1790 } 1791#endif 1792 1793 return 0; 1794 probe_error: 1795 return res; 1796} 1797 1798/* Single device present */ 1799static struct nandc_ctrl _nand_ctrl; 1800 1801/* driver structure for object model */ 1802static struct platform_driver nandc_driver = { 1803 .probe = nandc_probe, 1804 .driver = { 1805 .name = DRV_NAME, 1806 .owner = THIS_MODULE, 1807 }, 1808}; 1809 1810static struct platform_device platform_nand_devices = { 1811 .name = DRV_NAME, 1812 .id = 0x5a5d, 1813 .dev = { 1814 .platform_data = &_nand_ctrl, 1815 }, 1816}; 1817 1818 1819static int nandc_init(void) 1820{ 1821 int ret; 1822 printk(KERN_INFO "%s, Version %s (c) Broadcom Inc. 2012\n", 1823 DRV_DESC, DRV_VERSION ); 1824 1825 if( (ret = platform_driver_register( &nandc_driver ))) 1826 return ret; 1827 return 0; 1828} 1829 1830static int nandc_dev_start(void) 1831{ 1832 printk("%s:\n", __func__); 1833 return platform_device_register( &platform_nand_devices ); 1834} 1835 1836module_init(nandc_init); 1837 1838/* Device init myst be delayed to let "mtd" module initialize before it does */ 1839/* I wish there was a way to explicitly declare dependencies */ 1840late_initcall( nandc_dev_start ); 1841 1842MODULE_LICENSE("GPL"); 1843MODULE_DESCRIPTION(DRV_DESC); 1844MODULE_ALIAS("platform:" DRV_NAME); 1845