Lines Matching refs:snf

13 // like the following: (sizeof(FDM + ECC) = snf->nfi_cfg.spare_size)
35 // 1. Always set snf->autofmt = true in prepare_io_req (even when ECC is off).
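The truncated comment at line 13 describes the controller's raw page layout: sector data and its FDM+ECC spare are interleaved, as in | Sector1 | FDM1 | ECC1 | Sector2 | FDM2 | ECC2 | ... , with sizeof(FDM + ECC) equal to snf->nfi_cfg.spare_size. A minimal sketch of the offsets that layout implies (raw_sector_offs and raw_fdm_offs are hypothetical helpers, not part of the driver):

    /* Hypothetical: byte offset of sector i's main data in the raw page. */
    static u32 raw_sector_offs(struct mtk_snand *snf, u32 i)
    {
    	/* each sector occupies its data plus FDM+ECC (= spare_size) */
    	return i * (snf->caps->sector_size + snf->nfi_cfg.spare_size);
    }

    /* Hypothetical: the FDM bytes immediately follow the sector data. */
    static u32 raw_fdm_offs(struct mtk_snand *snf, u32 i)
    {
    	return raw_sector_offs(snf, i) + snf->caps->sector_size;
    }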
328 static inline int snand_prepare_bouncebuf(struct mtk_snand *snf, size_t size)
330 if (snf->buf_len >= size)
332 kfree(snf->buf);
333 snf->buf = kmalloc(size, GFP_KERNEL);
334 if (!snf->buf)
336 snf->buf_len = size;
337 memset(snf->buf, 0xff, snf->buf_len);
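The listing drops lines that do not mention snf; filling in the obvious return paths, the complete grow-only bounce-buffer helper plausibly reads:

    static inline int snand_prepare_bouncebuf(struct mtk_snand *snf, size_t size)
    {
    	/* reuse the existing buffer if it is already large enough */
    	if (snf->buf_len >= size)
    		return 0;
    	kfree(snf->buf);
    	snf->buf = kmalloc(size, GFP_KERNEL);
    	if (!snf->buf)
    		return -ENOMEM;
    	snf->buf_len = size;
    	/* 0xff so untouched bytes look like erased flash */
    	memset(snf->buf, 0xff, snf->buf_len);
    	return 0;
    }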
341 static inline u32 nfi_read32(struct mtk_snand *snf, u32 reg)
343 return readl(snf->nfi_base + reg);
346 static inline void nfi_write32(struct mtk_snand *snf, u32 reg, u32 val)
348 writel(val, snf->nfi_base + reg);
351 static inline void nfi_write16(struct mtk_snand *snf, u32 reg, u16 val)
353 writew(val, snf->nfi_base + reg);
356 static inline void nfi_rmw32(struct mtk_snand *snf, u32 reg, u32 clr, u32 set)
360 val = readl(snf->nfi_base + reg);
363 writel(val, snf->nfi_base + reg);
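Only the readl/writel lines of nfi_rmw32 reference snf; the step in between is presumably the conventional read-modify-write body:

    static void nfi_rmw32(struct mtk_snand *snf, u32 reg, u32 clr, u32 set)
    {
    	u32 val;

    	val = readl(snf->nfi_base + reg);
    	val &= ~clr;	/* clear the requested bits first */
    	val |= set;	/* then set the new ones */
    	writel(val, snf->nfi_base + reg);
    }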
366 static void nfi_read_data(struct mtk_snand *snf, u32 reg, u8 *data, u32 len)
372 val = nfi_read32(snf, i & ~(es - 1));
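nfi_read_data pulls byte-granular lengths out of word-only registers; only line 372 matched, but the surrounding loop is plausibly the usual aligned-word-then-peel pattern (es = sizeof(u32)):

    static void nfi_read_data(struct mtk_snand *snf, u32 reg, u8 *data, u32 len)
    {
    	u32 i, val = 0, es = sizeof(u32);

    	for (i = reg; i < reg + len; i++) {
    		/* fetch a fresh aligned word on the first byte and at
    		 * every word boundary */
    		if (i == reg || i % es == 0)
    			val = nfi_read32(snf, i & ~(es - 1));
    		/* peel off the byte at this offset within the word */
    		*data++ = (u8)(val >> (8 * (i % es)));
    	}
    }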
378 static int mtk_nfi_reset(struct mtk_snand *snf)
383 nfi_write32(snf, NFI_CON, CON_FIFO_FLUSH | CON_NFI_RST);
385 ret = readw_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
386 !(val & snf->caps->mastersta_mask), 0,
389 dev_err(snf->dev, "NFI master is still busy after reset\n");
393 ret = readl_poll_timeout(snf->nfi_base + NFI_STA, val,
394 !(val & (NFI_FSM | snf->caps->nandfsm_mask)), 0,
397 dev_err(snf->dev, "Failed to reset NFI\n");
401 fifo_mask = ((snf->caps->fifo_size - 1) << FIFO_RD_REMAIN_S) |
402 ((snf->caps->fifo_size - 1) << FIFO_WR_REMAIN_S);
403 ret = readw_poll_timeout(snf->nfi_base + NFI_FIFOSTA, val,
406 dev_err(snf->dev, "NFI FIFOs are not empty\n");
413 static int mtk_snand_mac_reset(struct mtk_snand *snf)
418 nfi_rmw32(snf, SNF_MISC_CTL, 0, SW_RST);
420 ret = readl_poll_timeout(snf->nfi_base + SNF_STA_CTL1, val,
423 dev_err(snf->dev, "Failed to reset SNFI MAC\n");
425 nfi_write32(snf, SNF_MISC_CTL,
431 static int mtk_snand_mac_trigger(struct mtk_snand *snf, u32 outlen, u32 inlen)
436 nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN);
437 nfi_write32(snf, SNF_MAC_OUTL, outlen);
438 nfi_write32(snf, SNF_MAC_INL, inlen);
440 nfi_write32(snf, SNF_MAC_CTL, SF_MAC_EN | SF_TRIG);
442 ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
445 dev_err(snf->dev, "Timed out waiting for WIP_READY\n");
449 ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val, !(val & WIP),
452 dev_err(snf->dev, "Timed out waiting for WIP cleared\n");
455 nfi_write32(snf, SNF_MAC_CTL, 0);
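Both poll conditions sit on continuation lines without snf; judging from the error strings, the first wait is presumably for WIP_READY to rise (the second, shown at line 449, waits for WIP to clear):

    	/* wait for the MAC to accept the command */
    	ret = readl_poll_timeout(snf->nfi_base + SNF_MAC_CTL, val,
    				 val & WIP_READY, 0, SNFI_POLL_INTERVAL);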
460 static int mtk_snand_mac_io(struct mtk_snand *snf, const struct spi_mem_op *op)
477 mtk_snand_mac_reset(snf);
483 nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
492 nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
499 nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
508 nfi_write32(snf, SNF_GPRAM + reg_offs - 3, val);
515 nfi_write32(snf, SNF_GPRAM + (reg_offs & ~3), val);
518 dev_dbg(snf->dev, "%d: %08X", i,
519 nfi_read32(snf, SNF_GPRAM + i));
521 dev_dbg(snf->dev, "SNF TX: %u RX: %u", reg_offs, rx_len);
523 ret = mtk_snand_mac_trigger(snf, reg_offs, rx_len);
530 nfi_read_data(snf, SNF_GPRAM + reg_offs, rx_buf, rx_len);
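mtk_snand_mac_io marshals a spi_mem_op through GPRAM: opcode, address, and dummy bytes are packed at increasing reg_offs, the transfer is triggered with those lengths, and RX data is copied back from GPRAM just past the TX bytes. A hypothetical caller (not from the driver) issuing Read ID over single-wire SPI, using the standard macros from <linux/spi/spi-mem.h>:

    /* Hypothetical usage: read the 3-byte JEDEC ID (opcode 9Fh). */
    static int example_read_id(struct mtk_snand *snf, u8 id[3])
    {
    	struct spi_mem_op op =
    		SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
    			   SPI_MEM_OP_NO_ADDR,
    			   SPI_MEM_OP_NO_DUMMY,
    			   SPI_MEM_OP_DATA_IN(3, id, 1));

    	return mtk_snand_mac_io(snf, &op);
    }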
534 static int mtk_snand_setup_pagefmt(struct mtk_snand *snf, u32 page_size,
544 if (snf->nfi_cfg.page_size == page_size &&
545 snf->nfi_cfg.oob_size == oob_size)
548 nsectors = page_size / snf->caps->sector_size;
549 if (nsectors > snf->caps->max_sectors) {
550 dev_err(snf->dev, "too many sectors required.\n");
554 if (snf->caps->sector_size == 512) {
567 if (snf->caps->sector_size == 512)
573 if (snf->caps->sector_size == 512)
579 if (snf->caps->sector_size == 512)
588 dev_err(snf->dev, "unsupported page size.\n");
595 if (snf->caps->sector_size == 1024)
598 for (i = snf->caps->num_spare_size - 1; i >= 0; i--) {
599 if (snf->caps->spare_sizes[i] <= spare_size) {
600 spare_size = snf->caps->spare_sizes[i];
601 if (snf->caps->sector_size == 1024)
609 dev_err(snf->dev, "unsupported spare size: %u\n", spare_size);
613 nfi_write32(snf, NFI_PAGEFMT,
614 (snf->caps->fdm_ecc_size << NFI_FDM_ECC_NUM_S) |
615 (snf->caps->fdm_size << NFI_FDM_NUM_S) |
620 snf->nfi_cfg.page_size = page_size;
621 snf->nfi_cfg.oob_size = oob_size;
622 snf->nfi_cfg.nsectors = nsectors;
623 snf->nfi_cfg.spare_size = spare_size;
625 dev_dbg(snf->dev, "page format: (%u + %u) * %u\n",
626 snf->caps->sector_size, spare_size, nsectors);
627 return snand_prepare_bouncebuf(snf, page_size + oob_size);
629 dev_err(snf->dev, "page size %u + %u is not supported\n", page_size,
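Worked example (values assumed for illustration): a 2048-byte page with 64 bytes of OOB on a controller with 512-byte sectors gives nsectors = 2048 / 512 = 4 and a per-sector spare of 64 / 4 = 16 bytes; the descending scan over caps->spare_sizes then picks the largest supported entry not exceeding 16. The paired sector_size == 1024 branches around lines 595-601 presumably compare at half scale and double the chosen value, the spare-size table being specified per 512-byte step.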
662 struct mtk_snand *snf = nand_to_mtk_snand(nand);
673 ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize,
696 strength = desired_correction / snf->nfi_cfg.nsectors;
700 ecc_cfg->sectors = snf->nfi_cfg.nsectors;
701 ecc_cfg->len = snf->caps->sector_size + snf->caps->fdm_ecc_size;
704 parity_bits = mtk_ecc_get_parity_bits(snf->ecc);
705 max_ecc_bytes = snf->nfi_cfg.spare_size - snf->caps->fdm_size;
707 mtk_ecc_adjust_strength(snf->ecc, &ecc_cfg->strength);
716 mtk_ecc_adjust_strength(snf->ecc, &s_next);
727 conf->step_size = snf->caps->sector_size;
731 dev_warn(snf->dev, "unable to fulfill ECC strength of %u bits.\n",
733 dev_info(snf->dev, "ECC strength: %u bits per %u bytes\n",
734 ecc_cfg->strength, snf->caps->sector_size);
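The strength negotiation is bounded by the spare bytes left after the FDM. With the illustrative geometry above (spare_size = 16, fdm_size = 8), max_ecc_bytes = 8; assuming 14 parity bits per corrected bit (one of the values mtk_ecc_get_parity_bits can return, depending on the SoC), a strength of s costs DIV_ROUND_UP(s * 14, 8) bytes, so s = 4 costs 7 bytes and fits, while s = 8 would need 14 bytes and does not. mtk_ecc_adjust_strength then rounds s to the nearest strength the ECC engine actually supports, and the s_next loop walks the strength down until the parity fits.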
749 struct mtk_snand *snf = nand_to_mtk_snand(nand);
753 ret = mtk_snand_setup_pagefmt(snf, nand->memorg.pagesize,
757 snf->autofmt = true;
758 snf->ecc_cfg = ecc_cfg;
765 struct mtk_snand *snf = nand_to_mtk_snand(nand);
768 snf->ecc_cfg = NULL;
769 snf->autofmt = false;
773 if (snf->ecc_stats.failed)
774 mtd->ecc_stats.failed += snf->ecc_stats.failed;
775 mtd->ecc_stats.corrected += snf->ecc_stats.corrected;
776 return snf->ecc_stats.failed ? -EBADMSG : snf->ecc_stats.bitflips;
786 static void mtk_snand_read_fdm(struct mtk_snand *snf, u8 *buf)
792 for (i = 0; i < snf->nfi_cfg.nsectors; i++) {
793 vall = nfi_read32(snf, NFI_FDML(i));
794 valm = nfi_read32(snf, NFI_FDMM(i));
796 for (j = 0; j < snf->caps->fdm_size; j++)
799 oobptr += snf->caps->fdm_size;
803 static void mtk_snand_write_fdm(struct mtk_snand *snf, const u8 *buf)
805 u32 fdm_size = snf->caps->fdm_size;
810 for (i = 0; i < snf->nfi_cfg.nsectors; i++) {
823 nfi_write32(snf, NFI_FDML(i), vall);
824 nfi_write32(snf, NFI_FDMM(i), valm);
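Each sector's FDM bytes live in a register pair: NFI_FDML(i) carries bytes 0-3 and NFI_FDMM(i) bytes 4-7, as the read side's unpack at line 796 ff. shows (oobptr[j] = (j >= 4 ? valm : vall) >> ((j % 4) * 8)). The elided pack loop inside mtk_snand_write_fdm's per-sector loop plausibly mirrors it:

    	/* pack up to 8 FDM bytes, padding a short FDM with 0xff */
    	vall = 0;
    	valm = 0;
    	for (j = 0; j < 8; j++) {
    		if (j < 4)
    			vall |= (j < fdm_size ? oobptr[j] : 0xff) << (j * 8);
    		else
    			valm |= (j < fdm_size ? oobptr[j] : 0xff) << ((j - 4) * 8);
    	}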
830 static void mtk_snand_bm_swap(struct mtk_snand *snf, u8 *buf)
834 if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1)
839 buf_bbm_pos = snf->nfi_cfg.page_size -
840 (snf->nfi_cfg.nsectors - 1) * snf->nfi_cfg.spare_size;
841 fdm_bbm_pos = snf->nfi_cfg.page_size +
842 (snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size;
844 swap(snf->buf[fdm_bbm_pos], buf[buf_bbm_pos]);
847 static void mtk_snand_fdm_bm_swap(struct mtk_snand *snf)
851 if (!snf->caps->bbm_swap || snf->nfi_cfg.nsectors == 1)
855 fdm_bbm_pos1 = snf->nfi_cfg.page_size;
856 fdm_bbm_pos2 = snf->nfi_cfg.page_size +
857 (snf->nfi_cfg.nsectors - 1) * snf->caps->fdm_size;
858 swap(snf->buf[fdm_bbm_pos1], snf->buf[fdm_bbm_pos2]);
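With the illustrative geometry from above (page 2048, nsectors 4, spare 16, fdm 8): buf_bbm_pos = 2048 - 3 * 16 = 2000 and fdm_bbm_pos = 2048 + 3 * 8 = 2072. mtk_snand_bm_swap exchanges the data byte that physically lands at the start of the flash's spare area with the corresponding FDM byte, and mtk_snand_fdm_bm_swap then moves it to the front of the FDM region at offset 2048, so the factory bad-block marker appears where MTD expects it despite the interleaved layout.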
861 static int mtk_snand_read_page_cache(struct mtk_snand *snf,
864 u8 *buf = snf->buf;
865 u8 *buf_fdm = buf + snf->nfi_cfg.page_size;
872 u32 dma_len = snf->buf_len;
877 if (snf->autofmt) {
881 dma_len = snf->nfi_cfg.page_size;
890 last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size);
896 if (rd_offset == 0 && op->data.nbytes >= snf->nfi_cfg.page_size)
899 mtk_snand_mac_reset(snf);
900 mtk_nfi_reset(snf);
903 nfi_write32(snf, SNF_RD_CTL2,
908 nfi_write32(snf, SNF_RD_CTL3, op_addr);
920 nfi_rmw32(snf, SNF_MISC_CTL, DATA_READ_MODE,
924 rd_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) *
925 snf->nfi_cfg.nsectors;
926 nfi_write32(snf, SNF_MISC_CTL2,
930 nfi_write16(snf, NFI_CNFG,
934 nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S));
936 buf_dma = dma_map_single(snf->dev, buf, dma_len, DMA_FROM_DEVICE);
937 ret = dma_mapping_error(snf->dev, buf_dma);
939 dev_err(snf->dev, "DMA mapping failed.\n");
942 nfi_write32(snf, NFI_STRADDR, buf_dma);
944 snf->ecc_cfg->op = ECC_DECODE;
945 ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg);
950 nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_READ);
951 reinit_completion(&snf->op_done);
954 nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_READ);
957 nfi_rmw32(snf, NFI_CON, 0, CON_BRD);
958 nfi_write16(snf, NFI_STRDATA, STR_DATA);
961 &snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) {
962 dev_err(snf->dev, "DMA timed out for reading from cache.\n");
968 ret = readl_poll_timeout(snf->nfi_base + NFI_BYTELEN, val,
969 BUS_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0,
972 dev_err(snf->dev, "Timed out waiting for BUS_SEC_CNTR\n");
977 ret = readl_poll_timeout(snf->nfi_base + NFI_MASTERSTA, val,
978 !(val & snf->caps->mastersta_mask), 0,
981 dev_err(snf->dev, "Timed out waiting for the bus to become idle\n");
986 ret = mtk_ecc_wait_done(snf->ecc, ECC_DECODE);
988 dev_err(snf->dev, "Timed out waiting for ECC decode to finish\n");
992 mtk_ecc_get_stats(snf->ecc, &snf->ecc_stats,
993 snf->nfi_cfg.nsectors);
996 dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE);
998 if (snf->autofmt) {
999 mtk_snand_read_fdm(snf, buf_fdm);
1000 if (snf->caps->bbm_swap) {
1001 mtk_snand_bm_swap(snf, buf);
1002 mtk_snand_fdm_bm_swap(snf);
1007 if (nfi_read32(snf, NFI_STA) & READ_EMPTY) {
1009 snf->ecc_stats.bitflips = 0;
1010 snf->ecc_stats.failed = 0;
1011 snf->ecc_stats.corrected = 0;
1014 u32 cap_len = snf->buf_len - snf->nfi_cfg.page_size;
1015 u32 req_left = op->data.nbytes - snf->nfi_cfg.page_size;
1018 memcpy(op->data.buf.in + snf->nfi_cfg.page_size,
1021 } else if (rd_offset < snf->buf_len) {
1022 u32 cap_len = snf->buf_len - rd_offset;
1026 memcpy(op->data.buf.in, snf->buf + rd_offset, cap_len);
1031 mtk_ecc_disable(snf->ecc);
1036 dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_FROM_DEVICE);
1039 nfi_write32(snf, NFI_CON, 0);
1040 nfi_write16(snf, NFI_CNFG, 0);
1043 nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_READ_DONE);
1044 nfi_write32(snf, SNF_STA_CTL1, 0);
1047 nfi_read32(snf, NFI_INTR_STA);
1048 nfi_write32(snf, NFI_INTR_EN, 0);
1050 nfi_rmw32(snf, SNF_MISC_CTL, DATARD_CUSTOM_EN, 0);
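The custom read sequence, as the matched lines lay it out: reset the MAC and NFI; program the read command and address into SNF_RD_CTL2/3 and the bus mode into SNF_MISC_CTL; set the total burst length in SNF_MISC_CTL2 and the sector count in NFI_CON; map the bounce buffer DMA_FROM_DEVICE and point NFI_STRADDR at it; enable ECC decode; arm NFI_IRQ_CUS_READ, then kick the transfer with NFI_CMD_DUMMY_READ plus CON_BRD/STR_DATA; wait for the completion, then for BUS_SEC_CNTR to reach nsectors and the master to go idle; finally unmap, read back the FDM registers, undo the BBM swaps, handle the READ_EMPTY (erased page) case, and copy the requested span into op->data.buf.in.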
1054 static int mtk_snand_write_page_cache(struct mtk_snand *snf,
1064 u32 dma_len = snf->buf_len;
1069 if (snf->autofmt) {
1073 dma_len = snf->nfi_cfg.page_size;
1078 last_bit = fls(snf->nfi_cfg.page_size + snf->nfi_cfg.oob_size);
1083 mtk_snand_mac_reset(snf);
1084 mtk_nfi_reset(snf);
1087 memset(snf->buf, 0xff, wr_offset);
1089 cap_len = snf->buf_len - wr_offset;
1092 memcpy(snf->buf + wr_offset, op->data.buf.out, cap_len);
1093 if (snf->autofmt) {
1094 if (snf->caps->bbm_swap) {
1095 mtk_snand_fdm_bm_swap(snf);
1096 mtk_snand_bm_swap(snf, snf->buf);
1098 mtk_snand_write_fdm(snf, snf->buf + snf->nfi_cfg.page_size);
1102 nfi_write32(snf, SNF_PG_CTL1, (op->cmd.opcode << PG_LOAD_CMD_S));
1105 nfi_write32(snf, SNF_PG_CTL2, op_addr);
1111 nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_X4_EN,
1115 wr_bytes = (snf->nfi_cfg.spare_size + snf->caps->sector_size) *
1116 snf->nfi_cfg.nsectors;
1117 nfi_write32(snf, SNF_MISC_CTL2,
1121 nfi_write16(snf, NFI_CNFG,
1125 nfi_write32(snf, NFI_CON, (snf->nfi_cfg.nsectors << CON_SEC_NUM_S));
1126 buf_dma = dma_map_single(snf->dev, snf->buf, dma_len, DMA_TO_DEVICE);
1127 ret = dma_mapping_error(snf->dev, buf_dma);
1129 dev_err(snf->dev, "DMA mapping failed.\n");
1132 nfi_write32(snf, NFI_STRADDR, buf_dma);
1134 snf->ecc_cfg->op = ECC_ENCODE;
1135 ret = mtk_ecc_enable(snf->ecc, snf->ecc_cfg);
1140 nfi_write32(snf, NFI_INTR_EN, NFI_IRQ_INTR_EN | NFI_IRQ_CUS_PG);
1141 reinit_completion(&snf->op_done);
1145 nfi_write16(snf, NFI_CMD, NFI_CMD_DUMMY_WRITE);
1148 nfi_rmw32(snf, NFI_CON, 0, CON_BWR);
1149 nfi_write16(snf, NFI_STRDATA, STR_DATA);
1152 &snf->op_done, usecs_to_jiffies(SNFI_POLL_INTERVAL))) {
1153 dev_err(snf->dev, "DMA timed out for program load.\n");
1159 ret = readl_poll_timeout(snf->nfi_base + NFI_ADDRCNTR, val,
1160 NFI_SEC_CNTR(val) >= snf->nfi_cfg.nsectors, 0,
1163 dev_err(snf->dev, "Timed out waiting for NFI_SEC_CNTR\n");
1167 mtk_ecc_disable(snf->ecc);
1169 dma_unmap_single(snf->dev, buf_dma, dma_len, DMA_TO_DEVICE);
1172 nfi_write32(snf, NFI_CON, 0);
1173 nfi_write16(snf, NFI_CNFG, 0);
1176 nfi_rmw32(snf, SNF_STA_CTL1, 0, CUS_PG_DONE);
1177 nfi_write32(snf, SNF_STA_CTL1, 0);
1180 nfi_read32(snf, NFI_INTR_STA);
1181 nfi_write32(snf, NFI_INTR_EN, 0);
1183 nfi_rmw32(snf, SNF_MISC_CTL, PG_LOAD_CUSTOM_EN, 0);
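The program-load path is the mirror image: data is staged into the bounce buffer (0xff-padded before wr_offset), BBM-swapped, and the FDM registers loaded before the transfer; ECC runs in ECC_ENCODE mode, the buffer is mapped DMA_TO_DEVICE, the trigger is CON_BWR plus NFI_CMD_DUMMY_WRITE, and completion is confirmed against NFI_SEC_CNTR in NFI_ADDRCNTR rather than NFI_BYTELEN.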
1312 struct mtk_snand *snf = id;
1315 sta = nfi_read32(snf, NFI_INTR_STA);
1316 ien = nfi_read32(snf, NFI_INTR_EN);
1321 nfi_write32(snf, NFI_INTR_EN, 0);
1322 complete(&snf->op_done);
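The interrupt handler's elided lines are presumably the standard shared-IRQ check and return codes; a sketch consistent with the matched lines:

    static irqreturn_t mtk_snand_irq(int irq, void *id)
    {
    	struct mtk_snand *snf = id;
    	u32 sta, ien;

    	sta = nfi_read32(snf, NFI_INTR_STA);
    	ien = nfi_read32(snf, NFI_INTR_EN);

    	if (!(sta & ien))
    		return IRQ_NONE;	/* not our interrupt */

    	/* one-shot: mask further interrupts and wake the waiter */
    	nfi_write32(snf, NFI_INTR_EN, 0);
    	complete(&snf->op_done);
    	return IRQ_HANDLED;
    }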