Lines Matching full:ctrl
191 struct brcmnand_controller *ctrl; member
441 static inline u32 nand_readreg(struct brcmnand_controller *ctrl, u32 offs) in nand_readreg() argument
443 return brcmnand_readl(ctrl->nand_base + offs); in nand_readreg()
446 static inline void nand_writereg(struct brcmnand_controller *ctrl, u32 offs, in nand_writereg() argument
449 brcmnand_writel(val, ctrl->nand_base + offs); in nand_writereg()
452 static int brcmnand_revision_init(struct brcmnand_controller *ctrl) in brcmnand_revision_init() argument
458 ctrl->nand_version = nand_readreg(ctrl, 0) & 0xffff; in brcmnand_revision_init()
461 if (ctrl->nand_version < 0x0400) { in brcmnand_revision_init()
462 dev_err(ctrl->dev, "version %#x not supported\n", in brcmnand_revision_init()
463 ctrl->nand_version); in brcmnand_revision_init()
468 if (ctrl->nand_version >= 0x0702) in brcmnand_revision_init()
469 ctrl->reg_offsets = brcmnand_regs_v72; in brcmnand_revision_init()
470 else if (ctrl->nand_version >= 0x0701) in brcmnand_revision_init()
471 ctrl->reg_offsets = brcmnand_regs_v71; in brcmnand_revision_init()
472 else if (ctrl->nand_version >= 0x0600) in brcmnand_revision_init()
473 ctrl->reg_offsets = brcmnand_regs_v60; in brcmnand_revision_init()
474 else if (ctrl->nand_version >= 0x0500) in brcmnand_revision_init()
475 ctrl->reg_offsets = brcmnand_regs_v50; in brcmnand_revision_init()
476 else if (ctrl->nand_version >= 0x0400) in brcmnand_revision_init()
477 ctrl->reg_offsets = brcmnand_regs_v40; in brcmnand_revision_init()
480 if (ctrl->nand_version >= 0x0701) in brcmnand_revision_init()
481 ctrl->reg_spacing = 0x14; in brcmnand_revision_init()
483 ctrl->reg_spacing = 0x10; in brcmnand_revision_init()
486 if (ctrl->nand_version >= 0x0701) { in brcmnand_revision_init()
487 ctrl->cs_offsets = brcmnand_cs_offsets_v71; in brcmnand_revision_init()
489 ctrl->cs_offsets = brcmnand_cs_offsets; in brcmnand_revision_init()
492 if (ctrl->nand_version <= 0x0500) in brcmnand_revision_init()
493 ctrl->cs0_offsets = brcmnand_cs_offsets_cs0; in brcmnand_revision_init()
497 if (ctrl->nand_version >= 0x0701) { in brcmnand_revision_init()
499 ctrl->max_page_size = 16 * 1024; in brcmnand_revision_init()
500 ctrl->max_block_size = 2 * 1024 * 1024; in brcmnand_revision_init()
502 ctrl->page_sizes = page_sizes; in brcmnand_revision_init()
503 if (ctrl->nand_version >= 0x0600) in brcmnand_revision_init()
504 ctrl->block_sizes = block_sizes_v6; in brcmnand_revision_init()
506 ctrl->block_sizes = block_sizes_v4; in brcmnand_revision_init()
508 if (ctrl->nand_version < 0x0400) { in brcmnand_revision_init()
509 ctrl->max_page_size = 4096; in brcmnand_revision_init()
510 ctrl->max_block_size = 512 * 1024; in brcmnand_revision_init()
515 if (ctrl->nand_version >= 0x0702) in brcmnand_revision_init()
516 ctrl->max_oob = 128; in brcmnand_revision_init()
517 else if (ctrl->nand_version >= 0x0600) in brcmnand_revision_init()
518 ctrl->max_oob = 64; in brcmnand_revision_init()
519 else if (ctrl->nand_version >= 0x0500) in brcmnand_revision_init()
520 ctrl->max_oob = 32; in brcmnand_revision_init()
522 ctrl->max_oob = 16; in brcmnand_revision_init()
525 if (ctrl->nand_version >= 0x0600 && ctrl->nand_version != 0x0601) in brcmnand_revision_init()
526 ctrl->features |= BRCMNAND_HAS_PREFETCH; in brcmnand_revision_init()
532 if (ctrl->nand_version >= 0x0700) in brcmnand_revision_init()
533 ctrl->features |= BRCMNAND_HAS_CACHE_MODE; in brcmnand_revision_init()
535 if (ctrl->nand_version >= 0x0500) in brcmnand_revision_init()
536 ctrl->features |= BRCMNAND_HAS_1K_SECTORS; in brcmnand_revision_init()
538 if (ctrl->nand_version >= 0x0700) in brcmnand_revision_init()
539 ctrl->features |= BRCMNAND_HAS_WP; in brcmnand_revision_init()
541 else if (of_property_read_bool(ctrl->dev->of_node, "brcm,nand-has-wp")) in brcmnand_revision_init()
543 else if (dev_read_bool(ctrl->dev, "brcm,nand-has-wp")) in brcmnand_revision_init()
545 ctrl->features |= BRCMNAND_HAS_WP; in brcmnand_revision_init()
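
brcmnand_revision_init() above derives everything from the 16-bit revision word: register layout tables, maximum page/block/OOB sizes and the feature flags. A minimal standalone sketch of that threshold pattern for the OOB limit and the feature bits follows (plain C, not the driver code; the flag names are hypothetical and the device-tree fallback for "brcm,nand-has-wp" is omitted):

#include <stdint.h>
#include <stdio.h>

#define HAS_PREFETCH   (1u << 0)
#define HAS_CACHE_MODE (1u << 1)
#define HAS_1K_SECTORS (1u << 2)
#define HAS_WP         (1u << 3)

/* Map a controller revision (e.g. 0x0701 for v7.1) to the max OOB bytes
 * and the feature bits, mirroring the thresholds in the listing above. */
static void caps_from_version(uint32_t ver, uint32_t *max_oob, uint32_t *features)
{
    if (ver >= 0x0702)
        *max_oob = 128;
    else if (ver >= 0x0600)
        *max_oob = 64;
    else if (ver >= 0x0500)
        *max_oob = 32;
    else
        *max_oob = 16;

    *features = 0;
    if (ver >= 0x0600 && ver != 0x0601)
        *features |= HAS_PREFETCH;
    if (ver >= 0x0700)
        *features |= HAS_CACHE_MODE | HAS_WP;
    if (ver >= 0x0500)
        *features |= HAS_1K_SECTORS;
}

int main(void)
{
    uint32_t oob, feat;

    caps_from_version(0x0701, &oob, &feat);
    printf("v7.1: max_oob=%u features=0x%x\n", (unsigned)oob, (unsigned)feat);
    return 0;
}
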
550 static inline u32 brcmnand_read_reg(struct brcmnand_controller *ctrl, in brcmnand_read_reg() argument
553 u16 offs = ctrl->reg_offsets[reg]; in brcmnand_read_reg()
556 return nand_readreg(ctrl, offs); in brcmnand_read_reg()
561 static inline void brcmnand_write_reg(struct brcmnand_controller *ctrl, in brcmnand_write_reg() argument
564 u16 offs = ctrl->reg_offsets[reg]; in brcmnand_write_reg()
567 nand_writereg(ctrl, offs, val); in brcmnand_write_reg()
570 static inline void brcmnand_rmw_reg(struct brcmnand_controller *ctrl, in brcmnand_rmw_reg() argument
574 u32 tmp = brcmnand_read_reg(ctrl, reg); in brcmnand_rmw_reg()
578 brcmnand_write_reg(ctrl, reg, tmp); in brcmnand_rmw_reg()
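
The brcmnand_rmw_reg() body between the read at line 574 and the write at line 578 is not shown in the listing. The sketch below assumes it clears the mask and ORs in the shifted value, which is consistent with the threshold call at line 642 passing (bits - 1) << shift as the mask together with the same shift:

#include <stdint.h>
#include <stdio.h>

static uint32_t fake_reg;                    /* stands in for a controller register */

static uint32_t reg_read(void)        { return fake_reg; }
static void     reg_write(uint32_t v) { fake_reg = v; }

/* Read-modify-write sketch: assumed to clear 'mask' then OR in 'val << shift'. */
static void rmw(uint32_t mask, unsigned int shift, uint32_t val)
{
    uint32_t tmp = reg_read();

    tmp &= ~mask;
    tmp |= val << shift;
    reg_write(tmp);
}

int main(void)
{
    fake_reg = 0xffffffff;
    rmw(0x1f << 8, 8, 0x7);                  /* set a 5-bit field at bit 8 to 7 */
    printf("reg = 0x%08x\n", (unsigned)fake_reg);   /* prints 0xffffe7ff */
    return 0;
}
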
581 static inline u32 brcmnand_read_fc(struct brcmnand_controller *ctrl, int word) in brcmnand_read_fc() argument
583 return __raw_readl(ctrl->nand_fc + word * 4); in brcmnand_read_fc()
586 static inline void brcmnand_write_fc(struct brcmnand_controller *ctrl, in brcmnand_write_fc() argument
589 __raw_writel(val, ctrl->nand_fc + word * 4); in brcmnand_write_fc()
592 static inline u16 brcmnand_cs_offset(struct brcmnand_controller *ctrl, int cs, in brcmnand_cs_offset() argument
595 u16 offs_cs0 = ctrl->reg_offsets[BRCMNAND_CS0_BASE]; in brcmnand_cs_offset()
596 u16 offs_cs1 = ctrl->reg_offsets[BRCMNAND_CS1_BASE]; in brcmnand_cs_offset()
599 if (cs == 0 && ctrl->cs0_offsets) in brcmnand_cs_offset()
600 cs_offs = ctrl->cs0_offsets[reg]; in brcmnand_cs_offset()
602 cs_offs = ctrl->cs_offsets[reg]; in brcmnand_cs_offset()
605 return offs_cs1 + (cs - 1) * ctrl->reg_spacing + cs_offs; in brcmnand_cs_offset()
607 return offs_cs0 + cs * ctrl->reg_spacing + cs_offs; in brcmnand_cs_offset()
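
brcmnand_cs_offset() addresses per-chip-select registers as a base plus cs * reg_spacing plus a per-register offset, with a separate CS1 base on some layouts. The branch that selects between the two bases is elided in the listing; the sketch below assumes it is taken when cs is non-zero and a CS1 base exists, and the base/spacing values used are purely illustrative:

#include <stdint.h>
#include <stdio.h>

/* Per-CS register addressing: each chip select gets a block of registers
 * 'spacing' bytes apart.  The condition for using the CS1 base is assumed. */
static uint16_t cs_reg_offset(uint16_t cs0_base, uint16_t cs1_base,
                              uint16_t spacing, int cs, uint16_t reg_off)
{
    if (cs && cs1_base)                      /* assumed branch condition */
        return cs1_base + (cs - 1) * spacing + reg_off;
    return cs0_base + cs * spacing + reg_off;
}

int main(void)
{
    /* hypothetical bases and spacing, just to show the arithmetic */
    printf("CS0 reg: 0x%x\n", cs_reg_offset(0x50, 0xd0, 0x14, 0, 0x08));
    printf("CS2 reg: 0x%x\n", cs_reg_offset(0x50, 0xd0, 0x14, 2, 0x08));
    return 0;
}
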
610 static inline u32 brcmnand_count_corrected(struct brcmnand_controller *ctrl) in brcmnand_count_corrected() argument
612 if (ctrl->nand_version < 0x0600) in brcmnand_count_corrected()
614 return brcmnand_read_reg(ctrl, BRCMNAND_CORR_COUNT); in brcmnand_count_corrected()
619 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_wr_corr_thresh() local
624 if (ctrl->nand_version >= 0x0702) in brcmnand_wr_corr_thresh()
626 else if (ctrl->nand_version >= 0x0600) in brcmnand_wr_corr_thresh()
628 else if (ctrl->nand_version >= 0x0500) in brcmnand_wr_corr_thresh()
633 if (ctrl->nand_version >= 0x0702) { in brcmnand_wr_corr_thresh()
637 } else if (ctrl->nand_version >= 0x0600) { in brcmnand_wr_corr_thresh()
642 brcmnand_rmw_reg(ctrl, reg, (bits - 1) << shift, shift, val); in brcmnand_wr_corr_thresh()
645 static inline int brcmnand_cmd_shift(struct brcmnand_controller *ctrl) in brcmnand_cmd_shift() argument
647 if (ctrl->nand_version < 0x0602) in brcmnand_cmd_shift()
676 static inline u32 brcmnand_spare_area_mask(struct brcmnand_controller *ctrl) in brcmnand_spare_area_mask() argument
678 if (ctrl->nand_version >= 0x0702) in brcmnand_spare_area_mask()
680 else if (ctrl->nand_version >= 0x0600) in brcmnand_spare_area_mask()
689 static inline u32 brcmnand_ecc_level_mask(struct brcmnand_controller *ctrl) in brcmnand_ecc_level_mask() argument
691 u32 mask = (ctrl->nand_version >= 0x0600) ? 0x1f : 0x0f; in brcmnand_ecc_level_mask()
696 if (ctrl->nand_version >= 0x0702) in brcmnand_ecc_level_mask()
704 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_set_ecc_enabled() local
705 u16 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL); in brcmnand_set_ecc_enabled()
706 u32 acc_control = nand_readreg(ctrl, offs); in brcmnand_set_ecc_enabled()
715 acc_control &= ~brcmnand_ecc_level_mask(ctrl); in brcmnand_set_ecc_enabled()
718 nand_writereg(ctrl, offs, acc_control); in brcmnand_set_ecc_enabled()
721 static inline int brcmnand_sector_1k_shift(struct brcmnand_controller *ctrl) in brcmnand_sector_1k_shift() argument
723 if (ctrl->nand_version >= 0x0702) in brcmnand_sector_1k_shift()
725 else if (ctrl->nand_version >= 0x0600) in brcmnand_sector_1k_shift()
727 else if (ctrl->nand_version >= 0x0500) in brcmnand_sector_1k_shift()
735 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_get_sector_size_1k() local
736 int shift = brcmnand_sector_1k_shift(ctrl); in brcmnand_get_sector_size_1k()
737 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs, in brcmnand_get_sector_size_1k()
743 return (nand_readreg(ctrl, acc_control_offs) >> shift) & 0x1; in brcmnand_get_sector_size_1k()
748 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_set_sector_size_1k() local
749 int shift = brcmnand_sector_1k_shift(ctrl); in brcmnand_set_sector_size_1k()
750 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs, in brcmnand_set_sector_size_1k()
757 tmp = nand_readreg(ctrl, acc_control_offs); in brcmnand_set_sector_size_1k()
760 nand_writereg(ctrl, acc_control_offs, tmp); in brcmnand_set_sector_size_1k()
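
brcmnand_get_sector_size_1k() and brcmnand_set_sector_size_1k() read and toggle a single ACC_CONTROL bit whose position depends on the controller revision. The actual bit numbers and part of the set path are not visible here, so the sketch below uses placeholder shifts and assumes a plain clear-then-set of that bit:

#include <stdint.h>
#include <stdio.h>

/* Version-dependent bit position for the "1KB sector" flag; the version
 * thresholds mirror the listing, the bit numbers are placeholders. */
static int sector_1k_shift(uint32_t ver)
{
    if (ver >= 0x0702)
        return 25;    /* placeholder */
    else if (ver >= 0x0600)
        return 24;    /* placeholder */
    else if (ver >= 0x0500)
        return 23;    /* placeholder */
    return -1;        /* not supported on older revisions */
}

static int get_sector_1k(uint32_t acc_control, int shift)
{
    return shift < 0 ? 0 : (acc_control >> shift) & 0x1;
}

/* Set path is only partly visible; assumed to clear then set the bit. */
static uint32_t set_sector_1k(uint32_t acc_control, int shift, int val)
{
    if (shift < 0)
        return acc_control;
    acc_control &= ~(1u << shift);
    acc_control |= (uint32_t)(val & 1) << shift;
    return acc_control;
}

int main(void)
{
    uint32_t acc = 0;
    int shift = sector_1k_shift(0x0701);

    acc = set_sector_1k(acc, shift, 1);
    printf("acc=0x%08x sector_1k=%d\n", (unsigned)acc, get_sector_1k(acc, shift));
    return 0;
}
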
772 static int bcmnand_ctrl_poll_status(struct brcmnand_controller *ctrl, in bcmnand_ctrl_poll_status() argument
785 val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS); in bcmnand_ctrl_poll_status()
801 val = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS); in bcmnand_ctrl_poll_status()
809 dev_warn(ctrl->dev, "timeout on status poll (expected %x got %x)\n", in bcmnand_ctrl_poll_status()
815 static inline void brcmnand_set_wp(struct brcmnand_controller *ctrl, bool en) in brcmnand_set_wp() argument
819 brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT, CS_SELECT_NAND_WP, 0, val); in brcmnand_set_wp()
840 static inline bool has_flash_dma(struct brcmnand_controller *ctrl) in has_flash_dma() argument
842 return ctrl->flash_dma_base; in has_flash_dma()
855 static inline void flash_dma_writel(struct brcmnand_controller *ctrl, u8 offs, in flash_dma_writel() argument
858 brcmnand_writel(val, ctrl->flash_dma_base + offs); in flash_dma_writel()
861 static inline u32 flash_dma_readl(struct brcmnand_controller *ctrl, u8 offs) in flash_dma_readl() argument
863 return brcmnand_readl(ctrl->flash_dma_base + offs); in flash_dma_readl()
878 static inline bool is_hamming_ecc(struct brcmnand_controller *ctrl, in is_hamming_ecc() argument
881 if (ctrl->nand_version <= 0x0701) in is_hamming_ecc()
917 if (is_hamming_ecc(host->ctrl, cfg)) { in brcmnand_create_layout()
1022 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_wp() local
1024 if ((ctrl->features & BRCMNAND_HAS_WP) && wp_on == 1) { in brcmnand_wp()
1029 dev_dbg(ctrl->dev, "WP %s\n", wp ? "on" : "off"); in brcmnand_wp()
1034 * make sure ctrl/flash ready before and after in brcmnand_wp()
1037 ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY | in brcmnand_wp()
1044 brcmnand_set_wp(ctrl, wp); in brcmnand_wp()
1047 ret = bcmnand_ctrl_poll_status(ctrl, in brcmnand_wp()
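
brcmnand_wp() brackets the write-protect change with ready polling: wait for controller and flash ready, flip the WP bit in CS_SELECT, then wait again. A simulated sketch of that poll/act/poll sequence follows (bit positions and the retry bound are placeholders; the driver's additional check of the chip's own WP status is not shown in the listing and is omitted):

#include <stdint.h>
#include <stdio.h>

static uint32_t intfc_status;    /* simulated INTFC_STATUS register */
static uint32_t cs_select;       /* simulated CS_SELECT register    */

#define CTRL_RDY  (1u << 31)     /* placeholder bit positions */
#define FLASH_RDY (1u << 30)
#define NAND_WP   (1u << 29)

/* Poll until (status & mask) == expected, with a bounded retry count
 * standing in for the driver's timeout.  Returns 0 on success. */
static int poll_status(uint32_t mask, uint32_t expected, int retries)
{
    while (retries--) {
        if ((intfc_status & mask) == expected)
            return 0;
    }
    fprintf(stderr, "timeout on status poll\n");
    return -1;
}

/* "Poll ready, flip WP, poll ready again" sequence from brcmnand_wp(). */
static int set_write_protect(int wp)
{
    if (poll_status(CTRL_RDY | FLASH_RDY, CTRL_RDY | FLASH_RDY, 1000))
        return -1;

    if (wp)
        cs_select |= NAND_WP;
    else
        cs_select &= ~NAND_WP;

    return poll_status(CTRL_RDY | FLASH_RDY, CTRL_RDY | FLASH_RDY, 1000);
}

int main(void)
{
    intfc_status = CTRL_RDY | FLASH_RDY;     /* pretend the hardware is idle */
    printf("wp off: %s, cs_select=0x%x\n",
           set_write_protect(0) ? "failed" : "ok", (unsigned)cs_select);
    return 0;
}
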
1069 static inline u8 oob_reg_read(struct brcmnand_controller *ctrl, u32 offs) in oob_reg_read() argument
1073 offset0 = ctrl->reg_offsets[BRCMNAND_OOB_READ_BASE]; in oob_reg_read()
1074 offset10 = ctrl->reg_offsets[BRCMNAND_OOB_READ_10_BASE]; in oob_reg_read()
1076 if (offs >= ctrl->max_oob) in oob_reg_read()
1084 return nand_readreg(ctrl, reg_offs) >> (24 - ((offs & 0x03) << 3)); in oob_reg_read()
1087 static inline void oob_reg_write(struct brcmnand_controller *ctrl, u32 offs, in oob_reg_write() argument
1092 offset0 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_BASE]; in oob_reg_write()
1093 offset10 = ctrl->reg_offsets[BRCMNAND_OOB_WRITE_10_BASE]; in oob_reg_write()
1095 if (offs >= ctrl->max_oob) in oob_reg_write()
1103 nand_writereg(ctrl, reg_offs, data); in oob_reg_write()
1108 * @ctrl: NAND controller
1114 static int read_oob_from_regs(struct brcmnand_controller *ctrl, int i, u8 *oob, in read_oob_from_regs() argument
1122 tbytes = max(0, tbytes - (int)ctrl->max_oob); in read_oob_from_regs()
1123 tbytes = min_t(int, tbytes, ctrl->max_oob); in read_oob_from_regs()
1126 oob[j] = oob_reg_read(ctrl, j); in read_oob_from_regs()
1137 static int write_oob_to_regs(struct brcmnand_controller *ctrl, int i, in write_oob_to_regs() argument
1145 tbytes = max(0, tbytes - (int)ctrl->max_oob); in write_oob_to_regs()
1146 tbytes = min_t(int, tbytes, ctrl->max_oob); in write_oob_to_regs()
1149 oob_reg_write(ctrl, j, in write_oob_to_regs()
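
The OOB helpers above access spare-area bytes through 32-bit registers, four bytes per word with byte 0 in the most significant position (the visible read shift is 24 - ((offs & 0x03) << 3)). The sketch below models that packing over a plain array; the split between the base register bank and the *_10_BASE bank is not modeled, and the write path is assumed to mirror the read:

#include <stdint.h>
#include <stdio.h>

static uint32_t oob_words[32 / 4];           /* pretend max_oob is 32 bytes */

/* Extract OOB byte 'offs' from its 32-bit word, MSB-first within the word. */
static uint8_t oob_byte_read(uint32_t offs)
{
    return oob_words[offs / 4] >> (24 - ((offs & 0x03) << 3));
}

/* Write path assumed symmetric: clear the byte lane, then place the value. */
static void oob_byte_write(uint32_t offs, uint8_t val)
{
    unsigned int shift = 24 - ((offs & 0x03) << 3);

    oob_words[offs / 4] &= ~(0xffu << shift);
    oob_words[offs / 4] |= (uint32_t)val << shift;
}

int main(void)
{
    oob_byte_write(5, 0xab);
    printf("word1=0x%08x byte5=0x%02x\n",
           (unsigned)oob_words[1], oob_byte_read(5));
    return 0;
}
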
1160 struct brcmnand_controller *ctrl = data; in brcmnand_ctlrdy_irq() local
1163 if (ctrl->dma_pending) in brcmnand_ctlrdy_irq()
1166 complete(&ctrl->done); in brcmnand_ctlrdy_irq()
1173 struct brcmnand_controller *ctrl = data; in brcmnand_irq() local
1175 if (ctrl->soc->ctlrdy_ack(ctrl->soc)) in brcmnand_irq()
1183 struct brcmnand_controller *ctrl = data; in brcmnand_dma_irq() local
1185 complete(&ctrl->dma_done); in brcmnand_dma_irq()
1193 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_send_cmd() local
1196 dev_dbg(ctrl->dev, "send native cmd %d addr_lo 0x%x\n", cmd, in brcmnand_send_cmd()
1197 brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS)); in brcmnand_send_cmd()
1198 BUG_ON(ctrl->cmd_pending != 0); in brcmnand_send_cmd()
1199 ctrl->cmd_pending = cmd; in brcmnand_send_cmd()
1201 ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, 0); in brcmnand_send_cmd()
1205 brcmnand_write_reg(ctrl, BRCMNAND_CMD_START, in brcmnand_send_cmd()
1206 cmd << brcmnand_cmd_shift(ctrl)); in brcmnand_send_cmd()
1214 unsigned int ctrl) in brcmnand_cmd_ctrl() argument
1223 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_waitfunc() local
1228 dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending); in brcmnand_waitfunc()
1229 if (ctrl->cmd_pending && in brcmnand_waitfunc()
1230 wait_for_completion_timeout(&ctrl->done, timeo) <= 0) { in brcmnand_waitfunc()
1231 u32 cmd = brcmnand_read_reg(ctrl, BRCMNAND_CMD_START) in brcmnand_waitfunc()
1232 >> brcmnand_cmd_shift(ctrl); in brcmnand_waitfunc()
1234 dev_err_ratelimited(ctrl->dev, in brcmnand_waitfunc()
1236 dev_err_ratelimited(ctrl->dev, "intfc status %08x\n", in brcmnand_waitfunc()
1237 brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS)); in brcmnand_waitfunc()
1243 dev_dbg(ctrl->dev, "wait on native cmd %d\n", ctrl->cmd_pending); in brcmnand_waitfunc()
1245 ret = bcmnand_ctrl_poll_status(ctrl, NAND_CTRL_RDY, NAND_CTRL_RDY, timeo); in brcmnand_waitfunc()
1249 ctrl->cmd_pending = 0; in brcmnand_waitfunc()
1250 return brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) & in brcmnand_waitfunc()
1270 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_low_level_op() local
1295 dev_dbg(ctrl->dev, "ll_op cmd %#x\n", tmp); in brcmnand_low_level_op()
1297 brcmnand_write_reg(ctrl, BRCMNAND_LL_OP, tmp); in brcmnand_low_level_op()
1298 (void)brcmnand_read_reg(ctrl, BRCMNAND_LL_OP); in brcmnand_low_level_op()
1309 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_cmdfunc() local
1320 dev_dbg(ctrl->dev, "cmd 0x%x addr 0x%llx\n", command, in brcmnand_cmdfunc()
1370 brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS, in brcmnand_cmdfunc()
1372 (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS); in brcmnand_cmdfunc()
1373 brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, lower_32_bits(addr)); in brcmnand_cmdfunc()
1374 (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS); in brcmnand_cmdfunc()
1382 u32 *flash_cache = (u32 *)ctrl->flash_cache; in brcmnand_cmdfunc()
1385 brcmnand_soc_data_bus_prepare(ctrl->soc, true); in brcmnand_cmdfunc()
1394 fc = brcmnand_read_fc(ctrl, i); in brcmnand_cmdfunc()
1400 if (ctrl->parameter_page_big_endian) in brcmnand_cmdfunc()
1406 brcmnand_soc_data_bus_unprepare(ctrl->soc, true); in brcmnand_cmdfunc()
1423 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_read_byte() local
1430 ret = brcmnand_read_reg(ctrl, BRCMNAND_ID) >> in brcmnand_read_byte()
1433 ret = brcmnand_read_reg(ctrl, BRCMNAND_ID_EXT) >> in brcmnand_read_byte()
1438 ret = oob_reg_read(ctrl, host->last_byte); in brcmnand_read_byte()
1442 ret = brcmnand_read_reg(ctrl, BRCMNAND_INTFC_STATUS) & in brcmnand_read_byte()
1457 ret = ctrl->flash_cache[offs]; in brcmnand_read_byte()
1466 ret = brcmnand_read_reg(ctrl, BRCMNAND_LL_RDATA) & 0xff; in brcmnand_read_byte()
1470 dev_dbg(ctrl->dev, "read byte = 0x%02x\n", ret); in brcmnand_read_byte()
1542 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_dma_run() local
1545 flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC, lower_32_bits(desc)); in brcmnand_dma_run()
1546 (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC); in brcmnand_dma_run()
1547 flash_dma_writel(ctrl, FLASH_DMA_FIRST_DESC_EXT, upper_32_bits(desc)); in brcmnand_dma_run()
1548 (void)flash_dma_readl(ctrl, FLASH_DMA_FIRST_DESC_EXT); in brcmnand_dma_run()
1551 ctrl->dma_pending = true; in brcmnand_dma_run()
1553 flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0x03); /* wake | run */ in brcmnand_dma_run()
1555 if (wait_for_completion_timeout(&ctrl->dma_done, timeo) <= 0) { in brcmnand_dma_run()
1556 dev_err(ctrl->dev, in brcmnand_dma_run()
1558 flash_dma_readl(ctrl, FLASH_DMA_STATUS), in brcmnand_dma_run()
1559 flash_dma_readl(ctrl, FLASH_DMA_ERROR_STATUS)); in brcmnand_dma_run()
1561 ctrl->dma_pending = false; in brcmnand_dma_run()
1562 flash_dma_writel(ctrl, FLASH_DMA_CTRL, 0); /* force stop */ in brcmnand_dma_run()
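
brcmnand_dma_run() programs the 64-bit descriptor address, starts the engine with wake | run, and waits for the DMA interrupt with a timeout, dumping the status and error registers and forcing a stop if it never arrives. A simulated sketch of that sequence follows (the register indices, the busy-wait and the boolean flag stand in for the real register map, the readback barriers and the completion object):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

/* Stub register file standing in for the FLASH_DMA block; indices are
 * placeholders, not the real register map. */
enum { DMA_FIRST_DESC, DMA_FIRST_DESC_EXT, DMA_CTRL, DMA_STATUS, DMA_ERROR, DMA_NREGS };
static uint32_t dma_regs[DMA_NREGS];
static bool dma_done;                        /* set by the (simulated) DMA IRQ */

static void     dma_writel(int reg, uint32_t val) { dma_regs[reg] = val; }
static uint32_t dma_readl(int reg)                { return dma_regs[reg]; }

/* Program the descriptor address, start the engine, wait with a timeout. */
static int dma_run(uint64_t desc, int retries)
{
    dma_writel(DMA_FIRST_DESC, (uint32_t)desc);
    dma_writel(DMA_FIRST_DESC_EXT, (uint32_t)(desc >> 32));

    dma_writel(DMA_CTRL, 0x03);              /* wake | run */

    while (retries-- && !dma_done)
        ;                                    /* simulated wait_for_completion */

    if (!dma_done) {
        fprintf(stderr, "DMA timeout: status 0x%x error 0x%x\n",
                (unsigned)dma_readl(DMA_STATUS), (unsigned)dma_readl(DMA_ERROR));
        dma_writel(DMA_CTRL, 0);             /* force stop */
        return -1;
    }
    return 0;
}

int main(void)
{
    dma_done = true;                         /* pretend the IRQ already fired */
    printf("dma_run: %s\n", dma_run(0x12345678abcdef00ULL, 1000) ? "failed" : "ok");
    return 0;
}
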
1568 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_dma_trans() local
1572 buf_pa = dma_map_single(ctrl->dev, buf, len, dir); in brcmnand_dma_trans()
1573 if (dma_mapping_error(ctrl->dev, buf_pa)) { in brcmnand_dma_trans()
1574 dev_err(ctrl->dev, "unable to map buffer for DMA\n"); in brcmnand_dma_trans()
1578 brcmnand_fill_dma_desc(host, ctrl->dma_desc, addr, buf_pa, len, in brcmnand_dma_trans()
1581 brcmnand_dma_run(host, ctrl->dma_pa); in brcmnand_dma_trans()
1583 dma_unmap_single(ctrl->dev, buf_pa, len, dir); in brcmnand_dma_trans()
1585 if (ctrl->dma_desc->status_valid & FLASH_DMA_ECC_ERROR) in brcmnand_dma_trans()
1587 else if (ctrl->dma_desc->status_valid & FLASH_DMA_CORR_ERROR) in brcmnand_dma_trans()
1602 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_read_by_pio() local
1606 brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_ADDR, 0); in brcmnand_read_by_pio()
1607 brcmnand_write_reg(ctrl, BRCMNAND_CORR_ADDR, 0); in brcmnand_read_by_pio()
1608 brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_EXT_ADDR, 0); in brcmnand_read_by_pio()
1609 brcmnand_write_reg(ctrl, BRCMNAND_CORR_EXT_ADDR, 0); in brcmnand_read_by_pio()
1611 brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS, in brcmnand_read_by_pio()
1613 (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS); in brcmnand_read_by_pio()
1616 brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, in brcmnand_read_by_pio()
1618 (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS); in brcmnand_read_by_pio()
1624 brcmnand_soc_data_bus_prepare(ctrl->soc, false); in brcmnand_read_by_pio()
1627 *buf = brcmnand_read_fc(ctrl, j); in brcmnand_read_by_pio()
1629 brcmnand_soc_data_bus_unprepare(ctrl->soc, false); in brcmnand_read_by_pio()
1633 oob += read_oob_from_regs(ctrl, i, oob, in brcmnand_read_by_pio()
1638 *err_addr = brcmnand_read_reg(ctrl, in brcmnand_read_by_pio()
1640 ((u64)(brcmnand_read_reg(ctrl, in brcmnand_read_by_pio()
1648 *err_addr = brcmnand_read_reg(ctrl, in brcmnand_read_by_pio()
1650 ((u64)(brcmnand_read_reg(ctrl, in brcmnand_read_by_pio()
1718 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_read() local
1723 dev_dbg(ctrl->dev, "read %llx -> %p\n", (unsigned long long)addr, buf); in brcmnand_read()
1726 brcmnand_write_reg(ctrl, BRCMNAND_UNCORR_COUNT, 0); in brcmnand_read()
1729 if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) { in brcmnand_read()
1762 if ((ctrl->nand_version == 0x0700) || in brcmnand_read()
1763 (ctrl->nand_version == 0x0701)) { in brcmnand_read()
1774 if (ctrl->nand_version < 0x0702) { in brcmnand_read()
1782 dev_dbg(ctrl->dev, "uncorrectable error at 0x%llx\n", in brcmnand_read()
1790 unsigned int corrected = brcmnand_count_corrected(ctrl); in brcmnand_read()
1792 dev_dbg(ctrl->dev, "corrected error at 0x%llx\n", in brcmnand_read()
1855 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_write() local
1859 dev_dbg(ctrl->dev, "write %llx <- %p\n", (unsigned long long)addr, buf); in brcmnand_write()
1862 dev_warn(ctrl->dev, "unaligned buffer: %p\n", buf); in brcmnand_write()
1868 for (i = 0; i < ctrl->max_oob; i += 4) in brcmnand_write()
1869 oob_reg_write(ctrl, i, 0xffffffff); in brcmnand_write()
1872 if (has_flash_dma(ctrl) && !oob && flash_dma_buf_ok(buf)) { in brcmnand_write()
1880 brcmnand_write_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS, in brcmnand_write()
1882 (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_EXT_ADDRESS); in brcmnand_write()
1886 brcmnand_write_reg(ctrl, BRCMNAND_CMD_ADDRESS, in brcmnand_write()
1888 (void)brcmnand_read_reg(ctrl, BRCMNAND_CMD_ADDRESS); in brcmnand_write()
1891 brcmnand_soc_data_bus_prepare(ctrl->soc, false); in brcmnand_write()
1894 brcmnand_write_fc(ctrl, j, *buf); in brcmnand_write()
1896 brcmnand_soc_data_bus_unprepare(ctrl->soc, false); in brcmnand_write()
1899 brcmnand_write_fc(ctrl, j, 0xffffffff); in brcmnand_write()
1903 oob += write_oob_to_regs(ctrl, i, oob, in brcmnand_write()
1913 dev_info(ctrl->dev, "program failed at %llx\n", in brcmnand_write()
1979 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_set_cfg() local
1981 u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG); in brcmnand_set_cfg()
1982 u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs, in brcmnand_set_cfg()
1984 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs, in brcmnand_set_cfg()
1989 if (ctrl->block_sizes) { in brcmnand_set_cfg()
1992 for (i = 0, found = 0; ctrl->block_sizes[i]; i++) in brcmnand_set_cfg()
1993 if (ctrl->block_sizes[i] * 1024 == cfg->block_size) { in brcmnand_set_cfg()
1998 dev_warn(ctrl->dev, "invalid block size %u\n", in brcmnand_set_cfg()
2006 if (cfg->block_size < BRCMNAND_MIN_BLOCKSIZE || (ctrl->max_block_size && in brcmnand_set_cfg()
2007 cfg->block_size > ctrl->max_block_size)) { in brcmnand_set_cfg()
2008 dev_warn(ctrl->dev, "invalid block size %u\n", in brcmnand_set_cfg()
2013 if (ctrl->page_sizes) { in brcmnand_set_cfg()
2016 for (i = 0, found = 0; ctrl->page_sizes[i]; i++) in brcmnand_set_cfg()
2017 if (ctrl->page_sizes[i] == cfg->page_size) { in brcmnand_set_cfg()
2022 dev_warn(ctrl->dev, "invalid page size %u\n", in brcmnand_set_cfg()
2030 if (cfg->page_size < BRCMNAND_MIN_PAGESIZE || (ctrl->max_page_size && in brcmnand_set_cfg()
2031 cfg->page_size > ctrl->max_page_size)) { in brcmnand_set_cfg()
2032 dev_warn(ctrl->dev, "invalid page size %u\n", cfg->page_size); in brcmnand_set_cfg()
2037 dev_warn(ctrl->dev, "invalid device size 0x%llx\n", in brcmnand_set_cfg()
2051 nand_writereg(ctrl, cfg_offs, tmp); in brcmnand_set_cfg()
2053 nand_writereg(ctrl, cfg_offs, tmp); in brcmnand_set_cfg()
2056 nand_writereg(ctrl, cfg_ext_offs, tmp); in brcmnand_set_cfg()
2059 tmp = nand_readreg(ctrl, acc_control_offs); in brcmnand_set_cfg()
2060 tmp &= ~brcmnand_ecc_level_mask(ctrl); in brcmnand_set_cfg()
2062 tmp &= ~brcmnand_spare_area_mask(ctrl); in brcmnand_set_cfg()
2064 nand_writereg(ctrl, acc_control_offs, tmp); in brcmnand_set_cfg()
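
brcmnand_set_cfg() validates the requested block and page sizes either against a zero-terminated table of supported sizes (block sizes given in KiB) or, on newer revisions, against a minimum and the controller's maximum. A standalone sketch of the block-size check follows; the table contents and the minimum are hypothetical, and the analogous page-size check is omitted:

#include <stdio.h>

#define MIN_BLOCKSIZE (8 * 1024)             /* placeholder minimum */

/* Returns 0 if the requested block size is acceptable, -1 otherwise.
 * table_kib, when non-NULL, is a zero-terminated list of sizes in KiB. */
static int check_block_size(const unsigned int *table_kib,
                            unsigned int max_size, unsigned int block_size)
{
    if (table_kib) {
        for (int i = 0; table_kib[i]; i++)
            if (table_kib[i] * 1024 == block_size)
                return 0;
        fprintf(stderr, "invalid block size %u\n", block_size);
        return -1;
    }

    if (block_size < MIN_BLOCKSIZE || (max_size && block_size > max_size)) {
        fprintf(stderr, "invalid block size %u\n", block_size);
        return -1;
    }
    return 0;
}

int main(void)
{
    /* hypothetical table for an older controller */
    static const unsigned int sample_blocks_kib[] = { 16, 128, 512, 0 };

    printf("table lookup 128K: %d\n", check_block_size(sample_blocks_kib, 0, 128 * 1024));
    printf("range check 2M:    %d\n", check_block_size(NULL, 2 * 1024 * 1024, 2 * 1024 * 1024));
    return 0;
}
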
2086 if (is_hamming_ecc(host->ctrl, cfg)) in brcmnand_print_cfg()
2110 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_setup_dev() local
2134 if (cfg->spare_area_size > ctrl->max_oob) in brcmnand_setup_dev()
2135 cfg->spare_area_size = ctrl->max_oob; in brcmnand_setup_dev()
2150 dev_err(ctrl->dev, "only HW ECC supported; selected: %d\n", in brcmnand_setup_dev()
2166 dev_err(ctrl->dev, "invalid Hamming params: %d bits per %d bytes\n", in brcmnand_setup_dev()
2180 if (!(ctrl->features & BRCMNAND_HAS_1K_SECTORS)) { in brcmnand_setup_dev()
2181 dev_err(ctrl->dev, "1KB sectors not supported\n"); in brcmnand_setup_dev()
2185 dev_err(ctrl->dev, in brcmnand_setup_dev()
2194 dev_err(ctrl->dev, "unsupported ECC size: %d\n", in brcmnand_setup_dev()
2212 dev_info(ctrl->dev, "detected %s\n", msg); in brcmnand_setup_dev()
2215 offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_ACC_CONTROL); in brcmnand_setup_dev()
2216 tmp = nand_readreg(ctrl, offs); in brcmnand_setup_dev()
2221 if (ctrl->nand_version >= 0x0702) in brcmnand_setup_dev()
2224 if (ctrl->features & BRCMNAND_HAS_PREFETCH) in brcmnand_setup_dev()
2227 nand_writereg(ctrl, offs, tmp); in brcmnand_setup_dev()
2238 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_init_cs() local
2301 chip->controller = &ctrl->controller; in brcmnand_init_cs()
2308 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG); in brcmnand_init_cs()
2309 nand_writereg(ctrl, cfg_offs, in brcmnand_init_cs()
2310 nand_readreg(ctrl, cfg_offs) & ~CFG_BUS_WIDTH); in brcmnand_init_cs()
2357 struct brcmnand_controller *ctrl = host->ctrl; in brcmnand_save_restore_cs_config() local
2358 u16 cfg_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_CFG); in brcmnand_save_restore_cs_config()
2359 u16 cfg_ext_offs = brcmnand_cs_offset(ctrl, host->cs, in brcmnand_save_restore_cs_config()
2361 u16 acc_control_offs = brcmnand_cs_offset(ctrl, host->cs, in brcmnand_save_restore_cs_config()
2363 u16 t1_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING1); in brcmnand_save_restore_cs_config()
2364 u16 t2_offs = brcmnand_cs_offset(ctrl, host->cs, BRCMNAND_CS_TIMING2); in brcmnand_save_restore_cs_config()
2367 nand_writereg(ctrl, cfg_offs, host->hwcfg.config); in brcmnand_save_restore_cs_config()
2369 nand_writereg(ctrl, cfg_ext_offs, in brcmnand_save_restore_cs_config()
2371 nand_writereg(ctrl, acc_control_offs, host->hwcfg.acc_control); in brcmnand_save_restore_cs_config()
2372 nand_writereg(ctrl, t1_offs, host->hwcfg.timing_1); in brcmnand_save_restore_cs_config()
2373 nand_writereg(ctrl, t2_offs, host->hwcfg.timing_2); in brcmnand_save_restore_cs_config()
2375 host->hwcfg.config = nand_readreg(ctrl, cfg_offs); in brcmnand_save_restore_cs_config()
2378 nand_readreg(ctrl, cfg_ext_offs); in brcmnand_save_restore_cs_config()
2379 host->hwcfg.acc_control = nand_readreg(ctrl, acc_control_offs); in brcmnand_save_restore_cs_config()
2380 host->hwcfg.timing_1 = nand_readreg(ctrl, t1_offs); in brcmnand_save_restore_cs_config()
2381 host->hwcfg.timing_2 = nand_readreg(ctrl, t2_offs); in brcmnand_save_restore_cs_config()
2387 struct brcmnand_controller *ctrl = dev_get_drvdata(dev); in brcmnand_suspend() local
2390 list_for_each_entry(host, &ctrl->host_list, node) in brcmnand_suspend()
2393 ctrl->nand_cs_nand_select = brcmnand_read_reg(ctrl, BRCMNAND_CS_SELECT); in brcmnand_suspend()
2394 ctrl->nand_cs_nand_xor = brcmnand_read_reg(ctrl, BRCMNAND_CS_XOR); in brcmnand_suspend()
2395 ctrl->corr_stat_threshold = in brcmnand_suspend()
2396 brcmnand_read_reg(ctrl, BRCMNAND_CORR_THRESHOLD); in brcmnand_suspend()
2398 if (has_flash_dma(ctrl)) in brcmnand_suspend()
2399 ctrl->flash_dma_mode = flash_dma_readl(ctrl, FLASH_DMA_MODE); in brcmnand_suspend()
2406 struct brcmnand_controller *ctrl = dev_get_drvdata(dev); in brcmnand_resume() local
2409 if (has_flash_dma(ctrl)) { in brcmnand_resume()
2410 flash_dma_writel(ctrl, FLASH_DMA_MODE, ctrl->flash_dma_mode); in brcmnand_resume()
2411 flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0); in brcmnand_resume()
2414 brcmnand_write_reg(ctrl, BRCMNAND_CS_SELECT, ctrl->nand_cs_nand_select); in brcmnand_resume()
2415 brcmnand_write_reg(ctrl, BRCMNAND_CS_XOR, ctrl->nand_cs_nand_xor); in brcmnand_resume()
2416 brcmnand_write_reg(ctrl, BRCMNAND_CORR_THRESHOLD, in brcmnand_resume()
2417 ctrl->corr_stat_threshold); in brcmnand_resume()
2418 if (ctrl->soc) { in brcmnand_resume()
2420 ctrl->soc->ctlrdy_ack(ctrl->soc); in brcmnand_resume()
2421 ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true); in brcmnand_resume()
2424 list_for_each_entry(host, &ctrl->host_list, node) { in brcmnand_resume()
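
The suspend/resume path above saves CS_SELECT, CS_XOR and the correction threshold plus each host's config, ACC_CONTROL and timing registers, and writes them back on resume before the flash is touched again. A standalone sketch of that save/restore pattern over a stub register file:

#include <stdint.h>
#include <stdio.h>

/* Stub register file; the slots mirror the registers saved and restored
 * in the suspend/resume lines above. */
enum { R_CS_SELECT, R_CS_XOR, R_CORR_THRESH, R_CFG, R_ACC_CONTROL, R_T1, R_T2, R_NREGS };
static uint32_t regs[R_NREGS];

struct saved_cfg {
    uint32_t cs_select, cs_xor, corr_thresh;
    uint32_t config, acc_control, timing_1, timing_2;
};

/* Suspend path: snapshot everything the block loses across a power gate. */
static void save_config(struct saved_cfg *s)
{
    s->cs_select   = regs[R_CS_SELECT];
    s->cs_xor      = regs[R_CS_XOR];
    s->corr_thresh = regs[R_CORR_THRESH];
    s->config      = regs[R_CFG];
    s->acc_control = regs[R_ACC_CONTROL];
    s->timing_1    = regs[R_T1];
    s->timing_2    = regs[R_T2];
}

/* Resume path: write the snapshot back before any NAND access. */
static void restore_config(const struct saved_cfg *s)
{
    regs[R_CS_SELECT]   = s->cs_select;
    regs[R_CS_XOR]      = s->cs_xor;
    regs[R_CORR_THRESH] = s->corr_thresh;
    regs[R_CFG]         = s->config;
    regs[R_ACC_CONTROL] = s->acc_control;
    regs[R_T1]          = s->timing_1;
    regs[R_T2]          = s->timing_2;
}

int main(void)
{
    struct saved_cfg s;

    regs[R_CFG] = 0x1234;
    save_config(&s);
    regs[R_CFG] = 0;                         /* pretend the block lost power */
    restore_config(&s);
    printf("config after resume: 0x%x\n", (unsigned)regs[R_CFG]);
    return 0;
}
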
2473 struct brcmnand_controller *ctrl; in brcmnand_probe() local
2490 ctrl = devm_kzalloc(dev, sizeof(*ctrl), GFP_KERNEL); in brcmnand_probe()
2491 if (!ctrl) in brcmnand_probe()
2495 dev_set_drvdata(dev, ctrl); in brcmnand_probe()
2499 * so to keep the reference to ctrl, we store it in the variable soc in brcmnand_probe()
2501 soc->ctrl = ctrl; in brcmnand_probe()
2503 ctrl->dev = dev; in brcmnand_probe()
2505 init_completion(&ctrl->done); in brcmnand_probe()
2506 init_completion(&ctrl->dma_done); in brcmnand_probe()
2507 nand_hw_control_init(&ctrl->controller); in brcmnand_probe()
2508 INIT_LIST_HEAD(&ctrl->host_list); in brcmnand_probe()
2511 ctrl->parameter_page_big_endian = in brcmnand_probe()
2517 ctrl->nand_base = devm_ioremap_resource(dev, res); in brcmnand_probe()
2520 ctrl->nand_base = devm_ioremap(pdev, res.start, resource_size(&res)); in brcmnand_probe()
2522 if (IS_ERR(ctrl->nand_base)) in brcmnand_probe()
2523 return PTR_ERR(ctrl->nand_base); in brcmnand_probe()
2526 ctrl->clk = devm_clk_get(dev, "nand"); in brcmnand_probe()
2527 if (!IS_ERR(ctrl->clk)) { in brcmnand_probe()
2528 ret = clk_prepare_enable(ctrl->clk); in brcmnand_probe()
2532 ret = PTR_ERR(ctrl->clk); in brcmnand_probe()
2536 ctrl->clk = NULL; in brcmnand_probe()
2540 ret = brcmnand_revision_init(ctrl); in brcmnand_probe()
2551 ctrl->nand_fc = devm_ioremap_resource(dev, res); in brcmnand_probe()
2552 if (IS_ERR(ctrl->nand_fc)) { in brcmnand_probe()
2553 ret = PTR_ERR(ctrl->nand_fc); in brcmnand_probe()
2557 ctrl->nand_fc = ctrl->nand_base + in brcmnand_probe()
2558 ctrl->reg_offsets[BRCMNAND_FC_BASE]; in brcmnand_probe()
2562 ctrl->nand_fc = devm_ioremap(dev, res.start, in brcmnand_probe()
2564 if (IS_ERR(ctrl->nand_fc)) { in brcmnand_probe()
2565 ret = PTR_ERR(ctrl->nand_fc); in brcmnand_probe()
2569 ctrl->nand_fc = ctrl->nand_base + in brcmnand_probe()
2570 ctrl->reg_offsets[BRCMNAND_FC_BASE]; in brcmnand_probe()
2578 ctrl->flash_dma_base = devm_ioremap_resource(dev, res); in brcmnand_probe()
2579 if (IS_ERR(ctrl->flash_dma_base)) { in brcmnand_probe()
2580 ret = PTR_ERR(ctrl->flash_dma_base); in brcmnand_probe()
2584 flash_dma_writel(ctrl, FLASH_DMA_MODE, 1); /* linked-list */ in brcmnand_probe()
2585 flash_dma_writel(ctrl, FLASH_DMA_ERROR_STATUS, 0); in brcmnand_probe()
2588 ctrl->dma_desc = dmam_alloc_coherent(dev, in brcmnand_probe()
2589 sizeof(*ctrl->dma_desc), in brcmnand_probe()
2590 &ctrl->dma_pa, GFP_KERNEL); in brcmnand_probe()
2591 if (!ctrl->dma_desc) { in brcmnand_probe()
2596 ctrl->dma_irq = platform_get_irq(pdev, 1); in brcmnand_probe()
2597 if ((int)ctrl->dma_irq < 0) { in brcmnand_probe()
2603 ret = devm_request_irq(dev, ctrl->dma_irq, in brcmnand_probe()
2605 ctrl); in brcmnand_probe()
2608 ctrl->dma_irq, ret); in brcmnand_probe()
2617 brcmnand_rmw_reg(ctrl, BRCMNAND_CS_SELECT, in brcmnand_probe()
2620 brcmnand_rmw_reg(ctrl, BRCMNAND_CS_XOR, 0xff, 0, 0); in brcmnand_probe()
2625 if (ctrl->features & BRCMNAND_HAS_WP) { in brcmnand_probe()
2628 brcmnand_set_wp(ctrl, false); in brcmnand_probe()
2635 ctrl->irq = platform_get_irq(pdev, 0); in brcmnand_probe()
2636 if ((int)ctrl->irq < 0) { in brcmnand_probe()
2647 ctrl->soc = soc; in brcmnand_probe()
2649 ret = devm_request_irq(dev, ctrl->irq, brcmnand_irq, 0, in brcmnand_probe()
2650 DRV_NAME, ctrl); in brcmnand_probe()
2653 ctrl->soc->ctlrdy_ack(ctrl->soc); in brcmnand_probe()
2654 ctrl->soc->ctlrdy_set_enabled(ctrl->soc, true); in brcmnand_probe()
2657 ret = devm_request_irq(dev, ctrl->irq, brcmnand_ctlrdy_irq, 0, in brcmnand_probe()
2658 DRV_NAME, ctrl); in brcmnand_probe()
2662 ctrl->irq, ret); in brcmnand_probe()
2679 host->ctrl = ctrl; in brcmnand_probe()
2687 list_add_tail(&host->node, &ctrl->host_list); in brcmnand_probe()
2701 host->ctrl = ctrl; in brcmnand_probe()
2709 list_add_tail(&host->node, &ctrl->host_list); in brcmnand_probe()
2716 clk_disable_unprepare(ctrl->clk); in brcmnand_probe()
2718 if (ctrl->clk) in brcmnand_probe()
2719 clk_disable(ctrl->clk); in brcmnand_probe()
2729 struct brcmnand_controller *ctrl = dev_get_drvdata(&pdev->dev); in brcmnand_remove() local
2732 list_for_each_entry(host, &ctrl->host_list, node) in brcmnand_remove()
2735 clk_disable_unprepare(ctrl->clk); in brcmnand_remove()