Lines matching "dma" and "spear1340" in drivers/ata/pata_arasan_cf.c (Arasan CompactFlash host controller driver). Matched lines are excerpted below, grouped by function; gaps in the source are marked /* ... */.
/* From the file header comment: Tested on arch/arm/mach-spear13xx */
/* Register definitions: */
/* ATA Status-Command */
/* Extended Write Data Port 0x200-0x3FC */
/* Extended Read Data Port 0x400-0x5FC */
/* From the struct arasan_cf_dev field comments: */
/* dma_status: DMA transfer status, reported back to the framework */
/* DMA specific: */
/* dma_completion: completion for DMA transfer complete */
/* dma_chan: DMA channel allocated */
/* mask: capability mask for DMA transfers */
/* work: DMA transfer work */
/* dwork: DMA delayed finish work */
/* qc: qc to be transferred using DMA */
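For orientation, here is a condensed sketch of the struct arasan_cf_dev these comments annotate, reconstructed from the fields the excerpt actually touches (ordering and completeness are assumptions, not verbatim upstream):

struct arasan_cf_dev {
	struct ata_host *host;		/* libata host */
	struct clk *clk;		/* interface clock */
	void __iomem *vbase;		/* ioremapped register base */
	dma_addr_t pbase;		/* physical base, used as FIFO DMA address */
	int irq;
	int card_present;

	u32 dma_status;			/* DMA status reported back to the framework */

	/* DMA specific */
	struct completion cf_completion;	/* buf_avail/xfer_done events */
	struct completion dma_completion;	/* memcpy-DMA descriptor completed */
	struct dma_chan *dma_chan;		/* DMA channel allocated */
	dma_cap_mask_t mask;			/* capability mask for DMA transfers */
	struct work_struct work;		/* DMA transfer work */
	struct delayed_work dwork;		/* DMA delayed finish work */
	struct ata_queued_cmd *qc;		/* qc to be transferred using DMA */
};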
/* In cf_dumpregs(): */
struct device *dev = acdev->host->dev;

dev_dbg(dev, ": CFI_STS: %x", readl(acdev->vbase + CFI_STS));
dev_dbg(dev, ": IRQ_STS: %x", readl(acdev->vbase + IRQ_STS));
dev_dbg(dev, ": IRQ_EN: %x", readl(acdev->vbase + IRQ_EN));
dev_dbg(dev, ": OP_MODE: %x", readl(acdev->vbase + OP_MODE));
dev_dbg(dev, ": CLK_CFG: %x", readl(acdev->vbase + CLK_CFG));
dev_dbg(dev, ": TM_CFG: %x", readl(acdev->vbase + TM_CFG));
dev_dbg(dev, ": XFER_CTR: %x", readl(acdev->vbase + XFER_CTR));
dev_dbg(dev, ": GIRQ_STS: %x", readl(acdev->vbase + GIRQ_STS));
dev_dbg(dev, ": GIRQ_STS_EN: %x", readl(acdev->vbase + GIRQ_STS_EN));
dev_dbg(dev, ": GIRQ_SGN_EN: %x", readl(acdev->vbase + GIRQ_SGN_EN));
/* In cf_ginterrupt_enable(): */
writel(enable, acdev->vbase + GIRQ_STS_EN);
writel(enable, acdev->vbase + GIRQ_SGN_EN);
/* In cf_interrupt_enable(): */
u32 val = readl(acdev->vbase + IRQ_EN);

if (enable) {
	/* clear pending interrupts, then unmask */
	writel(mask, acdev->vbase + IRQ_STS);
	writel(val | mask, acdev->vbase + IRQ_EN);
} else
	writel(val & ~mask, acdev->vbase + IRQ_EN);
/* In cf_card_reset(): */
u32 val = readl(acdev->vbase + OP_MODE);

/* pulse the CARD_RESET bit */
writel(val | CARD_RESET, acdev->vbase + OP_MODE);
/* ... */
writel(val & ~CARD_RESET, acdev->vbase + OP_MODE);
/* In cf_ctrl_reset(): */
writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
       acdev->vbase + OP_MODE);
writel(readl(acdev->vbase + OP_MODE) | CFHOST_ENB,
       acdev->vbase + OP_MODE);
/* In cf_card_detect(): */
struct ata_port *ap = acdev->host->ports[0];
struct ata_eh_info *ehi = &ap->link.eh_info;
u32 val = readl(acdev->vbase + CFI_STS);

/* card inserted: */
if (acdev->card_present)
	return;		/* no state change */
acdev->card_present = 1;

/* card removed: */
if (!acdev->card_present)
	return;		/* no state change */
acdev->card_present = 0;
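The excerpt omits how a detected change is reported onward; in libata drivers of this kind, the usual tail of such a detect routine flags a hotplug event on the EH info that cf_card_detect() already looks up and then freezes the port. A minimal sketch using the standard libata helpers (the hotplugged flag is an assumed parameter, not shown above):

/* Sketch: notify libata so it re-probes the link after insert/remove */
if (hotplugged) {
	ata_ehi_hotplugged(ehi);	/* mark EH info: device may have changed */
	ata_port_freeze(ap);		/* kick error handling on this port */
}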
/* In cf_init(): */
struct arasan_cf_pdata *pdata = dev_get_platdata(acdev->host->dev);

ret = clk_prepare_enable(acdev->clk);
if (ret) {
	dev_dbg(acdev->host->dev, "clock enable failed");
	return ret;
}

ret = clk_set_rate(acdev->clk, 166000000);
if (ret) {
	dev_warn(acdev->host->dev, "clock set rate failed");
	clk_disable_unprepare(acdev->clk);
	return ret;
}

spin_lock_irqsave(&acdev->host->lock, flags);

/* configure the CF interface clock */
if (pdata && pdata->cf_if_clk <= CF_IF_CLK_200M)
	if_clk = pdata->cf_if_clk;
writel(if_clk, acdev->vbase + CLK_CFG);

/* enable the controller in True IDE mode */
writel(TRUE_IDE_MODE | CFHOST_ENB, acdev->vbase + OP_MODE);
/* ... */
spin_unlock_irqrestore(&acdev->host->lock, flags);
/* In cf_exit(): */
spin_lock_irqsave(&acdev->host->lock, flags);
/* ... */
writel(readl(acdev->vbase + OP_MODE) & ~CFHOST_ENB,
       acdev->vbase + OP_MODE);
spin_unlock_irqrestore(&acdev->host->lock, flags);
clk_disable_unprepare(acdev->clk);
/* In dma_callback(): */
complete(&acdev->dma_completion);
/* In dma_complete(): */
struct ata_queued_cmd *qc = acdev->qc;

acdev->qc = NULL;
ata_sff_interrupt(acdev->irq, acdev->host);

spin_lock_irqsave(&acdev->host->lock, flags);
if (unlikely(qc->err_mask) && ata_is_dma(qc->tf.protocol))
	ata_ehi_push_desc(&qc->ap->link.eh_info, "DMA Failed: Timeout");
spin_unlock_irqrestore(&acdev->host->lock, flags);
/* In wait4buf(): */
if (!wait_for_completion_timeout(&acdev->cf_completion, TIMEOUT)) {
	u32 rw = acdev->qc->tf.flags & ATA_TFLAG_WRITE;

	dev_err(acdev->host->dev, "%s TimeOut", rw ? "write" : "read");
	return -ETIMEDOUT;
}

/* the interrupt handler flags ATA_DMA_ERR on a PIO transfer error */
if (acdev->dma_status & ATA_DMA_ERR)
	return -EAGAIN;
/* In dma_xfer(): */
struct dma_chan *chan = acdev->dma_chan;

tx = chan->device->device_prep_dma_memcpy(chan, dest, src, len, flags);
if (!tx) {
	dev_err(acdev->host->dev, "device_prep_dma_memcpy failed\n");
	return -EAGAIN;
}

tx->callback = dma_callback;
tx->callback_param = acdev;
cookie = tx->tx_submit(tx);

ret = dma_submit_error(cookie);
if (ret) {
	dev_err(acdev->host->dev, "dma_submit_error\n");
	return ret;
}

chan->device->device_issue_pending(chan);

/* Wait for DMA to complete */
if (!wait_for_completion_timeout(&acdev->dma_completion, TIMEOUT)) {
	/* ... */
	dev_err(acdev->host->dev, "wait_for_completion_timeout\n");
	return -ETIMEDOUT;
}
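The fragment above drives the raw device_prep_dma_memcpy()/tx_submit() hooks on the channel directly. The same submit-and-wait sequence written against the dmaengine wrapper helpers looks like this (a self-contained sketch; the function name, callback, and one-second timeout are choices made here, not taken from the excerpt):

#include <linux/dmaengine.h>
#include <linux/completion.h>

static void memcpy_done(void *param)
{
	complete(param);		/* wake the submitter */
}

/* Submit one memcpy descriptor on @chan and block until its callback runs */
static int memcpy_and_wait(struct dma_chan *chan, dma_addr_t dest,
			   dma_addr_t src, size_t len, struct completion *done)
{
	struct dma_async_tx_descriptor *tx;
	dma_cookie_t cookie;

	tx = dmaengine_prep_dma_memcpy(chan, dest, src, len, DMA_PREP_INTERRUPT);
	if (!tx)
		return -EAGAIN;

	tx->callback = memcpy_done;
	tx->callback_param = done;

	cookie = dmaengine_submit(tx);
	if (dma_submit_error(cookie))
		return -EIO;

	dma_async_issue_pending(chan);	/* actually start the transfer */

	if (!wait_for_completion_timeout(done, msecs_to_jiffies(1000)))
		return -ETIMEDOUT;
	return 0;
}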
/* In sg_xfer(): */
u32 write = acdev->qc->tf.flags & ATA_TFLAG_WRITE;

if (write) {
	dest = acdev->pbase + EXT_WRITE_PORT;
	src = sg_dma_address(sg);
} else {
	dest = sg_dma_address(sg);
	src = acdev->pbase + EXT_READ_PORT;
}

/* set the transfer direction/size and the XFER_START bit */
spin_lock_irqsave(&acdev->host->lock, flags);
xfer_ctr = readl(acdev->vbase + XFER_CTR) & ~XFER_DIR_MASK;
/* ... direction and XFER_START or'ed in ... */
writel(xfer_ctr, acdev->vbase + XFER_CTR);
spin_unlock_irqrestore(&acdev->host->lock, flags);

/* continue dma xfers until current sg is completed */
while (xfer_cnt) {
	/* ... one FIFO-sized chunk per iteration ... */
	if (ret) {
		dev_err(acdev->host->dev, "dma failed");
		/* ... */
	}

	sglen -= dma_len;
	xfer_cnt -= dma_len;
}

/* clear XFER_START once this sg entry is done */
spin_lock_irqsave(&acdev->host->lock, flags);
writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
       acdev->vbase + XFER_CTR);
spin_unlock_irqrestore(&acdev->host->lock, flags);
/*
 * This routine uses the external DMA controller to read/write data to/from
 * the FIFO of the CF controller. Two transfer-related interrupts are used:
 * - buf_avail: generated as soon as a buffer of 512 bytes is available in
 *   the FIFO (data to read, or space to write).
 * - xfer_done: generated on transfer of "xfer_size" amount of data.
 */
/* In data_xfer(): */
struct ata_queued_cmd *qc = acdev->qc;

/* request a DMA channel */
acdev->dma_chan = dma_request_chan(acdev->host->dev, "data");
if (IS_ERR(acdev->dma_chan)) {
	dev_err(acdev->host->dev, "Unable to get dma_chan\n");
	acdev->dma_chan = NULL;
	/* ... error path ... */
}

for_each_sg(qc->sg, sg, qc->n_elem, temp) {
	ret = sg_xfer(acdev, sg);
	if (ret)
		break;
}

dma_release_channel(acdev->dma_chan);
acdev->dma_chan = NULL;

/* if the device is still busy, re-check from the delayed work */
spin_lock_irqsave(&acdev->host->lock, flags);
status = ioread8(qc->ap->ioaddr.altstatus_addr);
spin_unlock_irqrestore(&acdev->host->lock, flags);
/* ... */
ata_sff_queue_delayed_work(&acdev->dwork, 1);

/* on failure, report a host-bus error to the EH state machine */
spin_lock_irqsave(&acdev->host->lock, flags);
qc->err_mask |= AC_ERR_HOST_BUS;
qc->ap->hsm_task_state = HSM_ST_ERR;
/* ... */
spin_unlock_irqrestore(&acdev->host->lock, flags);
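Putting wait4buf() and dma_xfer() together, the per-chunk flow implied by the comment above can be sketched like this (a reconstruction, not the verbatim driver loop; FIFO_SIZE and the dma_xfer() argument order are assumptions):

#define FIFO_SIZE	0x200	/* assumed 512-byte FIFO, per the buf_avail comment */

static int sg_xfer_sketch(struct arasan_cf_dev *acdev, dma_addr_t buf,
			  u32 sglen, bool write)
{
	while (sglen) {
		u32 dma_len = min_t(u32, sglen, FIFO_SIZE);
		int ret;

		/* block until buf_avail/xfer_done signals FIFO space or data */
		ret = wait4buf(acdev);
		if (ret)
			return ret;

		if (write)
			ret = dma_xfer(acdev, buf,
				       acdev->pbase + EXT_WRITE_PORT, dma_len);
		else
			ret = dma_xfer(acdev, acdev->pbase + EXT_READ_PORT,
				       buf, dma_len);
		if (ret)
			return ret;

		buf += dma_len;
		sglen -= dma_len;
	}
	return 0;
}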
/* In delayed_finish(): */
struct ata_queued_cmd *qc = acdev->qc;

spin_lock_irqsave(&acdev->host->lock, flags);
status = ioread8(qc->ap->ioaddr.altstatus_addr);
spin_unlock_irqrestore(&acdev->host->lock, flags);

/* device still busy: poll again from the delayed workqueue */
if (status & (ATA_BUSY | ATA_DRQ))
	ata_sff_queue_delayed_work(&acdev->dwork, 1);
/* In arasan_cf_interrupt(): */
struct arasan_cf_dev *acdev = ((struct ata_host *)dev)->private_data;

irqsts = readl(acdev->vbase + GIRQ_STS);
if (!(irqsts & GIRQ_CF))
	return IRQ_NONE;

spin_lock_irqsave(&acdev->host->lock, flags);
irqsts = readl(acdev->vbase + IRQ_STS);
writel(irqsts, acdev->vbase + IRQ_STS);		/* clear irqs */
writel(GIRQ_CF, acdev->vbase + GIRQ_STS);	/* clear girqs */

/* ... card-detect handling ... */
spin_unlock_irqrestore(&acdev->host->lock, flags);

/* PIO transfer error: flag it, stop the transfer, wake any waiter */
acdev->dma_status = ATA_DMA_ERR;
writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
       acdev->vbase + XFER_CTR);
spin_unlock_irqrestore(&acdev->host->lock, flags);
complete(&acdev->cf_completion);
dev_err(acdev->host->dev, "pio xfer err irq\n");

spin_unlock_irqrestore(&acdev->host->lock, flags);

/* buf_avail: wake up the transfer worker */
complete(&acdev->cf_completion);

/* xfer_done: */
struct ata_queued_cmd *qc = acdev->qc;

/* ... */
if (qc->tf.flags & ATA_TFLAG_WRITE)
	complete(&acdev->cf_completion);
/* In arasan_cf_freeze(): */
struct arasan_cf_dev *acdev = ap->host->private_data;

/* stop any ongoing transfer and flag a DMA error */
writel(readl(acdev->vbase + XFER_CTR) & ~XFER_START,
       acdev->vbase + XFER_CTR);
acdev->dma_status = ATA_DMA_ERR;
/* In arasan_cf_error_handler(): */
struct arasan_cf_dev *acdev = ap->host->private_data;

/*
 * DMA transfers using an external DMA controller may be scheduled;
 * cancel them before letting the standard error handler run.
 */
cancel_work_sync(&acdev->work);
cancel_delayed_work_sync(&acdev->dwork);
/* In arasan_cf_dma_start(): */
struct ata_queued_cmd *qc = acdev->qc;
struct ata_port *ap = qc->ap;
struct ata_taskfile *tf = &qc->tf;
u32 xfer_ctr = readl(acdev->vbase + XFER_CTR) & ~XFER_DIR_MASK;
u32 write = tf->flags & ATA_TFLAG_WRITE;

/* ... direction bit set according to 'write' ... */
writel(xfer_ctr, acdev->vbase + XFER_CTR);

ap->ops->sff_exec_command(ap, tf);
ata_sff_queue_work(&acdev->work);
/* In arasan_cf_qc_issue(): */
struct ata_port *ap = qc->ap;
struct arasan_cf_dev *acdev = ap->host->private_data;

/* defer non-DMA protocols to the common SFF path */
if (!ata_is_dma(qc->tf.protocol))
	return ata_sff_qc_issue(qc);

/* select the device */
ata_sff_dev_select(ap, qc->dev->devno);
/* ... */

switch (qc->tf.protocol) {
case ATA_PROT_DMA:
	WARN_ON_ONCE(qc->tf.flags & ATA_TFLAG_POLLING);

	ap->ops->sff_tf_load(ap, &qc->tf);
	acdev->dma_status = 0;
	acdev->qc = qc;
	arasan_cf_dma_start(acdev);
	ap->hsm_task_state = HSM_ST_LAST;
	break;
/* ... */
}
/* In arasan_cf_set_piomode(): */
struct arasan_cf_dev *acdev = ap->host->private_data;
u8 pio = adev->pio_mode - XFER_PIO_0;

/* the Arasan controller supports PIO Mode0 through Mode6 */
if (pio > 6) {
	dev_err(ap->dev, "Unknown PIO mode\n");
	return;
}

spin_lock_irqsave(&acdev->host->lock, flags);
val = readl(acdev->vbase + OP_MODE) &
	/* ... DMA enable bits cleared ... */;
writel(val, acdev->vbase + OP_MODE);

val = readl(acdev->vbase + TM_CFG) & ~TRUEIDE_PIO_TIMING_MASK;
/* ... PIO timing value or'ed in ... */
writel(val, acdev->vbase + TM_CFG);
/* ... */
spin_unlock_irqrestore(&acdev->host->lock, flags);
/* In arasan_cf_set_dmamode(): */
struct arasan_cf_dev *acdev = ap->host->private_data;
u32 opmode, tmcfg, dma_mode = adev->dma_mode;

spin_lock_irqsave(&acdev->host->lock, flags);
opmode = readl(acdev->vbase + OP_MODE) &
	/* ... DMA enable bits cleared ... */;
tmcfg = readl(acdev->vbase + TM_CFG);

if (dma_mode >= XFER_UDMA_0 && dma_mode <= XFER_UDMA_6) {
	/* ... UDMA enable and timing-mask bits ... */
	tmcfg |= (dma_mode - XFER_UDMA_0) << ULTRA_DMA_TIMING_SHIFT;
} else if (dma_mode >= XFER_MW_DMA_0 && dma_mode <= XFER_MW_DMA_4) {
	/* ... MWDMA enable and timing-mask bits ... */
	tmcfg |= (dma_mode - XFER_MW_DMA_0) <<
		TRUEIDE_MW_DMA_TIMING_SHIFT;	/* shift name assumed */
} else {
	dev_err(ap->dev, "Unknown DMA mode\n");
	spin_unlock_irqrestore(&acdev->host->lock, flags);
	return;
}

writel(opmode, acdev->vbase + OP_MODE);
writel(tmcfg, acdev->vbase + TM_CFG);
writel(DMA_XFER_MODE, acdev->vbase + XFER_CTR);
/* ... */
spin_unlock_irqrestore(&acdev->host->lock, flags);
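The subtractions above rely on libata's convention that adev->dma_mode carries an absolute XFER_* identifier (from include/linux/ata.h), so the per-class timing index is simply the offset from the class base:

/* e.g. the index programmed into the UDMA timing field for UDMA/100: */
u8 udma_index = XFER_UDMA_5 - XFER_UDMA_0;	/* == 5 */
u8 mwdma_index = XFER_MW_DMA_2 - XFER_MW_DMA_0;	/* == 2 */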
/* In arasan_cf_probe(): */
struct arasan_cf_pdata *pdata = dev_get_platdata(&pdev->dev);

res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
if (!res)
	return -EINVAL;

if (!devm_request_mem_region(&pdev->dev, res->start, resource_size(res),
			     DRIVER_NAME)) {
	dev_warn(&pdev->dev, "Failed to get memory region resource\n");
	return -ENOENT;
}

acdev = devm_kzalloc(&pdev->dev, sizeof(*acdev), GFP_KERNEL);
if (!acdev)
	return -ENOMEM;

if (pdata)
	quirk = pdata->quirk;
else
	quirk = CF_BROKEN_UDMA; /* as it is on spear1340 */

/* without a usable IRQ, fall back to PIO-only operation */
ret = platform_get_irq(pdev, 0);
if (ret > 0) {
	acdev->irq = ret;
	irq_handler = arasan_cf_interrupt;
} else if (ret == -EPROBE_DEFER) {
	return ret;
} else {
	quirk |= CF_BROKEN_MWDMA | CF_BROKEN_UDMA;
}

acdev->pbase = res->start;
acdev->vbase = devm_ioremap(&pdev->dev, res->start,
			    resource_size(res));
if (!acdev->vbase) {
	dev_warn(&pdev->dev, "ioremap fail\n");
	return -ENOMEM;
}

acdev->clk = devm_clk_get(&pdev->dev, NULL);
if (IS_ERR(acdev->clk)) {
	dev_warn(&pdev->dev, "Clock not found\n");
	return PTR_ERR(acdev->clk);
}

/* allocate host */
host = ata_host_alloc(&pdev->dev, 1);
if (!host) {
	dev_warn(&pdev->dev, "alloc host fail\n");
	return -ENOMEM;
}

ap = host->ports[0];
host->private_data = acdev;
acdev->host = host;
ap->ops = &arasan_cf_ops;
ap->pio_mask = ATA_PIO6;
ap->mwdma_mask = ATA_MWDMA4;
ap->udma_mask = ATA_UDMA6;

init_completion(&acdev->cf_completion);
init_completion(&acdev->dma_completion);
INIT_WORK(&acdev->work, data_xfer);
INIT_DELAYED_WORK(&acdev->dwork, delayed_finish);
dma_cap_set(DMA_MEMCPY, acdev->mask);

/* degrade capabilities according to platform quirks */
if (quirk & CF_BROKEN_PIO) {
	ap->ops->set_piomode = NULL;
	ap->pio_mask = 0;
}
if (quirk & CF_BROKEN_MWDMA)
	ap->mwdma_mask = 0;
if (quirk & CF_BROKEN_UDMA)
	ap->udma_mask = 0;

ap->flags |= ATA_FLAG_PIO_POLLING | ATA_FLAG_NO_ATAPI;

ap->ioaddr.cmd_addr = acdev->vbase + ATA_DATA_PORT;
ap->ioaddr.data_addr = acdev->vbase + ATA_DATA_PORT;
ap->ioaddr.error_addr = acdev->vbase + ATA_ERR_FTR;
ap->ioaddr.feature_addr = acdev->vbase + ATA_ERR_FTR;
ap->ioaddr.nsect_addr = acdev->vbase + ATA_SC;
ap->ioaddr.lbal_addr = acdev->vbase + ATA_SN;
ap->ioaddr.lbam_addr = acdev->vbase + ATA_CL;
ap->ioaddr.lbah_addr = acdev->vbase + ATA_CH;
ap->ioaddr.device_addr = acdev->vbase + ATA_SH;
ap->ioaddr.status_addr = acdev->vbase + ATA_STS_CMD;
ap->ioaddr.command_addr = acdev->vbase + ATA_STS_CMD;
ap->ioaddr.altstatus_addr = acdev->vbase + ATA_ASTS_DCTR;
ap->ioaddr.ctl_addr = acdev->vbase + ATA_ASTS_DCTR;

ata_port_desc(ap, "phy_addr %llx virt_addr %p",
	      (unsigned long long)res->start, acdev->vbase);

/* ... */
ret = ata_host_activate(host, acdev->irq, irq_handler, 0,
			&arasan_cf_sht);
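Note that probe() builds a DMA_MEMCPY capability mask even though data_xfer() requests a channel by name; such a mask is what dmaengine's filter-based allocation path consumes. A hypothetical fallback using it (not present in the excerpt; the helper name is invented):

/* Sketch: prefer the named "data" channel, else take any memcpy-capable one */
static struct dma_chan *get_memcpy_chan(struct arasan_cf_dev *acdev)
{
	struct dma_chan *chan;

	chan = dma_request_chan(acdev->host->dev, "data");
	if (!IS_ERR(chan))
		return chan;

	/* legacy path: first channel advertising DMA_MEMCPY wins */
	return dma_request_channel(acdev->mask, NULL, NULL);
}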
/* In arasan_cf_remove(): */
struct arasan_cf_dev *acdev = host->ports[0]->private_data;
/* In arasan_cf_suspend(): */
struct arasan_cf_dev *acdev = host->ports[0]->private_data;

if (acdev->dma_chan)
	dmaengine_terminate_all(acdev->dma_chan);
/* In arasan_cf_resume(): */
struct arasan_cf_dev *acdev = host->ports[0]->private_data;
/* OF match table entry: */
{ .compatible = "arasan,cf-spear1340" },
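For context, the conventional completion of such an OF match table, with the sentinel entry and module alias export (the table identifier is an assumption):

static const struct of_device_id arasan_cf_id_table[] = {
	{ .compatible = "arasan,cf-spear1340" },
	{}	/* sentinel */
};
MODULE_DEVICE_TABLE(of, arasan_cf_id_table);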