Lines matching refs: mdev
209 static struct msgdma_sw_desc *msgdma_get_descriptor(struct msgdma_device *mdev) in msgdma_get_descriptor() argument
214 spin_lock_irqsave(&mdev->lock, flags); in msgdma_get_descriptor()
215 desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node); in msgdma_get_descriptor()
217 spin_unlock_irqrestore(&mdev->lock, flags); in msgdma_get_descriptor()
229 static void msgdma_free_descriptor(struct msgdma_device *mdev, in msgdma_free_descriptor() argument
234 mdev->desc_free_cnt++; in msgdma_free_descriptor()
235 list_add_tail(&desc->node, &mdev->free_list); in msgdma_free_descriptor()
237 mdev->desc_free_cnt++; in msgdma_free_descriptor()
238 list_move_tail(&child->node, &mdev->free_list); in msgdma_free_descriptor()
247 static void msgdma_free_desc_list(struct msgdma_device *mdev, in msgdma_free_desc_list() argument
253 msgdma_free_descriptor(mdev, desc); in msgdma_free_desc_list()
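The three functions above (source lines 209-253) manage the driver's software-descriptor pool; the references appear to come from the Altera mSGDMA dmaengine driver (drivers/dma/altera-msgdma.c). msgdma_get_descriptor() pops the head of free_list under mdev->lock, msgdma_free_descriptor() returns a descriptor plus its chained children while bumping desc_free_cnt, and msgdma_free_desc_list() drains a whole list. A minimal sketch of the same pattern follows, using simplified illustrative types (sw_desc, desc_pool) rather than the driver's own; the list_del() in the get path is implied by the surrounding code, not shown in this mdev-filtered listing.

    #include <linux/list.h>
    #include <linux/spinlock.h>

    struct sw_desc {
            struct list_head node;          /* links into free/pending/active/done lists */
            struct list_head tx_list;       /* child descriptors of a chained transfer */
    };

    struct desc_pool {
            spinlock_t lock;                /* stands in for mdev->lock */
            unsigned int free_cnt;          /* stands in for mdev->desc_free_cnt */
            struct list_head free_list;
    };

    /* Pop one descriptor, as msgdma_get_descriptor() does. Callers are expected
     * to have reserved a count first, so free_list is never empty here. */
    static struct sw_desc *pool_get(struct desc_pool *p)
    {
            struct sw_desc *desc;
            unsigned long flags;

            spin_lock_irqsave(&p->lock, flags);
            desc = list_first_entry(&p->free_list, struct sw_desc, node);
            list_del(&desc->node);
            spin_unlock_irqrestore(&p->lock, flags);

            return desc;
    }

    /* Return a descriptor and its children; caller holds p->lock, matching how
     * msgdma_free_descriptor() is reached from the locked cleanup paths. */
    static void pool_put(struct desc_pool *p, struct sw_desc *desc)
    {
            struct sw_desc *child, *next;

            p->free_cnt++;
            list_add_tail(&desc->node, &p->free_list);
            list_for_each_entry_safe(child, next, &desc->tx_list, node) {
                    p->free_cnt++;
                    list_move_tail(&child->node, &p->free_list);
            }
    }

Preallocating the pool and tracking free_cnt lets the prep callbacks fail fast in atomic context instead of allocating there.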
305 struct msgdma_device *mdev = to_mdev(tx->chan); in msgdma_tx_submit() local
311 spin_lock_irqsave(&mdev->lock, flags); in msgdma_tx_submit()
314 list_add_tail(&new->node, &mdev->pending_list); in msgdma_tx_submit()
315 spin_unlock_irqrestore(&mdev->lock, flags); in msgdma_tx_submit()
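msgdma_tx_submit() (source lines 305-315) is the standard dmaengine submit hook: assign a completion cookie and queue the descriptor on pending_list, all under mdev->lock. A sketch follows; tx_to_desc() is assumed to be a container_of() wrapper, and dma_cookie_assign() is the helper from the private drivers/dma/dmaengine.h header (the cookie line itself doesn't reference mdev, so it is absent from the listing).

    #include <linux/dmaengine.h>
    #include "dmaengine.h"          /* private header: dma_cookie_assign() */

    static dma_cookie_t tx_submit_sketch(struct dma_async_tx_descriptor *tx)
    {
            struct msgdma_device *mdev = to_mdev(tx->chan);  /* container_of wrapper */
            struct msgdma_sw_desc *new = tx_to_desc(tx);     /* assumed wrapper */
            dma_cookie_t cookie;
            unsigned long flags;

            spin_lock_irqsave(&mdev->lock, flags);
            cookie = dma_cookie_assign(tx);          /* not visible in the mdev listing */
            list_add_tail(&new->node, &mdev->pending_list);  /* source line 314 */
            spin_unlock_irqrestore(&mdev->lock, flags);

            return cookie;
    }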
334 struct msgdma_device *mdev = to_mdev(dchan); in msgdma_prep_memcpy() local
343 spin_lock_irqsave(&mdev->lock, irqflags); in msgdma_prep_memcpy()
344 if (desc_cnt > mdev->desc_free_cnt) { in msgdma_prep_memcpy()
345 spin_unlock_irqrestore(&mdev->lock, irqflags); in msgdma_prep_memcpy()
346 dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); in msgdma_prep_memcpy()
349 mdev->desc_free_cnt -= desc_cnt; in msgdma_prep_memcpy()
350 spin_unlock_irqrestore(&mdev->lock, irqflags); in msgdma_prep_memcpy()
354 new = msgdma_get_descriptor(mdev); in msgdma_prep_memcpy()
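Both prep callbacks (msgdma_prep_memcpy above, msgdma_prep_slave_sg below) share a reservation scheme: count the hardware descriptors the request needs, compare against desc_free_cnt under the lock, and either bail out so the caller can retry later or debit the counter before building the chain. A sketch of just that check, reusing the driver's struct msgdma_device; mapping the failure to -ENOMEM here is an illustrative convention, since the real prep callback simply returns NULL.

    /* Reserve desc_cnt hardware descriptors or fail; mirrors source lines
     * 343-350 and 406-413. */
    static int reserve_descriptors(struct msgdma_device *mdev, u32 desc_cnt)
    {
            unsigned long irqflags;

            spin_lock_irqsave(&mdev->lock, irqflags);
            if (desc_cnt > mdev->desc_free_cnt) {
                    spin_unlock_irqrestore(&mdev->lock, irqflags);
                    dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);
                    return -ENOMEM;
            }
            mdev->desc_free_cnt -= desc_cnt;
            spin_unlock_irqrestore(&mdev->lock, irqflags);

            return 0;
    }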
392 struct msgdma_device *mdev = to_mdev(dchan); in msgdma_prep_slave_sg() local
393 struct dma_slave_config *cfg = &mdev->slave_cfg; in msgdma_prep_slave_sg()
406 spin_lock_irqsave(&mdev->lock, irqflags); in msgdma_prep_slave_sg()
407 if (desc_cnt > mdev->desc_free_cnt) { in msgdma_prep_slave_sg()
408 spin_unlock_irqrestore(&mdev->lock, irqflags); in msgdma_prep_slave_sg()
409 dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev); in msgdma_prep_slave_sg()
412 mdev->desc_free_cnt -= desc_cnt; in msgdma_prep_slave_sg()
413 spin_unlock_irqrestore(&mdev->lock, irqflags); in msgdma_prep_slave_sg()
420 new = msgdma_get_descriptor(mdev); in msgdma_prep_slave_sg()
463 struct msgdma_device *mdev = to_mdev(dchan); in msgdma_dma_config() local
465 memcpy(&mdev->slave_cfg, config, sizeof(*config)); in msgdma_dma_config()
470 static void msgdma_reset(struct msgdma_device *mdev) in msgdma_reset() argument
476 iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS); in msgdma_reset()
477 iowrite32(MSGDMA_CSR_CTL_RESET, mdev->csr + MSGDMA_CSR_CONTROL); in msgdma_reset()
479 ret = readl_poll_timeout(mdev->csr + MSGDMA_CSR_STATUS, val, in msgdma_reset()
483 dev_err(mdev->dev, "DMA channel did not reset\n"); in msgdma_reset()
486 iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS); in msgdma_reset()
490 MSGDMA_CSR_CTL_GLOBAL_INTR, mdev->csr + MSGDMA_CSR_CONTROL); in msgdma_reset()
492 mdev->idle = true; in msgdma_reset()
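msgdma_reset() (source lines 470-492) follows the usual soft-reset recipe: clear stale status, assert the reset control bit, poll until the core deasserts its resetting flag, then clear status again and re-enable interrupts. A sketch using readl_poll_timeout() from <linux/iopoll.h>; the MSGDMA_CSR_STAT_RESETTING bit, the STOP_ON_* control bits, and the 1 us / 10 ms poll parameters are assumptions, since those lines don't reference mdev and are missing from the listing.

    #include <linux/iopoll.h>

    static void reset_sketch(struct msgdma_device *mdev)
    {
            u32 val;
            int ret;

            /* Clear stale write-one-to-clear status bits, then assert reset. */
            iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);
            iowrite32(MSGDMA_CSR_CTL_RESET, mdev->csr + MSGDMA_CSR_CONTROL);

            /* Poll until the core leaves its resetting state. */
            ret = readl_poll_timeout(mdev->csr + MSGDMA_CSR_STATUS, val,
                                     (val & MSGDMA_CSR_STAT_RESETTING) == 0,
                                     1, 10000);
            if (ret)
                    dev_err(mdev->dev, "DMA channel did not reset\n");

            /* Clear status once more, then enable interrupts; the STOP_ON_*
             * companions to GLOBAL_INTR are assumed. */
            iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);
            iowrite32(MSGDMA_CSR_CTL_STOP_ON_ERR | MSGDMA_CSR_CTL_STOP_ON_EARLY |
                      MSGDMA_CSR_CTL_GLOBAL_INTR, mdev->csr + MSGDMA_CSR_CONTROL);

            mdev->idle = true;      /* source line 492 */
    }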
495 static void msgdma_copy_one(struct msgdma_device *mdev, in msgdma_copy_one() argument
498 void __iomem *hw_desc = mdev->desc; in msgdma_copy_one()
504 while (ioread32(mdev->csr + MSGDMA_CSR_STATUS) & in msgdma_copy_one()
521 mdev->idle = false; in msgdma_copy_one()
533 static void msgdma_copy_desc_to_fifo(struct msgdma_device *mdev, in msgdma_copy_desc_to_fifo() argument
538 msgdma_copy_one(mdev, desc); in msgdma_copy_desc_to_fifo()
541 msgdma_copy_one(mdev, sdesc); in msgdma_copy_desc_to_fifo()
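msgdma_copy_one() busy-waits at source line 504 until the hardware descriptor FIFO has room, then writes the descriptor registers; msgdma_copy_desc_to_fifo() repeats that for the first descriptor and each chained child. A sketch of the push path; the MSGDMA_CSR_STAT_DESC_BUF_FULL and MSGDMA_DESC_CONTROL names and the write-control-word-last convention are assumptions beyond what the listing shows.

    #include <linux/delay.h>        /* mdelay() */

    static void copy_one_sketch(struct msgdma_device *mdev, u32 ctrl_word)
    {
            void __iomem *hw_desc = mdev->desc;     /* source line 498 */

            /* Spin until the hardware descriptor FIFO has room (line 504). */
            while (ioread32(mdev->csr + MSGDMA_CSR_STATUS) &
                   MSGDMA_CSR_STAT_DESC_BUF_FULL)
                    mdelay(1);

            /* Address/length words would be written here; the control word
             * goes last, since that write commits the descriptor. */
            iowrite32(ctrl_word, hw_desc + MSGDMA_DESC_CONTROL);

            mdev->idle = false;     /* source line 521: engine now has work */
    }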
548 static void msgdma_start_transfer(struct msgdma_device *mdev) in msgdma_start_transfer() argument
552 if (!mdev->idle) in msgdma_start_transfer()
555 desc = list_first_entry_or_null(&mdev->pending_list, in msgdma_start_transfer()
560 list_splice_tail_init(&mdev->pending_list, &mdev->active_list); in msgdma_start_transfer()
561 msgdma_copy_desc_to_fifo(mdev, desc); in msgdma_start_transfer()
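msgdma_start_transfer() (source lines 548-561) only touches hardware when the channel is idle: it grabs the first pending descriptor, splices the entire pending_list onto active_list in one O(1) operation, and pushes the chain into the FIFO. Sketched below; both call sites in this listing hold mdev->lock, so the sketch assumes the same.

    /* Caller holds mdev->lock. */
    static void start_transfer_sketch(struct msgdma_device *mdev)
    {
            struct msgdma_sw_desc *desc;

            if (!mdev->idle)
                    return;         /* engine busy; the IRQ path restarts us */

            desc = list_first_entry_or_null(&mdev->pending_list,
                                            struct msgdma_sw_desc, node);
            if (!desc)
                    return;         /* nothing queued */

            /* One O(1) splice moves every pending descriptor to active. */
            list_splice_tail_init(&mdev->pending_list, &mdev->active_list);
            msgdma_copy_desc_to_fifo(mdev, desc);
    }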
570 struct msgdma_device *mdev = to_mdev(chan); in msgdma_issue_pending() local
573 spin_lock_irqsave(&mdev->lock, flags); in msgdma_issue_pending()
574 msgdma_start_transfer(mdev); in msgdma_issue_pending()
575 spin_unlock_irqrestore(&mdev->lock, flags); in msgdma_issue_pending()
582 static void msgdma_chan_desc_cleanup(struct msgdma_device *mdev) in msgdma_chan_desc_cleanup() argument
586 list_for_each_entry_safe(desc, next, &mdev->done_list, node) { in msgdma_chan_desc_cleanup()
595 spin_unlock(&mdev->lock); in msgdma_chan_desc_cleanup()
597 spin_lock(&mdev->lock); in msgdma_chan_desc_cleanup()
601 msgdma_free_descriptor(mdev, desc); in msgdma_chan_desc_cleanup()
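Note the unlock/relock pair at source lines 595-597: the client's completion callback must run without mdev->lock held, both to bound lock hold time and because the callback may legally submit new work that takes the same lock. A sketch of that pattern, extending the earlier desc_pool sketch with a done_list and illustrative callback fields; the real driver goes through the dmaengine_desc_callback helpers, which this listing omits.

    /* Assumes desc_pool gained a done_list and sw_desc gained callback and
     * callback_arg fields relative to the earlier sketch. Caller holds
     * d->lock; the lock is dropped around the user callback. */
    static void cleanup_sketch(struct desc_pool *d)
    {
            struct sw_desc *desc, *next;

            list_for_each_entry_safe(desc, next, &d->done_list, node) {
                    void (*cb)(void *) = desc->callback;        /* illustrative */
                    void *cb_arg = desc->callback_arg;          /* illustrative */

                    list_del(&desc->node);

                    if (cb) {
                            spin_unlock(&d->lock);
                            cb(cb_arg);     /* runs unlocked; may take d->lock */
                            spin_lock(&d->lock);
                    }

                    pool_put(d, desc);      /* back to free_list, see above */
            }
    }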
609 static void msgdma_complete_descriptor(struct msgdma_device *mdev) in msgdma_complete_descriptor() argument
613 desc = list_first_entry_or_null(&mdev->active_list, in msgdma_complete_descriptor()
619 list_add_tail(&desc->node, &mdev->done_list); in msgdma_complete_descriptor()
626 static void msgdma_free_descriptors(struct msgdma_device *mdev) in msgdma_free_descriptors() argument
628 msgdma_free_desc_list(mdev, &mdev->active_list); in msgdma_free_descriptors()
629 msgdma_free_desc_list(mdev, &mdev->pending_list); in msgdma_free_descriptors()
630 msgdma_free_desc_list(mdev, &mdev->done_list); in msgdma_free_descriptors()
639 struct msgdma_device *mdev = to_mdev(dchan); in msgdma_free_chan_resources() local
642 spin_lock_irqsave(&mdev->lock, flags); in msgdma_free_chan_resources()
643 msgdma_free_descriptors(mdev); in msgdma_free_chan_resources()
644 spin_unlock_irqrestore(&mdev->lock, flags); in msgdma_free_chan_resources()
645 kfree(mdev->sw_desq); in msgdma_free_chan_resources()
656 struct msgdma_device *mdev = to_mdev(dchan); in msgdma_alloc_chan_resources() local
660 mdev->sw_desq = kcalloc(MSGDMA_DESC_NUM, sizeof(*desc), GFP_NOWAIT); in msgdma_alloc_chan_resources()
661 if (!mdev->sw_desq) in msgdma_alloc_chan_resources()
664 mdev->idle = true; in msgdma_alloc_chan_resources()
665 mdev->desc_free_cnt = MSGDMA_DESC_NUM; in msgdma_alloc_chan_resources()
667 INIT_LIST_HEAD(&mdev->free_list); in msgdma_alloc_chan_resources()
670 desc = mdev->sw_desq + i; in msgdma_alloc_chan_resources()
671 dma_async_tx_descriptor_init(&desc->async_tx, &mdev->dmachan); in msgdma_alloc_chan_resources()
673 list_add_tail(&desc->node, &mdev->free_list); in msgdma_alloc_chan_resources()
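msgdma_alloc_chan_resources() (source lines 656-673) builds the pool the earlier functions consume: one kcalloc'd array of MSGDMA_DESC_NUM descriptors, each initialised for async_tx and threaded onto free_list. Sketch; the tx_submit hookup is assumed, since that assignment doesn't reference mdev and is absent here.

    #include <linux/slab.h>

    static int alloc_chan_resources_sketch(struct msgdma_device *mdev)
    {
            struct msgdma_sw_desc *desc;
            int i;

            mdev->sw_desq = kcalloc(MSGDMA_DESC_NUM, sizeof(*desc), GFP_NOWAIT);
            if (!mdev->sw_desq)
                    return -ENOMEM;

            mdev->idle = true;
            mdev->desc_free_cnt = MSGDMA_DESC_NUM;

            INIT_LIST_HEAD(&mdev->free_list);
            for (i = 0; i < MSGDMA_DESC_NUM; i++) {
                    desc = mdev->sw_desq + i;
                    dma_async_tx_descriptor_init(&desc->async_tx, &mdev->dmachan);
                    desc->async_tx.tx_submit = msgdma_tx_submit;  /* assumed hookup */
                    list_add_tail(&desc->node, &mdev->free_list);
            }

            return MSGDMA_DESC_NUM;     /* dmaengine expects the descriptor count */
    }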
685 struct msgdma_device *mdev = from_tasklet(mdev, t, irq_tasklet); in msgdma_tasklet() local
691 spin_lock_irqsave(&mdev->lock, flags); in msgdma_tasklet()
694 count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL); in msgdma_tasklet()
695 dev_dbg(mdev->dev, "%s (%d): response count=%d\n", in msgdma_tasklet()
705 size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED); in msgdma_tasklet()
706 status = ioread32(mdev->resp + MSGDMA_RESP_STATUS); in msgdma_tasklet()
708 msgdma_complete_descriptor(mdev); in msgdma_tasklet()
709 msgdma_chan_desc_cleanup(mdev); in msgdma_tasklet()
712 spin_unlock_irqrestore(&mdev->lock, flags); in msgdma_tasklet()
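The tasklet drains the response FIFO: MSGDMA_CSR_RESP_FILL_LEVEL reports how many responses are queued, and reading the two response registers pops one entry, after which the matching descriptor moves from active_list to done_list and is cleaned up. Sketch; the while (count--) control flow is inferred, as those lines don't mention mdev.

    static void tasklet_sketch(struct msgdma_device *mdev)
    {
            u32 count, size, status;
            unsigned long flags;

            spin_lock_irqsave(&mdev->lock, flags);

            count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
            dev_dbg(mdev->dev, "%s (%d): response count=%d\n",
                    __func__, __LINE__, count);

            while (count--) {
                    /* Reading both registers pops one response FIFO entry;
                     * the values are not needed further on the good path. */
                    size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
                    status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);

                    msgdma_complete_descriptor(mdev);   /* active -> done */
                    msgdma_chan_desc_cleanup(mdev);     /* callbacks + free */
            }

            spin_unlock_irqrestore(&mdev->lock, flags);
    }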
724 struct msgdma_device *mdev = data; in msgdma_irq_handler() local
727 status = ioread32(mdev->csr + MSGDMA_CSR_STATUS); in msgdma_irq_handler()
730 spin_lock(&mdev->lock); in msgdma_irq_handler()
731 mdev->idle = true; in msgdma_irq_handler()
732 msgdma_start_transfer(mdev); in msgdma_irq_handler()
733 spin_unlock(&mdev->lock); in msgdma_irq_handler()
736 tasklet_schedule(&mdev->irq_tasklet); in msgdma_irq_handler()
739 iowrite32(MSGDMA_CSR_STAT_IRQ, mdev->csr + MSGDMA_CSR_STATUS); in msgdma_irq_handler()
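The hard IRQ handler stays minimal: if status shows the engine is no longer busy, mark the channel idle and restart pending work; defer completion processing to the tasklet; then ack by writing the IRQ bit back to the write-one-to-clear status register. Sketch; the MSGDMA_CSR_STAT_BUSY name and the exact idle check are assumptions, since that line doesn't reference mdev's fields shown here.

    #include <linux/interrupt.h>

    static irqreturn_t irq_handler_sketch(int irq, void *data)
    {
            struct msgdma_device *mdev = data;
            u32 status;

            status = ioread32(mdev->csr + MSGDMA_CSR_STATUS);
            if ((status & MSGDMA_CSR_STAT_BUSY) == 0) {
                    /* Engine idle: kick the next queued transfer, if any. */
                    spin_lock(&mdev->lock);
                    mdev->idle = true;
                    msgdma_start_transfer(mdev);
                    spin_unlock(&mdev->lock);
            }

            /* Completion bookkeeping is deferred to the tasklet. */
            tasklet_schedule(&mdev->irq_tasklet);

            /* Ack: the status register is write-one-to-clear. */
            iowrite32(MSGDMA_CSR_STAT_IRQ, mdev->csr + MSGDMA_CSR_STATUS);

            return IRQ_HANDLED;
    }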
748 static void msgdma_dev_remove(struct msgdma_device *mdev) in msgdma_dev_remove() argument
750 if (!mdev) in msgdma_dev_remove()
753 devm_free_irq(mdev->dev, mdev->irq, mdev); in msgdma_dev_remove()
754 tasklet_kill(&mdev->irq_tasklet); in msgdma_dev_remove()
755 list_del(&mdev->dmachan.device_node); in msgdma_dev_remove()
795 struct msgdma_device *mdev; in msgdma_probe() local
800 mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_NOWAIT); in msgdma_probe()
801 if (!mdev) in msgdma_probe()
804 mdev->dev = &pdev->dev; in msgdma_probe()
807 ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr); in msgdma_probe()
812 ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc); in msgdma_probe()
817 ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp); in msgdma_probe()
821 platform_set_drvdata(pdev, mdev); in msgdma_probe()
824 mdev->irq = platform_get_irq(pdev, 0); in msgdma_probe()
825 if (mdev->irq < 0) in msgdma_probe()
828 ret = devm_request_irq(&pdev->dev, mdev->irq, msgdma_irq_handler, in msgdma_probe()
829 0, dev_name(&pdev->dev), mdev); in msgdma_probe()
833 tasklet_setup(&mdev->irq_tasklet, msgdma_tasklet); in msgdma_probe()
835 dma_cookie_init(&mdev->dmachan); in msgdma_probe()
837 spin_lock_init(&mdev->lock); in msgdma_probe()
839 INIT_LIST_HEAD(&mdev->active_list); in msgdma_probe()
840 INIT_LIST_HEAD(&mdev->pending_list); in msgdma_probe()
841 INIT_LIST_HEAD(&mdev->done_list); in msgdma_probe()
842 INIT_LIST_HEAD(&mdev->free_list); in msgdma_probe()
844 dma_dev = &mdev->dmadev; in msgdma_probe()
873 mdev->dmachan.device = dma_dev; in msgdma_probe()
874 list_add_tail(&mdev->dmachan.device_node, &dma_dev->channels); in msgdma_probe()
885 msgdma_reset(mdev); in msgdma_probe()
896 msgdma_dev_remove(mdev); in msgdma_probe()
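msgdma_probe() (source lines 795-896) sets up in a deliberate order: map the csr/desc/resp register windows via the driver's request_and_map() helper, install the IRQ handler and tasklet, initialise the lock, cookie state and the four descriptor lists, then attach the channel and reset the hardware before registering with the dmaengine core. A condensed sketch of that ordering; unwind paths and the dma_dev capability setup are elided, and the error codes follow the visible checks.

    static int probe_sketch(struct platform_device *pdev)
    {
            struct msgdma_device *mdev;
            struct resource *dma_res;
            int ret;

            mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_NOWAIT);
            if (!mdev)
                    return -ENOMEM;
            mdev->dev = &pdev->dev;

            /* 1. Map the control/status, descriptor and response windows. */
            ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr);
            if (ret)
                    return ret;
            ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc);
            if (ret)
                    return ret;
            ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp);
            if (ret)
                    return ret;

            platform_set_drvdata(pdev, mdev);

            /* 2. IRQ and bottom half. */
            mdev->irq = platform_get_irq(pdev, 0);
            if (mdev->irq < 0)
                    return -ENXIO;
            ret = devm_request_irq(&pdev->dev, mdev->irq, msgdma_irq_handler,
                                   0, dev_name(&pdev->dev), mdev);
            if (ret)
                    return ret;
            tasklet_setup(&mdev->irq_tasklet, msgdma_tasklet);

            /* 3. Software state: cookie, lock, four descriptor lists. */
            dma_cookie_init(&mdev->dmachan);
            spin_lock_init(&mdev->lock);
            INIT_LIST_HEAD(&mdev->active_list);
            INIT_LIST_HEAD(&mdev->pending_list);
            INIT_LIST_HEAD(&mdev->done_list);
            INIT_LIST_HEAD(&mdev->free_list);

            /* 4. Attach the channel and reset hardware; dmaengine
             * registration (not in this listing) would follow. */
            INIT_LIST_HEAD(&mdev->dmadev.channels);
            mdev->dmachan.device = &mdev->dmadev;
            list_add_tail(&mdev->dmachan.device_node, &mdev->dmadev.channels);
            msgdma_reset(mdev);

            return 0;
    }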
909 struct msgdma_device *mdev = platform_get_drvdata(pdev); in msgdma_remove() local
911 dma_async_device_unregister(&mdev->dmadev); in msgdma_remove()
912 msgdma_dev_remove(mdev); in msgdma_remove()
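Teardown mirrors probe in reverse: msgdma_remove() first unregisters from the dmaengine core so no new work arrives, then msgdma_dev_remove() frees the IRQ, kills the tasklet (which waits for a running bottom half to finish), and unlinks the channel. A sketch of the combined path; the ordering rationale in the comments is an interpretation of lines 748-755 and 909-912.

    static void remove_sketch(struct platform_device *pdev)
    {
            struct msgdma_device *mdev = platform_get_drvdata(pdev);

            /* Stop new clients first... */
            dma_async_device_unregister(&mdev->dmadev);

            /* ...then silence interrupt sources before touching channel
             * state: free the IRQ so no new hard IRQs arrive, wait out any
             * running tasklet, and only then unlink the channel. */
            devm_free_irq(mdev->dev, mdev->irq, mdev);
            tasklet_kill(&mdev->irq_tasklet);
            list_del(&mdev->dmachan.device_node);
    }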