/* Excerpts from drivers/dma/altera-msgdma.c (Altera mSGDMA DMA engine driver) */

// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * DMA driver for Altera mSGDMA IP core
 *
 * Based on drivers/dma/xilinx/zynqmp_dma.c, which is:
 */

#include <linux/dma-mapping.h>

/**
 * struct msgdma_extended_desc - implements an extended descriptor
 * @len: the number of bytes to transfer per descriptor
 */
/* mSGDMA dispatcher control and status register map */

#define MSGDMA_CSR_RW_FILL_LEVEL	0x08	/* 31:16 - write fill level */
						/* 15:00 - read fill level */
#define MSGDMA_CSR_RW_SEQ_NUM		0x10	/* 31:16 - write seq number */
						/* 15:00 - read seq number */
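
/*
 * Illustrative sketch (not part of the driver): the two registers above
 * pack a write-side field into bits 31:16 and a read-side field into
 * bits 15:0, so a caller would split the raw register value roughly as
 * below. The helper names are made up for illustration.
 */
static inline u16 msgdma_rd_fill_level(u32 raw)
{
        return raw & 0xffff;    /* bits 15:0 - read fill level */
}

static inline u16 msgdma_wr_fill_level(u32 raw)
{
        return raw >> 16;       /* bits 31:16 - write fill level */
}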
/* mSGDMA response register map */

/**
 * struct msgdma_sw_desc - implements a sw descriptor
 * @node: node to move from the free list to the tx list
 */

/**
 * struct msgdma_device - DMA device structure
 */
/**
 * msgdma_get_descriptor - Get the sw descriptor from the pool
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
        spin_lock_irqsave(&mdev->lock, flags);
        desc = list_first_entry(&mdev->free_list, struct msgdma_sw_desc, node);
        list_del(&desc->node);
        spin_unlock_irqrestore(&mdev->lock, flags);

        INIT_LIST_HEAD(&desc->tx_list);
/**
 * msgdma_free_descriptor - Free a descriptor back to the pool
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
        mdev->desc_free_cnt++;
        list_add_tail(&desc->node, &mdev->free_list);
        list_for_each_entry_safe(child, next, &desc->tx_list, node) {
                mdev->desc_free_cnt++;
                list_move_tail(&child->node, &mdev->free_list);
/**
 * msgdma_free_desc_list - Free a list of descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 * @list: List of descriptors to parse and free
 */
/**
 * msgdma_desc_config - Configure the descriptor
 *
 * @stride: Read/write stride value to set
 */
        desc->read_addr_lo = lower_32_bits(src);
        desc->write_addr_lo = lower_32_bits(dst);

        desc->read_addr_hi = upper_32_bits(src);
        desc->write_addr_hi = upper_32_bits(dst);

        desc->len = len;
        desc->stride = stride;
        desc->burst_seq_num = 0;        /* 0 will result in max burst length */

        desc->control = MSGDMA_DESC_CTL_TR_ERR_IRQ | MSGDMA_DESC_CTL_GO |
/**
 * msgdma_desc_config_eod - Mark the descriptor as end descriptor
 */
        desc->control |= MSGDMA_DESC_CTL_TR_COMP_IRQ;
/**
 * msgdma_tx_submit - Submit DMA transaction
 */
        struct msgdma_device *mdev = to_mdev(tx->chan);

        spin_lock_irqsave(&mdev->lock, flags);

        list_add_tail(&new->node, &mdev->pending_list);
        spin_unlock_irqrestore(&mdev->lock, flags);
/**
 * msgdma_prep_memcpy - prepare descriptors for memcpy transaction
 * @dchan: DMA channel
 */
        spin_lock_irqsave(&mdev->lock, irqflags);
        if (desc_cnt > mdev->desc_free_cnt) {
                spin_unlock_irqrestore(&mdev->lock, irqflags);
                dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);

        mdev->desc_free_cnt -= desc_cnt;
        spin_unlock_irqrestore(&mdev->lock, irqflags);

                desc = &new->hw_desc;

                len -= copy;

                list_add_tail(&new->node, &first->tx_list);

        async_tx_ack(&first->async_tx);
        first->async_tx.flags = flags;

        return &first->async_tx;
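
/*
 * Illustrative client-side sketch (not part of the driver): a memcpy
 * transaction prepared by msgdma_prep_memcpy() above is normally driven
 * through the generic dmaengine API, roughly as below. Error handling is
 * trimmed and the function name is made up.
 */
static dma_cookie_t example_issue_memcpy(struct dma_chan *chan,
                                         dma_addr_t dst, dma_addr_t src,
                                         size_t len)
{
        struct dma_async_tx_descriptor *tx;
        dma_cookie_t cookie;

        tx = dmaengine_prep_dma_memcpy(chan, dst, src, len,
                                       DMA_CTRL_ACK | DMA_PREP_INTERRUPT);
        if (!tx)
                return -EBUSY;

        cookie = dmaengine_submit(tx);  /* ends up in msgdma_tx_submit() */
        dma_async_issue_pending(chan);  /* kicks msgdma_issue_pending() */

        return cookie;
}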
/**
 * msgdma_prep_slave_sg - prepare descriptors for a slave sg transaction
 *
 * @dchan: DMA channel
 * @dir: DMA transfer direction
 */
        struct dma_slave_config *cfg = &mdev->slave_cfg;

        spin_lock_irqsave(&mdev->lock, irqflags);
        if (desc_cnt > mdev->desc_free_cnt) {
                spin_unlock_irqrestore(&mdev->lock, irqflags);
                dev_dbg(mdev->dev, "mdev %p descs are not available\n", mdev);

        mdev->desc_free_cnt -= desc_cnt;
        spin_unlock_irqrestore(&mdev->lock, irqflags);

                desc = &new->hw_desc;

                        dma_src = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;
                        dma_dst = cfg->dst_addr;

                        dma_src = cfg->src_addr;
                        dma_dst = sg_dma_address(sgl) + sg_dma_len(sgl) - avail;

                avail -= len;

                list_add_tail(&new->node, &first->tx_list);

                sg_len--;

        first->async_tx.flags = flags;

        return &first->async_tx;
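
/*
 * Illustrative client-side sketch (not part of the driver): before
 * preparing a slave transfer, a consumer programs the fixed device-side
 * address via dmaengine_slave_config(), which this driver stores in
 * msgdma_dma_config() just below. "fifo_addr" is a made-up name.
 */
static struct dma_async_tx_descriptor *
example_prep_tx_to_dev(struct dma_chan *chan, struct scatterlist *sgl,
                       unsigned int sg_len, dma_addr_t fifo_addr)
{
        struct dma_slave_config cfg = {
                .direction      = DMA_MEM_TO_DEV,
                .dst_addr       = fifo_addr,
                .dst_addr_width = DMA_SLAVE_BUSWIDTH_4_BYTES,
        };

        if (dmaengine_slave_config(chan, &cfg))
                return NULL;

        return dmaengine_prep_slave_sg(chan, sgl, sg_len, DMA_MEM_TO_DEV,
                                       DMA_PREP_INTERRUPT);
}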
/* msgdma_dma_config() */
        memcpy(&mdev->slave_cfg, config, sizeof(*config));
/* msgdma_reset() */
        iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);
        iowrite32(MSGDMA_CSR_CTL_RESET, mdev->csr + MSGDMA_CSR_CONTROL);

        ret = readl_poll_timeout(mdev->csr + MSGDMA_CSR_STATUS, val,

                dev_err(mdev->dev, "DMA channel did not reset\n");

        iowrite32(MSGDMA_CSR_STAT_MASK, mdev->csr + MSGDMA_CSR_STATUS);

        /* Enable the DMA controller including interrupts */
                  MSGDMA_CSR_CTL_GLOBAL_INTR, mdev->csr + MSGDMA_CSR_CONTROL);

        mdev->idle = true;
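
/*
 * Illustrative sketch (not part of the driver): readl_poll_timeout() as
 * used above busy-polls a register until the supplied condition is true
 * or the timeout expires. A reset-wait helper assuming the driver's
 * MSGDMA_CSR_STAT_RESETTING bit might look like this; the 1 us poll
 * interval and 10 ms timeout are made-up values.
 */
static int example_wait_for_reset(void __iomem *csr)
{
        u32 val;

        return readl_poll_timeout(csr + MSGDMA_CSR_STATUS, val,
                                  !(val & MSGDMA_CSR_STAT_RESETTING),
                                  1, 10000);
}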
/* msgdma_copy_one() */
        void __iomem *hw_desc = mdev->desc;

        /*
         * Check if the DESC FIFO is not full. If it is full, we need to wait
         * for at least one entry to become free again.
         */
        while (ioread32(mdev->csr + MSGDMA_CSR_STATUS) &

        /*
         * The descriptor needs to get copied into the descriptor FIFO
         * of the DMA controller. The descriptor will get flushed to the
         * FIFO once the last word (the control word) is written. Since
         * memcpy() is not guaranteed to copy in ascending address
         * order (address from low to high) on all architectures, we make
         * the control word an explicit final write, additionally
         * adding some write-barriers here.
         */
        memcpy((void __force *)hw_desc, &desc->hw_desc,
               sizeof(desc->hw_desc) - sizeof(u32));

        /* Write control word last to flush this descriptor into the FIFO */
        mdev->idle = false;

        iowrite32(desc->hw_desc.control, hw_desc +
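
/*
 * Illustrative sketch (not part of the driver): writing the control word
 * last is the generic "publish the payload, then ring the doorbell" MMIO
 * pattern. All names below are made up.
 */
static void example_publish_then_ring(void __iomem *fifo, const u32 *payload,
                                      unsigned int nwords, u32 control)
{
        unsigned int i;

        for (i = 0; i < nwords; i++)
                iowrite32(payload[i], fifo + i * sizeof(u32));

        wmb();  /* make the payload visible before the control word */
        iowrite32(control, fifo + nwords * sizeof(u32));
}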
/**
 * msgdma_copy_desc_to_fifo - copy descriptor(s) into controller FIFO
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
        list_for_each_entry_safe(sdesc, next, &desc->tx_list, node)
/**
 * msgdma_start_transfer - Initiate the new transfer
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
        if (!mdev->idle)

        desc = list_first_entry_or_null(&mdev->pending_list,

        list_splice_tail_init(&mdev->pending_list, &mdev->active_list);
/**
 * msgdma_issue_pending - Issue pending transactions
 * @chan: DMA channel pointer
 */
        spin_lock_irqsave(&mdev->lock, flags);

        spin_unlock_irqrestore(&mdev->lock, flags);
/**
 * msgdma_chan_desc_cleanup - Cleanup the completed descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
        list_for_each_entry_safe(desc, next, &mdev->done_list, node) {

                list_del(&desc->node);

                callback = desc->async_tx.callback;
                callback_param = desc->async_tx.callback_param;

                spin_unlock(&mdev->lock);

                spin_lock(&mdev->lock);
/**
 * msgdma_complete_descriptor - Mark the active descriptor as complete
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
        desc = list_first_entry_or_null(&mdev->active_list,

        list_del(&desc->node);
        dma_cookie_complete(&desc->async_tx);
        list_add_tail(&desc->node, &mdev->done_list);
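
/*
 * Illustrative client-side sketch (not part of the driver): once
 * dma_cookie_complete() above has retired a cookie, a consumer polling
 * through the generic cookie API sees DMA_COMPLETE.
 */
static bool example_tx_done(struct dma_chan *chan, dma_cookie_t cookie)
{
        return dma_async_is_tx_complete(chan, cookie, NULL, NULL) ==
               DMA_COMPLETE;
}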
/**
 * msgdma_free_descriptors - Free channel descriptors
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
        msgdma_free_desc_list(mdev, &mdev->active_list);
        msgdma_free_desc_list(mdev, &mdev->pending_list);
        msgdma_free_desc_list(mdev, &mdev->done_list);
/**
 * msgdma_free_chan_resources - Free channel resources
 * @dchan: DMA channel pointer
 */
        spin_lock_irqsave(&mdev->lock, flags);

        spin_unlock_irqrestore(&mdev->lock, flags);
        kfree(mdev->sw_desq);
/**
 * msgdma_alloc_chan_resources - Allocate channel resources
 * @dchan: DMA channel
 */
        mdev->sw_desq = kcalloc(MSGDMA_DESC_NUM, sizeof(*desc), GFP_NOWAIT);
        if (!mdev->sw_desq)
                return -ENOMEM;

        mdev->idle = true;
        mdev->desc_free_cnt = MSGDMA_DESC_NUM;

        INIT_LIST_HEAD(&mdev->free_list);

                desc = mdev->sw_desq + i;
                dma_async_tx_descriptor_init(&desc->async_tx, &mdev->dmachan);
                desc->async_tx.tx_submit = msgdma_tx_submit;
                list_add_tail(&desc->node, &mdev->free_list);
/**
 * msgdma_tasklet - Schedule completion tasklet
 * @t: Pointer to the Altera mSGDMA channel structure
 */
        spin_lock_irqsave(&mdev->lock, flags);

        count = ioread32(mdev->csr + MSGDMA_CSR_RESP_FILL_LEVEL);
        dev_dbg(mdev->dev, "%s (%d): response count=%d\n",

        while (count--) {
                /*
                 * Read both longwords to purge this response from the FIFO.
                 * On Avalon-MM implementations, size and status do not
                 * have any real values, like transferred bytes or error
                 * bits. So we need to just drop these values.
                 */
                size = ioread32(mdev->resp + MSGDMA_RESP_BYTES_TRANSFERRED);
                status = ioread32(mdev->resp + MSGDMA_RESP_STATUS);

        spin_unlock_irqrestore(&mdev->lock, flags);
/**
 * msgdma_irq_handler - Altera mSGDMA Interrupt handler
 * @data: Pointer to the Altera mSGDMA device structure
 */
        status = ioread32(mdev->csr + MSGDMA_CSR_STATUS);

                /* Start next transfer if the DMA controller is idle */
                spin_lock(&mdev->lock);
                mdev->idle = true;

                spin_unlock(&mdev->lock);

        tasklet_schedule(&mdev->irq_tasklet);

        iowrite32(MSGDMA_CSR_STAT_IRQ, mdev->csr + MSGDMA_CSR_STATUS);
/**
 * msgdma_dev_remove - Device remove function
 * @mdev: Pointer to the Altera mSGDMA device structure
 */
        devm_free_irq(mdev->dev, mdev->irq, mdev);
        tasklet_kill(&mdev->irq_tasklet);
        list_del(&mdev->dmachan.device_node);
/* request_and_map() */
        struct device *device = &pdev->dev;

                return -ENODEV;

        region = devm_request_mem_region(device, (*res)->start,

                dev_err(device, "unable to request %s\n", name);
                return -EBUSY;

        *ptr = devm_ioremap(device, region->start,

                return -ENOMEM;
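
/*
 * Illustrative sketch (not part of the driver): on kernels that provide
 * devm_platform_ioremap_resource_byname(), the request+ioremap sequence
 * in request_and_map() above collapses into a single call.
 */
static int example_map_region(struct platform_device *pdev,
                              const char *name, void __iomem **ptr)
{
        *ptr = devm_platform_ioremap_resource_byname(pdev, name);

        return PTR_ERR_OR_ZERO(*ptr);
}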
/**
 * msgdma_probe - Driver probe function
 * @pdev: Pointer to the platform_device structure
 */
        mdev = devm_kzalloc(&pdev->dev, sizeof(*mdev), GFP_NOWAIT);

                return -ENOMEM;

        mdev->dev = &pdev->dev;

        /* Map CSR space */
        ret = request_and_map(pdev, "csr", &dma_res, &mdev->csr);

        /* Map (extended) descriptor space */
        ret = request_and_map(pdev, "desc", &dma_res, &mdev->desc);

        /* Map response space */
        ret = request_and_map(pdev, "resp", &dma_res, &mdev->resp);

        mdev->irq = platform_get_irq(pdev, 0);
        if (mdev->irq < 0)
                return -ENXIO;

        ret = devm_request_irq(&pdev->dev, mdev->irq, msgdma_irq_handler,
                               0, dev_name(&pdev->dev), mdev);

        tasklet_setup(&mdev->irq_tasklet, msgdma_tasklet);

        dma_cookie_init(&mdev->dmachan);

        spin_lock_init(&mdev->lock);

        INIT_LIST_HEAD(&mdev->active_list);
        INIT_LIST_HEAD(&mdev->pending_list);
        INIT_LIST_HEAD(&mdev->done_list);
        INIT_LIST_HEAD(&mdev->free_list);

        dma_dev = &mdev->dmadev;

        /* Set DMA capabilities */
        dma_cap_zero(dma_dev->cap_mask);
        dma_cap_set(DMA_MEMCPY, dma_dev->cap_mask);
        dma_cap_set(DMA_SLAVE, dma_dev->cap_mask);

        dma_dev->src_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        dma_dev->dst_addr_widths = BIT(DMA_SLAVE_BUSWIDTH_4_BYTES);
        dma_dev->directions = BIT(DMA_MEM_TO_DEV) | BIT(DMA_DEV_TO_MEM) |

        dma_dev->residue_granularity = DMA_RESIDUE_GRANULARITY_DESCRIPTOR;

        /* Init DMA link list */
        INIT_LIST_HEAD(&dma_dev->channels);

        dma_dev->device_tx_status = dma_cookie_status;
        dma_dev->device_issue_pending = msgdma_issue_pending;
        dma_dev->dev = &pdev->dev;

        dma_dev->copy_align = DMAENGINE_ALIGN_4_BYTES;
        dma_dev->device_prep_dma_memcpy = msgdma_prep_memcpy;
        dma_dev->device_prep_slave_sg = msgdma_prep_slave_sg;
        dma_dev->device_config = msgdma_dma_config;

        dma_dev->device_alloc_chan_resources = msgdma_alloc_chan_resources;
        dma_dev->device_free_chan_resources = msgdma_free_chan_resources;

        mdev->dmachan.device = dma_dev;
        list_add_tail(&mdev->dmachan.device_node, &dma_dev->channels);

        /* Set DMA mask to 64 bits */
        ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));

                dev_warn(&pdev->dev, "unable to set coherent mask to 64");
                ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));

        dev_notice(&pdev->dev, "Altera mSGDMA driver probe success\n");
/**
 * msgdma_remove - Driver remove function
 * @pdev: Pointer to the platform_device structure
 */
        dma_async_device_unregister(&mdev->dmadev);

        dev_notice(&pdev->dev, "Altera mSGDMA driver removed\n");
        .name = "altera-msgdma",

MODULE_ALIAS("platform:altera-msgdma");