/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * SiFive FU540 Platform DMA driver
 * Copyright (C) 2019 SiFive
 *
 * Based partially on:
 * - drivers/dma/fsl-edma.c
 * - drivers/dma/dw-edma/
 * - drivers/dma/pxa-dma.c
 *
 * See the following sources for further documentation:
 * - Chapter 12 "Platform DMA Engine (PDMA)" of
 *   SiFive FU540-C000 v1.0
 *   https://static.dev.sifive.com/FU540-C000-v1.0.pdf
 */
#ifndef _SF_PDMA_H
#define _SF_PDMA_H

#include <linux/dmaengine.h>
#include <linux/dma-direction.h>

#include "../dmaengine.h"
#include "../virt-dma.h"

#define PDMA_NR_CH			4

#if (PDMA_NR_CH != 4)
#error "Please define PDMA_NR_CH to 4"
#endif

#define PDMA_BASE_ADDR			0x3000000
#define PDMA_CHAN_OFFSET		0x1000

/* Register Offset */
#define PDMA_CTRL			0x000
#define PDMA_XFER_TYPE			0x004
#define PDMA_XFER_SIZE			0x008
#define PDMA_DST_ADDR			0x010
#define PDMA_SRC_ADDR			0x018
#define PDMA_ACT_TYPE			0x104 /* Read-only */
#define PDMA_REMAINING_BYTE		0x108 /* Read-only */
#define PDMA_CUR_DST_ADDR		0x110 /* Read-only */
#define PDMA_CUR_SRC_ADDR		0x118 /* Read-only */

/* CTRL */
#define PDMA_CLEAR_CTRL			0x0
#define PDMA_CLAIM_MASK			GENMASK(0, 0)
#define PDMA_RUN_MASK			GENMASK(1, 1)
#define PDMA_ENABLE_DONE_INT_MASK	GENMASK(14, 14)
#define PDMA_ENABLE_ERR_INT_MASK	GENMASK(15, 15)
#define PDMA_DONE_STATUS_MASK		GENMASK(30, 30)
#define PDMA_ERR_STATUS_MASK		GENMASK(31, 31)

/* Transfer Type */
#define PDMA_FULL_SPEED			0xFF000008

/* Error Recovery */
#define MAX_RETRY			1

#define SF_PDMA_REG_BASE(ch)	(pdma->membase + (PDMA_CHAN_OFFSET * (ch)))

struct pdma_regs {
	/* read-write regs */
	void __iomem *ctrl;		/* 4 bytes */

	void __iomem *xfer_type;	/* 4 bytes */
	void __iomem *xfer_size;	/* 8 bytes */
	void __iomem *dst_addr;		/* 8 bytes */
	void __iomem *src_addr;		/* 8 bytes */

	/* read-only */
	void __iomem *act_type;		/* 4 bytes */
	void __iomem *residue;		/* 8 bytes */
	void __iomem *cur_dst_addr;	/* 8 bytes */
	void __iomem *cur_src_addr;	/* 8 bytes */
};

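/*
 * Illustrative sketch (not part of the upstream header): one plausible way to
 * bind a channel's MMIO block to struct pdma_regs using the register offsets
 * above. The helper name sf_pdma_sketch_map_regs() and the chan_base argument
 * are hypothetical; the driver is expected to perform an equivalent per-channel
 * mapping (e.g. starting from SF_PDMA_REG_BASE(ch)) when it sets up channels.
 */
static inline void sf_pdma_sketch_map_regs(struct pdma_regs *regs,
					   void __iomem *chan_base)
{
	/* read-write registers */
	regs->ctrl = chan_base + PDMA_CTRL;
	regs->xfer_type = chan_base + PDMA_XFER_TYPE;
	regs->xfer_size = chan_base + PDMA_XFER_SIZE;
	regs->dst_addr = chan_base + PDMA_DST_ADDR;
	regs->src_addr = chan_base + PDMA_SRC_ADDR;

	/* read-only status registers */
	regs->act_type = chan_base + PDMA_ACT_TYPE;
	regs->residue = chan_base + PDMA_REMAINING_BYTE;
	regs->cur_dst_addr = chan_base + PDMA_CUR_DST_ADDR;
	regs->cur_src_addr = chan_base + PDMA_CUR_SRC_ADDR;
}
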
struct sf_pdma_desc {
	u32				xfer_type;
	u64				xfer_size;
	u64				dst_addr;
	u64				src_addr;
	struct virt_dma_desc		vdesc;
	struct sf_pdma_chan		*chan;
	bool				in_use;
	enum dma_transfer_direction	dirn;
	struct dma_async_tx_descriptor *async_tx;
};

enum sf_pdma_pm_state {
	RUNNING = 0,
	SUSPENDED,
};

struct sf_pdma_chan {
	struct virt_dma_chan		vchan;
	enum dma_status			status;
	enum sf_pdma_pm_state		pm_state;
	u32				slave_id;
	struct sf_pdma			*pdma;
	struct sf_pdma_desc		*desc;
	struct dma_slave_config		cfg;
	u32				attr;
	dma_addr_t			dma_dev_addr;
	u32				dma_dev_size;
	struct tasklet_struct		done_tasklet;
	struct tasklet_struct		err_tasklet;
	struct pdma_regs		regs;
	spinlock_t			lock; /* protect chan data */
	bool				xfer_err;
	int				txirq;
	int				errirq;
	int				retries;
};

struct sf_pdma {
	struct dma_device		dma_dev;
	void __iomem			*membase;
	void __iomem			*mappedbase;
	u32				n_chans;
	struct sf_pdma_chan		chans[PDMA_NR_CH];
};

#endif /* _SF_PDMA_H */