/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Qualcomm Technologies HIDMA data structures
 *
 * Copyright (c) 2014-2016, The Linux Foundation. All rights reserved.
 */

#ifndef QCOM_HIDMA_H
#define QCOM_HIDMA_H

#include <linux/kfifo.h>
#include <linux/interrupt.h>
#include <linux/dmaengine.h>

#define HIDMA_TRE_SIZE			32 /* each TRE is 32 bytes */
#define HIDMA_TRE_CFG_IDX		0
#define HIDMA_TRE_LEN_IDX		1
#define HIDMA_TRE_SRC_LOW_IDX		2
#define HIDMA_TRE_SRC_HI_IDX		3
#define HIDMA_TRE_DEST_LOW_IDX		4
#define HIDMA_TRE_DEST_HI_IDX		5

enum tre_type {
	HIDMA_TRE_MEMCPY = 3,
	HIDMA_TRE_MEMSET = 4,
};

struct hidma_tre {
	atomic_t allocated;		/* if this channel is allocated	    */
	bool queued;			/* flag whether this is pending	    */
	u16 status;			/* status			    */
	u32 idx;			/* index of the tre		    */
	u32 dma_sig;			/* signature of the tre		    */
	const char *dev_name;		/* name of the device		    */
	void (*callback)(void *data);	/* requester callback		    */
	void *data;			/* Data associated with this channel*/
	struct hidma_lldev *lldev;	/* lldma device pointer		    */
	u32 tre_local[HIDMA_TRE_SIZE / sizeof(u32) + 1]; /* TRE local copy  */
	u32 tre_index;			/* the offset where this was written*/
	u32 int_flags;			/* interrupt flags		    */
	u8 err_info;			/* error record in this transfer    */
	u8 err_code;			/* completion code		    */
};

struct hidma_lldev {
	bool msi_support;		/* flag indicating MSI support	    */
	bool initialized;		/* initialized flag		    */
	u8 trch_state;			/* trch_state of the device	    */
	u8 evch_state;			/* evch_state of the device	    */
	u8 chidx;			/* channel index in the core	    */
	u32 nr_tres;			/* max number of configs	    */
	spinlock_t lock;		/* reentrancy			    */
	struct hidma_tre *trepool;	/* trepool of user configs	    */
	struct device *dev;		/* device			    */
	void __iomem *trca;		/* Transfer Channel address	    */
	void __iomem *evca;		/* Event Channel address	    */
	struct hidma_tre
		**pending_tre_list;	/* Pointers to pending TREs	    */
	atomic_t pending_tre_count;	/* Number of TREs pending	    */

	void *tre_ring;			/* TRE ring			    */
	dma_addr_t tre_dma;		/* TRE ring to be shared with HW    */
	u32 tre_ring_size;		/* Byte size of the ring	    */
	u32 tre_processed_off;		/* last processed TRE		    */

	void *evre_ring;		/* EVRE ring			    */
	dma_addr_t evre_dma;		/* EVRE ring to be shared with HW   */
	u32 evre_ring_size;		/* Byte size of the ring	    */
	u32 evre_processed_off;		/* last processed EVRE		    */

	u32 tre_write_offset;		/* TRE write location		    */
	struct tasklet_struct task;	/* task delivering notifications    */
	DECLARE_KFIFO_PTR(handoff_fifo,
		struct hidma_tre *);	/* pending TREs FIFO		    */
};

struct hidma_desc {
	struct dma_async_tx_descriptor	desc;
	/* link list node for this channel*/
	struct list_head		node;
	u32				tre_ch;
};

struct hidma_chan {
	bool				paused;
	bool				allocated;
	char				dbg_name[16];
	u32				dma_sig;
	dma_cookie_t			last_success;

	/*
	 * active descriptor on this channel
	 * It is used by the DMA complete notification to
	 * locate the descriptor that initiated the transfer.
	 */
	struct hidma_dev		*dmadev;
	struct hidma_desc		*running;

	struct dma_chan			chan;
	struct list_head		free;
	struct list_head		prepared;
	struct list_head		queued;
	struct list_head		active;
	struct list_head		completed;

	/* Lock for this structure */
	spinlock_t			lock;
};

struct hidma_dev {
	int				irq;
	int				chidx;
	u32				nr_descriptors;
	int				msi_virqbase;

	struct hidma_lldev		*lldev;
	void				__iomem *dev_trca;
	struct resource			*trca_resource;
	void				__iomem *dev_evca;
	struct resource			*evca_resource;

	/* used to protect the pending channel list*/
	spinlock_t			lock;
	struct dma_device		ddev;

	struct dentry			*debugfs;

	/* sysfs entry for the channel id */
	struct device_attribute		*chid_attrs;

	/* Task delivering issue_pending */
	struct tasklet_struct		task;
};

int hidma_ll_request(struct hidma_lldev *llhndl, u32 dev_id,
			const char *dev_name,
			void (*callback)(void *data), void *data, u32 *tre_ch);

void hidma_ll_free(struct hidma_lldev *llhndl, u32 tre_ch);
enum dma_status hidma_ll_status(struct hidma_lldev *llhndl, u32 tre_ch);
bool hidma_ll_isenabled(struct hidma_lldev *llhndl);
void hidma_ll_queue_request(struct hidma_lldev *llhndl, u32 tre_ch);
void hidma_ll_start(struct hidma_lldev *llhndl);
int hidma_ll_disable(struct hidma_lldev *lldev);
int hidma_ll_enable(struct hidma_lldev *llhndl);
void hidma_ll_set_transfer_params(struct hidma_lldev *llhndl, u32 tre_ch,
	dma_addr_t src, dma_addr_t dest, u32 len, u32 flags, u32 txntype);
void hidma_ll_setup_irq(struct hidma_lldev *lldev, bool msi);
int hidma_ll_setup(struct hidma_lldev *lldev);
struct hidma_lldev *hidma_ll_init(struct device *dev, u32 max_channels,
			void __iomem *trca, void __iomem *evca,
			u8 chidx);
int hidma_ll_uninit(struct hidma_lldev *llhndl);
irqreturn_t hidma_ll_inthandler(int irq, void *arg);
irqreturn_t hidma_ll_inthandler_msi(int irq, void *arg, int cause);
void hidma_cleanup_pending_tre(struct hidma_lldev *llhndl, u8 err_info,
				u8 err_code);
void hidma_debug_init(struct hidma_dev *dmadev);
void hidma_debug_uninit(struct hidma_dev *dmadev);
#endif
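
/*
 * Illustrative only: a rough sketch of one plausible call order for the
 * low-level hidma_ll_* API, inferred solely from the prototypes above.
 * The actual upper-layer driver that consumes this header may sequence
 * these calls differently; names such as dev, nr_tres, trca, evca, chidx,
 * dma_sig, src, dest, len and flags are placeholders, not values defined
 * in this header.
 *
 *	struct hidma_lldev *lldev;
 *	u32 tre_ch;
 *
 *	lldev = hidma_ll_init(dev, nr_tres, trca, evca, chidx);
 *	hidma_ll_setup(lldev);
 *	hidma_ll_enable(lldev);
 *
 *	hidma_ll_request(lldev, dma_sig, "chan0", callback, data, &tre_ch);
 *	hidma_ll_set_transfer_params(lldev, tre_ch, src, dest, len, flags,
 *				     HIDMA_TRE_MEMCPY);
 *	hidma_ll_queue_request(lldev, tre_ch);
 *	hidma_ll_start(lldev);
 *
 *	// completion observed via the callback or hidma_ll_status(lldev, tre_ch)
 *
 *	hidma_ll_free(lldev, tre_ch);
 *	hidma_ll_disable(lldev);
 *	hidma_ll_uninit(lldev);
 */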