xref: /OK3568_Linux_fs/kernel/arch/arm/include/asm/dma.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0 */
2*4882a593Smuzhiyun #ifndef __ASM_ARM_DMA_H
3*4882a593Smuzhiyun #define __ASM_ARM_DMA_H
4*4882a593Smuzhiyun 
5*4882a593Smuzhiyun /*
6*4882a593Smuzhiyun  * This is the maximum virtual address which can be DMA'd from.
7*4882a593Smuzhiyun  */
8*4882a593Smuzhiyun #ifndef CONFIG_ZONE_DMA
9*4882a593Smuzhiyun #define MAX_DMA_ADDRESS	0xffffffffUL
10*4882a593Smuzhiyun #else
11*4882a593Smuzhiyun #define MAX_DMA_ADDRESS	({ \
12*4882a593Smuzhiyun 	extern phys_addr_t arm_dma_zone_size; \
13*4882a593Smuzhiyun 	arm_dma_zone_size && arm_dma_zone_size < (0x100000000ULL - PAGE_OFFSET) ? \
14*4882a593Smuzhiyun 		(PAGE_OFFSET + arm_dma_zone_size) : 0xffffffffUL; })
15*4882a593Smuzhiyun #endif
16*4882a593Smuzhiyun 
17*4882a593Smuzhiyun #ifdef CONFIG_ISA_DMA_API
18*4882a593Smuzhiyun /*
19*4882a593Smuzhiyun  * This is used to support drivers written for the x86 ISA DMA API.
20*4882a593Smuzhiyun  * It should not be re-used except for that purpose.
21*4882a593Smuzhiyun  */
22*4882a593Smuzhiyun #include <linux/spinlock.h>
23*4882a593Smuzhiyun #include <linux/scatterlist.h>
24*4882a593Smuzhiyun 
25*4882a593Smuzhiyun #include <mach/isa-dma.h>
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun /*
28*4882a593Smuzhiyun  * The DMA modes reflect the settings for the ISA DMA controller
29*4882a593Smuzhiyun  */
30*4882a593Smuzhiyun #define DMA_MODE_MASK	 0xcc
31*4882a593Smuzhiyun 
32*4882a593Smuzhiyun #define DMA_MODE_READ	 0x44
33*4882a593Smuzhiyun #define DMA_MODE_WRITE	 0x48
34*4882a593Smuzhiyun #define DMA_MODE_CASCADE 0xc0
35*4882a593Smuzhiyun #define DMA_AUTOINIT	 0x10
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun extern raw_spinlock_t  dma_spin_lock;
38*4882a593Smuzhiyun 
claim_dma_lock(void)39*4882a593Smuzhiyun static inline unsigned long claim_dma_lock(void)
40*4882a593Smuzhiyun {
41*4882a593Smuzhiyun 	unsigned long flags;
42*4882a593Smuzhiyun 	raw_spin_lock_irqsave(&dma_spin_lock, flags);
43*4882a593Smuzhiyun 	return flags;
44*4882a593Smuzhiyun }
45*4882a593Smuzhiyun 
release_dma_lock(unsigned long flags)46*4882a593Smuzhiyun static inline void release_dma_lock(unsigned long flags)
47*4882a593Smuzhiyun {
48*4882a593Smuzhiyun 	raw_spin_unlock_irqrestore(&dma_spin_lock, flags);
49*4882a593Smuzhiyun }
50*4882a593Smuzhiyun 
/* Clear the 'DMA Pointer Flip Flop'.
 * Write 0 for LSB/MSB, 1 for MSB/LSB access.
 *
 * Expands to nothing on ARM; kept so ISA drivers compile unchanged.
 */
#define clear_dma_ff(chan)

/* Set only the page register bits of the transfer address.
 *
 * NOTE: This is an architecture specific function, and should
 *       be hidden from the drivers
 */
extern void set_dma_page(unsigned int chan, char pagenr);

/* Request a DMA channel
 *
 * Some architectures may need to allocate an interrupt as well.
 */
extern int  request_dma(unsigned int chan, const char * device_id);

/* Free a DMA channel
 *
 * Some architectures may need to free an interrupt as well.
 */
extern void free_dma(unsigned int chan);

/* Enable DMA for this channel
 *
 * On some architectures, this may have other side effects like
 * enabling an interrupt and setting the DMA registers.
 */
extern void enable_dma(unsigned int chan);

/* Disable DMA for this channel
 *
 * On some architectures, this may have other side effects like
 * disabling an interrupt or whatever.
 */
extern void disable_dma(unsigned int chan);

/* Test whether the specified channel has an active DMA transfer
 */
extern int dma_channel_active(unsigned int chan);

/* Set the DMA scatter gather list for this channel
 *
 * This should not be called if a DMA channel is enabled,
 * especially since some DMA architectures don't update the
 * DMA address immediately, but defer it to the enable_dma().
 */
extern void set_dma_sg(unsigned int chan, struct scatterlist *sg, int nr_sg);

/* Set the DMA address for this channel
 *
 * This should not be called if a DMA channel is enabled,
 * especially since some DMA architectures don't update the
 * DMA address immediately, but defer it to the enable_dma().
 */
extern void __set_dma_addr(unsigned int chan, void *addr);

/* Driver-facing wrapper: takes a bus address and converts it to the
 * kernel virtual address __set_dma_addr() expects.
 */
#define set_dma_addr(chan, addr)				\
	__set_dma_addr(chan, (void *)__bus_to_virt(addr))

/* Set the DMA byte count for this channel
 *
 * This should not be called if a DMA channel is enabled,
 * especially since some DMA architectures don't update the
 * DMA count immediately, but defer it to the enable_dma().
 */
extern void set_dma_count(unsigned int chan, unsigned long count);

119*4882a593Smuzhiyun /* Set the transfer direction for this channel
120*4882a593Smuzhiyun  *
121*4882a593Smuzhiyun  * This should not be called if a DMA channel is enabled,
122*4882a593Smuzhiyun  * especially since some DMA architectures don't update the
123*4882a593Smuzhiyun  * DMA transfer direction immediately, but defer it to the
124*4882a593Smuzhiyun  * enable_dma().
125*4882a593Smuzhiyun  */
126*4882a593Smuzhiyun extern void set_dma_mode(unsigned int chan, unsigned int mode);
127*4882a593Smuzhiyun 
128*4882a593Smuzhiyun /* Set the transfer speed for this channel
129*4882a593Smuzhiyun  */
130*4882a593Smuzhiyun extern void set_dma_speed(unsigned int chan, int cycle_ns);
131*4882a593Smuzhiyun 
132*4882a593Smuzhiyun /* Get DMA residue count. After a DMA transfer, this
133*4882a593Smuzhiyun  * should return zero. Reading this while a DMA transfer is
134*4882a593Smuzhiyun  * still in progress will return unpredictable results.
135*4882a593Smuzhiyun  * If called before the channel has been used, it may return 1.
136*4882a593Smuzhiyun  * Otherwise, it returns the number of _bytes_ left to transfer.
137*4882a593Smuzhiyun  */
138*4882a593Smuzhiyun extern int  get_dma_residue(unsigned int chan);
139*4882a593Smuzhiyun 
140*4882a593Smuzhiyun #ifndef NO_DMA
141*4882a593Smuzhiyun #define NO_DMA	255
142*4882a593Smuzhiyun #endif
143*4882a593Smuzhiyun 
144*4882a593Smuzhiyun #endif /* CONFIG_ISA_DMA_API */
145*4882a593Smuzhiyun 
146*4882a593Smuzhiyun #ifdef CONFIG_PCI
147*4882a593Smuzhiyun extern int isa_dma_bridge_buggy;
148*4882a593Smuzhiyun #else
149*4882a593Smuzhiyun #define isa_dma_bridge_buggy    (0)
150*4882a593Smuzhiyun #endif
151*4882a593Smuzhiyun 
152*4882a593Smuzhiyun #endif /* __ASM_ARM_DMA_H */
153