// SPDX-License-Identifier: GPL-2.0-only
/*
 *  arch/arm/common/dmabounce.c
 *
 *  Special dma_{map/unmap/dma_sync}_* routines for systems that have
 *  limited DMA windows. These functions utilize bounce buffers to
 *  copy data to/from buffers located outside the DMA region. This
 *  only works for systems in which DMA memory is at the bottom of
 *  RAM, the remainder of memory is at the top and the DMA memory
 *  can be marked as ZONE_DMA. Anything beyond that such as discontiguous
 *  DMA windows will require custom implementations that reserve memory
 *  areas at early bootup.
 *
 *  Original version by Brad Parker (brad@heeltoe.com)
 *  Re-written by Christopher Hoover <ch@murgatroid.com>
 *  Made generic by Deepak Saxena <dsaxena@plexity.net>
 *
 *  Copyright (C) 2002 Hewlett Packard Company.
 *  Copyright (C) 2004 MontaVista Software, Inc.
 */
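
/*
 * Typical usage (illustrative sketch only -- the callback name and the
 * 64 MiB / pool-size values below are hypothetical, not part of this file):
 * a platform with a limited DMA window registers each affected device at
 * boot and supplies a callback that reports which bus addresses must be
 * bounced.
 *
 *	static int my_needs_bounce(struct device *dev, dma_addr_t addr,
 *				   size_t size)
 *	{
 *		return (addr + size) > SZ_64M;	// window ends at 64 MiB
 *	}
 *
 *	// small requests come from a 512-byte pool, larger ones from 4 KiB
 *	dmabounce_register_dev(dev, 512, SZ_4K, my_needs_bounce);
 *
 * dmabounce_unregister_dev() undoes the registration once all mappings
 * have been torn down.
 */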

#include <linux/module.h>
#include <linux/init.h>
#include <linux/slab.h>
#include <linux/page-flags.h>
#include <linux/device.h>
#include <linux/dma-direct.h>
#include <linux/dma-map-ops.h>
#include <linux/dmapool.h>
#include <linux/list.h>
#include <linux/scatterlist.h>

#include <asm/cacheflush.h>
#include <asm/dma-iommu.h>

#undef STATS

#ifdef STATS
#define DO_STATS(X) do { X ; } while (0)
#else
#define DO_STATS(X) do { } while (0)
#endif

/* ************************************************** */

struct safe_buffer {
	struct list_head node;

	/* original request */
	void		*ptr;
	size_t		size;
	int		direction;

	/* safe buffer info */
	struct dmabounce_pool *pool;
	void		*safe;
	dma_addr_t	safe_dma_addr;
};

struct dmabounce_pool {
	unsigned long	size;
	struct dma_pool	*pool;
#ifdef STATS
	unsigned long	allocs;
#endif
};

struct dmabounce_device_info {
	struct device *dev;
	struct list_head safe_buffers;
#ifdef STATS
	unsigned long total_allocs;
	unsigned long map_op_count;
	unsigned long bounce_count;
	int attr_res;
#endif
	struct dmabounce_pool	small;
	struct dmabounce_pool	large;

	rwlock_t lock;

	int (*needs_bounce)(struct device *, dma_addr_t, size_t);
};

#ifdef STATS
static ssize_t dmabounce_show(struct device *dev, struct device_attribute *attr,
			      char *buf)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	return sprintf(buf, "%lu %lu %lu %lu %lu %lu\n",
		device_info->small.allocs,
		device_info->large.allocs,
		device_info->total_allocs - device_info->small.allocs -
			device_info->large.allocs,
		device_info->total_allocs,
		device_info->map_op_count,
		device_info->bounce_count);
}

static DEVICE_ATTR(dmabounce_stats, 0400, dmabounce_show, NULL);
#endif


/* allocate a 'safe' buffer and keep track of it */
static inline struct safe_buffer *
alloc_safe_buffer(struct dmabounce_device_info *device_info, void *ptr,
		  size_t size, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	struct dmabounce_pool *pool;
	struct device *dev = device_info->dev;
	unsigned long flags;

	dev_dbg(dev, "%s(ptr=%p, size=%d, dir=%d)\n",
		__func__, ptr, size, dir);

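	/*
	 * Pick the smallest pool that can hold the request; anything larger
	 * than both pools falls back to a one-off dma_alloc_coherent().
	 */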
	if (size <= device_info->small.size) {
		pool = &device_info->small;
	} else if (size <= device_info->large.size) {
		pool = &device_info->large;
	} else {
		pool = NULL;
	}

	buf = kmalloc(sizeof(struct safe_buffer), GFP_ATOMIC);
	if (buf == NULL) {
		dev_warn(dev, "%s: kmalloc failed\n", __func__);
		return NULL;
	}

	buf->ptr = ptr;
	buf->size = size;
	buf->direction = dir;
	buf->pool = pool;

	if (pool) {
		buf->safe = dma_pool_alloc(pool->pool, GFP_ATOMIC,
					   &buf->safe_dma_addr);
	} else {
		buf->safe = dma_alloc_coherent(dev, size, &buf->safe_dma_addr,
					       GFP_ATOMIC);
	}

	if (buf->safe == NULL) {
		dev_warn(dev,
			 "%s: could not alloc dma memory (size=%d)\n",
			 __func__, size);
		kfree(buf);
		return NULL;
	}

#ifdef STATS
	if (pool)
		pool->allocs++;
	device_info->total_allocs++;
#endif

	write_lock_irqsave(&device_info->lock, flags);
	list_add(&buf->node, &device_info->safe_buffers);
	write_unlock_irqrestore(&device_info->lock, flags);

	return buf;
}

/* determine if a buffer is from our "safe" pool */
static inline struct safe_buffer *
find_safe_buffer(struct dmabounce_device_info *device_info, dma_addr_t safe_dma_addr)
{
	struct safe_buffer *b, *rb = NULL;
	unsigned long flags;

	read_lock_irqsave(&device_info->lock, flags);

	list_for_each_entry(b, &device_info->safe_buffers, node)
		if (b->safe_dma_addr <= safe_dma_addr &&
		    b->safe_dma_addr + b->size > safe_dma_addr) {
			rb = b;
			break;
		}

	read_unlock_irqrestore(&device_info->lock, flags);
	return rb;
}

static inline void
free_safe_buffer(struct dmabounce_device_info *device_info, struct safe_buffer *buf)
{
	unsigned long flags;

	dev_dbg(device_info->dev, "%s(buf=%p)\n", __func__, buf);

	write_lock_irqsave(&device_info->lock, flags);

	list_del(&buf->node);

	write_unlock_irqrestore(&device_info->lock, flags);

	if (buf->pool)
		dma_pool_free(buf->pool->pool, buf->safe, buf->safe_dma_addr);
	else
		dma_free_coherent(device_info->dev, buf->size, buf->safe,
				    buf->safe_dma_addr);

	kfree(buf);
}

/* ************************************************** */

static struct safe_buffer *find_safe_buffer_dev(struct device *dev,
		dma_addr_t dma_addr, const char *where)
{
	if (!dev || !dev->archdata.dmabounce)
		return NULL;
	if (dma_mapping_error(dev, dma_addr)) {
		dev_err(dev, "Trying to %s invalid mapping\n", where);
		return NULL;
	}
	return find_safe_buffer(dev->archdata.dmabounce, dma_addr);
}

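/*
 * Returns a negative errno if the request can never fit in the device's
 * DMA window, 1 if the address range must be bounced, and 0 if the buffer
 * can be used for DMA directly.
 */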
static int needs_bounce(struct device *dev, dma_addr_t dma_addr, size_t size)
{
	if (!dev || !dev->archdata.dmabounce)
		return 0;

	if (dev->dma_mask) {
		unsigned long limit, mask = *dev->dma_mask;

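		/*
		 * For a contiguous low mask (2^n - 1) this yields the window
		 * size 2^n; a full 32-bit mask wraps to 0, meaning no size
		 * limit applies.
		 */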
		limit = (mask + 1) & ~mask;
		if (limit && size > limit) {
			dev_err(dev, "DMA mapping too big (requested %#x "
				"mask %#Lx)\n", size, *dev->dma_mask);
			return -E2BIG;
		}

		/* Figure out if we need to bounce from the DMA mask. */
		if ((dma_addr | (dma_addr + size - 1)) & ~mask)
			return 1;
	}

	return !!dev->archdata.dmabounce->needs_bounce(dev, dma_addr, size);
}

static inline dma_addr_t map_single(struct device *dev, void *ptr, size_t size,
				    enum dma_data_direction dir,
				    unsigned long attrs)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;
	struct safe_buffer *buf;

	if (device_info)
		DO_STATS ( device_info->map_op_count++ );

	buf = alloc_safe_buffer(device_info, ptr, size, dir);
	if (buf == NULL) {
		dev_err(dev, "%s: unable to map unsafe buffer %p!\n",
		       __func__, ptr);
		return DMA_MAPPING_ERROR;
	}

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	if ((dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) &&
	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		dev_dbg(dev, "%s: copy unsafe %p to safe %p, size %d\n",
			__func__, ptr, buf->safe, size);
		memcpy(buf->safe, ptr, size);
	}

	return buf->safe_dma_addr;
}

static inline void unmap_single(struct device *dev, struct safe_buffer *buf,
				size_t size, enum dma_data_direction dir,
				unsigned long attrs)
{
	BUG_ON(buf->size != size);
	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr),
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if ((dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) &&
	    !(attrs & DMA_ATTR_SKIP_CPU_SYNC)) {
		void *ptr = buf->ptr;

		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe, ptr, size);
		memcpy(ptr, buf->safe, size);

		/*
		 * Since we may have written to a page cache page,
		 * we need to ensure that the data will be coherent
		 * with user mappings.
		 */
		__cpuc_flush_dcache_area(ptr, size);
	}
	free_safe_buffer(dev->archdata.dmabounce, buf);
}

/* ************************************************** */

/*
 * see if a buffer address is in an 'unsafe' range.  if it is
 * allocate a 'safe' buffer and copy the unsafe buffer into it.
 * substitute the safe buffer for the unsafe one.
 * (basically move the buffer from an unsafe area to a safe one)
 */
static dma_addr_t dmabounce_map_page(struct device *dev, struct page *page,
		unsigned long offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	dma_addr_t dma_addr;
	int ret;

	dev_dbg(dev, "%s(page=%p,off=%#lx,size=%zx,dir=%x)\n",
		__func__, page, offset, size, dir);

	dma_addr = pfn_to_dma(dev, page_to_pfn(page)) + offset;

	ret = needs_bounce(dev, dma_addr, size);
	if (ret < 0)
		return DMA_MAPPING_ERROR;

	if (ret == 0) {
		arm_dma_ops.sync_single_for_device(dev, dma_addr, size, dir);
		return dma_addr;
	}

	if (PageHighMem(page)) {
		dev_err(dev, "DMA buffer bouncing of HIGHMEM pages is not supported\n");
		return DMA_MAPPING_ERROR;
	}

	return map_single(dev, page_address(page) + offset, size, dir, attrs);
}

/*
 * see if a mapped address was really a "safe" buffer and if so, copy
 * the data from the safe buffer back to the unsafe buffer and free up
 * the safe buffer.  (basically return things back to the way they
 * should be)
 */
static void dmabounce_unmap_page(struct device *dev, dma_addr_t dma_addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	struct safe_buffer *buf;

	dev_dbg(dev, "%s(dma=%#x,size=%d,dir=%x)\n",
		__func__, dma_addr, size, dir);

	buf = find_safe_buffer_dev(dev, dma_addr, __func__);
	if (!buf) {
		arm_dma_ops.sync_single_for_cpu(dev, dma_addr, size, dir);
		return;
	}

	unmap_single(dev, buf, size, dir, attrs);
}

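/*
 * Returns 0 if @addr lies inside one of this device's bounce buffers (in
 * which case the copy back to the original buffer, if needed, is done
 * here), or 1 so that the caller falls through to the ordinary cache
 * maintenance.
 */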
static int __dmabounce_sync_for_cpu(struct device *dev, dma_addr_t addr,
		size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	unsigned long off;

	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
		__func__, addr, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	off = addr - buf->safe_dma_addr;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_FROM_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy back safe %p to unsafe %p size %d\n",
			__func__, buf->safe + off, buf->ptr + off, sz);
		memcpy(buf->ptr + off, buf->safe + off, sz);
	}
	return 0;
}

static void dmabounce_sync_for_cpu(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__dmabounce_sync_for_cpu(dev, handle, size, dir))
		return;

	arm_dma_ops.sync_single_for_cpu(dev, handle, size, dir);
}

static int __dmabounce_sync_for_device(struct device *dev, dma_addr_t addr,
		size_t sz, enum dma_data_direction dir)
{
	struct safe_buffer *buf;
	unsigned long off;

	dev_dbg(dev, "%s(dma=%#x,sz=%zx,dir=%x)\n",
		__func__, addr, sz, dir);

	buf = find_safe_buffer_dev(dev, addr, __func__);
	if (!buf)
		return 1;

	off = addr - buf->safe_dma_addr;

	BUG_ON(buf->direction != dir);

	dev_dbg(dev, "%s: unsafe buffer %p (dma=%#x off=%#lx) mapped to %p (dma=%#x)\n",
		__func__, buf->ptr, virt_to_dma(dev, buf->ptr), off,
		buf->safe, buf->safe_dma_addr);

	DO_STATS(dev->archdata.dmabounce->bounce_count++);

	if (dir == DMA_TO_DEVICE || dir == DMA_BIDIRECTIONAL) {
		dev_dbg(dev, "%s: copy out unsafe %p to safe %p, size %d\n",
			__func__, buf->ptr + off, buf->safe + off, sz);
		memcpy(buf->safe + off, buf->ptr + off, sz);
	}
	return 0;
}

static void dmabounce_sync_for_device(struct device *dev,
		dma_addr_t handle, size_t size, enum dma_data_direction dir)
{
	if (!__dmabounce_sync_for_device(dev, handle, size, dir))
		return;

	arm_dma_ops.sync_single_for_device(dev, handle, size, dir);
}

static int dmabounce_dma_supported(struct device *dev, u64 dma_mask)
{
	if (dev->archdata.dmabounce)
		return 0;

	return arm_dma_ops.dma_supported(dev, dma_mask);
}

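/*
 * dma_map_ops installed for devices registered with dmabounce: page
 * mappings and single-buffer syncs are made bounce-aware here, while the
 * remaining entry points are provided by the generic ARM DMA ops.
 */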
static const struct dma_map_ops dmabounce_ops = {
	.alloc			= arm_dma_alloc,
	.free			= arm_dma_free,
	.mmap			= arm_dma_mmap,
	.get_sgtable		= arm_dma_get_sgtable,
	.map_page		= dmabounce_map_page,
	.unmap_page		= dmabounce_unmap_page,
	.sync_single_for_cpu	= dmabounce_sync_for_cpu,
	.sync_single_for_device	= dmabounce_sync_for_device,
	.map_sg			= arm_dma_map_sg,
	.unmap_sg		= arm_dma_unmap_sg,
	.sync_sg_for_cpu	= arm_dma_sync_sg_for_cpu,
	.sync_sg_for_device	= arm_dma_sync_sg_for_device,
	.dma_supported		= dmabounce_dma_supported,
};

static int dmabounce_init_pool(struct dmabounce_pool *pool, struct device *dev,
		const char *name, unsigned long size)
{
	pool->size = size;
	DO_STATS(pool->allocs = 0);
	pool->pool = dma_pool_create(name, dev, size,
				     0 /* byte alignment */,
				     0 /* no page-crossing issues */);

	return pool->pool ? 0 : -ENOMEM;
}

int dmabounce_register_dev(struct device *dev, unsigned long small_buffer_size,
		unsigned long large_buffer_size,
		int (*needs_bounce_fn)(struct device *, dma_addr_t, size_t))
{
	struct dmabounce_device_info *device_info;
	int ret;

	device_info = kmalloc(sizeof(struct dmabounce_device_info), GFP_ATOMIC);
	if (!device_info) {
		dev_err(dev,
			"Could not allocate dmabounce_device_info\n");
		return -ENOMEM;
	}

	ret = dmabounce_init_pool(&device_info->small, dev,
				  "small_dmabounce_pool", small_buffer_size);
	if (ret) {
		dev_err(dev,
			"dmabounce: could not allocate DMA pool for %ld byte objects\n",
			small_buffer_size);
		goto err_free;
	}

	if (large_buffer_size) {
		ret = dmabounce_init_pool(&device_info->large, dev,
					  "large_dmabounce_pool",
					  large_buffer_size);
		if (ret) {
			dev_err(dev,
				"dmabounce: could not allocate DMA pool for %ld byte objects\n",
				large_buffer_size);
			goto err_destroy;
		}
	}

	device_info->dev = dev;
	INIT_LIST_HEAD(&device_info->safe_buffers);
	rwlock_init(&device_info->lock);
	device_info->needs_bounce = needs_bounce_fn;

#ifdef STATS
	device_info->total_allocs = 0;
	device_info->map_op_count = 0;
	device_info->bounce_count = 0;
	device_info->attr_res = device_create_file(dev, &dev_attr_dmabounce_stats);
#endif

	dev->archdata.dmabounce = device_info;
	set_dma_ops(dev, &dmabounce_ops);

	dev_info(dev, "dmabounce: registered device\n");

	return 0;

 err_destroy:
	dma_pool_destroy(device_info->small.pool);
 err_free:
	kfree(device_info);
	return ret;
}
EXPORT_SYMBOL(dmabounce_register_dev);

void dmabounce_unregister_dev(struct device *dev)
{
	struct dmabounce_device_info *device_info = dev->archdata.dmabounce;

	dev->archdata.dmabounce = NULL;
	set_dma_ops(dev, NULL);

	if (!device_info) {
		dev_warn(dev,
			 "Never registered with dmabounce but attempting "
			 "to unregister!\n");
		return;
	}

	if (!list_empty(&device_info->safe_buffers)) {
		dev_err(dev,
			"Removing from dmabounce with pending buffers!\n");
		BUG();
	}

	if (device_info->small.pool)
		dma_pool_destroy(device_info->small.pool);
	if (device_info->large.pool)
		dma_pool_destroy(device_info->large.pool);

#ifdef STATS
	if (device_info->attr_res == 0)
		device_remove_file(dev, &dev_attr_dmabounce_stats);
#endif

	kfree(device_info);

	dev_info(dev, "dmabounce: device unregistered\n");
}
EXPORT_SYMBOL(dmabounce_unregister_dev);

MODULE_AUTHOR("Christopher Hoover <ch@hpl.hp.com>, Deepak Saxena <dsaxena@plexity.net>");
MODULE_DESCRIPTION("Special dma_{map/unmap/dma_sync}_* routines for systems with limited DMA windows");
MODULE_LICENSE("GPL");