xref: /OK3568_Linux_fs/kernel/sound/core/memalloc.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0-or-later
/*
 *  Copyright (c) by Jaroslav Kysela <perex@perex.cz>
 *                   Takashi Iwai <tiwai@suse.de>
 *
 *  Generic memory allocators
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/dma-mapping.h>
#include <linux/genalloc.h>
#include <linux/vmalloc.h>
#ifdef CONFIG_X86
#include <asm/set_memory.h>
#endif
#include <sound/memalloc.h>

/*
 *
 *  Bus-specific memory allocators
 *
 */

#ifdef CONFIG_HAS_DMA
/* allocate the coherent DMA pages */
static void snd_malloc_dev_pages(struct snd_dma_buffer *dmab, size_t size)
{
	gfp_t gfp_flags;

	gfp_flags = GFP_KERNEL
		| __GFP_COMP	/* compound page lets parts be mapped */
		| __GFP_NORETRY /* don't trigger OOM-killer */
		| __GFP_NOWARN; /* no stack trace print - this call is non-critical */
	dmab->area = dma_alloc_coherent(dmab->dev.dev, size, &dmab->addr,
					gfp_flags);
#ifdef CONFIG_X86
	if (dmab->area && dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
		set_memory_wc((unsigned long)dmab->area,
			      PAGE_ALIGN(size) >> PAGE_SHIFT);
#endif
}

/* free the coherent DMA pages */
static void snd_free_dev_pages(struct snd_dma_buffer *dmab)
{
#ifdef CONFIG_X86
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC)
		set_memory_wb((unsigned long)dmab->area,
			      PAGE_ALIGN(dmab->bytes) >> PAGE_SHIFT);
#endif
	dma_free_coherent(dmab->dev.dev, dmab->bytes, dmab->area, dmab->addr);
}

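/*
 * Illustrative sketch added by the editor (not part of the original file):
 * drivers do not call the static helpers above directly; they go through the
 * public snd_dma_alloc_pages() below with SNDRV_DMA_TYPE_DEV (or
 * SNDRV_DMA_TYPE_DEV_UC for write-combined pages on x86).  The device
 * pointer and the 64 KiB size here are hypothetical.
 */
static int __maybe_unused example_alloc_coherent(struct device *dev,
						 struct snd_dma_buffer *dmab)
{
	/* allocates page-aligned coherent memory; returns 0 on success */
	return snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, 64 * 1024, dmab);
}
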
#ifdef CONFIG_GENERIC_ALLOCATOR
/**
 * snd_malloc_dev_iram - allocate memory from on-chip internal RAM
 * @dmab: buffer allocation record to store the allocated data
 * @size: number of bytes to allocate from the IRAM
 *
 * This function requires an "iram" phandle provided via the device's of_node.
 */
static void snd_malloc_dev_iram(struct snd_dma_buffer *dmab, size_t size)
{
	struct device *dev = dmab->dev.dev;
	struct gen_pool *pool = NULL;

	dmab->area = NULL;
	dmab->addr = 0;

	if (dev->of_node)
		pool = of_gen_pool_get(dev->of_node, "iram", 0);

	if (!pool)
		return;

	/* Assign the pool into private_data field */
	dmab->private_data = pool;

	dmab->area = gen_pool_dma_alloc_align(pool, size, &dmab->addr,
					PAGE_SIZE);
}

/**
 * snd_free_dev_iram - free memory allocated from on-chip internal RAM
 * @dmab: buffer allocation record holding the allocated data
 */
static void snd_free_dev_iram(struct snd_dma_buffer *dmab)
{
	struct gen_pool *pool = dmab->private_data;

	if (pool && dmab->area)
		gen_pool_free(pool, (unsigned long)dmab->area, dmab->bytes);
}
#endif /* CONFIG_GENERIC_ALLOCATOR */
#endif /* CONFIG_HAS_DMA */

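/*
 * Illustrative sketch added by the editor (not part of the original file):
 * requesting an IRAM-backed buffer.  The device must carry an of_node with
 * an "iram" phandle for the gen_pool lookup above to succeed;
 * snd_dma_alloc_pages() silently falls back to an ordinary device
 * allocation when it does not.  Function and variable names are
 * hypothetical.
 */
#if defined(CONFIG_HAS_DMA) && defined(CONFIG_GENERIC_ALLOCATOR)
static int __maybe_unused example_alloc_iram(struct device *dev,
					     struct snd_dma_buffer *dmab,
					     size_t size)
{
	int err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV_IRAM, dev, size, dmab);

	if (!err)
		/* dmab->dev.type tells whether IRAM or plain DEV was used */
		dev_dbg(dev, "buffer type after alloc: %d\n", dmab->dev.type);
	return err;
}
#endif
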
/*
 *
 *  ALSA generic memory management
 *
 */

static inline gfp_t snd_mem_get_gfp_flags(const struct device *dev,
					  gfp_t default_gfp)
{
	if (!dev)
		return default_gfp;
	else
		return (__force gfp_t)(unsigned long)dev;
}

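/*
 * Illustrative sketch added by the editor (not part of the original file):
 * for SNDRV_DMA_TYPE_CONTINUOUS and SNDRV_DMA_TYPE_VMALLOC the "device"
 * argument is not a real device but a gfp mask smuggled through the
 * pointer, which snd_mem_get_gfp_flags() above decodes.  <sound/memalloc.h>
 * conventionally wraps this cast as snd_dma_continuous_data() (assumed to
 * still be present in this tree); the explicit cast is shown for clarity.
 */
static int __maybe_unused example_alloc_continuous(struct snd_dma_buffer *dmab,
						   size_t size)
{
	struct device *gfp_as_dev =
		(struct device *)(__force unsigned long)GFP_KERNEL;

	return snd_dma_alloc_pages(SNDRV_DMA_TYPE_CONTINUOUS, gfp_as_dev,
				   size, dmab);
}
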
/**
 * snd_dma_alloc_pages - allocate the buffer area according to the given type
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.
 *
 * Return: Zero if the buffer with the given size is allocated successfully,
 * otherwise a negative value on error.
 */
int snd_dma_alloc_pages(int type, struct device *device, size_t size,
			struct snd_dma_buffer *dmab)
{
	gfp_t gfp;

	if (WARN_ON(!size))
		return -ENXIO;
	if (WARN_ON(!dmab))
		return -ENXIO;

	size = PAGE_ALIGN(size);
	dmab->dev.type = type;
	dmab->dev.dev = device;
	dmab->bytes = 0;
	dmab->area = NULL;
	dmab->addr = 0;
	dmab->private_data = NULL;
	switch (type) {
	case SNDRV_DMA_TYPE_CONTINUOUS:
		gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL);
		dmab->area = alloc_pages_exact(size, gfp);
		break;
	case SNDRV_DMA_TYPE_VMALLOC:
		gfp = snd_mem_get_gfp_flags(device, GFP_KERNEL | __GFP_HIGHMEM);
		dmab->area = __vmalloc(size, gfp);
		break;
#ifdef CONFIG_HAS_DMA
#ifdef CONFIG_GENERIC_ALLOCATOR
	case SNDRV_DMA_TYPE_DEV_IRAM:
		snd_malloc_dev_iram(dmab, size);
		if (dmab->area)
			break;
		/* Internal memory might be limited in size and have no space
		 * left, so if the IRAM allocation fails, fall back to a
		 * normal device allocation.
		 */
		dmab->dev.type = SNDRV_DMA_TYPE_DEV;
		fallthrough;
#endif /* CONFIG_GENERIC_ALLOCATOR */
	case SNDRV_DMA_TYPE_DEV:
	case SNDRV_DMA_TYPE_DEV_UC:
		snd_malloc_dev_pages(dmab, size);
		break;
#endif
#ifdef CONFIG_SND_DMA_SGBUF
	case SNDRV_DMA_TYPE_DEV_SG:
	case SNDRV_DMA_TYPE_DEV_UC_SG:
		snd_malloc_sgbuf_pages(device, size, dmab, NULL);
		break;
#endif
	default:
		pr_err("snd-malloc: invalid device type %d\n", type);
		return -ENXIO;
	}
	if (!dmab->area)
		return -ENOMEM;
	dmab->bytes = size;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages);

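/*
 * Illustrative sketch added by the editor (not part of the original file):
 * typical life cycle of a buffer obtained with the function above.  The
 * device pointer, the 32 KiB size and the use of the buffer are
 * hypothetical.
 */
static void __maybe_unused example_buffer_lifecycle(struct device *dev)
{
	struct snd_dma_buffer dmab;

	if (snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV, dev, 32 * 1024, &dmab))
		return;

	/* dmab.area is the CPU address, dmab.addr the DMA address */
	memset(dmab.area, 0, dmab.bytes);

	snd_dma_free_pages(&dmab);
}
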
/**
 * snd_dma_alloc_pages_fallback - allocate the buffer area according to the given type with fallback
 * @type: the DMA buffer type
 * @device: the device pointer
 * @size: the buffer size to allocate
 * @dmab: buffer allocation record to store the allocated data
 *
 * Calls the memory-allocator function for the corresponding
 * buffer type.  When no space is left, this function reduces the size and
 * tries to allocate again.  The size actually allocated is stored in
 * dmab->bytes.
 *
 * Return: Zero if a buffer is allocated successfully, otherwise a negative
 * value on error.
 */
int snd_dma_alloc_pages_fallback(int type, struct device *device, size_t size,
				 struct snd_dma_buffer *dmab)
{
	int err;

	while ((err = snd_dma_alloc_pages(type, device, size, dmab)) < 0) {
		if (err != -ENOMEM)
			return err;
		if (size <= PAGE_SIZE)
			return -ENOMEM;
		size >>= 1;
		size = PAGE_SIZE << get_order(size);
	}
	if (!dmab->area)
		return -ENOMEM;
	return 0;
}
EXPORT_SYMBOL(snd_dma_alloc_pages_fallback);


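/*
 * Illustrative sketch added by the editor (not part of the original file):
 * with the fallback variant the caller must inspect dmab->bytes, since the
 * buffer actually obtained may be smaller than requested.  The requested
 * and minimum sizes are hypothetical.
 */
static int __maybe_unused example_alloc_with_fallback(struct device *dev,
						      struct snd_dma_buffer *dmab)
{
	int err = snd_dma_alloc_pages_fallback(SNDRV_DMA_TYPE_DEV, dev,
					       1024 * 1024, dmab);

	if (err)
		return err;
	if (dmab->bytes < 256 * 1024) {
		/* too small to be useful for this (hypothetical) driver */
		snd_dma_free_pages(dmab);
		return -ENOMEM;
	}
	return 0;
}
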
/**
 * snd_dma_free_pages - release the allocated buffer
 * @dmab: the buffer allocation record to release
 *
 * Releases the buffer allocated via snd_dma_alloc_pages().
 */
void snd_dma_free_pages(struct snd_dma_buffer *dmab)
{
	switch (dmab->dev.type) {
	case SNDRV_DMA_TYPE_CONTINUOUS:
		free_pages_exact(dmab->area, dmab->bytes);
		break;
	case SNDRV_DMA_TYPE_VMALLOC:
		vfree(dmab->area);
		break;
#ifdef CONFIG_HAS_DMA
#ifdef CONFIG_GENERIC_ALLOCATOR
	case SNDRV_DMA_TYPE_DEV_IRAM:
		snd_free_dev_iram(dmab);
		break;
#endif /* CONFIG_GENERIC_ALLOCATOR */
	case SNDRV_DMA_TYPE_DEV:
	case SNDRV_DMA_TYPE_DEV_UC:
		snd_free_dev_pages(dmab);
		break;
#endif
#ifdef CONFIG_SND_DMA_SGBUF
	case SNDRV_DMA_TYPE_DEV_SG:
	case SNDRV_DMA_TYPE_DEV_UC_SG:
		snd_free_sgbuf_pages(dmab);
		break;
#endif
	default:
		pr_err("snd-malloc: invalid device type %d\n", dmab->dev.type);
	}
}
EXPORT_SYMBOL(snd_dma_free_pages);