// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Scatter-Gather buffer (sound/core/sgbuf.c)
 *
 *  Copyright (c) by Takashi Iwai <tiwai@suse.de>
 */

#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/vmalloc.h>
#include <linux/export.h>
#include <sound/memalloc.h>


/* table entries are aligned to 32 */
#define SGBUF_TBL_ALIGN		32
#define sgbuf_align_table(tbl)	ALIGN((tbl), SGBUF_TBL_ALIGN)

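/*
 * Release an SG buffer previously set up by snd_malloc_sgbuf_pages():
 * unmap the vmap'ed kernel area, return each allocated chunk via
 * snd_dma_free_pages(), then free the page tables and the descriptor
 * itself.  Only chunk head entries (those with non-zero sub-page bits
 * in .addr) are handed back to snd_dma_free_pages().
 */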
int snd_free_sgbuf_pages(struct snd_dma_buffer *dmab)
{
	struct snd_sg_buf *sgbuf = dmab->private_data;
	struct snd_dma_buffer tmpb;
	int i;

	if (!sgbuf)
		return -EINVAL;

	vunmap(dmab->area);
	dmab->area = NULL;

	tmpb.dev.type = SNDRV_DMA_TYPE_DEV;
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG)
		tmpb.dev.type = SNDRV_DMA_TYPE_DEV_UC;
	tmpb.dev.dev = sgbuf->dev;
	for (i = 0; i < sgbuf->pages; i++) {
		if (!(sgbuf->table[i].addr & ~PAGE_MASK))
			continue; /* non-head page of a contiguous chunk */
		tmpb.area = sgbuf->table[i].buf;
		tmpb.addr = sgbuf->table[i].addr & PAGE_MASK;
		tmpb.bytes = (sgbuf->table[i].addr & ~PAGE_MASK) << PAGE_SHIFT;
		snd_dma_free_pages(&tmpb);
	}

	kfree(sgbuf->table);
	kfree(sgbuf->page_table);
	kfree(sgbuf);
	dmab->private_data = NULL;

	return 0;
}

#define MAX_ALLOC_PAGES		32

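/*
 * Allocate a scatter-gather buffer of @size bytes for @device and map it
 * into a virtually contiguous area via vmap().  Physical memory is taken
 * in chunks of at most MAX_ALLOC_PAGES pages through
 * snd_dma_alloc_pages_fallback(); whenever a smaller chunk than requested
 * comes back, the ceiling is lowered so that later iterations do not retry
 * larger sizes.  If @res_size is non-NULL, a partial allocation is
 * accepted and the actually allocated size is reported there; otherwise
 * any shortfall fails the whole allocation.
 */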
void *snd_malloc_sgbuf_pages(struct device *device,
			     size_t size, struct snd_dma_buffer *dmab,
			     size_t *res_size)
{
	struct snd_sg_buf *sgbuf;
	unsigned int i, pages, chunk, maxpages;
	struct snd_dma_buffer tmpb;
	struct snd_sg_page *table;
	struct page **pgtable;
	int type = SNDRV_DMA_TYPE_DEV;
	pgprot_t prot = PAGE_KERNEL;

	dmab->area = NULL;
	dmab->addr = 0;
	dmab->private_data = sgbuf = kzalloc(sizeof(*sgbuf), GFP_KERNEL);
	if (!sgbuf)
		return NULL;
	if (dmab->dev.type == SNDRV_DMA_TYPE_DEV_UC_SG) {
		type = SNDRV_DMA_TYPE_DEV_UC;
#ifdef pgprot_noncached
		prot = pgprot_noncached(PAGE_KERNEL);
#endif
	}
	sgbuf->dev = device;
	pages = snd_sgbuf_aligned_pages(size);
	sgbuf->tblsize = sgbuf_align_table(pages);
	table = kcalloc(sgbuf->tblsize, sizeof(*table), GFP_KERNEL);
	if (!table)
		goto _failed;
	sgbuf->table = table;
	pgtable = kcalloc(sgbuf->tblsize, sizeof(*pgtable), GFP_KERNEL);
	if (!pgtable)
		goto _failed;
	sgbuf->page_table = pgtable;

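	/*
	 * Each successfully allocated chunk is recorded page by page in
	 * sgbuf->table and sgbuf->page_table.  The first entry of a chunk
	 * additionally stores the chunk's page count in the sub-page bits
	 * of .addr ("mark head" below), which lets snd_free_sgbuf_pages()
	 * reconstruct the original allocations when tearing the buffer down.
	 */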
	/* allocate pages */
	maxpages = MAX_ALLOC_PAGES;
	while (pages > 0) {
		chunk = pages;
		/* don't be too eager to take a huge chunk */
		if (chunk > maxpages)
			chunk = maxpages;
		chunk <<= PAGE_SHIFT;
		if (snd_dma_alloc_pages_fallback(type, device,
						 chunk, &tmpb) < 0) {
			if (!sgbuf->pages)
				goto _failed;
			if (!res_size)
				goto _failed;
			size = sgbuf->pages * PAGE_SIZE;
			break;
		}
		chunk = tmpb.bytes >> PAGE_SHIFT;
		for (i = 0; i < chunk; i++) {
			table->buf = tmpb.area;
			table->addr = tmpb.addr;
			if (!i)
				table->addr |= chunk; /* mark head */
			table++;
			*pgtable++ = virt_to_page(tmpb.area);
			tmpb.area += PAGE_SIZE;
			tmpb.addr += PAGE_SIZE;
		}
		sgbuf->pages += chunk;
		pages -= chunk;
		if (chunk < maxpages)
			maxpages = chunk;
	}

	sgbuf->size = size;
	dmab->area = vmap(sgbuf->page_table, sgbuf->pages, VM_MAP, prot);
	if (!dmab->area)
		goto _failed;
	if (res_size)
		*res_size = sgbuf->size;
	return dmab->area;

 _failed:
	snd_free_sgbuf_pages(dmab); /* free the table */
	return NULL;
}

/*
 * Compute the size of the largest physically contiguous run of the
 * SG buffer starting at offset @ofs, capped at @size bytes.
 */
unsigned int snd_sgbuf_get_chunk_size(struct snd_dma_buffer *dmab,
				      unsigned int ofs, unsigned int size)
{
	struct snd_sg_buf *sg = dmab->private_data;
	unsigned int start, end, pg;

	if (!sg)
		return size;

	start = ofs >> PAGE_SHIFT;
	end = (ofs + size - 1) >> PAGE_SHIFT;
	/* check page continuity */
	pg = sg->table[start].addr >> PAGE_SHIFT;
	for (;;) {
		start++;
		if (start > end)
			break;
		pg++;
		if ((sg->table[start].addr >> PAGE_SHIFT) != pg)
			return (start << PAGE_SHIFT) - ofs;
	}
	/* ok, all on contiguous pages */
	return size;
}
EXPORT_SYMBOL(snd_sgbuf_get_chunk_size);
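
/*
 * Usage sketch (illustrative only, not part of the original file): a
 * driver that allocated its buffer with SNDRV_DMA_TYPE_DEV_SG can walk
 * it in physically contiguous runs, e.g. to program hardware SG
 * descriptors.  This assumes the usual memalloc helpers, namely
 * snd_sgbuf_get_addr() and the dmab->bytes field:
 *
 *	unsigned int ofs = 0;
 *
 *	while (ofs < dmab->bytes) {
 *		unsigned int chunk = snd_sgbuf_get_chunk_size(dmab, ofs,
 *							      dmab->bytes - ofs);
 *		dma_addr_t addr = snd_sgbuf_get_addr(dmab, ofs);
 *
 *		program_one_descriptor(addr, chunk);	// hypothetical driver hook
 *		ofs += chunk;
 *	}
 */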