1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (c) by Jaroslav Kysela <perex@perex.cz>
4*4882a593Smuzhiyun * Copyright (c) by Takashi Iwai <tiwai@suse.de>
5*4882a593Smuzhiyun * Copyright (c) by Scott McNab <sdm@fractalgraphics.com.au>
6*4882a593Smuzhiyun *
7*4882a593Smuzhiyun * Trident 4DWave-NX memory page allocation (TLB area)
8*4882a593Smuzhiyun * Trident chip can handle only 16MByte of the memory at the same time.
9*4882a593Smuzhiyun */
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun #include <linux/io.h>
12*4882a593Smuzhiyun #include <linux/pci.h>
13*4882a593Smuzhiyun #include <linux/time.h>
14*4882a593Smuzhiyun #include <linux/mutex.h>
15*4882a593Smuzhiyun
16*4882a593Smuzhiyun #include <sound/core.h>
17*4882a593Smuzhiyun #include "trident.h"
18*4882a593Smuzhiyun
/* page arguments of these two macros are Trident page (4096 bytes), not like
 * aligned pages in others
 */
/* program TLB slot @page: the chip reads the bus address little-endian and
 * page-aligned; the shadow table keeps the matching kernel virtual address
 * so page_to_ptr() can recover it without a reverse DMA lookup */
#define __set_tlb_bus(trident,page,ptr,addr) \
	do { (trident)->tlb.entries[page] = cpu_to_le32((addr) & ~(SNDRV_TRIDENT_PAGE_SIZE-1)); \
	     (trident)->tlb.shadow_entries[page] = (ptr); } while (0)
/* kernel virtual address previously stored for TLB slot @page */
#define __tlb_to_ptr(trident,page) \
	(void*)((trident)->tlb.shadow_entries[page])
/* bus address stored in TLB slot @page; convert from little-endian *before*
 * applying the alignment mask so the correct bits are cleared on big-endian
 * hosts (stored entries are already page-aligned, so this is hardening
 * rather than a behavior change on little-endian) */
#define __tlb_to_addr(trident,page) \
	(dma_addr_t)(le32_to_cpu((trident)->tlb.entries[page]) & ~(SNDRV_TRIDENT_PAGE_SIZE - 1))
29*4882a593Smuzhiyun
#if PAGE_SIZE == 4096
/* page size == SNDRV_TRIDENT_PAGE_SIZE: an aligned page maps 1:1 onto a
 * single hardware TLB entry, so all helpers pass through unchanged */
#define ALIGN_PAGE_SIZE		PAGE_SIZE	/* minimum page size for allocation */
#define MAX_ALIGN_PAGES		SNDRV_TRIDENT_MAX_PAGES	/* maximum aligned pages */
/* fill TLB entrie(s) corresponding to page with ptr */
#define set_tlb_bus(trident,page,ptr,addr) __set_tlb_bus(trident,page,ptr,addr)
/* fill TLB entrie(s) corresponding to page with silence pointer */
#define set_silent_tlb(trident,page)	__set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr)
/* get aligned page from offset address */
#define get_aligned_page(offset)	((offset) >> 12)
/* get offset address from aligned page */
#define aligned_page_offset(page)	((page) << 12)
/* get buffer address from aligned page */
#define page_to_ptr(trident,page)	__tlb_to_ptr(trident, page)
/* get PCI physical address from aligned page */
#define page_to_addr(trident,page)	__tlb_to_addr(trident, page)
46*4882a593Smuzhiyun
#elif PAGE_SIZE == 8192
/* page size == SNDRV_TRIDENT_PAGE_SIZE x 2 */
#define ALIGN_PAGE_SIZE		PAGE_SIZE
#define MAX_ALIGN_PAGES		(SNDRV_TRIDENT_MAX_PAGES / 2)
/* aligned page <-> byte offset: one aligned page is 8KB (1 << 13) */
#define get_aligned_page(offset)	((offset) >> 13)
#define aligned_page_offset(page)	((page) << 13)
/* each aligned page covers two consecutive 4KB TLB entries; the first
 * entry's pointer/address also describes the start of the aligned page */
#define page_to_ptr(trident,page)	__tlb_to_ptr(trident, (page) << 1)
#define page_to_addr(trident,page)	__tlb_to_addr(trident, (page) << 1)
56*4882a593Smuzhiyun /* fill TLB entries -- we need to fill two entries */
set_tlb_bus(struct snd_trident * trident,int page,unsigned long ptr,dma_addr_t addr)57*4882a593Smuzhiyun static inline void set_tlb_bus(struct snd_trident *trident, int page,
58*4882a593Smuzhiyun unsigned long ptr, dma_addr_t addr)
59*4882a593Smuzhiyun {
60*4882a593Smuzhiyun page <<= 1;
61*4882a593Smuzhiyun __set_tlb_bus(trident, page, ptr, addr);
62*4882a593Smuzhiyun __set_tlb_bus(trident, page+1, ptr + SNDRV_TRIDENT_PAGE_SIZE, addr + SNDRV_TRIDENT_PAGE_SIZE);
63*4882a593Smuzhiyun }
set_silent_tlb(struct snd_trident * trident,int page)64*4882a593Smuzhiyun static inline void set_silent_tlb(struct snd_trident *trident, int page)
65*4882a593Smuzhiyun {
66*4882a593Smuzhiyun page <<= 1;
67*4882a593Smuzhiyun __set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
68*4882a593Smuzhiyun __set_tlb_bus(trident, page+1, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
69*4882a593Smuzhiyun }
70*4882a593Smuzhiyun
#else
/* arbitrary size: one kernel page spans UNIT_PAGES Trident TLB entries */
#define UNIT_PAGES		(PAGE_SIZE / SNDRV_TRIDENT_PAGE_SIZE)
#define ALIGN_PAGE_SIZE		(SNDRV_TRIDENT_PAGE_SIZE * UNIT_PAGES)
#define MAX_ALIGN_PAGES		(SNDRV_TRIDENT_MAX_PAGES / UNIT_PAGES)
/* Note: if alignment doesn't match to the maximum size, the last few blocks
 * become unusable.  To use such blocks, you'll need to check the validity
 * of accessing page in set_tlb_bus and set_silent_tlb.  search_empty()
 * should also check it, too.
 */
#define get_aligned_page(offset)	((offset) / ALIGN_PAGE_SIZE)
#define aligned_page_offset(page)	((page) * ALIGN_PAGE_SIZE)
/* first TLB entry of the aligned page describes its start */
#define page_to_ptr(trident,page)	__tlb_to_ptr(trident, (page) * UNIT_PAGES)
#define page_to_addr(trident,page)	__tlb_to_addr(trident, (page) * UNIT_PAGES)
86*4882a593Smuzhiyun /* fill TLB entries -- UNIT_PAGES entries must be filled */
set_tlb_bus(struct snd_trident * trident,int page,unsigned long ptr,dma_addr_t addr)87*4882a593Smuzhiyun static inline void set_tlb_bus(struct snd_trident *trident, int page,
88*4882a593Smuzhiyun unsigned long ptr, dma_addr_t addr)
89*4882a593Smuzhiyun {
90*4882a593Smuzhiyun int i;
91*4882a593Smuzhiyun page *= UNIT_PAGES;
92*4882a593Smuzhiyun for (i = 0; i < UNIT_PAGES; i++, page++) {
93*4882a593Smuzhiyun __set_tlb_bus(trident, page, ptr, addr);
94*4882a593Smuzhiyun ptr += SNDRV_TRIDENT_PAGE_SIZE;
95*4882a593Smuzhiyun addr += SNDRV_TRIDENT_PAGE_SIZE;
96*4882a593Smuzhiyun }
97*4882a593Smuzhiyun }
set_silent_tlb(struct snd_trident * trident,int page)98*4882a593Smuzhiyun static inline void set_silent_tlb(struct snd_trident *trident, int page)
99*4882a593Smuzhiyun {
100*4882a593Smuzhiyun int i;
101*4882a593Smuzhiyun page *= UNIT_PAGES;
102*4882a593Smuzhiyun for (i = 0; i < UNIT_PAGES; i++, page++)
103*4882a593Smuzhiyun __set_tlb_bus(trident, page, (unsigned long)trident->tlb.silent_page.area, trident->tlb.silent_page.addr);
104*4882a593Smuzhiyun }
105*4882a593Smuzhiyun
106*4882a593Smuzhiyun #endif /* PAGE_SIZE */
107*4882a593Smuzhiyun
108*4882a593Smuzhiyun /* calculate buffer pointer from offset address */
offset_ptr(struct snd_trident * trident,int offset)109*4882a593Smuzhiyun static inline void *offset_ptr(struct snd_trident *trident, int offset)
110*4882a593Smuzhiyun {
111*4882a593Smuzhiyun char *ptr;
112*4882a593Smuzhiyun ptr = page_to_ptr(trident, get_aligned_page(offset));
113*4882a593Smuzhiyun ptr += offset % ALIGN_PAGE_SIZE;
114*4882a593Smuzhiyun return (void*)ptr;
115*4882a593Smuzhiyun }
116*4882a593Smuzhiyun
/* first and last (aligned) pages of memory block, kept in the per-block
 * private argument area provided by the snd_util_mem allocator */
#define firstpg(blk)	(((struct snd_trident_memblk_arg *)snd_util_memblk_argptr(blk))->first_page)
#define lastpg(blk)	(((struct snd_trident_memblk_arg *)snd_util_memblk_argptr(blk))->last_page)
120*4882a593Smuzhiyun
/*
 * search empty pages which may contain given size
 *
 * First-fit search over the block list (kept sorted by page) for a gap of
 * at least @size bytes, rounded up to whole aligned pages.  On success a
 * new block is allocated and linked at the matching list position; returns
 * NULL when no gap fits or allocation fails.
 * Caller must hold hdr->block_mutex.
 */
static struct snd_util_memblk *
search_empty(struct snd_util_memhdr *hdr, int size)
{
	struct snd_util_memblk *blk;
	int page, psize;
	struct list_head *p;

	/* number of aligned pages needed, rounding size up */
	psize = get_aligned_page(size + ALIGN_PAGE_SIZE -1);
	page = 0;
	/* scan gaps between consecutive blocks; `page` tracks the first
	 * free aligned page after the blocks visited so far */
	list_for_each(p, &hdr->block) {
		blk = list_entry(p, struct snd_util_memblk, list);
		if (page + psize <= firstpg(blk))
			goto __found_pages;
		page = lastpg(blk) + 1;
	}
	/* no gap inside the list; check the tail space (here p is back at
	 * the list head, so the new block is appended at the end below) */
	if (page + psize > MAX_ALIGN_PAGES)
		return NULL;

__found_pages:
	/* create a new memory block, inserted before position p so the
	 * list stays sorted by page */
	blk = __snd_util_memblk_new(hdr, psize * ALIGN_PAGE_SIZE, p->prev);
	if (blk == NULL)
		return NULL;
	blk->offset = aligned_page_offset(page); /* set aligned offset */
	firstpg(blk) = page;
	lastpg(blk) = page + psize - 1;
	return blk;
}
152*4882a593Smuzhiyun
153*4882a593Smuzhiyun
154*4882a593Smuzhiyun /*
155*4882a593Smuzhiyun * check if the given pointer is valid for pages
156*4882a593Smuzhiyun */
is_valid_page(unsigned long ptr)157*4882a593Smuzhiyun static int is_valid_page(unsigned long ptr)
158*4882a593Smuzhiyun {
159*4882a593Smuzhiyun if (ptr & ~0x3fffffffUL) {
160*4882a593Smuzhiyun snd_printk(KERN_ERR "max memory size is 1GB!!\n");
161*4882a593Smuzhiyun return 0;
162*4882a593Smuzhiyun }
163*4882a593Smuzhiyun if (ptr & (SNDRV_TRIDENT_PAGE_SIZE-1)) {
164*4882a593Smuzhiyun snd_printk(KERN_ERR "page is not aligned\n");
165*4882a593Smuzhiyun return 0;
166*4882a593Smuzhiyun }
167*4882a593Smuzhiyun return 1;
168*4882a593Smuzhiyun }
169*4882a593Smuzhiyun
170*4882a593Smuzhiyun /*
171*4882a593Smuzhiyun * page allocation for DMA (Scatter-Gather version)
172*4882a593Smuzhiyun */
173*4882a593Smuzhiyun static struct snd_util_memblk *
snd_trident_alloc_sg_pages(struct snd_trident * trident,struct snd_pcm_substream * substream)174*4882a593Smuzhiyun snd_trident_alloc_sg_pages(struct snd_trident *trident,
175*4882a593Smuzhiyun struct snd_pcm_substream *substream)
176*4882a593Smuzhiyun {
177*4882a593Smuzhiyun struct snd_util_memhdr *hdr;
178*4882a593Smuzhiyun struct snd_util_memblk *blk;
179*4882a593Smuzhiyun struct snd_pcm_runtime *runtime = substream->runtime;
180*4882a593Smuzhiyun int idx, page;
181*4882a593Smuzhiyun
182*4882a593Smuzhiyun if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
183*4882a593Smuzhiyun runtime->dma_bytes > SNDRV_TRIDENT_MAX_PAGES *
184*4882a593Smuzhiyun SNDRV_TRIDENT_PAGE_SIZE))
185*4882a593Smuzhiyun return NULL;
186*4882a593Smuzhiyun hdr = trident->tlb.memhdr;
187*4882a593Smuzhiyun if (snd_BUG_ON(!hdr))
188*4882a593Smuzhiyun return NULL;
189*4882a593Smuzhiyun
190*4882a593Smuzhiyun
191*4882a593Smuzhiyun
192*4882a593Smuzhiyun mutex_lock(&hdr->block_mutex);
193*4882a593Smuzhiyun blk = search_empty(hdr, runtime->dma_bytes);
194*4882a593Smuzhiyun if (blk == NULL) {
195*4882a593Smuzhiyun mutex_unlock(&hdr->block_mutex);
196*4882a593Smuzhiyun return NULL;
197*4882a593Smuzhiyun }
198*4882a593Smuzhiyun
199*4882a593Smuzhiyun /* set TLB entries */
200*4882a593Smuzhiyun idx = 0;
201*4882a593Smuzhiyun for (page = firstpg(blk); page <= lastpg(blk); page++, idx++) {
202*4882a593Smuzhiyun unsigned long ofs = idx << PAGE_SHIFT;
203*4882a593Smuzhiyun dma_addr_t addr = snd_pcm_sgbuf_get_addr(substream, ofs);
204*4882a593Smuzhiyun unsigned long ptr = (unsigned long)
205*4882a593Smuzhiyun snd_pcm_sgbuf_get_ptr(substream, ofs);
206*4882a593Smuzhiyun if (! is_valid_page(addr)) {
207*4882a593Smuzhiyun __snd_util_mem_free(hdr, blk);
208*4882a593Smuzhiyun mutex_unlock(&hdr->block_mutex);
209*4882a593Smuzhiyun return NULL;
210*4882a593Smuzhiyun }
211*4882a593Smuzhiyun set_tlb_bus(trident, page, ptr, addr);
212*4882a593Smuzhiyun }
213*4882a593Smuzhiyun mutex_unlock(&hdr->block_mutex);
214*4882a593Smuzhiyun return blk;
215*4882a593Smuzhiyun }
216*4882a593Smuzhiyun
217*4882a593Smuzhiyun /*
218*4882a593Smuzhiyun * page allocation for DMA (contiguous version)
219*4882a593Smuzhiyun */
220*4882a593Smuzhiyun static struct snd_util_memblk *
snd_trident_alloc_cont_pages(struct snd_trident * trident,struct snd_pcm_substream * substream)221*4882a593Smuzhiyun snd_trident_alloc_cont_pages(struct snd_trident *trident,
222*4882a593Smuzhiyun struct snd_pcm_substream *substream)
223*4882a593Smuzhiyun {
224*4882a593Smuzhiyun struct snd_util_memhdr *hdr;
225*4882a593Smuzhiyun struct snd_util_memblk *blk;
226*4882a593Smuzhiyun int page;
227*4882a593Smuzhiyun struct snd_pcm_runtime *runtime = substream->runtime;
228*4882a593Smuzhiyun dma_addr_t addr;
229*4882a593Smuzhiyun unsigned long ptr;
230*4882a593Smuzhiyun
231*4882a593Smuzhiyun if (snd_BUG_ON(runtime->dma_bytes <= 0 ||
232*4882a593Smuzhiyun runtime->dma_bytes > SNDRV_TRIDENT_MAX_PAGES *
233*4882a593Smuzhiyun SNDRV_TRIDENT_PAGE_SIZE))
234*4882a593Smuzhiyun return NULL;
235*4882a593Smuzhiyun hdr = trident->tlb.memhdr;
236*4882a593Smuzhiyun if (snd_BUG_ON(!hdr))
237*4882a593Smuzhiyun return NULL;
238*4882a593Smuzhiyun
239*4882a593Smuzhiyun mutex_lock(&hdr->block_mutex);
240*4882a593Smuzhiyun blk = search_empty(hdr, runtime->dma_bytes);
241*4882a593Smuzhiyun if (blk == NULL) {
242*4882a593Smuzhiyun mutex_unlock(&hdr->block_mutex);
243*4882a593Smuzhiyun return NULL;
244*4882a593Smuzhiyun }
245*4882a593Smuzhiyun
246*4882a593Smuzhiyun /* set TLB entries */
247*4882a593Smuzhiyun addr = runtime->dma_addr;
248*4882a593Smuzhiyun ptr = (unsigned long)runtime->dma_area;
249*4882a593Smuzhiyun for (page = firstpg(blk); page <= lastpg(blk); page++,
250*4882a593Smuzhiyun ptr += SNDRV_TRIDENT_PAGE_SIZE, addr += SNDRV_TRIDENT_PAGE_SIZE) {
251*4882a593Smuzhiyun if (! is_valid_page(addr)) {
252*4882a593Smuzhiyun __snd_util_mem_free(hdr, blk);
253*4882a593Smuzhiyun mutex_unlock(&hdr->block_mutex);
254*4882a593Smuzhiyun return NULL;
255*4882a593Smuzhiyun }
256*4882a593Smuzhiyun set_tlb_bus(trident, page, ptr, addr);
257*4882a593Smuzhiyun }
258*4882a593Smuzhiyun mutex_unlock(&hdr->block_mutex);
259*4882a593Smuzhiyun return blk;
260*4882a593Smuzhiyun }
261*4882a593Smuzhiyun
262*4882a593Smuzhiyun /*
263*4882a593Smuzhiyun * page allocation for DMA
264*4882a593Smuzhiyun */
265*4882a593Smuzhiyun struct snd_util_memblk *
snd_trident_alloc_pages(struct snd_trident * trident,struct snd_pcm_substream * substream)266*4882a593Smuzhiyun snd_trident_alloc_pages(struct snd_trident *trident,
267*4882a593Smuzhiyun struct snd_pcm_substream *substream)
268*4882a593Smuzhiyun {
269*4882a593Smuzhiyun if (snd_BUG_ON(!trident || !substream))
270*4882a593Smuzhiyun return NULL;
271*4882a593Smuzhiyun if (substream->dma_buffer.dev.type == SNDRV_DMA_TYPE_DEV_SG)
272*4882a593Smuzhiyun return snd_trident_alloc_sg_pages(trident, substream);
273*4882a593Smuzhiyun else
274*4882a593Smuzhiyun return snd_trident_alloc_cont_pages(trident, substream);
275*4882a593Smuzhiyun }
276*4882a593Smuzhiyun
277*4882a593Smuzhiyun
278*4882a593Smuzhiyun /*
279*4882a593Smuzhiyun * release DMA buffer from page table
280*4882a593Smuzhiyun */
snd_trident_free_pages(struct snd_trident * trident,struct snd_util_memblk * blk)281*4882a593Smuzhiyun int snd_trident_free_pages(struct snd_trident *trident,
282*4882a593Smuzhiyun struct snd_util_memblk *blk)
283*4882a593Smuzhiyun {
284*4882a593Smuzhiyun struct snd_util_memhdr *hdr;
285*4882a593Smuzhiyun int page;
286*4882a593Smuzhiyun
287*4882a593Smuzhiyun if (snd_BUG_ON(!trident || !blk))
288*4882a593Smuzhiyun return -EINVAL;
289*4882a593Smuzhiyun
290*4882a593Smuzhiyun hdr = trident->tlb.memhdr;
291*4882a593Smuzhiyun mutex_lock(&hdr->block_mutex);
292*4882a593Smuzhiyun /* reset TLB entries */
293*4882a593Smuzhiyun for (page = firstpg(blk); page <= lastpg(blk); page++)
294*4882a593Smuzhiyun set_silent_tlb(trident, page);
295*4882a593Smuzhiyun /* free memory block */
296*4882a593Smuzhiyun __snd_util_mem_free(hdr, blk);
297*4882a593Smuzhiyun mutex_unlock(&hdr->block_mutex);
298*4882a593Smuzhiyun return 0;
299*4882a593Smuzhiyun }
300