// SPDX-License-Identifier: GPL-2.0-only
/**
 * Copyright (C) 2008, Creative Technology Ltd. All Rights Reserved.
 *
 * @File    ctvmem.c
 *
 * @Brief
 * This file contains the implementation of the virtual memory management
 * object for the card device.
 *
 * @Author Liu Chun
 * @Date Apr 1 2008
 */

#include "ctvmem.h"
#include "ctatc.h"
#include <linux/slab.h>
#include <linux/mm.h>
#include <linux/io.h>
#include <sound/pcm.h>

#define CT_PTES_PER_PAGE (CT_PAGE_SIZE / sizeof(void *))
#define CT_ADDRS_PER_PAGE (CT_PTES_PER_PAGE * CT_PAGE_SIZE)

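/*
 * A quick worked example of the arithmetic above (a sketch assuming
 * 4 KiB device pages and 8-byte host pointers, i.e. a 64-bit kernel):
 * each page-table page then holds 4096 / 8 = 512 entries, and each
 * entry maps one 4 KiB page, so a single page-table page covers
 * 512 * 4096 bytes = 2 MiB of device virtual address space.
 */
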
/*
 * Find or create a vm block based on the requested @size.
 * @size must be page aligned.
 */
static struct ct_vm_block *
get_vm_block(struct ct_vm *vm, unsigned int size, struct ct_atc *atc)
{
	struct ct_vm_block *block = NULL, *entry;
	struct list_head *pos;

	size = CT_PAGE_ALIGN(size);
	if (size > vm->size) {
		dev_err(atc->card->dev,
			"Fail! Not enough device virtual memory space available!\n");
		return NULL;
	}

	mutex_lock(&vm->lock);
	list_for_each(pos, &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		if (entry->size >= size)
			break; /* found a block that is big enough */
	}
	if (pos == &vm->unused)
		goto out;

	if (entry->size == size) {
		/* Move the vm node from the unused list to the used list directly */
		list_move(&entry->list, &vm->used);
		vm->size -= size;
		block = entry;
		goto out;
	}

	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (!block)
		goto out;

	block->addr = entry->addr;
	block->size = size;
	list_add(&block->list, &vm->used);
	entry->addr += size;
	entry->size -= size;
	vm->size -= size;

 out:
	mutex_unlock(&vm->lock);
	return block;
}

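/*
 * Usage sketch (illustrative only; "nr_pages" is a hypothetical
 * variable, not part of this file): a caller reserves a page-aligned
 * run of device virtual space and must handle allocation failure:
 *
 *	struct ct_vm_block *blk;
 *
 *	blk = get_vm_block(vm, nr_pages << CT_PAGE_SHIFT, atc);
 *	if (!blk)
 *		return -ENOMEM;
 *
 * On success, blk->addr is the device virtual start address and
 * blk->size is the page-aligned length actually reserved.
 */

/*
 * Return @block to the free pool: reinsert it into the address-ordered
 * unused list and merge it with any adjacent free neighbors.
 */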
static void put_vm_block(struct ct_vm *vm, struct ct_vm_block *block)
{
	struct ct_vm_block *entry, *pre_ent;
	struct list_head *pos, *pre;

	block->size = CT_PAGE_ALIGN(block->size);

	mutex_lock(&vm->lock);
	list_del(&block->list);
	vm->size += block->size;

	list_for_each(pos, &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		if (entry->addr >= (block->addr + block->size))
			break; /* found a position */
	}
	if (pos == &vm->unused) {
		list_add_tail(&block->list, &vm->unused);
		entry = block;
	} else {
		if ((block->addr + block->size) == entry->addr) {
			entry->addr = block->addr;
			entry->size += block->size;
			kfree(block);
		} else {
			__list_add(&block->list, pos->prev, pos);
			entry = block;
		}
	}

	pos = &entry->list;
	pre = pos->prev;
	while (pre != &vm->unused) {
		entry = list_entry(pos, struct ct_vm_block, list);
		pre_ent = list_entry(pre, struct ct_vm_block, list);
		if ((pre_ent->addr + pre_ent->size) > entry->addr)
			break;

		pre_ent->size += entry->size;
		list_del(pos);
		kfree(entry);
		pos = pre;
		pre = pos->prev;
	}
	mutex_unlock(&vm->lock);
}

/* Map host addr (kmalloced/vmalloced) to device logical addr. */
static struct ct_vm_block *
ct_vm_map(struct ct_vm *vm, struct snd_pcm_substream *substream, int size)
{
	struct ct_vm_block *block;
	unsigned int pte_start;
	unsigned i, pages;
	unsigned long *ptp;
	struct ct_atc *atc = snd_pcm_substream_chip(substream);

	block = get_vm_block(vm, size, atc);
	if (block == NULL) {
		dev_err(atc->card->dev,
			"No virtual memory block big enough to allocate!\n");
		return NULL;
	}

	ptp = (unsigned long *)vm->ptp[0].area;
	pte_start = (block->addr >> CT_PAGE_SHIFT);
	pages = block->size >> CT_PAGE_SHIFT;
	for (i = 0; i < pages; i++) {
		unsigned long addr;
		addr = snd_pcm_sgbuf_get_addr(substream, i << CT_PAGE_SHIFT);
		ptp[pte_start + i] = addr;
	}

	block->size = size;
	return block;
}
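
/*
 * Usage sketch (illustrative only; "bytes" and "blk" are hypothetical
 * names): map a prepared substream buffer through the map hook, and
 * release the mapping with the paired unmap hook when done:
 *
 *	struct ct_vm_block *blk;
 *
 *	blk = vm->map(vm, substream, bytes);
 *	if (!blk)
 *		return -ENOMEM;
 *	...
 *	vm->unmap(vm, blk);
 */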

static void ct_vm_unmap(struct ct_vm *vm, struct ct_vm_block *block)
{
	/* do unmapping */
	put_vm_block(vm, block);
}

/*
 * Return the host physical addr of the @index-th device
 * page table page on success, or ~0UL on failure.
 * The first returned ~0UL indicates the termination.
 */
static dma_addr_t
ct_get_ptp_phys(struct ct_vm *vm, int index)
{
	return (index >= CT_PTP_NUM) ? ~0UL : vm->ptp[index].addr;
}
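
/*
 * Usage sketch (illustrative only; "program_ptp_reg" is a hypothetical
 * chip-specific helper, not part of this file): a caller programming
 * the hardware page-table registers walks the indices until the first
 * invalid address terminates the sequence:
 *
 *	for (i = 0; i < CT_PTP_NUM; i++) {
 *		dma_addr_t phys = vm->get_ptp_phys(vm, i);
 *
 *		if (phys == (dma_addr_t)~0UL)
 *			break;
 *		program_ptp_reg(i, phys);
 *	}
 */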

int ct_vm_create(struct ct_vm **rvm, struct pci_dev *pci)
{
	struct ct_vm *vm;
	struct ct_vm_block *block;
	int i, err = 0;

	*rvm = NULL;

	vm = kzalloc(sizeof(*vm), GFP_KERNEL);
	if (!vm)
		return -ENOMEM;

	mutex_init(&vm->lock);

	/* Allocate page table pages */
	for (i = 0; i < CT_PTP_NUM; i++) {
		err = snd_dma_alloc_pages(SNDRV_DMA_TYPE_DEV,
					  &pci->dev,
					  PAGE_SIZE, &vm->ptp[i]);
		if (err < 0)
			break;
	}
	if (err < 0) {
		/* no page table pages are allocated */
		ct_vm_destroy(vm);
		return -ENOMEM;
	}
	vm->size = CT_ADDRS_PER_PAGE * i;
	vm->map = ct_vm_map;
	vm->unmap = ct_vm_unmap;
	vm->get_ptp_phys = ct_get_ptp_phys;
	INIT_LIST_HEAD(&vm->unused);
	INIT_LIST_HEAD(&vm->used);
	block = kzalloc(sizeof(*block), GFP_KERNEL);
	if (block) {
		block->addr = 0;
		block->size = vm->size;
		list_add(&block->list, &vm->unused);
	}

	*rvm = vm;
	return 0;
}
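
/*
 * Usage sketch (illustrative only): the typical create/destroy pairing
 * in a probe/remove style path; error handling is abbreviated:
 *
 *	struct ct_vm *vm;
 *	int err;
 *
 *	err = ct_vm_create(&vm, pci);
 *	if (err < 0)
 *		return err;
 *	...
 *	ct_vm_destroy(vm);
 */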

/*
 * The caller must ensure that no mapping pages are being used
 * by hardware before calling this function.
 */
void ct_vm_destroy(struct ct_vm *vm)
{
	int i;
	struct list_head *pos;
	struct ct_vm_block *entry;

	/* free used and unused list nodes */
	while (!list_empty(&vm->used)) {
		pos = vm->used.next;
		list_del(pos);
		entry = list_entry(pos, struct ct_vm_block, list);
		kfree(entry);
	}
	while (!list_empty(&vm->unused)) {
		pos = vm->unused.next;
		list_del(pos);
		entry = list_entry(pos, struct ct_vm_block, list);
		kfree(entry);
	}

	/* free allocated page table pages */
	for (i = 0; i < CT_PTP_NUM; i++)
		snd_dma_free_pages(&vm->ptp[i]);

	vm->size = 0;

	kfree(vm);
}
243