xref: /OK3568_Linux_fs/kernel/drivers/misc/sram.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Generic on-chip SRAM allocation driver
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2012 Philipp Zabel, Pengutronix
6*4882a593Smuzhiyun  */
7*4882a593Smuzhiyun 
8*4882a593Smuzhiyun #include <linux/clk.h>
9*4882a593Smuzhiyun #include <linux/delay.h>
10*4882a593Smuzhiyun #include <linux/genalloc.h>
11*4882a593Smuzhiyun #include <linux/io.h>
12*4882a593Smuzhiyun #include <linux/list_sort.h>
13*4882a593Smuzhiyun #include <linux/of_address.h>
14*4882a593Smuzhiyun #include <linux/of_device.h>
15*4882a593Smuzhiyun #include <linux/platform_device.h>
16*4882a593Smuzhiyun #include <linux/regmap.h>
17*4882a593Smuzhiyun #include <linux/slab.h>
18*4882a593Smuzhiyun #include <linux/mfd/syscon.h>
19*4882a593Smuzhiyun #include <soc/at91/atmel-secumod.h>
20*4882a593Smuzhiyun 
21*4882a593Smuzhiyun #include "sram.h"
22*4882a593Smuzhiyun 
23*4882a593Smuzhiyun #define SRAM_GRANULARITY	32
24*4882a593Smuzhiyun 
sram_read(struct file * filp,struct kobject * kobj,struct bin_attribute * attr,char * buf,loff_t pos,size_t count)25*4882a593Smuzhiyun static ssize_t sram_read(struct file *filp, struct kobject *kobj,
26*4882a593Smuzhiyun 			 struct bin_attribute *attr,
27*4882a593Smuzhiyun 			 char *buf, loff_t pos, size_t count)
28*4882a593Smuzhiyun {
29*4882a593Smuzhiyun 	struct sram_partition *part;
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun 	part = container_of(attr, struct sram_partition, battr);
32*4882a593Smuzhiyun 
33*4882a593Smuzhiyun 	mutex_lock(&part->lock);
34*4882a593Smuzhiyun 	memcpy_fromio(buf, part->base + pos, count);
35*4882a593Smuzhiyun 	mutex_unlock(&part->lock);
36*4882a593Smuzhiyun 
37*4882a593Smuzhiyun 	return count;
38*4882a593Smuzhiyun }
39*4882a593Smuzhiyun 
sram_write(struct file * filp,struct kobject * kobj,struct bin_attribute * attr,char * buf,loff_t pos,size_t count)40*4882a593Smuzhiyun static ssize_t sram_write(struct file *filp, struct kobject *kobj,
41*4882a593Smuzhiyun 			  struct bin_attribute *attr,
42*4882a593Smuzhiyun 			  char *buf, loff_t pos, size_t count)
43*4882a593Smuzhiyun {
44*4882a593Smuzhiyun 	struct sram_partition *part;
45*4882a593Smuzhiyun 
46*4882a593Smuzhiyun 	part = container_of(attr, struct sram_partition, battr);
47*4882a593Smuzhiyun 
48*4882a593Smuzhiyun 	mutex_lock(&part->lock);
49*4882a593Smuzhiyun 	memcpy_toio(part->base + pos, buf, count);
50*4882a593Smuzhiyun 	mutex_unlock(&part->lock);
51*4882a593Smuzhiyun 
52*4882a593Smuzhiyun 	return count;
53*4882a593Smuzhiyun }
54*4882a593Smuzhiyun 
/*
 * Create a dedicated gen_pool for one reserved block and seed it with the
 * block's virtual/physical range. The pool is device-managed, so it is
 * released automatically when the device goes away.
 *
 * Returns 0 on success or a negative errno.
 */
static int sram_add_pool(struct sram_dev *sram, struct sram_reserve *block,
			 phys_addr_t start, struct sram_partition *part)
{
	int err;

	part->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),
					  NUMA_NO_NODE, block->label);
	if (IS_ERR(part->pool))
		return PTR_ERR(part->pool);

	err = gen_pool_add_virt(part->pool, (unsigned long)part->base, start,
				block->size, NUMA_NO_NODE);
	if (err < 0)
		dev_err(sram->dev, "failed to register subpool: %d\n", err);

	return err < 0 ? err : 0;
}
74*4882a593Smuzhiyun 
/*
 * Expose one reserved block to userspace as a sysfs binary attribute named
 * "<physaddr>.sram", readable/writable by root only. The attribute name is
 * device-managed; the file itself is removed in sram_free_partitions().
 */
static int sram_add_export(struct sram_dev *sram, struct sram_reserve *block,
			   phys_addr_t start, struct sram_partition *part)
{
	struct bin_attribute *battr = &part->battr;

	sysfs_bin_attr_init(battr);
	battr->attr.name = devm_kasprintf(sram->dev, GFP_KERNEL, "%llx.sram",
					  (unsigned long long)start);
	if (!battr->attr.name)
		return -ENOMEM;

	battr->attr.mode = S_IRUSR | S_IWUSR;
	battr->read = sram_read;
	battr->write = sram_write;
	battr->size = block->size;

	return device_create_bin_file(sram->dev, battr);
}
92*4882a593Smuzhiyun 
/*
 * Instantiate the next sram_partition slot for @block: set up its lock and
 * base pointer, then attach whichever facilities the DT requested — a
 * gen_pool ("pool"), a sysfs export ("export"), and/or an exec-protected
 * pool ("protect-exec"). The partition counter is only advanced once every
 * requested facility was set up successfully, so a failed partition is
 * never visited by sram_free_partitions().
 */
static int sram_add_partition(struct sram_dev *sram, struct sram_reserve *block,
			      phys_addr_t start)
{
	struct sram_partition *part = &sram->partition[sram->partitions];
	int err;

	mutex_init(&part->lock);
	part->base = sram->virt_base + block->start;

	if (block->pool) {
		err = sram_add_pool(sram, block, start, part);
		if (err)
			return err;
	}

	if (block->export) {
		err = sram_add_export(sram, block, start, part);
		if (err)
			return err;
	}

	if (block->protect_exec) {
		err = sram_check_protect_exec(sram, block, part);
		if (err)
			return err;

		/* protect-exec regions also get a pool of their own. */
		err = sram_add_pool(sram, block, start, part);
		if (err)
			return err;

		sram_add_protect_exec(part);
	}

	sram->partitions++;

	return 0;
}
128*4882a593Smuzhiyun 
sram_free_partitions(struct sram_dev * sram)129*4882a593Smuzhiyun static void sram_free_partitions(struct sram_dev *sram)
130*4882a593Smuzhiyun {
131*4882a593Smuzhiyun 	struct sram_partition *part;
132*4882a593Smuzhiyun 
133*4882a593Smuzhiyun 	if (!sram->partitions)
134*4882a593Smuzhiyun 		return;
135*4882a593Smuzhiyun 
136*4882a593Smuzhiyun 	part = &sram->partition[sram->partitions - 1];
137*4882a593Smuzhiyun 	for (; sram->partitions; sram->partitions--, part--) {
138*4882a593Smuzhiyun 		if (part->battr.size)
139*4882a593Smuzhiyun 			device_remove_bin_file(sram->dev, &part->battr);
140*4882a593Smuzhiyun 
141*4882a593Smuzhiyun 		if (part->pool &&
142*4882a593Smuzhiyun 		    gen_pool_avail(part->pool) < gen_pool_size(part->pool))
143*4882a593Smuzhiyun 			dev_err(sram->dev, "removed pool while SRAM allocated\n");
144*4882a593Smuzhiyun 	}
145*4882a593Smuzhiyun }
146*4882a593Smuzhiyun 
sram_reserve_cmp(void * priv,struct list_head * a,struct list_head * b)147*4882a593Smuzhiyun static int sram_reserve_cmp(void *priv, struct list_head *a,
148*4882a593Smuzhiyun 					struct list_head *b)
149*4882a593Smuzhiyun {
150*4882a593Smuzhiyun 	struct sram_reserve *ra = list_entry(a, struct sram_reserve, list);
151*4882a593Smuzhiyun 	struct sram_reserve *rb = list_entry(b, struct sram_reserve, list);
152*4882a593Smuzhiyun 
153*4882a593Smuzhiyun 	return ra->start - rb->start;
154*4882a593Smuzhiyun }
155*4882a593Smuzhiyun 
/*
 * Parse the device's DT children into reserved blocks, create partitions
 * for blocks flagged "export"/"pool"/"protect-exec", and add every gap
 * between reserved blocks to the device-wide gen_pool.
 *
 * @sram: driver state; sram->pool and sram->virt_base must already be set.
 * @res:  the MMIO resource backing the whole SRAM region.
 *
 * Returns 0 on success or a negative errno. On failure any partitions
 * created so far are freed; gen_pool chunks are devm-managed.
 */
static int sram_reserve_regions(struct sram_dev *sram, struct resource *res)
{
	struct device_node *np = sram->dev->of_node, *child;
	unsigned long size, cur_start, cur_size;
	struct sram_reserve *rblocks, *block;
	struct list_head reserve_list;
	unsigned int nblocks, exports = 0;
	const char *label;
	int ret = 0;

	INIT_LIST_HEAD(&reserve_list);

	size = resource_size(res);

	/*
	 * We need an additional block to mark the end of the memory region
	 * after the reserved blocks from the dt are processed.
	 */
	nblocks = (np) ? of_get_available_child_count(np) + 1 : 1;
	rblocks = kcalloc(nblocks, sizeof(*rblocks), GFP_KERNEL);
	if (!rblocks)
		return -ENOMEM;

	/* First pass: translate each DT child into an rblocks[] entry. */
	block = &rblocks[0];
	for_each_available_child_of_node(np, child) {
		struct resource child_res;

		ret = of_address_to_resource(child, 0, &child_res);
		if (ret < 0) {
			dev_err(sram->dev,
				"could not get address for node %pOF\n",
				child);
			goto err_chunks;
		}

		/* A reserved block must lie entirely inside the SRAM area. */
		if (child_res.start < res->start || child_res.end > res->end) {
			dev_err(sram->dev,
				"reserved block %pOF outside the sram area\n",
				child);
			ret = -EINVAL;
			goto err_chunks;
		}

		/* Store offsets relative to the start of the SRAM region. */
		block->start = child_res.start - res->start;
		block->size = resource_size(&child_res);
		list_add_tail(&block->list, &reserve_list);

		if (of_find_property(child, "export", NULL))
			block->export = true;

		if (of_find_property(child, "pool", NULL))
			block->pool = true;

		if (of_find_property(child, "protect-exec", NULL))
			block->protect_exec = true;

		if ((block->export || block->pool || block->protect_exec) &&
		    block->size) {
			exports++;

			/*
			 * "label" is optional: -EINVAL means the property is
			 * absent and the node name is used instead; any other
			 * error is fatal.
			 */
			label = NULL;
			ret = of_property_read_string(child, "label", &label);
			if (ret && ret != -EINVAL) {
				dev_err(sram->dev,
					"%pOF has invalid label name\n",
					child);
				goto err_chunks;
			}
			if (!label)
				label = child->name;

			block->label = devm_kstrdup(sram->dev,
						    label, GFP_KERNEL);
			if (!block->label) {
				ret = -ENOMEM;
				goto err_chunks;
			}

			dev_dbg(sram->dev, "found %sblock '%s' 0x%x-0x%x\n",
				block->export ? "exported " : "", block->label,
				block->start, block->start + block->size);
		} else {
			dev_dbg(sram->dev, "found reserved block 0x%x-0x%x\n",
				block->start, block->start + block->size);
		}

		block++;
	}
	/* Loop completed normally: no child reference left to drop. */
	child = NULL;

	/* the last chunk marks the end of the region */
	rblocks[nblocks - 1].start = size;
	rblocks[nblocks - 1].size = 0;
	list_add_tail(&rblocks[nblocks - 1].list, &reserve_list);

	/* Sort by start offset so gaps can be walked linearly below. */
	list_sort(NULL, &reserve_list, sram_reserve_cmp);

	if (exports) {
		sram->partition = devm_kcalloc(sram->dev,
				       exports, sizeof(*sram->partition),
				       GFP_KERNEL);
		if (!sram->partition) {
			ret = -ENOMEM;
			goto err_chunks;
		}
	}

	/*
	 * Second pass: walk the sorted blocks, creating partitions for the
	 * flagged ones and feeding each unreserved gap into sram->pool.
	 * cur_start tracks the first offset not yet accounted for.
	 */
	cur_start = 0;
	list_for_each_entry(block, &reserve_list, list) {
		/* can only happen if sections overlap */
		if (block->start < cur_start) {
			dev_err(sram->dev,
				"block at 0x%x starts after current offset 0x%lx\n",
				block->start, cur_start);
			ret = -EINVAL;
			sram_free_partitions(sram);
			goto err_chunks;
		}

		if ((block->export || block->pool || block->protect_exec) &&
		    block->size) {
			ret = sram_add_partition(sram, block,
						 res->start + block->start);
			if (ret) {
				sram_free_partitions(sram);
				goto err_chunks;
			}
		}

		/* current start is in a reserved block, so continue after it */
		if (block->start == cur_start) {
			cur_start = block->start + block->size;
			continue;
		}

		/*
		 * allocate the space between the current starting
		 * address and the following reserved block, or the
		 * end of the region.
		 */
		cur_size = block->start - cur_start;

		dev_dbg(sram->dev, "adding chunk 0x%lx-0x%lx\n",
			cur_start, cur_start + cur_size);

		ret = gen_pool_add_virt(sram->pool,
				(unsigned long)sram->virt_base + cur_start,
				res->start + cur_start, cur_size, -1);
		if (ret < 0) {
			sram_free_partitions(sram);
			goto err_chunks;
		}

		/* next allocation after this reserved block */
		cur_start = block->start + block->size;
	}

err_chunks:
	/* of_node_put(NULL) is a no-op on the success path. */
	of_node_put(child);
	kfree(rblocks);

	return ret;
}
319*4882a593Smuzhiyun 
atmel_securam_wait(void)320*4882a593Smuzhiyun static int atmel_securam_wait(void)
321*4882a593Smuzhiyun {
322*4882a593Smuzhiyun 	struct regmap *regmap;
323*4882a593Smuzhiyun 	u32 val;
324*4882a593Smuzhiyun 
325*4882a593Smuzhiyun 	regmap = syscon_regmap_lookup_by_compatible("atmel,sama5d2-secumod");
326*4882a593Smuzhiyun 	if (IS_ERR(regmap))
327*4882a593Smuzhiyun 		return -ENODEV;
328*4882a593Smuzhiyun 
329*4882a593Smuzhiyun 	return regmap_read_poll_timeout(regmap, AT91_SECUMOD_RAMRDY, val,
330*4882a593Smuzhiyun 					val & AT91_SECUMOD_RAMRDY_READY,
331*4882a593Smuzhiyun 					10000, 500000);
332*4882a593Smuzhiyun }
333*4882a593Smuzhiyun 
334*4882a593Smuzhiyun static const struct of_device_id sram_dt_ids[] = {
335*4882a593Smuzhiyun 	{ .compatible = "mmio-sram" },
336*4882a593Smuzhiyun 	{ .compatible = "atmel,sama5d2-securam", .data = atmel_securam_wait },
337*4882a593Smuzhiyun 	{}
338*4882a593Smuzhiyun };
339*4882a593Smuzhiyun 
/*
 * Probe: map the SRAM (write-combined unless "no-memory-wc" is set),
 * create the device-wide gen_pool, enable the optional clock, register
 * reserved regions/partitions from the DT, then run any per-compatible
 * init hook. Most resources are devm-managed; the clock and partitions
 * are unwound manually on failure.
 */
static int sram_probe(struct platform_device *pdev)
{
	struct sram_dev *sram;
	int ret;
	int (*init_func)(void);

	sram = devm_kzalloc(&pdev->dev, sizeof(*sram), GFP_KERNEL);
	if (!sram)
		return -ENOMEM;

	sram->dev = &pdev->dev;

	/* "no-memory-wc" requests a plain (uncached) mapping instead of WC. */
	if (of_property_read_bool(pdev->dev.of_node, "no-memory-wc"))
		sram->virt_base = devm_platform_ioremap_resource(pdev, 0);
	else
		sram->virt_base = devm_platform_ioremap_resource_wc(pdev, 0);
	if (IS_ERR(sram->virt_base)) {
		dev_err(&pdev->dev, "could not map SRAM registers\n");
		return PTR_ERR(sram->virt_base);
	}

	sram->pool = devm_gen_pool_create(sram->dev, ilog2(SRAM_GRANULARITY),
					  NUMA_NO_NODE, NULL);
	if (IS_ERR(sram->pool))
		return PTR_ERR(sram->pool);

	/* The clock is optional; absence is not an error. */
	sram->clk = devm_clk_get(sram->dev, NULL);
	if (IS_ERR(sram->clk))
		sram->clk = NULL;
	else
		clk_prepare_enable(sram->clk);

	ret = sram_reserve_regions(sram,
			platform_get_resource(pdev, IORESOURCE_MEM, 0));
	if (ret)
		goto err_disable_clk;

	platform_set_drvdata(pdev, sram);

	/* Per-compatible init hook, e.g. atmel_securam_wait(). */
	init_func = of_device_get_match_data(&pdev->dev);
	if (init_func) {
		ret = init_func();
		if (ret)
			goto err_free_partitions;
	}

	dev_dbg(sram->dev, "SRAM pool: %zu KiB @ 0x%p\n",
		gen_pool_size(sram->pool) / 1024, sram->virt_base);

	return 0;

err_free_partitions:
	sram_free_partitions(sram);
err_disable_clk:
	if (sram->clk)
		clk_disable_unprepare(sram->clk);

	return ret;
}
399*4882a593Smuzhiyun 
sram_remove(struct platform_device * pdev)400*4882a593Smuzhiyun static int sram_remove(struct platform_device *pdev)
401*4882a593Smuzhiyun {
402*4882a593Smuzhiyun 	struct sram_dev *sram = platform_get_drvdata(pdev);
403*4882a593Smuzhiyun 
404*4882a593Smuzhiyun 	sram_free_partitions(sram);
405*4882a593Smuzhiyun 
406*4882a593Smuzhiyun 	if (gen_pool_avail(sram->pool) < gen_pool_size(sram->pool))
407*4882a593Smuzhiyun 		dev_err(sram->dev, "removed while SRAM allocated\n");
408*4882a593Smuzhiyun 
409*4882a593Smuzhiyun 	if (sram->clk)
410*4882a593Smuzhiyun 		clk_disable_unprepare(sram->clk);
411*4882a593Smuzhiyun 
412*4882a593Smuzhiyun 	return 0;
413*4882a593Smuzhiyun }
414*4882a593Smuzhiyun 
415*4882a593Smuzhiyun static struct platform_driver sram_driver = {
416*4882a593Smuzhiyun 	.driver = {
417*4882a593Smuzhiyun 		.name = "sram",
418*4882a593Smuzhiyun 		.of_match_table = sram_dt_ids,
419*4882a593Smuzhiyun 	},
420*4882a593Smuzhiyun 	.probe = sram_probe,
421*4882a593Smuzhiyun 	.remove = sram_remove,
422*4882a593Smuzhiyun };
423*4882a593Smuzhiyun 
sram_init(void)424*4882a593Smuzhiyun static int __init sram_init(void)
425*4882a593Smuzhiyun {
426*4882a593Smuzhiyun 	return platform_driver_register(&sram_driver);
427*4882a593Smuzhiyun }
428*4882a593Smuzhiyun 
429*4882a593Smuzhiyun postcore_initcall(sram_init);
430