1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * TI AM33XX SRAM EMIF Driver
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * Copyright (C) 2016-2017 Texas Instruments Inc.
6*4882a593Smuzhiyun * Dave Gerlach
7*4882a593Smuzhiyun */
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #include <linux/err.h>
10*4882a593Smuzhiyun #include <linux/genalloc.h>
11*4882a593Smuzhiyun #include <linux/io.h>
12*4882a593Smuzhiyun #include <linux/kernel.h>
13*4882a593Smuzhiyun #include <linux/module.h>
14*4882a593Smuzhiyun #include <linux/of.h>
15*4882a593Smuzhiyun #include <linux/of_platform.h>
16*4882a593Smuzhiyun #include <linux/platform_device.h>
17*4882a593Smuzhiyun #include <linux/sram.h>
18*4882a593Smuzhiyun #include <linux/ti-emif-sram.h>
19*4882a593Smuzhiyun
20*4882a593Smuzhiyun #include "emif.h"
21*4882a593Smuzhiyun
22*4882a593Smuzhiyun #define TI_EMIF_SRAM_SYMBOL_OFFSET(sym) ((unsigned long)(sym) - \
23*4882a593Smuzhiyun (unsigned long)&ti_emif_sram)
24*4882a593Smuzhiyun
25*4882a593Smuzhiyun #define EMIF_POWER_MGMT_WAIT_SELF_REFRESH_8192_CYCLES 0x00a0
26*4882a593Smuzhiyun
27*4882a593Smuzhiyun struct ti_emif_data {
28*4882a593Smuzhiyun phys_addr_t ti_emif_sram_phys;
29*4882a593Smuzhiyun phys_addr_t ti_emif_sram_data_phys;
30*4882a593Smuzhiyun unsigned long ti_emif_sram_virt;
31*4882a593Smuzhiyun unsigned long ti_emif_sram_data_virt;
32*4882a593Smuzhiyun struct gen_pool *sram_pool_code;
33*4882a593Smuzhiyun struct gen_pool *sram_pool_data;
34*4882a593Smuzhiyun struct ti_emif_pm_data pm_data;
35*4882a593Smuzhiyun struct ti_emif_pm_functions pm_functions;
36*4882a593Smuzhiyun };
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun static struct ti_emif_data *emif_instance;
39*4882a593Smuzhiyun
sram_suspend_address(struct ti_emif_data * emif_data,unsigned long addr)40*4882a593Smuzhiyun static u32 sram_suspend_address(struct ti_emif_data *emif_data,
41*4882a593Smuzhiyun unsigned long addr)
42*4882a593Smuzhiyun {
43*4882a593Smuzhiyun return (emif_data->ti_emif_sram_virt +
44*4882a593Smuzhiyun TI_EMIF_SRAM_SYMBOL_OFFSET(addr));
45*4882a593Smuzhiyun }
46*4882a593Smuzhiyun
sram_resume_address(struct ti_emif_data * emif_data,unsigned long addr)47*4882a593Smuzhiyun static phys_addr_t sram_resume_address(struct ti_emif_data *emif_data,
48*4882a593Smuzhiyun unsigned long addr)
49*4882a593Smuzhiyun {
50*4882a593Smuzhiyun return ((unsigned long)emif_data->ti_emif_sram_phys +
51*4882a593Smuzhiyun TI_EMIF_SRAM_SYMBOL_OFFSET(addr));
52*4882a593Smuzhiyun }
53*4882a593Smuzhiyun
ti_emif_free_sram(struct ti_emif_data * emif_data)54*4882a593Smuzhiyun static void ti_emif_free_sram(struct ti_emif_data *emif_data)
55*4882a593Smuzhiyun {
56*4882a593Smuzhiyun gen_pool_free(emif_data->sram_pool_code, emif_data->ti_emif_sram_virt,
57*4882a593Smuzhiyun ti_emif_sram_sz);
58*4882a593Smuzhiyun gen_pool_free(emif_data->sram_pool_data,
59*4882a593Smuzhiyun emif_data->ti_emif_sram_data_virt,
60*4882a593Smuzhiyun sizeof(struct emif_regs_amx3));
61*4882a593Smuzhiyun }
62*4882a593Smuzhiyun
/*
 * Allocate ocmcram for the EMIF PM code and its data area, record both
 * virtual and physical bases, and fill in the absolute entry-point
 * addresses in pm_functions.
 *
 * Returns 0 on success or a negative errno; on failure nothing is left
 * allocated.
 */
static int ti_emif_alloc_sram(struct device *dev,
			      struct ti_emif_data *emif_data)
{
	struct device_node *np = dev->of_node;
	int ret;

	/* Pool index 0 in the "sram" phandle list holds executable code */
	emif_data->sram_pool_code = of_gen_pool_get(np, "sram", 0);
	if (!emif_data->sram_pool_code) {
		dev_err(dev, "Unable to get sram pool for ocmcram code\n");
		return -ENODEV;
	}

	emif_data->ti_emif_sram_virt =
			gen_pool_alloc(emif_data->sram_pool_code,
				       ti_emif_sram_sz);
	if (!emif_data->ti_emif_sram_virt) {
		dev_err(dev, "Unable to allocate code memory from ocmcram\n");
		return -ENOMEM;
	}

	/* Save physical address to calculate resume offset during pm init */
	emif_data->ti_emif_sram_phys =
			gen_pool_virt_to_phys(emif_data->sram_pool_code,
					      emif_data->ti_emif_sram_virt);

	/* Get sram pool for data section and allocate space */
	emif_data->sram_pool_data = of_gen_pool_get(np, "sram", 1);
	if (!emif_data->sram_pool_data) {
		dev_err(dev, "Unable to get sram pool for ocmcram data\n");
		ret = -ENODEV;
		goto err_free_sram_code;
	}

	emif_data->ti_emif_sram_data_virt =
			gen_pool_alloc(emif_data->sram_pool_data,
				       sizeof(struct emif_regs_amx3));
	if (!emif_data->ti_emif_sram_data_virt) {
		dev_err(dev, "Unable to allocate data memory from ocmcram\n");
		ret = -ENOMEM;
		goto err_free_sram_code;
	}

	/* Save physical address to calculate resume offset during pm init */
	emif_data->ti_emif_sram_data_phys =
		gen_pool_virt_to_phys(emif_data->sram_pool_data,
				      emif_data->ti_emif_sram_data_virt);
	/*
	 * These functions are called during suspend path while MMU is
	 * still on so add virtual base to offset for absolute address
	 */
	emif_data->pm_functions.save_context =
		sram_suspend_address(emif_data,
				     (unsigned long)ti_emif_save_context);
	emif_data->pm_functions.enter_sr =
		sram_suspend_address(emif_data,
				     (unsigned long)ti_emif_enter_sr);
	emif_data->pm_functions.abort_sr =
		sram_suspend_address(emif_data,
				     (unsigned long)ti_emif_abort_sr);

	/*
	 * These are called during resume path when MMU is not enabled
	 * so physical address is used instead
	 */
	emif_data->pm_functions.restore_context =
		sram_resume_address(emif_data,
				    (unsigned long)ti_emif_restore_context);
	emif_data->pm_functions.exit_sr =
		sram_resume_address(emif_data,
				    (unsigned long)ti_emif_exit_sr);
	emif_data->pm_functions.run_hw_leveling =
		sram_resume_address(emif_data,
				    (unsigned long)ti_emif_run_hw_leveling);

	/* Tell the sram-resident PM code where its register save area lives */
	emif_data->pm_data.regs_virt =
		(struct emif_regs_amx3 *)emif_data->ti_emif_sram_data_virt;
	emif_data->pm_data.regs_phys = emif_data->ti_emif_sram_data_phys;

	return 0;

err_free_sram_code:
	gen_pool_free(emif_data->sram_pool_code, emif_data->ti_emif_sram_virt,
		      ti_emif_sram_sz);
	return ret;
}
148*4882a593Smuzhiyun
ti_emif_push_sram(struct device * dev,struct ti_emif_data * emif_data)149*4882a593Smuzhiyun static int ti_emif_push_sram(struct device *dev, struct ti_emif_data *emif_data)
150*4882a593Smuzhiyun {
151*4882a593Smuzhiyun void *copy_addr;
152*4882a593Smuzhiyun u32 data_addr;
153*4882a593Smuzhiyun
154*4882a593Smuzhiyun copy_addr = sram_exec_copy(emif_data->sram_pool_code,
155*4882a593Smuzhiyun (void *)emif_data->ti_emif_sram_virt,
156*4882a593Smuzhiyun &ti_emif_sram, ti_emif_sram_sz);
157*4882a593Smuzhiyun if (!copy_addr) {
158*4882a593Smuzhiyun dev_err(dev, "Cannot copy emif code to sram\n");
159*4882a593Smuzhiyun return -ENODEV;
160*4882a593Smuzhiyun }
161*4882a593Smuzhiyun
162*4882a593Smuzhiyun data_addr = sram_suspend_address(emif_data,
163*4882a593Smuzhiyun (unsigned long)&ti_emif_pm_sram_data);
164*4882a593Smuzhiyun copy_addr = sram_exec_copy(emif_data->sram_pool_code,
165*4882a593Smuzhiyun (void *)data_addr,
166*4882a593Smuzhiyun &emif_data->pm_data,
167*4882a593Smuzhiyun sizeof(emif_data->pm_data));
168*4882a593Smuzhiyun if (!copy_addr) {
169*4882a593Smuzhiyun dev_err(dev, "Cannot copy emif data to code sram\n");
170*4882a593Smuzhiyun return -ENODEV;
171*4882a593Smuzhiyun }
172*4882a593Smuzhiyun
173*4882a593Smuzhiyun return 0;
174*4882a593Smuzhiyun }
175*4882a593Smuzhiyun
176*4882a593Smuzhiyun /*
177*4882a593Smuzhiyun * Due to Usage Note 3.1.2 "DDR3: JEDEC Compliance for Maximum
178*4882a593Smuzhiyun * Self-Refresh Command Limit" found in AM335x Silicon Errata
179*4882a593Smuzhiyun * (Document SPRZ360F Revised November 2013) we must configure
180*4882a593Smuzhiyun * the self refresh delay timer to 0xA (8192 cycles) to avoid
181*4882a593Smuzhiyun * generating too many refresh command from the EMIF.
182*4882a593Smuzhiyun */
ti_emif_configure_sr_delay(struct ti_emif_data * emif_data)183*4882a593Smuzhiyun static void ti_emif_configure_sr_delay(struct ti_emif_data *emif_data)
184*4882a593Smuzhiyun {
185*4882a593Smuzhiyun writel(EMIF_POWER_MGMT_WAIT_SELF_REFRESH_8192_CYCLES,
186*4882a593Smuzhiyun (emif_data->pm_data.ti_emif_base_addr_virt +
187*4882a593Smuzhiyun EMIF_POWER_MANAGEMENT_CONTROL));
188*4882a593Smuzhiyun
189*4882a593Smuzhiyun writel(EMIF_POWER_MGMT_WAIT_SELF_REFRESH_8192_CYCLES,
190*4882a593Smuzhiyun (emif_data->pm_data.ti_emif_base_addr_virt +
191*4882a593Smuzhiyun EMIF_POWER_MANAGEMENT_CTRL_SHDW));
192*4882a593Smuzhiyun }
193*4882a593Smuzhiyun
194*4882a593Smuzhiyun /**
195*4882a593Smuzhiyun * ti_emif_copy_pm_function_table - copy mapping of pm funcs in sram
196*4882a593Smuzhiyun * @sram_pool: pointer to struct gen_pool where dst resides
197*4882a593Smuzhiyun * @dst: void * to address that table should be copied
198*4882a593Smuzhiyun *
199*4882a593Smuzhiyun * Returns 0 if success other error code if table is not available
200*4882a593Smuzhiyun */
ti_emif_copy_pm_function_table(struct gen_pool * sram_pool,void * dst)201*4882a593Smuzhiyun int ti_emif_copy_pm_function_table(struct gen_pool *sram_pool, void *dst)
202*4882a593Smuzhiyun {
203*4882a593Smuzhiyun void *copy_addr;
204*4882a593Smuzhiyun
205*4882a593Smuzhiyun if (!emif_instance)
206*4882a593Smuzhiyun return -ENODEV;
207*4882a593Smuzhiyun
208*4882a593Smuzhiyun copy_addr = sram_exec_copy(sram_pool, dst,
209*4882a593Smuzhiyun &emif_instance->pm_functions,
210*4882a593Smuzhiyun sizeof(emif_instance->pm_functions));
211*4882a593Smuzhiyun if (!copy_addr)
212*4882a593Smuzhiyun return -ENODEV;
213*4882a593Smuzhiyun
214*4882a593Smuzhiyun return 0;
215*4882a593Smuzhiyun }
216*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(ti_emif_copy_pm_function_table);
217*4882a593Smuzhiyun
218*4882a593Smuzhiyun /**
219*4882a593Smuzhiyun * ti_emif_get_mem_type - return type for memory type in use
220*4882a593Smuzhiyun *
221*4882a593Smuzhiyun * Returns memory type value read from EMIF or error code if fails
222*4882a593Smuzhiyun */
ti_emif_get_mem_type(void)223*4882a593Smuzhiyun int ti_emif_get_mem_type(void)
224*4882a593Smuzhiyun {
225*4882a593Smuzhiyun unsigned long temp;
226*4882a593Smuzhiyun
227*4882a593Smuzhiyun if (!emif_instance)
228*4882a593Smuzhiyun return -ENODEV;
229*4882a593Smuzhiyun
230*4882a593Smuzhiyun temp = readl(emif_instance->pm_data.ti_emif_base_addr_virt +
231*4882a593Smuzhiyun EMIF_SDRAM_CONFIG);
232*4882a593Smuzhiyun
233*4882a593Smuzhiyun temp = (temp & SDRAM_TYPE_MASK) >> SDRAM_TYPE_SHIFT;
234*4882a593Smuzhiyun return temp;
235*4882a593Smuzhiyun }
236*4882a593Smuzhiyun EXPORT_SYMBOL_GPL(ti_emif_get_mem_type);
237*4882a593Smuzhiyun
238*4882a593Smuzhiyun static const struct of_device_id ti_emif_of_match[] = {
239*4882a593Smuzhiyun { .compatible = "ti,emif-am3352", .data =
240*4882a593Smuzhiyun (void *)EMIF_SRAM_AM33_REG_LAYOUT, },
241*4882a593Smuzhiyun { .compatible = "ti,emif-am4372", .data =
242*4882a593Smuzhiyun (void *)EMIF_SRAM_AM43_REG_LAYOUT, },
243*4882a593Smuzhiyun {},
244*4882a593Smuzhiyun };
245*4882a593Smuzhiyun MODULE_DEVICE_TABLE(of, ti_emif_of_match);
246*4882a593Smuzhiyun
247*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
ti_emif_resume(struct device * dev)248*4882a593Smuzhiyun static int ti_emif_resume(struct device *dev)
249*4882a593Smuzhiyun {
250*4882a593Smuzhiyun unsigned long tmp =
251*4882a593Smuzhiyun __raw_readl((void __iomem *)emif_instance->ti_emif_sram_virt);
252*4882a593Smuzhiyun
253*4882a593Smuzhiyun /*
254*4882a593Smuzhiyun * Check to see if what we are copying is already present in the
255*4882a593Smuzhiyun * first byte at the destination, only copy if it is not which
256*4882a593Smuzhiyun * indicates we have lost context and sram no longer contains
257*4882a593Smuzhiyun * the PM code
258*4882a593Smuzhiyun */
259*4882a593Smuzhiyun if (tmp != ti_emif_sram)
260*4882a593Smuzhiyun ti_emif_push_sram(dev, emif_instance);
261*4882a593Smuzhiyun
262*4882a593Smuzhiyun return 0;
263*4882a593Smuzhiyun }
264*4882a593Smuzhiyun
/* Nothing to save on suspend; see comment below. */
static int ti_emif_suspend(struct device *dev)
{
	/*
	 * The contents will be present in DDR hence no need to
	 * explicitly save
	 */
	return 0;
}
273*4882a593Smuzhiyun #endif /* CONFIG_PM_SLEEP */
274*4882a593Smuzhiyun
/*
 * Probe: map the EMIF registers, apply the self-refresh-delay erratum
 * workaround, carve out ocmcram for the PM code/data, push the code
 * into sram, and publish the instance for the exported helpers.
 */
static int ti_emif_probe(struct platform_device *pdev)
{
	int ret;
	struct resource *res;
	struct device *dev = &pdev->dev;
	const struct of_device_id *match;
	struct ti_emif_data *emif_data;

	emif_data = devm_kzalloc(dev, sizeof(*emif_data), GFP_KERNEL);
	if (!emif_data)
		return -ENOMEM;

	match = of_match_device(ti_emif_of_match, &pdev->dev);
	if (!match)
		return -ENODEV;

	/* match->data carries the EMIF_SRAM_*_REG_LAYOUT selector */
	emif_data->pm_data.ti_emif_sram_config = (unsigned long)match->data;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	emif_data->pm_data.ti_emif_base_addr_virt = devm_ioremap_resource(dev,
									  res);
	if (IS_ERR(emif_data->pm_data.ti_emif_base_addr_virt)) {
		ret = PTR_ERR(emif_data->pm_data.ti_emif_base_addr_virt);
		return ret;
	}

	/* Physical base is needed by the MMU-off resume code */
	emif_data->pm_data.ti_emif_base_addr_phys = res->start;

	ti_emif_configure_sr_delay(emif_data);

	ret = ti_emif_alloc_sram(dev, emif_data);
	if (ret)
		return ret;

	ret = ti_emif_push_sram(dev, emif_data);
	if (ret)
		goto fail_free_sram;

	emif_instance = emif_data;

	return 0;

fail_free_sram:
	ti_emif_free_sram(emif_data);

	return ret;
}
322*4882a593Smuzhiyun
ti_emif_remove(struct platform_device * pdev)323*4882a593Smuzhiyun static int ti_emif_remove(struct platform_device *pdev)
324*4882a593Smuzhiyun {
325*4882a593Smuzhiyun struct ti_emif_data *emif_data = emif_instance;
326*4882a593Smuzhiyun
327*4882a593Smuzhiyun emif_instance = NULL;
328*4882a593Smuzhiyun
329*4882a593Smuzhiyun ti_emif_free_sram(emif_data);
330*4882a593Smuzhiyun
331*4882a593Smuzhiyun return 0;
332*4882a593Smuzhiyun }
333*4882a593Smuzhiyun
334*4882a593Smuzhiyun static const struct dev_pm_ops ti_emif_pm_ops = {
335*4882a593Smuzhiyun SET_SYSTEM_SLEEP_PM_OPS(ti_emif_suspend, ti_emif_resume)
336*4882a593Smuzhiyun };
337*4882a593Smuzhiyun
338*4882a593Smuzhiyun static struct platform_driver ti_emif_driver = {
339*4882a593Smuzhiyun .probe = ti_emif_probe,
340*4882a593Smuzhiyun .remove = ti_emif_remove,
341*4882a593Smuzhiyun .driver = {
342*4882a593Smuzhiyun .name = KBUILD_MODNAME,
343*4882a593Smuzhiyun .of_match_table = of_match_ptr(ti_emif_of_match),
344*4882a593Smuzhiyun .pm = &ti_emif_pm_ops,
345*4882a593Smuzhiyun },
346*4882a593Smuzhiyun };
347*4882a593Smuzhiyun module_platform_driver(ti_emif_driver);
348*4882a593Smuzhiyun
349*4882a593Smuzhiyun MODULE_AUTHOR("Dave Gerlach <d-gerlach@ti.com>");
350*4882a593Smuzhiyun MODULE_DESCRIPTION("Texas Instruments SRAM EMIF driver");
351*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
352