// SPDX-License-Identifier: GPL-2.0-only
/*
 * linux/arch/arm/mach-pxa/pxa3xx.c
 *
 * code specific to pxa3xx aka Monahans
 *
 * Copyright (C) 2006 Marvell International Ltd.
 *
 * 2007-09-02: eric miao <eric.miao@marvell.com>
 *             initial version
 */
#include <linux/dmaengine.h>
#include <linux/dma/pxa-dma.h>
#include <linux/module.h>
#include <linux/kernel.h>
#include <linux/init.h>
#include <linux/gpio-pxa.h>
#include <linux/pm.h>
#include <linux/platform_device.h>
#include <linux/irq.h>
#include <linux/irqchip.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/syscore_ops.h>
#include <linux/platform_data/i2c-pxa.h>
#include <linux/platform_data/mmp_dma.h>

#include <asm/mach/map.h>
#include <asm/suspend.h>
#include <mach/hardware.h>
#include <mach/pxa3xx-regs.h>
#include <mach/reset.h>
#include <linux/platform_data/usb-ohci-pxa27x.h>
#include "pm.h"
#include <mach/dma.h>
#include <mach/smemc.h>
#include <mach/irqs.h>

#include "generic.h"
#include "devices.h"

#define PECR_IE(n)	((1 << ((n) * 2)) << 28)
#define PECR_IS(n)	((1 << ((n) * 2)) << 29)
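
/*
 * Each external wakeup pin n owns a pair of bits in the top byte of
 * PECR: PECR_IE(0)/PECR_IS(0) expand to bits 28/29, and
 * PECR_IE(1)/PECR_IS(1) to bits 30/31 (the enable and status bits used
 * by the ext-wakeup irq_chip below).
 */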

extern void __init pxa_dt_irq_init(int (*fn)(struct irq_data *, unsigned int));

/*
 * NAND NFC: DFI bus arbitration subset
 */
#define NDCR			(*(volatile u32 __iomem*)(NAND_VIRT + 0))
#define NDCR_ND_ARB_EN		(1 << 12)
#define NDCR_ND_ARB_CNTL	(1 << 19)

#ifdef CONFIG_PM

#define ISRAM_START	0x5c000000
#define ISRAM_SIZE	SZ_256K

static void __iomem *sram;
static unsigned long wakeup_src;

/*
 * Enter a standby mode (S0D1C2 or S0D2C2).  Upon wakeup, the dynamic
 * memory controller has to be reinitialised, so we place some code
 * in the SRAM to perform this function.
 *
 * We disable FIQs across the standby - otherwise, we might receive a
 * FIQ while the SDRAM is unavailable.
 */
static void pxa3xx_cpu_standby(unsigned int pwrmode)
{
	void (*fn)(unsigned int) = (void __force *)(sram + 0x8000);

	memcpy_toio(sram + 0x8000, pm_enter_standby_start,
		    pm_enter_standby_end - pm_enter_standby_start);

	AD2D0SR = ~0;
	AD2D1SR = ~0;
	AD2D0ER = wakeup_src;
	AD2D1ER = 0;
	ASCR = ASCR;
	ARSR = ARSR;

	local_fiq_disable();
	fn(pwrmode);
	local_fiq_enable();

	AD2D0ER = 0;
	AD2D1ER = 0;
}

/*
 * NOTE: currently, the OBM (OEM Boot Module) binary shipped with the
 * PXA3xx development kits assumes that resume continues from the
 * address stored in the first 4 bytes of SDRAM.  The PSPR register is
 * used privately by the BootROM and OBM, and _must_ be set to
 * 0x5c014000 for the moment.
 */
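
/*
 * The pointer used below (0xc0000000) is just the kernel's direct
 * mapping of the first word of SDRAM (PAGE_OFFSET with the usual 3G/1G
 * split): that word is saved, overwritten with the physical address of
 * cpu_resume() for the OBM hand-off, and put back once we are running
 * again.
 */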
static void pxa3xx_cpu_pm_suspend(void)
{
	volatile unsigned long *p = (volatile void *)0xc0000000;
	unsigned long saved_data = *p;
#ifndef CONFIG_IWMMXT
	u64 acc0;

	asm volatile(".arch_extension xscale\n\t"
		     "mra %Q0, %R0, acc0" : "=r" (acc0));
#endif

	/* resuming from D2 requires the HSIO2/BOOT/TPM clocks enabled */
	CKENA |= (1 << CKEN_BOOT) | (1 << CKEN_TPM);
	CKENB |= 1 << (CKEN_HSIO2 & 0x1f);

	/* clear and setup wakeup source */
	AD3SR = ~0;
	AD3ER = wakeup_src;
	ASCR = ASCR;
	ARSR = ARSR;

	PCFR |= (1u << 13);			/* L1_DIS */
	PCFR &= ~((1u << 12) | (1u << 1));	/* L0_EN | SL_ROD */

	PSPR = 0x5c014000;

	/* overwrite with the resume address */
	*p = __pa_symbol(cpu_resume);

	cpu_suspend(0, pxa3xx_finish_suspend);

	*p = saved_data;

	AD3ER = 0;

#ifndef CONFIG_IWMMXT
	/* mar writes acc0 from the register pair, so acc0 is an input here */
	asm volatile(".arch_extension xscale\n\t"
		     "mar acc0, %Q0, %R0" : : "r" (acc0));
#endif
}

static void pxa3xx_cpu_pm_enter(suspend_state_t state)
{
	/*
	 * Don't sleep if no wakeup sources are defined
	 */
	if (wakeup_src == 0) {
		printk(KERN_ERR "Not suspending: no wakeup sources\n");
		return;
	}

	switch (state) {
	case PM_SUSPEND_STANDBY:
		pxa3xx_cpu_standby(PXA3xx_PM_S0D2C2);
		break;

	case PM_SUSPEND_MEM:
		pxa3xx_cpu_pm_suspend();
		break;
	}
}

static int pxa3xx_cpu_pm_valid(suspend_state_t state)
{
	return state == PM_SUSPEND_MEM || state == PM_SUSPEND_STANDBY;
}

static struct pxa_cpu_pm_fns pxa3xx_cpu_pm_fns = {
	.valid		= pxa3xx_cpu_pm_valid,
	.enter		= pxa3xx_cpu_pm_enter,
};
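
/*
 * These hooks are not called directly from this file: the shared
 * mach-pxa suspend code (pm.c) wraps pxa_cpu_pm_fns in its
 * platform_suspend_ops, so .valid gates which states /sys/power/state
 * accepts, and .enter is where a plain
 *
 *	echo mem > /sys/power/state
 *
 * eventually lands (assuming at least one wakeup source has been
 * armed).
 */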

static void __init pxa3xx_init_pm(void)
{
	sram = ioremap(ISRAM_START, ISRAM_SIZE);
	if (!sram) {
		printk(KERN_ERR "Unable to map ISRAM: disabling standby/suspend\n");
		return;
	}

	/*
	 * Since we copy wakeup code into the SRAM, we need to ensure
	 * that it is preserved over the low power modes.  Note: bit 8
	 * is undocumented in the developer manual, but must be set.
	 */
	AD1R |= ADXR_L2 | ADXR_R0;
	AD2R |= ADXR_L2 | ADXR_R0;
	AD3R |= ADXR_L2 | ADXR_R0;

	/*
	 * Clear the resume enable registers.
	 */
	AD1D0ER = 0;
	AD2D0ER = 0;
	AD2D1ER = 0;
	AD3ER = 0;

	pxa_cpu_pm_fns = &pxa3xx_cpu_pm_fns;
}

static int pxa3xx_set_wake(struct irq_data *d, unsigned int on)
{
	unsigned long flags, mask = 0;

	switch (d->irq) {
	case IRQ_SSP3:
		mask = ADXER_MFP_WSSP3;
		break;
	case IRQ_MSL:
		mask = ADXER_WMSL0;
		break;
	case IRQ_USBH2:
	case IRQ_USBH1:
		mask = ADXER_WUSBH;
		break;
	case IRQ_KEYPAD:
		mask = ADXER_WKP;
		break;
	case IRQ_AC97:
		mask = ADXER_MFP_WAC97;
		break;
	case IRQ_USIM:
		mask = ADXER_WUSIM0;
		break;
	case IRQ_SSP2:
		mask = ADXER_MFP_WSSP2;
		break;
	case IRQ_I2C:
		mask = ADXER_MFP_WI2C;
		break;
	case IRQ_STUART:
		mask = ADXER_MFP_WUART3;
		break;
	case IRQ_BTUART:
		mask = ADXER_MFP_WUART2;
		break;
	case IRQ_FFUART:
		mask = ADXER_MFP_WUART1;
		break;
	case IRQ_MMC:
		mask = ADXER_MFP_WMMC1;
		break;
	case IRQ_SSP:
		mask = ADXER_MFP_WSSP1;
		break;
	case IRQ_RTCAlrm:
		mask = ADXER_WRTC;
		break;
	case IRQ_SSP4:
		mask = ADXER_MFP_WSSP4;
		break;
	case IRQ_TSI:
		mask = ADXER_WTSI;
		break;
	case IRQ_USIM2:
		mask = ADXER_WUSIM1;
		break;
	case IRQ_MMC2:
		mask = ADXER_MFP_WMMC2;
		break;
	case IRQ_NAND:
		mask = ADXER_MFP_WFLASH;
		break;
	case IRQ_USB2:
		mask = ADXER_WUSB2;
		break;
	case IRQ_WAKEUP0:
		mask = ADXER_WEXTWAKE0;
		break;
	case IRQ_WAKEUP1:
		mask = ADXER_WEXTWAKE1;
		break;
	case IRQ_MMC3:
		mask = ADXER_MFP_GEN12;
		break;
	default:
		return -EINVAL;
	}

	local_irq_save(flags);
	if (on)
		wakeup_src |= mask;
	else
		wakeup_src &= ~mask;
	local_irq_restore(flags);

	return 0;
}
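
/*
 * Illustrative only: pxa3xx_set_wake() ends up as the irq_chip's
 * ->irq_set_wake() hook (see pxa3xx_init_irq() below), so a driver that
 * wants e.g. the FFUART to wake the system would do something like
 *
 *	if (device_may_wakeup(dev))
 *		enable_irq_wake(IRQ_FFUART);
 *
 * in its suspend path (and disable_irq_wake() on resume); the bits
 * accumulated in wakeup_src are then programmed into AD2D0ER/AD3ER
 * before entering the low power mode.
 */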
#else
static inline void pxa3xx_init_pm(void) {}
#define pxa3xx_set_wake	NULL
#endif

static void pxa_ack_ext_wakeup(struct irq_data *d)
{
	PECR |= PECR_IS(d->irq - IRQ_WAKEUP0);
}

static void pxa_mask_ext_wakeup(struct irq_data *d)
{
	pxa_mask_irq(d);
	PECR &= ~PECR_IE(d->irq - IRQ_WAKEUP0);
}

static void pxa_unmask_ext_wakeup(struct irq_data *d)
{
	pxa_unmask_irq(d);
	PECR |= PECR_IE(d->irq - IRQ_WAKEUP0);
}

static int pxa_set_ext_wakeup_type(struct irq_data *d, unsigned int flow_type)
{
	if (flow_type & IRQ_TYPE_EDGE_RISING)
		PWER |= 1 << (d->irq - IRQ_WAKEUP0);

	if (flow_type & IRQ_TYPE_EDGE_FALLING)
		PWER |= 1 << (d->irq - IRQ_WAKEUP0 + 2);

	return 0;
}
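
/*
 * Illustrative only (the handler name is made up): board code that uses
 * external wakeup pin 0 as a rising-edge interrupt would typically do
 *
 *	ret = request_irq(IRQ_WAKEUP0, wkup_handler,
 *			  IRQF_TRIGGER_RISING, "ext-wakeup", NULL);
 *
 * which routes through pxa_set_ext_wakeup_type() above and sets the
 * matching rising-edge bit in PWER.
 */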

static struct irq_chip pxa_ext_wakeup_chip = {
	.name		= "WAKEUP",
	.irq_ack	= pxa_ack_ext_wakeup,
	.irq_mask	= pxa_mask_ext_wakeup,
	.irq_unmask	= pxa_unmask_ext_wakeup,
	.irq_set_type	= pxa_set_ext_wakeup_type,
};

static void __init pxa_init_ext_wakeup_irq(int (*fn)(struct irq_data *,
					   unsigned int))
{
	int irq;

	for (irq = IRQ_WAKEUP0; irq <= IRQ_WAKEUP1; irq++) {
		irq_set_chip_and_handler(irq, &pxa_ext_wakeup_chip,
					 handle_edge_irq);
		irq_clear_status_flags(irq, IRQ_NOREQUEST);
	}

	pxa_ext_wakeup_chip.irq_set_wake = fn;
}

static void __init __pxa3xx_init_irq(void)
{
	/* enable CP6 access */
	u32 value;
	__asm__ __volatile__("mrc p15, 0, %0, c15, c1, 0\n": "=r"(value));
	value |= (1 << 6);
	__asm__ __volatile__("mcr p15, 0, %0, c15, c1, 0\n": :"r"(value));

	pxa_init_ext_wakeup_irq(pxa3xx_set_wake);
}

void __init pxa3xx_init_irq(void)
{
	__pxa3xx_init_irq();
	pxa_init_irq(56, pxa3xx_set_wake);
}

#ifdef CONFIG_OF
static int __init
pxa3xx_dt_init_irq(struct device_node *node, struct device_node *parent)
{
	__pxa3xx_init_irq();
	pxa_dt_irq_init(pxa3xx_set_wake);
	set_handle_irq(ichp_handle_irq);

	return 0;
}
IRQCHIP_DECLARE(pxa3xx_intc, "marvell,pxa-intc", pxa3xx_dt_init_irq);
#endif	/* CONFIG_OF */

static struct map_desc pxa3xx_io_desc[] __initdata = {
	{	/* Mem Ctl */
		.virtual	= (unsigned long)SMEMC_VIRT,
		.pfn		= __phys_to_pfn(PXA3XX_SMEMC_BASE),
		.length		= SMEMC_SIZE,
		.type		= MT_DEVICE
	}, {
		.virtual	= (unsigned long)NAND_VIRT,
		.pfn		= __phys_to_pfn(NAND_PHYS),
		.length		= NAND_SIZE,
		.type		= MT_DEVICE
	},
};

void __init pxa3xx_map_io(void)
{
	pxa_map_io();
	iotable_init(ARRAY_AND_SIZE(pxa3xx_io_desc));
	pxa3xx_get_clk_frequency_khz(1);
}

/*
 * device registration specific to PXA3xx.
 */

void __init pxa3xx_set_i2c_power_info(struct i2c_pxa_platform_data *info)
{
	pxa_register_device(&pxa3xx_device_i2c_power, info);
}

static struct pxa_gpio_platform_data pxa3xx_gpio_pdata = {
	.irq_base	= PXA_GPIO_TO_IRQ(0),
};

static struct platform_device *devices[] __initdata = {
	&pxa27x_device_udc,
	&pxa_device_pmu,
	&pxa_device_i2s,
	&pxa_device_asoc_ssp1,
	&pxa_device_asoc_ssp2,
	&pxa_device_asoc_ssp3,
	&pxa_device_asoc_ssp4,
	&pxa_device_asoc_platform,
	&pxa_device_rtc,
	&pxa3xx_device_ssp1,
	&pxa3xx_device_ssp2,
	&pxa3xx_device_ssp3,
	&pxa3xx_device_ssp4,
	&pxa27x_device_pwm0,
	&pxa27x_device_pwm1,
};

static const struct dma_slave_map pxa3xx_slave_map[] = {
	/* PXA25x, PXA27x and PXA3xx common entries */
	{ "pxa2xx-ac97", "pcm_pcm_mic_mono", PDMA_FILTER_PARAM(LOWEST, 8) },
	{ "pxa2xx-ac97", "pcm_pcm_aux_mono_in", PDMA_FILTER_PARAM(LOWEST, 9) },
	{ "pxa2xx-ac97", "pcm_pcm_aux_mono_out",
	  PDMA_FILTER_PARAM(LOWEST, 10) },
	{ "pxa2xx-ac97", "pcm_pcm_stereo_in", PDMA_FILTER_PARAM(LOWEST, 11) },
	{ "pxa2xx-ac97", "pcm_pcm_stereo_out", PDMA_FILTER_PARAM(LOWEST, 12) },
	{ "pxa-ssp-dai.0", "rx", PDMA_FILTER_PARAM(LOWEST, 13) },
	{ "pxa-ssp-dai.0", "tx", PDMA_FILTER_PARAM(LOWEST, 14) },
	{ "pxa-ssp-dai.1", "rx", PDMA_FILTER_PARAM(LOWEST, 15) },
	{ "pxa-ssp-dai.1", "tx", PDMA_FILTER_PARAM(LOWEST, 16) },
	{ "pxa2xx-ir", "rx", PDMA_FILTER_PARAM(LOWEST, 17) },
	{ "pxa2xx-ir", "tx", PDMA_FILTER_PARAM(LOWEST, 18) },
	{ "pxa2xx-mci.0", "rx", PDMA_FILTER_PARAM(LOWEST, 21) },
	{ "pxa2xx-mci.0", "tx", PDMA_FILTER_PARAM(LOWEST, 22) },
	{ "pxa-ssp-dai.2", "rx", PDMA_FILTER_PARAM(LOWEST, 66) },
	{ "pxa-ssp-dai.2", "tx", PDMA_FILTER_PARAM(LOWEST, 67) },

	/* PXA3xx specific map */
	{ "pxa-ssp-dai.3", "rx", PDMA_FILTER_PARAM(LOWEST, 2) },
	{ "pxa-ssp-dai.3", "tx", PDMA_FILTER_PARAM(LOWEST, 3) },
	{ "pxa2xx-mci.1", "rx", PDMA_FILTER_PARAM(LOWEST, 93) },
	{ "pxa2xx-mci.1", "tx", PDMA_FILTER_PARAM(LOWEST, 94) },
	{ "pxa3xx-nand", "data", PDMA_FILTER_PARAM(LOWEST, 97) },
	{ "pxa2xx-mci.2", "rx", PDMA_FILTER_PARAM(LOWEST, 100) },
	{ "pxa2xx-mci.2", "tx", PDMA_FILTER_PARAM(LOWEST, 101) },
};
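
/*
 * A sketch of how the map above is consumed (generic dmaengine API, no
 * PXA-specific calls): a client such as the first MMC controller simply
 * requests its channels by name,
 *
 *	chan = dma_request_chan(&pdev->dev, "rx");
 *
 * and the dmaengine core matches the device name "pxa2xx-mci.0" plus
 * "rx" against this table, handing back a pxa-dma channel bound to
 * request line 21.
 */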

static struct mmp_dma_platdata pxa3xx_dma_pdata = {
	.dma_channels	= 32,
	.nb_requestors	= 100,
	.slave_map	= pxa3xx_slave_map,
	.slave_map_cnt	= ARRAY_SIZE(pxa3xx_slave_map),
};

static int __init pxa3xx_init(void)
{
	int ret = 0;

	if (cpu_is_pxa3xx()) {

		reset_status = ARSR;

		/*
		 * clear RDH bit every time after reset
		 *
		 * Note: the last 3 bits DxS are write-1-to-clear, so mask
		 * them out here to preserve them in case they are
		 * referenced later
		 */
		ASCR &= ~(ASCR_RDH | ASCR_D1S | ASCR_D2S | ASCR_D3S);

		/*
		 * Disable DFI bus arbitration, to prevent a system bus lock if
		 * somebody disables the NAND clock (unused clock) while this
		 * bit remains set.
		 */
		NDCR = (NDCR & ~NDCR_ND_ARB_EN) | NDCR_ND_ARB_CNTL;

		pxa3xx_init_pm();

		enable_irq_wake(IRQ_WAKEUP0);
		if (cpu_is_pxa320())
			enable_irq_wake(IRQ_WAKEUP1);

		register_syscore_ops(&pxa_irq_syscore_ops);
		register_syscore_ops(&pxa3xx_mfp_syscore_ops);

		if (of_have_populated_dt())
			return 0;

		pxa2xx_set_dmac_info(&pxa3xx_dma_pdata);
		ret = platform_add_devices(devices, ARRAY_SIZE(devices));
		if (ret)
			return ret;
		if (cpu_is_pxa300() || cpu_is_pxa310() || cpu_is_pxa320()) {
			platform_device_add_data(&pxa3xx_device_gpio,
						 &pxa3xx_gpio_pdata,
						 sizeof(pxa3xx_gpio_pdata));
			ret = platform_device_register(&pxa3xx_device_gpio);
		}
	}

	return ret;
}

postcore_initcall(pxa3xx_init);