// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * Error Location Module
 *
 * Copyright (C) 2012 Texas Instruments Incorporated - https://www.ti.com/
 */

#define DRIVER_NAME	"omap-elm"

#include <linux/platform_device.h>
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/of.h>
#include <linux/sched.h>
#include <linux/pm_runtime.h>
#include <linux/platform_data/elm.h>

#define ELM_SYSCONFIG			0x010
#define ELM_IRQSTATUS			0x018
#define ELM_IRQENABLE			0x01c
#define ELM_LOCATION_CONFIG		0x020
#define ELM_PAGE_CTRL			0x080
#define ELM_SYNDROME_FRAGMENT_0		0x400
#define ELM_SYNDROME_FRAGMENT_1		0x404
#define ELM_SYNDROME_FRAGMENT_2		0x408
#define ELM_SYNDROME_FRAGMENT_3		0x40c
#define ELM_SYNDROME_FRAGMENT_4		0x410
#define ELM_SYNDROME_FRAGMENT_5		0x414
#define ELM_SYNDROME_FRAGMENT_6		0x418
#define ELM_LOCATION_STATUS		0x800
#define ELM_ERROR_LOCATION_0		0x880

/* ELM Interrupt Status Register */
#define INTR_STATUS_PAGE_VALID		BIT(8)

/* ELM Interrupt Enable Register */
#define INTR_EN_PAGE_MASK		BIT(8)

/* ELM Location Configuration Register */
#define ECC_BCH_LEVEL_MASK		0x3

/* ELM syndrome */
#define ELM_SYNDROME_VALID		BIT(16)

/* ELM_LOCATION_STATUS Register */
#define ECC_CORRECTABLE_MASK		BIT(8)
#define ECC_NB_ERRORS_MASK		0x1f

/* ELM_ERROR_LOCATION_0-15 Registers */
#define ECC_ERROR_LOCATION_MASK		0x1fff

#define ELM_ECC_SIZE			0x7ff

#define SYNDROME_FRAGMENT_REG_SIZE	0x40
#define ERROR_LOCATION_SIZE		0x100

struct elm_registers {
	u32 elm_irqenable;
	u32 elm_sysconfig;
	u32 elm_location_config;
	u32 elm_page_ctrl;
	u32 elm_syndrome_fragment_6[ERROR_VECTOR_MAX];
	u32 elm_syndrome_fragment_5[ERROR_VECTOR_MAX];
	u32 elm_syndrome_fragment_4[ERROR_VECTOR_MAX];
	u32 elm_syndrome_fragment_3[ERROR_VECTOR_MAX];
	u32 elm_syndrome_fragment_2[ERROR_VECTOR_MAX];
	u32 elm_syndrome_fragment_1[ERROR_VECTOR_MAX];
	u32 elm_syndrome_fragment_0[ERROR_VECTOR_MAX];
};

struct elm_info {
	struct device *dev;
	void __iomem *elm_base;
	struct completion elm_completion;
	struct list_head list;
	enum bch_ecc bch_type;
	struct elm_registers elm_regs;
	int ecc_steps;
	int ecc_syndrome_size;
};

static LIST_HEAD(elm_devices);

static void elm_write_reg(struct elm_info *info, int offset, u32 val)
{
	writel(val, info->elm_base + offset);
}

static u32 elm_read_reg(struct elm_info *info, int offset)
{
	return readl(info->elm_base + offset);
}

/**
 * elm_config - Configure ELM module
 * @dev: ELM device
 * @bch_type: Type of BCH ecc
 * @ecc_steps: number of ECC steps (syndrome vectors) per page
 * @ecc_step_size: number of data bytes covered by one ECC step
 * @ecc_syndrome_size: number of ECC bytes per step
 *
 * Return: 0 on success, a negative error code otherwise.
 */
int elm_config(struct device *dev, enum bch_ecc bch_type,
	       int ecc_steps, int ecc_step_size, int ecc_syndrome_size)
{
	u32 reg_val;
	struct elm_info *info = dev_get_drvdata(dev);

	if (!info) {
		dev_err(dev, "Unable to configure elm - device not probed?\n");
		return -EPROBE_DEFER;
	}
	/* ELM cannot detect ECC errors for chunks > 1KB */
	if (ecc_step_size > ((ELM_ECC_SIZE + 1) / 2)) {
		dev_err(dev, "unsupported config ecc-size=%d\n", ecc_step_size);
		return -EINVAL;
	}
	/* ELM supports at most ERROR_VECTOR_MAX (8) error syndromes per run */
	if (ecc_steps > ERROR_VECTOR_MAX) {
		dev_err(dev, "unsupported config ecc-step=%d\n", ecc_steps);
		return -EINVAL;
	}

	reg_val = (bch_type & ECC_BCH_LEVEL_MASK) | (ELM_ECC_SIZE << 16);
	elm_write_reg(info, ELM_LOCATION_CONFIG, reg_val);
	info->bch_type = bch_type;
	info->ecc_steps = ecc_steps;
	info->ecc_syndrome_size = ecc_syndrome_size;

	return 0;
}
EXPORT_SYMBOL(elm_config);
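
/*
 * Illustrative usage (a sketch of the caller side, which lives outside this
 * file): a NAND controller driver that references the ELM node, for example
 * through a "ti,elm-id" phandle, would typically resolve the ELM device and
 * configure it once its ECC layout is known. The names elm_node, chip and
 * ret below are assumptions standing in for the caller's own data:
 *
 *	struct platform_device *pdev = of_find_device_by_node(elm_node);
 *	struct device *elm_dev = pdev ? &pdev->dev : NULL;
 *
 *	ret = elm_config(elm_dev, BCH8_ECC, chip->ecc.steps,
 *			 chip->ecc.size, chip->ecc.bytes);
 *
 * elm_config() returns -EPROBE_DEFER while this driver has not probed yet,
 * so callers usually defer and retry in that case.
 */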

/**
 * elm_configure_page_mode - Enable/Disable page mode
 * @info: elm info
 * @index: index number of syndrome fragment vector
 * @enable: enable/disable flag for page mode
 *
 * Enable page mode for syndrome fragment index
 */
static void elm_configure_page_mode(struct elm_info *info, int index,
				    bool enable)
{
	u32 reg_val;

	reg_val = elm_read_reg(info, ELM_PAGE_CTRL);
	if (enable)
		reg_val |= BIT(index);	/* enable page mode */
	else
		reg_val &= ~BIT(index);	/* disable page mode */

	elm_write_reg(info, ELM_PAGE_CTRL, reg_val);
}

/**
 * elm_load_syndrome - Load ELM syndrome reg
 * @info: elm info
 * @err_vec: elm error vectors
 * @ecc: buffer with calculated ecc
 *
 * Load syndrome fragment registers with calculated ecc in reverse order.
 */
static void elm_load_syndrome(struct elm_info *info,
			      struct elm_errorvec *err_vec, u8 *ecc)
{
	int i, offset;
	u32 val;

	for (i = 0; i < info->ecc_steps; i++) {

		/* Check error reported */
		if (err_vec[i].error_reported) {
			elm_configure_page_mode(info, i, true);
			offset = ELM_SYNDROME_FRAGMENT_0 +
				SYNDROME_FRAGMENT_REG_SIZE * i;
			switch (info->bch_type) {
			case BCH8_ECC:
				/* syndrome fragment 0 = ecc[9-12B] */
				val = cpu_to_be32(*(u32 *) &ecc[9]);
				elm_write_reg(info, offset, val);

				/* syndrome fragment 1 = ecc[5-8B] */
				offset += 4;
				val = cpu_to_be32(*(u32 *) &ecc[5]);
				elm_write_reg(info, offset, val);

				/* syndrome fragment 2 = ecc[1-4B] */
				offset += 4;
				val = cpu_to_be32(*(u32 *) &ecc[1]);
				elm_write_reg(info, offset, val);

				/* syndrome fragment 3 = ecc[0B] */
				offset += 4;
				val = ecc[0];
				elm_write_reg(info, offset, val);
				break;
			case BCH4_ECC:
				/* syndrome fragment 0 = ecc[20-52b] bits */
				val = (cpu_to_be32(*(u32 *) &ecc[3]) >> 4) |
					((ecc[2] & 0xf) << 28);
				elm_write_reg(info, offset, val);

				/* syndrome fragment 1 = ecc[0-20b] bits */
				offset += 4;
				val = cpu_to_be32(*(u32 *) &ecc[0]) >> 12;
				elm_write_reg(info, offset, val);
				break;
			case BCH16_ECC:
				val = cpu_to_be32(*(u32 *) &ecc[22]);
				elm_write_reg(info, offset, val);
				offset += 4;
				val = cpu_to_be32(*(u32 *) &ecc[18]);
				elm_write_reg(info, offset, val);
				offset += 4;
				val = cpu_to_be32(*(u32 *) &ecc[14]);
				elm_write_reg(info, offset, val);
				offset += 4;
				val = cpu_to_be32(*(u32 *) &ecc[10]);
				elm_write_reg(info, offset, val);
				offset += 4;
				val = cpu_to_be32(*(u32 *) &ecc[6]);
				elm_write_reg(info, offset, val);
				offset += 4;
				val = cpu_to_be32(*(u32 *) &ecc[2]);
				elm_write_reg(info, offset, val);
				offset += 4;
				val = cpu_to_be32(*(u32 *) &ecc[0]) >> 16;
				elm_write_reg(info, offset, val);
				break;
			default:
				pr_err("invalid config bch_type\n");
			}
		}

		/* Update ecc pointer with ecc byte size */
		ecc += info->ecc_syndrome_size;
	}
}

/**
 * elm_start_processing - start elm syndrome processing
 * @info: elm info
 * @err_vec: elm error vectors
 *
 * Set the syndrome valid bit for each syndrome fragment register that has
 * been loaded. This lets the ELM module start processing those syndrome
 * vectors.
 */
static void elm_start_processing(struct elm_info *info,
				 struct elm_errorvec *err_vec)
{
	int i, offset;
	u32 reg_val;

	/*
	 * Set the syndrome vector valid, so that the ELM module
	 * will process the vectors for which an error was reported.
	 */
	for (i = 0; i < info->ecc_steps; i++) {
		if (err_vec[i].error_reported) {
			offset = ELM_SYNDROME_FRAGMENT_6 +
				SYNDROME_FRAGMENT_REG_SIZE * i;
			reg_val = elm_read_reg(info, offset);
			reg_val |= ELM_SYNDROME_VALID;
			elm_write_reg(info, offset, reg_val);
		}
	}
}

/**
 * elm_error_correction - locate correctable error position
 * @info: elm info
 * @err_vec: elm error vectors
 *
 * On completion of processing by the ELM module, the error location status
 * register is updated with correctable/uncorrectable error information.
 * For correctable errors, the number of errors is read from the ELM
 * location status register and their positions from the ELM error
 * location registers.
 */
static void elm_error_correction(struct elm_info *info,
				 struct elm_errorvec *err_vec)
{
	int i, j, errors = 0;
	int offset;
	u32 reg_val;

	for (i = 0; i < info->ecc_steps; i++) {

		/* Check error reported */
		if (err_vec[i].error_reported) {
			offset = ELM_LOCATION_STATUS + ERROR_LOCATION_SIZE * i;
			reg_val = elm_read_reg(info, offset);

			/* Check correctable error or not */
			if (reg_val & ECC_CORRECTABLE_MASK) {
				offset = ELM_ERROR_LOCATION_0 +
					ERROR_LOCATION_SIZE * i;

				/* Read count of correctable errors */
				err_vec[i].error_count = reg_val &
					ECC_NB_ERRORS_MASK;

				/* Update the error locations in error vector */
				for (j = 0; j < err_vec[i].error_count; j++) {

					reg_val = elm_read_reg(info, offset);
					err_vec[i].error_loc[j] = reg_val &
						ECC_ERROR_LOCATION_MASK;

					/* Update error location register */
					offset += 4;
				}

				errors += err_vec[i].error_count;
			} else {
				err_vec[i].error_uncorrectable = true;
			}

			/* Clearing interrupts for processed error vectors */
			elm_write_reg(info, ELM_IRQSTATUS, BIT(i));

			/* Disable page mode */
			elm_configure_page_mode(info, i, false);
		}
	}
}

/**
 * elm_decode_bch_error_page - Locate error position
 * @dev: device pointer
 * @ecc_calc: calculated ECC bytes from GPMC
 * @err_vec: elm error vectors
 *
 * Called with one or more vectors flagged in err_vec[].error_reported;
 * on return, the error locations for those vectors are filled in.
 */
void elm_decode_bch_error_page(struct device *dev, u8 *ecc_calc,
			       struct elm_errorvec *err_vec)
{
	struct elm_info *info = dev_get_drvdata(dev);
	u32 reg_val;

	/* Enable page mode interrupt */
	reg_val = elm_read_reg(info, ELM_IRQSTATUS);
	elm_write_reg(info, ELM_IRQSTATUS, reg_val & INTR_STATUS_PAGE_VALID);
	elm_write_reg(info, ELM_IRQENABLE, INTR_EN_PAGE_MASK);

	/* Load valid ecc byte to syndrome fragment register */
	elm_load_syndrome(info, err_vec, ecc_calc);

	/* Enable syndrome processing for which syndrome fragment is updated */
	elm_start_processing(info, err_vec);

	/* Wait for ELM module to finish locating error correction */
	wait_for_completion(&info->elm_completion);

	/* Disable page mode interrupt */
	reg_val = elm_read_reg(info, ELM_IRQENABLE);
	elm_write_reg(info, ELM_IRQENABLE, reg_val & ~INTR_EN_PAGE_MASK);
	elm_error_correction(info, err_vec);
}
EXPORT_SYMBOL(elm_decode_bch_error_page);
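
/*
 * Illustrative caller flow (a sketch, not code from this driver): the NAND
 * controller driver flags the sectors whose BCH syndrome was non-zero, asks
 * the ELM for the bit positions, and then corrects the data buffer. The
 * helpers sector_has_errors() and flip_bit() are assumptions standing in
 * for the caller's own logic:
 *
 *	struct elm_errorvec err_vec[ERROR_VECTOR_MAX] = {};
 *	int i, j;
 *
 *	for (i = 0; i < ecc_steps; i++)
 *		err_vec[i].error_reported = sector_has_errors(i);
 *
 *	elm_decode_bch_error_page(elm_dev, ecc_calc, err_vec);
 *
 *	for (i = 0; i < ecc_steps; i++) {
 *		if (err_vec[i].error_uncorrectable)
 *			return -EBADMSG;
 *		for (j = 0; j < err_vec[i].error_count; j++)
 *			flip_bit(data, i, err_vec[i].error_loc[j]);
 *	}
 */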

static irqreturn_t elm_isr(int this_irq, void *dev_id)
{
	u32 reg_val;
	struct elm_info *info = dev_id;

	reg_val = elm_read_reg(info, ELM_IRQSTATUS);

	/* All error vectors processed */
	if (reg_val & INTR_STATUS_PAGE_VALID) {
		elm_write_reg(info, ELM_IRQSTATUS,
			      reg_val & INTR_STATUS_PAGE_VALID);
		complete(&info->elm_completion);
		return IRQ_HANDLED;
	}

	return IRQ_NONE;
}

static int elm_probe(struct platform_device *pdev)
{
	int ret = 0;
	struct resource *res, *irq;
	struct elm_info *info;

	info = devm_kzalloc(&pdev->dev, sizeof(*info), GFP_KERNEL);
	if (!info)
		return -ENOMEM;

	info->dev = &pdev->dev;

	irq = platform_get_resource(pdev, IORESOURCE_IRQ, 0);
	if (!irq) {
		dev_err(&pdev->dev, "no irq resource defined\n");
		return -ENODEV;
	}

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	info->elm_base = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(info->elm_base))
		return PTR_ERR(info->elm_base);

	ret = devm_request_irq(&pdev->dev, irq->start, elm_isr, 0,
			       pdev->name, info);
	if (ret) {
		dev_err(&pdev->dev, "failure requesting %pr\n", irq);
		return ret;
	}

	pm_runtime_enable(&pdev->dev);
	if (pm_runtime_get_sync(&pdev->dev) < 0) {
		ret = -EINVAL;
		pm_runtime_put_sync(&pdev->dev);
		pm_runtime_disable(&pdev->dev);
		dev_err(&pdev->dev, "can't enable clock\n");
		return ret;
	}

	init_completion(&info->elm_completion);
	INIT_LIST_HEAD(&info->list);
	list_add(&info->list, &elm_devices);
	platform_set_drvdata(pdev, info);
	return ret;
}

static int elm_remove(struct platform_device *pdev)
{
	pm_runtime_put_sync(&pdev->dev);
	pm_runtime_disable(&pdev->dev);
	return 0;
}

#ifdef CONFIG_PM_SLEEP
/**
 * elm_context_save - save ELM configuration
 * @info: elm info
 *
 * Saves the ELM configuration so that it can be restored after the
 * hardware has been powered down.
 */
static int elm_context_save(struct elm_info *info)
{
	struct elm_registers *regs = &info->elm_regs;
	enum bch_ecc bch_type = info->bch_type;
	u32 offset = 0, i;

	regs->elm_irqenable = elm_read_reg(info, ELM_IRQENABLE);
	regs->elm_sysconfig = elm_read_reg(info, ELM_SYSCONFIG);
	regs->elm_location_config = elm_read_reg(info, ELM_LOCATION_CONFIG);
	regs->elm_page_ctrl = elm_read_reg(info, ELM_PAGE_CTRL);
	for (i = 0; i < ERROR_VECTOR_MAX; i++) {
		offset = i * SYNDROME_FRAGMENT_REG_SIZE;
		switch (bch_type) {
		case BCH16_ECC:
			regs->elm_syndrome_fragment_6[i] = elm_read_reg(info,
					ELM_SYNDROME_FRAGMENT_6 + offset);
			regs->elm_syndrome_fragment_5[i] = elm_read_reg(info,
					ELM_SYNDROME_FRAGMENT_5 + offset);
			regs->elm_syndrome_fragment_4[i] = elm_read_reg(info,
					ELM_SYNDROME_FRAGMENT_4 + offset);
			fallthrough;
		case BCH8_ECC:
			regs->elm_syndrome_fragment_3[i] = elm_read_reg(info,
					ELM_SYNDROME_FRAGMENT_3 + offset);
			regs->elm_syndrome_fragment_2[i] = elm_read_reg(info,
					ELM_SYNDROME_FRAGMENT_2 + offset);
			fallthrough;
		case BCH4_ECC:
			regs->elm_syndrome_fragment_1[i] = elm_read_reg(info,
					ELM_SYNDROME_FRAGMENT_1 + offset);
			regs->elm_syndrome_fragment_0[i] = elm_read_reg(info,
					ELM_SYNDROME_FRAGMENT_0 + offset);
			break;
		default:
			return -EINVAL;
		}
		/*
		 * The ELM SYNDROME_VALID bit in SYNDROME_FRAGMENT_6[] needs
		 * to be saved for all BCH schemes.
		 */
		regs->elm_syndrome_fragment_6[i] = elm_read_reg(info,
					ELM_SYNDROME_FRAGMENT_6 + offset);
	}
	return 0;
}

/**
 * elm_context_restore - restore ELM configuration
 * @info: elm info
 *
 * Writes the configuration saved during power-down back into the ELM
 * registers.
 */
static int elm_context_restore(struct elm_info *info)
{
	struct elm_registers *regs = &info->elm_regs;
	enum bch_ecc bch_type = info->bch_type;
	u32 offset = 0, i;

	elm_write_reg(info, ELM_IRQENABLE, regs->elm_irqenable);
	elm_write_reg(info, ELM_SYSCONFIG, regs->elm_sysconfig);
	elm_write_reg(info, ELM_LOCATION_CONFIG, regs->elm_location_config);
	elm_write_reg(info, ELM_PAGE_CTRL, regs->elm_page_ctrl);
	for (i = 0; i < ERROR_VECTOR_MAX; i++) {
		offset = i * SYNDROME_FRAGMENT_REG_SIZE;
		switch (bch_type) {
		case BCH16_ECC:
			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_6 + offset,
					regs->elm_syndrome_fragment_6[i]);
			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_5 + offset,
					regs->elm_syndrome_fragment_5[i]);
			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_4 + offset,
					regs->elm_syndrome_fragment_4[i]);
			fallthrough;
		case BCH8_ECC:
			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_3 + offset,
					regs->elm_syndrome_fragment_3[i]);
			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_2 + offset,
					regs->elm_syndrome_fragment_2[i]);
			fallthrough;
		case BCH4_ECC:
			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_1 + offset,
					regs->elm_syndrome_fragment_1[i]);
			elm_write_reg(info, ELM_SYNDROME_FRAGMENT_0 + offset,
					regs->elm_syndrome_fragment_0[i]);
			break;
		default:
			return -EINVAL;
		}
		/* The ELM_SYNDROME_VALID bit must be written last to trigger the FSM */
		elm_write_reg(info, ELM_SYNDROME_FRAGMENT_6 + offset,
				regs->elm_syndrome_fragment_6[i] &
				ELM_SYNDROME_VALID);
	}
	return 0;
}

static int elm_suspend(struct device *dev)
{
	struct elm_info *info = dev_get_drvdata(dev);

	elm_context_save(info);
	pm_runtime_put_sync(dev);
	return 0;
}

static int elm_resume(struct device *dev)
{
	struct elm_info *info = dev_get_drvdata(dev);

	pm_runtime_get_sync(dev);
	elm_context_restore(info);
	return 0;
}
#endif

static SIMPLE_DEV_PM_OPS(elm_pm_ops, elm_suspend, elm_resume);

#ifdef CONFIG_OF
static const struct of_device_id elm_of_match[] = {
	{ .compatible = "ti,am3352-elm" },
	{},
};
MODULE_DEVICE_TABLE(of, elm_of_match);
#endif

static struct platform_driver elm_driver = {
	.driver	= {
		.name	= DRIVER_NAME,
		.of_match_table = of_match_ptr(elm_of_match),
		.pm	= &elm_pm_ops,
	},
	.probe	= elm_probe,
	.remove	= elm_remove,
};

module_platform_driver(elm_driver);

MODULE_DESCRIPTION("ELM driver for BCH error correction");
MODULE_AUTHOR("Texas Instruments");
MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_LICENSE("GPL v2");