1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (C) IBM Corporation 2017
4*4882a593Smuzhiyun *
5*4882a593Smuzhiyun * This program is free software; you can redistribute it and/or modify
6*4882a593Smuzhiyun * it under the terms of the GNU General Public License version 2 as
7*4882a593Smuzhiyun * published by the Free Software Foundation.
8*4882a593Smuzhiyun *
9*4882a593Smuzhiyun * This program is distributed in the hope that it will be useful,
10*4882a593Smuzhiyun * but WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
12*4882a593Smuzhiyun * GNU General Public License for more details.
13*4882a593Smuzhiyun */
14*4882a593Smuzhiyun
15*4882a593Smuzhiyun #include <linux/device.h>
16*4882a593Smuzhiyun #include <linux/errno.h>
17*4882a593Smuzhiyun #include <linux/fs.h>
18*4882a593Smuzhiyun #include <linux/fsi.h>
19*4882a593Smuzhiyun #include <linux/fsi-sbefifo.h>
20*4882a593Smuzhiyun #include <linux/kernel.h>
21*4882a593Smuzhiyun #include <linux/cdev.h>
22*4882a593Smuzhiyun #include <linux/module.h>
23*4882a593Smuzhiyun #include <linux/mutex.h>
24*4882a593Smuzhiyun #include <linux/of.h>
25*4882a593Smuzhiyun #include <linux/of_device.h>
26*4882a593Smuzhiyun #include <linux/of_platform.h>
27*4882a593Smuzhiyun #include <linux/sched.h>
28*4882a593Smuzhiyun #include <linux/slab.h>
29*4882a593Smuzhiyun #include <linux/uaccess.h>
30*4882a593Smuzhiyun #include <linux/delay.h>
31*4882a593Smuzhiyun #include <linux/uio.h>
32*4882a593Smuzhiyun #include <linux/vmalloc.h>
33*4882a593Smuzhiyun #include <linux/mm.h>
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun /*
36*4882a593Smuzhiyun * The SBEFIFO is a pipe-like FSI device for communicating with
37*4882a593Smuzhiyun * the self boot engine on POWER processors.
38*4882a593Smuzhiyun */
39*4882a593Smuzhiyun
40*4882a593Smuzhiyun #define DEVICE_NAME "sbefifo"
41*4882a593Smuzhiyun #define FSI_ENGID_SBE 0x22
42*4882a593Smuzhiyun
43*4882a593Smuzhiyun /*
44*4882a593Smuzhiyun * Register layout
45*4882a593Smuzhiyun */
46*4882a593Smuzhiyun
47*4882a593Smuzhiyun /* Register banks */
48*4882a593Smuzhiyun #define SBEFIFO_UP 0x00 /* FSI -> Host */
49*4882a593Smuzhiyun #define SBEFIFO_DOWN 0x40 /* Host -> FSI */
50*4882a593Smuzhiyun
51*4882a593Smuzhiyun /* Per-bank registers */
52*4882a593Smuzhiyun #define SBEFIFO_FIFO 0x00 /* The FIFO itself */
53*4882a593Smuzhiyun #define SBEFIFO_STS 0x04 /* Status register */
54*4882a593Smuzhiyun #define SBEFIFO_STS_PARITY_ERR 0x20000000
55*4882a593Smuzhiyun #define SBEFIFO_STS_RESET_REQ 0x02000000
56*4882a593Smuzhiyun #define SBEFIFO_STS_GOT_EOT 0x00800000
57*4882a593Smuzhiyun #define SBEFIFO_STS_MAX_XFER_LIMIT 0x00400000
58*4882a593Smuzhiyun #define SBEFIFO_STS_FULL 0x00200000
59*4882a593Smuzhiyun #define SBEFIFO_STS_EMPTY 0x00100000
60*4882a593Smuzhiyun #define SBEFIFO_STS_ECNT_MASK 0x000f0000
61*4882a593Smuzhiyun #define SBEFIFO_STS_ECNT_SHIFT 16
62*4882a593Smuzhiyun #define SBEFIFO_STS_VALID_MASK 0x0000ff00
63*4882a593Smuzhiyun #define SBEFIFO_STS_VALID_SHIFT 8
64*4882a593Smuzhiyun #define SBEFIFO_STS_EOT_MASK 0x000000ff
65*4882a593Smuzhiyun #define SBEFIFO_STS_EOT_SHIFT 0
66*4882a593Smuzhiyun #define SBEFIFO_EOT_RAISE 0x08 /* (Up only) Set End Of Transfer */
67*4882a593Smuzhiyun #define SBEFIFO_REQ_RESET 0x0C /* (Up only) Reset Request */
68*4882a593Smuzhiyun #define SBEFIFO_PERFORM_RESET 0x10 /* (Down only) Perform Reset */
69*4882a593Smuzhiyun #define SBEFIFO_EOT_ACK 0x14 /* (Down only) Acknowledge EOT */
70*4882a593Smuzhiyun #define SBEFIFO_DOWN_MAX 0x18 /* (Down only) Max transfer */
71*4882a593Smuzhiyun
72*4882a593Smuzhiyun /* CFAM GP Mailbox SelfBoot Message register */
73*4882a593Smuzhiyun #define CFAM_GP_MBOX_SBM_ADDR 0x2824 /* Converted 0x2809 */
74*4882a593Smuzhiyun
75*4882a593Smuzhiyun #define CFAM_SBM_SBE_BOOTED 0x80000000
76*4882a593Smuzhiyun #define CFAM_SBM_SBE_ASYNC_FFDC 0x40000000
77*4882a593Smuzhiyun #define CFAM_SBM_SBE_STATE_MASK 0x00f00000
78*4882a593Smuzhiyun #define CFAM_SBM_SBE_STATE_SHIFT 20
79*4882a593Smuzhiyun
/*
 * SBE state as reported in the CFAM GP mailbox SelfBoot Message
 * register (see CFAM_SBM_SBE_STATE_MASK).  Used by
 * sbefifo_check_sbe_state() to decide whether the FIFO is usable.
 */
enum sbe_state
{
	SBE_STATE_UNKNOWN = 0x0,	// Unknown, initial state
	SBE_STATE_IPLING  = 0x1,	// IPL'ing - autonomous mode (transient)
	SBE_STATE_ISTEP   = 0x2,	// ISTEP - Running IPL by steps (transient)
	SBE_STATE_MPIPL   = 0x3,	// MPIPL
	SBE_STATE_RUNTIME = 0x4,	// SBE Runtime
	SBE_STATE_DMT     = 0x5,	// Dead Man Timer State (transient)
	SBE_STATE_DUMP    = 0x6,	// Dumping
	SBE_STATE_FAILURE = 0x7,	// Internal SBE failure
	SBE_STATE_QUIESCE = 0x8,	// Final state - needs SBE reset to get out
};
92*4882a593Smuzhiyun
93*4882a593Smuzhiyun /* FIFO depth */
94*4882a593Smuzhiyun #define SBEFIFO_FIFO_DEPTH 8
95*4882a593Smuzhiyun
96*4882a593Smuzhiyun /* Helpers */
97*4882a593Smuzhiyun #define sbefifo_empty(sts) ((sts) & SBEFIFO_STS_EMPTY)
98*4882a593Smuzhiyun #define sbefifo_full(sts) ((sts) & SBEFIFO_STS_FULL)
99*4882a593Smuzhiyun #define sbefifo_parity_err(sts) ((sts) & SBEFIFO_STS_PARITY_ERR)
100*4882a593Smuzhiyun #define sbefifo_populated(sts) (((sts) & SBEFIFO_STS_ECNT_MASK) >> SBEFIFO_STS_ECNT_SHIFT)
101*4882a593Smuzhiyun #define sbefifo_vacant(sts) (SBEFIFO_FIFO_DEPTH - sbefifo_populated(sts))
102*4882a593Smuzhiyun #define sbefifo_eot_set(sts) (((sts) & SBEFIFO_STS_EOT_MASK) >> SBEFIFO_STS_EOT_SHIFT)
103*4882a593Smuzhiyun
104*4882a593Smuzhiyun /* Reset request timeout in ms */
105*4882a593Smuzhiyun #define SBEFIFO_RESET_TIMEOUT 10000
106*4882a593Smuzhiyun
107*4882a593Smuzhiyun /* Timeouts for commands in ms */
108*4882a593Smuzhiyun #define SBEFIFO_TIMEOUT_START_CMD 10000
109*4882a593Smuzhiyun #define SBEFIFO_TIMEOUT_IN_CMD 1000
110*4882a593Smuzhiyun #define SBEFIFO_TIMEOUT_START_RSP 10000
111*4882a593Smuzhiyun #define SBEFIFO_TIMEOUT_IN_RSP 1000
112*4882a593Smuzhiyun
113*4882a593Smuzhiyun /* Other constants */
114*4882a593Smuzhiyun #define SBEFIFO_MAX_USER_CMD_LEN (0x100000 + PAGE_SIZE)
115*4882a593Smuzhiyun #define SBEFIFO_RESET_MAGIC 0x52534554 /* "RSET" */
116*4882a593Smuzhiyun
/* Per-device driver state, embedded device and char device. */
struct sbefifo {
	uint32_t		magic;		/* Validity marker, set to SBEFIFO_MAGIC */
#define SBEFIFO_MAGIC	0x53424546 /* "SBEF" */
	struct fsi_device	*fsi_dev;	/* Underlying FSI device */
	struct device		dev;
	struct cdev		cdev;
	struct mutex		lock;		/* Serializes access to the hardware FIFO */
	bool			broken;		/* FIFO needs a reset before next use */
	bool			dead;		/* Device going away, reject new commands */
	bool			async_ffdc;	/* SBE has pending async FFDC to collect */
};
128*4882a593Smuzhiyun
/* Per-open-file state for the character device interface. */
struct sbefifo_user {
	struct sbefifo		*sbefifo;	/* Back-pointer to the device */
	struct mutex		file_lock;	/* Serializes this file's read/write */
	void			*cmd_page;	/* Pre-allocated page for small commands */
	void			*pending_cmd;	/* Buffered command awaiting the read() that triggers it */
	size_t			pending_len;	/* Length in bytes of pending_cmd */
};
136*4882a593Smuzhiyun
137*4882a593Smuzhiyun static DEFINE_MUTEX(sbefifo_ffdc_mutex);
138*4882a593Smuzhiyun
139*4882a593Smuzhiyun
/*
 * Pretty-print a buffer of SBE FFDC (error data) packages to the log.
 *
 * Each package begins with a 3-word header: a signature word with
 * 0xFFDC in the upper half and the payload length (in words) in the
 * lower half, a word identifying the originating command, and a
 * response code word.  The payload is then dumped 4 words per line.
 *
 * Caller must hold sbefifo_ffdc_mutex: the line staging buffer
 * "ffdc_line" below is static and therefore shared between callers.
 */
static void __sbefifo_dump_ffdc(struct device *dev, const __be32 *ffdc,
				size_t ffdc_sz, bool internal)
{
	int pack = 0;
#define FFDC_LSIZE	60
	static char ffdc_line[FFDC_LSIZE];
	char *p = ffdc_line;

	while (ffdc_sz) {
		u32 w0, w1, w2, i;
		if (ffdc_sz < 3) {
			/* Not even room for a package header */
			dev_err(dev, "SBE invalid FFDC package size %zd\n", ffdc_sz);
			return;
		}
		w0 = be32_to_cpu(*(ffdc++));
		w1 = be32_to_cpu(*(ffdc++));
		w2 = be32_to_cpu(*(ffdc++));
		ffdc_sz -= 3;
		if ((w0 >> 16) != 0xFFDC) {
			dev_err(dev, "SBE invalid FFDC package signature %08x %08x %08x\n",
				w0, w1, w2);
			break;
		}
		/* Lower half of the signature word is the payload length in words */
		w0 &= 0xffff;
		if (w0 > ffdc_sz) {
			/* Truncated package: clamp and stop after reporting */
			dev_err(dev, "SBE FFDC package len %d words but only %zd remaining\n",
				w0, ffdc_sz);
			w0 = ffdc_sz;
			break;
		}
		if (internal) {
			dev_warn(dev, "+---- SBE FFDC package %d for async err -----+\n",
				 pack++);
		} else {
			dev_warn(dev, "+---- SBE FFDC package %d for cmd %02x:%02x -----+\n",
				 pack++, (w1 >> 8) & 0xff, w1 & 0xff);
		}
		dev_warn(dev, "| Response code: %08x |\n", w2);
		dev_warn(dev, "|-------------------------------------------|\n");
		for (i = 0; i < w0; i++) {
			/* Start a new output line every 4 words */
			if ((i & 3) == 0) {
				p = ffdc_line;
				/*
				 * NOTE(review): the offset label advances by
				 * 0x40 per 16-byte line (i << 4) — confirm
				 * that stride is the intended labelling.
				 */
				p += sprintf(p, "| %04x:", i << 4);
			}
			p += sprintf(p, " %08x", be32_to_cpu(*(ffdc++)));
			ffdc_sz--;
			/* Flush the line when full or at the last word */
			if ((i & 3) == 3 || i == (w0 - 1)) {
				/* Pad a partial final line (advances i past w0, ending the loop) */
				while ((i & 3) < 3) {
					p += sprintf(p, " ");
					i++;
				}
				dev_warn(dev, "%s |\n", ffdc_line);
			}
		}
		dev_warn(dev, "+-------------------------------------------+\n");
	}
}
197*4882a593Smuzhiyun
/*
 * Serialized wrapper around __sbefifo_dump_ffdc(): the dump helper
 * uses a static line buffer, so concurrent dumps must be excluded.
 */
static void sbefifo_dump_ffdc(struct device *dev, const __be32 *ffdc,
			      size_t ffdc_sz, bool internal)
{
	mutex_lock(&sbefifo_ffdc_mutex);
	__sbefifo_dump_ffdc(dev, ffdc, ffdc_sz, internal);
	mutex_unlock(&sbefifo_ffdc_mutex);
}
205*4882a593Smuzhiyun
/**
 * sbefifo_parse_status() - Parse and check the status block of an SBE response
 * @dev:      Device used for error reporting
 * @cmd:      The command that was submitted (checked against the status header)
 * @response: The raw response buffer (big-endian 32-bit words)
 * @resp_len: Length of @response in words
 * @data_len: Optional out-parameter, set to the number of data words
 *            preceding the status block
 *
 * The last word of a response is the distance (in words, counted from
 * the end of the buffer) to the status header.  The header consists of
 * a signature word (0xC0DE in the upper half, the command in the lower
 * half) and a status word; any further words in the status block are
 * FFDC and get dumped to the log.
 *
 * Return: the SBE status word (0 on success; positive values are SBE
 * primary/secondary status and never collide with negative Linux error
 * codes), or a negative errno if the response is malformed.
 */
int sbefifo_parse_status(struct device *dev, u16 cmd, __be32 *response,
			 size_t resp_len, size_t *data_len)
{
	u32 dh, s0, s1;
	size_t ffdc_sz;

	/* Need at least the status header plus the trailing offset word */
	if (resp_len < 3) {
		pr_debug("sbefifo: cmd %04x, response too small: %zd\n",
			 cmd, resp_len);
		return -ENXIO;
	}
	dh = be32_to_cpu(response[resp_len - 1]);
	if (dh > resp_len || dh < 3) {
		dev_err(dev, "SBE cmd %02x:%02x status offset out of range: %d/%zd\n",
			cmd >> 8, cmd & 0xff, dh, resp_len);
		return -ENXIO;
	}
	s0 = be32_to_cpu(response[resp_len - dh]);
	s1 = be32_to_cpu(response[resp_len - dh + 1]);
	/* Validate the 0xC0DE signature and that the echoed command matches */
	if (((s0 >> 16) != 0xC0DE) || ((s0 & 0xffff) != cmd)) {
		dev_err(dev, "SBE cmd %02x:%02x, status signature invalid: 0x%08x 0x%08x\n",
			cmd >> 8, cmd & 0xff, s0, s1);
		return -ENXIO;
	}
	if (s1 != 0) {
		/* Status block minus header minus trailing offset word is FFDC */
		ffdc_sz = dh - 3;
		dev_warn(dev, "SBE error cmd %02x:%02x status=%04x:%04x\n",
			 cmd >> 8, cmd & 0xff, s1 >> 16, s1 & 0xffff);
		if (ffdc_sz)
			sbefifo_dump_ffdc(dev, &response[resp_len - dh + 2],
					  ffdc_sz, false);
	}
	if (data_len)
		*data_len = resp_len - dh;

	/*
	 * Primary status don't have the top bit set, so can't be confused with
	 * Linux negative error codes, so return the status word whole.
	 */
	return s1;
}
EXPORT_SYMBOL_GPL(sbefifo_parse_status);
248*4882a593Smuzhiyun
/*
 * Read one 32-bit engine register and convert it from the big-endian
 * wire format to host endianness.  On failure *word is left untouched.
 */
static int sbefifo_regr(struct sbefifo *sbefifo, int reg, u32 *word)
{
	__be32 raw;
	int rc = fsi_device_read(sbefifo->fsi_dev, reg, &raw, sizeof(raw));

	if (!rc)
		*word = be32_to_cpu(raw);

	return rc;
}
263*4882a593Smuzhiyun
/*
 * Write one 32-bit engine register, converting the value to the
 * big-endian wire format first.
 */
static int sbefifo_regw(struct sbefifo *sbefifo, int reg, u32 word)
{
	__be32 wire_val = cpu_to_be32(word);

	return fsi_device_write(sbefifo->fsi_dev, reg, &wire_val,
				sizeof(wire_val));
}
271*4882a593Smuzhiyun
/*
 * Read the SBE boot/state flags from the CFAM GP mailbox SelfBoot
 * Message register and decide whether the SBE can accept FIFO traffic.
 * Also latches the async-FFDC-available flag into the device state.
 *
 * Return: 0 if the SBE is usable, -ESHUTDOWN if it hasn't booted or is
 * in an unknown/failed/quiesced state, -EBUSY while in the dead man
 * timer state, or a negative FSI error from the register read.
 */
static int sbefifo_check_sbe_state(struct sbefifo *sbefifo)
{
	__be32 raw_word;
	u32 sbm;
	int rc;

	rc = fsi_slave_read(sbefifo->fsi_dev->slave, CFAM_GP_MBOX_SBM_ADDR,
			    &raw_word, sizeof(raw_word));
	if (rc)
		return rc;
	sbm = be32_to_cpu(raw_word);

	/* SBE booted at all ? */
	if (!(sbm & CFAM_SBM_SBE_BOOTED))
		return -ESHUTDOWN;

	/* Check its state */
	switch ((sbm & CFAM_SBM_SBE_STATE_MASK) >> CFAM_SBM_SBE_STATE_SHIFT) {
	case SBE_STATE_UNKNOWN:
		return -ESHUTDOWN;
	case SBE_STATE_DMT:
		return -EBUSY;
	case SBE_STATE_IPLING:
	case SBE_STATE_ISTEP:
	case SBE_STATE_MPIPL:
	case SBE_STATE_RUNTIME:
	case SBE_STATE_DUMP: /* Not sure about that one */
		break;
	case SBE_STATE_FAILURE:
	case SBE_STATE_QUIESCE:
		return -ESHUTDOWN;
	}

	/* Is there async FFDC available ? Remember it */
	if (sbm & CFAM_SBM_SBE_ASYNC_FFDC)
		sbefifo->async_ffdc = true;

	return 0;
}
311*4882a593Smuzhiyun
312*4882a593Smuzhiyun /* Don't flip endianness of data to/from FIFO, just pass through. */
/* Pop one raw (big-endian) word from the downstream (host-bound) FIFO. */
static int sbefifo_down_read(struct sbefifo *sbefifo, __be32 *word)
{
	struct fsi_device *fsi = sbefifo->fsi_dev;

	return fsi_device_read(fsi, SBEFIFO_DOWN, word, sizeof(*word));
}
318*4882a593Smuzhiyun
/* Push one raw (big-endian) word into the upstream (SBE-bound) FIFO. */
static int sbefifo_up_write(struct sbefifo *sbefifo, __be32 word)
{
	struct fsi_device *fsi = sbefifo->fsi_dev;

	return fsi_device_write(fsi, SBEFIFO_UP, &word, sizeof(word));
}
324*4882a593Smuzhiyun
/*
 * Ask the SBE to reset the FIFO and poll (up to SBEFIFO_RESET_TIMEOUT
 * ms) for the request bit to clear in the upstream status register.
 *
 * The device is marked broken up-front and only un-marked once the
 * reset is observed to complete, so a failure here forces another
 * reset attempt on the next use.
 *
 * Return: 0 on success, -ETIMEDOUT if the SBE never acknowledged,
 * or a negative FSI error.
 */
static int sbefifo_request_reset(struct sbefifo *sbefifo)
{
	struct device *dev = &sbefifo->fsi_dev->dev;
	unsigned long end_time;
	u32 status;
	int rc;

	dev_dbg(dev, "Requesting FIFO reset\n");

	/* Mark broken first, will be cleared if reset succeeds */
	sbefifo->broken = true;

	/* Send reset request */
	rc = sbefifo_regw(sbefifo, SBEFIFO_UP | SBEFIFO_REQ_RESET, 1);
	if (rc) {
		dev_err(dev, "Sending reset request failed, rc=%d\n", rc);
		return rc;
	}

	/* Wait for it to complete */
	end_time = jiffies + msecs_to_jiffies(SBEFIFO_RESET_TIMEOUT);
	while (!time_after(jiffies, end_time)) {
		rc = sbefifo_regr(sbefifo, SBEFIFO_UP | SBEFIFO_STS, &status);
		if (rc) {
			dev_err(dev, "Failed to read UP fifo status during reset"
				" , rc=%d\n", rc);
			return rc;
		}

		/* Request bit cleared means the SBE processed the reset */
		if (!(status & SBEFIFO_STS_RESET_REQ)) {
			dev_dbg(dev, "FIFO reset done\n");
			sbefifo->broken = false;
			return 0;
		}

		cond_resched();
	}
	dev_err(dev, "FIFO reset timed out\n");

	return -ETIMEDOUT;
}
366*4882a593Smuzhiyun
/*
 * Bring the hardware FIFOs to a known-clean state before submitting a
 * command: verify the SBE is usable, honor any reset request the SBE
 * already posted, and request a full reset if either FIFO shows a
 * parity error, is non-empty, or the device was previously marked
 * broken.
 *
 * Return: 0 when the FIFOs are clean, or a negative error (the device
 * is marked broken on register-access failures so the next attempt
 * retries the reset path).
 */
static int sbefifo_cleanup_hw(struct sbefifo *sbefifo)
{
	struct device *dev = &sbefifo->fsi_dev->dev;
	u32 up_status, down_status;
	bool need_reset = false;
	int rc;

	rc = sbefifo_check_sbe_state(sbefifo);
	if (rc) {
		dev_dbg(dev, "SBE state=%d\n", rc);
		return rc;
	}

	/* If broken, we don't need to look at status, go straight to reset */
	if (sbefifo->broken)
		goto do_reset;

	rc = sbefifo_regr(sbefifo, SBEFIFO_UP | SBEFIFO_STS, &up_status);
	if (rc) {
		dev_err(dev, "Cleanup: Reading UP status failed, rc=%d\n", rc);

		/* Will try reset again on next attempt at using it */
		sbefifo->broken = true;
		return rc;
	}

	rc = sbefifo_regr(sbefifo, SBEFIFO_DOWN | SBEFIFO_STS, &down_status);
	if (rc) {
		dev_err(dev, "Cleanup: Reading DOWN status failed, rc=%d\n", rc);

		/* Will try reset again on next attempt at using it */
		sbefifo->broken = true;
		return rc;
	}

	/* The FIFO already contains a reset request from the SBE ? */
	if (down_status & SBEFIFO_STS_RESET_REQ) {
		dev_info(dev, "Cleanup: FIFO reset request set, resetting\n");
		rc = sbefifo_regw(sbefifo, SBEFIFO_DOWN, SBEFIFO_PERFORM_RESET);
		if (rc) {
			sbefifo->broken = true;
			dev_err(dev, "Cleanup: Reset reg write failed, rc=%d\n", rc);
			return rc;
		}
		sbefifo->broken = false;
		return 0;
	}

	/* Parity error on either FIFO ? */
	if ((up_status | down_status) & SBEFIFO_STS_PARITY_ERR)
		need_reset = true;

	/* Either FIFO not empty ? */
	if (!((up_status & down_status) & SBEFIFO_STS_EMPTY))
		need_reset = true;

	if (!need_reset)
		return 0;

	dev_info(dev, "Cleanup: FIFO not clean (up=0x%08x down=0x%08x)\n",
		 up_status, down_status);

 do_reset:

	/* Mark broken, will be cleared if/when reset succeeds */
	return sbefifo_request_reset(sbefifo);
}
434*4882a593Smuzhiyun
/*
 * Poll a FIFO's status register until it is ready for the next
 * transfer or @timeout (in jiffies) expires.  "Ready" means not-full
 * for the upstream FIFO (@up true, we want to write) and not-empty
 * for the downstream FIFO (@up false, we want to read).  Parity
 * errors are only checked on the downstream FIFO here.
 *
 * On success the last status value read is stored in *status.
 *
 * Return: 0 when ready, -ETIMEDOUT, -ENXIO on a DOWN parity error,
 * or a negative FSI error.
 */
static int sbefifo_wait(struct sbefifo *sbefifo, bool up,
			u32 *status, unsigned long timeout)
{
	struct device *dev = &sbefifo->fsi_dev->dev;
	unsigned long end_time;
	bool ready = false;
	u32 addr, sts = 0;
	int rc;

	dev_vdbg(dev, "Wait on %s fifo...\n", up ? "up" : "down");

	addr = (up ? SBEFIFO_UP : SBEFIFO_DOWN) | SBEFIFO_STS;

	end_time = jiffies + timeout;
	while (!time_after(jiffies, end_time)) {
		cond_resched();
		rc = sbefifo_regr(sbefifo, addr, &sts);
		if (rc < 0) {
			dev_err(dev, "FSI error %d reading status register\n", rc);
			return rc;
		}
		if (!up && sbefifo_parity_err(sts)) {
			dev_err(dev, "Parity error in DOWN FIFO\n");
			return -ENXIO;
		}
		ready = !(up ? sbefifo_full(sts) : sbefifo_empty(sts));
		if (ready)
			break;
	}
	if (!ready) {
		dev_err(dev, "%s FIFO Timeout ! status=%08x\n", up ? "UP" : "DOWN", sts);
		return -ETIMEDOUT;
	}
	dev_vdbg(dev, "End of wait status: %08x\n", sts);

	*status = sts;

	return 0;
}
474*4882a593Smuzhiyun
/*
 * Stream a command into the upstream FIFO in FIFO-depth-sized chunks,
 * then raise End-Of-Transfer.  The first wait uses the longer
 * "start of command" timeout; subsequent waits within the transfer
 * use the shorter in-command timeout.
 *
 * @command is a buffer of big-endian words; command[1] holds the
 * command opcode (used for the debug trace only here).
 *
 * Return: 0 on success or a negative error from the waits/writes.
 */
static int sbefifo_send_command(struct sbefifo *sbefifo,
				const __be32 *command, size_t cmd_len)
{
	struct device *dev = &sbefifo->fsi_dev->dev;
	size_t len, chunk, vacant = 0, remaining = cmd_len;
	unsigned long timeout;
	u32 status;
	int rc;

	dev_vdbg(dev, "sending command (%zd words, cmd=%04x)\n",
		 cmd_len, be32_to_cpu(command[1]));

	/* As long as there's something to send */
	timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_START_CMD);
	while (remaining) {
		/* Wait for room in the FIFO */
		rc = sbefifo_wait(sbefifo, true, &status, timeout);
		if (rc < 0)
			return rc;
		timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_IN_CMD);

		vacant = sbefifo_vacant(status);
		len = chunk = min(vacant, remaining);

		dev_vdbg(dev, "  status=%08x vacant=%zd chunk=%zd\n",
			 status, vacant, chunk);

		/* Write as much as we can */
		while (len--) {
			rc = sbefifo_up_write(sbefifo, *(command++));
			if (rc) {
				dev_err(dev, "FSI error %d writing UP FIFO\n", rc);
				return rc;
			}
		}
		remaining -= chunk;
		/* Track leftover room so the EOT below may skip a wait */
		vacant -= chunk;
	}

	/* If there's no room left, wait for some to write EOT */
	if (!vacant) {
		rc = sbefifo_wait(sbefifo, true, &status, timeout);
		if (rc)
			return rc;
	}

	/* Send an EOT */
	rc = sbefifo_regw(sbefifo, SBEFIFO_UP | SBEFIFO_EOT_RAISE, 0);
	if (rc)
		dev_err(dev, "FSI error %d writing EOT\n", rc);
	return rc;
}
527*4882a593Smuzhiyun
/*
 * Drain the downstream FIFO into @response until the SBE's
 * End-Of-Transfer marker is seen.
 *
 * Each status read gives both the number of populated entries and a
 * per-entry EOT bitmask (MSB-first, tracked by shifting eot_set left
 * as each word is consumed).  Data beyond the caller's buffer is read
 * and discarded, and the call then reports -EOVERFLOW — the command
 * itself still completed.
 *
 * Return: 0 on success, -EOVERFLOW if the response was larger than
 * the buffer, -EFAULT on copy failure, or a negative error from the
 * waits/reads.
 */
static int sbefifo_read_response(struct sbefifo *sbefifo, struct iov_iter *response)
{
	struct device *dev = &sbefifo->fsi_dev->dev;
	u32 status, eot_set;
	unsigned long timeout;
	bool overflow = false;
	__be32 data;
	size_t len;
	int rc;

	dev_vdbg(dev, "reading response, buflen = %zd\n", iov_iter_count(response));

	timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_START_RSP);
	for (;;) {
		/* Grab FIFO status (this will handle parity errors) */
		rc = sbefifo_wait(sbefifo, false, &status, timeout);
		if (rc < 0)
			return rc;
		timeout = msecs_to_jiffies(SBEFIFO_TIMEOUT_IN_RSP);

		/* Decode status */
		len = sbefifo_populated(status);
		eot_set = sbefifo_eot_set(status);

		dev_vdbg(dev, "  chunk size %zd eot_set=0x%x\n", len, eot_set);

		/* Go through the chunk */
		while(len--) {
			/* Read the data */
			rc = sbefifo_down_read(sbefifo, &data);
			if (rc < 0)
				return rc;

			/* Was it an EOT ? (top bit of the shifted mask) */
			if (eot_set & 0x80) {
				/*
				 * There should be nothing else in the FIFO,
				 * if there is, mark broken, this will force
				 * a reset on next use, but don't fail the
				 * command.
				 */
				if (len) {
					dev_warn(dev, "FIFO read hit"
						 " EOT with still %zd data\n",
						 len);
					sbefifo->broken = true;
				}

				/* We are done */
				rc = sbefifo_regw(sbefifo,
						  SBEFIFO_DOWN | SBEFIFO_EOT_ACK, 0);

				/*
				 * If that write fail, still complete the request but mark
				 * the fifo as broken for subsequent reset (not much else
				 * we can do here).
				 */
				if (rc) {
					dev_err(dev, "FSI error %d ack'ing EOT\n", rc);
					sbefifo->broken = true;
				}

				/* Tell whether we overflowed */
				return overflow ? -EOVERFLOW : 0;
			}

			/* Store it if there is room */
			if (iov_iter_count(response) >= sizeof(__be32)) {
				if (copy_to_iter(&data, sizeof(__be32), response) < sizeof(__be32))
					return -EFAULT;
			} else {
				dev_vdbg(dev, "Response overflowed !\n");

				overflow = true;
			}

			/* Next EOT bit */
			eot_set <<= 1;
		}
	}
	/* Shouldn't happen */
	return -EIO;
}
611*4882a593Smuzhiyun
/*
 * Execute one command/response exchange: push the command into the
 * upstream FIFO, then drain the SBE's answer from the downstream
 * FIFO into @response.
 */
static int sbefifo_do_command(struct sbefifo *sbefifo,
			      const __be32 *command, size_t cmd_len,
			      struct iov_iter *response)
{
	int rc;

	rc = sbefifo_send_command(sbefifo, command, cmd_len);
	if (rc)
		return rc;

	return sbefifo_read_response(sbefifo, response);
}
624*4882a593Smuzhiyun
/*
 * Retrieve asynchronous FFDC from the SBE (signalled via the CFAM
 * mailbox, latched in sbefifo->async_ffdc) by issuing the
 * SBEFIFO_CMD_GET_SBE_FFDC command, then dump whatever FFDC came back
 * to the log.  Best-effort: failures are reported but not propagated.
 */
static void sbefifo_collect_async_ffdc(struct sbefifo *sbefifo)
{
	struct device *dev = &sbefifo->fsi_dev->dev;
	struct iov_iter ffdc_iter;
	struct kvec ffdc_iov;
	__be32 *ffdc;
	size_t ffdc_sz;
	__be32 cmd[2];
	int rc;

	/* Clear the flag first so a failed fetch isn't retried forever */
	sbefifo->async_ffdc = false;
	ffdc = vmalloc(SBEFIFO_MAX_FFDC_SIZE);
	if (!ffdc) {
		dev_err(dev, "Failed to allocate SBE FFDC buffer\n");
		return;
	}
	ffdc_iov.iov_base = ffdc;
	ffdc_iov.iov_len = SBEFIFO_MAX_FFDC_SIZE;
	iov_iter_kvec(&ffdc_iter, WRITE, &ffdc_iov, 1, SBEFIFO_MAX_FFDC_SIZE);
	/* Command is: length (2 words) followed by the opcode */
	cmd[0] = cpu_to_be32(2);
	cmd[1] = cpu_to_be32(SBEFIFO_CMD_GET_SBE_FFDC);
	rc = sbefifo_do_command(sbefifo, cmd, 2, &ffdc_iter);
	if (rc != 0) {
		dev_err(dev, "Error %d retrieving SBE FFDC\n", rc);
		goto bail;
	}
	/* Words actually received = capacity minus what's left in the iter */
	ffdc_sz = SBEFIFO_MAX_FFDC_SIZE - iov_iter_count(&ffdc_iter);
	ffdc_sz /= sizeof(__be32);
	rc = sbefifo_parse_status(dev, SBEFIFO_CMD_GET_SBE_FFDC, ffdc,
				  ffdc_sz, &ffdc_sz);
	if (rc != 0) {
		dev_err(dev, "Error %d decoding SBE FFDC\n", rc);
		goto bail;
	}
	if (ffdc_sz > 0)
		sbefifo_dump_ffdc(dev, ffdc, ffdc_sz, true);
 bail:
	vfree(ffdc);

}
665*4882a593Smuzhiyun
/*
 * Core command submission, called with sbefifo->lock held.
 *
 * Validates the command header, ensures the HW is clean, drains any
 * pending async FFDC, then runs the command. On any failure other
 * than a response-buffer overflow, a FIFO reset is requested (its
 * result is ignored; a failed reset marks the fifo broken).
 */
static int __sbefifo_submit(struct sbefifo *sbefifo,
			    const __be32 *command, size_t cmd_len,
			    struct iov_iter *response)
{
	struct device *dev = &sbefifo->fsi_dev->dev;
	int rc;

	if (sbefifo->dead)
		return -ENODEV;

	/* The first command word must be the total length in words */
	if (cmd_len < 2 || be32_to_cpu(command[0]) != cmd_len) {
		dev_vdbg(dev, "Invalid command len %zd (header: %d)\n",
			 cmd_len, be32_to_cpu(command[0]));
		return -EINVAL;
	}

	/* First ensure the HW is in a clean state */
	rc = sbefifo_cleanup_hw(sbefifo);
	if (rc)
		return rc;

	/* Look for async FFDC first if any */
	if (sbefifo->async_ffdc)
		sbefifo_collect_async_ffdc(sbefifo);

	rc = sbefifo_do_command(sbefifo, command, cmd_len, response);
	if (rc == 0 || rc == -EOVERFLOW)
		return rc;

	/*
	 * On failure, attempt a reset. Ignore the result, it will mark
	 * the fifo broken if the reset fails
	 */
	sbefifo_request_reset(sbefifo);

	/* Return original error */
	return rc;
}
705*4882a593Smuzhiyun
/**
 * sbefifo_submit() - Submit an SBE fifo command and receive response
 * @dev: The sbefifo device
 * @command: The raw command data
 * @cmd_len: The command size (in 32-bit words)
 * @response: The output response buffer
 * @resp_len: In: Response buffer size, Out: Response size
 *
 * This will perform the entire operation. If the response buffer
 * overflows, returns -EOVERFLOW
 */
int sbefifo_submit(struct device *dev, const __be32 *command, size_t cmd_len,
		   __be32 *response, size_t *resp_len)
{
	struct sbefifo *sbefifo;
	struct iov_iter iter;
	struct kvec iov;
	size_t size;
	int rc;

	if (!dev)
		return -ENODEV;
	sbefifo = dev_get_drvdata(dev);
	if (!sbefifo)
		return -ENODEV;
	if (WARN_ON_ONCE(sbefifo->magic != SBEFIFO_MAGIC))
		return -ENODEV;
	if (!command || !response || !resp_len)
		return -EINVAL;

	/* Wrap the caller's buffer in an iov iterator; size in bytes */
	size = *resp_len * sizeof(__be32);
	iov.iov_base = response;
	iov.iov_len = size;
	iov_iter_kvec(&iter, WRITE, &iov, 1, size);

	/* Serialize against other submitters on this fifo */
	mutex_lock(&sbefifo->lock);
	rc = __sbefifo_submit(sbefifo, command, cmd_len, &iter);
	mutex_unlock(&sbefifo->lock);

	/* Report back how many words were actually received */
	*resp_len = (size - iov_iter_count(&iter)) / sizeof(__be32);

	return rc;
}
EXPORT_SYMBOL_GPL(sbefifo_submit);
754*4882a593Smuzhiyun
755*4882a593Smuzhiyun /*
756*4882a593Smuzhiyun * Char device interface
757*4882a593Smuzhiyun */
758*4882a593Smuzhiyun
sbefifo_release_command(struct sbefifo_user * user)759*4882a593Smuzhiyun static void sbefifo_release_command(struct sbefifo_user *user)
760*4882a593Smuzhiyun {
761*4882a593Smuzhiyun if (is_vmalloc_addr(user->pending_cmd))
762*4882a593Smuzhiyun vfree(user->pending_cmd);
763*4882a593Smuzhiyun user->pending_cmd = NULL;
764*4882a593Smuzhiyun user->pending_len = 0;
765*4882a593Smuzhiyun }
766*4882a593Smuzhiyun
sbefifo_user_open(struct inode * inode,struct file * file)767*4882a593Smuzhiyun static int sbefifo_user_open(struct inode *inode, struct file *file)
768*4882a593Smuzhiyun {
769*4882a593Smuzhiyun struct sbefifo *sbefifo = container_of(inode->i_cdev, struct sbefifo, cdev);
770*4882a593Smuzhiyun struct sbefifo_user *user;
771*4882a593Smuzhiyun
772*4882a593Smuzhiyun user = kzalloc(sizeof(struct sbefifo_user), GFP_KERNEL);
773*4882a593Smuzhiyun if (!user)
774*4882a593Smuzhiyun return -ENOMEM;
775*4882a593Smuzhiyun
776*4882a593Smuzhiyun file->private_data = user;
777*4882a593Smuzhiyun user->sbefifo = sbefifo;
778*4882a593Smuzhiyun user->cmd_page = (void *)__get_free_page(GFP_KERNEL);
779*4882a593Smuzhiyun if (!user->cmd_page) {
780*4882a593Smuzhiyun kfree(user);
781*4882a593Smuzhiyun return -ENOMEM;
782*4882a593Smuzhiyun }
783*4882a593Smuzhiyun mutex_init(&user->file_lock);
784*4882a593Smuzhiyun
785*4882a593Smuzhiyun return 0;
786*4882a593Smuzhiyun }
787*4882a593Smuzhiyun
sbefifo_user_read(struct file * file,char __user * buf,size_t len,loff_t * offset)788*4882a593Smuzhiyun static ssize_t sbefifo_user_read(struct file *file, char __user *buf,
789*4882a593Smuzhiyun size_t len, loff_t *offset)
790*4882a593Smuzhiyun {
791*4882a593Smuzhiyun struct sbefifo_user *user = file->private_data;
792*4882a593Smuzhiyun struct sbefifo *sbefifo;
793*4882a593Smuzhiyun struct iov_iter resp_iter;
794*4882a593Smuzhiyun struct iovec resp_iov;
795*4882a593Smuzhiyun size_t cmd_len;
796*4882a593Smuzhiyun int rc;
797*4882a593Smuzhiyun
798*4882a593Smuzhiyun if (!user)
799*4882a593Smuzhiyun return -EINVAL;
800*4882a593Smuzhiyun sbefifo = user->sbefifo;
801*4882a593Smuzhiyun if (len & 3)
802*4882a593Smuzhiyun return -EINVAL;
803*4882a593Smuzhiyun
804*4882a593Smuzhiyun mutex_lock(&user->file_lock);
805*4882a593Smuzhiyun
806*4882a593Smuzhiyun /* Cronus relies on -EAGAIN after a short read */
807*4882a593Smuzhiyun if (user->pending_len == 0) {
808*4882a593Smuzhiyun rc = -EAGAIN;
809*4882a593Smuzhiyun goto bail;
810*4882a593Smuzhiyun }
811*4882a593Smuzhiyun if (user->pending_len < 8) {
812*4882a593Smuzhiyun rc = -EINVAL;
813*4882a593Smuzhiyun goto bail;
814*4882a593Smuzhiyun }
815*4882a593Smuzhiyun cmd_len = user->pending_len >> 2;
816*4882a593Smuzhiyun
817*4882a593Smuzhiyun /* Prepare iov iterator */
818*4882a593Smuzhiyun resp_iov.iov_base = buf;
819*4882a593Smuzhiyun resp_iov.iov_len = len;
820*4882a593Smuzhiyun iov_iter_init(&resp_iter, WRITE, &resp_iov, 1, len);
821*4882a593Smuzhiyun
822*4882a593Smuzhiyun /* Perform the command */
823*4882a593Smuzhiyun mutex_lock(&sbefifo->lock);
824*4882a593Smuzhiyun rc = __sbefifo_submit(sbefifo, user->pending_cmd, cmd_len, &resp_iter);
825*4882a593Smuzhiyun mutex_unlock(&sbefifo->lock);
826*4882a593Smuzhiyun if (rc < 0)
827*4882a593Smuzhiyun goto bail;
828*4882a593Smuzhiyun
829*4882a593Smuzhiyun /* Extract the response length */
830*4882a593Smuzhiyun rc = len - iov_iter_count(&resp_iter);
831*4882a593Smuzhiyun bail:
832*4882a593Smuzhiyun sbefifo_release_command(user);
833*4882a593Smuzhiyun mutex_unlock(&user->file_lock);
834*4882a593Smuzhiyun return rc;
835*4882a593Smuzhiyun }
836*4882a593Smuzhiyun
sbefifo_user_write(struct file * file,const char __user * buf,size_t len,loff_t * offset)837*4882a593Smuzhiyun static ssize_t sbefifo_user_write(struct file *file, const char __user *buf,
838*4882a593Smuzhiyun size_t len, loff_t *offset)
839*4882a593Smuzhiyun {
840*4882a593Smuzhiyun struct sbefifo_user *user = file->private_data;
841*4882a593Smuzhiyun struct sbefifo *sbefifo;
842*4882a593Smuzhiyun int rc = len;
843*4882a593Smuzhiyun
844*4882a593Smuzhiyun if (!user)
845*4882a593Smuzhiyun return -EINVAL;
846*4882a593Smuzhiyun sbefifo = user->sbefifo;
847*4882a593Smuzhiyun if (len > SBEFIFO_MAX_USER_CMD_LEN)
848*4882a593Smuzhiyun return -EINVAL;
849*4882a593Smuzhiyun if (len & 3)
850*4882a593Smuzhiyun return -EINVAL;
851*4882a593Smuzhiyun
852*4882a593Smuzhiyun mutex_lock(&user->file_lock);
853*4882a593Smuzhiyun
854*4882a593Smuzhiyun /* Can we use the pre-allocate buffer ? If not, allocate */
855*4882a593Smuzhiyun if (len <= PAGE_SIZE)
856*4882a593Smuzhiyun user->pending_cmd = user->cmd_page;
857*4882a593Smuzhiyun else
858*4882a593Smuzhiyun user->pending_cmd = vmalloc(len);
859*4882a593Smuzhiyun if (!user->pending_cmd) {
860*4882a593Smuzhiyun rc = -ENOMEM;
861*4882a593Smuzhiyun goto bail;
862*4882a593Smuzhiyun }
863*4882a593Smuzhiyun
864*4882a593Smuzhiyun /* Copy the command into the staging buffer */
865*4882a593Smuzhiyun if (copy_from_user(user->pending_cmd, buf, len)) {
866*4882a593Smuzhiyun rc = -EFAULT;
867*4882a593Smuzhiyun goto bail;
868*4882a593Smuzhiyun }
869*4882a593Smuzhiyun
870*4882a593Smuzhiyun /* Check for the magic reset command */
871*4882a593Smuzhiyun if (len == 4 && be32_to_cpu(*(__be32 *)user->pending_cmd) ==
872*4882a593Smuzhiyun SBEFIFO_RESET_MAGIC) {
873*4882a593Smuzhiyun
874*4882a593Smuzhiyun /* Clear out any pending command */
875*4882a593Smuzhiyun user->pending_len = 0;
876*4882a593Smuzhiyun
877*4882a593Smuzhiyun /* Trigger reset request */
878*4882a593Smuzhiyun mutex_lock(&sbefifo->lock);
879*4882a593Smuzhiyun rc = sbefifo_request_reset(user->sbefifo);
880*4882a593Smuzhiyun mutex_unlock(&sbefifo->lock);
881*4882a593Smuzhiyun if (rc == 0)
882*4882a593Smuzhiyun rc = 4;
883*4882a593Smuzhiyun goto bail;
884*4882a593Smuzhiyun }
885*4882a593Smuzhiyun
886*4882a593Smuzhiyun /* Update the staging buffer size */
887*4882a593Smuzhiyun user->pending_len = len;
888*4882a593Smuzhiyun bail:
889*4882a593Smuzhiyun if (!user->pending_len)
890*4882a593Smuzhiyun sbefifo_release_command(user);
891*4882a593Smuzhiyun
892*4882a593Smuzhiyun mutex_unlock(&user->file_lock);
893*4882a593Smuzhiyun
894*4882a593Smuzhiyun /* And that's it, we'll issue the command on a read */
895*4882a593Smuzhiyun return rc;
896*4882a593Smuzhiyun }
897*4882a593Smuzhiyun
/* release(): tear down per-client state allocated in sbefifo_user_open() */
static int sbefifo_user_release(struct inode *inode, struct file *file)
{
	struct sbefifo_user *user = file->private_data;

	if (!user)
		return -EINVAL;

	/* Drop any staged command, then the page and the state itself */
	sbefifo_release_command(user);
	free_page((unsigned long)user->cmd_page);
	kfree(user);

	return 0;
}
911*4882a593Smuzhiyun
/* Character device operations for the /dev/sbefifoN user interface */
static const struct file_operations sbefifo_fops = {
	.owner		= THIS_MODULE,
	.open		= sbefifo_user_open,
	.read		= sbefifo_user_read,
	.write		= sbefifo_user_write,
	.release	= sbefifo_user_release,
};
919*4882a593Smuzhiyun
/*
 * Device release callback, invoked when the last reference to the
 * chardev's struct device is dropped (via put_device()).
 */
static void sbefifo_free(struct device *dev)
{
	struct sbefifo *sbefifo = container_of(dev, struct sbefifo, dev);

	/* Drop the FSI device reference taken in sbefifo_probe() */
	put_device(&sbefifo->fsi_dev->dev);
	kfree(sbefifo);
}
927*4882a593Smuzhiyun
928*4882a593Smuzhiyun /*
929*4882a593Smuzhiyun * Probe/remove
930*4882a593Smuzhiyun */
931*4882a593Smuzhiyun
/*
 * Probe an SBE FIFO engine: allocate driver state, attempt an initial
 * HW cleanup, register the userspace chardev and create platform
 * devices for any DT child nodes (e.g. occ).
 *
 * Returns 0 on success or a negative errno. Note that a failing
 * initial HW cleanup is NOT fatal - it is retried on first access.
 */
static int sbefifo_probe(struct device *dev)
{
	struct fsi_device *fsi_dev = to_fsi_dev(dev);
	struct sbefifo *sbefifo;
	struct device_node *np;
	struct platform_device *child;
	char child_name[32];
	int rc, didx, child_idx = 0;

	dev_dbg(dev, "Found sbefifo device\n");

	sbefifo = kzalloc(sizeof(*sbefifo), GFP_KERNEL);
	if (!sbefifo)
		return -ENOMEM;

	/* Grab a reference to the device (parent of our cdev), we'll drop it later */
	if (!get_device(dev)) {
		kfree(sbefifo);
		return -ENODEV;
	}

	/* Magic is checked by sbefifo_submit() to validate drvdata */
	sbefifo->magic = SBEFIFO_MAGIC;
	sbefifo->fsi_dev = fsi_dev;
	dev_set_drvdata(dev, sbefifo);
	mutex_init(&sbefifo->lock);

	/*
	 * Try cleaning up the FIFO. If this fails, we still register the
	 * driver and will try cleaning things up again on the next access.
	 */
	rc = sbefifo_cleanup_hw(sbefifo);
	if (rc && rc != -ESHUTDOWN)
		dev_err(dev, "Initial HW cleanup failed, will retry later\n");

	/* Create chardev for userspace access */
	sbefifo->dev.type = &fsi_cdev_type;
	sbefifo->dev.parent = dev;
	sbefifo->dev.release = sbefifo_free;
	device_initialize(&sbefifo->dev);

	/* Allocate a minor in the FSI space */
	rc = fsi_get_new_minor(fsi_dev, fsi_dev_sbefifo, &sbefifo->dev.devt, &didx);
	if (rc)
		goto err;

	dev_set_name(&sbefifo->dev, "sbefifo%d", didx);
	cdev_init(&sbefifo->cdev, &sbefifo_fops);
	rc = cdev_device_add(&sbefifo->cdev, &sbefifo->dev);
	if (rc) {
		dev_err(dev, "Error %d creating char device %s\n",
			rc, dev_name(&sbefifo->dev));
		goto err_free_minor;
	}

	/* Create platform devs for dts child nodes (occ, etc) */
	for_each_available_child_of_node(dev->of_node, np) {
		snprintf(child_name, sizeof(child_name), "%s-dev%d",
			 dev_name(&sbefifo->dev), child_idx++);
		child = of_platform_device_create(np, child_name, dev);
		if (!child)
			/* Non-fatal: continue with the remaining children */
			dev_warn(dev, "failed to create child %s dev\n",
				 child_name);
	}

	return 0;
err_free_minor:
	fsi_free_minor(sbefifo->dev.devt);
err:
	/* device_initialize() was done: free via put_device() -> sbefifo_free() */
	put_device(&sbefifo->dev);
	return rc;
}
1003*4882a593Smuzhiyun
/*
 * device_for_each_child() callback: unregister a platform child device
 * created at probe time and clear its OF_POPULATED flag so the node
 * could be re-populated later. Always returns 0 to keep iterating.
 */
static int sbefifo_unregister_child(struct device *dev, void *data)
{
	struct platform_device *child = to_platform_device(dev);

	of_device_unregister(child);
	if (dev->of_node)
		of_node_clear_flag(dev->of_node, OF_POPULATED);

	return 0;
}
1014*4882a593Smuzhiyun
/*
 * Remove the sbefifo device: mark it dead so in-flight and future
 * submissions fail with -ENODEV, then tear down the chardev, minor,
 * child platform devices, and drop our device reference.
 */
static int sbefifo_remove(struct device *dev)
{
	struct sbefifo *sbefifo = dev_get_drvdata(dev);

	dev_dbg(dev, "Removing sbefifo device...\n");

	/* Taken under the lock so submitters observe a consistent flag */
	mutex_lock(&sbefifo->lock);
	sbefifo->dead = true;
	mutex_unlock(&sbefifo->lock);

	cdev_device_del(&sbefifo->cdev, &sbefifo->dev);
	fsi_free_minor(sbefifo->dev.devt);
	device_for_each_child(dev, NULL, sbefifo_unregister_child);
	/* May free sbefifo via sbefifo_free() if this was the last ref */
	put_device(&sbefifo->dev);

	return 0;
}
1032*4882a593Smuzhiyun
/* Match any version of the SBE FSI engine */
static const struct fsi_device_id sbefifo_ids[] = {
	{
		.engine_type = FSI_ENGID_SBE,
		.version = FSI_VERSION_ANY,
	},
	{ 0 }
};
1040*4882a593Smuzhiyun
/* FSI bus driver binding probe/remove to matching SBE engines */
static struct fsi_driver sbefifo_drv = {
	.id_table = sbefifo_ids,
	.drv = {
		.name = DEVICE_NAME,
		.bus = &fsi_bus_type,
		.probe = sbefifo_probe,
		.remove = sbefifo_remove,
	}
};
1050*4882a593Smuzhiyun
/* Module init: register the driver on the FSI bus */
static int sbefifo_init(void)
{
	return fsi_driver_register(&sbefifo_drv);
}
1055*4882a593Smuzhiyun
/* Module exit: unregister the driver from the FSI bus */
static void sbefifo_exit(void)
{
	fsi_driver_unregister(&sbefifo_drv);
}
1060*4882a593Smuzhiyun
/* Module plumbing and metadata */
module_init(sbefifo_init);
module_exit(sbefifo_exit);
MODULE_LICENSE("GPL");
MODULE_AUTHOR("Brad Bishop <bradleyb@fuzziesquirrel.com>");
MODULE_AUTHOR("Eddie James <eajames@linux.vnet.ibm.com>");
MODULE_AUTHOR("Andrew Jeffery <andrew@aj.id.au>");
MODULE_AUTHOR("Benjamin Herrenschmidt <benh@kernel.crashing.org>");
MODULE_DESCRIPTION("Linux device interface to the POWER Self Boot Engine");
1069