// SPDX-License-Identifier: BSD-3-Clause
/* Copyright (c) 2016-2018, NXP Semiconductors
 * Copyright (c) 2018, Sensor-Technik Wiedemann GmbH
 * Copyright (c) 2018-2019, Vladimir Oltean <olteanv@gmail.com>
 */
#include <linux/spi/spi.h>
#include <linux/packing.h>
#include "sja1105.h"

#define SJA1105_SIZE_RESET_CMD          4
#define SJA1105_SIZE_SPI_MSG_HEADER     4
#define SJA1105_SIZE_SPI_MSG_MAXLEN     (64 * 4)

struct sja1105_chunk {
        u8 *buf;
        size_t len;
        u64 reg_addr;
};

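/* Layout of the 4-byte SPI message header packed by
 * sja1105_spi_message_pack() below, as encoded by its sja1105_pack() calls:
 *   bit  31    - access     (SPI_READ or SPI_WRITE)
 *   bits 30:25 - read_count (number of 32-bit words, read accesses only)
 *   bits 24:4  - address    (register address of the access)
 */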
static void
sja1105_spi_message_pack(void *buf, const struct sja1105_spi_message *msg)
{
        const int size = SJA1105_SIZE_SPI_MSG_HEADER;

        memset(buf, 0, size);

        sja1105_pack(buf, &msg->access, 31, 31, size);
        sja1105_pack(buf, &msg->read_count, 30, 25, size);
        sja1105_pack(buf, &msg->address, 24, 4, size);
}

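/* The SPI transfer array is built as header/payload pairs: for chunk i,
 * xfers[2 * i] carries the packed message header and xfers[2 * i + 1] the
 * data payload. The helpers below index into that layout.
 */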
#define sja1105_hdr_xfer(xfers, chunk) \
        ((xfers) + 2 * (chunk))
#define sja1105_chunk_xfer(xfers, chunk) \
        ((xfers) + 2 * (chunk) + 1)
#define sja1105_hdr_buf(hdr_bufs, chunk) \
        ((hdr_bufs) + (chunk) * SJA1105_SIZE_SPI_MSG_HEADER)

/* If @rw is:
 * - SPI_WRITE: creates and sends an SPI write message at absolute
 *   address reg_addr, taking @len bytes from *buf
 * - SPI_READ: creates and sends an SPI read message from absolute
 *   address reg_addr, writing @len bytes into *buf
 */
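/* The access is split into chunks of at most SJA1105_SIZE_SPI_MSG_MAXLEN
 * bytes, each preceded by its own message header, so @len is not limited to
 * a single SPI message.
 */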
static int sja1105_xfer(const struct sja1105_private *priv,
                        sja1105_spi_rw_mode_t rw, u64 reg_addr, u8 *buf,
                        size_t len, struct ptp_system_timestamp *ptp_sts)
{
        struct sja1105_chunk chunk = {
                .len = min_t(size_t, len, SJA1105_SIZE_SPI_MSG_MAXLEN),
                .reg_addr = reg_addr,
                .buf = buf,
        };
        struct spi_device *spi = priv->spidev;
        struct spi_transfer *xfers;
        int num_chunks;
        int rc, i = 0;
        u8 *hdr_bufs;

        num_chunks = DIV_ROUND_UP(len, SJA1105_SIZE_SPI_MSG_MAXLEN);

        /* One transfer for each message header, one for each message
         * payload (chunk).
         */
        xfers = kcalloc(2 * num_chunks, sizeof(struct spi_transfer),
                        GFP_KERNEL);
        if (!xfers)
                return -ENOMEM;

        /* Packed buffers for the num_chunks SPI message headers,
         * stored as a contiguous array
         */
        hdr_bufs = kcalloc(num_chunks, SJA1105_SIZE_SPI_MSG_HEADER,
                           GFP_KERNEL);
        if (!hdr_bufs) {
                kfree(xfers);
                return -ENOMEM;
        }

        for (i = 0; i < num_chunks; i++) {
                struct spi_transfer *chunk_xfer = sja1105_chunk_xfer(xfers, i);
                struct spi_transfer *hdr_xfer = sja1105_hdr_xfer(xfers, i);
                u8 *hdr_buf = sja1105_hdr_buf(hdr_bufs, i);
                struct spi_transfer *ptp_sts_xfer;
                struct sja1105_spi_message msg;

                /* Populate the transfer's header buffer */
                msg.address = chunk.reg_addr;
                msg.access = rw;
                if (rw == SPI_READ)
                        msg.read_count = chunk.len / 4;
                else
                        /* Ignored */
                        msg.read_count = 0;
                sja1105_spi_message_pack(hdr_buf, &msg);
                hdr_xfer->tx_buf = hdr_buf;
                hdr_xfer->len = SJA1105_SIZE_SPI_MSG_HEADER;

                /* Populate the transfer's data buffer */
                if (rw == SPI_READ)
                        chunk_xfer->rx_buf = chunk.buf;
                else
                        chunk_xfer->tx_buf = chunk.buf;
                chunk_xfer->len = chunk.len;

                /* Request timestamping for the transfer. Instead of letting
                 * callers specify which byte they want to timestamp, we can
                 * make certain assumptions:
                 * - A read operation will request a software timestamp when
                 *   what's being read is the PTP time. That is snapshotted by
                 *   the switch hardware at the end of the command portion
                 *   (hdr_xfer).
                 * - A write operation will request a software timestamp on
                 *   actions that modify the PTP time. Taking clock stepping as
                 *   an example, the switch writes the PTP time at the end of
                 *   the data portion (chunk_xfer).
                 */
                if (rw == SPI_READ)
                        ptp_sts_xfer = hdr_xfer;
                else
                        ptp_sts_xfer = chunk_xfer;
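                /* ptp_sts_word_pre/post select the word offset within the
                 * transfer around which the SPI core takes the pre/post
                 * system timestamps for @ptp_sts; len - 1 targets the last
                 * byte of the selected transfer.
                 */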
                ptp_sts_xfer->ptp_sts_word_pre = ptp_sts_xfer->len - 1;
                ptp_sts_xfer->ptp_sts_word_post = ptp_sts_xfer->len - 1;
                ptp_sts_xfer->ptp_sts = ptp_sts;

                /* Calculate next chunk */
                chunk.buf += chunk.len;
                chunk.reg_addr += chunk.len / 4;
                chunk.len = min_t(size_t, (ptrdiff_t)(buf + len - chunk.buf),
                                  SJA1105_SIZE_SPI_MSG_MAXLEN);

                /* De-assert the chip select after each chunk. */
                if (chunk.len)
                        chunk_xfer->cs_change = 1;
        }

        rc = spi_sync_transfer(spi, xfers, 2 * num_chunks);
        if (rc < 0)
                dev_err(&spi->dev, "SPI transfer failed: %d\n", rc);

        kfree(hdr_bufs);
        kfree(xfers);

        return rc;
}

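/* Wrapper around sja1105_xfer() for callers that do not need a PTP system
 * timestamp of the transfer.
 */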
int sja1105_xfer_buf(const struct sja1105_private *priv,
                     sja1105_spi_rw_mode_t rw, u64 reg_addr,
                     u8 *buf, size_t len)
{
        return sja1105_xfer(priv, rw, reg_addr, buf, len, NULL);
}

/* If @rw is:
 * - SPI_WRITE: creates and sends an SPI write message at absolute
 *   address reg_addr
 * - SPI_READ: creates and sends an SPI read message from absolute
 *   address reg_addr
 *
 * The u64 *value is unpacked, meaning that it's stored in the native
 * CPU endianness and directly usable by software running on the core.
 */
int sja1105_xfer_u64(const struct sja1105_private *priv,
                     sja1105_spi_rw_mode_t rw, u64 reg_addr, u64 *value,
                     struct ptp_system_timestamp *ptp_sts)
{
        u8 packed_buf[8];
        int rc;

        if (rw == SPI_WRITE)
                sja1105_pack(packed_buf, value, 63, 0, 8);

        rc = sja1105_xfer(priv, rw, reg_addr, packed_buf, 8, ptp_sts);

        if (rw == SPI_READ)
                sja1105_unpack(packed_buf, value, 63, 0, 8);

        return rc;
}

/* Same as above, but transfers only a 4 byte word */
int sja1105_xfer_u32(const struct sja1105_private *priv,
                     sja1105_spi_rw_mode_t rw, u64 reg_addr, u32 *value,
                     struct ptp_system_timestamp *ptp_sts)
{
        u8 packed_buf[4];
        u64 tmp;
        int rc;

        if (rw == SPI_WRITE) {
                /* The packing API only supports u64 as CPU word size,
                 * so we need to convert.
                 */
                tmp = *value;
                sja1105_pack(packed_buf, &tmp, 31, 0, 4);
        }

        rc = sja1105_xfer(priv, rw, reg_addr, packed_buf, 4, ptp_sts);

        if (rw == SPI_READ) {
                sja1105_unpack(packed_buf, &tmp, 31, 0, 4);
                *value = tmp;
        }

        return rc;
}

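/* Trigger a cold reset by writing to the RGU reset register. The cold reset
 * request bit sits at a different position on E/T (bit 3) than on P/Q/R/S
 * (bit 2), hence the two variants below.
 */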
static int sja1105et_reset_cmd(struct dsa_switch *ds)
{
        struct sja1105_private *priv = ds->priv;
        const struct sja1105_regs *regs = priv->info->regs;
        u8 packed_buf[SJA1105_SIZE_RESET_CMD] = {0};
        const int size = SJA1105_SIZE_RESET_CMD;
        u64 cold_rst = 1;

        sja1105_pack(packed_buf, &cold_rst, 3, 3, size);

        return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgu, packed_buf,
                                SJA1105_SIZE_RESET_CMD);
}

static int sja1105pqrs_reset_cmd(struct dsa_switch *ds)
{
        struct sja1105_private *priv = ds->priv;
        const struct sja1105_regs *regs = priv->info->regs;
        u8 packed_buf[SJA1105_SIZE_RESET_CMD] = {0};
        const int size = SJA1105_SIZE_RESET_CMD;
        u64 cold_rst = 1;

        sja1105_pack(packed_buf, &cold_rst, 2, 2, size);

        return sja1105_xfer_buf(priv, SPI_WRITE, regs->rgu, packed_buf,
                                SJA1105_SIZE_RESET_CMD);
}

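/* Read-modify-write of the port control register: each bit set in
 * @port_bitmap selects a port whose transmission is inhibited
 * (@tx_inhibited true) or re-enabled (@tx_inhibited false).
 */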
int sja1105_inhibit_tx(const struct sja1105_private *priv,
                       unsigned long port_bitmap, bool tx_inhibited)
{
        const struct sja1105_regs *regs = priv->info->regs;
        u32 inhibit_cmd;
        int rc;

        rc = sja1105_xfer_u32(priv, SPI_READ, regs->port_control,
                              &inhibit_cmd, NULL);
        if (rc < 0)
                return rc;

        if (tx_inhibited)
                inhibit_cmd |= port_bitmap;
        else
                inhibit_cmd &= ~port_bitmap;

        return sja1105_xfer_u32(priv, SPI_WRITE, regs->port_control,
                                &inhibit_cmd, NULL);
}

struct sja1105_status {
        u64 configs;
        u64 crcchkl;
        u64 ids;
        u64 crcchkg;
};

/* This is not reading the entire General Status area, which is also
 * divergent between E/T and P/Q/R/S, but only the relevant bits for
 * ensuring that the static config upload procedure was successful.
 */
static void sja1105_status_unpack(void *buf, struct sja1105_status *status)
{
        /* So that addition translates to 4 bytes */
        u32 *p = buf;

        /* device_id is missing from the buffer, but we don't
         * want to diverge from the manual definition of the
         * register addresses, so we'll back off one step with
         * the register pointer, and never access p[0].
         */
        p--;
        sja1105_unpack(p + 0x1, &status->configs, 31, 31, 4);
        sja1105_unpack(p + 0x1, &status->crcchkl, 30, 30, 4);
        sja1105_unpack(p + 0x1, &status->ids, 29, 29, 4);
        sja1105_unpack(p + 0x1, &status->crcchkg, 28, 28, 4);
}

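/* Read the 4-byte general status word at regs->status and unpack the
 * CONFIGS, CRCCHKL, IDS and CRCCHKG flags used to judge the outcome of a
 * static config upload.
 */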
static int sja1105_status_get(struct sja1105_private *priv,
                              struct sja1105_status *status)
{
        const struct sja1105_regs *regs = priv->info->regs;
        u8 packed_buf[4];
        int rc;

        rc = sja1105_xfer_buf(priv, SPI_READ, regs->status, packed_buf, 4);
        if (rc < 0)
                return rc;

        sja1105_status_unpack(packed_buf, status);

        return 0;
}

/* Not const because unpacking priv->static_config into buffers and preparing
 * for upload requires the recalculation of table CRCs and updating the
 * structures with these.
 */
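/* As implied by the code below: the packed buffer starts with the device ID
 * and the config tables, and ends in a final table header whose CRC field
 * (the last 4 bytes of the buffer) holds a global CRC over everything that
 * precedes it.
 */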
int static_config_buf_prepare_for_upload(struct sja1105_private *priv,
                                         void *config_buf, int buf_len)
{
        struct sja1105_static_config *config = &priv->static_config;
        struct sja1105_table_header final_header;
        sja1105_config_valid_t valid;
        char *final_header_ptr;
        int crc_len;

        valid = sja1105_static_config_check_valid(config);
        if (valid != SJA1105_CONFIG_OK) {
                dev_err(&priv->spidev->dev,
                        sja1105_static_config_error_msg[valid]);
                return -EINVAL;
        }

        /* Write Device ID and config tables to config_buf */
        sja1105_static_config_pack(config_buf, config);
        /* Recalculate CRC of the last header (right now 0xDEADBEEF).
         * Don't include the CRC field itself.
         */
        crc_len = buf_len - 4;
        /* Read the whole table header */
        final_header_ptr = config_buf + buf_len - SJA1105_SIZE_TABLE_HEADER;
        sja1105_table_header_packing(final_header_ptr, &final_header, UNPACK);
        /* Modify */
        final_header.crc = sja1105_crc32(config_buf, crc_len);
        /* Rewrite */
        sja1105_table_header_packing(final_header_ptr, &final_header, PACK);

        return 0;
}

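/* Upper bound on the number of reset + upload + verify attempts made by
 * sja1105_static_config_upload() before giving up.
 */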
#define RETRIES 10

int sja1105_static_config_upload(struct sja1105_private *priv)
{
        unsigned long port_bitmap = GENMASK_ULL(SJA1105_NUM_PORTS - 1, 0);
        struct sja1105_static_config *config = &priv->static_config;
        const struct sja1105_regs *regs = priv->info->regs;
        struct device *dev = &priv->spidev->dev;
        struct sja1105_status status;
        int rc, retries = RETRIES;
        u8 *config_buf;
        int buf_len;

        buf_len = sja1105_static_config_get_length(config);
        config_buf = kcalloc(buf_len, sizeof(char), GFP_KERNEL);
        if (!config_buf)
                return -ENOMEM;

        rc = static_config_buf_prepare_for_upload(priv, config_buf, buf_len);
        if (rc < 0) {
                dev_err(dev, "Invalid config, cannot upload\n");
                rc = -EINVAL;
                goto out;
        }
        /* Prevent PHY jabbering during switch reset by inhibiting
         * Tx on all ports and waiting for current packet to drain.
         * Otherwise, the PHY will see an unterminated Ethernet packet.
         */
        rc = sja1105_inhibit_tx(priv, port_bitmap, true);
        if (rc < 0) {
                dev_err(dev, "Failed to inhibit Tx on ports\n");
                rc = -ENXIO;
                goto out;
        }
        /* Wait for any in-flight egress packet to finish transmission
         * (reach the IFG). It is guaranteed that a second one will not
         * follow, so a switch cold reset is safe at this point.
         */
        usleep_range(500, 1000);
        do {
                /* Put the SJA1105 in programming mode */
                rc = priv->info->reset_cmd(priv->ds);
                if (rc < 0) {
                        dev_err(dev, "Failed to reset switch, retrying...\n");
                        continue;
                }
                /* Wait for the switch to come out of reset */
                usleep_range(1000, 5000);
                /* Upload the static config to the device */
                rc = sja1105_xfer_buf(priv, SPI_WRITE, regs->config,
                                      config_buf, buf_len);
                if (rc < 0) {
                        dev_err(dev, "Failed to upload config, retrying...\n");
                        continue;
                }
                /* Check that SJA1105 responded well to the config upload */
                rc = sja1105_status_get(priv, &status);
                if (rc < 0)
                        continue;

                if (status.ids == 1) {
                        dev_err(dev, "Mismatch between hardware and static config "
                                "device id. Wrote 0x%llx, wants 0x%llx\n",
                                config->device_id, priv->info->device_id);
                        continue;
                }
                if (status.crcchkl == 1) {
                        dev_err(dev, "Switch reported invalid local CRC on "
                                "the uploaded config, retrying...\n");
                        continue;
                }
                if (status.crcchkg == 1) {
                        dev_err(dev, "Switch reported invalid global CRC on "
                                "the uploaded config, retrying...\n");
                        continue;
                }
                if (status.configs == 0) {
                        dev_err(dev, "Switch reported that configuration is "
                                "invalid, retrying...\n");
                        continue;
                }
                /* Success! */
                break;
        } while (--retries);

        if (!retries) {
                rc = -EIO;
                dev_err(dev, "Failed to upload config to device, giving up\n");
                goto out;
        } else if (retries != RETRIES) {
                dev_info(dev, "Succeeded after %d tries\n", RETRIES - retries);
        }

out:
        kfree(config_buf);
        return rc;
}

static struct sja1105_regs sja1105et_regs = {
        .device_id = 0x0,
        .prod_id = 0x100BC3,
        .status = 0x1,
        .port_control = 0x11,
        .vl_status = 0x10000,
        .config = 0x020000,
        .rgu = 0x100440,
        /* UM10944.pdf, Table 86, ACU Register overview */
        .pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
        .pad_mii_rx = {0x100801, 0x100803, 0x100805, 0x100807, 0x100809},
        .rmii_pll1 = 0x10000A,
        .cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
        .mac = {0x200, 0x202, 0x204, 0x206, 0x208},
        .mac_hl1 = {0x400, 0x410, 0x420, 0x430, 0x440},
        .mac_hl2 = {0x600, 0x610, 0x620, 0x630, 0x640},
        /* UM10944.pdf, Table 78, CGU Register overview */
        .mii_tx_clk = {0x100013, 0x10001A, 0x100021, 0x100028, 0x10002F},
        .mii_rx_clk = {0x100014, 0x10001B, 0x100022, 0x100029, 0x100030},
        .mii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
        .mii_ext_rx_clk = {0x100019, 0x100020, 0x100027, 0x10002E, 0x100035},
        .rgmii_tx_clk = {0x100016, 0x10001D, 0x100024, 0x10002B, 0x100032},
        .rmii_ref_clk = {0x100015, 0x10001C, 0x100023, 0x10002A, 0x100031},
        .rmii_ext_tx_clk = {0x100018, 0x10001F, 0x100026, 0x10002D, 0x100034},
        .ptpegr_ts = {0xC0, 0xC2, 0xC4, 0xC6, 0xC8},
        .ptpschtm = 0x12, /* Spans 0x12 to 0x13 */
        .ptppinst = 0x14,
        .ptppindur = 0x16,
        .ptp_control = 0x17,
        .ptpclkval = 0x18, /* Spans 0x18 to 0x19 */
        .ptpclkrate = 0x1A,
        .ptpclkcorp = 0x1D,
};

static struct sja1105_regs sja1105pqrs_regs = {
        .device_id = 0x0,
        .prod_id = 0x100BC3,
        .status = 0x1,
        .port_control = 0x12,
        .vl_status = 0x10000,
        .config = 0x020000,
        .rgu = 0x100440,
        /* UM10944.pdf, Table 86, ACU Register overview */
        .pad_mii_tx = {0x100800, 0x100802, 0x100804, 0x100806, 0x100808},
        .pad_mii_rx = {0x100801, 0x100803, 0x100805, 0x100807, 0x100809},
        .pad_mii_id = {0x100810, 0x100811, 0x100812, 0x100813, 0x100814},
        .sgmii = 0x1F0000,
        .rmii_pll1 = 0x10000A,
        .cgu_idiv = {0x10000B, 0x10000C, 0x10000D, 0x10000E, 0x10000F},
        .mac = {0x200, 0x202, 0x204, 0x206, 0x208},
        .mac_hl1 = {0x400, 0x410, 0x420, 0x430, 0x440},
        .mac_hl2 = {0x600, 0x610, 0x620, 0x630, 0x640},
        .ether_stats = {0x1400, 0x1418, 0x1430, 0x1448, 0x1460},
        /* UM11040.pdf, Table 114 */
        .mii_tx_clk = {0x100013, 0x100019, 0x10001F, 0x100025, 0x10002B},
        .mii_rx_clk = {0x100014, 0x10001A, 0x100020, 0x100026, 0x10002C},
        .mii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F},
        .mii_ext_rx_clk = {0x100018, 0x10001E, 0x100024, 0x10002A, 0x100030},
        .rgmii_tx_clk = {0x100016, 0x10001C, 0x100022, 0x100028, 0x10002E},
        .rmii_ref_clk = {0x100015, 0x10001B, 0x100021, 0x100027, 0x10002D},
        .rmii_ext_tx_clk = {0x100017, 0x10001D, 0x100023, 0x100029, 0x10002F},
        .qlevel = {0x604, 0x614, 0x624, 0x634, 0x644},
        .ptpegr_ts = {0xC0, 0xC4, 0xC8, 0xCC, 0xD0},
        .ptpschtm = 0x13, /* Spans 0x13 to 0x14 */
        .ptppinst = 0x15,
        .ptppindur = 0x17,
        .ptp_control = 0x18,
        .ptpclkval = 0x19,
        .ptpclkrate = 0x1B,
        .ptpclkcorp = 0x1E,
        .ptpsyncts = 0x1F,
};

const struct sja1105_info sja1105e_info = {
        .device_id = SJA1105E_DEVICE_ID,
        .part_no = SJA1105ET_PART_NO,
        .static_ops = sja1105e_table_ops,
        .dyn_ops = sja1105et_dyn_ops,
        .qinq_tpid = ETH_P_8021Q,
        .ptp_ts_bits = 24,
        .ptpegr_ts_bytes = 4,
        .num_cbs_shapers = SJA1105ET_MAX_CBS_COUNT,
        .reset_cmd = sja1105et_reset_cmd,
        .fdb_add_cmd = sja1105et_fdb_add,
        .fdb_del_cmd = sja1105et_fdb_del,
        .ptp_cmd_packing = sja1105et_ptp_cmd_packing,
        .regs = &sja1105et_regs,
        .name = "SJA1105E",
};

const struct sja1105_info sja1105t_info = {
        .device_id = SJA1105T_DEVICE_ID,
        .part_no = SJA1105ET_PART_NO,
        .static_ops = sja1105t_table_ops,
        .dyn_ops = sja1105et_dyn_ops,
        .qinq_tpid = ETH_P_8021Q,
        .ptp_ts_bits = 24,
        .ptpegr_ts_bytes = 4,
        .num_cbs_shapers = SJA1105ET_MAX_CBS_COUNT,
        .reset_cmd = sja1105et_reset_cmd,
        .fdb_add_cmd = sja1105et_fdb_add,
        .fdb_del_cmd = sja1105et_fdb_del,
        .ptp_cmd_packing = sja1105et_ptp_cmd_packing,
        .regs = &sja1105et_regs,
        .name = "SJA1105T",
};

const struct sja1105_info sja1105p_info = {
        .device_id = SJA1105PR_DEVICE_ID,
        .part_no = SJA1105P_PART_NO,
        .static_ops = sja1105p_table_ops,
        .dyn_ops = sja1105pqrs_dyn_ops,
        .qinq_tpid = ETH_P_8021AD,
        .ptp_ts_bits = 32,
        .ptpegr_ts_bytes = 8,
        .num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
        .setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
        .reset_cmd = sja1105pqrs_reset_cmd,
        .fdb_add_cmd = sja1105pqrs_fdb_add,
        .fdb_del_cmd = sja1105pqrs_fdb_del,
        .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
        .regs = &sja1105pqrs_regs,
        .name = "SJA1105P",
};

const struct sja1105_info sja1105q_info = {
        .device_id = SJA1105QS_DEVICE_ID,
        .part_no = SJA1105Q_PART_NO,
        .static_ops = sja1105q_table_ops,
        .dyn_ops = sja1105pqrs_dyn_ops,
        .qinq_tpid = ETH_P_8021AD,
        .ptp_ts_bits = 32,
        .ptpegr_ts_bytes = 8,
        .num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
        .setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
        .reset_cmd = sja1105pqrs_reset_cmd,
        .fdb_add_cmd = sja1105pqrs_fdb_add,
        .fdb_del_cmd = sja1105pqrs_fdb_del,
        .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
        .regs = &sja1105pqrs_regs,
        .name = "SJA1105Q",
};

const struct sja1105_info sja1105r_info = {
        .device_id = SJA1105PR_DEVICE_ID,
        .part_no = SJA1105R_PART_NO,
        .static_ops = sja1105r_table_ops,
        .dyn_ops = sja1105pqrs_dyn_ops,
        .qinq_tpid = ETH_P_8021AD,
        .ptp_ts_bits = 32,
        .ptpegr_ts_bytes = 8,
        .num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
        .setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
        .reset_cmd = sja1105pqrs_reset_cmd,
        .fdb_add_cmd = sja1105pqrs_fdb_add,
        .fdb_del_cmd = sja1105pqrs_fdb_del,
        .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
        .regs = &sja1105pqrs_regs,
        .name = "SJA1105R",
};

const struct sja1105_info sja1105s_info = {
        .device_id = SJA1105QS_DEVICE_ID,
        .part_no = SJA1105S_PART_NO,
        .static_ops = sja1105s_table_ops,
        .dyn_ops = sja1105pqrs_dyn_ops,
        .regs = &sja1105pqrs_regs,
        .qinq_tpid = ETH_P_8021AD,
        .ptp_ts_bits = 32,
        .ptpegr_ts_bytes = 8,
        .num_cbs_shapers = SJA1105PQRS_MAX_CBS_COUNT,
        .setup_rgmii_delay = sja1105pqrs_setup_rgmii_delay,
        .reset_cmd = sja1105pqrs_reset_cmd,
        .fdb_add_cmd = sja1105pqrs_fdb_add,
        .fdb_del_cmd = sja1105pqrs_fdb_del,
        .ptp_cmd_packing = sja1105pqrs_ptp_cmd_packing,
        .name = "SJA1105S",
};