// SPDX-License-Identifier: GPL-2.0-only
/*
 * i2c-xiic.c
 * Copyright (c) 2002-2007 Xilinx Inc.
 * Copyright (c) 2009-2010 Intel Corporation
 *
 * This code was implemented by Mocean Laboratories AB when porting Linux
 * to the automotive development board Russellville. The copyright holder
 * as seen in the header is Intel Corporation.
 * Mocean Laboratories forked off the GNU/Linux platform work into a
 * separate company called Pelagicore AB, which committed the code to the
 * kernel.
 */

/* Supports:
 * Xilinx IIC
 */
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/errno.h>
#include <linux/err.h>
#include <linux/delay.h>
#include <linux/platform_device.h>
#include <linux/i2c.h>
#include <linux/interrupt.h>
#include <linux/wait.h>
#include <linux/platform_data/i2c-xiic.h>
#include <linux/io.h>
#include <linux/slab.h>
#include <linux/of.h>
#include <linux/clk.h>
#include <linux/pm_runtime.h>

#define DRIVER_NAME "xiic-i2c"

enum xilinx_i2c_state {
        STATE_DONE,
        STATE_ERROR,
        STATE_START
};

enum xiic_endian {
        LITTLE,
        BIG
};
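
/*
 * Register endianness is auto-detected in probe by resetting the TX FIFO and
 * checking whether the status register reports it empty (see xiic_i2c_probe()).
 */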

/**
 * struct xiic_i2c - Internal representation of the XIIC I2C bus
 * @dev: Pointer to device structure
 * @base: Memory base of the HW registers
 * @wait: Wait queue for callers
 * @adap: Kernel adapter representation
 * @tx_msg: Messages from above to be sent
 * @lock: Mutual exclusion
 * @tx_pos: Current pos in TX message
 * @nmsgs: Number of messages in tx_msg
 * @rx_msg: Current RX message
 * @rx_pos: Position within current RX message
 * @endianness: big/little-endian byte order
 * @clk: Pointer to AXI4-lite input clock
 * @state: See STATE_
 * @singlemaster: Indicates bus is single master
 */
struct xiic_i2c {
        struct device *dev;
        void __iomem *base;
        wait_queue_head_t wait;
        struct i2c_adapter adap;
        struct i2c_msg *tx_msg;
        struct mutex lock;
        unsigned int tx_pos;
        unsigned int nmsgs;
        struct i2c_msg *rx_msg;
        int rx_pos;
        enum xiic_endian endianness;
        struct clk *clk;
        enum xilinx_i2c_state state;
        bool singlemaster;
};


#define XIIC_MSB_OFFSET 0
#define XIIC_REG_OFFSET (0x100 + XIIC_MSB_OFFSET)
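
/*
 * The IIC core's own registers start 0x100 into the device's address space;
 * the interrupt registers below 0x100 (DGIER, IISR, IIER and the reset
 * register) follow the layout of the AXI interrupt controller, see the
 * offsets defined further down.
 */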

/*
 * Register offsets in bytes from RegisterBase. Three is added to the
 * base offset to access LSB (IBM style) of the word
 */
#define XIIC_CR_REG_OFFSET (0x00 + XIIC_REG_OFFSET)	/* Control Register */
#define XIIC_SR_REG_OFFSET (0x04 + XIIC_REG_OFFSET)	/* Status Register */
#define XIIC_DTR_REG_OFFSET (0x08 + XIIC_REG_OFFSET)	/* Data Tx Register */
#define XIIC_DRR_REG_OFFSET (0x0C + XIIC_REG_OFFSET)	/* Data Rx Register */
#define XIIC_ADR_REG_OFFSET (0x10 + XIIC_REG_OFFSET)	/* Address Register */
#define XIIC_TFO_REG_OFFSET (0x14 + XIIC_REG_OFFSET)	/* Tx FIFO Occupancy */
#define XIIC_RFO_REG_OFFSET (0x18 + XIIC_REG_OFFSET)	/* Rx FIFO Occupancy */
#define XIIC_TBA_REG_OFFSET (0x1C + XIIC_REG_OFFSET)	/* 10 Bit Address reg */
#define XIIC_RFD_REG_OFFSET (0x20 + XIIC_REG_OFFSET)	/* Rx FIFO Depth reg */
#define XIIC_GPO_REG_OFFSET (0x24 + XIIC_REG_OFFSET)	/* Output Register */

/* Control Register masks */
#define XIIC_CR_ENABLE_DEVICE_MASK	0x01	/* Device enable = 1 */
#define XIIC_CR_TX_FIFO_RESET_MASK	0x02	/* Transmit FIFO reset=1 */
#define XIIC_CR_MSMS_MASK		0x04	/* Master starts Txing=1 */
#define XIIC_CR_DIR_IS_TX_MASK		0x08	/* Dir of tx. Txing=1 */
#define XIIC_CR_NO_ACK_MASK		0x10	/* Tx Ack. NO ack = 1 */
#define XIIC_CR_REPEATED_START_MASK	0x20	/* Repeated start = 1 */
#define XIIC_CR_GENERAL_CALL_MASK	0x40	/* Gen Call enabled = 1 */

/* Status Register masks */
#define XIIC_SR_GEN_CALL_MASK		0x01	/* 1=a mstr issued a GC */
#define XIIC_SR_ADDR_AS_SLAVE_MASK	0x02	/* 1=when addr as slave */
#define XIIC_SR_BUS_BUSY_MASK		0x04	/* 1 = bus is busy */
#define XIIC_SR_MSTR_RDING_SLAVE_MASK	0x08	/* 1=Dir: mstr <-- slave */
#define XIIC_SR_TX_FIFO_FULL_MASK	0x10	/* 1 = Tx FIFO full */
#define XIIC_SR_RX_FIFO_FULL_MASK	0x20	/* 1 = Rx FIFO full */
#define XIIC_SR_RX_FIFO_EMPTY_MASK	0x40	/* 1 = Rx FIFO empty */
#define XIIC_SR_TX_FIFO_EMPTY_MASK	0x80	/* 1 = Tx FIFO empty */

/* Interrupt Status Register masks    Interrupt occurs when...       */
#define XIIC_INTR_ARB_LOST_MASK		0x01	/* 1 = arbitration lost */
#define XIIC_INTR_TX_ERROR_MASK		0x02	/* 1=Tx error/msg complete */
#define XIIC_INTR_TX_EMPTY_MASK		0x04	/* 1 = Tx FIFO/reg empty */
#define XIIC_INTR_RX_FULL_MASK		0x08	/* 1=Rx FIFO/reg=OCY level */
#define XIIC_INTR_BNB_MASK		0x10	/* 1 = Bus not busy */
#define XIIC_INTR_AAS_MASK		0x20	/* 1 = when addr as slave */
#define XIIC_INTR_NAAS_MASK		0x40	/* 1 = not addr as slave */
#define XIIC_INTR_TX_HALF_MASK		0x80	/* 1 = TX FIFO half empty */

/* The following constants specify the depth of the FIFOs */
#define IIC_RX_FIFO_DEPTH	16	/* Rx fifo capacity */
#define IIC_TX_FIFO_DEPTH	16	/* Tx fifo capacity */

/* The following constants specify groups of interrupts that are typically
 * enabled or disabled at the same time
 */
#define XIIC_TX_INTERRUPTS \
        (XIIC_INTR_TX_ERROR_MASK | XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)

#define XIIC_TX_RX_INTERRUPTS (XIIC_INTR_RX_FULL_MASK | XIIC_TX_INTERRUPTS)

/*
 * Tx Fifo upper bit masks.
 */
#define XIIC_TX_DYN_START_MASK		0x0100	/* 1 = Set dynamic start */
#define XIIC_TX_DYN_STOP_MASK		0x0200	/* 1 = Set dynamic stop */
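
/*
 * In dynamic controller mode the TX FIFO is written 16 bits at a time: the
 * low byte carries the address or data byte and bits 8/9 carry the dynamic
 * start and stop controls above (see xiic_start_send()/xiic_start_recv()).
 */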

/*
 * The following constants define the register offsets for the Interrupt
 * registers. There are some holes in the memory map for reserved addresses
 * to allow other registers to be added and still match the memory map of the
 * interrupt controller registers
 */
#define XIIC_DGIER_OFFSET	0x1C	/* Device Global Interrupt Enable Register */
#define XIIC_IISR_OFFSET	0x20	/* Interrupt Status Register */
#define XIIC_IIER_OFFSET	0x28	/* Interrupt Enable Register */
#define XIIC_RESETR_OFFSET	0x40	/* Reset Register */

#define XIIC_RESET_MASK		0xAUL
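/* Writing the 0xA key above to the reset register soft-resets the controller. */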

#define XIIC_PM_TIMEOUT		1000	/* ms */
/* timeout waiting for the controller to respond */
#define XIIC_I2C_TIMEOUT	(msecs_to_jiffies(1000))
/*
 * The following constant is used for the device global interrupt enable
 * register, to enable all interrupts for the device, this is the only bit
 * in the register
 */
#define XIIC_GINTR_ENABLE_MASK	0x80000000UL

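/* Remaining bytes to be sent/received in the current TX/RX message. */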
#define xiic_tx_space(i2c) ((i2c)->tx_msg->len - (i2c)->tx_pos)
#define xiic_rx_space(i2c) ((i2c)->rx_msg->len - (i2c)->rx_pos)

static int xiic_start_xfer(struct xiic_i2c *i2c);
static void __xiic_start_xfer(struct xiic_i2c *i2c);

/*
 * For the register read and write functions, a little-endian and big-endian
 * version are necessary. Endianness is detected during the probe function.
 * Only the least significant byte [doublet] of the register is ever
 * accessed. This requires an offset of 3 [2] from the base address for
 * big-endian systems.
 */
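/*
 * For example, an 8-bit access to the control register at XIIC_CR_REG_OFFSET
 * (0x100) is issued at 0x100 on a little-endian bus but at 0x103 on a
 * big-endian one, while a 16-bit access uses an offset of 2 instead.
 */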

static inline void xiic_setreg8(struct xiic_i2c *i2c, int reg, u8 value)
{
        if (i2c->endianness == LITTLE)
                iowrite8(value, i2c->base + reg);
        else
                iowrite8(value, i2c->base + reg + 3);
}

static inline u8 xiic_getreg8(struct xiic_i2c *i2c, int reg)
{
        u8 ret;

        if (i2c->endianness == LITTLE)
                ret = ioread8(i2c->base + reg);
        else
                ret = ioread8(i2c->base + reg + 3);
        return ret;
}

static inline void xiic_setreg16(struct xiic_i2c *i2c, int reg, u16 value)
{
        if (i2c->endianness == LITTLE)
                iowrite16(value, i2c->base + reg);
        else
                iowrite16be(value, i2c->base + reg + 2);
}

static inline void xiic_setreg32(struct xiic_i2c *i2c, int reg, int value)
{
        if (i2c->endianness == LITTLE)
                iowrite32(value, i2c->base + reg);
        else
                iowrite32be(value, i2c->base + reg);
}

static inline int xiic_getreg32(struct xiic_i2c *i2c, int reg)
{
        u32 ret;

        if (i2c->endianness == LITTLE)
                ret = ioread32(i2c->base + reg);
        else
                ret = ioread32be(i2c->base + reg);
        return ret;
}

static inline void xiic_irq_dis(struct xiic_i2c *i2c, u32 mask)
{
        u32 ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
        xiic_setreg32(i2c, XIIC_IIER_OFFSET, ier & ~mask);
}

static inline void xiic_irq_en(struct xiic_i2c *i2c, u32 mask)
{
        u32 ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
        xiic_setreg32(i2c, XIIC_IIER_OFFSET, ier | mask);
}

static inline void xiic_irq_clr(struct xiic_i2c *i2c, u32 mask)
{
        u32 isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);
        xiic_setreg32(i2c, XIIC_IISR_OFFSET, isr & mask);
}

static inline void xiic_irq_clr_en(struct xiic_i2c *i2c, u32 mask)
{
        xiic_irq_clr(i2c, mask);
        xiic_irq_en(i2c, mask);
}

static int xiic_clear_rx_fifo(struct xiic_i2c *i2c)
{
        u8 sr;
        unsigned long timeout;

        timeout = jiffies + XIIC_I2C_TIMEOUT;
        for (sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET);
             !(sr & XIIC_SR_RX_FIFO_EMPTY_MASK);
             sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET)) {
                xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET);
                if (time_after(jiffies, timeout)) {
                        dev_err(i2c->dev, "Failed to clear rx fifo\n");
                        return -ETIMEDOUT;
                }
        }

        return 0;
}

static int xiic_reinit(struct xiic_i2c *i2c)
{
        int ret;

        xiic_setreg32(i2c, XIIC_RESETR_OFFSET, XIIC_RESET_MASK);

        /* Set receive Fifo depth to maximum (zero based). */
        xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, IIC_RX_FIFO_DEPTH - 1);

        /* Reset Tx Fifo. */
        xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_TX_FIFO_RESET_MASK);

        /* Enable IIC Device, remove Tx Fifo reset & disable general call. */
        xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_ENABLE_DEVICE_MASK);

        /* make sure RX fifo is empty */
        ret = xiic_clear_rx_fifo(i2c);
        if (ret)
                return ret;

        /* Enable interrupts */
        xiic_setreg32(i2c, XIIC_DGIER_OFFSET, XIIC_GINTR_ENABLE_MASK);

        xiic_irq_clr_en(i2c, XIIC_INTR_ARB_LOST_MASK);

        return 0;
}

static void xiic_deinit(struct xiic_i2c *i2c)
{
        u8 cr;

        xiic_setreg32(i2c, XIIC_RESETR_OFFSET, XIIC_RESET_MASK);

        /* Disable IIC Device. */
        cr = xiic_getreg8(i2c, XIIC_CR_REG_OFFSET);
        xiic_setreg8(i2c, XIIC_CR_REG_OFFSET, cr & ~XIIC_CR_ENABLE_DEVICE_MASK);
}

static void xiic_read_rx(struct xiic_i2c *i2c)
{
        u8 bytes_in_fifo;
        int i;

        bytes_in_fifo = xiic_getreg8(i2c, XIIC_RFO_REG_OFFSET) + 1;

        dev_dbg(i2c->adap.dev.parent,
                "%s entry, bytes in fifo: %d, msg: %d, SR: 0x%x, CR: 0x%x\n",
                __func__, bytes_in_fifo, xiic_rx_space(i2c),
                xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
                xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));

        if (bytes_in_fifo > xiic_rx_space(i2c))
                bytes_in_fifo = xiic_rx_space(i2c);

        for (i = 0; i < bytes_in_fifo; i++)
                i2c->rx_msg->buf[i2c->rx_pos++] =
                        xiic_getreg8(i2c, XIIC_DRR_REG_OFFSET);

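        /*
         * Re-arm the receive FIFO programmable depth (the RX_FULL interrupt
         * watermark) for whatever is left of the message; the register is
         * zero based, so a full FIFO corresponds to IIC_RX_FIFO_DEPTH - 1.
         */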
        xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET,
                     (xiic_rx_space(i2c) > IIC_RX_FIFO_DEPTH) ?
                     IIC_RX_FIFO_DEPTH - 1 : xiic_rx_space(i2c) - 1);
}

static int xiic_tx_fifo_space(struct xiic_i2c *i2c)
{
        /* return the actual space left in the FIFO */
        return IIC_TX_FIFO_DEPTH - xiic_getreg8(i2c, XIIC_TFO_REG_OFFSET) - 1;
}

static void xiic_fill_tx_fifo(struct xiic_i2c *i2c)
{
        u8 fifo_space = xiic_tx_fifo_space(i2c);
        int len = xiic_tx_space(i2c);

        len = (len > fifo_space) ? fifo_space : len;

        dev_dbg(i2c->adap.dev.parent, "%s entry, len: %d, fifo space: %d\n",
                __func__, len, fifo_space);

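        /*
         * Each FIFO write below is 16 bits wide so that the dynamic stop bit
         * can be set together with the very last data byte of the transfer.
         */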
        while (len--) {
                u16 data = i2c->tx_msg->buf[i2c->tx_pos++];

                if ((xiic_tx_space(i2c) == 0) && (i2c->nmsgs == 1)) {
                        /* last message in transfer -> STOP */
                        data |= XIIC_TX_DYN_STOP_MASK;
                        dev_dbg(i2c->adap.dev.parent, "%s TX STOP\n", __func__);
                }
                xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
        }
}

static void xiic_wakeup(struct xiic_i2c *i2c, int code)
{
        i2c->tx_msg = NULL;
        i2c->rx_msg = NULL;
        i2c->nmsgs = 0;
        i2c->state = code;
        wake_up(&i2c->wait);
}

static irqreturn_t xiic_process(int irq, void *dev_id)
{
        struct xiic_i2c *i2c = dev_id;
        u32 pend, isr, ier;
        u32 clr = 0;

        /* Get the interrupt Status from the IPIF. There is no clearing of
         * interrupts in the IPIF. Interrupts must be cleared at the source.
         * To find which interrupts are pending; AND interrupts pending with
         * interrupts masked.
         */
        mutex_lock(&i2c->lock);
        isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);
        ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
        pend = isr & ier;

        dev_dbg(i2c->adap.dev.parent, "%s: IER: 0x%x, ISR: 0x%x, pend: 0x%x\n",
                __func__, ier, isr, pend);
        dev_dbg(i2c->adap.dev.parent, "%s: SR: 0x%x, msg: %p, nmsgs: %d\n",
                __func__, xiic_getreg8(i2c, XIIC_SR_REG_OFFSET),
                i2c->tx_msg, i2c->nmsgs);


        /* Service requesting interrupt */
        if ((pend & XIIC_INTR_ARB_LOST_MASK) ||
            ((pend & XIIC_INTR_TX_ERROR_MASK) &&
            !(pend & XIIC_INTR_RX_FULL_MASK))) {
                /* bus arbitration lost, or...
                 * transmit error _OR_ RX completed:
                 * if this happens when RX_FULL is not set
                 * this is probably a TX error
                 */

                dev_dbg(i2c->adap.dev.parent, "%s error\n", __func__);

                /* dynamic mode seems to suffer from problems if we just flush
                 * the FIFOs and the next message is a TX with len 0 (only addr);
                 * reset the IP instead of just flushing the FIFOs
                 */
                xiic_reinit(i2c);

                if (i2c->rx_msg)
                        xiic_wakeup(i2c, STATE_ERROR);
                if (i2c->tx_msg)
                        xiic_wakeup(i2c, STATE_ERROR);
        }
        if (pend & XIIC_INTR_RX_FULL_MASK) {
                /* Receive register/FIFO is full */

                clr |= XIIC_INTR_RX_FULL_MASK;
                if (!i2c->rx_msg) {
                        dev_dbg(i2c->adap.dev.parent,
                                "%s unexpected RX IRQ\n", __func__);
                        xiic_clear_rx_fifo(i2c);
                        goto out;
                }

                xiic_read_rx(i2c);
                if (xiic_rx_space(i2c) == 0) {
                        /* this is the last part of the message */
                        i2c->rx_msg = NULL;

                        /* also clear TX error if there (RX complete) */
                        clr |= (isr & XIIC_INTR_TX_ERROR_MASK);

                        dev_dbg(i2c->adap.dev.parent,
                                "%s end of message, nmsgs: %d\n",
                                __func__, i2c->nmsgs);

                        /* send next message if this wasn't the last,
                         * otherwise the transfer will be finalised when
                         * receiving the bus-not-busy interrupt
                         */
                        if (i2c->nmsgs > 1) {
                                i2c->nmsgs--;
                                i2c->tx_msg++;
                                dev_dbg(i2c->adap.dev.parent,
                                        "%s will start next...\n", __func__);

                                __xiic_start_xfer(i2c);
                        }
                }
        }
        if (pend & XIIC_INTR_BNB_MASK) {
                /* IIC bus has transitioned to not busy */
                clr |= XIIC_INTR_BNB_MASK;

                /* The bus is not busy, disable BusNotBusy interrupt */
                xiic_irq_dis(i2c, XIIC_INTR_BNB_MASK);

                if (!i2c->tx_msg)
                        goto out;

                if ((i2c->nmsgs == 1) && !i2c->rx_msg &&
                    xiic_tx_space(i2c) == 0)
                        xiic_wakeup(i2c, STATE_DONE);
                else
                        xiic_wakeup(i2c, STATE_ERROR);
        }
        if (pend & (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK)) {
                /* Transmit register/FIFO is empty or ½ empty */

                clr |= (pend &
                        (XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_HALF_MASK));

                if (!i2c->tx_msg) {
                        dev_dbg(i2c->adap.dev.parent,
                                "%s unexpected TX IRQ\n", __func__);
                        goto out;
                }

                xiic_fill_tx_fifo(i2c);

                /* current message sent and there is space in the fifo */
                if (!xiic_tx_space(i2c) && xiic_tx_fifo_space(i2c) >= 2) {
                        dev_dbg(i2c->adap.dev.parent,
                                "%s end of message sent, nmsgs: %d\n",
                                __func__, i2c->nmsgs);
                        if (i2c->nmsgs > 1) {
                                i2c->nmsgs--;
                                i2c->tx_msg++;
                                __xiic_start_xfer(i2c);
                        } else {
                                xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);

                                dev_dbg(i2c->adap.dev.parent,
                                        "%s Got TX IRQ but no more to do...\n",
                                        __func__);
                        }
                } else if (!xiic_tx_space(i2c) && (i2c->nmsgs == 1))
                        /* current frame is sent and is last,
                         * make sure to disable tx half
                         */
                        xiic_irq_dis(i2c, XIIC_INTR_TX_HALF_MASK);
        }
out:
        dev_dbg(i2c->adap.dev.parent, "%s clr: 0x%x\n", __func__, clr);

        xiic_setreg32(i2c, XIIC_IISR_OFFSET, clr);
        mutex_unlock(&i2c->lock);
        return IRQ_HANDLED;
}

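/*
 * The status register's BUS_BUSY bit is set between a START condition and
 * the following STOP, whichever master drives them.
 */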
static int xiic_bus_busy(struct xiic_i2c *i2c)
{
        u8 sr = xiic_getreg8(i2c, XIIC_SR_REG_OFFSET);

        return (sr & XIIC_SR_BUS_BUSY_MASK) ? -EBUSY : 0;
}

static int xiic_busy(struct xiic_i2c *i2c)
{
        int tries = 3;
        int err;

        if (i2c->tx_msg)
                return -EBUSY;

        /* In single-master mode the bus can only be busy when in use by this
         * driver. If the register indicates the bus being busy for some reason
         * we should ignore it, since the bus will never be released and i2c
         * will be stuck forever.
         */
        if (i2c->singlemaster) {
                return 0;
        }

        /* for instance if the previous transfer was terminated due to a TX
         * error it might be that the bus is on its way to becoming available,
         * give it at most 3 ms to wake
         */
        err = xiic_bus_busy(i2c);
        while (err && tries--) {
                msleep(1);
                err = xiic_bus_busy(i2c);
        }

        return err;
}

static void xiic_start_recv(struct xiic_i2c *i2c)
{
        u8 rx_watermark;
        struct i2c_msg *msg = i2c->rx_msg = i2c->tx_msg;
        unsigned long flags;

        /* Clear and enable Rx full interrupt. */
        xiic_irq_clr_en(i2c, XIIC_INTR_RX_FULL_MASK | XIIC_INTR_TX_ERROR_MASK);

        /* we want to get all but the last byte, because the TX_ERROR IRQ is
         * used to indicate a NAK of the address as well as a negative ack on
         * the last received byte, so to not mix them up receive all but the
         * last. In the case where there is only one byte to receive
         * we can check if ERROR and RX full are set at the same time
         */
        rx_watermark = msg->len;
        if (rx_watermark > IIC_RX_FIFO_DEPTH)
                rx_watermark = IIC_RX_FIFO_DEPTH;
        xiic_setreg8(i2c, XIIC_RFD_REG_OFFSET, rx_watermark - 1);

        local_irq_save(flags);
        if (!(msg->flags & I2C_M_NOSTART))
                /* write the address */
                xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
                              i2c_8bit_addr_from_msg(msg) | XIIC_TX_DYN_START_MASK);

        xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);

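        /*
         * Write the expected byte count into the TX FIFO; in dynamic mode the
         * core then clocks in that many bytes and, if the dynamic stop bit is
         * set, issues a STOP after the last one.
         */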
        xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET,
                      msg->len | ((i2c->nmsgs == 1) ? XIIC_TX_DYN_STOP_MASK : 0));
        local_irq_restore(flags);

        if (i2c->nmsgs == 1)
                /* very last, enable bus not busy as well */
                xiic_irq_clr_en(i2c, XIIC_INTR_BNB_MASK);

        /* the message is tx:ed */
        i2c->tx_pos = msg->len;
}

static void xiic_start_send(struct xiic_i2c *i2c)
{
        struct i2c_msg *msg = i2c->tx_msg;

        xiic_irq_clr(i2c, XIIC_INTR_TX_ERROR_MASK);

        dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, len: %d",
                __func__, msg, msg->len);
        dev_dbg(i2c->adap.dev.parent, "%s entry, ISR: 0x%x, CR: 0x%x\n",
                __func__, xiic_getreg32(i2c, XIIC_IISR_OFFSET),
                xiic_getreg8(i2c, XIIC_CR_REG_OFFSET));

        if (!(msg->flags & I2C_M_NOSTART)) {
                /* write the address */
                u16 data = i2c_8bit_addr_from_msg(msg) |
                           XIIC_TX_DYN_START_MASK;
                if ((i2c->nmsgs == 1) && msg->len == 0)
                        /* no data and last message -> add STOP */
                        data |= XIIC_TX_DYN_STOP_MASK;

                xiic_setreg16(i2c, XIIC_DTR_REG_OFFSET, data);
        }

        xiic_fill_tx_fifo(i2c);

        /* Clear any pending Tx empty, Tx Error and then enable them. */
        xiic_irq_clr_en(i2c, XIIC_INTR_TX_EMPTY_MASK | XIIC_INTR_TX_ERROR_MASK |
                        XIIC_INTR_BNB_MASK);
}

static irqreturn_t xiic_isr(int irq, void *dev_id)
{
        struct xiic_i2c *i2c = dev_id;
        u32 pend, isr, ier;
        irqreturn_t ret = IRQ_NONE;
        /* Do not process a device's interrupts if the device has no
         * interrupts pending
         */

        dev_dbg(i2c->adap.dev.parent, "%s entry\n", __func__);

        isr = xiic_getreg32(i2c, XIIC_IISR_OFFSET);
        ier = xiic_getreg32(i2c, XIIC_IIER_OFFSET);
        pend = isr & ier;
        if (pend)
                ret = IRQ_WAKE_THREAD;

        return ret;
}

static void __xiic_start_xfer(struct xiic_i2c *i2c)
{
        int first = 1;
        int fifo_space = xiic_tx_fifo_space(i2c);

        dev_dbg(i2c->adap.dev.parent, "%s entry, msg: %p, fifos space: %d\n",
                __func__, i2c->tx_msg, fifo_space);

        if (!i2c->tx_msg)
                return;

        i2c->rx_pos = 0;
        i2c->tx_pos = 0;
        i2c->state = STATE_START;
        while ((fifo_space >= 2) && (first || (i2c->nmsgs > 1))) {
                if (!first) {
                        i2c->nmsgs--;
                        i2c->tx_msg++;
                        i2c->tx_pos = 0;
                } else
                        first = 0;

                if (i2c->tx_msg->flags & I2C_M_RD) {
                        /* we don't dare putting several reads in the FIFO */
                        xiic_start_recv(i2c);
                        return;
                } else {
                        xiic_start_send(i2c);
                        if (xiic_tx_space(i2c) != 0) {
                                /* the message could not be completely sent */
                                break;
                        }
                }

                fifo_space = xiic_tx_fifo_space(i2c);
        }

        /* there are more messages or the current one could not be completely
         * put into the FIFO, also enable the half empty interrupt
         */
        if (i2c->nmsgs > 1 || xiic_tx_space(i2c))
                xiic_irq_clr_en(i2c, XIIC_INTR_TX_HALF_MASK);

}

static int xiic_start_xfer(struct xiic_i2c *i2c)
{
        int ret;

        mutex_lock(&i2c->lock);

        ret = xiic_reinit(i2c);
        if (!ret)
                __xiic_start_xfer(i2c);

        mutex_unlock(&i2c->lock);

        return ret;
}

static int xiic_xfer(struct i2c_adapter *adap, struct i2c_msg *msgs, int num)
{
        struct xiic_i2c *i2c = i2c_get_adapdata(adap);
        int err;

        dev_dbg(adap->dev.parent, "%s entry SR: 0x%x\n", __func__,
                xiic_getreg8(i2c, XIIC_SR_REG_OFFSET));

        err = pm_runtime_resume_and_get(i2c->dev);
        if (err < 0)
                return err;

        err = xiic_busy(i2c);
        if (err)
                goto out;

        i2c->tx_msg = msgs;
        i2c->nmsgs = num;

        err = xiic_start_xfer(i2c);
        if (err < 0) {
                dev_err(adap->dev.parent, "Error xiic_start_xfer\n");
                goto out;
        }

        if (wait_event_timeout(i2c->wait, (i2c->state == STATE_ERROR) ||
                               (i2c->state == STATE_DONE), HZ)) {
                err = (i2c->state == STATE_DONE) ? num : -EIO;
                goto out;
        } else {
                i2c->tx_msg = NULL;
                i2c->rx_msg = NULL;
                i2c->nmsgs = 0;
                err = -ETIMEDOUT;
                goto out;
        }
out:
        pm_runtime_mark_last_busy(i2c->dev);
        pm_runtime_put_autosuspend(i2c->dev);
        return err;
}

static u32 xiic_func(struct i2c_adapter *adap)
{
        return I2C_FUNC_I2C | I2C_FUNC_SMBUS_EMUL;
}

static const struct i2c_algorithm xiic_algorithm = {
        .master_xfer = xiic_xfer,
        .functionality = xiic_func,
};

static const struct i2c_adapter_quirks xiic_quirks = {
        .max_read_len = 255,
};

static const struct i2c_adapter xiic_adapter = {
        .owner = THIS_MODULE,
        .class = I2C_CLASS_DEPRECATED,
        .algo = &xiic_algorithm,
        .quirks = &xiic_quirks,
};


static int xiic_i2c_probe(struct platform_device *pdev)
{
        struct xiic_i2c *i2c;
        struct xiic_i2c_platform_data *pdata;
        struct resource *res;
        int ret, irq;
        u8 i;
        u32 sr;

        i2c = devm_kzalloc(&pdev->dev, sizeof(*i2c), GFP_KERNEL);
        if (!i2c)
                return -ENOMEM;

        res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
        i2c->base = devm_ioremap_resource(&pdev->dev, res);
        if (IS_ERR(i2c->base))
                return PTR_ERR(i2c->base);

        irq = platform_get_irq(pdev, 0);
        if (irq < 0)
                return irq;

        pdata = dev_get_platdata(&pdev->dev);

        /* hook up driver to tree */
        platform_set_drvdata(pdev, i2c);
        i2c->adap = xiic_adapter;
        i2c_set_adapdata(&i2c->adap, i2c);
        i2c->adap.dev.parent = &pdev->dev;
        i2c->adap.dev.of_node = pdev->dev.of_node;
        snprintf(i2c->adap.name, sizeof(i2c->adap.name),
                 DRIVER_NAME " %s", pdev->name);

        mutex_init(&i2c->lock);
        init_waitqueue_head(&i2c->wait);

        i2c->clk = devm_clk_get(&pdev->dev, NULL);
        if (IS_ERR(i2c->clk)) {
                if (PTR_ERR(i2c->clk) != -EPROBE_DEFER)
                        dev_err(&pdev->dev, "input clock not found.\n");
                return PTR_ERR(i2c->clk);
        }
        ret = clk_prepare_enable(i2c->clk);
        if (ret) {
                dev_err(&pdev->dev, "Unable to enable clock.\n");
                return ret;
        }
        i2c->dev = &pdev->dev;
        pm_runtime_set_autosuspend_delay(i2c->dev, XIIC_PM_TIMEOUT);
        pm_runtime_use_autosuspend(i2c->dev);
        pm_runtime_set_active(i2c->dev);
        pm_runtime_enable(i2c->dev);
        ret = devm_request_threaded_irq(&pdev->dev, irq, xiic_isr,
                                        xiic_process, IRQF_ONESHOT,
                                        pdev->name, i2c);

        if (ret < 0) {
                dev_err(&pdev->dev, "Cannot claim IRQ\n");
                goto err_clk_dis;
        }

        i2c->singlemaster =
                of_property_read_bool(pdev->dev.of_node, "single-master");

        /*
         * Detect endianness
         * Try to reset the TX FIFO. Then check the EMPTY flag. If it is not
         * set, assume that the endianness was wrong and swap.
         */
        i2c->endianness = LITTLE;
        xiic_setreg32(i2c, XIIC_CR_REG_OFFSET, XIIC_CR_TX_FIFO_RESET_MASK);
        /* Reset is cleared in xiic_reinit */
        sr = xiic_getreg32(i2c, XIIC_SR_REG_OFFSET);
        if (!(sr & XIIC_SR_TX_FIFO_EMPTY_MASK))
                i2c->endianness = BIG;

        ret = xiic_reinit(i2c);
        if (ret < 0) {
                dev_err(&pdev->dev, "Cannot xiic_reinit\n");
                goto err_clk_dis;
        }

        /* add i2c adapter to i2c tree */
        ret = i2c_add_adapter(&i2c->adap);
        if (ret) {
                xiic_deinit(i2c);
                goto err_clk_dis;
        }

        if (pdata) {
                /* add in known devices to the bus */
                for (i = 0; i < pdata->num_devices; i++)
                        i2c_new_client_device(&i2c->adap, pdata->devices + i);
        }

        return 0;

err_clk_dis:
        pm_runtime_set_suspended(&pdev->dev);
        pm_runtime_disable(&pdev->dev);
        clk_disable_unprepare(i2c->clk);
        return ret;
}

static int xiic_i2c_remove(struct platform_device *pdev)
{
        struct xiic_i2c *i2c = platform_get_drvdata(pdev);
        int ret;

        /* remove adapter & data */
        i2c_del_adapter(&i2c->adap);

        ret = pm_runtime_resume_and_get(i2c->dev);
        if (ret < 0)
                return ret;

        xiic_deinit(i2c);
        pm_runtime_put_sync(i2c->dev);
        clk_disable_unprepare(i2c->clk);
        pm_runtime_disable(&pdev->dev);
        pm_runtime_set_suspended(&pdev->dev);
        pm_runtime_dont_use_autosuspend(&pdev->dev);

        return 0;
}

#if defined(CONFIG_OF)
static const struct of_device_id xiic_of_match[] = {
        { .compatible = "xlnx,xps-iic-2.00.a", },
        {},
};
MODULE_DEVICE_TABLE(of, xiic_of_match);
#endif

static int __maybe_unused xiic_i2c_runtime_suspend(struct device *dev)
{
        struct xiic_i2c *i2c = dev_get_drvdata(dev);

        clk_disable(i2c->clk);

        return 0;
}

static int __maybe_unused xiic_i2c_runtime_resume(struct device *dev)
{
        struct xiic_i2c *i2c = dev_get_drvdata(dev);
        int ret;

        ret = clk_enable(i2c->clk);
        if (ret) {
                dev_err(dev, "Cannot enable clock.\n");
                return ret;
        }

        return 0;
}

static const struct dev_pm_ops xiic_dev_pm_ops = {
        SET_RUNTIME_PM_OPS(xiic_i2c_runtime_suspend,
                           xiic_i2c_runtime_resume, NULL)
};

static struct platform_driver xiic_i2c_driver = {
        .probe = xiic_i2c_probe,
        .remove = xiic_i2c_remove,
        .driver = {
                .name = DRIVER_NAME,
                .of_match_table = of_match_ptr(xiic_of_match),
                .pm = &xiic_dev_pm_ops,
        },
};

module_platform_driver(xiic_i2c_driver);

MODULE_ALIAS("platform:" DRIVER_NAME);
MODULE_AUTHOR("info@mocean-labs.com");
MODULE_DESCRIPTION("Xilinx I2C bus driver");
MODULE_LICENSE("GPL v2");