/*
 * Copyright (c) 2012 Intel Corporation. All rights reserved.
 * Copyright (c) 2006 - 2012 QLogic Corporation. All rights reserved.
 * Copyright (c) 2003, 2004, 2005, 2006 PathScale, Inc. All rights reserved.
 *
 * This software is available to you under a choice of one of two
 * licenses.  You may choose to be licensed under the terms of the GNU
 * General Public License (GPL) Version 2, available from the file
 * COPYING in the main directory of this source tree, or the
 * OpenIB.org BSD license below:
 *
 *     Redistribution and use in source and binary forms, with or
 *     without modification, are permitted provided that the following
 *     conditions are met:
 *
 *      - Redistributions of source code must retain the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer.
 *
 *      - Redistributions in binary form must reproduce the above
 *        copyright notice, this list of conditions and the following
 *        disclaimer in the documentation and/or other materials
 *        provided with the distribution.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
 * EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
 * MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
 * NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
 * BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
 * ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
 * CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
 * SOFTWARE.
 */

#include <linux/delay.h>
#include <linux/pci.h>
#include <linux/vmalloc.h>

#include "qib.h"

/*
 * QLogic_IB "Two Wire Serial Interface" driver.
 * Originally written for a not-quite-i2c serial eeprom, which is
 * still used on some supported boards. Later boards have added a
 * variety of other uses, most board-specific, so the bit-banging
 * part has been split off to this file, while the other parts
 * have been moved to chip-specific files.
 *
 * We have also dropped all pretense of a fully generic interface
 * (e.g. pretending we don't know whether '1' is the higher voltage),
 * as the restrictions of the generic i2c layer (e.g. no access from
 * the driver itself) make it unsuitable for this use.
 */
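
/*
 * Editorial note, inferred from how the functions below use the hook: all
 * pin manipulation goes through the chip-specific dd->f_gpio_mod(dd, out,
 * dir, mask) callback, which updates the masked OUT/DIR bits and returns
 * the current pin state.  A line is driven low by making it an output
 * (driving 0) and released by making it an input again, emulating
 * open-drain behavior.
 */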

#define READ_CMD 1
#define WRITE_CMD 0

/**
 * i2c_wait_for_writes - wait for a write
 * @dd: the qlogic_ib device
 *
 * We use this instead of udelay directly, so we can make sure
 * that previous register writes have been flushed all the way
 * to the chip.  Since we are delaying anyway, the cost doesn't
 * hurt, and it makes the bit twiddling more regular.
 */
static void i2c_wait_for_writes(struct qib_devdata *dd)
{
	/*
	 * implicit read of EXTStatus is as good as explicit
	 * read of scratch, if all we want to do is flush
	 * writes.
	 */
	dd->f_gpio_mod(dd, 0, 0, 0);
	rmb(); /* inlined, so prevent compiler reordering */
}

/*
 * QSFP modules are allowed to hold SCL low for 500 uSec. Allow twice that
 * for "almost compliant" modules.
 */
#define SCL_WAIT_USEC 1000

/* BUF_WAIT is the time the bus must be free between a STOP or ACK and the
 * next START.  Should be 20 uSec, but some chips need more.
 */
#define TWSI_BUF_WAIT_USEC 60

static void scl_out(struct qib_devdata *dd, u8 bit)
{
	u32 mask;

	udelay(1);

	mask = 1UL << dd->gpio_scl_num;

	/* SCL is meant to be open-drain, so never set "OUT", just DIR */
	dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask);

	/*
	 * Allow for slow slaves by simple
	 * delay for falling edge, sampling on rise.
	 */
	if (!bit)
		udelay(2);
	else {
		int rise_usec;

		for (rise_usec = SCL_WAIT_USEC; rise_usec > 0; rise_usec -= 2) {
			if (mask & dd->f_gpio_mod(dd, 0, 0, 0))
				break;
			udelay(2);
		}
		if (rise_usec <= 0)
			qib_dev_err(dd, "SCL interface stuck low > %d uSec\n",
				    SCL_WAIT_USEC);
	}
	i2c_wait_for_writes(dd);
}

static void sda_out(struct qib_devdata *dd, u8 bit)
{
	u32 mask;

	mask = 1UL << dd->gpio_sda_num;

	/* SDA is meant to be open-drain, so never set "OUT", just DIR */
	dd->f_gpio_mod(dd, 0, bit ? 0 : mask, mask);

	i2c_wait_for_writes(dd);
	udelay(2);
}

static u8 sda_in(struct qib_devdata *dd, int wait)
{
	int bnum;
	u32 read_val, mask;

	bnum = dd->gpio_sda_num;
	mask = (1UL << bnum);
	/* SDA is meant to be open-drain, so never set "OUT", just DIR */
	dd->f_gpio_mod(dd, 0, 0, mask);
	read_val = dd->f_gpio_mod(dd, 0, 0, 0);
	if (wait)
		i2c_wait_for_writes(dd);
	return (read_val & mask) >> bnum;
}

/**
 * i2c_ackrcv - see if ack following write is true
 * @dd: the qlogic_ib device
 */
static int i2c_ackrcv(struct qib_devdata *dd)
{
	u8 ack_received;

	/* AT ENTRY SCL = LOW */
	/* change direction, ignore data */
	ack_received = sda_in(dd, 1);
	scl_out(dd, 1);
	ack_received = sda_in(dd, 1) == 0;
	scl_out(dd, 0);
	return ack_received;
}

static void stop_cmd(struct qib_devdata *dd);

/**
 * rd_byte - read a byte, sending STOP on last, else ACK
 * @dd: the qlogic_ib device
 * @last: true if this is the final byte, so send STOP rather than ACK
 *
 * Returns byte shifted out of device
 */
static int rd_byte(struct qib_devdata *dd, int last)
{
	int bit_cntr, data;

	data = 0;

	for (bit_cntr = 7; bit_cntr >= 0; --bit_cntr) {
		data <<= 1;
		scl_out(dd, 1);
		data |= sda_in(dd, 0);
		scl_out(dd, 0);
	}
	if (last) {
		scl_out(dd, 1);
		stop_cmd(dd);
	} else {
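		/* ACK: drive SDA low for one clock pulse, then release it */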
		sda_out(dd, 0);
		scl_out(dd, 1);
		scl_out(dd, 0);
		sda_out(dd, 1);
	}
	return data;
}

/**
 * wr_byte - write a byte, one bit at a time
 * @dd: the qlogic_ib device
 * @data: the byte to write
 *
 * Returns 0 if we got the following ack, otherwise 1
 */
static int wr_byte(struct qib_devdata *dd, u8 data)
{
	int bit_cntr;
	u8 bit;

	for (bit_cntr = 7; bit_cntr >= 0; bit_cntr--) {
		bit = (data >> bit_cntr) & 1;
		sda_out(dd, bit);
		scl_out(dd, 1);
		scl_out(dd, 0);
	}
	return (!i2c_ackrcv(dd)) ? 1 : 0;
}

/*
 * issue TWSI start sequence:
 * (both clock/data high, clock high, data low while clock is high)
 */
static void start_seq(struct qib_devdata *dd)
{
	sda_out(dd, 1);
	scl_out(dd, 1);
	sda_out(dd, 0);
	udelay(1);
	scl_out(dd, 0);
}

/**
 * stop_seq - transmit the stop sequence
 * @dd: the qlogic_ib device
 *
 * (both clock/data low, clock high, data high while clock is high)
 */
static void stop_seq(struct qib_devdata *dd)
{
	scl_out(dd, 0);
	sda_out(dd, 0);
	scl_out(dd, 1);
	sda_out(dd, 1);
}

/**
 * stop_cmd - transmit the stop condition
 * @dd: the qlogic_ib device
 *
 * (both clock/data low, clock high, data high while clock is high)
 */
static void stop_cmd(struct qib_devdata *dd)
{
	stop_seq(dd);
	udelay(TWSI_BUF_WAIT_USEC);
}

/**
 * qib_twsi_reset - reset I2C communication
 * @dd: the qlogic_ib device
 *
 * Returns 0 if the bus was successfully freed (SDA observed high while
 * clocking), non-zero if the bus appears to be wedged.
 */
int qib_twsi_reset(struct qib_devdata *dd)
{
	int clock_cycles_left = 9;
	int was_high = 0;
	u32 pins, mask;

	/* Both SCL and SDA should be high. If not, there
	 * is something wrong.
	 */
	mask = (1UL << dd->gpio_scl_num) | (1UL << dd->gpio_sda_num);

	/*
	 * Force pins to desired innocuous state.
	 * This is the default power-on state with out=0 and dir=0,
	 * so the pins are tri-stated and should float high (barring HW
	 * problems).
	 */
	dd->f_gpio_mod(dd, 0, 0, mask);

	/*
	 * Clock nine times to get all listeners into a sane state.
	 * If SDA does not go high at any point, we are wedged.
	 * One vendor recommends then issuing START followed by STOP.
	 * We cannot use our "normal" functions to do that, because
	 * if SCL drops between them, another vendor's part will
	 * wedge, dropping SDA and keeping it low forever, at the end of
	 * the next transaction (even if it was not the device addressed).
	 * So our START and STOP take place with SCL held high.
	 */
	while (clock_cycles_left--) {
		scl_out(dd, 0);
		scl_out(dd, 1);
		/* Note if SDA is high, but keep clocking to sync slave */
		was_high |= sda_in(dd, 0);
	}

	if (was_high) {
		/*
		 * We saw a high, which we hope means the slave is sync'd.
		 * Issue START, STOP, pause for T_BUF.
		 */

		pins = dd->f_gpio_mod(dd, 0, 0, 0);
		if ((pins & mask) != mask)
			qib_dev_err(dd, "GPIO pins not at rest: %d\n",
				    pins & mask);
		/* Drop SDA to issue START */
		udelay(1); /* Guarantee .6 uSec setup */
		sda_out(dd, 0);
		udelay(1); /* Guarantee .6 uSec hold */
		/* At this point, SCL is high, SDA low. Raise SDA for STOP */
		sda_out(dd, 1);
		udelay(TWSI_BUF_WAIT_USEC);
	}

	return !was_high;
}

#define QIB_TWSI_START 0x100
#define QIB_TWSI_STOP 0x200

/* Write byte to TWSI, optionally prefixed with START or suffixed with
 * STOP.
 * returns 0 if OK (ACK received), else != 0
 */
static int qib_twsi_wr(struct qib_devdata *dd, int data, int flags)
{
	int ret = 1;

	if (flags & QIB_TWSI_START)
		start_seq(dd);

	ret = wr_byte(dd, data); /* Leaves SCL low (from i2c_ackrcv()) */

	if (flags & QIB_TWSI_STOP)
		stop_cmd(dd);
	return ret;
}

/* Added functionality for IBA7220-based cards */
#define QIB_TEMP_DEV 0x98

/*
 * qib_twsi_blk_rd
 * Formerly called qib_eeprom_internal_read, and only used for eeprom,
 * but now the general interface for data transfer from twsi devices.
 * One vestige of its former role is that it recognizes a device
 * QIB_TWSI_NO_DEV and does the correct operation for the legacy part,
 * which responded to all TWSI device codes, interpreting them as
 * address within device. On all other devices found on boards handled
 * by this driver, the device address is followed by a one-byte "address"
 * which selects the "register" or "offset" within the device from which
 * data should be read.
 */
int qib_twsi_blk_rd(struct qib_devdata *dd, int dev, int addr,
		    void *buffer, int len)
{
	int ret;
	u8 *bp = buffer;

	ret = 1;

	if (dev == QIB_TWSI_NO_DEV) {
		/* legacy not-really-I2C */
		addr = (addr << 1) | READ_CMD;
		ret = qib_twsi_wr(dd, addr, QIB_TWSI_START);
	} else {
		/* Actual I2C */
		ret = qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START);
		if (ret) {
			stop_cmd(dd);
			ret = 1;
			goto bail;
		}
		/*
		 * SFF spec claims we do _not_ stop after the addr
		 * but simply issue a start with the "read" dev-addr.
		 * Since we are implicitly waiting for ACK here,
		 * we need t_buf (nominally 20 uSec) before that start,
		 * and cannot rely on the delay built in to the STOP.
		 */
		ret = qib_twsi_wr(dd, addr, 0);
		udelay(TWSI_BUF_WAIT_USEC);

		if (ret) {
			qib_dev_err(dd,
				"Failed to write interface read addr %02X\n",
				addr);
			ret = 1;
			goto bail;
		}
		ret = qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START);
	}
	if (ret) {
		stop_cmd(dd);
		ret = 1;
		goto bail;
	}

	/*
	 * Block devices keep clocking data out as long as we ack,
	 * automatically incrementing the address. Some have "pages"
	 * whose boundaries will not be crossed, but the handling
	 * of these is left to the caller, who is in a better
	 * position to know.
	 */
	while (len-- > 0) {
		/*
		 * Get and store data, sending ACK if length remaining,
		 * else STOP
		 */
		*bp++ = rd_byte(dd, !len);
	}

	ret = 0;

bail:
	return ret;
}
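
/*
 * Illustrative usage only (real callers live in the chip-specific and QSFP
 * code; the 0xA0 device address and zero offset here are just the
 * conventional SFP/QSFP values, not something this file defines):
 *
 *	u8 id[4];
 *
 *	if (qib_twsi_blk_rd(dd, 0xA0, 0, id, sizeof(id)))
 *		qib_dev_err(dd, "QSFP identifier read failed\n");
 */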

/*
 * qib_twsi_blk_wr
 * Formerly called qib_eeprom_internal_write, and only used for eeprom,
 * but now the general interface for data transfer to twsi devices.
 * One vestige of its former role is that it recognizes a device
 * QIB_TWSI_NO_DEV and does the correct operation for the legacy part,
 * which responded to all TWSI device codes, interpreting them as
 * address within device. On all other devices found on boards handled
 * by this driver, the device address is followed by a one-byte "address"
 * which selects the "register" or "offset" within the device to which
 * data should be written.
 */
int qib_twsi_blk_wr(struct qib_devdata *dd, int dev, int addr,
		    const void *buffer, int len)
{
	int sub_len;
	const u8 *bp = buffer;
	int max_wait_time, i;
	int ret = 1;

	while (len > 0) {
		if (dev == QIB_TWSI_NO_DEV) {
			if (qib_twsi_wr(dd, (addr << 1) | WRITE_CMD,
					QIB_TWSI_START)) {
				goto failed_write;
			}
		} else {
			/* Real I2C */
			if (qib_twsi_wr(dd, dev | WRITE_CMD, QIB_TWSI_START))
				goto failed_write;
			ret = qib_twsi_wr(dd, addr, 0);
			if (ret) {
				qib_dev_err(dd,
					"Failed to write interface write addr %02X\n",
					addr);
				goto failed_write;
			}
		}

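		/*
		 * Write at most 4 bytes per burst, then stop and poll (below)
		 * until the device acknowledges again, i.e. its internal
		 * write cycle has completed.
		 */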
		sub_len = min(len, 4);
		addr += sub_len;
		len -= sub_len;

		for (i = 0; i < sub_len; i++)
			if (qib_twsi_wr(dd, *bp++, 0))
				goto failed_write;

		stop_cmd(dd);

		/*
		 * Wait for the write to complete by waiting for a successful
		 * read (the chip replies with a zero after the write
		 * cmd completes, and before it writes to the eeprom).
		 * The start cmd for the read will fail the ack until
		 * the writes have completed.  We do this inline to avoid
		 * the debug prints that are in the real read routine
		 * if the start cmd fails.
		 * We also use the proper device address, so it doesn't matter
		 * whether we have a real eeprom_dev. Legacy likes any address.
		 */
		max_wait_time = 100;
		while (qib_twsi_wr(dd, dev | READ_CMD, QIB_TWSI_START)) {
			stop_cmd(dd);
			if (!--max_wait_time)
				goto failed_write;
		}
		/* now read (and ignore) the resulting byte */
		rd_byte(dd, 1);
	}

	ret = 0;
	goto bail;

failed_write:
	stop_cmd(dd);
	ret = 1;

bail:
	return ret;
}
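
/*
 * Illustrative usage only (the offset and data are hypothetical; real
 * callers live in the eeprom and chip-specific code):
 *
 *	u8 val[2] = { 0x12, 0x34 };
 *
 *	if (qib_twsi_blk_wr(dd, QIB_TWSI_NO_DEV, 0x10, val, sizeof(val)))
 *		qib_dev_err(dd, "legacy EEPROM write failed\n");
 */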