// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2011-2016 Synaptics Incorporated
 * Copyright (c) 2011 Unixphere
 */

#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/rmi.h>
#include <linux/slab.h>
#include <linux/spi/spi.h>
#include <linux/of.h>
#include "rmi_driver.h"

#define RMI_SPI_DEFAULT_XFER_BUF_SIZE	64

#define RMI_PAGE_SELECT_REGISTER	0x00FF
#define RMI_SPI_PAGE(addr)		(((addr) >> 8) & 0x80)
#define RMI_SPI_XFER_SIZE_LIMIT		255

#define BUFFER_SIZE_INCREMENT 32

enum rmi_spi_op {
	RMI_SPI_WRITE = 0,
	RMI_SPI_READ,
	RMI_SPI_V2_READ_UNIFIED,
	RMI_SPI_V2_READ_SPLIT,
	RMI_SPI_V2_WRITE,
};

struct rmi_spi_cmd {
	enum rmi_spi_op op;
	u16 addr;
};

struct rmi_spi_xport {
	struct rmi_transport_dev xport;
	struct spi_device *spi;

	struct mutex page_mutex;
	int page;

	u8 *rx_buf;
	u8 *tx_buf;
	int xfer_buf_size;

	struct spi_transfer *rx_xfers;
	struct spi_transfer *tx_xfers;
	int rx_xfer_count;
	int tx_xfer_count;
};

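/*
 * rmi_spi_manage_pools - (re)allocate the transfer buffers and, when per-byte
 * delays are requested, a pool of single-byte spi_transfer descriptors.
 *
 * The rx and tx buffers are carved out of one devm allocation of 2 * buf_size
 * bytes (rx first, tx immediately after), with buf_size doubled until it
 * covers @len and capped at RMI_SPI_XFER_SIZE_LIMIT. Illustrative layout for
 * the default 64-byte size:
 *
 *	buf[0 .. 63]	-> rmi_spi->rx_buf
 *	buf[64 .. 127]	-> rmi_spi->tx_buf
 *
 * The function may be called repeatedly; the previous buffers are freed once
 * the new ones are in place.
 */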
static int rmi_spi_manage_pools(struct rmi_spi_xport *rmi_spi, int len)
{
	struct spi_device *spi = rmi_spi->spi;
	int buf_size = rmi_spi->xfer_buf_size
		? rmi_spi->xfer_buf_size : RMI_SPI_DEFAULT_XFER_BUF_SIZE;
	struct spi_transfer *xfer_buf;
	void *buf;
	void *tmp;

	while (buf_size < len)
		buf_size *= 2;

	if (buf_size > RMI_SPI_XFER_SIZE_LIMIT)
		buf_size = RMI_SPI_XFER_SIZE_LIMIT;

	tmp = rmi_spi->rx_buf;
	buf = devm_kcalloc(&spi->dev, buf_size, 2,
				GFP_KERNEL | GFP_DMA);
	if (!buf)
		return -ENOMEM;

	rmi_spi->rx_buf = buf;
	rmi_spi->tx_buf = &rmi_spi->rx_buf[buf_size];
	rmi_spi->xfer_buf_size = buf_size;

	if (tmp)
		devm_kfree(&spi->dev, tmp);

	if (rmi_spi->xport.pdata.spi_data.read_delay_us)
		rmi_spi->rx_xfer_count = buf_size;
	else
		rmi_spi->rx_xfer_count = 1;

	if (rmi_spi->xport.pdata.spi_data.write_delay_us)
		rmi_spi->tx_xfer_count = buf_size;
	else
		rmi_spi->tx_xfer_count = 1;

	/*
	 * Allocate a pool of spi_transfer buffers for devices which need
	 * per byte delays.
	 */
	tmp = rmi_spi->rx_xfers;
	xfer_buf = devm_kcalloc(&spi->dev,
		rmi_spi->rx_xfer_count + rmi_spi->tx_xfer_count,
		sizeof(struct spi_transfer),
		GFP_KERNEL);
	if (!xfer_buf)
		return -ENOMEM;

	rmi_spi->rx_xfers = xfer_buf;
	rmi_spi->tx_xfers = &xfer_buf[rmi_spi->rx_xfer_count];

	if (tmp)
		devm_kfree(&spi->dev, tmp);

	return 0;
}

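/*
 * rmi_spi_xfer - build and run one SPI message for a single RMI command.
 *
 * For the RMI_SPI_READ/RMI_SPI_WRITE ops the command header is two bytes:
 * the register address high byte (bit 7 set for a read) followed by the
 * address low byte, then the payload. For example, reading 4 bytes from
 * register 0x0004 first clocks out the header 0x80 0x04 and then clocks in
 * 4 bytes in a separate rx transfer. The V2 ops reserve a four-byte header;
 * only RMI_SPI_V2_WRITE is encoded here.
 *
 * When read_delay_us/write_delay_us are set in the platform data, every byte
 * goes out as its own spi_transfer (from the pools allocated above) so an
 * inter-byte delay can be inserted; otherwise a single transfer covers the
 * whole buffer.
 */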
static int rmi_spi_xfer(struct rmi_spi_xport *rmi_spi,
			const struct rmi_spi_cmd *cmd, const u8 *tx_buf,
			int tx_len, u8 *rx_buf, int rx_len)
{
	struct spi_device *spi = rmi_spi->spi;
	struct rmi_device_platform_data_spi *spi_data =
					&rmi_spi->xport.pdata.spi_data;
	struct spi_message msg;
	struct spi_transfer *xfer;
	int ret = 0;
	int len;
	int cmd_len = 0;
	int total_tx_len;
	int i;
	u16 addr = cmd->addr;

	spi_message_init(&msg);

	switch (cmd->op) {
	case RMI_SPI_WRITE:
	case RMI_SPI_READ:
		cmd_len += 2;
		break;
	case RMI_SPI_V2_READ_UNIFIED:
	case RMI_SPI_V2_READ_SPLIT:
	case RMI_SPI_V2_WRITE:
		cmd_len += 4;
		break;
	}

	total_tx_len = cmd_len + tx_len;
	len = max(total_tx_len, rx_len);

	if (len > RMI_SPI_XFER_SIZE_LIMIT)
		return -EINVAL;

	if (rmi_spi->xfer_buf_size < len) {
		ret = rmi_spi_manage_pools(rmi_spi, len);
		if (ret < 0)
			return ret;
	}

	if (addr == 0)
		/*
		 * SPI needs an address. Use 0x7FF if we want to keep
		 * reading from the last position of the register pointer.
		 */
		addr = 0x7FF;

	switch (cmd->op) {
	case RMI_SPI_WRITE:
		rmi_spi->tx_buf[0] = (addr >> 8);
		rmi_spi->tx_buf[1] = addr & 0xFF;
		break;
	case RMI_SPI_READ:
		rmi_spi->tx_buf[0] = (addr >> 8) | 0x80;
		rmi_spi->tx_buf[1] = addr & 0xFF;
		break;
	case RMI_SPI_V2_READ_UNIFIED:
		break;
	case RMI_SPI_V2_READ_SPLIT:
		break;
	case RMI_SPI_V2_WRITE:
		rmi_spi->tx_buf[0] = 0x40;
		rmi_spi->tx_buf[1] = (addr >> 8) & 0xFF;
		rmi_spi->tx_buf[2] = addr & 0xFF;
		rmi_spi->tx_buf[3] = tx_len;
		break;
	}

	if (tx_buf)
		memcpy(&rmi_spi->tx_buf[cmd_len], tx_buf, tx_len);

	if (rmi_spi->tx_xfer_count > 1) {
		for (i = 0; i < total_tx_len; i++) {
			xfer = &rmi_spi->tx_xfers[i];
			memset(xfer, 0, sizeof(struct spi_transfer));
			xfer->tx_buf = &rmi_spi->tx_buf[i];
			xfer->len = 1;
			xfer->delay_usecs = spi_data->write_delay_us;
			spi_message_add_tail(xfer, &msg);
		}
	} else {
		xfer = rmi_spi->tx_xfers;
		memset(xfer, 0, sizeof(struct spi_transfer));
		xfer->tx_buf = rmi_spi->tx_buf;
		xfer->len = total_tx_len;
		spi_message_add_tail(xfer, &msg);
	}

	rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: cmd: %s tx_buf len: %d tx_buf: %*ph\n",
		__func__, cmd->op == RMI_SPI_WRITE ? "WRITE" : "READ",
		total_tx_len, total_tx_len, rmi_spi->tx_buf);

	if (rx_buf) {
		if (rmi_spi->rx_xfer_count > 1) {
			for (i = 0; i < rx_len; i++) {
				xfer = &rmi_spi->rx_xfers[i];
				memset(xfer, 0, sizeof(struct spi_transfer));
				xfer->rx_buf = &rmi_spi->rx_buf[i];
				xfer->len = 1;
				xfer->delay_usecs = spi_data->read_delay_us;
				spi_message_add_tail(xfer, &msg);
			}
		} else {
			xfer = rmi_spi->rx_xfers;
			memset(xfer, 0, sizeof(struct spi_transfer));
			xfer->rx_buf = rmi_spi->rx_buf;
			xfer->len = rx_len;
			spi_message_add_tail(xfer, &msg);
		}
	}

	ret = spi_sync(spi, &msg);
	if (ret < 0) {
		dev_err(&spi->dev, "spi xfer failed: %d\n", ret);
		return ret;
	}

	if (rx_buf) {
		memcpy(rx_buf, rmi_spi->rx_buf, rx_len);
		rmi_dbg(RMI_DEBUG_XPORT, &spi->dev, "%s: (%d) %*ph\n",
			__func__, rx_len, rx_len, rx_buf);
	}

	return 0;
}

/*
 * rmi_set_page - Set RMI page
 * @rmi_spi: The pointer to the rmi_spi_xport struct
 * @page: The new page address.
 *
 * RMI devices have 16-bit addressing, but some of the transport
 * implementations (like SMBus) only have 8-bit addressing. RMI therefore
 * places a page select register at 0xff of every page so that the full
 * address space can be reached reliably in 256-register pages.
 *
 * The page_mutex lock must be held when this function is entered.
 *
 * Returns zero on success, non-zero on failure.
 */
static int rmi_set_page(struct rmi_spi_xport *rmi_spi, u8 page)
{
	struct rmi_spi_cmd cmd;
	int ret;

	cmd.op = RMI_SPI_WRITE;
	cmd.addr = RMI_PAGE_SELECT_REGISTER;

	ret = rmi_spi_xfer(rmi_spi, &cmd, &page, 1, NULL, 0);

	/* Only update the cached page once the write has actually succeeded. */
	if (!ret)
		rmi_spi->page = page;

	return ret;
}

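/*
 * Block accessors exported to the RMI4 core. Both take page_mutex, switch the
 * page select register when the target address lies on a different page than
 * the cached one, and then issue a single read or write command for the whole
 * block.
 */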
static int rmi_spi_write_block(struct rmi_transport_dev *xport, u16 addr,
			       const void *buf, size_t len)
{
	struct rmi_spi_xport *rmi_spi =
		container_of(xport, struct rmi_spi_xport, xport);
	struct rmi_spi_cmd cmd;
	int ret;

	mutex_lock(&rmi_spi->page_mutex);

	if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
		ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
		if (ret)
			goto exit;
	}

	cmd.op = RMI_SPI_WRITE;
	cmd.addr = addr;

	ret = rmi_spi_xfer(rmi_spi, &cmd, buf, len, NULL, 0);

exit:
	mutex_unlock(&rmi_spi->page_mutex);
	return ret;
}

static int rmi_spi_read_block(struct rmi_transport_dev *xport, u16 addr,
			      void *buf, size_t len)
{
	struct rmi_spi_xport *rmi_spi =
		container_of(xport, struct rmi_spi_xport, xport);
	struct rmi_spi_cmd cmd;
	int ret;

	mutex_lock(&rmi_spi->page_mutex);

	if (RMI_SPI_PAGE(addr) != rmi_spi->page) {
		ret = rmi_set_page(rmi_spi, RMI_SPI_PAGE(addr));
		if (ret)
			goto exit;
	}

	cmd.op = RMI_SPI_READ;
	cmd.addr = addr;

	ret = rmi_spi_xfer(rmi_spi, &cmd, NULL, 0, buf, len);

exit:
	mutex_unlock(&rmi_spi->page_mutex);
	return ret;
}

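/*
 * Transport operations plugged into the RMI4 core; all register access for
 * the functions discovered on the sensor goes through these two callbacks.
 */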
static const struct rmi_transport_ops rmi_spi_ops = {
	.write_block	= rmi_spi_write_block,
	.read_block	= rmi_spi_read_block,
};

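/*
 * Device tree support. The optional "spi-rx-delay-us" and "spi-tx-delay-us"
 * properties provide the per-byte delays consumed by rmi_spi_manage_pools()
 * above. An illustrative node (pins, frequency and interrupt wiring are board
 * specific and shown here only as an example) might look like:
 *
 *	rmi4@0 {
 *		compatible = "syna,rmi4-spi";
 *		reg = <0>;
 *		spi-max-frequency = <4000000>;
 *		interrupt-parent = <&gpio>;
 *		interrupts = <4 IRQ_TYPE_LEVEL_LOW>;
 *		spi-rx-delay-us = <30>;
 *		spi-tx-delay-us = <30>;
 *	};
 */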
#ifdef CONFIG_OF
static int rmi_spi_of_probe(struct spi_device *spi,
			struct rmi_device_platform_data *pdata)
{
	struct device *dev = &spi->dev;
	int retval;

	retval = rmi_of_property_read_u32(dev,
			&pdata->spi_data.read_delay_us,
			"spi-rx-delay-us", 1);
	if (retval)
		return retval;

	retval = rmi_of_property_read_u32(dev,
			&pdata->spi_data.write_delay_us,
			"spi-tx-delay-us", 1);
	if (retval)
		return retval;

	return 0;
}

static const struct of_device_id rmi_spi_of_match[] = {
	{ .compatible = "syna,rmi4-spi" },
	{},
};
MODULE_DEVICE_TABLE(of, rmi_spi_of_match);
#else
static inline int rmi_spi_of_probe(struct spi_device *spi,
				struct rmi_device_platform_data *pdata)
{
	return -ENODEV;
}
#endif

static void rmi_spi_unregister_transport(void *data)
{
	struct rmi_spi_xport *rmi_spi = data;

	rmi_unregister_transport_device(&rmi_spi->xport);
}

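/*
 * Probe: everything is allocated with devm_*, and the registered transport is
 * torn down through a devm action, so no .remove callback is needed. Platform
 * data comes from the device tree when an of_node is present, otherwise from
 * spi->dev.platform_data.
 */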
static int rmi_spi_probe(struct spi_device *spi)
{
	struct rmi_spi_xport *rmi_spi;
	struct rmi_device_platform_data *pdata;
	struct rmi_device_platform_data *spi_pdata = spi->dev.platform_data;
	int error;

	if (spi->master->flags & SPI_MASTER_HALF_DUPLEX)
		return -EINVAL;

	rmi_spi = devm_kzalloc(&spi->dev, sizeof(struct rmi_spi_xport),
			GFP_KERNEL);
	if (!rmi_spi)
		return -ENOMEM;

	pdata = &rmi_spi->xport.pdata;

	if (spi->dev.of_node) {
		error = rmi_spi_of_probe(spi, pdata);
		if (error)
			return error;
	} else if (spi_pdata) {
		*pdata = *spi_pdata;
	}

	if (pdata->spi_data.bits_per_word)
		spi->bits_per_word = pdata->spi_data.bits_per_word;

	if (pdata->spi_data.mode)
		spi->mode = pdata->spi_data.mode;

	error = spi_setup(spi);
	if (error < 0) {
		dev_err(&spi->dev, "spi_setup failed!\n");
		return error;
	}

	pdata->irq = spi->irq;

	rmi_spi->spi = spi;
	mutex_init(&rmi_spi->page_mutex);

	rmi_spi->xport.dev = &spi->dev;
	rmi_spi->xport.proto_name = "spi";
	rmi_spi->xport.ops = &rmi_spi_ops;

	spi_set_drvdata(spi, rmi_spi);

	error = rmi_spi_manage_pools(rmi_spi, RMI_SPI_DEFAULT_XFER_BUF_SIZE);
	if (error)
		return error;

	/*
	 * Setting the page to zero will (a) make sure the PSR is in a
	 * known state, and (b) make sure we can talk to the device.
	 */
	error = rmi_set_page(rmi_spi, 0);
	if (error) {
		dev_err(&spi->dev, "Failed to set page select to 0.\n");
		return error;
	}

	dev_info(&spi->dev, "registering SPI-connected sensor\n");

	error = rmi_register_transport_device(&rmi_spi->xport);
	if (error) {
		dev_err(&spi->dev, "failed to register sensor: %d\n", error);
		return error;
	}

	error = devm_add_action_or_reset(&spi->dev,
					  rmi_spi_unregister_transport,
					  rmi_spi);
	if (error)
		return error;

	return 0;
}

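/*
 * Power management: the system sleep handlers call rmi_driver_suspend()/
 * rmi_driver_resume() with the second argument set to true, the runtime PM
 * handlers with it set to false. The runtime callbacks only warn on failure
 * and still return 0.
 */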
#ifdef CONFIG_PM_SLEEP
static int rmi_spi_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, true);
	if (ret)
		dev_warn(dev, "Failed to suspend device: %d\n", ret);

	return ret;
}

static int rmi_spi_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, true);
	if (ret)
		dev_warn(dev, "Failed to resume device: %d\n", ret);

	return ret;
}
#endif

#ifdef CONFIG_PM
static int rmi_spi_runtime_suspend(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_suspend(rmi_spi->xport.rmi_dev, false);
	if (ret)
		dev_warn(dev, "Failed to suspend device: %d\n", ret);

	return 0;
}

static int rmi_spi_runtime_resume(struct device *dev)
{
	struct spi_device *spi = to_spi_device(dev);
	struct rmi_spi_xport *rmi_spi = spi_get_drvdata(spi);
	int ret;

	ret = rmi_driver_resume(rmi_spi->xport.rmi_dev, false);
	if (ret)
		dev_warn(dev, "Failed to resume device: %d\n", ret);

	return 0;
}
#endif

static const struct dev_pm_ops rmi_spi_pm = {
	SET_SYSTEM_SLEEP_PM_OPS(rmi_spi_suspend, rmi_spi_resume)
	SET_RUNTIME_PM_OPS(rmi_spi_runtime_suspend, rmi_spi_runtime_resume,
			   NULL)
};

static const struct spi_device_id rmi_id[] = {
	{ "rmi4_spi", 0 },
	{ }
};
MODULE_DEVICE_TABLE(spi, rmi_id);

static struct spi_driver rmi_spi_driver = {
	.driver = {
		.name	= "rmi4_spi",
		.pm	= &rmi_spi_pm,
		.of_match_table = of_match_ptr(rmi_spi_of_match),
	},
	.id_table	= rmi_id,
	.probe		= rmi_spi_probe,
};

module_spi_driver(rmi_spi_driver);

MODULE_AUTHOR("Christopher Heiny <cheiny@synaptics.com>");
MODULE_AUTHOR("Andrew Duggan <aduggan@synaptics.com>");
MODULE_DESCRIPTION("RMI SPI driver");
MODULE_LICENSE("GPL");