// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Exceet Electronics GmbH
 * Copyright (C) 2018 Bootlin
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */

#ifndef __UBOOT__
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include "internals.h"
#else
#include <spi.h>
#include <spi-mem.h>
#endif

#ifndef __UBOOT__
/**
 * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
 *					  memory operation
 * @ctlr: the SPI controller requesting this dma_map()
 * @op: the memory operation containing the buffer to map
 * @sgt: a pointer to an uninitialized sg_table that will be filled by this
 *	 function
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares everything for you and provides a ready-to-use
 * sg_table. This function is not intended to be called from SPI drivers.
 * Only SPI controller drivers should use it.
 * Note that the caller must ensure the memory region pointed to by
 * op->data.buf.{in,out} is DMA-able before calling this function.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
				       const struct spi_mem_op *op,
				       struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return -EINVAL;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	if (!dmadev)
		return -EINVAL;

	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
			   op->data.dir == SPI_MEM_DATA_IN ?
			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);

/**
 * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
 *					    memory operation
 * @ctlr: the SPI controller requesting this dma_unmap()
 * @op: the memory operation containing the buffer to unmap
 * @sgt: a pointer to an sg_table previously initialized by
 *	 spi_controller_dma_map_mem_op_data()
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares things so that the CPU can access the
 * op->data.buf.{in,out} buffer again.
 *
 * This function is not intended to be called from SPI drivers. Only SPI
 * controller drivers should use it.
 *
 * This function should be called after the DMA operation has finished and is
 * only valid if the previous spi_controller_dma_map_mem_op_data() call
 * returned 0.
 */
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
					  const struct spi_mem_op *op,
					  struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	spi_unmap_buf(ctlr, dmadev, sgt,
		      op->data.dir == SPI_MEM_DATA_IN ?
		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
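
/*
 * Typical pairing of the two helpers above in a controller driver's
 * exec_op() path. This is an illustrative sketch only; the surrounding
 * exec_op() implementation and DMA-engine programming are assumed:
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = spi_controller_dma_map_mem_op_data(ctlr, op, &sgt);
 *	if (ret)
 *		return ret;
 *	// ...program the DMA engine using sgt.sgl / sgt.nents...
 *	spi_controller_dma_unmap_mem_op_data(ctlr, op, &sgt);
 */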
#endif /* __UBOOT__ */

static int spi_check_buswidth_req(struct spi_slave *slave, u8 buswidth, bool tx)
{
	u32 mode = slave->mode;

	switch (buswidth) {
	case 1:
		return 0;

	case 2:
		if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
		    (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))
			return 0;

		break;

	case 4:
		if ((tx && (mode & SPI_TX_QUAD)) ||
		    (!tx && (mode & SPI_RX_QUAD)))
			return 0;

		break;

	case 8:
		if ((tx && (mode & SPI_TX_OCTAL)) ||
		    (!tx && (mode & SPI_RX_OCTAL)))
			return 0;

		break;

	default:
		break;
	}

	return -ENOTSUPP;
}
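
/*
 * For example, the data phase of a quad-output fast read (1-1-4) has
 * buswidth 4 and is accepted only when the slave advertises SPI_RX_QUAD.
 * A dual-width phase (buswidth 2) is also satisfied by a quad-capable bus,
 * which is why case 2 above checks both the DUAL and QUAD flags.
 */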

bool spi_mem_default_supports_op(struct spi_slave *slave,
				 const struct spi_mem_op *op)
{
	if (spi_check_buswidth_req(slave, op->cmd.buswidth, true))
		return false;

	if (op->addr.nbytes &&
	    spi_check_buswidth_req(slave, op->addr.buswidth, true))
		return false;

	if (op->dummy.nbytes &&
	    spi_check_buswidth_req(slave, op->dummy.buswidth, true))
		return false;

	if (op->data.nbytes &&
	    spi_check_buswidth_req(slave, op->data.buswidth,
				   op->data.dir == SPI_MEM_DATA_OUT))
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);

/**
 * spi_mem_supports_op() - Check if a memory device and the controller it is
 *			   connected to support a specific memory operation
 * @slave: the SPI device
 * @op: the memory operation to check
 *
 * Some controllers only support Single or Dual I/O, others might only support
 * specific opcodes. It can even be that the controller and device both
 * support Quad I/O but the hardware prevents you from using it because only
 * 2 I/O lines are connected.
 *
 * This function checks whether a specific operation is supported.
 *
 * Return: true if @op is supported, false otherwise.
 */
bool spi_mem_supports_op(struct spi_slave *slave,
			 const struct spi_mem_op *op)
{
	struct udevice *bus = slave->dev->parent;
	struct dm_spi_ops *ops = spi_get_ops(bus);

	if (ops->mem_ops && ops->mem_ops->supports_op)
		return ops->mem_ops->supports_op(slave, op);

	return spi_mem_default_supports_op(slave, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
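
/*
 * Example (sketch): probing whether a 1-1-4 quad-output read is usable,
 * built with the SPI_MEM_OP helpers from spi-mem.h. Opcode 0x6b, the
 * 3-byte address and the single dummy byte are illustrative values for a
 * typical SPI-NOR fast read; "len" and "buf" are assumed caller-provided:
 *
 *	struct spi_mem_op op =
 *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
 *			   SPI_MEM_OP_ADDR(3, 0, 1),
 *			   SPI_MEM_OP_DUMMY(1, 1),
 *			   SPI_MEM_OP_DATA_IN(len, buf, 4));
 *
 *	if (!spi_mem_supports_op(slave, &op))
 *		// ...fall back to a single-bit read op...
 */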

/**
 * spi_mem_exec_op() - Execute a memory operation
 * @slave: the SPI device
 * @op: the memory operation to execute
 *
 * Executes a memory operation.
 *
 * This function first checks that @op is supported and then tries to execute
 * it.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_exec_op(struct spi_slave *slave, const struct spi_mem_op *op)
{
	struct udevice *bus = slave->dev->parent;
	struct dm_spi_ops *ops = spi_get_ops(bus);
	unsigned int pos = 0;
	const u8 *tx_buf = NULL;
	u8 *rx_buf = NULL;
	int op_len;
	u32 flag;
	int ret;
	int i;

	if (!spi_mem_supports_op(slave, op))
		return -ENOTSUPP;

	ret = spi_claim_bus(slave);
	if (ret < 0)
		return ret;

	if (ops->mem_ops && ops->mem_ops->exec_op) {
#ifndef __UBOOT__
		/*
		 * Flush the message queue before executing our SPI memory
		 * operation to prevent preemption of regular SPI transfers.
		 */
		spi_flush_queue(ctlr);

		if (ctlr->auto_runtime_pm) {
			ret = pm_runtime_get_sync(ctlr->dev.parent);
			if (ret < 0) {
				dev_err(&ctlr->dev,
					"Failed to power device: %d\n",
					ret);
				return ret;
			}
		}

		mutex_lock(&ctlr->bus_lock_mutex);
		mutex_lock(&ctlr->io_mutex);
#endif
		ret = ops->mem_ops->exec_op(slave, op);

#ifndef __UBOOT__
		mutex_unlock(&ctlr->io_mutex);
		mutex_unlock(&ctlr->bus_lock_mutex);

		if (ctlr->auto_runtime_pm)
			pm_runtime_put(ctlr->dev.parent);
#endif

		/*
		 * Some controllers only optimize specific paths (typically the
		 * read path) and expect the core to use the regular SPI
		 * interface in other cases.
		 */
		if (!ret || ret != -ENOTSUPP) {
			spi_release_bus(slave);
			return ret;
		}
	}
#ifndef __UBOOT__
	/*
	 * Locals used only by this Linux-only fallback path; they match the
	 * declarations in the upstream Linux implementation this block was
	 * copied from, which the copy here had dropped.
	 */
	struct spi_transfer xfers[4] = { };
	struct spi_message msg;
	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
	u8 *tmpbuf;

	tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
		     op->dummy.nbytes;

	/*
	 * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
	 * we're guaranteed that this buffer is DMA-able, as required by the
	 * SPI layer.
	 */
	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
	if (!tmpbuf)
		return -ENOMEM;

	spi_message_init(&msg);

	tmpbuf[0] = op->cmd.opcode;
	xfers[xferpos].tx_buf = tmpbuf;
	xfers[xferpos].len = sizeof(op->cmd.opcode);
	xfers[xferpos].tx_nbits = op->cmd.buswidth;
	spi_message_add_tail(&xfers[xferpos], &msg);
	xferpos++;
	totalxferlen++;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tmpbuf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));

		xfers[xferpos].tx_buf = tmpbuf + 1;
		xfers[xferpos].len = op->addr.nbytes;
		xfers[xferpos].tx_nbits = op->addr.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->addr.nbytes;
	}

	if (op->dummy.nbytes) {
		memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
		xfers[xferpos].len = op->dummy.nbytes;
		xfers[xferpos].tx_nbits = op->dummy.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->dummy.nbytes;
	}

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN) {
			xfers[xferpos].rx_buf = op->data.buf.in;
			xfers[xferpos].rx_nbits = op->data.buswidth;
		} else {
			xfers[xferpos].tx_buf = op->data.buf.out;
			xfers[xferpos].tx_nbits = op->data.buswidth;
		}

		xfers[xferpos].len = op->data.nbytes;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->data.nbytes;
	}

	ret = spi_sync(slave, &msg);

	kfree(tmpbuf);

	if (ret)
		return ret;

	if (msg.actual_length != totalxferlen)
		return -EIO;
#else

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN)
			rx_buf = op->data.buf.in;
		else
			tx_buf = op->data.buf.out;
	}

	op_len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;

	/*
	 * Avoid using malloc() here so that we can use this code in SPL where
	 * simple malloc may be used. That implementation does not allow free()
	 * so repeated calls to this code can exhaust the space.
	 *
	 * The value of op_len is small, since it does not include the actual
	 * data being sent, only the op-code and address. In fact, it should be
	 * possible to just use a small fixed value here instead of op_len.
	 */
	u8 op_buf[op_len];

	op_buf[pos++] = op->cmd.opcode;

	if (op->addr.nbytes) {
		for (i = 0; i < op->addr.nbytes; i++)
			op_buf[pos + i] = op->addr.val >>
				(8 * (op->addr.nbytes - i - 1));

		pos += op->addr.nbytes;
	}

	if (op->dummy.nbytes)
		memset(op_buf + pos, 0xff, op->dummy.nbytes);

	/* 1st transfer: opcode + address + dummy cycles */
	flag = SPI_XFER_BEGIN;
	/* Make sure to set END bit if no tx or rx data messages follow */
	if (!tx_buf && !rx_buf)
		flag |= SPI_XFER_END;

	ret = spi_xfer(slave, op_len * 8, op_buf, NULL, flag);
	if (ret) {
		/* Release the bus so a failed transfer doesn't leak the claim */
		spi_release_bus(slave);
		return ret;
	}

	/* 2nd transfer: rx or tx data path */
	if (tx_buf || rx_buf) {
		flag = SPI_XFER_END;
		if (slave->mode & SPI_DMA_PREPARE)
			flag |= SPI_XFER_PREPARE;

		ret = spi_xfer(slave, op->data.nbytes * 8, tx_buf,
			       rx_buf, flag);
		if (ret) {
			spi_release_bus(slave);
			return ret;
		}
	}

	spi_release_bus(slave);

	for (i = 0; i < pos; i++)
		debug("%02x ", op_buf[i]);
	debug("| [%dB %s] ",
	      tx_buf || rx_buf ? op->data.nbytes : 0,
	      tx_buf || rx_buf ? (tx_buf ? "out" : "in") : "-");
	for (i = 0; i < op->data.nbytes; i++)
		debug("%02x ", tx_buf ? tx_buf[i] : rx_buf[i]);
	debug("[ret %d]\n", ret);

	if (ret < 0)
		return ret;
#endif /* __UBOOT__ */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_exec_op);
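
/*
 * Example (sketch): reading a JEDEC ID with a 0x9f RDID-style operation.
 * The 3-byte response length is illustrative and not tied to a specific
 * flash part:
 *
 *	u8 id[3];
 *	struct spi_mem_op op = SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
 *					  SPI_MEM_OP_NO_ADDR,
 *					  SPI_MEM_OP_NO_DUMMY,
 *					  SPI_MEM_OP_DATA_IN(3, id, 1));
 *	int ret = spi_mem_exec_op(slave, &op);
 */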

/**
 * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
 *			      match controller limitations
 * @slave: the SPI device
 * @op: the operation to adjust
 *
 * Some controllers have FIFO limitations and must split a data transfer
 * operation into multiple ones, others require a specific alignment for
 * optimized accesses. This function allows SPI mem drivers to split a single
 * operation into multiple sub-operations when required.
 *
 * Return: a negative error code if the controller can't properly adjust @op,
 *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
 *	   can't be handled in a single step.
 */
int spi_mem_adjust_op_size(struct spi_slave *slave, struct spi_mem_op *op)
{
	struct udevice *bus = slave->dev->parent;
	struct dm_spi_ops *ops = spi_get_ops(bus);

	if (ops->mem_ops && ops->mem_ops->adjust_op_size)
		return ops->mem_ops->adjust_op_size(slave, op);

	if (!ops->mem_ops || !ops->mem_ops->exec_op) {
		unsigned int len;

		len = sizeof(op->cmd.opcode) + op->addr.nbytes +
			op->dummy.nbytes;
		if (slave->max_write_size && len > slave->max_write_size)
			return -EINVAL;

		if (op->data.dir == SPI_MEM_DATA_IN && slave->max_read_size)
			op->data.nbytes = min(op->data.nbytes,
					      slave->max_read_size);
		else if (slave->max_write_size)
			op->data.nbytes = min(op->data.nbytes,
					      slave->max_write_size - len);

		if (!op->data.nbytes)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
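
/*
 * Example (sketch): splitting a large read into controller-sized chunks by
 * re-adjusting the operation before each spi_mem_exec_op() call. "from",
 * "buf" and "len" are assumed caller-provided, and "op" is a read op as in
 * the earlier examples:
 *
 *	while (len) {
 *		op.addr.val = from;
 *		op.data.buf.in = buf;
 *		op.data.nbytes = len;
 *		ret = spi_mem_adjust_op_size(slave, &op);
 *		if (ret)
 *			return ret;
 *		ret = spi_mem_exec_op(slave, &op);
 *		if (ret)
 *			return ret;
 *		from += op.data.nbytes;
 *		buf += op.data.nbytes;
 *		len -= op.data.nbytes;
 *	}
 */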

#ifndef __UBOOT__
static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
	return container_of(drv, struct spi_mem_driver, spidrv.driver);
}

static int spi_mem_probe(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem;

	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->spi = spi;
	spi_set_drvdata(spi, mem);

	return memdrv->probe(mem);
}

static int spi_mem_remove(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->remove)
		return memdrv->remove(mem);

	return 0;
}

static void spi_mem_shutdown(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->shutdown)
		memdrv->shutdown(mem);
}

/**
 * spi_mem_driver_register_with_owner() - Register a SPI memory driver
 * @memdrv: the SPI memory driver to register
 * @owner: the owner of this driver
 *
 * Registers a SPI memory driver.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
				       struct module *owner)
{
	memdrv->spidrv.probe = spi_mem_probe;
	memdrv->spidrv.remove = spi_mem_remove;
	memdrv->spidrv.shutdown = spi_mem_shutdown;

	return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
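
/*
 * Example (sketch): a minimal SPI memory driver registration. In the Linux
 * tree this is usually wrapped by the module_spi_mem_driver() helper macro;
 * the driver name and probe callback here are hypothetical:
 *
 *	static int my_mem_probe(struct spi_mem *mem)
 *	{
 *		return 0;
 *	}
 *
 *	static struct spi_mem_driver my_mem_driver = {
 *		.spidrv = {
 *			.driver = { .name = "my-spi-mem" },
 *		},
 *		.probe = my_mem_probe,
 *	};
 *	module_spi_mem_driver(my_mem_driver);
 */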

/**
 * spi_mem_driver_unregister() - Unregister a SPI memory driver
 * @memdrv: the SPI memory driver to unregister
 *
 * Unregisters a SPI memory driver.
 */
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
	spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);
#endif /* __UBOOT__ */