// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Exceet Electronics GmbH
 * Copyright (C) 2018 Bootlin
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */

#ifndef __UBOOT__
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include "internals.h"
#else
#include <spi.h>
#include <spi-mem.h>
#endif

#ifndef __UBOOT__
/**
 * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
 *					  memory operation
 * @ctlr: the SPI controller requesting this dma_map()
 * @op: the memory operation containing the buffer to map
 * @sgt: a pointer to a non-initialized sg_table that will be filled by this
 *	 function
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares everything for you and provides a ready-to-use
 * sg_table. This function is not intended to be called from SPI drivers.
 * Only SPI controller drivers should use it.
 * Note that the caller must ensure the memory region pointed to by
 * op->data.buf.{in,out} is DMA-able before calling this function.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
				       const struct spi_mem_op *op,
				       struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return -EINVAL;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	if (!dmadev)
		return -EINVAL;

	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
			   op->data.dir == SPI_MEM_DATA_IN ?
			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);

/**
 * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
 *					    memory operation
 * @ctlr: the SPI controller requesting this dma_unmap()
 * @op: the memory operation containing the buffer to unmap
 * @sgt: a pointer to an sg_table previously initialized by
 *	 spi_controller_dma_map_mem_op_data()
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares things so that the CPU can access the
 * op->data.buf.{in,out} buffer again.
 *
 * This function is not intended to be called from SPI drivers. Only SPI
 * controller drivers should use it.
 *
 * This function should be called after the DMA operation has finished and is
 * only valid if the previous spi_controller_dma_map_mem_op_data() call
 * returned 0.
 */
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
					  const struct spi_mem_op *op,
					  struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	spi_unmap_buf(ctlr, dmadev, sgt,
		      op->data.dir == SPI_MEM_DATA_IN ?
		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
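
/*
 * Illustrative sketch (not part of the driver): a Linux controller driver's
 * exec_op() callback would typically pair the two helpers above around the
 * actual DMA transfer. The ctlr/op names below are assumed callback
 * parameters and my_hw_issue_dma() is a hypothetical hardware-specific
 * helper.
 *
 *	struct sg_table sgt;
 *	int ret;
 *
 *	ret = spi_controller_dma_map_mem_op_data(ctlr, op, &sgt);
 *	if (ret)
 *		return ret;
 *
 *	ret = my_hw_issue_dma(ctlr, op, &sgt);
 *
 *	spi_controller_dma_unmap_mem_op_data(ctlr, op, &sgt);
 *	return ret;
 */
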
#endif /* __UBOOT__ */

static int spi_check_buswidth_req(struct spi_slave *slave, u8 buswidth, bool tx)
{
	u32 mode = slave->mode;

	switch (buswidth) {
	case 1:
		return 0;

	case 2:
		if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
		    (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))
			return 0;

		break;

	case 4:
		if ((tx && (mode & SPI_TX_QUAD)) ||
		    (!tx && (mode & SPI_RX_QUAD)))
			return 0;

		break;

	case 8:
		if ((tx && (mode & SPI_TX_OCTAL)) ||
		    (!tx && (mode & SPI_RX_OCTAL)))
			return 0;

		break;

	default:
		break;
	}

	return -ENOTSUPP;
}

bool spi_mem_default_supports_op(struct spi_slave *slave,
				 const struct spi_mem_op *op)
{
	if (spi_check_buswidth_req(slave, op->cmd.buswidth, true))
		return false;

	if (op->addr.nbytes &&
	    spi_check_buswidth_req(slave, op->addr.buswidth, true))
		return false;

	if (op->dummy.nbytes &&
	    spi_check_buswidth_req(slave, op->dummy.buswidth, true))
		return false;

	if (op->data.nbytes &&
	    spi_check_buswidth_req(slave, op->data.buswidth,
				   op->data.dir == SPI_MEM_DATA_OUT))
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);

/**
 * spi_mem_supports_op() - Check if a memory device and the controller it is
 *			   connected to support a specific memory operation
 * @slave: the SPI device
 * @op: the memory operation to check
 *
 * Some controllers only support Single or Dual IOs, others might only
 * support specific opcodes, or it can even be that the controller and device
 * both support Quad IOs but the hardware prevents you from using it because
 * only 2 IO lines are connected.
 *
 * This function checks whether a specific operation is supported.
 *
 * Return: true if @op is supported, false otherwise.
 */
bool spi_mem_supports_op(struct spi_slave *slave,
			 const struct spi_mem_op *op)
{
	struct udevice *bus = slave->dev->parent;
	struct dm_spi_ops *ops = spi_get_ops(bus);

	if (ops->mem_ops && ops->mem_ops->supports_op)
		return ops->mem_ops->supports_op(slave, op);

	return spi_mem_default_supports_op(slave, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
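
/*
 * Illustrative sketch (not part of the driver): a flash driver can use
 * spi_mem_supports_op() to pick an opcode before issuing it, e.g. falling
 * back from a quad-output read to a single-line fast read. The
 * SPI_MEM_OP*() helpers come from <spi-mem.h>; the opcodes below are the
 * usual SPI NOR ones and are shown only as an example.
 *
 *	struct spi_mem_op op =
 *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
 *			   SPI_MEM_OP_ADDR(3, 0, 1),
 *			   SPI_MEM_OP_DUMMY(1, 1),
 *			   SPI_MEM_OP_DATA_IN(len, buf, 4));
 *
 *	if (!spi_mem_supports_op(slave, &op)) {
 *		op.cmd.opcode = 0x0b;	// fast read on a single data line
 *		op.data.buswidth = 1;
 *	}
 */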

/**
 * spi_mem_exec_op() - Execute a memory operation
 * @slave: the SPI device
 * @op: the memory operation to execute
 *
 * Executes a memory operation.
 *
 * This function first checks that @op is supported and then tries to execute
 * it.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_exec_op(struct spi_slave *slave, const struct spi_mem_op *op)
{
	struct udevice *bus = slave->dev->parent;
	struct dm_spi_ops *ops = spi_get_ops(bus);
	unsigned int pos = 0;
	const u8 *tx_buf = NULL;
	u8 *rx_buf = NULL;
	int op_len;
	u32 flag;
	int ret;
	int i;

	if (!spi_mem_supports_op(slave, op))
		return -ENOTSUPP;

	ret = spi_claim_bus(slave);
	if (ret < 0)
		return ret;

	if (ops->mem_ops && ops->mem_ops->exec_op) {
#ifndef __UBOOT__
		/*
		 * Flush the message queue before executing our SPI memory
		 * operation to prevent preemption of regular SPI transfers.
		 */
		spi_flush_queue(ctlr);

		if (ctlr->auto_runtime_pm) {
			ret = pm_runtime_get_sync(ctlr->dev.parent);
			if (ret < 0) {
				dev_err(&ctlr->dev,
					"Failed to power device: %d\n",
					ret);
				return ret;
			}
		}

		mutex_lock(&ctlr->bus_lock_mutex);
		mutex_lock(&ctlr->io_mutex);
#endif
		ret = ops->mem_ops->exec_op(slave, op);

#ifndef __UBOOT__
		mutex_unlock(&ctlr->io_mutex);
		mutex_unlock(&ctlr->bus_lock_mutex);

		if (ctlr->auto_runtime_pm)
			pm_runtime_put(ctlr->dev.parent);
#endif

		/*
		 * Some controllers only optimize specific paths (typically the
		 * read path) and expect the core to use the regular SPI
		 * interface in other cases.
		 */
		if (!ret || ret != -ENOTSUPP) {
			spi_release_bus(slave);
			return ret;
		}
	}

#ifndef __UBOOT__
	tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
		     op->dummy.nbytes;

	/*
	 * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
	 * we're guaranteed that this buffer is DMA-able, as required by the
	 * SPI layer.
	 */
	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
	if (!tmpbuf)
		return -ENOMEM;

	spi_message_init(&msg);

	tmpbuf[0] = op->cmd.opcode;
	xfers[xferpos].tx_buf = tmpbuf;
	xfers[xferpos].len = sizeof(op->cmd.opcode);
	xfers[xferpos].tx_nbits = op->cmd.buswidth;
	spi_message_add_tail(&xfers[xferpos], &msg);
	xferpos++;
	totalxferlen++;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tmpbuf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));

		xfers[xferpos].tx_buf = tmpbuf + 1;
		xfers[xferpos].len = op->addr.nbytes;
		xfers[xferpos].tx_nbits = op->addr.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->addr.nbytes;
	}

	if (op->dummy.nbytes) {
		memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
		xfers[xferpos].len = op->dummy.nbytes;
		xfers[xferpos].tx_nbits = op->dummy.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->dummy.nbytes;
	}

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN) {
			xfers[xferpos].rx_buf = op->data.buf.in;
			xfers[xferpos].rx_nbits = op->data.buswidth;
		} else {
			xfers[xferpos].tx_buf = op->data.buf.out;
			xfers[xferpos].tx_nbits = op->data.buswidth;
		}

		xfers[xferpos].len = op->data.nbytes;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->data.nbytes;
	}

	ret = spi_sync(slave, &msg);

	kfree(tmpbuf);

	if (ret)
		return ret;

	if (msg.actual_length != totalxferlen)
		return -EIO;
#else

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN)
			rx_buf = op->data.buf.in;
		else
			tx_buf = op->data.buf.out;
	}

	op_len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;

	/*
	 * Avoid using malloc() here so that we can use this code in SPL where
	 * simple malloc may be used. That implementation does not allow free()
	 * so repeated calls to this code can exhaust the space.
	 *
	 * The value of op_len is small, since it does not include the actual
	 * data being sent, only the op-code and address. In fact, it should be
	 * possible to just use a small fixed value here instead of op_len.
	 */
	u8 op_buf[op_len];

	op_buf[pos++] = op->cmd.opcode;

	if (op->addr.nbytes) {
		for (i = 0; i < op->addr.nbytes; i++)
			op_buf[pos + i] = op->addr.val >>
				(8 * (op->addr.nbytes - i - 1));

		pos += op->addr.nbytes;
	}

	if (op->dummy.nbytes)
		memset(op_buf + pos, 0xff, op->dummy.nbytes);

	/* 1st transfer: opcode + address + dummy cycles */
	flag = SPI_XFER_BEGIN;
	/* Make sure to set END bit if no tx or rx data messages follow */
	if (!tx_buf && !rx_buf)
		flag |= SPI_XFER_END;

	ret = spi_xfer(slave, op_len * 8, op_buf, NULL, flag);
	if (ret)
		return ret;

	/* 2nd transfer: rx or tx data path */
	if (tx_buf || rx_buf) {
		flag = SPI_XFER_END;
		if (slave->mode & SPI_DMA_PREPARE)
			flag |= SPI_XFER_PREPARE;

		ret = spi_xfer(slave, op->data.nbytes * 8, tx_buf,
			       rx_buf, flag);
		if (ret)
			return ret;
	}

	spi_release_bus(slave);

	for (i = 0; i < pos; i++)
		debug("%02x ", op_buf[i]);
	debug("| [%dB %s] ",
	      tx_buf || rx_buf ? op->data.nbytes : 0,
	      tx_buf || rx_buf ? (tx_buf ? "out" : "in") : "-");
	for (i = 0; i < op->data.nbytes; i++)
		debug("%02x ", tx_buf ? tx_buf[i] : rx_buf[i]);
	debug("[ret %d]\n", ret);

	if (ret < 0)
		return ret;
#endif /* __UBOOT__ */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_exec_op);
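
/*
 * Illustrative sketch (not part of the driver): reading a JEDEC ID (opcode
 * 0x9f) through this API, assuming a struct spi_slave *slave obtained from
 * the usual DM SPI probe path and the SPI_MEM_OP*() helpers from
 * <spi-mem.h>.
 *
 *	u8 id[3];
 *	struct spi_mem_op op =
 *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
 *			   SPI_MEM_OP_NO_ADDR,
 *			   SPI_MEM_OP_NO_DUMMY,
 *			   SPI_MEM_OP_DATA_IN(sizeof(id), id, 1));
 *	int ret = spi_mem_exec_op(slave, &op);
 *
 * A -ENOTSUPP return means the operation is not supported by this
 * controller/device combination (see spi_mem_supports_op()).
 */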

/**
 * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
 *				 match controller limitations
 * @slave: the SPI device
 * @op: the operation to adjust
 *
 * Some controllers have FIFO limitations and must split a data transfer
 * operation into multiple ones, others require a specific alignment for
 * optimized accesses. This function allows SPI mem drivers to split a single
 * operation into multiple sub-operations when required.
 *
 * Return: a negative error code if the controller can't properly adjust @op,
 *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
 *	   can't be handled in a single step.
 */
int spi_mem_adjust_op_size(struct spi_slave *slave, struct spi_mem_op *op)
{
	struct udevice *bus = slave->dev->parent;
	struct dm_spi_ops *ops = spi_get_ops(bus);

	if (ops->mem_ops && ops->mem_ops->adjust_op_size)
		return ops->mem_ops->adjust_op_size(slave, op);

	if (!ops->mem_ops || !ops->mem_ops->exec_op) {
		unsigned int len;

		len = sizeof(op->cmd.opcode) + op->addr.nbytes +
			op->dummy.nbytes;
		if (slave->max_write_size && len > slave->max_write_size)
			return -EINVAL;

		if (op->data.dir == SPI_MEM_DATA_IN && slave->max_read_size)
			op->data.nbytes = min(op->data.nbytes,
					      slave->max_read_size);
		else if (slave->max_write_size)
			op->data.nbytes = min(op->data.nbytes,
					      slave->max_write_size - len);

		if (!op->data.nbytes)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
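
/*
 * Illustrative sketch (not part of the driver): callers are expected to run
 * spi_mem_adjust_op_size() before every spi_mem_exec_op() and to loop until
 * the whole buffer has been transferred, since the core may shrink
 * op->data.nbytes to fit controller limits. The op/remaining names below are
 * assumptions for a read loop.
 *
 *	while (remaining) {
 *		op.data.nbytes = remaining;
 *		ret = spi_mem_adjust_op_size(slave, &op);
 *		if (ret)
 *			return ret;
 *
 *		ret = spi_mem_exec_op(slave, &op);
 *		if (ret)
 *			return ret;
 *
 *		op.addr.val += op.data.nbytes;
 *		op.data.buf.in += op.data.nbytes;	// .out for writes
 *		remaining -= op.data.nbytes;
 *	}
 */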

#ifndef __UBOOT__
static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
	return container_of(drv, struct spi_mem_driver, spidrv.driver);
}

static int spi_mem_probe(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem;

	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->spi = spi;
	spi_set_drvdata(spi, mem);

	return memdrv->probe(mem);
}

static int spi_mem_remove(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->remove)
		return memdrv->remove(mem);

	return 0;
}

static void spi_mem_shutdown(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->shutdown)
		memdrv->shutdown(mem);
}

/**
 * spi_mem_driver_register_with_owner() - Register a SPI memory driver
 * @memdrv: the SPI memory driver to register
 * @owner: the owner of this driver
 *
 * Registers a SPI memory driver.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */

int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
				       struct module *owner)
{
	memdrv->spidrv.probe = spi_mem_probe;
	memdrv->spidrv.remove = spi_mem_remove;
	memdrv->spidrv.shutdown = spi_mem_shutdown;

	return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
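
/*
 * Illustrative sketch (not part of the driver): a Linux SPI memory driver
 * fills a struct spi_mem_driver and registers it with this helper, usually
 * through the spi_mem_driver_register()/module_spi_mem_driver() wrappers.
 * The my_* names below are placeholders.
 *
 *	static struct spi_mem_driver my_flash_driver = {
 *		.spidrv = {
 *			.driver = { .name = "my-flash" },
 *		},
 *		.probe = my_flash_probe,
 *		.remove = my_flash_remove,
 *	};
 *
 *	spi_mem_driver_register_with_owner(&my_flash_driver, THIS_MODULE);
 */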

/**
 * spi_mem_driver_unregister() - Unregister a SPI memory driver
 * @memdrv: the SPI memory driver to unregister
 *
 * Unregisters a SPI memory driver.
 */
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
	spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);
#endif /* __UBOOT__ */