// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Exceet Electronics GmbH
 * Copyright (C) 2018 Bootlin
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */

#ifndef __UBOOT__
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include "internals.h"
#else
#include <spi.h>
#include <spi-mem.h>
#endif

#ifndef __UBOOT__
/**
 * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
 *					  memory operation
 * @ctlr: the SPI controller requesting this dma_map()
 * @op: the memory operation containing the buffer to map
 * @sgt: a pointer to a non-initialized sg_table that will be filled by this
 *	 function
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares everything for you and provides a ready-to-use
 * sg_table. This function is not intended to be called from SPI drivers.
 * Only SPI controller drivers should use it.
 * Note that the caller must ensure the memory region pointed to by
 * op->data.buf.{in,out} is DMA-able before calling this function.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
				       const struct spi_mem_op *op,
				       struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return -EINVAL;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	if (!dmadev)
		return -EINVAL;

	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
			   op->data.dir == SPI_MEM_DATA_IN ?
			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);

/**
 * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
 *					    memory operation
 * @ctlr: the SPI controller requesting this dma_unmap()
 * @op: the memory operation containing the buffer to unmap
 * @sgt: a pointer to an sg_table previously initialized by
 *	 spi_controller_dma_map_mem_op_data()
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares things so that the CPU can access the
 * op->data.buf.{in,out} buffer again.
 *
 * This function is not intended to be called from SPI drivers. Only SPI
 * controller drivers should use it.
 *
 * This function should be called after the DMA operation has finished and is
 * only valid if the previous spi_controller_dma_map_mem_op_data() call
 * returned 0.
 */
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
					  const struct spi_mem_op *op,
					  struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	spi_unmap_buf(ctlr, dmadev, sgt,
		      op->data.dir == SPI_MEM_DATA_IN ?
		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
#endif /* __UBOOT__ */
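
/*
 * Usage sketch: how a Linux controller driver might drive the two helpers
 * above around a DMA-based exec_op() implementation. foo_hw_start_dma() and
 * foo_hw_wait_done() are hypothetical placeholders for controller-specific
 * code.
 *
 *	static int foo_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
 *	{
 *		struct spi_controller *ctlr = mem->spi->controller;
 *		struct sg_table sgt;
 *		int ret;
 *
 *		ret = spi_controller_dma_map_mem_op_data(ctlr, op, &sgt);
 *		if (ret)
 *			return ret;
 *
 *		foo_hw_start_dma(ctlr, op, &sgt);
 *		ret = foo_hw_wait_done(ctlr);
 *
 *		spi_controller_dma_unmap_mem_op_data(ctlr, op, &sgt);
 *
 *		return ret;
 *	}
 */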
static int spi_check_buswidth_req(struct spi_slave *slave, u8 buswidth, bool tx)
{
	u32 mode = slave->mode;

	switch (buswidth) {
	case 1:
		return 0;

	case 2:
		if ((tx && (mode & (SPI_TX_DUAL | SPI_TX_QUAD))) ||
		    (!tx && (mode & (SPI_RX_DUAL | SPI_RX_QUAD))))
			return 0;

		break;

	case 4:
		if ((tx && (mode & SPI_TX_QUAD)) ||
		    (!tx && (mode & SPI_RX_QUAD)))
			return 0;

		break;

	default:
		break;
	}

	return -ENOTSUPP;
}

bool spi_mem_default_supports_op(struct spi_slave *slave,
				 const struct spi_mem_op *op)
{
	if (spi_check_buswidth_req(slave, op->cmd.buswidth, true))
		return false;

	if (op->addr.nbytes &&
	    spi_check_buswidth_req(slave, op->addr.buswidth, true))
		return false;

	if (op->dummy.nbytes &&
	    spi_check_buswidth_req(slave, op->dummy.buswidth, true))
		return false;

	if (op->data.nbytes &&
	    spi_check_buswidth_req(slave, op->data.buswidth,
				   op->data.dir == SPI_MEM_DATA_OUT))
		return false;

	return true;
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);

/**
 * spi_mem_supports_op() - Check if a memory device and the controller it is
 *			   connected to support a specific memory operation
 * @slave: the SPI device
 * @op: the memory operation to check
 *
 * Some controllers only support Single or Dual IOs, others might only
 * support specific opcodes, or it can even be that the controller and device
 * both support Quad IOs but the hardware prevents you from using them because
 * only 2 IO lines are connected.
 *
 * This function checks whether a specific operation is supported.
 *
 * Return: true if @op is supported, false otherwise.
 */
bool spi_mem_supports_op(struct spi_slave *slave,
			 const struct spi_mem_op *op)
{
	struct udevice *bus = slave->dev->parent;
	struct dm_spi_ops *ops = spi_get_ops(bus);

	if (ops->mem_ops && ops->mem_ops->supports_op)
		return ops->mem_ops->supports_op(slave, op);

	return spi_mem_default_supports_op(slave, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
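
/*
 * Usage sketch: build a Quad Output Fast Read (opcode 0x6b, 1-1-4, eight
 * dummy cycles = one dummy byte on a single line) with the SPI_MEM_OP()
 * helpers from <spi-mem.h> and probe it before committing to the quad path.
 * The offset, len, buf and slave variables are assumed to exist in the
 * caller.
 *
 *	struct spi_mem_op op =
 *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
 *			   SPI_MEM_OP_ADDR(3, offset, 1),
 *			   SPI_MEM_OP_DUMMY(1, 1),
 *			   SPI_MEM_OP_DATA_IN(len, buf, 4));
 *
 *	if (!spi_mem_supports_op(slave, &op))
 *		return -ENOTSUPP;
 *
 * A caller would typically fall back to a slower 1-1-1 read op here instead
 * of failing outright.
 */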
/**
 * spi_mem_exec_op() - Execute a memory operation
 * @slave: the SPI device
 * @op: the memory operation to execute
 *
 * Executes a memory operation.
 *
 * This function first checks that @op is supported and then tries to execute
 * it.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_exec_op(struct spi_slave *slave, const struct spi_mem_op *op)
{
	struct udevice *bus = slave->dev->parent;
	struct dm_spi_ops *ops = spi_get_ops(bus);
	unsigned int pos = 0;
	const u8 *tx_buf = NULL;
	u8 *rx_buf = NULL;
	u8 *op_buf;
	int op_len;
	u32 flag;
	int ret;
	int i;

	if (!spi_mem_supports_op(slave, op))
		return -ENOTSUPP;

	if (ops->mem_ops) {
#ifndef __UBOOT__
		/*
		 * Flush the message queue before executing our SPI memory
		 * operation to prevent preemption of regular SPI transfers.
		 */
		spi_flush_queue(ctlr);

		if (ctlr->auto_runtime_pm) {
			ret = pm_runtime_get_sync(ctlr->dev.parent);
			if (ret < 0) {
				dev_err(&ctlr->dev,
					"Failed to power device: %d\n",
					ret);
				return ret;
			}
		}

		mutex_lock(&ctlr->bus_lock_mutex);
		mutex_lock(&ctlr->io_mutex);
#endif
		ret = ops->mem_ops->exec_op(slave, op);
#ifndef __UBOOT__
		mutex_unlock(&ctlr->io_mutex);
		mutex_unlock(&ctlr->bus_lock_mutex);

		if (ctlr->auto_runtime_pm)
			pm_runtime_put(ctlr->dev.parent);
#endif

		/*
		 * Some controllers only optimize specific paths (typically the
		 * read path) and expect the core to use the regular SPI
		 * interface in other cases.
		 */
		if (!ret || ret != -ENOTSUPP)
			return ret;
	}

#ifndef __UBOOT__
	tmpbufsize = sizeof(op->cmd.opcode) + op->addr.nbytes +
		     op->dummy.nbytes;

	/*
	 * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
	 * we're guaranteed that this buffer is DMA-able, as required by the
	 * SPI layer.
	 */
	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
	if (!tmpbuf)
		return -ENOMEM;

	spi_message_init(&msg);

	tmpbuf[0] = op->cmd.opcode;
	xfers[xferpos].tx_buf = tmpbuf;
	xfers[xferpos].len = sizeof(op->cmd.opcode);
	xfers[xferpos].tx_nbits = op->cmd.buswidth;
	spi_message_add_tail(&xfers[xferpos], &msg);
	xferpos++;
	totalxferlen++;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tmpbuf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));

		xfers[xferpos].tx_buf = tmpbuf + 1;
		xfers[xferpos].len = op->addr.nbytes;
		xfers[xferpos].tx_nbits = op->addr.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->addr.nbytes;
	}

	if (op->dummy.nbytes) {
		memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
		xfers[xferpos].len = op->dummy.nbytes;
		xfers[xferpos].tx_nbits = op->dummy.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->dummy.nbytes;
	}

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN) {
			xfers[xferpos].rx_buf = op->data.buf.in;
			xfers[xferpos].rx_nbits = op->data.buswidth;
		} else {
			xfers[xferpos].tx_buf = op->data.buf.out;
			xfers[xferpos].tx_nbits = op->data.buswidth;
		}

		xfers[xferpos].len = op->data.nbytes;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->data.nbytes;
	}

	ret = spi_sync(slave, &msg);

	kfree(tmpbuf);

	if (ret)
		return ret;

	if (msg.actual_length != totalxferlen)
		return -EIO;
#else

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN)
			rx_buf = op->data.buf.in;
		else
			tx_buf = op->data.buf.out;
	}

	op_len = sizeof(op->cmd.opcode) + op->addr.nbytes + op->dummy.nbytes;
	op_buf = calloc(1, op_len);
	if (!op_buf)
		return -ENOMEM;

	ret = spi_claim_bus(slave);
	if (ret < 0)
		goto free_buf;

	op_buf[pos++] = op->cmd.opcode;

	if (op->addr.nbytes) {
		for (i = 0; i < op->addr.nbytes; i++)
			op_buf[pos + i] = op->addr.val >>
					  (8 * (op->addr.nbytes - i - 1));

		pos += op->addr.nbytes;
	}

	if (op->dummy.nbytes)
		memset(op_buf + pos, 0xff, op->dummy.nbytes);

	/* 1st transfer: opcode + address + dummy cycles */
	flag = SPI_XFER_BEGIN;
	/* Make sure to set END bit if no tx or rx data messages follow */
	if (!tx_buf && !rx_buf)
		flag |= SPI_XFER_END;

	ret = spi_xfer(slave, op_len * 8, op_buf, NULL, flag);
	if (ret)
		goto release_bus;

	/* 2nd transfer: rx or tx data path */
	if (tx_buf || rx_buf)
		ret = spi_xfer(slave, op->data.nbytes * 8, tx_buf,
			       rx_buf, SPI_XFER_END);

release_bus:
	spi_release_bus(slave);

	for (i = 0; i < pos; i++)
		debug("%02x ", op_buf[i]);
	debug("| [%dB %s] ",
	      tx_buf || rx_buf ? op->data.nbytes : 0,
	      tx_buf || rx_buf ? (tx_buf ? "out" : "in") : "-");
	for (i = 0; i < op->data.nbytes; i++)
		debug("%02x ", tx_buf ? tx_buf[i] : rx_buf[i]);
	debug("[ret %d]\n", ret);

free_buf:
	free(op_buf);

	if (ret < 0)
		return ret;
#endif /* __UBOOT__ */

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_exec_op);
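
/*
 * Usage sketch: read a flash chip's 3-byte JEDEC ID (opcode 0x9f) with a
 * single call. The slave pointer is assumed to be an already-probed SPI
 * device.
 *
 *	u8 id[3];
 *	struct spi_mem_op op =
 *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
 *			   SPI_MEM_OP_NO_ADDR,
 *			   SPI_MEM_OP_NO_DUMMY,
 *			   SPI_MEM_OP_DATA_IN(sizeof(id), id, 1));
 *	int ret = spi_mem_exec_op(slave, &op);
 */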
/**
 * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
 *			      match controller limitations
 * @slave: the SPI device
 * @op: the operation to adjust
 *
 * Some controllers have FIFO limitations and must split a data transfer
 * operation into multiple ones, others require a specific alignment for
 * optimized accesses. This function allows SPI mem drivers to split a single
 * operation into multiple sub-operations when required.
 *
 * Return: a negative error code if the controller can't properly adjust @op,
 *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
 *	   can't be handled in a single step.
 */
int spi_mem_adjust_op_size(struct spi_slave *slave, struct spi_mem_op *op)
{
	struct udevice *bus = slave->dev->parent;
	struct dm_spi_ops *ops = spi_get_ops(bus);

	if (ops->mem_ops && ops->mem_ops->adjust_op_size)
		return ops->mem_ops->adjust_op_size(slave, op);

	if (!ops->mem_ops || !ops->mem_ops->exec_op) {
		unsigned int len;

		len = sizeof(op->cmd.opcode) + op->addr.nbytes +
		      op->dummy.nbytes;
		if (slave->max_write_size && len > slave->max_write_size)
			return -EINVAL;

		if (op->data.dir == SPI_MEM_DATA_IN && slave->max_read_size)
			op->data.nbytes = min(op->data.nbytes,
					      slave->max_read_size);
		else if (slave->max_write_size)
			op->data.nbytes = min(op->data.nbytes,
					      slave->max_write_size - len);

		if (!op->data.nbytes)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
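
/*
 * Usage sketch: clamp each chunk to what the controller can handle, then
 * loop until the whole region has been read. slave, start, buf and len are
 * assumed to come from the caller; 0x03 is the classic single-bit read.
 *
 *	while (len) {
 *		struct spi_mem_op op =
 *			SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
 *				   SPI_MEM_OP_ADDR(3, start, 1),
 *				   SPI_MEM_OP_NO_DUMMY,
 *				   SPI_MEM_OP_DATA_IN(len, buf, 1));
 *		int ret;
 *
 *		ret = spi_mem_adjust_op_size(slave, &op);
 *		if (ret)
 *			return ret;
 *
 *		ret = spi_mem_exec_op(slave, &op);
 *		if (ret)
 *			return ret;
 *
 *		start += op.data.nbytes;
 *		buf += op.data.nbytes;
 *		len -= op.data.nbytes;
 *	}
 */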
#ifndef __UBOOT__
static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
	return container_of(drv, struct spi_mem_driver, spidrv.driver);
}

static int spi_mem_probe(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem;

	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->spi = spi;
	spi_set_drvdata(spi, mem);

	return memdrv->probe(mem);
}

static int spi_mem_remove(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->remove)
		return memdrv->remove(mem);

	return 0;
}

static void spi_mem_shutdown(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->shutdown)
		memdrv->shutdown(mem);
}

/**
 * spi_mem_driver_register_with_owner() - Register a SPI memory driver
 * @memdrv: the SPI memory driver to register
 * @owner: the owner of this driver
 *
 * Registers a SPI memory driver.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
				       struct module *owner)
{
	memdrv->spidrv.probe = spi_mem_probe;
	memdrv->spidrv.remove = spi_mem_remove;
	memdrv->spidrv.shutdown = spi_mem_shutdown;

	return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);

/**
 * spi_mem_driver_unregister() - Unregister a SPI memory driver
 * @memdrv: the SPI memory driver to unregister
 *
 * Unregisters a SPI memory driver.
 */
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
	spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);
#endif /* __UBOOT__ */
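
/*
 * Usage sketch (Linux side): a skeleton spi_mem_driver registration. The
 * "foo" names and compatible string are hypothetical; module_spi_mem_driver()
 * expands to spi_mem_driver_register()/spi_mem_driver_unregister() calls.
 *
 *	static int foo_probe(struct spi_mem *mem)
 *	{
 *		return 0;
 *	}
 *
 *	static const struct of_device_id foo_of_ids[] = {
 *		{ .compatible = "vendor,foo" },
 *		{ }
 *	};
 *
 *	static struct spi_mem_driver foo_driver = {
 *		.spidrv = {
 *			.driver = {
 *				.name = "foo",
 *				.of_match_table = foo_of_ids,
 *			},
 *		},
 *		.probe = foo_probe,
 *	};
 *	module_spi_mem_driver(foo_driver);
 */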