// SPDX-License-Identifier: GPL-2.0+
/*
 * Copyright (C) 2018 Exceet Electronics GmbH
 * Copyright (C) 2018 Bootlin
 *
 * Author: Boris Brezillon <boris.brezillon@bootlin.com>
 */
#include <linux/dmaengine.h>
#include <linux/pm_runtime.h>
#include <linux/spi/spi.h>
#include <linux/spi/spi-mem.h>

#include "internals.h"

#define SPI_MEM_MAX_BUSWIDTH		8

/**
 * spi_controller_dma_map_mem_op_data() - DMA-map the buffer attached to a
 *					  memory operation
 * @ctlr: the SPI controller requesting this dma_map()
 * @op: the memory operation containing the buffer to map
 * @sgt: a pointer to a non-initialized sg_table that will be filled by this
 *	 function
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares everything for you and provides a ready-to-use
 * sg_table. This function is not intended to be called from SPI drivers.
 * Only SPI controller drivers should use it.
 * Note that the caller must ensure the memory region pointed by
 * op->data.buf.{in,out} is DMA-able before calling this function.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_controller_dma_map_mem_op_data(struct spi_controller *ctlr,
				       const struct spi_mem_op *op,
				       struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return -EINVAL;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	if (!dmadev)
		return -EINVAL;

	return spi_map_buf(ctlr, dmadev, sgt, op->data.buf.in, op->data.nbytes,
			   op->data.dir == SPI_MEM_DATA_IN ?
			   DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_map_mem_op_data);

/**
 * spi_controller_dma_unmap_mem_op_data() - DMA-unmap the buffer attached to a
 *					    memory operation
 * @ctlr: the SPI controller requesting this dma_unmap()
 * @op: the memory operation containing the buffer to unmap
 * @sgt: a pointer to an sg_table previously initialized by
 *	 spi_controller_dma_map_mem_op_data()
 *
 * Some controllers might want to do DMA on the data buffer embedded in @op.
 * This helper prepares things so that the CPU can access the
 * op->data.buf.{in,out} buffer again.
 *
 * This function is not intended to be called from SPI drivers. Only SPI
 * controller drivers should use it.
 *
 * This function should be called after the DMA operation has finished and is
 * only valid if the previous spi_controller_dma_map_mem_op_data() call
 * returned 0.
 */
void spi_controller_dma_unmap_mem_op_data(struct spi_controller *ctlr,
					  const struct spi_mem_op *op,
					  struct sg_table *sgt)
{
	struct device *dmadev;

	if (!op->data.nbytes)
		return;

	if (op->data.dir == SPI_MEM_DATA_OUT && ctlr->dma_tx)
		dmadev = ctlr->dma_tx->device->dev;
	else if (op->data.dir == SPI_MEM_DATA_IN && ctlr->dma_rx)
		dmadev = ctlr->dma_rx->device->dev;
	else
		dmadev = ctlr->dev.parent;

	spi_unmap_buf(ctlr, dmadev, sgt,
		      op->data.dir == SPI_MEM_DATA_IN ?
		      DMA_FROM_DEVICE : DMA_TO_DEVICE);
}
EXPORT_SYMBOL_GPL(spi_controller_dma_unmap_mem_op_data);
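
/*
 * Illustrative sketch (not part of this driver): a controller's ->exec_op()
 * implementation would typically pair the two helpers above around its DMA
 * transfer. Names prefixed with "foo_" are hypothetical.
 *
 *	static int foo_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
 *	{
 *		struct spi_controller *ctlr = mem->spi->controller;
 *		struct sg_table sgt;
 *		int ret;
 *
 *		ret = spi_controller_dma_map_mem_op_data(ctlr, op, &sgt);
 *		if (ret)
 *			return ret;
 *
 *		ret = foo_issue_dma(ctlr, op, &sgt);	// hypothetical HW access
 *
 *		spi_controller_dma_unmap_mem_op_data(ctlr, op, &sgt);
 *		return ret;
 *	}
 */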

static int spi_check_buswidth_req(struct spi_mem *mem, u8 buswidth, bool tx)
{
	u32 mode = mem->spi->mode;

	switch (buswidth) {
	case 1:
		return 0;

	case 2:
		if ((tx &&
		     (mode & (SPI_TX_DUAL | SPI_TX_QUAD | SPI_TX_OCTAL))) ||
		    (!tx &&
		     (mode & (SPI_RX_DUAL | SPI_RX_QUAD | SPI_RX_OCTAL))))
			return 0;

		break;

	case 4:
		if ((tx && (mode & (SPI_TX_QUAD | SPI_TX_OCTAL))) ||
		    (!tx && (mode & (SPI_RX_QUAD | SPI_RX_OCTAL))))
			return 0;

		break;

	case 8:
		if ((tx && (mode & SPI_TX_OCTAL)) ||
		    (!tx && (mode & SPI_RX_OCTAL)))
			return 0;

		break;

	default:
		break;
	}

	return -ENOTSUPP;
}

static bool spi_mem_check_buswidth(struct spi_mem *mem,
				   const struct spi_mem_op *op)
{
	if (spi_check_buswidth_req(mem, op->cmd.buswidth, true))
		return false;

	if (op->addr.nbytes &&
	    spi_check_buswidth_req(mem, op->addr.buswidth, true))
		return false;

	if (op->dummy.nbytes &&
	    spi_check_buswidth_req(mem, op->dummy.buswidth, true))
		return false;

	if (op->data.dir != SPI_MEM_NO_DATA &&
	    spi_check_buswidth_req(mem, op->data.buswidth,
				   op->data.dir == SPI_MEM_DATA_OUT))
		return false;

	return true;
}

bool spi_mem_dtr_supports_op(struct spi_mem *mem,
			     const struct spi_mem_op *op)
{
	if (op->cmd.nbytes != 2)
		return false;

	return spi_mem_check_buswidth(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_dtr_supports_op);

bool spi_mem_default_supports_op(struct spi_mem *mem,
				 const struct spi_mem_op *op)
{
	if (op->cmd.dtr || op->addr.dtr || op->dummy.dtr || op->data.dtr)
		return false;

	if (op->cmd.nbytes != 1)
		return false;

	return spi_mem_check_buswidth(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_default_supports_op);
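
/*
 * Illustrative sketch (hypothetical driver code): a controller with extra
 * hardware limits would typically filter in its ->supports_op() and then
 * delegate to the default check for the bus-width/mode validation above.
 *
 *	static bool foo_supports_op(struct spi_mem *mem,
 *				    const struct spi_mem_op *op)
 *	{
 *		if (op->addr.nbytes > 4)	// hypothetical HW limit
 *			return false;
 *
 *		return spi_mem_default_supports_op(mem, op);
 *	}
 */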

static bool spi_mem_buswidth_is_valid(u8 buswidth)
{
	if (hweight8(buswidth) > 1 || buswidth > SPI_MEM_MAX_BUSWIDTH)
		return false;

	return true;
}

static int spi_mem_check_op(const struct spi_mem_op *op)
{
	if (!op->cmd.buswidth || !op->cmd.nbytes)
		return -EINVAL;

	if ((op->addr.nbytes && !op->addr.buswidth) ||
	    (op->dummy.nbytes && !op->dummy.buswidth) ||
	    (op->data.nbytes && !op->data.buswidth))
		return -EINVAL;

	if (!spi_mem_buswidth_is_valid(op->cmd.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->addr.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->dummy.buswidth) ||
	    !spi_mem_buswidth_is_valid(op->data.buswidth))
		return -EINVAL;

	return 0;
}

static bool spi_mem_internal_supports_op(struct spi_mem *mem,
					 const struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;

	if (ctlr->mem_ops && ctlr->mem_ops->supports_op)
		return ctlr->mem_ops->supports_op(mem, op);

	return spi_mem_default_supports_op(mem, op);
}

/**
 * spi_mem_supports_op() - Check if a memory device and the controller it is
 *			   connected to support a specific memory operation
 * @mem: the SPI memory
 * @op: the memory operation to check
 *
 * Some controllers only support Single or Dual IOs, others might only support
 * specific opcodes, or it can even be that the controller and device both
 * support Quad IOs but the hardware prevents you from using them because
 * only 2 IO lines are connected.
 *
 * This function checks whether a specific operation is supported.
 *
 * Return: true if @op is supported, false otherwise.
 */
bool spi_mem_supports_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	if (spi_mem_check_op(op))
		return false;

	return spi_mem_internal_supports_op(mem, op);
}
EXPORT_SYMBOL_GPL(spi_mem_supports_op);
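
/*
 * Illustrative sketch: a SPI memory driver probing whether a 1-1-4 fast-read
 * sequence can be used before committing to it (opcode and geometry are
 * examples, not taken from a specific flash):
 *
 *	struct spi_mem_op op =
 *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x6b, 1),
 *			   SPI_MEM_OP_ADDR(3, 0, 1),
 *			   SPI_MEM_OP_DUMMY(1, 1),
 *			   SPI_MEM_OP_DATA_IN(len, buf, 4));
 *
 *	if (!spi_mem_supports_op(mem, &op))
 *		// fall back to a single-bit read sequence
 */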

static int spi_mem_access_start(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	/*
	 * Flush the message queue before executing our SPI memory
	 * operation to prevent preemption of regular SPI transfers.
	 */
	spi_flush_queue(ctlr);

	if (ctlr->auto_runtime_pm) {
		int ret;

		ret = pm_runtime_get_sync(ctlr->dev.parent);
		if (ret < 0) {
			pm_runtime_put_noidle(ctlr->dev.parent);
			dev_err(&ctlr->dev, "Failed to power device: %d\n",
				ret);
			return ret;
		}
	}

	mutex_lock(&ctlr->bus_lock_mutex);
	mutex_lock(&ctlr->io_mutex);

	return 0;
}

static void spi_mem_access_end(struct spi_mem *mem)
{
	struct spi_controller *ctlr = mem->spi->controller;

	mutex_unlock(&ctlr->io_mutex);
	mutex_unlock(&ctlr->bus_lock_mutex);

	if (ctlr->auto_runtime_pm)
		pm_runtime_put(ctlr->dev.parent);
}

/**
 * spi_mem_exec_op() - Execute a memory operation
 * @mem: the SPI memory
 * @op: the memory operation to execute
 *
 * Executes a memory operation.
 *
 * This function first checks that @op is supported and then tries to execute
 * it.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_exec_op(struct spi_mem *mem, const struct spi_mem_op *op)
{
	unsigned int tmpbufsize, xferpos = 0, totalxferlen = 0;
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_transfer xfers[4] = { };
	struct spi_message msg;
	u8 *tmpbuf;
	int ret;

	ret = spi_mem_check_op(op);
	if (ret)
		return ret;

	if (!spi_mem_internal_supports_op(mem, op))
		return -ENOTSUPP;

	if (ctlr->mem_ops && !mem->spi->cs_gpiod) {
		ret = spi_mem_access_start(mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->exec_op(mem, op);

		spi_mem_access_end(mem);

		/*
		 * Some controllers only optimize specific paths (typically the
		 * read path) and expect the core to use the regular SPI
		 * interface in other cases.
		 */
		if (!ret || ret != -ENOTSUPP)
			return ret;
	}

	tmpbufsize = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

	/*
	 * Allocate a buffer to transmit the CMD, ADDR cycles with kmalloc() so
	 * we're guaranteed that this buffer is DMA-able, as required by the
	 * SPI layer.
	 */
	tmpbuf = kzalloc(tmpbufsize, GFP_KERNEL | GFP_DMA);
	if (!tmpbuf)
		return -ENOMEM;

	spi_message_init(&msg);

	tmpbuf[0] = op->cmd.opcode;
	xfers[xferpos].tx_buf = tmpbuf;
	xfers[xferpos].len = op->cmd.nbytes;
	xfers[xferpos].tx_nbits = op->cmd.buswidth;
	spi_message_add_tail(&xfers[xferpos], &msg);
	xferpos++;
	totalxferlen++;

	if (op->addr.nbytes) {
		int i;

		for (i = 0; i < op->addr.nbytes; i++)
			tmpbuf[i + 1] = op->addr.val >>
					(8 * (op->addr.nbytes - i - 1));

		xfers[xferpos].tx_buf = tmpbuf + 1;
		xfers[xferpos].len = op->addr.nbytes;
		xfers[xferpos].tx_nbits = op->addr.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->addr.nbytes;
	}

	if (op->dummy.nbytes) {
		memset(tmpbuf + op->addr.nbytes + 1, 0xff, op->dummy.nbytes);
		xfers[xferpos].tx_buf = tmpbuf + op->addr.nbytes + 1;
		xfers[xferpos].len = op->dummy.nbytes;
		xfers[xferpos].tx_nbits = op->dummy.buswidth;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->dummy.nbytes;
	}

	if (op->data.nbytes) {
		if (op->data.dir == SPI_MEM_DATA_IN) {
			xfers[xferpos].rx_buf = op->data.buf.in;
			xfers[xferpos].rx_nbits = op->data.buswidth;
		} else {
			xfers[xferpos].tx_buf = op->data.buf.out;
			xfers[xferpos].tx_nbits = op->data.buswidth;
		}

		xfers[xferpos].len = op->data.nbytes;
		spi_message_add_tail(&xfers[xferpos], &msg);
		xferpos++;
		totalxferlen += op->data.nbytes;
	}

	ret = spi_sync(mem->spi, &msg);

	kfree(tmpbuf);

	if (ret)
		return ret;

	if (msg.actual_length != totalxferlen)
		return -EIO;

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_exec_op);
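
/*
 * Illustrative sketch (not part of this driver): reading a 3-byte JEDEC ID
 * with opcode 0x9f. The data buffer is kmalloc'ed so it is DMA-able, as the
 * SPI layer requires:
 *
 *	u8 *id = kmalloc(3, GFP_KERNEL);
 *	struct spi_mem_op op =
 *		SPI_MEM_OP(SPI_MEM_OP_CMD(0x9f, 1),
 *			   SPI_MEM_OP_NO_ADDR,
 *			   SPI_MEM_OP_NO_DUMMY,
 *			   SPI_MEM_OP_DATA_IN(3, id, 1));
 *
 *	ret = spi_mem_exec_op(mem, &op);
 *	// ... use id[0..2] on success, then kfree(id)
 */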

/**
 * spi_mem_get_name() - Return the SPI mem device name to be used by the
 *			upper layer if necessary
 * @mem: the SPI memory
 *
 * This function allows SPI mem users to retrieve the SPI mem device name.
 * It is useful if the upper layer needs to expose a custom name for
 * compatibility reasons.
 *
 * Return: a string containing the name of the memory device to be used
 *	   by the SPI mem user
 */
const char *spi_mem_get_name(struct spi_mem *mem)
{
	return mem->name;
}
EXPORT_SYMBOL_GPL(spi_mem_get_name);

/**
 * spi_mem_adjust_op_size() - Adjust the data size of a SPI mem operation to
 *			      match controller limitations
 * @mem: the SPI memory
 * @op: the operation to adjust
 *
 * Some controllers have FIFO limitations and must split a data transfer
 * operation into multiple ones, others require a specific alignment for
 * optimized accesses. This function allows SPI mem drivers to split a single
 * operation into multiple sub-operations when required.
 *
 * Return: a negative error code if the controller can't properly adjust @op,
 *	   0 otherwise. Note that @op->data.nbytes will be updated if @op
 *	   can't be handled in a single step.
 */
int spi_mem_adjust_op_size(struct spi_mem *mem, struct spi_mem_op *op)
{
	struct spi_controller *ctlr = mem->spi->controller;
	size_t len;

	if (ctlr->mem_ops && ctlr->mem_ops->adjust_op_size)
		return ctlr->mem_ops->adjust_op_size(mem, op);

	if (!ctlr->mem_ops || !ctlr->mem_ops->exec_op) {
		len = op->cmd.nbytes + op->addr.nbytes + op->dummy.nbytes;

		if (len > spi_max_transfer_size(mem->spi))
			return -EINVAL;

		op->data.nbytes = min3((size_t)op->data.nbytes,
				       spi_max_transfer_size(mem->spi),
				       spi_max_message_size(mem->spi) -
				       len);
		if (!op->data.nbytes)
			return -EINVAL;
	}

	return 0;
}
EXPORT_SYMBOL_GPL(spi_mem_adjust_op_size);
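
/*
 * Illustrative sketch: callers typically clamp and loop, since
 * spi_mem_adjust_op_size() may shrink op.data.nbytes to what the controller
 * can handle in one go (a read is shown; writes are symmetric):
 *
 *	u8 *buf = ...;	// destination, len bytes
 *
 *	while (len) {
 *		op.data.buf.in = buf;
 *		op.data.nbytes = len;
 *		ret = spi_mem_adjust_op_size(mem, &op);
 *		if (ret)
 *			return ret;
 *
 *		ret = spi_mem_exec_op(mem, &op);
 *		if (ret)
 *			return ret;
 *
 *		op.addr.val += op.data.nbytes;
 *		buf += op.data.nbytes;
 *		len -= op.data.nbytes;
 *	}
 */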

static ssize_t spi_mem_no_dirmap_read(struct spi_mem_dirmap_desc *desc,
				      u64 offs, size_t len, void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.in = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

static ssize_t spi_mem_no_dirmap_write(struct spi_mem_dirmap_desc *desc,
				       u64 offs, size_t len, const void *buf)
{
	struct spi_mem_op op = desc->info.op_tmpl;
	int ret;

	op.addr.val = desc->info.offset + offs;
	op.data.buf.out = buf;
	op.data.nbytes = len;
	ret = spi_mem_adjust_op_size(desc->mem, &op);
	if (ret)
		return ret;

	ret = spi_mem_exec_op(desc->mem, &op);
	if (ret)
		return ret;

	return op.data.nbytes;
}

/**
 * spi_mem_dirmap_create() - Create a direct mapping descriptor
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * This function creates a direct mapping descriptor which can then be used
 * to access the memory using spi_mem_dirmap_read() or spi_mem_dirmap_write().
 * If the SPI controller driver does not support direct mapping, this function
 * falls back to an implementation using spi_mem_exec_op(), so that the caller
 * doesn't have to bother implementing a fallback on its own.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
spi_mem_dirmap_create(struct spi_mem *mem,
		      const struct spi_mem_dirmap_info *info)
{
	struct spi_controller *ctlr = mem->spi->controller;
	struct spi_mem_dirmap_desc *desc;
	int ret = -ENOTSUPP;

	/* Make sure the number of address bytes is between 1 and 8. */
	if (!info->op_tmpl.addr.nbytes || info->op_tmpl.addr.nbytes > 8)
		return ERR_PTR(-EINVAL);

	/* data.dir should either be SPI_MEM_DATA_IN or SPI_MEM_DATA_OUT. */
	if (info->op_tmpl.data.dir == SPI_MEM_NO_DATA)
		return ERR_PTR(-EINVAL);

	desc = kzalloc(sizeof(*desc), GFP_KERNEL);
	if (!desc)
		return ERR_PTR(-ENOMEM);

	desc->mem = mem;
	desc->info = *info;
	if (ctlr->mem_ops && ctlr->mem_ops->dirmap_create)
		ret = ctlr->mem_ops->dirmap_create(desc);

	if (ret) {
		desc->nodirmap = true;
		if (!spi_mem_supports_op(desc->mem, &desc->info.op_tmpl))
			ret = -ENOTSUPP;
		else
			ret = 0;
	}

	if (ret) {
		kfree(desc);
		return ERR_PTR(ret);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_create);
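
/*
 * Illustrative sketch: creating a read direct mapping over the first 16M of
 * a flash, using the classic 0x03 read opcode (values are examples):
 *
 *	struct spi_mem_dirmap_info info = {
 *		.op_tmpl = SPI_MEM_OP(SPI_MEM_OP_CMD(0x03, 1),
 *				      SPI_MEM_OP_ADDR(3, 0, 1),
 *				      SPI_MEM_OP_NO_DUMMY,
 *				      SPI_MEM_OP_DATA_IN(0, NULL, 1)),
 *		.offset = 0,
 *		.length = SZ_16M,
 *	};
 *	struct spi_mem_dirmap_desc *desc = spi_mem_dirmap_create(mem, &info);
 *
 *	if (IS_ERR(desc))
 *		return PTR_ERR(desc);
 */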

/**
 * spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor
 * @desc: the direct mapping descriptor to destroy
 *
 * This function destroys a direct mapping descriptor previously created by
 * spi_mem_dirmap_create().
 */
void spi_mem_dirmap_destroy(struct spi_mem_dirmap_desc *desc)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;

	if (!desc->nodirmap && ctlr->mem_ops && ctlr->mem_ops->dirmap_destroy)
		ctlr->mem_ops->dirmap_destroy(desc);

	kfree(desc);
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_destroy);

static void devm_spi_mem_dirmap_release(struct device *dev, void *res)
{
	struct spi_mem_dirmap_desc *desc = *(struct spi_mem_dirmap_desc **)res;

	spi_mem_dirmap_destroy(desc);
}

/**
 * devm_spi_mem_dirmap_create() - Create a direct mapping descriptor and attach
 *				  it to a device
 * @dev: device the dirmap desc will be attached to
 * @mem: SPI mem device this direct mapping should be created for
 * @info: direct mapping information
 *
 * devm_ variant of the spi_mem_dirmap_create() function. See
 * spi_mem_dirmap_create() for more details.
 *
 * Return: a valid pointer in case of success, and ERR_PTR() otherwise.
 */
struct spi_mem_dirmap_desc *
devm_spi_mem_dirmap_create(struct device *dev, struct spi_mem *mem,
			   const struct spi_mem_dirmap_info *info)
{
	struct spi_mem_dirmap_desc **ptr, *desc;

	ptr = devres_alloc(devm_spi_mem_dirmap_release, sizeof(*ptr),
			   GFP_KERNEL);
	if (!ptr)
		return ERR_PTR(-ENOMEM);

	desc = spi_mem_dirmap_create(mem, info);
	if (IS_ERR(desc)) {
		devres_free(ptr);
	} else {
		*ptr = desc;
		devres_add(dev, ptr);
	}

	return desc;
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_create);

static int devm_spi_mem_dirmap_match(struct device *dev, void *res, void *data)
{
	struct spi_mem_dirmap_desc **ptr = res;

	if (WARN_ON(!ptr || !*ptr))
		return 0;

	return *ptr == data;
}

/**
 * devm_spi_mem_dirmap_destroy() - Destroy a direct mapping descriptor attached
 *				   to a device
 * @dev: device the dirmap desc is attached to
 * @desc: the direct mapping descriptor to destroy
 *
 * devm_ variant of the spi_mem_dirmap_destroy() function. See
 * spi_mem_dirmap_destroy() for more details.
 */
void devm_spi_mem_dirmap_destroy(struct device *dev,
				 struct spi_mem_dirmap_desc *desc)
{
	devres_release(dev, devm_spi_mem_dirmap_release,
		       devm_spi_mem_dirmap_match, desc);
}
EXPORT_SYMBOL_GPL(devm_spi_mem_dirmap_destroy);

/**
 * spi_mem_dirmap_read() - Read data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start reading from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: destination buffer. This buffer must be DMA-able
 *
 * This function reads data from a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data read from the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_read() again when that happens.
 */
ssize_t spi_mem_dirmap_read(struct spi_mem_dirmap_desc *desc,
			    u64 offs, size_t len, void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_IN)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_read(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_read) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_read(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_read);
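
/*
 * Illustrative sketch: since spi_mem_dirmap_read() may return less than @len,
 * callers usually loop until everything has been transferred (with "u8 *buf"
 * as the destination pointer; spi_mem_dirmap_write() is used the same way):
 *
 *	while (len) {
 *		ssize_t ret = spi_mem_dirmap_read(desc, offs, len, buf);
 *
 *		if (ret < 0)
 *			return ret;
 *		if (!ret)
 *			return -EIO;	// no forward progress
 *
 *		offs += ret;
 *		buf += ret;
 *		len -= ret;
 *	}
 */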

/**
 * spi_mem_dirmap_write() - Write data through a direct mapping
 * @desc: direct mapping descriptor
 * @offs: offset to start writing from. Note that this is not an absolute
 *	  offset, but the offset within the direct mapping which already has
 *	  its own offset
 * @len: length in bytes
 * @buf: source buffer. This buffer must be DMA-able
 *
 * This function writes data to a memory device using a direct mapping
 * previously instantiated with spi_mem_dirmap_create().
 *
 * Return: the amount of data written to the memory device or a negative error
 * code. Note that the returned size might be smaller than @len, and the caller
 * is responsible for calling spi_mem_dirmap_write() again when that happens.
 */
ssize_t spi_mem_dirmap_write(struct spi_mem_dirmap_desc *desc,
			     u64 offs, size_t len, const void *buf)
{
	struct spi_controller *ctlr = desc->mem->spi->controller;
	ssize_t ret;

	if (desc->info.op_tmpl.data.dir != SPI_MEM_DATA_OUT)
		return -EINVAL;

	if (!len)
		return 0;

	if (desc->nodirmap) {
		ret = spi_mem_no_dirmap_write(desc, offs, len, buf);
	} else if (ctlr->mem_ops && ctlr->mem_ops->dirmap_write) {
		ret = spi_mem_access_start(desc->mem);
		if (ret)
			return ret;

		ret = ctlr->mem_ops->dirmap_write(desc, offs, len, buf);

		spi_mem_access_end(desc->mem);
	} else {
		ret = -ENOTSUPP;
	}

	return ret;
}
EXPORT_SYMBOL_GPL(spi_mem_dirmap_write);

static inline struct spi_mem_driver *to_spi_mem_drv(struct device_driver *drv)
{
	return container_of(drv, struct spi_mem_driver, spidrv.driver);
}

static int spi_mem_probe(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_controller *ctlr = spi->controller;
	struct spi_mem *mem;

	mem = devm_kzalloc(&spi->dev, sizeof(*mem), GFP_KERNEL);
	if (!mem)
		return -ENOMEM;

	mem->spi = spi;

	if (ctlr->mem_ops && ctlr->mem_ops->get_name)
		mem->name = ctlr->mem_ops->get_name(mem);
	else
		mem->name = dev_name(&spi->dev);

	if (IS_ERR_OR_NULL(mem->name))
		return PTR_ERR(mem->name);

	spi_set_drvdata(spi, mem);

	return memdrv->probe(mem);
}

static int spi_mem_remove(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->remove)
		return memdrv->remove(mem);

	return 0;
}

static void spi_mem_shutdown(struct spi_device *spi)
{
	struct spi_mem_driver *memdrv = to_spi_mem_drv(spi->dev.driver);
	struct spi_mem *mem = spi_get_drvdata(spi);

	if (memdrv->shutdown)
		memdrv->shutdown(mem);
}

/**
 * spi_mem_driver_register_with_owner() - Register a SPI memory driver
 * @memdrv: the SPI memory driver to register
 * @owner: the owner of this driver
 *
 * Registers a SPI memory driver.
 *
 * Return: 0 in case of success, a negative error code otherwise.
 */
int spi_mem_driver_register_with_owner(struct spi_mem_driver *memdrv,
				       struct module *owner)
{
	memdrv->spidrv.probe = spi_mem_probe;
	memdrv->spidrv.remove = spi_mem_remove;
	memdrv->spidrv.shutdown = spi_mem_shutdown;

	return __spi_register_driver(owner, &memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_register_with_owner);
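
/*
 * Illustrative sketch: a minimal SPI memory driver registration, using the
 * module_spi_mem_driver() helper from <linux/spi/spi-mem.h> (driver name and
 * callbacks are hypothetical):
 *
 *	static int foo_probe(struct spi_mem *mem)
 *	{
 *		// detect and register the memory device
 *		return 0;
 *	}
 *
 *	static struct spi_mem_driver foo_driver = {
 *		.spidrv = {
 *			.driver = {
 *				.name = "foo-flash",
 *			},
 *		},
 *		.probe = foo_probe,
 *	};
 *	module_spi_mem_driver(foo_driver);
 */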

/**
 * spi_mem_driver_unregister() - Unregister a SPI memory driver
 * @memdrv: the SPI memory driver to unregister
 *
 * Unregisters a SPI memory driver.
 */
void spi_mem_driver_unregister(struct spi_mem_driver *memdrv)
{
	spi_unregister_driver(&memdrv->spidrv);
}
EXPORT_SYMBOL_GPL(spi_mem_driver_unregister);