1*4882a593Smuzhiyun /* SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun *
3*4882a593Smuzhiyun * Copyright (C) 2005 David Brownell
4*4882a593Smuzhiyun */
5*4882a593Smuzhiyun
6*4882a593Smuzhiyun #ifndef __LINUX_SPI_H
7*4882a593Smuzhiyun #define __LINUX_SPI_H
8*4882a593Smuzhiyun
9*4882a593Smuzhiyun #include <linux/device.h>
10*4882a593Smuzhiyun #include <linux/mod_devicetable.h>
11*4882a593Smuzhiyun #include <linux/slab.h>
12*4882a593Smuzhiyun #include <linux/kthread.h>
13*4882a593Smuzhiyun #include <linux/completion.h>
14*4882a593Smuzhiyun #include <linux/scatterlist.h>
15*4882a593Smuzhiyun #include <linux/gpio/consumer.h>
16*4882a593Smuzhiyun #include <linux/ptp_clock_kernel.h>
17*4882a593Smuzhiyun #include <linux/android_kabi.h>
18*4882a593Smuzhiyun
19*4882a593Smuzhiyun struct dma_chan;
20*4882a593Smuzhiyun struct property_entry;
21*4882a593Smuzhiyun struct spi_controller;
22*4882a593Smuzhiyun struct spi_transfer;
23*4882a593Smuzhiyun struct spi_controller_mem_ops;
24*4882a593Smuzhiyun
25*4882a593Smuzhiyun /*
26*4882a593Smuzhiyun * INTERFACES between SPI master-side drivers and SPI slave protocol handlers,
27*4882a593Smuzhiyun * and SPI infrastructure.
28*4882a593Smuzhiyun */
29*4882a593Smuzhiyun extern struct bus_type spi_bus_type;
30*4882a593Smuzhiyun
31*4882a593Smuzhiyun /**
32*4882a593Smuzhiyun * struct spi_statistics - statistics for spi transfers
33*4882a593Smuzhiyun * @lock: lock protecting this structure
34*4882a593Smuzhiyun *
35*4882a593Smuzhiyun * @messages: number of spi-messages handled
36*4882a593Smuzhiyun * @transfers: number of spi_transfers handled
37*4882a593Smuzhiyun * @errors: number of errors during spi_transfer
38*4882a593Smuzhiyun * @timedout: number of timeouts during spi_transfer
39*4882a593Smuzhiyun *
40*4882a593Smuzhiyun * @spi_sync: number of times spi_sync is used
41*4882a593Smuzhiyun * @spi_sync_immediate:
42*4882a593Smuzhiyun * number of times spi_sync is executed immediately
43*4882a593Smuzhiyun * in calling context without queuing and scheduling
44*4882a593Smuzhiyun * @spi_async: number of times spi_async is used
45*4882a593Smuzhiyun *
46*4882a593Smuzhiyun * @bytes: number of bytes transferred to/from device
47*4882a593Smuzhiyun * @bytes_tx: number of bytes sent to device
48*4882a593Smuzhiyun * @bytes_rx: number of bytes received from device
49*4882a593Smuzhiyun *
50*4882a593Smuzhiyun * @transfer_bytes_histo:
 *	transfer bytes histogram
52*4882a593Smuzhiyun *
53*4882a593Smuzhiyun * @transfers_split_maxsize:
54*4882a593Smuzhiyun * number of transfers that have been split because of
55*4882a593Smuzhiyun * maxsize limit
56*4882a593Smuzhiyun */
struct spi_statistics {
	spinlock_t		lock; /* protects every counter in this structure */

	unsigned long		messages;	/* spi_messages handled */
	unsigned long		transfers;	/* spi_transfers handled */
	unsigned long		errors;		/* errors during transfer */
	unsigned long		timedout;	/* timeouts during transfer */

	unsigned long		spi_sync;	/* spi_sync() invocations */
	unsigned long		spi_sync_immediate; /* spi_sync() run in calling context */
	unsigned long		spi_async;	/* spi_async() invocations */

	unsigned long long	bytes;		/* total bytes transferred */
	unsigned long long	bytes_rx;	/* bytes received from device */
	unsigned long long	bytes_tx;	/* bytes sent to device */

#define SPI_STATISTICS_HISTO_SIZE 17
	/* histogram of transfer sizes (bucketed; see spi.c for bucketing) */
	unsigned long transfer_bytes_histo[SPI_STATISTICS_HISTO_SIZE];

	/* transfers that were split because of a maxsize limit */
	unsigned long transfers_split_maxsize;
};
78*4882a593Smuzhiyun
/* Account one completed @xfer on @ctlr into @stats (counters + histogram). */
void spi_statistics_add_transfer_stats(struct spi_statistics *stats,
				       struct spi_transfer *xfer,
				       struct spi_controller *ctlr);
82*4882a593Smuzhiyun
/*
 * SPI_STATISTICS_ADD_TO_FIELD - add @count to @stats->@field under the
 * statistics spinlock (IRQ-safe, so usable from any context).
 *
 * @count is parenthesized in the expansion so that a compound argument
 * such as "a + b" is added as a whole rather than reparsed by precedence.
 */
#define SPI_STATISTICS_ADD_TO_FIELD(stats, field, count)	\
	do {							\
		unsigned long flags;				\
		spin_lock_irqsave(&(stats)->lock, flags);	\
		(stats)->field += (count);			\
		spin_unlock_irqrestore(&(stats)->lock, flags);	\
	} while (0)
90*4882a593Smuzhiyun
/* Increment @stats->@field by one, taking the statistics lock. */
#define SPI_STATISTICS_INCREMENT_FIELD(stats, field)	\
	SPI_STATISTICS_ADD_TO_FIELD(stats, field, 1)
93*4882a593Smuzhiyun
94*4882a593Smuzhiyun /**
95*4882a593Smuzhiyun * struct spi_delay - SPI delay information
96*4882a593Smuzhiyun * @value: Value for the delay
97*4882a593Smuzhiyun * @unit: Unit for the delay
98*4882a593Smuzhiyun */
struct spi_delay {
#define SPI_DELAY_UNIT_USECS	0	/* microseconds */
#define SPI_DELAY_UNIT_NSECS	1	/* nanoseconds */
#define SPI_DELAY_UNIT_SCK	2	/* SPI clock cycles */
	u16	value;	/* delay amount, expressed in @unit units */
	u8	unit;	/* one of the SPI_DELAY_UNIT_* values above */
};
106*4882a593Smuzhiyun
/* Convert @_delay to nanoseconds; @xfer presumably supplies the clock rate
 * needed for SPI_DELAY_UNIT_SCK — confirm against the spi.c implementation.
 */
extern int spi_delay_to_ns(struct spi_delay *_delay, struct spi_transfer *xfer);
/* Perform the delay described by @_delay in the context of @xfer. */
extern int spi_delay_exec(struct spi_delay *_delay, struct spi_transfer *xfer);
109*4882a593Smuzhiyun
110*4882a593Smuzhiyun /**
111*4882a593Smuzhiyun * struct spi_device - Controller side proxy for an SPI slave device
112*4882a593Smuzhiyun * @dev: Driver model representation of the device.
113*4882a593Smuzhiyun * @controller: SPI controller used with the device.
114*4882a593Smuzhiyun * @master: Copy of controller, for backwards compatibility.
115*4882a593Smuzhiyun * @max_speed_hz: Maximum clock rate to be used with this chip
116*4882a593Smuzhiyun * (on this board); may be changed by the device's driver.
117*4882a593Smuzhiyun * The spi_transfer.speed_hz can override this for each transfer.
118*4882a593Smuzhiyun * @chip_select: Chipselect, distinguishing chips handled by @controller.
119*4882a593Smuzhiyun * @mode: The spi mode defines how data is clocked out and in.
120*4882a593Smuzhiyun * This may be changed by the device's driver.
121*4882a593Smuzhiyun * The "active low" default for chipselect mode can be overridden
122*4882a593Smuzhiyun * (by specifying SPI_CS_HIGH) as can the "MSB first" default for
123*4882a593Smuzhiyun * each word in a transfer (by specifying SPI_LSB_FIRST).
124*4882a593Smuzhiyun * @bits_per_word: Data transfers involve one or more words; word sizes
125*4882a593Smuzhiyun * like eight or 12 bits are common. In-memory wordsizes are
126*4882a593Smuzhiyun * powers of two bytes (e.g. 20 bit samples use 32 bits).
127*4882a593Smuzhiyun * This may be changed by the device's driver, or left at the
128*4882a593Smuzhiyun * default (0) indicating protocol words are eight bit bytes.
129*4882a593Smuzhiyun * The spi_transfer.bits_per_word can override this for each transfer.
130*4882a593Smuzhiyun * @rt: Make the pump thread real time priority.
131*4882a593Smuzhiyun * @irq: Negative, or the number passed to request_irq() to receive
132*4882a593Smuzhiyun * interrupts from this device.
133*4882a593Smuzhiyun * @controller_state: Controller's runtime state
134*4882a593Smuzhiyun * @controller_data: Board-specific definitions for controller, such as
135*4882a593Smuzhiyun * FIFO initialization parameters; from board_info.controller_data
136*4882a593Smuzhiyun * @modalias: Name of the driver to use with this device, or an alias
137*4882a593Smuzhiyun * for that name. This appears in the sysfs "modalias" attribute
138*4882a593Smuzhiyun * for driver coldplugging, and in uevents used for hotplugging
139*4882a593Smuzhiyun * @driver_override: If the name of a driver is written to this attribute, then
140*4882a593Smuzhiyun * the device will bind to the named driver and only the named driver.
141*4882a593Smuzhiyun * @cs_gpio: LEGACY: gpio number of the chipselect line (optional, -ENOENT when
142*4882a593Smuzhiyun * not using a GPIO line) use cs_gpiod in new drivers by opting in on
143*4882a593Smuzhiyun * the spi_master.
144*4882a593Smuzhiyun * @cs_gpiod: gpio descriptor of the chipselect line (optional, NULL when
145*4882a593Smuzhiyun * not using a GPIO line)
146*4882a593Smuzhiyun * @word_delay: delay to be inserted between consecutive
147*4882a593Smuzhiyun * words of a transfer
148*4882a593Smuzhiyun *
149*4882a593Smuzhiyun * @statistics: statistics for the spi_device
150*4882a593Smuzhiyun *
151*4882a593Smuzhiyun * A @spi_device is used to interchange data between an SPI slave
152*4882a593Smuzhiyun * (usually a discrete chip) and CPU memory.
153*4882a593Smuzhiyun *
154*4882a593Smuzhiyun * In @dev, the platform_data is used to hold information about this
155*4882a593Smuzhiyun * device that's meaningful to the device's protocol driver, but not
156*4882a593Smuzhiyun * to its controller. One example might be an identifier for a chip
157*4882a593Smuzhiyun * variant with slightly different functionality; another might be
158*4882a593Smuzhiyun * information about how this particular board wires the chip's pins.
159*4882a593Smuzhiyun */
struct spi_device {
	struct device		dev;
	struct spi_controller	*controller;
	struct spi_controller	*master;	/* compatibility layer */
	u32			max_speed_hz;
	u8			chip_select;
	u8			bits_per_word;
	bool			rt;	/* run the message pump with RT priority */
	u32			mode;
#define SPI_CPHA	0x01			/* clock phase */
#define SPI_CPOL	0x02			/* clock polarity */
#define SPI_MODE_0	(0|0)			/* (original MicroWire) */
#define SPI_MODE_1	(0|SPI_CPHA)
#define SPI_MODE_2	(SPI_CPOL|0)
#define SPI_MODE_3	(SPI_CPOL|SPI_CPHA)
#define SPI_CS_HIGH	0x04			/* chipselect active high? */
#define SPI_LSB_FIRST	0x08			/* per-word bits-on-wire */
#define SPI_3WIRE	0x10			/* SI/SO signals shared */
#define SPI_LOOP	0x20			/* loopback mode */
#define SPI_NO_CS	0x40			/* 1 dev/bus, no chipselect */
#define SPI_READY	0x80			/* slave pulls low to pause */
#define SPI_TX_DUAL	0x100			/* transmit with 2 wires */
#define SPI_TX_QUAD	0x200			/* transmit with 4 wires */
#define SPI_RX_DUAL	0x400			/* receive with 2 wires */
#define SPI_RX_QUAD	0x800			/* receive with 4 wires */
#define SPI_CS_WORD	0x1000			/* toggle cs after each word */
#define SPI_TX_OCTAL	0x2000			/* transmit with 8 wires */
#define SPI_RX_OCTAL	0x4000			/* receive with 8 wires */
#define SPI_3WIRE_HIZ	0x8000			/* high impedance turnaround */
	int			irq;
	void			*controller_state;
	void			*controller_data;
	char			modalias[SPI_NAME_SIZE];
	const char		*driver_override;
	int			cs_gpio;	/* LEGACY: chip select gpio */
	struct gpio_desc	*cs_gpiod;	/* chip select gpio desc */
	struct spi_delay	word_delay;	/* inter-word delay */

	/* the statistics */
	struct spi_statistics	statistics;

	/* reserved padding for Android KABI compatibility; do not use */
	ANDROID_KABI_RESERVE(1);
	ANDROID_KABI_RESERVE(2);

	/*
	 * likely need more hooks for more protocol options affecting how
	 * the controller talks to each chip, like:
	 *  - memory packing (12 bit samples into low bits, others zeroed)
	 *  - priority
	 *  - chipselect delays
	 *  - ...
	 */
};
213*4882a593Smuzhiyun
to_spi_device(struct device * dev)214*4882a593Smuzhiyun static inline struct spi_device *to_spi_device(struct device *dev)
215*4882a593Smuzhiyun {
216*4882a593Smuzhiyun return dev ? container_of(dev, struct spi_device, dev) : NULL;
217*4882a593Smuzhiyun }
218*4882a593Smuzhiyun
219*4882a593Smuzhiyun /* most drivers won't need to care about device refcounting */
spi_dev_get(struct spi_device * spi)220*4882a593Smuzhiyun static inline struct spi_device *spi_dev_get(struct spi_device *spi)
221*4882a593Smuzhiyun {
222*4882a593Smuzhiyun return (spi && get_device(&spi->dev)) ? spi : NULL;
223*4882a593Smuzhiyun }
224*4882a593Smuzhiyun
spi_dev_put(struct spi_device * spi)225*4882a593Smuzhiyun static inline void spi_dev_put(struct spi_device *spi)
226*4882a593Smuzhiyun {
227*4882a593Smuzhiyun if (spi)
228*4882a593Smuzhiyun put_device(&spi->dev);
229*4882a593Smuzhiyun }
230*4882a593Smuzhiyun
231*4882a593Smuzhiyun /* ctldata is for the bus_controller driver's runtime state */
static inline void *spi_get_ctldata(struct spi_device *spi)
{
	/* Return the controller driver's per-device runtime state. */
	return spi->controller_state;
}
236*4882a593Smuzhiyun
static inline void spi_set_ctldata(struct spi_device *spi, void *state)
{
	/* Stash the controller driver's per-device runtime state. */
	spi->controller_state = state;
}
241*4882a593Smuzhiyun
242*4882a593Smuzhiyun /* device driver data */
243*4882a593Smuzhiyun
static inline void spi_set_drvdata(struct spi_device *spi, void *data)
{
	/* Attach protocol-driver private data to the spi device. */
	dev_set_drvdata(&spi->dev, data);
}
248*4882a593Smuzhiyun
static inline void *spi_get_drvdata(struct spi_device *spi)
{
	/* Retrieve the protocol-driver private data set by spi_set_drvdata(). */
	return dev_get_drvdata(&spi->dev);
}
253*4882a593Smuzhiyun
254*4882a593Smuzhiyun struct spi_message;
255*4882a593Smuzhiyun struct spi_transfer;
256*4882a593Smuzhiyun
257*4882a593Smuzhiyun /**
258*4882a593Smuzhiyun * struct spi_driver - Host side "protocol" driver
259*4882a593Smuzhiyun * @id_table: List of SPI devices supported by this driver
260*4882a593Smuzhiyun * @probe: Binds this driver to the spi device. Drivers can verify
261*4882a593Smuzhiyun * that the device is actually present, and may need to configure
262*4882a593Smuzhiyun * characteristics (such as bits_per_word) which weren't needed for
263*4882a593Smuzhiyun * the initial configuration done during system setup.
264*4882a593Smuzhiyun * @remove: Unbinds this driver from the spi device
265*4882a593Smuzhiyun * @shutdown: Standard shutdown callback used during system state
266*4882a593Smuzhiyun * transitions such as powerdown/halt and kexec
267*4882a593Smuzhiyun * @driver: SPI device drivers should initialize the name and owner
268*4882a593Smuzhiyun * field of this structure.
269*4882a593Smuzhiyun *
270*4882a593Smuzhiyun * This represents the kind of device driver that uses SPI messages to
271*4882a593Smuzhiyun * interact with the hardware at the other end of a SPI link. It's called
272*4882a593Smuzhiyun * a "protocol" driver because it works through messages rather than talking
273*4882a593Smuzhiyun * directly to SPI hardware (which is what the underlying SPI controller
274*4882a593Smuzhiyun * driver does to pass those messages). These protocols are defined in the
275*4882a593Smuzhiyun * specification for the device(s) supported by the driver.
276*4882a593Smuzhiyun *
277*4882a593Smuzhiyun * As a rule, those device protocols represent the lowest level interface
278*4882a593Smuzhiyun * supported by a driver, and it will support upper level interfaces too.
279*4882a593Smuzhiyun * Examples of such upper levels include frameworks like MTD, networking,
280*4882a593Smuzhiyun * MMC, RTC, filesystem character device nodes, and hardware monitoring.
281*4882a593Smuzhiyun */
struct spi_driver {
	const struct spi_device_id *id_table;	/* devices this driver supports */
	int	(*probe)(struct spi_device *spi);	/* bind driver to device */
	int	(*remove)(struct spi_device *spi);	/* unbind driver from device */
	void	(*shutdown)(struct spi_device *spi);	/* powerdown/halt/kexec hook */
	struct device_driver	driver;	/* set .name and .owner before registering */

	/* reserved padding for Android KABI compatibility; do not use */
	ANDROID_KABI_RESERVE(1);
};
291*4882a593Smuzhiyun
to_spi_driver(struct device_driver * drv)292*4882a593Smuzhiyun static inline struct spi_driver *to_spi_driver(struct device_driver *drv)
293*4882a593Smuzhiyun {
294*4882a593Smuzhiyun return drv ? container_of(drv, struct spi_driver, driver) : NULL;
295*4882a593Smuzhiyun }
296*4882a593Smuzhiyun
297*4882a593Smuzhiyun extern int __spi_register_driver(struct module *owner, struct spi_driver *sdrv);
298*4882a593Smuzhiyun
299*4882a593Smuzhiyun /**
300*4882a593Smuzhiyun * spi_unregister_driver - reverse effect of spi_register_driver
301*4882a593Smuzhiyun * @sdrv: the driver to unregister
302*4882a593Smuzhiyun * Context: can sleep
303*4882a593Smuzhiyun */
spi_unregister_driver(struct spi_driver * sdrv)304*4882a593Smuzhiyun static inline void spi_unregister_driver(struct spi_driver *sdrv)
305*4882a593Smuzhiyun {
306*4882a593Smuzhiyun if (sdrv)
307*4882a593Smuzhiyun driver_unregister(&sdrv->driver);
308*4882a593Smuzhiyun }
309*4882a593Smuzhiyun
/*
 * Use a define (not an inline function) so THIS_MODULE resolves in the
 * caller's module, without include chaining to pull in <linux/module.h>.
 */
#define spi_register_driver(driver) \
	__spi_register_driver(THIS_MODULE, driver)
313*4882a593Smuzhiyun
/**
 * module_spi_driver() - Helper macro for registering a SPI driver
 * @__spi_driver: spi_driver struct
 *
 * Helper macro for SPI drivers which do not do anything special in module
 * init/exit. This eliminates a lot of boilerplate. Each module may only
 * use this macro once, and calling it replaces module_init() and
 * module_exit().
 */
#define module_spi_driver(__spi_driver) \
	module_driver(__spi_driver, spi_register_driver, \
			spi_unregister_driver)
325*4882a593Smuzhiyun
326*4882a593Smuzhiyun /**
327*4882a593Smuzhiyun * struct spi_controller - interface to SPI master or slave controller
328*4882a593Smuzhiyun * @dev: device interface to this driver
329*4882a593Smuzhiyun * @list: link with the global spi_controller list
330*4882a593Smuzhiyun * @bus_num: board-specific (and often SOC-specific) identifier for a
331*4882a593Smuzhiyun * given SPI controller.
332*4882a593Smuzhiyun * @num_chipselect: chipselects are used to distinguish individual
333*4882a593Smuzhiyun * SPI slaves, and are numbered from zero to num_chipselects.
334*4882a593Smuzhiyun * each slave has a chipselect signal, but it's common that not
335*4882a593Smuzhiyun * every chipselect is connected to a slave.
336*4882a593Smuzhiyun * @dma_alignment: SPI controller constraint on DMA buffers alignment.
337*4882a593Smuzhiyun * @mode_bits: flags understood by this controller driver
338*4882a593Smuzhiyun * @buswidth_override_bits: flags to override for this controller driver
339*4882a593Smuzhiyun * @bits_per_word_mask: A mask indicating which values of bits_per_word are
340*4882a593Smuzhiyun * supported by the driver. Bit n indicates that a bits_per_word n+1 is
341*4882a593Smuzhiyun * supported. If set, the SPI core will reject any transfer with an
342*4882a593Smuzhiyun * unsupported bits_per_word. If not set, this value is simply ignored,
343*4882a593Smuzhiyun * and it's up to the individual driver to perform any validation.
344*4882a593Smuzhiyun * @min_speed_hz: Lowest supported transfer speed
345*4882a593Smuzhiyun * @max_speed_hz: Highest supported transfer speed
346*4882a593Smuzhiyun * @flags: other constraints relevant to this driver
347*4882a593Smuzhiyun * @slave: indicates that this is an SPI slave controller
348*4882a593Smuzhiyun * @max_transfer_size: function that returns the max transfer size for
349*4882a593Smuzhiyun * a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
350*4882a593Smuzhiyun * @max_message_size: function that returns the max message size for
351*4882a593Smuzhiyun * a &spi_device; may be %NULL, so the default %SIZE_MAX will be used.
352*4882a593Smuzhiyun * @io_mutex: mutex for physical bus access
353*4882a593Smuzhiyun * @bus_lock_spinlock: spinlock for SPI bus locking
354*4882a593Smuzhiyun * @bus_lock_mutex: mutex for exclusion of multiple callers
355*4882a593Smuzhiyun * @bus_lock_flag: indicates that the SPI bus is locked for exclusive use
356*4882a593Smuzhiyun * @setup: updates the device mode and clocking records used by a
357*4882a593Smuzhiyun * device's SPI controller; protocol code may call this. This
358*4882a593Smuzhiyun * must fail if an unrecognized or unsupported mode is requested.
359*4882a593Smuzhiyun * It's always safe to call this unless transfers are pending on
360*4882a593Smuzhiyun * the device whose settings are being modified.
361*4882a593Smuzhiyun * @set_cs_timing: optional hook for SPI devices to request SPI master
362*4882a593Smuzhiyun * controller for configuring specific CS setup time, hold time and inactive
 *	delay in terms of clock counts
364*4882a593Smuzhiyun * @transfer: adds a message to the controller's transfer queue.
365*4882a593Smuzhiyun * @cleanup: frees controller-specific state
366*4882a593Smuzhiyun * @can_dma: determine whether this controller supports DMA
367*4882a593Smuzhiyun * @queued: whether this controller is providing an internal message queue
368*4882a593Smuzhiyun * @kworker: pointer to thread struct for message pump
369*4882a593Smuzhiyun * @pump_messages: work struct for scheduling work to the message pump
 * @queue_lock: spinlock to synchronise access to message queue
371*4882a593Smuzhiyun * @queue: message queue
372*4882a593Smuzhiyun * @idling: the device is entering idle state
373*4882a593Smuzhiyun * @cur_msg: the currently in-flight message
374*4882a593Smuzhiyun * @cur_msg_prepared: spi_prepare_message was called for the currently
375*4882a593Smuzhiyun * in-flight message
376*4882a593Smuzhiyun * @cur_msg_mapped: message has been mapped for DMA
377*4882a593Smuzhiyun * @last_cs_enable: was enable true on the last call to set_cs.
378*4882a593Smuzhiyun * @last_cs_mode_high: was (mode & SPI_CS_HIGH) true on the last call to set_cs.
379*4882a593Smuzhiyun * @xfer_completion: used by core transfer_one_message()
380*4882a593Smuzhiyun * @busy: message pump is busy
381*4882a593Smuzhiyun * @running: message pump is running
382*4882a593Smuzhiyun * @rt: whether this queue is set to run as a realtime task
383*4882a593Smuzhiyun * @auto_runtime_pm: the core should ensure a runtime PM reference is held
384*4882a593Smuzhiyun * while the hardware is prepared, using the parent
385*4882a593Smuzhiyun * device for the spidev
386*4882a593Smuzhiyun * @max_dma_len: Maximum length of a DMA transfer for the device.
387*4882a593Smuzhiyun * @prepare_transfer_hardware: a message will soon arrive from the queue
388*4882a593Smuzhiyun * so the subsystem requests the driver to prepare the transfer hardware
389*4882a593Smuzhiyun * by issuing this call
390*4882a593Smuzhiyun * @transfer_one_message: the subsystem calls the driver to transfer a single
391*4882a593Smuzhiyun * message while queuing transfers that arrive in the meantime. When the
392*4882a593Smuzhiyun * driver is finished with this message, it must call
393*4882a593Smuzhiyun * spi_finalize_current_message() so the subsystem can issue the next
394*4882a593Smuzhiyun * message
395*4882a593Smuzhiyun * @unprepare_transfer_hardware: there are currently no more messages on the
396*4882a593Smuzhiyun * queue so the subsystem notifies the driver that it may relax the
397*4882a593Smuzhiyun * hardware by issuing this call
398*4882a593Smuzhiyun *
399*4882a593Smuzhiyun * @set_cs: set the logic level of the chip select line. May be called
400*4882a593Smuzhiyun * from interrupt context.
401*4882a593Smuzhiyun * @prepare_message: set up the controller to transfer a single message,
402*4882a593Smuzhiyun * for example doing DMA mapping. Called from threaded
403*4882a593Smuzhiyun * context.
404*4882a593Smuzhiyun * @transfer_one: transfer a single spi_transfer.
405*4882a593Smuzhiyun *
406*4882a593Smuzhiyun * - return 0 if the transfer is finished,
407*4882a593Smuzhiyun * - return 1 if the transfer is still in progress. When
408*4882a593Smuzhiyun * the driver is finished with this transfer it must
409*4882a593Smuzhiyun * call spi_finalize_current_transfer() so the subsystem
410*4882a593Smuzhiyun * can issue the next transfer. Note: transfer_one and
411*4882a593Smuzhiyun * transfer_one_message are mutually exclusive; when both
412*4882a593Smuzhiyun * are set, the generic subsystem does not call your
413*4882a593Smuzhiyun * transfer_one callback.
414*4882a593Smuzhiyun * @handle_err: the subsystem calls the driver to handle an error that occurs
415*4882a593Smuzhiyun * in the generic implementation of transfer_one_message().
416*4882a593Smuzhiyun * @mem_ops: optimized/dedicated operations for interactions with SPI memory.
417*4882a593Smuzhiyun * This field is optional and should only be implemented if the
418*4882a593Smuzhiyun * controller has native support for memory like operations.
419*4882a593Smuzhiyun * @unprepare_message: undo any work done by prepare_message().
420*4882a593Smuzhiyun * @slave_abort: abort the ongoing transfer request on an SPI slave controller
421*4882a593Smuzhiyun * @cs_setup: delay to be introduced by the controller after CS is asserted
422*4882a593Smuzhiyun * @cs_hold: delay to be introduced by the controller before CS is deasserted
423*4882a593Smuzhiyun * @cs_inactive: delay to be introduced by the controller after CS is
424*4882a593Smuzhiyun * deasserted. If @cs_change_delay is used from @spi_transfer, then the
425*4882a593Smuzhiyun * two delays will be added up.
426*4882a593Smuzhiyun * @cs_gpios: LEGACY: array of GPIO descs to use as chip select lines; one per
427*4882a593Smuzhiyun * CS number. Any individual value may be -ENOENT for CS lines that
428*4882a593Smuzhiyun * are not GPIOs (driven by the SPI controller itself). Use the cs_gpiods
429*4882a593Smuzhiyun * in new drivers.
430*4882a593Smuzhiyun * @cs_gpiods: Array of GPIO descs to use as chip select lines; one per CS
431*4882a593Smuzhiyun * number. Any individual value may be NULL for CS lines that
432*4882a593Smuzhiyun * are not GPIOs (driven by the SPI controller itself).
433*4882a593Smuzhiyun * @use_gpio_descriptors: Turns on the code in the SPI core to parse and grab
434*4882a593Smuzhiyun * GPIO descriptors rather than using global GPIO numbers grabbed by the
435*4882a593Smuzhiyun * driver. This will fill in @cs_gpiods and @cs_gpios should not be used,
436*4882a593Smuzhiyun * and SPI devices will have the cs_gpiod assigned rather than cs_gpio.
437*4882a593Smuzhiyun * @unused_native_cs: When cs_gpiods is used, spi_register_controller() will
438*4882a593Smuzhiyun * fill in this field with the first unused native CS, to be used by SPI
439*4882a593Smuzhiyun * controller drivers that need to drive a native CS when using GPIO CS.
440*4882a593Smuzhiyun * @max_native_cs: When cs_gpiods is used, and this field is filled in,
441*4882a593Smuzhiyun * spi_register_controller() will validate all native CS (including the
442*4882a593Smuzhiyun * unused native CS) against this value.
443*4882a593Smuzhiyun * @statistics: statistics for the spi_controller
444*4882a593Smuzhiyun * @dma_tx: DMA transmit channel
445*4882a593Smuzhiyun * @dma_rx: DMA receive channel
446*4882a593Smuzhiyun * @dummy_rx: dummy receive buffer for full-duplex devices
447*4882a593Smuzhiyun * @dummy_tx: dummy transmit buffer for full-duplex devices
 * @fw_translate_cs: If the boot firmware uses a different numbering scheme
 *	than what Linux expects, this optional hook can be used to translate
 *	between the two.
451*4882a593Smuzhiyun * @ptp_sts_supported: If the driver sets this to true, it must provide a
452*4882a593Smuzhiyun * time snapshot in @spi_transfer->ptp_sts as close as possible to the
453*4882a593Smuzhiyun * moment in time when @spi_transfer->ptp_sts_word_pre and
454*4882a593Smuzhiyun * @spi_transfer->ptp_sts_word_post were transmitted.
455*4882a593Smuzhiyun * If the driver does not set this, the SPI core takes the snapshot as
456*4882a593Smuzhiyun * close to the driver hand-over as possible.
457*4882a593Smuzhiyun * @irq_flags: Interrupt enable state during PTP system timestamping
458*4882a593Smuzhiyun * @fallback: fallback to pio if dma transfer return failure with
459*4882a593Smuzhiyun * SPI_TRANS_FAIL_NO_START.
460*4882a593Smuzhiyun *
461*4882a593Smuzhiyun * Each SPI controller can communicate with one or more @spi_device
462*4882a593Smuzhiyun * children. These make a small bus, sharing MOSI, MISO and SCK signals
463*4882a593Smuzhiyun * but not chip select signals. Each device may be configured to use a
464*4882a593Smuzhiyun * different clock rate, since those shared signals are ignored unless
465*4882a593Smuzhiyun * the chip is selected.
466*4882a593Smuzhiyun *
467*4882a593Smuzhiyun * The driver for an SPI controller manages access to those devices through
468*4882a593Smuzhiyun * a queue of spi_message transactions, copying data between CPU memory and
469*4882a593Smuzhiyun * an SPI slave device. For each such message it queues, it calls the
470*4882a593Smuzhiyun * message's completion function when the transaction completes.
471*4882a593Smuzhiyun */
472*4882a593Smuzhiyun struct spi_controller {
473*4882a593Smuzhiyun struct device dev;
474*4882a593Smuzhiyun
475*4882a593Smuzhiyun struct list_head list;
476*4882a593Smuzhiyun
477*4882a593Smuzhiyun /* other than negative (== assign one dynamically), bus_num is fully
478*4882a593Smuzhiyun * board-specific. usually that simplifies to being SOC-specific.
479*4882a593Smuzhiyun * example: one SOC has three SPI controllers, numbered 0..2,
480*4882a593Smuzhiyun * and one board's schematics might show it using SPI-2. software
481*4882a593Smuzhiyun * would normally use bus_num=2 for that controller.
482*4882a593Smuzhiyun */
483*4882a593Smuzhiyun s16 bus_num;
484*4882a593Smuzhiyun
485*4882a593Smuzhiyun /* chipselects will be integral to many controllers; some others
486*4882a593Smuzhiyun * might use board-specific GPIOs.
487*4882a593Smuzhiyun */
488*4882a593Smuzhiyun u16 num_chipselect;
489*4882a593Smuzhiyun
490*4882a593Smuzhiyun /* some SPI controllers pose alignment requirements on DMAable
491*4882a593Smuzhiyun * buffers; let protocol drivers know about these requirements.
492*4882a593Smuzhiyun */
493*4882a593Smuzhiyun u16 dma_alignment;
494*4882a593Smuzhiyun
495*4882a593Smuzhiyun /* spi_device.mode flags understood by this controller driver */
496*4882a593Smuzhiyun u32 mode_bits;
497*4882a593Smuzhiyun
498*4882a593Smuzhiyun /* spi_device.mode flags override flags for this controller */
499*4882a593Smuzhiyun u32 buswidth_override_bits;
500*4882a593Smuzhiyun
501*4882a593Smuzhiyun /* bitmask of supported bits_per_word for transfers */
502*4882a593Smuzhiyun u32 bits_per_word_mask;
503*4882a593Smuzhiyun #define SPI_BPW_MASK(bits) BIT((bits) - 1)
504*4882a593Smuzhiyun #define SPI_BPW_RANGE_MASK(min, max) GENMASK((max) - 1, (min) - 1)
505*4882a593Smuzhiyun
506*4882a593Smuzhiyun /* limits on transfer speed */
507*4882a593Smuzhiyun u32 min_speed_hz;
508*4882a593Smuzhiyun u32 max_speed_hz;
509*4882a593Smuzhiyun
510*4882a593Smuzhiyun /* other constraints relevant to this driver */
511*4882a593Smuzhiyun u16 flags;
512*4882a593Smuzhiyun #define SPI_CONTROLLER_HALF_DUPLEX BIT(0) /* can't do full duplex */
513*4882a593Smuzhiyun #define SPI_CONTROLLER_NO_RX BIT(1) /* can't do buffer read */
514*4882a593Smuzhiyun #define SPI_CONTROLLER_NO_TX BIT(2) /* can't do buffer write */
515*4882a593Smuzhiyun #define SPI_CONTROLLER_MUST_RX BIT(3) /* requires rx */
516*4882a593Smuzhiyun #define SPI_CONTROLLER_MUST_TX BIT(4) /* requires tx */
517*4882a593Smuzhiyun
518*4882a593Smuzhiyun #define SPI_MASTER_GPIO_SS BIT(5) /* GPIO CS must select slave */
519*4882a593Smuzhiyun
520*4882a593Smuzhiyun /* flag indicating this is an SPI slave controller */
521*4882a593Smuzhiyun bool slave;
522*4882a593Smuzhiyun
523*4882a593Smuzhiyun /*
524*4882a593Smuzhiyun * on some hardware transfer / message size may be constrained
525*4882a593Smuzhiyun * the limit may depend on device transfer settings
526*4882a593Smuzhiyun */
527*4882a593Smuzhiyun size_t (*max_transfer_size)(struct spi_device *spi);
528*4882a593Smuzhiyun size_t (*max_message_size)(struct spi_device *spi);
529*4882a593Smuzhiyun
530*4882a593Smuzhiyun /* I/O mutex */
531*4882a593Smuzhiyun struct mutex io_mutex;
532*4882a593Smuzhiyun
533*4882a593Smuzhiyun /* lock and mutex for SPI bus locking */
534*4882a593Smuzhiyun spinlock_t bus_lock_spinlock;
535*4882a593Smuzhiyun struct mutex bus_lock_mutex;
536*4882a593Smuzhiyun
537*4882a593Smuzhiyun /* flag indicating that the SPI bus is locked for exclusive use */
538*4882a593Smuzhiyun bool bus_lock_flag;
539*4882a593Smuzhiyun
540*4882a593Smuzhiyun /* Setup mode and clock, etc (spi driver may call many times).
541*4882a593Smuzhiyun *
542*4882a593Smuzhiyun * IMPORTANT: this may be called when transfers to another
543*4882a593Smuzhiyun * device are active. DO NOT UPDATE SHARED REGISTERS in ways
544*4882a593Smuzhiyun * which could break those transfers.
545*4882a593Smuzhiyun */
546*4882a593Smuzhiyun int (*setup)(struct spi_device *spi);
547*4882a593Smuzhiyun
548*4882a593Smuzhiyun /*
549*4882a593Smuzhiyun * set_cs_timing() method is for SPI controllers that support
550*4882a593Smuzhiyun * configuring CS timing.
551*4882a593Smuzhiyun *
552*4882a593Smuzhiyun * This hook allows SPI client drivers to request SPI controllers
553*4882a593Smuzhiyun * to configure specific CS timing through spi_set_cs_timing() after
554*4882a593Smuzhiyun * spi_setup().
555*4882a593Smuzhiyun */
556*4882a593Smuzhiyun int (*set_cs_timing)(struct spi_device *spi, struct spi_delay *setup,
557*4882a593Smuzhiyun struct spi_delay *hold, struct spi_delay *inactive);
558*4882a593Smuzhiyun
559*4882a593Smuzhiyun /* bidirectional bulk transfers
560*4882a593Smuzhiyun *
561*4882a593Smuzhiyun * + The transfer() method may not sleep; its main role is
562*4882a593Smuzhiyun * just to add the message to the queue.
563*4882a593Smuzhiyun * + For now there's no remove-from-queue operation, or
564*4882a593Smuzhiyun * any other request management
565*4882a593Smuzhiyun * + To a given spi_device, message queueing is pure fifo
566*4882a593Smuzhiyun *
567*4882a593Smuzhiyun * + The controller's main job is to process its message queue,
568*4882a593Smuzhiyun * selecting a chip (for masters), then transferring data
569*4882a593Smuzhiyun * + If there are multiple spi_device children, the i/o queue
570*4882a593Smuzhiyun * arbitration algorithm is unspecified (round robin, fifo,
571*4882a593Smuzhiyun * priority, reservations, preemption, etc)
572*4882a593Smuzhiyun *
573*4882a593Smuzhiyun * + Chipselect stays active during the entire message
574*4882a593Smuzhiyun * (unless modified by spi_transfer.cs_change != 0).
575*4882a593Smuzhiyun * + The message transfers use clock and SPI mode parameters
576*4882a593Smuzhiyun * previously established by setup() for this device
577*4882a593Smuzhiyun */
578*4882a593Smuzhiyun int (*transfer)(struct spi_device *spi,
579*4882a593Smuzhiyun struct spi_message *mesg);
580*4882a593Smuzhiyun
581*4882a593Smuzhiyun /* called on release() to free memory provided by spi_controller */
582*4882a593Smuzhiyun void (*cleanup)(struct spi_device *spi);
583*4882a593Smuzhiyun
584*4882a593Smuzhiyun /*
585*4882a593Smuzhiyun * Used to enable core support for DMA handling, if can_dma()
586*4882a593Smuzhiyun * exists and returns true then the transfer will be mapped
587*4882a593Smuzhiyun * prior to transfer_one() being called. The driver should
588*4882a593Smuzhiyun * not modify or store xfer and dma_tx and dma_rx must be set
589*4882a593Smuzhiyun * while the device is prepared.
590*4882a593Smuzhiyun */
591*4882a593Smuzhiyun bool (*can_dma)(struct spi_controller *ctlr,
592*4882a593Smuzhiyun struct spi_device *spi,
593*4882a593Smuzhiyun struct spi_transfer *xfer);
594*4882a593Smuzhiyun
595*4882a593Smuzhiyun /*
596*4882a593Smuzhiyun * These hooks are for drivers that want to use the generic
597*4882a593Smuzhiyun * controller transfer queueing mechanism. If these are used, the
598*4882a593Smuzhiyun * transfer() function above must NOT be specified by the driver.
599*4882a593Smuzhiyun * Over time we expect SPI drivers to be phased over to this API.
600*4882a593Smuzhiyun */
601*4882a593Smuzhiyun bool queued;
602*4882a593Smuzhiyun struct kthread_worker *kworker;
603*4882a593Smuzhiyun struct kthread_work pump_messages;
604*4882a593Smuzhiyun spinlock_t queue_lock;
605*4882a593Smuzhiyun struct list_head queue;
606*4882a593Smuzhiyun struct spi_message *cur_msg;
607*4882a593Smuzhiyun bool idling;
608*4882a593Smuzhiyun bool busy;
609*4882a593Smuzhiyun bool running;
610*4882a593Smuzhiyun bool rt;
611*4882a593Smuzhiyun bool auto_runtime_pm;
612*4882a593Smuzhiyun bool cur_msg_prepared;
613*4882a593Smuzhiyun bool cur_msg_mapped;
614*4882a593Smuzhiyun bool last_cs_enable;
615*4882a593Smuzhiyun bool last_cs_mode_high;
616*4882a593Smuzhiyun bool fallback;
617*4882a593Smuzhiyun struct completion xfer_completion;
618*4882a593Smuzhiyun size_t max_dma_len;
619*4882a593Smuzhiyun
620*4882a593Smuzhiyun int (*prepare_transfer_hardware)(struct spi_controller *ctlr);
621*4882a593Smuzhiyun int (*transfer_one_message)(struct spi_controller *ctlr,
622*4882a593Smuzhiyun struct spi_message *mesg);
623*4882a593Smuzhiyun int (*unprepare_transfer_hardware)(struct spi_controller *ctlr);
624*4882a593Smuzhiyun int (*prepare_message)(struct spi_controller *ctlr,
625*4882a593Smuzhiyun struct spi_message *message);
626*4882a593Smuzhiyun int (*unprepare_message)(struct spi_controller *ctlr,
627*4882a593Smuzhiyun struct spi_message *message);
628*4882a593Smuzhiyun int (*slave_abort)(struct spi_controller *ctlr);
629*4882a593Smuzhiyun
630*4882a593Smuzhiyun /*
631*4882a593Smuzhiyun * These hooks are for drivers that use a generic implementation
632*4882a593Smuzhiyun * of transfer_one_message() provided by the core.
633*4882a593Smuzhiyun */
634*4882a593Smuzhiyun void (*set_cs)(struct spi_device *spi, bool enable);
635*4882a593Smuzhiyun int (*transfer_one)(struct spi_controller *ctlr, struct spi_device *spi,
636*4882a593Smuzhiyun struct spi_transfer *transfer);
637*4882a593Smuzhiyun void (*handle_err)(struct spi_controller *ctlr,
638*4882a593Smuzhiyun struct spi_message *message);
639*4882a593Smuzhiyun
640*4882a593Smuzhiyun /* Optimized handlers for SPI memory-like operations. */
641*4882a593Smuzhiyun const struct spi_controller_mem_ops *mem_ops;
642*4882a593Smuzhiyun
643*4882a593Smuzhiyun /* CS delays */
644*4882a593Smuzhiyun struct spi_delay cs_setup;
645*4882a593Smuzhiyun struct spi_delay cs_hold;
646*4882a593Smuzhiyun struct spi_delay cs_inactive;
647*4882a593Smuzhiyun
648*4882a593Smuzhiyun /* gpio chip select */
649*4882a593Smuzhiyun int *cs_gpios;
650*4882a593Smuzhiyun struct gpio_desc **cs_gpiods;
651*4882a593Smuzhiyun bool use_gpio_descriptors;
652*4882a593Smuzhiyun // KABI fix up for 35f3f8504c3b ("spi: Switch to signed types for *_native_cs
653*4882a593Smuzhiyun // SPI controller fields") that showed up in 5.10.63
654*4882a593Smuzhiyun #ifdef __GENKSYMS__
655*4882a593Smuzhiyun u8 unused_native_cs;
656*4882a593Smuzhiyun u8 max_native_cs;
657*4882a593Smuzhiyun #else
658*4882a593Smuzhiyun s8 unused_native_cs;
659*4882a593Smuzhiyun s8 max_native_cs;
660*4882a593Smuzhiyun #endif
661*4882a593Smuzhiyun
662*4882a593Smuzhiyun /* statistics */
663*4882a593Smuzhiyun struct spi_statistics statistics;
664*4882a593Smuzhiyun
665*4882a593Smuzhiyun /* DMA channels for use with core dmaengine helpers */
666*4882a593Smuzhiyun struct dma_chan *dma_tx;
667*4882a593Smuzhiyun struct dma_chan *dma_rx;
668*4882a593Smuzhiyun
669*4882a593Smuzhiyun /* dummy data for full duplex devices */
670*4882a593Smuzhiyun void *dummy_rx;
671*4882a593Smuzhiyun void *dummy_tx;
672*4882a593Smuzhiyun
673*4882a593Smuzhiyun int (*fw_translate_cs)(struct spi_controller *ctlr, unsigned cs);
674*4882a593Smuzhiyun
675*4882a593Smuzhiyun /*
676*4882a593Smuzhiyun * Driver sets this field to indicate it is able to snapshot SPI
677*4882a593Smuzhiyun * transfers (needed e.g. for reading the time of POSIX clocks)
678*4882a593Smuzhiyun */
679*4882a593Smuzhiyun bool ptp_sts_supported;
680*4882a593Smuzhiyun
681*4882a593Smuzhiyun /* Interrupt enable state during PTP system timestamping */
682*4882a593Smuzhiyun unsigned long irq_flags;
683*4882a593Smuzhiyun
684*4882a593Smuzhiyun ANDROID_KABI_RESERVE(1);
685*4882a593Smuzhiyun ANDROID_KABI_RESERVE(2);
686*4882a593Smuzhiyun };
687*4882a593Smuzhiyun
spi_controller_get_devdata(struct spi_controller * ctlr)688*4882a593Smuzhiyun static inline void *spi_controller_get_devdata(struct spi_controller *ctlr)
689*4882a593Smuzhiyun {
690*4882a593Smuzhiyun return dev_get_drvdata(&ctlr->dev);
691*4882a593Smuzhiyun }
692*4882a593Smuzhiyun
spi_controller_set_devdata(struct spi_controller * ctlr,void * data)693*4882a593Smuzhiyun static inline void spi_controller_set_devdata(struct spi_controller *ctlr,
694*4882a593Smuzhiyun void *data)
695*4882a593Smuzhiyun {
696*4882a593Smuzhiyun dev_set_drvdata(&ctlr->dev, data);
697*4882a593Smuzhiyun }
698*4882a593Smuzhiyun
spi_controller_get(struct spi_controller * ctlr)699*4882a593Smuzhiyun static inline struct spi_controller *spi_controller_get(struct spi_controller *ctlr)
700*4882a593Smuzhiyun {
701*4882a593Smuzhiyun if (!ctlr || !get_device(&ctlr->dev))
702*4882a593Smuzhiyun return NULL;
703*4882a593Smuzhiyun return ctlr;
704*4882a593Smuzhiyun }
705*4882a593Smuzhiyun
spi_controller_put(struct spi_controller * ctlr)706*4882a593Smuzhiyun static inline void spi_controller_put(struct spi_controller *ctlr)
707*4882a593Smuzhiyun {
708*4882a593Smuzhiyun if (ctlr)
709*4882a593Smuzhiyun put_device(&ctlr->dev);
710*4882a593Smuzhiyun }
711*4882a593Smuzhiyun
spi_controller_is_slave(struct spi_controller * ctlr)712*4882a593Smuzhiyun static inline bool spi_controller_is_slave(struct spi_controller *ctlr)
713*4882a593Smuzhiyun {
714*4882a593Smuzhiyun return IS_ENABLED(CONFIG_SPI_SLAVE) && ctlr->slave;
715*4882a593Smuzhiyun }
716*4882a593Smuzhiyun
717*4882a593Smuzhiyun /* PM calls that need to be issued by the driver */
718*4882a593Smuzhiyun extern int spi_controller_suspend(struct spi_controller *ctlr);
719*4882a593Smuzhiyun extern int spi_controller_resume(struct spi_controller *ctlr);
720*4882a593Smuzhiyun
721*4882a593Smuzhiyun /* Calls the driver makes to interact with the message queue */
722*4882a593Smuzhiyun extern struct spi_message *spi_get_next_queued_message(struct spi_controller *ctlr);
723*4882a593Smuzhiyun extern void spi_finalize_current_message(struct spi_controller *ctlr);
724*4882a593Smuzhiyun extern void spi_finalize_current_transfer(struct spi_controller *ctlr);
725*4882a593Smuzhiyun
726*4882a593Smuzhiyun /* Helper calls for driver to timestamp transfer */
727*4882a593Smuzhiyun void spi_take_timestamp_pre(struct spi_controller *ctlr,
728*4882a593Smuzhiyun struct spi_transfer *xfer,
729*4882a593Smuzhiyun size_t progress, bool irqs_off);
730*4882a593Smuzhiyun void spi_take_timestamp_post(struct spi_controller *ctlr,
731*4882a593Smuzhiyun struct spi_transfer *xfer,
732*4882a593Smuzhiyun size_t progress, bool irqs_off);
733*4882a593Smuzhiyun
734*4882a593Smuzhiyun /* the spi driver core manages memory for the spi_controller classdev */
735*4882a593Smuzhiyun extern struct spi_controller *__spi_alloc_controller(struct device *host,
736*4882a593Smuzhiyun unsigned int size, bool slave);
737*4882a593Smuzhiyun
spi_alloc_master(struct device * host,unsigned int size)738*4882a593Smuzhiyun static inline struct spi_controller *spi_alloc_master(struct device *host,
739*4882a593Smuzhiyun unsigned int size)
740*4882a593Smuzhiyun {
741*4882a593Smuzhiyun return __spi_alloc_controller(host, size, false);
742*4882a593Smuzhiyun }
743*4882a593Smuzhiyun
spi_alloc_slave(struct device * host,unsigned int size)744*4882a593Smuzhiyun static inline struct spi_controller *spi_alloc_slave(struct device *host,
745*4882a593Smuzhiyun unsigned int size)
746*4882a593Smuzhiyun {
747*4882a593Smuzhiyun if (!IS_ENABLED(CONFIG_SPI_SLAVE))
748*4882a593Smuzhiyun return NULL;
749*4882a593Smuzhiyun
750*4882a593Smuzhiyun return __spi_alloc_controller(host, size, true);
751*4882a593Smuzhiyun }
752*4882a593Smuzhiyun
753*4882a593Smuzhiyun struct spi_controller *__devm_spi_alloc_controller(struct device *dev,
754*4882a593Smuzhiyun unsigned int size,
755*4882a593Smuzhiyun bool slave);
756*4882a593Smuzhiyun
devm_spi_alloc_master(struct device * dev,unsigned int size)757*4882a593Smuzhiyun static inline struct spi_controller *devm_spi_alloc_master(struct device *dev,
758*4882a593Smuzhiyun unsigned int size)
759*4882a593Smuzhiyun {
760*4882a593Smuzhiyun return __devm_spi_alloc_controller(dev, size, false);
761*4882a593Smuzhiyun }
762*4882a593Smuzhiyun
devm_spi_alloc_slave(struct device * dev,unsigned int size)763*4882a593Smuzhiyun static inline struct spi_controller *devm_spi_alloc_slave(struct device *dev,
764*4882a593Smuzhiyun unsigned int size)
765*4882a593Smuzhiyun {
766*4882a593Smuzhiyun if (!IS_ENABLED(CONFIG_SPI_SLAVE))
767*4882a593Smuzhiyun return NULL;
768*4882a593Smuzhiyun
769*4882a593Smuzhiyun return __devm_spi_alloc_controller(dev, size, true);
770*4882a593Smuzhiyun }
771*4882a593Smuzhiyun
772*4882a593Smuzhiyun extern int spi_register_controller(struct spi_controller *ctlr);
773*4882a593Smuzhiyun extern int devm_spi_register_controller(struct device *dev,
774*4882a593Smuzhiyun struct spi_controller *ctlr);
775*4882a593Smuzhiyun extern void spi_unregister_controller(struct spi_controller *ctlr);
776*4882a593Smuzhiyun
777*4882a593Smuzhiyun extern struct spi_controller *spi_busnum_to_master(u16 busnum);
778*4882a593Smuzhiyun
779*4882a593Smuzhiyun /*
780*4882a593Smuzhiyun * SPI resource management while processing a SPI message
781*4882a593Smuzhiyun */
782*4882a593Smuzhiyun
/* Release callback invoked for each struct spi_res attached to @msg,
 * just before the resource memory itself is freed.
 */
typedef void (*spi_res_release_t)(struct spi_controller *ctlr,
				  struct spi_message *msg,
				  void *res);
786*4882a593Smuzhiyun
787*4882a593Smuzhiyun /**
788*4882a593Smuzhiyun * struct spi_res - spi resource management structure
789*4882a593Smuzhiyun * @entry: list entry
790*4882a593Smuzhiyun * @release: release code called prior to freeing this resource
791*4882a593Smuzhiyun * @data: extra data allocated for the specific use-case
792*4882a593Smuzhiyun *
793*4882a593Smuzhiyun * this is based on ideas from devres, but focused on life-cycle
794*4882a593Smuzhiyun * management during spi_message processing
795*4882a593Smuzhiyun */
struct spi_res {
	struct list_head	entry;		/* link in spi_message.resources */
	spi_res_release_t	release;	/* called prior to freeing this resource */
	unsigned long long	data[];		/* guarantee ull alignment */
};
801*4882a593Smuzhiyun
802*4882a593Smuzhiyun extern void *spi_res_alloc(struct spi_device *spi,
803*4882a593Smuzhiyun spi_res_release_t release,
804*4882a593Smuzhiyun size_t size, gfp_t gfp);
805*4882a593Smuzhiyun extern void spi_res_add(struct spi_message *message, void *res);
806*4882a593Smuzhiyun extern void spi_res_free(void *res);
807*4882a593Smuzhiyun
808*4882a593Smuzhiyun extern void spi_res_release(struct spi_controller *ctlr,
809*4882a593Smuzhiyun struct spi_message *message);
810*4882a593Smuzhiyun
811*4882a593Smuzhiyun /*---------------------------------------------------------------------------*/
812*4882a593Smuzhiyun
813*4882a593Smuzhiyun /*
814*4882a593Smuzhiyun * I/O INTERFACE between SPI controller and protocol drivers
815*4882a593Smuzhiyun *
816*4882a593Smuzhiyun * Protocol drivers use a queue of spi_messages, each transferring data
817*4882a593Smuzhiyun * between the controller and memory buffers.
818*4882a593Smuzhiyun *
819*4882a593Smuzhiyun * The spi_messages themselves consist of a series of read+write transfer
820*4882a593Smuzhiyun * segments. Those segments always read the same number of bits as they
821*4882a593Smuzhiyun * write; but one or the other is easily ignored by passing a null buffer
822*4882a593Smuzhiyun * pointer. (This is unlike most types of I/O API, because SPI hardware
823*4882a593Smuzhiyun * is full duplex.)
824*4882a593Smuzhiyun *
825*4882a593Smuzhiyun * NOTE: Allocation of spi_transfer and spi_message memory is entirely
826*4882a593Smuzhiyun * up to the protocol driver, which guarantees the integrity of both (as
827*4882a593Smuzhiyun * well as the data buffers) for as long as the message is queued.
828*4882a593Smuzhiyun */
829*4882a593Smuzhiyun
830*4882a593Smuzhiyun /**
831*4882a593Smuzhiyun * struct spi_transfer - a read/write buffer pair
832*4882a593Smuzhiyun * @tx_buf: data to be written (dma-safe memory), or NULL
833*4882a593Smuzhiyun * @rx_buf: data to be read (dma-safe memory), or NULL
834*4882a593Smuzhiyun * @tx_dma: DMA address of tx_buf, if @spi_message.is_dma_mapped
835*4882a593Smuzhiyun * @rx_dma: DMA address of rx_buf, if @spi_message.is_dma_mapped
836*4882a593Smuzhiyun * @tx_nbits: number of bits used for writing. If 0 the default
837*4882a593Smuzhiyun * (SPI_NBITS_SINGLE) is used.
838*4882a593Smuzhiyun * @rx_nbits: number of bits used for reading. If 0 the default
839*4882a593Smuzhiyun * (SPI_NBITS_SINGLE) is used.
840*4882a593Smuzhiyun * @len: size of rx and tx buffers (in bytes)
841*4882a593Smuzhiyun * @speed_hz: Select a speed other than the device default for this
842*4882a593Smuzhiyun * transfer. If 0 the default (from @spi_device) is used.
843*4882a593Smuzhiyun * @bits_per_word: select a bits_per_word other than the device default
844*4882a593Smuzhiyun * for this transfer. If 0 the default (from @spi_device) is used.
845*4882a593Smuzhiyun * @cs_change: affects chipselect after this transfer completes
846*4882a593Smuzhiyun * @cs_change_delay: delay between cs deassert and assert when
847*4882a593Smuzhiyun * @cs_change is set and @spi_transfer is not the last in @spi_message
848*4882a593Smuzhiyun * @delay: delay to be introduced after this transfer before
849*4882a593Smuzhiyun * (optionally) changing the chipselect status, then starting
850*4882a593Smuzhiyun * the next transfer or completing this @spi_message.
851*4882a593Smuzhiyun * @delay_usecs: microseconds to delay after this transfer before
852*4882a593Smuzhiyun * (optionally) changing the chipselect status, then starting
853*4882a593Smuzhiyun * the next transfer or completing this @spi_message.
854*4882a593Smuzhiyun * @word_delay: inter word delay to be introduced after each word size
855*4882a593Smuzhiyun * (set by bits_per_word) transmission.
856*4882a593Smuzhiyun * @effective_speed_hz: the effective SCK-speed that was used to
857*4882a593Smuzhiyun * transfer this transfer. Set to 0 if the spi bus driver does
858*4882a593Smuzhiyun * not support it.
859*4882a593Smuzhiyun * @transfer_list: transfers are sequenced through @spi_message.transfers
860*4882a593Smuzhiyun * @tx_sg: Scatterlist for transmit, currently not for client use
861*4882a593Smuzhiyun * @rx_sg: Scatterlist for receive, currently not for client use
862*4882a593Smuzhiyun * @ptp_sts_word_pre: The word (subject to bits_per_word semantics) offset
863*4882a593Smuzhiyun * within @tx_buf for which the SPI device is requesting that the time
864*4882a593Smuzhiyun * snapshot for this transfer begins. Upon completing the SPI transfer,
865*4882a593Smuzhiyun * this value may have changed compared to what was requested, depending
866*4882a593Smuzhiyun * on the available snapshotting resolution (DMA transfer,
867*4882a593Smuzhiyun * @ptp_sts_supported is false, etc).
868*4882a593Smuzhiyun * @ptp_sts_word_post: See @ptp_sts_word_pre. The two can be equal (meaning
869*4882a593Smuzhiyun * that a single byte should be snapshotted).
870*4882a593Smuzhiyun * If the core takes care of the timestamp (if @ptp_sts_supported is false
871*4882a593Smuzhiyun * for this controller), it will set @ptp_sts_word_pre to 0, and
872*4882a593Smuzhiyun * @ptp_sts_word_post to the length of the transfer. This is done
873*4882a593Smuzhiyun * purposefully (instead of setting to spi_transfer->len - 1) to denote
874*4882a593Smuzhiyun * that a transfer-level snapshot taken from within the driver may still
875*4882a593Smuzhiyun * be of higher quality.
876*4882a593Smuzhiyun * @ptp_sts: Pointer to a memory location held by the SPI slave device where a
877*4882a593Smuzhiyun * PTP system timestamp structure may lie. If drivers use PIO or their
878*4882a593Smuzhiyun * hardware has some sort of assist for retrieving exact transfer timing,
879*4882a593Smuzhiyun * they can (and should) assert @ptp_sts_supported and populate this
880*4882a593Smuzhiyun * structure using the ptp_read_system_*ts helper functions.
881*4882a593Smuzhiyun * The timestamp must represent the time at which the SPI slave device has
882*4882a593Smuzhiyun * processed the word, i.e. the "pre" timestamp should be taken before
883*4882a593Smuzhiyun * transmitting the "pre" word, and the "post" timestamp after receiving
884*4882a593Smuzhiyun * transmit confirmation from the controller for the "post" word.
885*4882a593Smuzhiyun * @timestamped: true if the transfer has been timestamped
886*4882a593Smuzhiyun * @error: Error status logged by spi controller driver.
887*4882a593Smuzhiyun *
888*4882a593Smuzhiyun * SPI transfers always write the same number of bytes as they read.
889*4882a593Smuzhiyun * Protocol drivers should always provide @rx_buf and/or @tx_buf.
890*4882a593Smuzhiyun * In some cases, they may also want to provide DMA addresses for
891*4882a593Smuzhiyun * the data being transferred; that may reduce overhead, when the
892*4882a593Smuzhiyun * underlying driver uses dma.
893*4882a593Smuzhiyun *
894*4882a593Smuzhiyun * If the transmit buffer is null, zeroes will be shifted out
895*4882a593Smuzhiyun * while filling @rx_buf. If the receive buffer is null, the data
896*4882a593Smuzhiyun * shifted in will be discarded. Only "len" bytes shift out (or in).
897*4882a593Smuzhiyun * It's an error to try to shift out a partial word. (For example, by
898*4882a593Smuzhiyun * shifting out three bytes with word size of sixteen or twenty bits;
899*4882a593Smuzhiyun * the former uses two bytes per word, the latter uses four bytes.)
900*4882a593Smuzhiyun *
901*4882a593Smuzhiyun * In-memory data values are always in native CPU byte order, translated
902*4882a593Smuzhiyun * from the wire byte order (big-endian except with SPI_LSB_FIRST). So
903*4882a593Smuzhiyun * for example when bits_per_word is sixteen, buffers are 2N bytes long
904*4882a593Smuzhiyun * (@len = 2N) and hold N sixteen bit words in CPU byte order.
905*4882a593Smuzhiyun *
906*4882a593Smuzhiyun * When the word size of the SPI transfer is not a power-of-two multiple
907*4882a593Smuzhiyun * of eight bits, those in-memory words include extra bits. In-memory
908*4882a593Smuzhiyun * words are always seen by protocol drivers as right-justified, so the
909*4882a593Smuzhiyun * undefined (rx) or unused (tx) bits are always the most significant bits.
910*4882a593Smuzhiyun *
911*4882a593Smuzhiyun * All SPI transfers start with the relevant chipselect active. Normally
912*4882a593Smuzhiyun * it stays selected until after the last transfer in a message. Drivers
913*4882a593Smuzhiyun * can affect the chipselect signal using cs_change.
914*4882a593Smuzhiyun *
915*4882a593Smuzhiyun * (i) If the transfer isn't the last one in the message, this flag is
916*4882a593Smuzhiyun * used to make the chipselect briefly go inactive in the middle of the
917*4882a593Smuzhiyun * message. Toggling chipselect in this way may be needed to terminate
918*4882a593Smuzhiyun * a chip command, letting a single spi_message perform all of group of
919*4882a593Smuzhiyun * chip transactions together.
920*4882a593Smuzhiyun *
921*4882a593Smuzhiyun * (ii) When the transfer is the last one in the message, the chip may
922*4882a593Smuzhiyun * stay selected until the next transfer. On multi-device SPI busses
923*4882a593Smuzhiyun * with nothing blocking messages going to other devices, this is just
924*4882a593Smuzhiyun * a performance hint; starting a message to another device deselects
925*4882a593Smuzhiyun * this one. But in other cases, this can be used to ensure correctness.
926*4882a593Smuzhiyun * Some devices need protocol transactions to be built from a series of
927*4882a593Smuzhiyun * spi_message submissions, where the content of one message is determined
928*4882a593Smuzhiyun * by the results of previous messages and where the whole transaction
929*4882a593Smuzhiyun * ends when the chipselect goes inactive.
930*4882a593Smuzhiyun *
931*4882a593Smuzhiyun * When SPI can transfer in 1x,2x or 4x. It can get this transfer information
932*4882a593Smuzhiyun * from device through @tx_nbits and @rx_nbits. In Bi-direction, these
933*4882a593Smuzhiyun * two should both be set. User can set transfer mode with SPI_NBITS_SINGLE(1x)
934*4882a593Smuzhiyun * SPI_NBITS_DUAL(2x) and SPI_NBITS_QUAD(4x) to support these three transfer.
935*4882a593Smuzhiyun *
936*4882a593Smuzhiyun * The code that submits an spi_message (and its spi_transfers)
937*4882a593Smuzhiyun * to the lower layers is responsible for managing its memory.
938*4882a593Smuzhiyun * Zero-initialize every field you don't set up explicitly, to
939*4882a593Smuzhiyun * insulate against future API updates. After you submit a message
940*4882a593Smuzhiyun * and its transfers, ignore them until its completion callback.
941*4882a593Smuzhiyun */
struct spi_transfer {
	/* it's ok if tx_buf == rx_buf (right?)
	 * for MicroWire, one buffer must be null
	 * buffers must work with dma_*map_single() calls, unless
	 * spi_message.is_dma_mapped reports a pre-existing mapping
	 */
	const void	*tx_buf;	/* data to shift out, or NULL */
	void		*rx_buf;	/* data shifted in, or NULL */
	unsigned	len;		/* size of both buffers, in bytes */

	dma_addr_t	tx_dma;		/* valid only if spi_message.is_dma_mapped */
	dma_addr_t	rx_dma;		/* valid only if spi_message.is_dma_mapped */
	struct sg_table tx_sg;		/* core-managed scatterlist; not for clients */
	struct sg_table rx_sg;		/* core-managed scatterlist; not for clients */

	unsigned	cs_change:1;	/* affects chipselect after this transfer */
	unsigned	tx_nbits:3;	/* write bus width; 0 = SPI_NBITS_SINGLE */
	unsigned	rx_nbits:3;	/* read bus width; 0 = SPI_NBITS_SINGLE */
#define	SPI_NBITS_SINGLE	0x01 /* 1bit transfer */
#define	SPI_NBITS_DUAL		0x02 /* 2bits transfer */
#define	SPI_NBITS_QUAD		0x04 /* 4bits transfer */
	u8		bits_per_word;	/* 0 means use the spi_device default */
	u16		delay_usecs;	/* legacy delay; superseded by @delay */
	struct spi_delay	delay;		/* delay after this transfer */
	struct spi_delay	cs_change_delay; /* CS deassert/assert gap (cs_change) */
	struct spi_delay	word_delay;	/* inter-word delay */
	u32		speed_hz;	/* 0 means use the spi_device default */

	u32		effective_speed_hz; /* actual SCK speed used; 0 if unsupported */

	/* word offsets within tx_buf bounding the PTP timestamp snapshot */
	unsigned int	ptp_sts_word_pre;
	unsigned int	ptp_sts_word_post;

	struct ptp_system_timestamp *ptp_sts;	/* slave-held PTP timestamp, or NULL */

	bool		timestamped;	/* true once the transfer was timestamped */

	struct list_head transfer_list;	/* sequenced via spi_message.transfers */

#define SPI_TRANS_FAIL_NO_START	BIT(0)
	u16		error;		/* error status logged by the controller driver */

	ANDROID_KABI_RESERVE(1);
};
986*4882a593Smuzhiyun
987*4882a593Smuzhiyun /**
988*4882a593Smuzhiyun * struct spi_message - one multi-segment SPI transaction
989*4882a593Smuzhiyun * @transfers: list of transfer segments in this transaction
990*4882a593Smuzhiyun * @spi: SPI device to which the transaction is queued
991*4882a593Smuzhiyun * @is_dma_mapped: if true, the caller provided both dma and cpu virtual
992*4882a593Smuzhiyun * addresses for each transfer buffer
993*4882a593Smuzhiyun * @complete: called to report transaction completions
994*4882a593Smuzhiyun * @context: the argument to complete() when it's called
995*4882a593Smuzhiyun * @frame_length: the total number of bytes in the message
996*4882a593Smuzhiyun * @actual_length: the total number of bytes that were transferred in all
997*4882a593Smuzhiyun * successful segments
998*4882a593Smuzhiyun * @status: zero for success, else negative errno
999*4882a593Smuzhiyun * @queue: for use by whichever driver currently owns the message
1000*4882a593Smuzhiyun * @state: for use by whichever driver currently owns the message
1001*4882a593Smuzhiyun * @resources: for resource management when the spi message is processed
1002*4882a593Smuzhiyun *
1003*4882a593Smuzhiyun * A @spi_message is used to execute an atomic sequence of data transfers,
1004*4882a593Smuzhiyun * each represented by a struct spi_transfer. The sequence is "atomic"
1005*4882a593Smuzhiyun * in the sense that no other spi_message may use that SPI bus until that
1006*4882a593Smuzhiyun * sequence completes. On some systems, many such sequences can execute as
1007*4882a593Smuzhiyun * a single programmed DMA transfer. On all systems, these messages are
1008*4882a593Smuzhiyun * queued, and might complete after transactions to other devices. Messages
1009*4882a593Smuzhiyun * sent to a given spi_device are always executed in FIFO order.
1010*4882a593Smuzhiyun *
1011*4882a593Smuzhiyun * The code that submits an spi_message (and its spi_transfers)
1012*4882a593Smuzhiyun * to the lower layers is responsible for managing its memory.
1013*4882a593Smuzhiyun * Zero-initialize every field you don't set up explicitly, to
1014*4882a593Smuzhiyun * insulate against future API updates. After you submit a message
1015*4882a593Smuzhiyun * and its transfers, ignore them until its completion callback.
1016*4882a593Smuzhiyun */
struct spi_message {
	struct list_head	transfers;	/* list of spi_transfer segments */

	struct spi_device	*spi;		/* device this message is queued for */

	unsigned		is_dma_mapped:1; /* caller supplied tx_dma/rx_dma */

	/* REVISIT: we might want a flag affecting the behavior of the
	 * last transfer ... allowing things like "read 16 bit length L"
	 * immediately followed by "read L bytes". Basically imposing
	 * a specific message scheduling algorithm.
	 *
	 * Some controller drivers (message-at-a-time queue processing)
	 * could provide that as their default scheduling algorithm. But
	 * others (with multi-message pipelines) could need a flag to
	 * tell them about such special cases.
	 */

	/* completion is reported through a callback */
	void			(*complete)(void *context);
	void			*context;	/* argument passed to complete() */
	unsigned		frame_length;	/* total bytes in the message */
	unsigned		actual_length;	/* bytes moved in successful segments */
	int			status;		/* 0 on success, else negative errno */

	/* for optional use by whatever driver currently owns the
	 * spi_message ... between calls to spi_async and then later
	 * complete(), that's the spi_controller controller driver.
	 */
	struct list_head	queue;
	void			*state;

	/* list of spi_res resources when the spi message is processed */
	struct list_head	resources;

	ANDROID_KABI_RESERVE(1);
};
1054*4882a593Smuzhiyun
spi_message_init_no_memset(struct spi_message * m)1055*4882a593Smuzhiyun static inline void spi_message_init_no_memset(struct spi_message *m)
1056*4882a593Smuzhiyun {
1057*4882a593Smuzhiyun INIT_LIST_HEAD(&m->transfers);
1058*4882a593Smuzhiyun INIT_LIST_HEAD(&m->resources);
1059*4882a593Smuzhiyun }
1060*4882a593Smuzhiyun
/* Zero the whole spi_message, then initialize its list heads for use. */
static inline void spi_message_init(struct spi_message *m)
{
	memset(m, 0, sizeof *m);
	spi_message_init_no_memset(m);
}
1066*4882a593Smuzhiyun
/* Append transfer @t to the end of message @m's transfer list. */
static inline void
spi_message_add_tail(struct spi_transfer *t, struct spi_message *m)
{
	list_add_tail(&t->transfer_list, &m->transfers);
}
1072*4882a593Smuzhiyun
/* Unlink transfer @t from whichever message it is currently queued on. */
static inline void
spi_transfer_del(struct spi_transfer *t)
{
	list_del(&t->transfer_list);
}
1078*4882a593Smuzhiyun
1079*4882a593Smuzhiyun static inline int
spi_transfer_delay_exec(struct spi_transfer * t)1080*4882a593Smuzhiyun spi_transfer_delay_exec(struct spi_transfer *t)
1081*4882a593Smuzhiyun {
1082*4882a593Smuzhiyun struct spi_delay d;
1083*4882a593Smuzhiyun
1084*4882a593Smuzhiyun if (t->delay_usecs) {
1085*4882a593Smuzhiyun d.value = t->delay_usecs;
1086*4882a593Smuzhiyun d.unit = SPI_DELAY_UNIT_USECS;
1087*4882a593Smuzhiyun return spi_delay_exec(&d, NULL);
1088*4882a593Smuzhiyun }
1089*4882a593Smuzhiyun
1090*4882a593Smuzhiyun return spi_delay_exec(&t->delay, t);
1091*4882a593Smuzhiyun }
1092*4882a593Smuzhiyun
1093*4882a593Smuzhiyun /**
1094*4882a593Smuzhiyun * spi_message_init_with_transfers - Initialize spi_message and append transfers
1095*4882a593Smuzhiyun * @m: spi_message to be initialized
1096*4882a593Smuzhiyun * @xfers: An array of spi transfers
1097*4882a593Smuzhiyun * @num_xfers: Number of items in the xfer array
1098*4882a593Smuzhiyun *
1099*4882a593Smuzhiyun * This function initializes the given spi_message and adds each spi_transfer in
1100*4882a593Smuzhiyun * the given array to the message.
1101*4882a593Smuzhiyun */
1102*4882a593Smuzhiyun static inline void
spi_message_init_with_transfers(struct spi_message * m,struct spi_transfer * xfers,unsigned int num_xfers)1103*4882a593Smuzhiyun spi_message_init_with_transfers(struct spi_message *m,
1104*4882a593Smuzhiyun struct spi_transfer *xfers, unsigned int num_xfers)
1105*4882a593Smuzhiyun {
1106*4882a593Smuzhiyun unsigned int i;
1107*4882a593Smuzhiyun
1108*4882a593Smuzhiyun spi_message_init(m);
1109*4882a593Smuzhiyun for (i = 0; i < num_xfers; ++i)
1110*4882a593Smuzhiyun spi_message_add_tail(&xfers[i], m);
1111*4882a593Smuzhiyun }
1112*4882a593Smuzhiyun
1113*4882a593Smuzhiyun /* It's fine to embed message and transaction structures in other data
1114*4882a593Smuzhiyun * structures so long as you don't free them while they're in use.
1115*4882a593Smuzhiyun */
1116*4882a593Smuzhiyun
/*
 * spi_message_alloc - allocate a zeroed spi_message followed in memory by
 * @ntrans spi_transfer slots, all pre-queued on the message.
 *
 * Returns NULL on allocation failure or if the requested size would
 * overflow size_t.  Free with spi_message_free().
 */
static inline struct spi_message *spi_message_alloc(unsigned ntrans, gfp_t flags)
{
	struct spi_message *m;

	/* guard the size computation below against integer overflow */
	if (ntrans > (SIZE_MAX - sizeof(*m)) / sizeof(struct spi_transfer))
		return NULL;

	m = kzalloc(sizeof(*m) + ntrans * sizeof(struct spi_transfer),
			flags);
	if (m) {
		unsigned i;
		/* transfers are laid out immediately after the message */
		struct spi_transfer *t = (struct spi_transfer *)(m + 1);

		spi_message_init_no_memset(m);
		for (i = 0; i < ntrans; i++, t++)
			spi_message_add_tail(t, m);
	}
	return m;
}
1134*4882a593Smuzhiyun
/* Free a message obtained from spi_message_alloc() (NULL is a no-op). */
static inline void spi_message_free(struct spi_message *m)
{
	kfree(m);
}
1139*4882a593Smuzhiyun
1140*4882a593Smuzhiyun extern int spi_set_cs_timing(struct spi_device *spi,
1141*4882a593Smuzhiyun struct spi_delay *setup,
1142*4882a593Smuzhiyun struct spi_delay *hold,
1143*4882a593Smuzhiyun struct spi_delay *inactive);
1144*4882a593Smuzhiyun
1145*4882a593Smuzhiyun extern int spi_setup(struct spi_device *spi);
1146*4882a593Smuzhiyun extern int spi_async(struct spi_device *spi, struct spi_message *message);
1147*4882a593Smuzhiyun extern int spi_async_locked(struct spi_device *spi,
1148*4882a593Smuzhiyun struct spi_message *message);
1149*4882a593Smuzhiyun extern int spi_slave_abort(struct spi_device *spi);
1150*4882a593Smuzhiyun
1151*4882a593Smuzhiyun static inline size_t
spi_max_message_size(struct spi_device * spi)1152*4882a593Smuzhiyun spi_max_message_size(struct spi_device *spi)
1153*4882a593Smuzhiyun {
1154*4882a593Smuzhiyun struct spi_controller *ctlr = spi->controller;
1155*4882a593Smuzhiyun
1156*4882a593Smuzhiyun if (!ctlr->max_message_size)
1157*4882a593Smuzhiyun return SIZE_MAX;
1158*4882a593Smuzhiyun return ctlr->max_message_size(spi);
1159*4882a593Smuzhiyun }
1160*4882a593Smuzhiyun
1161*4882a593Smuzhiyun static inline size_t
spi_max_transfer_size(struct spi_device * spi)1162*4882a593Smuzhiyun spi_max_transfer_size(struct spi_device *spi)
1163*4882a593Smuzhiyun {
1164*4882a593Smuzhiyun struct spi_controller *ctlr = spi->controller;
1165*4882a593Smuzhiyun size_t tr_max = SIZE_MAX;
1166*4882a593Smuzhiyun size_t msg_max = spi_max_message_size(spi);
1167*4882a593Smuzhiyun
1168*4882a593Smuzhiyun if (ctlr->max_transfer_size)
1169*4882a593Smuzhiyun tr_max = ctlr->max_transfer_size(spi);
1170*4882a593Smuzhiyun
1171*4882a593Smuzhiyun /* transfer size limit must not be greater than messsage size limit */
1172*4882a593Smuzhiyun return min(tr_max, msg_max);
1173*4882a593Smuzhiyun }
1174*4882a593Smuzhiyun
1175*4882a593Smuzhiyun /**
1176*4882a593Smuzhiyun * spi_is_bpw_supported - Check if bits per word is supported
1177*4882a593Smuzhiyun * @spi: SPI device
1178*4882a593Smuzhiyun * @bpw: Bits per word
1179*4882a593Smuzhiyun *
1180*4882a593Smuzhiyun * This function checks to see if the SPI controller supports @bpw.
1181*4882a593Smuzhiyun *
1182*4882a593Smuzhiyun * Returns:
1183*4882a593Smuzhiyun * True if @bpw is supported, false otherwise.
1184*4882a593Smuzhiyun */
spi_is_bpw_supported(struct spi_device * spi,u32 bpw)1185*4882a593Smuzhiyun static inline bool spi_is_bpw_supported(struct spi_device *spi, u32 bpw)
1186*4882a593Smuzhiyun {
1187*4882a593Smuzhiyun u32 bpw_mask = spi->master->bits_per_word_mask;
1188*4882a593Smuzhiyun
1189*4882a593Smuzhiyun if (bpw == 8 || (bpw <= 32 && bpw_mask & SPI_BPW_MASK(bpw)))
1190*4882a593Smuzhiyun return true;
1191*4882a593Smuzhiyun
1192*4882a593Smuzhiyun return false;
1193*4882a593Smuzhiyun }
1194*4882a593Smuzhiyun
1195*4882a593Smuzhiyun /*---------------------------------------------------------------------------*/
1196*4882a593Smuzhiyun
1197*4882a593Smuzhiyun /* SPI transfer replacement methods which make use of spi_res */
1198*4882a593Smuzhiyun
1199*4882a593Smuzhiyun struct spi_replaced_transfers;
1200*4882a593Smuzhiyun typedef void (*spi_replaced_release_t)(struct spi_controller *ctlr,
1201*4882a593Smuzhiyun struct spi_message *msg,
1202*4882a593Smuzhiyun struct spi_replaced_transfers *res);
/**
 * struct spi_replaced_transfers - structure describing the spi_transfer
 *                                 replacements that have occurred
 *                                 so that they can get reverted
 * @release:            some extra release code to get executed prior to
 *                      releasing this structure
 * @extradata:          pointer to some extra data if requested or NULL
 * @replaced_transfers: transfers that have been replaced and which need
 *                      to get restored
 * @replaced_after:     the transfer after which the @replaced_transfers
 *                      are to get re-inserted
 * @inserted:           number of transfers inserted
 * @inserted_transfers: array of spi_transfers of array-size @inserted,
 *                      that have been replacing replaced_transfers
 *
 * note: that @extradata will point to @inserted_transfers[@inserted]
 * if some extra allocation is requested, so alignment will be the same
 * as for spi_transfers
 */
struct spi_replaced_transfers {
	spi_replaced_release_t release;
	void *extradata;
	struct list_head replaced_transfers;
	struct list_head *replaced_after;
	size_t inserted;
	struct spi_transfer inserted_transfers[];
};
1230*4882a593Smuzhiyun
1231*4882a593Smuzhiyun extern struct spi_replaced_transfers *spi_replace_transfers(
1232*4882a593Smuzhiyun struct spi_message *msg,
1233*4882a593Smuzhiyun struct spi_transfer *xfer_first,
1234*4882a593Smuzhiyun size_t remove,
1235*4882a593Smuzhiyun size_t insert,
1236*4882a593Smuzhiyun spi_replaced_release_t release,
1237*4882a593Smuzhiyun size_t extradatasize,
1238*4882a593Smuzhiyun gfp_t gfp);
1239*4882a593Smuzhiyun
1240*4882a593Smuzhiyun /*---------------------------------------------------------------------------*/
1241*4882a593Smuzhiyun
1242*4882a593Smuzhiyun /* SPI transfer transformation methods */
1243*4882a593Smuzhiyun
1244*4882a593Smuzhiyun extern int spi_split_transfers_maxsize(struct spi_controller *ctlr,
1245*4882a593Smuzhiyun struct spi_message *msg,
1246*4882a593Smuzhiyun size_t maxsize,
1247*4882a593Smuzhiyun gfp_t gfp);
1248*4882a593Smuzhiyun
1249*4882a593Smuzhiyun /*---------------------------------------------------------------------------*/
1250*4882a593Smuzhiyun
1251*4882a593Smuzhiyun /* All these synchronous SPI transfer routines are utilities layered
1252*4882a593Smuzhiyun * over the core async transfer primitive. Here, "synchronous" means
1253*4882a593Smuzhiyun * they will sleep uninterruptibly until the async transfer completes.
1254*4882a593Smuzhiyun */
1255*4882a593Smuzhiyun
1256*4882a593Smuzhiyun extern int spi_sync(struct spi_device *spi, struct spi_message *message);
1257*4882a593Smuzhiyun extern int spi_sync_locked(struct spi_device *spi, struct spi_message *message);
1258*4882a593Smuzhiyun extern int spi_bus_lock(struct spi_controller *ctlr);
1259*4882a593Smuzhiyun extern int spi_bus_unlock(struct spi_controller *ctlr);
1260*4882a593Smuzhiyun
1261*4882a593Smuzhiyun /**
1262*4882a593Smuzhiyun * spi_sync_transfer - synchronous SPI data transfer
1263*4882a593Smuzhiyun * @spi: device with which data will be exchanged
1264*4882a593Smuzhiyun * @xfers: An array of spi_transfers
1265*4882a593Smuzhiyun * @num_xfers: Number of items in the xfer array
1266*4882a593Smuzhiyun * Context: can sleep
1267*4882a593Smuzhiyun *
1268*4882a593Smuzhiyun * Does a synchronous SPI data transfer of the given spi_transfer array.
1269*4882a593Smuzhiyun *
1270*4882a593Smuzhiyun * For more specific semantics see spi_sync().
1271*4882a593Smuzhiyun *
1272*4882a593Smuzhiyun * Return: zero on success, else a negative error code.
1273*4882a593Smuzhiyun */
1274*4882a593Smuzhiyun static inline int
spi_sync_transfer(struct spi_device * spi,struct spi_transfer * xfers,unsigned int num_xfers)1275*4882a593Smuzhiyun spi_sync_transfer(struct spi_device *spi, struct spi_transfer *xfers,
1276*4882a593Smuzhiyun unsigned int num_xfers)
1277*4882a593Smuzhiyun {
1278*4882a593Smuzhiyun struct spi_message msg;
1279*4882a593Smuzhiyun
1280*4882a593Smuzhiyun spi_message_init_with_transfers(&msg, xfers, num_xfers);
1281*4882a593Smuzhiyun
1282*4882a593Smuzhiyun return spi_sync(spi, &msg);
1283*4882a593Smuzhiyun }
1284*4882a593Smuzhiyun
1285*4882a593Smuzhiyun /**
1286*4882a593Smuzhiyun * spi_write - SPI synchronous write
1287*4882a593Smuzhiyun * @spi: device to which data will be written
1288*4882a593Smuzhiyun * @buf: data buffer
1289*4882a593Smuzhiyun * @len: data buffer size
1290*4882a593Smuzhiyun * Context: can sleep
1291*4882a593Smuzhiyun *
1292*4882a593Smuzhiyun * This function writes the buffer @buf.
1293*4882a593Smuzhiyun * Callable only from contexts that can sleep.
1294*4882a593Smuzhiyun *
1295*4882a593Smuzhiyun * Return: zero on success, else a negative error code.
1296*4882a593Smuzhiyun */
1297*4882a593Smuzhiyun static inline int
spi_write(struct spi_device * spi,const void * buf,size_t len)1298*4882a593Smuzhiyun spi_write(struct spi_device *spi, const void *buf, size_t len)
1299*4882a593Smuzhiyun {
1300*4882a593Smuzhiyun struct spi_transfer t = {
1301*4882a593Smuzhiyun .tx_buf = buf,
1302*4882a593Smuzhiyun .len = len,
1303*4882a593Smuzhiyun };
1304*4882a593Smuzhiyun
1305*4882a593Smuzhiyun return spi_sync_transfer(spi, &t, 1);
1306*4882a593Smuzhiyun }
1307*4882a593Smuzhiyun
1308*4882a593Smuzhiyun /**
1309*4882a593Smuzhiyun * spi_read - SPI synchronous read
1310*4882a593Smuzhiyun * @spi: device from which data will be read
1311*4882a593Smuzhiyun * @buf: data buffer
1312*4882a593Smuzhiyun * @len: data buffer size
1313*4882a593Smuzhiyun * Context: can sleep
1314*4882a593Smuzhiyun *
1315*4882a593Smuzhiyun * This function reads the buffer @buf.
1316*4882a593Smuzhiyun * Callable only from contexts that can sleep.
1317*4882a593Smuzhiyun *
1318*4882a593Smuzhiyun * Return: zero on success, else a negative error code.
1319*4882a593Smuzhiyun */
1320*4882a593Smuzhiyun static inline int
spi_read(struct spi_device * spi,void * buf,size_t len)1321*4882a593Smuzhiyun spi_read(struct spi_device *spi, void *buf, size_t len)
1322*4882a593Smuzhiyun {
1323*4882a593Smuzhiyun struct spi_transfer t = {
1324*4882a593Smuzhiyun .rx_buf = buf,
1325*4882a593Smuzhiyun .len = len,
1326*4882a593Smuzhiyun };
1327*4882a593Smuzhiyun
1328*4882a593Smuzhiyun return spi_sync_transfer(spi, &t, 1);
1329*4882a593Smuzhiyun }
1330*4882a593Smuzhiyun
1331*4882a593Smuzhiyun /* this copies txbuf and rxbuf data; for small transfers only! */
1332*4882a593Smuzhiyun extern int spi_write_then_read(struct spi_device *spi,
1333*4882a593Smuzhiyun const void *txbuf, unsigned n_tx,
1334*4882a593Smuzhiyun void *rxbuf, unsigned n_rx);
1335*4882a593Smuzhiyun
1336*4882a593Smuzhiyun /**
1337*4882a593Smuzhiyun * spi_w8r8 - SPI synchronous 8 bit write followed by 8 bit read
1338*4882a593Smuzhiyun * @spi: device with which data will be exchanged
1339*4882a593Smuzhiyun * @cmd: command to be written before data is read back
1340*4882a593Smuzhiyun * Context: can sleep
1341*4882a593Smuzhiyun *
1342*4882a593Smuzhiyun * Callable only from contexts that can sleep.
1343*4882a593Smuzhiyun *
1344*4882a593Smuzhiyun * Return: the (unsigned) eight bit number returned by the
1345*4882a593Smuzhiyun * device, or else a negative error code.
1346*4882a593Smuzhiyun */
spi_w8r8(struct spi_device * spi,u8 cmd)1347*4882a593Smuzhiyun static inline ssize_t spi_w8r8(struct spi_device *spi, u8 cmd)
1348*4882a593Smuzhiyun {
1349*4882a593Smuzhiyun ssize_t status;
1350*4882a593Smuzhiyun u8 result;
1351*4882a593Smuzhiyun
1352*4882a593Smuzhiyun status = spi_write_then_read(spi, &cmd, 1, &result, 1);
1353*4882a593Smuzhiyun
1354*4882a593Smuzhiyun /* return negative errno or unsigned value */
1355*4882a593Smuzhiyun return (status < 0) ? status : result;
1356*4882a593Smuzhiyun }
1357*4882a593Smuzhiyun
1358*4882a593Smuzhiyun /**
1359*4882a593Smuzhiyun * spi_w8r16 - SPI synchronous 8 bit write followed by 16 bit read
1360*4882a593Smuzhiyun * @spi: device with which data will be exchanged
1361*4882a593Smuzhiyun * @cmd: command to be written before data is read back
1362*4882a593Smuzhiyun * Context: can sleep
1363*4882a593Smuzhiyun *
1364*4882a593Smuzhiyun * The number is returned in wire-order, which is at least sometimes
1365*4882a593Smuzhiyun * big-endian.
1366*4882a593Smuzhiyun *
1367*4882a593Smuzhiyun * Callable only from contexts that can sleep.
1368*4882a593Smuzhiyun *
1369*4882a593Smuzhiyun * Return: the (unsigned) sixteen bit number returned by the
1370*4882a593Smuzhiyun * device, or else a negative error code.
1371*4882a593Smuzhiyun */
spi_w8r16(struct spi_device * spi,u8 cmd)1372*4882a593Smuzhiyun static inline ssize_t spi_w8r16(struct spi_device *spi, u8 cmd)
1373*4882a593Smuzhiyun {
1374*4882a593Smuzhiyun ssize_t status;
1375*4882a593Smuzhiyun u16 result;
1376*4882a593Smuzhiyun
1377*4882a593Smuzhiyun status = spi_write_then_read(spi, &cmd, 1, &result, 2);
1378*4882a593Smuzhiyun
1379*4882a593Smuzhiyun /* return negative errno or unsigned value */
1380*4882a593Smuzhiyun return (status < 0) ? status : result;
1381*4882a593Smuzhiyun }
1382*4882a593Smuzhiyun
1383*4882a593Smuzhiyun /**
1384*4882a593Smuzhiyun * spi_w8r16be - SPI synchronous 8 bit write followed by 16 bit big-endian read
1385*4882a593Smuzhiyun * @spi: device with which data will be exchanged
1386*4882a593Smuzhiyun * @cmd: command to be written before data is read back
1387*4882a593Smuzhiyun * Context: can sleep
1388*4882a593Smuzhiyun *
1389*4882a593Smuzhiyun * This function is similar to spi_w8r16, with the exception that it will
1390*4882a593Smuzhiyun * convert the read 16 bit data word from big-endian to native endianness.
1391*4882a593Smuzhiyun *
1392*4882a593Smuzhiyun * Callable only from contexts that can sleep.
1393*4882a593Smuzhiyun *
1394*4882a593Smuzhiyun * Return: the (unsigned) sixteen bit number returned by the device in cpu
1395*4882a593Smuzhiyun * endianness, or else a negative error code.
1396*4882a593Smuzhiyun */
spi_w8r16be(struct spi_device * spi,u8 cmd)1397*4882a593Smuzhiyun static inline ssize_t spi_w8r16be(struct spi_device *spi, u8 cmd)
1398*4882a593Smuzhiyun
1399*4882a593Smuzhiyun {
1400*4882a593Smuzhiyun ssize_t status;
1401*4882a593Smuzhiyun __be16 result;
1402*4882a593Smuzhiyun
1403*4882a593Smuzhiyun status = spi_write_then_read(spi, &cmd, 1, &result, 2);
1404*4882a593Smuzhiyun if (status < 0)
1405*4882a593Smuzhiyun return status;
1406*4882a593Smuzhiyun
1407*4882a593Smuzhiyun return be16_to_cpu(result);
1408*4882a593Smuzhiyun }
1409*4882a593Smuzhiyun
1410*4882a593Smuzhiyun /*---------------------------------------------------------------------------*/
1411*4882a593Smuzhiyun
1412*4882a593Smuzhiyun /*
1413*4882a593Smuzhiyun * INTERFACE between board init code and SPI infrastructure.
1414*4882a593Smuzhiyun *
1415*4882a593Smuzhiyun * No SPI driver ever sees these SPI device table segments, but
1416*4882a593Smuzhiyun * it's how the SPI core (or adapters that get hotplugged) grows
1417*4882a593Smuzhiyun * the driver model tree.
1418*4882a593Smuzhiyun *
1419*4882a593Smuzhiyun * As a rule, SPI devices can't be probed. Instead, board init code
1420*4882a593Smuzhiyun * provides a table listing the devices which are present, with enough
1421*4882a593Smuzhiyun * information to bind and set up the device's driver. There's basic
1422*4882a593Smuzhiyun * support for nonstatic configurations too; enough to handle adding
1423*4882a593Smuzhiyun * parport adapters, or microcontrollers acting as USB-to-SPI bridges.
1424*4882a593Smuzhiyun */
1425*4882a593Smuzhiyun
/**
 * struct spi_board_info - board-specific template for a SPI device
 * @modalias: Initializes spi_device.modalias; identifies the driver.
 * @platform_data: Initializes spi_device.platform_data; the particular
 *      data stored there is driver-specific.
 * @properties: Additional device properties for the device.
 * @controller_data: Initializes spi_device.controller_data; some
 *      controllers need hints about hardware setup, e.g. for DMA.
 * @irq: Initializes spi_device.irq; depends on how the board is wired.
 * @max_speed_hz: Initializes spi_device.max_speed_hz; based on limits
 *      from the chip datasheet and board-specific signal quality issues.
 * @bus_num: Identifies which spi_controller parents the spi_device; unused
 *      by spi_new_device(), and otherwise depends on board wiring.
 * @chip_select: Initializes spi_device.chip_select; depends on how
 *      the board is wired.
 * @mode: Initializes spi_device.mode; based on the chip datasheet, board
 *      wiring (some devices support both 3WIRE and standard modes), and
 *      possibly presence of an inverter in the chipselect path.
 *
 * When adding new SPI devices to the device tree, these structures serve
 * as a partial device template.  They hold information which can't always
 * be determined by drivers.  Information that probe() can establish (such
 * as the default transfer wordsize) is not included here.
 *
 * These structures are used in two places.  Their primary role is to
 * be stored in tables of board-specific device descriptors, which are
 * declared early in board initialization and then used (much later) to
 * populate a controller's device tree after that controller's driver
 * initializes.  A secondary (and atypical) role is as a parameter to
 * spi_new_device() call, which happens after those controller drivers
 * are active in some dynamic board configuration models.
 */
struct spi_board_info {
	/* the device name and module name are coupled, like platform_bus;
	 * "modalias" is normally the driver name.
	 *
	 * platform_data goes to spi_device.dev.platform_data,
	 * controller_data goes to spi_device.controller_data,
	 * device properties are copied and attached to spi_device,
	 * irq is copied too
	 */
	char		modalias[SPI_NAME_SIZE];
	const void	*platform_data;
	const struct property_entry *properties;
	void		*controller_data;
	int		irq;

	/* slower signaling on noisy or low voltage boards */
	u32		max_speed_hz;


	/* bus_num is board specific and matches the bus_num of some
	 * spi_controller that will probably be registered later.
	 *
	 * chip_select reflects how this chip is wired to that master;
	 * it's less than num_chipselect.
	 */
	u16		bus_num;
	u16		chip_select;

	/* mode becomes spi_device.mode, and is essential for chips
	 * where the default of SPI_CS_HIGH = 0 is wrong.
	 */
	u32		mode;

	ANDROID_KABI_RESERVE(1);

	/* ... may need additional spi_device chip config data here.
	 * avoid stuff protocol drivers can set; but include stuff
	 * needed to behave without being bound to a driver:
	 *  - quirks like clock rate mattering when not selected
	 */
};
1499*4882a593Smuzhiyun
#ifdef	CONFIG_SPI
extern int
spi_register_board_info(struct spi_board_info const *info, unsigned n);
#else
/* board init code may ignore whether SPI is configured or not */
/* no-op stub: pretends registration succeeded when SPI is disabled */
static inline int
spi_register_board_info(struct spi_board_info const *info, unsigned n)
	{ return 0; }
#endif
1509*4882a593Smuzhiyun
1510*4882a593Smuzhiyun /* If you're hotplugging an adapter with devices (parport, usb, etc)
1511*4882a593Smuzhiyun * use spi_new_device() to describe each device. You can also call
1512*4882a593Smuzhiyun * spi_unregister_device() to start making that device vanish, but
1513*4882a593Smuzhiyun * normally that would be handled by spi_unregister_controller().
1514*4882a593Smuzhiyun *
1515*4882a593Smuzhiyun * You can also use spi_alloc_device() and spi_add_device() to use a two
1516*4882a593Smuzhiyun * stage registration sequence for each spi_device. This gives the caller
1517*4882a593Smuzhiyun * some more control over the spi_device structure before it is registered,
1518*4882a593Smuzhiyun * but requires that caller to initialize fields that would otherwise
1519*4882a593Smuzhiyun * be defined using the board info.
1520*4882a593Smuzhiyun */
1521*4882a593Smuzhiyun extern struct spi_device *
1522*4882a593Smuzhiyun spi_alloc_device(struct spi_controller *ctlr);
1523*4882a593Smuzhiyun
1524*4882a593Smuzhiyun extern int
1525*4882a593Smuzhiyun spi_add_device(struct spi_device *spi);
1526*4882a593Smuzhiyun
1527*4882a593Smuzhiyun extern struct spi_device *
1528*4882a593Smuzhiyun spi_new_device(struct spi_controller *, struct spi_board_info *);
1529*4882a593Smuzhiyun
1530*4882a593Smuzhiyun extern void spi_unregister_device(struct spi_device *spi);
1531*4882a593Smuzhiyun
1532*4882a593Smuzhiyun extern const struct spi_device_id *
1533*4882a593Smuzhiyun spi_get_device_id(const struct spi_device *sdev);
1534*4882a593Smuzhiyun
/*
 * True when @xfer is the final transfer of the message the controller is
 * currently processing.
 * NOTE(review): assumes ctlr->cur_msg is non-NULL, i.e. only valid while
 * a message is actually in flight — confirm at call sites.
 */
static inline bool
spi_transfer_is_last(struct spi_controller *ctlr, struct spi_transfer *xfer)
{
	return list_is_last(&xfer->transfer_list, &ctlr->cur_msg->transfers);
}
1540*4882a593Smuzhiyun
1541*4882a593Smuzhiyun /* OF support code */
1542*4882a593Smuzhiyun #if IS_ENABLED(CONFIG_OF)
1543*4882a593Smuzhiyun
1544*4882a593Smuzhiyun /* must call put_device() when done with returned spi_device device */
1545*4882a593Smuzhiyun extern struct spi_device *
1546*4882a593Smuzhiyun of_find_spi_device_by_node(struct device_node *node);
1547*4882a593Smuzhiyun
1548*4882a593Smuzhiyun #else
1549*4882a593Smuzhiyun
/* OF support disabled: device-node lookup always fails with NULL. */
static inline struct spi_device *
of_find_spi_device_by_node(struct device_node *node)
{
	return NULL;
}
1555*4882a593Smuzhiyun
1556*4882a593Smuzhiyun #endif /* IS_ENABLED(CONFIG_OF) */
1557*4882a593Smuzhiyun
1558*4882a593Smuzhiyun /* Compatibility layer */
1559*4882a593Smuzhiyun #define spi_master spi_controller
1560*4882a593Smuzhiyun
1561*4882a593Smuzhiyun #define SPI_MASTER_HALF_DUPLEX SPI_CONTROLLER_HALF_DUPLEX
1562*4882a593Smuzhiyun #define SPI_MASTER_NO_RX SPI_CONTROLLER_NO_RX
1563*4882a593Smuzhiyun #define SPI_MASTER_NO_TX SPI_CONTROLLER_NO_TX
1564*4882a593Smuzhiyun #define SPI_MASTER_MUST_RX SPI_CONTROLLER_MUST_RX
1565*4882a593Smuzhiyun #define SPI_MASTER_MUST_TX SPI_CONTROLLER_MUST_TX
1566*4882a593Smuzhiyun
1567*4882a593Smuzhiyun #define spi_master_get_devdata(_ctlr) spi_controller_get_devdata(_ctlr)
1568*4882a593Smuzhiyun #define spi_master_set_devdata(_ctlr, _data) \
1569*4882a593Smuzhiyun spi_controller_set_devdata(_ctlr, _data)
1570*4882a593Smuzhiyun #define spi_master_get(_ctlr) spi_controller_get(_ctlr)
1571*4882a593Smuzhiyun #define spi_master_put(_ctlr) spi_controller_put(_ctlr)
1572*4882a593Smuzhiyun #define spi_master_suspend(_ctlr) spi_controller_suspend(_ctlr)
1573*4882a593Smuzhiyun #define spi_master_resume(_ctlr) spi_controller_resume(_ctlr)
1574*4882a593Smuzhiyun
1575*4882a593Smuzhiyun #define spi_register_master(_ctlr) spi_register_controller(_ctlr)
1576*4882a593Smuzhiyun #define devm_spi_register_master(_dev, _ctlr) \
1577*4882a593Smuzhiyun devm_spi_register_controller(_dev, _ctlr)
1578*4882a593Smuzhiyun #define spi_unregister_master(_ctlr) spi_unregister_controller(_ctlr)
1579*4882a593Smuzhiyun
1580*4882a593Smuzhiyun #endif /* __LINUX_SPI_H */
1581