xref: /OK3568_Linux_fs/kernel/drivers/spi/spidev.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Simple synchronous userspace interface to SPI devices
4*4882a593Smuzhiyun  *
5*4882a593Smuzhiyun  * Copyright (C) 2006 SWAPP
6*4882a593Smuzhiyun  *	Andrea Paterniani <a.paterniani@swapp-eng.it>
7*4882a593Smuzhiyun  * Copyright (C) 2007 David Brownell (simplification, cleanup)
8*4882a593Smuzhiyun  */
9*4882a593Smuzhiyun 
10*4882a593Smuzhiyun #include <linux/init.h>
11*4882a593Smuzhiyun #include <linux/module.h>
12*4882a593Smuzhiyun #include <linux/ioctl.h>
13*4882a593Smuzhiyun #include <linux/fs.h>
14*4882a593Smuzhiyun #include <linux/device.h>
15*4882a593Smuzhiyun #include <linux/err.h>
16*4882a593Smuzhiyun #include <linux/list.h>
17*4882a593Smuzhiyun #include <linux/errno.h>
18*4882a593Smuzhiyun #include <linux/mutex.h>
19*4882a593Smuzhiyun #include <linux/slab.h>
20*4882a593Smuzhiyun #include <linux/compat.h>
21*4882a593Smuzhiyun #include <linux/of.h>
22*4882a593Smuzhiyun #include <linux/of_device.h>
23*4882a593Smuzhiyun #include <linux/acpi.h>
24*4882a593Smuzhiyun 
25*4882a593Smuzhiyun #include <linux/spi/spi.h>
26*4882a593Smuzhiyun #include <linux/spi/spidev.h>
27*4882a593Smuzhiyun 
28*4882a593Smuzhiyun #include <linux/uaccess.h>
29*4882a593Smuzhiyun 
30*4882a593Smuzhiyun 
31*4882a593Smuzhiyun /*
32*4882a593Smuzhiyun  * This supports access to SPI devices using normal userspace I/O calls.
33*4882a593Smuzhiyun  * Note that while traditional UNIX/POSIX I/O semantics are half duplex,
34*4882a593Smuzhiyun  * and often mask message boundaries, full SPI support requires full duplex
35*4882a593Smuzhiyun  * transfers.  There are several kinds of internal message boundaries to
36*4882a593Smuzhiyun  * handle chipselect management and other protocol options.
37*4882a593Smuzhiyun  *
38*4882a593Smuzhiyun  * SPI has a character major number assigned.  We allocate minor numbers
39*4882a593Smuzhiyun  * dynamically using a bitmask.  You must use hotplug tools, such as udev
40*4882a593Smuzhiyun  * (or mdev with busybox) to create and destroy the /dev/spidevB.C device
41*4882a593Smuzhiyun  * nodes, since there is no fixed association of minor numbers with any
42*4882a593Smuzhiyun  * particular SPI bus or device.
43*4882a593Smuzhiyun  */
44*4882a593Smuzhiyun #define SPIDEV_MAJOR			153	/* assigned */
45*4882a593Smuzhiyun #define N_SPI_MINORS			32	/* ... up to 256 */
46*4882a593Smuzhiyun 
47*4882a593Smuzhiyun static DECLARE_BITMAP(minors, N_SPI_MINORS);
48*4882a593Smuzhiyun 
49*4882a593Smuzhiyun 
50*4882a593Smuzhiyun /* Bit masks for spi_device.mode management.  Note that incorrect
51*4882a593Smuzhiyun  * settings for some settings can cause *lots* of trouble for other
52*4882a593Smuzhiyun  * devices on a shared bus:
53*4882a593Smuzhiyun  *
54*4882a593Smuzhiyun  *  - CS_HIGH ... this device will be active when it shouldn't be
55*4882a593Smuzhiyun  *  - 3WIRE ... when active, it won't behave as it should
56*4882a593Smuzhiyun  *  - NO_CS ... there will be no explicit message boundaries; this
57*4882a593Smuzhiyun  *	is completely incompatible with the shared bus model
58*4882a593Smuzhiyun  *  - READY ... transfers may proceed when they shouldn't.
59*4882a593Smuzhiyun  *
60*4882a593Smuzhiyun  * REVISIT should changing those flags be privileged?
61*4882a593Smuzhiyun  */
62*4882a593Smuzhiyun #define SPI_MODE_MASK		(SPI_CPHA | SPI_CPOL | SPI_CS_HIGH \
63*4882a593Smuzhiyun 				| SPI_LSB_FIRST | SPI_3WIRE | SPI_LOOP \
64*4882a593Smuzhiyun 				| SPI_NO_CS | SPI_READY | SPI_TX_DUAL \
65*4882a593Smuzhiyun 				| SPI_TX_QUAD | SPI_TX_OCTAL | SPI_RX_DUAL \
66*4882a593Smuzhiyun 				| SPI_RX_QUAD | SPI_RX_OCTAL)
67*4882a593Smuzhiyun 
/* Per-device state behind one /dev/spidevB.C node. */
struct spidev_data {
	dev_t			devt;		/* char device number for this node */
	spinlock_t		spi_lock;	/* guards ->spi against concurrent unbind */
	struct spi_device	*spi;		/* NULL once the SPI device is removed */
	struct list_head	device_entry;	/* link in the global device_list */

	/* TX/RX buffers are NULL unless this device is open (users > 0) */
	struct mutex		buf_lock;
	unsigned		users;		/* open file handle count */
	u8			*tx_buffer;
	u8			*rx_buffer;
	u32			speed_hz;	/* default clock for transfers */
};
81*4882a593Smuzhiyun 
82*4882a593Smuzhiyun static LIST_HEAD(device_list);
83*4882a593Smuzhiyun static DEFINE_MUTEX(device_list_lock);
84*4882a593Smuzhiyun 
85*4882a593Smuzhiyun static unsigned bufsiz = 4096;
86*4882a593Smuzhiyun module_param(bufsiz, uint, S_IRUGO);
87*4882a593Smuzhiyun MODULE_PARM_DESC(bufsiz, "data bytes in biggest supported SPI message");
88*4882a593Smuzhiyun 
89*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
90*4882a593Smuzhiyun 
91*4882a593Smuzhiyun static ssize_t
spidev_sync(struct spidev_data * spidev,struct spi_message * message)92*4882a593Smuzhiyun spidev_sync(struct spidev_data *spidev, struct spi_message *message)
93*4882a593Smuzhiyun {
94*4882a593Smuzhiyun 	int status;
95*4882a593Smuzhiyun 	struct spi_device *spi;
96*4882a593Smuzhiyun 
97*4882a593Smuzhiyun 	spin_lock_irq(&spidev->spi_lock);
98*4882a593Smuzhiyun 	spi = spidev->spi;
99*4882a593Smuzhiyun 	spin_unlock_irq(&spidev->spi_lock);
100*4882a593Smuzhiyun 
101*4882a593Smuzhiyun 	if (spi == NULL)
102*4882a593Smuzhiyun 		status = -ESHUTDOWN;
103*4882a593Smuzhiyun 	else
104*4882a593Smuzhiyun 		status = spi_sync(spi, message);
105*4882a593Smuzhiyun 
106*4882a593Smuzhiyun 	if (status == 0)
107*4882a593Smuzhiyun 		status = message->actual_length;
108*4882a593Smuzhiyun 
109*4882a593Smuzhiyun 	return status;
110*4882a593Smuzhiyun }
111*4882a593Smuzhiyun 
112*4882a593Smuzhiyun static inline ssize_t
spidev_sync_write(struct spidev_data * spidev,size_t len)113*4882a593Smuzhiyun spidev_sync_write(struct spidev_data *spidev, size_t len)
114*4882a593Smuzhiyun {
115*4882a593Smuzhiyun 	struct spi_transfer	t = {
116*4882a593Smuzhiyun 			.tx_buf		= spidev->tx_buffer,
117*4882a593Smuzhiyun 			.len		= len,
118*4882a593Smuzhiyun 			.speed_hz	= spidev->speed_hz,
119*4882a593Smuzhiyun 		};
120*4882a593Smuzhiyun 	struct spi_message	m;
121*4882a593Smuzhiyun 
122*4882a593Smuzhiyun 	spi_message_init(&m);
123*4882a593Smuzhiyun 	spi_message_add_tail(&t, &m);
124*4882a593Smuzhiyun 	return spidev_sync(spidev, &m);
125*4882a593Smuzhiyun }
126*4882a593Smuzhiyun 
127*4882a593Smuzhiyun static inline ssize_t
spidev_sync_read(struct spidev_data * spidev,size_t len)128*4882a593Smuzhiyun spidev_sync_read(struct spidev_data *spidev, size_t len)
129*4882a593Smuzhiyun {
130*4882a593Smuzhiyun 	struct spi_transfer	t = {
131*4882a593Smuzhiyun 			.rx_buf		= spidev->rx_buffer,
132*4882a593Smuzhiyun 			.len		= len,
133*4882a593Smuzhiyun 			.speed_hz	= spidev->speed_hz,
134*4882a593Smuzhiyun 		};
135*4882a593Smuzhiyun 	struct spi_message	m;
136*4882a593Smuzhiyun 
137*4882a593Smuzhiyun 	spi_message_init(&m);
138*4882a593Smuzhiyun 	spi_message_add_tail(&t, &m);
139*4882a593Smuzhiyun 	return spidev_sync(spidev, &m);
140*4882a593Smuzhiyun }
141*4882a593Smuzhiyun 
142*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
143*4882a593Smuzhiyun 
/* Read-only message with current device setup */
static ssize_t
spidev_read(struct file *filp, char __user *buf, size_t count, loff_t *f_pos)
{
	struct spidev_data	*spidev;
	ssize_t			status;

	/* chipselect only toggles at start or end of operation */
	if (count > bufsiz)
		return -EMSGSIZE;

	spidev = filp->private_data;

	mutex_lock(&spidev->buf_lock);
	status = spidev_sync_read(spidev, count);
	if (status > 0) {
		unsigned long	missing;

		/* copy_to_user() returns the number of bytes it could NOT
		 * copy: report a short read for a partial copy, or -EFAULT
		 * if nothing reached userspace at all.
		 */
		missing = copy_to_user(buf, spidev->rx_buffer, status);
		if (missing == status)
			status = -EFAULT;
		else
			status = status - missing;
	}
	mutex_unlock(&spidev->buf_lock);

	return status;
}
172*4882a593Smuzhiyun 
173*4882a593Smuzhiyun /* Write-only message with current device setup */
174*4882a593Smuzhiyun static ssize_t
spidev_write(struct file * filp,const char __user * buf,size_t count,loff_t * f_pos)175*4882a593Smuzhiyun spidev_write(struct file *filp, const char __user *buf,
176*4882a593Smuzhiyun 		size_t count, loff_t *f_pos)
177*4882a593Smuzhiyun {
178*4882a593Smuzhiyun 	struct spidev_data	*spidev;
179*4882a593Smuzhiyun 	ssize_t			status;
180*4882a593Smuzhiyun 	unsigned long		missing;
181*4882a593Smuzhiyun 
182*4882a593Smuzhiyun 	/* chipselect only toggles at start or end of operation */
183*4882a593Smuzhiyun 	if (count > bufsiz)
184*4882a593Smuzhiyun 		return -EMSGSIZE;
185*4882a593Smuzhiyun 
186*4882a593Smuzhiyun 	spidev = filp->private_data;
187*4882a593Smuzhiyun 
188*4882a593Smuzhiyun 	mutex_lock(&spidev->buf_lock);
189*4882a593Smuzhiyun 	missing = copy_from_user(spidev->tx_buffer, buf, count);
190*4882a593Smuzhiyun 	if (missing == 0)
191*4882a593Smuzhiyun 		status = spidev_sync_write(spidev, count);
192*4882a593Smuzhiyun 	else
193*4882a593Smuzhiyun 		status = -EFAULT;
194*4882a593Smuzhiyun 	mutex_unlock(&spidev->buf_lock);
195*4882a593Smuzhiyun 
196*4882a593Smuzhiyun 	return status;
197*4882a593Smuzhiyun }
198*4882a593Smuzhiyun 
/*
 * Translate an array of user-supplied spi_ioc_transfer descriptors into
 * one spi_message backed by the per-device bounce buffers, execute it
 * synchronously, and copy any received data back to userspace.
 *
 * Returns the total transfer length on success or a negative errno.
 * Caller must hold spidev->buf_lock.
 */
static int spidev_message(struct spidev_data *spidev,
		struct spi_ioc_transfer *u_xfers, unsigned n_xfers)
{
	struct spi_message	msg;
	struct spi_transfer	*k_xfers;
	struct spi_transfer	*k_tmp;
	struct spi_ioc_transfer *u_tmp;
	unsigned		n, total, tx_total, rx_total;
	u8			*tx_buf, *rx_buf;
	int			status = -EFAULT;

	spi_message_init(&msg);
	k_xfers = kcalloc(n_xfers, sizeof(*k_tmp), GFP_KERNEL);
	if (k_xfers == NULL)
		return -ENOMEM;

	/* Construct spi_message, copying any tx data to bounce buffer.
	 * We walk the array of user-provided transfers, using each one
	 * to initialize a kernel version of the same transfer.
	 */
	tx_buf = spidev->tx_buffer;
	rx_buf = spidev->rx_buffer;
	total = 0;
	tx_total = 0;
	rx_total = 0;
	for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
			n;
			n--, k_tmp++, u_tmp++) {
		/* Ensure that also following allocations from rx_buf/tx_buf will meet
		 * DMA alignment requirements.
		 */
		unsigned int len_aligned = ALIGN(u_tmp->len, ARCH_KMALLOC_MINALIGN);

		k_tmp->len = u_tmp->len;

		total += k_tmp->len;
		/* Since the function returns the total length of transfers
		 * on success, restrict the total to positive int values to
		 * avoid the return value looking like an error.  Also check
		 * each transfer length to avoid arithmetic overflow.
		 */
		if (total > INT_MAX || k_tmp->len > INT_MAX) {
			status = -EMSGSIZE;
			goto done;
		}

		if (u_tmp->rx_buf) {
			/* this transfer needs space in RX bounce buffer */
			rx_total += len_aligned;
			if (rx_total > bufsiz) {
				status = -EMSGSIZE;
				goto done;
			}
			k_tmp->rx_buf = rx_buf;
			rx_buf += len_aligned;
		}
		if (u_tmp->tx_buf) {
			/* this transfer needs space in TX bounce buffer */
			tx_total += len_aligned;
			if (tx_total > bufsiz) {
				status = -EMSGSIZE;
				goto done;
			}
			k_tmp->tx_buf = tx_buf;
			/* user tx_buf is carried as a u64; cast back to a
			 * user pointer before copying the payload in.
			 */
			if (copy_from_user(tx_buf, (const u8 __user *)
						(uintptr_t) u_tmp->tx_buf,
					u_tmp->len))
				goto done;
			tx_buf += len_aligned;
		}

		k_tmp->cs_change = !!u_tmp->cs_change;
		k_tmp->tx_nbits = u_tmp->tx_nbits;
		k_tmp->rx_nbits = u_tmp->rx_nbits;
		k_tmp->bits_per_word = u_tmp->bits_per_word;
		k_tmp->delay.value = u_tmp->delay_usecs;
		k_tmp->delay.unit = SPI_DELAY_UNIT_USECS;
		k_tmp->speed_hz = u_tmp->speed_hz;
		k_tmp->word_delay.value = u_tmp->word_delay_usecs;
		k_tmp->word_delay.unit = SPI_DELAY_UNIT_USECS;
		/* fall back to the device default clock when unspecified */
		if (!k_tmp->speed_hz)
			k_tmp->speed_hz = spidev->speed_hz;
#ifdef VERBOSE
		dev_dbg(&spidev->spi->dev,
			"  xfer len %u %s%s%s%dbits %u usec %u usec %uHz\n",
			k_tmp->len,
			k_tmp->rx_buf ? "rx " : "",
			k_tmp->tx_buf ? "tx " : "",
			k_tmp->cs_change ? "cs " : "",
			k_tmp->bits_per_word ? : spidev->spi->bits_per_word,
			k_tmp->delay.value,
			k_tmp->word_delay.value,
			k_tmp->speed_hz ? : spidev->spi->max_speed_hz);
#endif
		spi_message_add_tail(k_tmp, &msg);
	}

	status = spidev_sync(spidev, &msg);
	if (status < 0)
		goto done;

	/* copy any rx data out of bounce buffer */
	for (n = n_xfers, k_tmp = k_xfers, u_tmp = u_xfers;
			n;
			n--, k_tmp++, u_tmp++) {
		if (u_tmp->rx_buf) {
			if (copy_to_user((u8 __user *)
					(uintptr_t) u_tmp->rx_buf, k_tmp->rx_buf,
					u_tmp->len)) {
				status = -EFAULT;
				goto done;
			}
		}
	}
	status = total;

done:
	kfree(k_xfers);
	return status;
}
319*4882a593Smuzhiyun 
320*4882a593Smuzhiyun static struct spi_ioc_transfer *
spidev_get_ioc_message(unsigned int cmd,struct spi_ioc_transfer __user * u_ioc,unsigned * n_ioc)321*4882a593Smuzhiyun spidev_get_ioc_message(unsigned int cmd, struct spi_ioc_transfer __user *u_ioc,
322*4882a593Smuzhiyun 		unsigned *n_ioc)
323*4882a593Smuzhiyun {
324*4882a593Smuzhiyun 	u32	tmp;
325*4882a593Smuzhiyun 
326*4882a593Smuzhiyun 	/* Check type, command number and direction */
327*4882a593Smuzhiyun 	if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC
328*4882a593Smuzhiyun 			|| _IOC_NR(cmd) != _IOC_NR(SPI_IOC_MESSAGE(0))
329*4882a593Smuzhiyun 			|| _IOC_DIR(cmd) != _IOC_WRITE)
330*4882a593Smuzhiyun 		return ERR_PTR(-ENOTTY);
331*4882a593Smuzhiyun 
332*4882a593Smuzhiyun 	tmp = _IOC_SIZE(cmd);
333*4882a593Smuzhiyun 	if ((tmp % sizeof(struct spi_ioc_transfer)) != 0)
334*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
335*4882a593Smuzhiyun 	*n_ioc = tmp / sizeof(struct spi_ioc_transfer);
336*4882a593Smuzhiyun 	if (*n_ioc == 0)
337*4882a593Smuzhiyun 		return NULL;
338*4882a593Smuzhiyun 
339*4882a593Smuzhiyun 	/* copy into scratch area */
340*4882a593Smuzhiyun 	return memdup_user(u_ioc, tmp);
341*4882a593Smuzhiyun }
342*4882a593Smuzhiyun 
/*
 * Main ioctl entry point: handles the SPI_IOC_RD_*/SPI_IOC_WR_* mode,
 * bit-order, word-size and speed requests, and falls through to
 * spidev_message() for SPI_IOC_MESSAGE(N) full-duplex transfers.
 */
static long
spidev_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
{
	int			retval = 0;
	struct spidev_data	*spidev;
	struct spi_device	*spi;
	u32			tmp;
	unsigned		n_ioc;
	struct spi_ioc_transfer	*ioc;

	/* Check type and command number */
	if (_IOC_TYPE(cmd) != SPI_IOC_MAGIC)
		return -ENOTTY;

	/* guard against device removal before, or while,
	 * we issue this ioctl.
	 */
	spidev = filp->private_data;
	spin_lock_irq(&spidev->spi_lock);
	spi = spi_dev_get(spidev->spi);
	spin_unlock_irq(&spidev->spi_lock);

	if (spi == NULL)
		return -ESHUTDOWN;

	/* use the buffer lock here for triple duty:
	 *  - prevent I/O (from us) so calling spi_setup() is safe;
	 *  - prevent concurrent SPI_IOC_WR_* from morphing
	 *    data fields while SPI_IOC_RD_* reads them;
	 *  - SPI_IOC_MESSAGE needs the buffer locked "normally".
	 */
	mutex_lock(&spidev->buf_lock);

	switch (cmd) {
	/* read requests */
	case SPI_IOC_RD_MODE:
		retval = put_user(spi->mode & SPI_MODE_MASK,
					(__u8 __user *)arg);
		break;
	case SPI_IOC_RD_MODE32:
		retval = put_user(spi->mode & SPI_MODE_MASK,
					(__u32 __user *)arg);
		break;
	case SPI_IOC_RD_LSB_FIRST:
		retval = put_user((spi->mode & SPI_LSB_FIRST) ?  1 : 0,
					(__u8 __user *)arg);
		break;
	case SPI_IOC_RD_BITS_PER_WORD:
		retval = put_user(spi->bits_per_word, (__u8 __user *)arg);
		break;
	case SPI_IOC_RD_MAX_SPEED_HZ:
		retval = put_user(spidev->speed_hz, (__u32 __user *)arg);
		break;

	/* write requests */
	case SPI_IOC_WR_MODE:
	case SPI_IOC_WR_MODE32:
		/* 8-bit and 32-bit variants share the same logic; only the
		 * width of the userspace argument differs.
		 */
		if (cmd == SPI_IOC_WR_MODE)
			retval = get_user(tmp, (u8 __user *)arg);
		else
			retval = get_user(tmp, (u32 __user *)arg);
		if (retval == 0) {
			struct spi_controller *ctlr = spi->controller;
			u32	save = spi->mode;

			if (tmp & ~SPI_MODE_MASK) {
				retval = -EINVAL;
				break;
			}

			/* with GPIO chipselects the core wants SPI_CS_HIGH
			 * set; force it so userspace cannot clear it.
			 */
			if (ctlr->use_gpio_descriptors && ctlr->cs_gpiods &&
			    ctlr->cs_gpiods[spi->chip_select])
				tmp |= SPI_CS_HIGH;

			/* preserve mode bits userspace is not allowed to touch */
			tmp |= spi->mode & ~SPI_MODE_MASK;
			spi->mode = (u16)tmp;
			retval = spi_setup(spi);
			if (retval < 0)
				spi->mode = save;	/* roll back on failure */
			else
				dev_dbg(&spi->dev, "spi mode %x\n", tmp);
		}
		break;
	case SPI_IOC_WR_LSB_FIRST:
		retval = get_user(tmp, (__u8 __user *)arg);
		if (retval == 0) {
			u32	save = spi->mode;

			if (tmp)
				spi->mode |= SPI_LSB_FIRST;
			else
				spi->mode &= ~SPI_LSB_FIRST;
			retval = spi_setup(spi);
			if (retval < 0)
				spi->mode = save;	/* roll back on failure */
			else
				dev_dbg(&spi->dev, "%csb first\n",
						tmp ? 'l' : 'm');
		}
		break;
	case SPI_IOC_WR_BITS_PER_WORD:
		retval = get_user(tmp, (__u8 __user *)arg);
		if (retval == 0) {
			u8	save = spi->bits_per_word;

			spi->bits_per_word = tmp;
			retval = spi_setup(spi);
			if (retval < 0)
				spi->bits_per_word = save;
			else
				dev_dbg(&spi->dev, "%d bits per word\n", tmp);
		}
		break;
	case SPI_IOC_WR_MAX_SPEED_HZ:
		retval = get_user(tmp, (__u32 __user *)arg);
		if (retval == 0) {
			u32	save = spi->max_speed_hz;

			/* validate the rate via spi_setup(), remember it in
			 * spidev->speed_hz, then restore spi->max_speed_hz so
			 * the device-wide limit is left untouched.
			 */
			spi->max_speed_hz = tmp;
			retval = spi_setup(spi);
			if (retval == 0) {
				spidev->speed_hz = tmp;
				dev_dbg(&spi->dev, "%d Hz (max)\n",
					spidev->speed_hz);
			}
			spi->max_speed_hz = save;
		}
		break;

	default:
		/* segmented and/or full-duplex I/O request */
		/* Check message and copy into scratch area */
		ioc = spidev_get_ioc_message(cmd,
				(struct spi_ioc_transfer __user *)arg, &n_ioc);
		if (IS_ERR(ioc)) {
			retval = PTR_ERR(ioc);
			break;
		}
		if (!ioc)
			break;	/* n_ioc is also 0 */

		/* translate to spi_message, execute */
		retval = spidev_message(spidev, ioc, n_ioc);
		kfree(ioc);
		break;
	}

	mutex_unlock(&spidev->buf_lock);
	spi_dev_put(spi);
	return retval;
}
494*4882a593Smuzhiyun 
495*4882a593Smuzhiyun #ifdef CONFIG_COMPAT
/*
 * Handle SPI_IOC_MESSAGE(N) from 32-bit userspace: the transfer array
 * itself has the same layout, but the embedded rx_buf/tx_buf values are
 * compat pointers and must be converted before spidev_message() runs.
 */
static long
spidev_compat_ioc_message(struct file *filp, unsigned int cmd,
		unsigned long arg)
{
	struct spi_ioc_transfer __user	*u_ioc;
	int				retval = 0;
	struct spidev_data		*spidev;
	struct spi_device		*spi;
	unsigned			n_ioc, n;
	struct spi_ioc_transfer		*ioc;

	u_ioc = (struct spi_ioc_transfer __user *) compat_ptr(arg);

	/* guard against device removal before, or while,
	 * we issue this ioctl.
	 */
	spidev = filp->private_data;
	spin_lock_irq(&spidev->spi_lock);
	spi = spi_dev_get(spidev->spi);
	spin_unlock_irq(&spidev->spi_lock);

	if (spi == NULL)
		return -ESHUTDOWN;

	/* SPI_IOC_MESSAGE needs the buffer locked "normally" */
	mutex_lock(&spidev->buf_lock);

	/* Check message and copy into scratch area */
	ioc = spidev_get_ioc_message(cmd, u_ioc, &n_ioc);
	if (IS_ERR(ioc)) {
		retval = PTR_ERR(ioc);
		goto done;
	}
	if (!ioc)
		goto done;	/* n_ioc is also 0 */

	/* Convert buffer pointers */
	for (n = 0; n < n_ioc; n++) {
		ioc[n].rx_buf = (uintptr_t) compat_ptr(ioc[n].rx_buf);
		ioc[n].tx_buf = (uintptr_t) compat_ptr(ioc[n].tx_buf);
	}

	/* translate to spi_message, execute */
	retval = spidev_message(spidev, ioc, n_ioc);
	kfree(ioc);

done:
	mutex_unlock(&spidev->buf_lock);
	spi_dev_put(spi);
	return retval;
}
547*4882a593Smuzhiyun 
548*4882a593Smuzhiyun static long
spidev_compat_ioctl(struct file * filp,unsigned int cmd,unsigned long arg)549*4882a593Smuzhiyun spidev_compat_ioctl(struct file *filp, unsigned int cmd, unsigned long arg)
550*4882a593Smuzhiyun {
551*4882a593Smuzhiyun 	if (_IOC_TYPE(cmd) == SPI_IOC_MAGIC
552*4882a593Smuzhiyun 			&& _IOC_NR(cmd) == _IOC_NR(SPI_IOC_MESSAGE(0))
553*4882a593Smuzhiyun 			&& _IOC_DIR(cmd) == _IOC_WRITE)
554*4882a593Smuzhiyun 		return spidev_compat_ioc_message(filp, cmd, arg);
555*4882a593Smuzhiyun 
556*4882a593Smuzhiyun 	return spidev_ioctl(filp, cmd, (unsigned long)compat_ptr(arg));
557*4882a593Smuzhiyun }
558*4882a593Smuzhiyun #else
559*4882a593Smuzhiyun #define spidev_compat_ioctl NULL
560*4882a593Smuzhiyun #endif /* CONFIG_COMPAT */
561*4882a593Smuzhiyun 
/*
 * Open one /dev/spidevB.C node: look up the spidev_data matching the
 * inode's dev_t, lazily allocate the TX/RX bounce buffers on first open,
 * and bump the user count.
 */
static int spidev_open(struct inode *inode, struct file *filp)
{
	struct spidev_data	*spidev;
	int			status = -ENXIO;

	mutex_lock(&device_list_lock);

	/* find the device whose devt matches this inode */
	list_for_each_entry(spidev, &device_list, device_entry) {
		if (spidev->devt == inode->i_rdev) {
			status = 0;
			break;
		}
	}

	if (status) {
		pr_debug("spidev: nothing for minor %d\n", iminor(inode));
		goto err_find_dev;
	}

	/* buffers persist across opens; only allocate when missing */
	if (!spidev->tx_buffer) {
		spidev->tx_buffer = kmalloc(bufsiz, GFP_KERNEL);
		if (!spidev->tx_buffer) {
			dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
			status = -ENOMEM;
			goto err_find_dev;
		}
	}

	if (!spidev->rx_buffer) {
		spidev->rx_buffer = kmalloc(bufsiz, GFP_KERNEL);
		if (!spidev->rx_buffer) {
			dev_dbg(&spidev->spi->dev, "open/ENOMEM\n");
			status = -ENOMEM;
			goto err_alloc_rx_buf;
		}
	}

	spidev->users++;
	filp->private_data = spidev;
	/* character device: no seeking, no f_pos */
	stream_open(inode, filp);

	mutex_unlock(&device_list_lock);
	return 0;

err_alloc_rx_buf:
	/* undo the TX allocation made above so a later open retries both */
	kfree(spidev->tx_buffer);
	spidev->tx_buffer = NULL;
err_find_dev:
	mutex_unlock(&device_list_lock);
	return status;
}
613*4882a593Smuzhiyun 
spidev_release(struct inode * inode,struct file * filp)614*4882a593Smuzhiyun static int spidev_release(struct inode *inode, struct file *filp)
615*4882a593Smuzhiyun {
616*4882a593Smuzhiyun 	struct spidev_data	*spidev;
617*4882a593Smuzhiyun 	int			dofree;
618*4882a593Smuzhiyun 
619*4882a593Smuzhiyun 	mutex_lock(&device_list_lock);
620*4882a593Smuzhiyun 	spidev = filp->private_data;
621*4882a593Smuzhiyun 	filp->private_data = NULL;
622*4882a593Smuzhiyun 
623*4882a593Smuzhiyun 	spin_lock_irq(&spidev->spi_lock);
624*4882a593Smuzhiyun 	/* ... after we unbound from the underlying device? */
625*4882a593Smuzhiyun 	dofree = (spidev->spi == NULL);
626*4882a593Smuzhiyun 	spin_unlock_irq(&spidev->spi_lock);
627*4882a593Smuzhiyun 
628*4882a593Smuzhiyun 	/* last close? */
629*4882a593Smuzhiyun 	spidev->users--;
630*4882a593Smuzhiyun 	if (!spidev->users) {
631*4882a593Smuzhiyun 
632*4882a593Smuzhiyun 		kfree(spidev->tx_buffer);
633*4882a593Smuzhiyun 		spidev->tx_buffer = NULL;
634*4882a593Smuzhiyun 
635*4882a593Smuzhiyun 		kfree(spidev->rx_buffer);
636*4882a593Smuzhiyun 		spidev->rx_buffer = NULL;
637*4882a593Smuzhiyun 
638*4882a593Smuzhiyun 		if (dofree)
639*4882a593Smuzhiyun 			kfree(spidev);
640*4882a593Smuzhiyun 		else
641*4882a593Smuzhiyun 			spidev->speed_hz = spidev->spi->max_speed_hz;
642*4882a593Smuzhiyun 	}
643*4882a593Smuzhiyun #ifdef CONFIG_SPI_SLAVE
644*4882a593Smuzhiyun 	if (!dofree)
645*4882a593Smuzhiyun 		spi_slave_abort(spidev->spi);
646*4882a593Smuzhiyun #endif
647*4882a593Smuzhiyun 	mutex_unlock(&device_list_lock);
648*4882a593Smuzhiyun 
649*4882a593Smuzhiyun 	return 0;
650*4882a593Smuzhiyun }
651*4882a593Smuzhiyun 
/* File operations for /dev/spidevB.C nodes; no mmap, no seeking. */
static const struct file_operations spidev_fops = {
	.owner =	THIS_MODULE,
	/* REVISIT switch to aio primitives, so that userspace
	 * gets more complete API coverage.  It'll simplify things
	 * too, except for the locking.
	 */
	.write =	spidev_write,
	.read =		spidev_read,
	.unlocked_ioctl = spidev_ioctl,
	.compat_ioctl = spidev_compat_ioctl,
	.open =		spidev_open,
	.release =	spidev_release,
	.llseek =	no_llseek,
};
666*4882a593Smuzhiyun 
667*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
668*4882a593Smuzhiyun 
669*4882a593Smuzhiyun /* The main reason to have this class is to make mdev/udev create the
670*4882a593Smuzhiyun  * /dev/spidevB.C character device nodes exposing our userspace API.
671*4882a593Smuzhiyun  * It also simplifies memory management.
672*4882a593Smuzhiyun  */
673*4882a593Smuzhiyun 
/* Device class keying udev/mdev node creation; created in spidev_init(). */
static struct class *spidev_class;
675*4882a593Smuzhiyun 
#ifdef CONFIG_OF
/*
 * Devicetree match table.  Listing a bare "spidev" compatible in DT is
 * considered an abuse (see the WARN in spidev_probe()), so boards bind
 * through one of the specific compatibles below instead.
 */
static const struct of_device_id spidev_dt_ids[] = {
	{ .compatible = "rohm,dh2228fv" },
	{ .compatible = "lineartechnology,ltc2488" },
	{ .compatible = "ge,achc" },
	{ .compatible = "semtech,sx1301" },
	{ .compatible = "lwn,bk4" },
	{ .compatible = "dh,dhcom-board" },
	{ .compatible = "menlo,m53cpld" },
	{ .compatible = "rockchip,spidev" },
	{},
};
MODULE_DEVICE_TABLE(of, spidev_dt_ids);
#endif
690*4882a593Smuzhiyun 
#ifdef CONFIG_ACPI

/* Dummy SPI devices not to be used in production systems */
#define SPIDEV_ACPI_DUMMY	1

/* ACPI match table; driver_data flags entries that trigger the warning below. */
static const struct acpi_device_id spidev_acpi_ids[] = {
	/*
	 * The ACPI SPT000* devices are only meant for development and
	 * testing. Systems used in production should have a proper ACPI
	 * description of the connected peripheral and they should also use
	 * a proper driver instead of poking directly to the SPI bus.
	 */
	{ "SPT0001", SPIDEV_ACPI_DUMMY },
	{ "SPT0002", SPIDEV_ACPI_DUMMY },
	{ "SPT0003", SPIDEV_ACPI_DUMMY },
	{},
};
MODULE_DEVICE_TABLE(acpi, spidev_acpi_ids);
709*4882a593Smuzhiyun 
spidev_probe_acpi(struct spi_device * spi)710*4882a593Smuzhiyun static void spidev_probe_acpi(struct spi_device *spi)
711*4882a593Smuzhiyun {
712*4882a593Smuzhiyun 	const struct acpi_device_id *id;
713*4882a593Smuzhiyun 
714*4882a593Smuzhiyun 	if (!has_acpi_companion(&spi->dev))
715*4882a593Smuzhiyun 		return;
716*4882a593Smuzhiyun 
717*4882a593Smuzhiyun 	id = acpi_match_device(spidev_acpi_ids, &spi->dev);
718*4882a593Smuzhiyun 	if (WARN_ON(!id))
719*4882a593Smuzhiyun 		return;
720*4882a593Smuzhiyun 
721*4882a593Smuzhiyun 	if (id->driver_data == SPIDEV_ACPI_DUMMY)
722*4882a593Smuzhiyun 		dev_warn(&spi->dev, "do not use this driver in production systems!\n");
723*4882a593Smuzhiyun }
#else
/* No ACPI support configured: nothing to check at probe time. */
static inline void spidev_probe_acpi(struct spi_device *spi) {}
#endif
727*4882a593Smuzhiyun 
728*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
729*4882a593Smuzhiyun 
spidev_probe(struct spi_device * spi)730*4882a593Smuzhiyun static int spidev_probe(struct spi_device *spi)
731*4882a593Smuzhiyun {
732*4882a593Smuzhiyun 	struct spidev_data	*spidev;
733*4882a593Smuzhiyun 	int			status;
734*4882a593Smuzhiyun 	unsigned long		minor;
735*4882a593Smuzhiyun 
736*4882a593Smuzhiyun 	/*
737*4882a593Smuzhiyun 	 * spidev should never be referenced in DT without a specific
738*4882a593Smuzhiyun 	 * compatible string, it is a Linux implementation thing
739*4882a593Smuzhiyun 	 * rather than a description of the hardware.
740*4882a593Smuzhiyun 	 */
741*4882a593Smuzhiyun 	WARN(spi->dev.of_node &&
742*4882a593Smuzhiyun 	     of_device_is_compatible(spi->dev.of_node, "spidev"),
743*4882a593Smuzhiyun 	     "%pOF: buggy DT: spidev listed directly in DT\n", spi->dev.of_node);
744*4882a593Smuzhiyun 
745*4882a593Smuzhiyun 	spidev_probe_acpi(spi);
746*4882a593Smuzhiyun 
747*4882a593Smuzhiyun 	/* Allocate driver data */
748*4882a593Smuzhiyun 	spidev = kzalloc(sizeof(*spidev), GFP_KERNEL);
749*4882a593Smuzhiyun 	if (!spidev)
750*4882a593Smuzhiyun 		return -ENOMEM;
751*4882a593Smuzhiyun 
752*4882a593Smuzhiyun 	/* Initialize the driver data */
753*4882a593Smuzhiyun 	spidev->spi = spi;
754*4882a593Smuzhiyun 	spin_lock_init(&spidev->spi_lock);
755*4882a593Smuzhiyun 	mutex_init(&spidev->buf_lock);
756*4882a593Smuzhiyun 
757*4882a593Smuzhiyun 	INIT_LIST_HEAD(&spidev->device_entry);
758*4882a593Smuzhiyun 
759*4882a593Smuzhiyun 	/* If we can allocate a minor number, hook up this device.
760*4882a593Smuzhiyun 	 * Reusing minors is fine so long as udev or mdev is working.
761*4882a593Smuzhiyun 	 */
762*4882a593Smuzhiyun 	mutex_lock(&device_list_lock);
763*4882a593Smuzhiyun 	minor = find_first_zero_bit(minors, N_SPI_MINORS);
764*4882a593Smuzhiyun 	if (minor < N_SPI_MINORS) {
765*4882a593Smuzhiyun 		struct device *dev;
766*4882a593Smuzhiyun 
767*4882a593Smuzhiyun 		spidev->devt = MKDEV(SPIDEV_MAJOR, minor);
768*4882a593Smuzhiyun 		dev = device_create(spidev_class, &spi->dev, spidev->devt,
769*4882a593Smuzhiyun 				    spidev, "spidev%d.%d",
770*4882a593Smuzhiyun 				    spi->master->bus_num, spi->chip_select);
771*4882a593Smuzhiyun 		status = PTR_ERR_OR_ZERO(dev);
772*4882a593Smuzhiyun 	} else {
773*4882a593Smuzhiyun 		dev_dbg(&spi->dev, "no minor number available!\n");
774*4882a593Smuzhiyun 		status = -ENODEV;
775*4882a593Smuzhiyun 	}
776*4882a593Smuzhiyun 	if (status == 0) {
777*4882a593Smuzhiyun 		set_bit(minor, minors);
778*4882a593Smuzhiyun 		list_add(&spidev->device_entry, &device_list);
779*4882a593Smuzhiyun 	}
780*4882a593Smuzhiyun 	mutex_unlock(&device_list_lock);
781*4882a593Smuzhiyun 
782*4882a593Smuzhiyun 	spidev->speed_hz = spi->max_speed_hz;
783*4882a593Smuzhiyun 
784*4882a593Smuzhiyun 	if (status == 0)
785*4882a593Smuzhiyun 		spi_set_drvdata(spi, spidev);
786*4882a593Smuzhiyun 	else
787*4882a593Smuzhiyun 		kfree(spidev);
788*4882a593Smuzhiyun 
789*4882a593Smuzhiyun 	return status;
790*4882a593Smuzhiyun }
791*4882a593Smuzhiyun 
spidev_remove(struct spi_device * spi)792*4882a593Smuzhiyun static int spidev_remove(struct spi_device *spi)
793*4882a593Smuzhiyun {
794*4882a593Smuzhiyun 	struct spidev_data	*spidev = spi_get_drvdata(spi);
795*4882a593Smuzhiyun 
796*4882a593Smuzhiyun 	/* prevent new opens */
797*4882a593Smuzhiyun 	mutex_lock(&device_list_lock);
798*4882a593Smuzhiyun 	/* make sure ops on existing fds can abort cleanly */
799*4882a593Smuzhiyun 	spin_lock_irq(&spidev->spi_lock);
800*4882a593Smuzhiyun 	spidev->spi = NULL;
801*4882a593Smuzhiyun 	spin_unlock_irq(&spidev->spi_lock);
802*4882a593Smuzhiyun 
803*4882a593Smuzhiyun 	list_del(&spidev->device_entry);
804*4882a593Smuzhiyun 	device_destroy(spidev_class, spidev->devt);
805*4882a593Smuzhiyun 	clear_bit(MINOR(spidev->devt), minors);
806*4882a593Smuzhiyun 	if (spidev->users == 0)
807*4882a593Smuzhiyun 		kfree(spidev);
808*4882a593Smuzhiyun 	mutex_unlock(&device_list_lock);
809*4882a593Smuzhiyun 
810*4882a593Smuzhiyun 	return 0;
811*4882a593Smuzhiyun }
812*4882a593Smuzhiyun 
/* SPI driver glue: matches via OF/ACPI tables above, manages device nodes. */
static struct spi_driver spidev_spi_driver = {
	.driver = {
		.name =		"spidev",
		.of_match_table = of_match_ptr(spidev_dt_ids),
		.acpi_match_table = ACPI_PTR(spidev_acpi_ids),
	},
	.probe =	spidev_probe,
	.remove =	spidev_remove,

	/* NOTE:  suspend/resume methods are not necessary here.
	 * We don't do anything except pass the requests to/from
	 * the underlying controller.  The refrigerator handles
	 * most issues; the controller driver handles the rest.
	 */
};
828*4882a593Smuzhiyun 
829*4882a593Smuzhiyun /*-------------------------------------------------------------------------*/
830*4882a593Smuzhiyun 
spidev_init(void)831*4882a593Smuzhiyun static int __init spidev_init(void)
832*4882a593Smuzhiyun {
833*4882a593Smuzhiyun 	int status;
834*4882a593Smuzhiyun 
835*4882a593Smuzhiyun 	/* Claim our 256 reserved device numbers.  Then register a class
836*4882a593Smuzhiyun 	 * that will key udev/mdev to add/remove /dev nodes.  Last, register
837*4882a593Smuzhiyun 	 * the driver which manages those device numbers.
838*4882a593Smuzhiyun 	 */
839*4882a593Smuzhiyun 	BUILD_BUG_ON(N_SPI_MINORS > 256);
840*4882a593Smuzhiyun 	status = register_chrdev(SPIDEV_MAJOR, "spi", &spidev_fops);
841*4882a593Smuzhiyun 	if (status < 0)
842*4882a593Smuzhiyun 		return status;
843*4882a593Smuzhiyun 
844*4882a593Smuzhiyun 	spidev_class = class_create(THIS_MODULE, "spidev");
845*4882a593Smuzhiyun 	if (IS_ERR(spidev_class)) {
846*4882a593Smuzhiyun 		unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
847*4882a593Smuzhiyun 		return PTR_ERR(spidev_class);
848*4882a593Smuzhiyun 	}
849*4882a593Smuzhiyun 
850*4882a593Smuzhiyun 	status = spi_register_driver(&spidev_spi_driver);
851*4882a593Smuzhiyun 	if (status < 0) {
852*4882a593Smuzhiyun 		class_destroy(spidev_class);
853*4882a593Smuzhiyun 		unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
854*4882a593Smuzhiyun 	}
855*4882a593Smuzhiyun 	return status;
856*4882a593Smuzhiyun }
857*4882a593Smuzhiyun module_init(spidev_init);
858*4882a593Smuzhiyun 
/* Module exit: tear down in strict reverse order of spidev_init(). */
static void __exit spidev_exit(void)
{
	spi_unregister_driver(&spidev_spi_driver);
	class_destroy(spidev_class);
	unregister_chrdev(SPIDEV_MAJOR, spidev_spi_driver.driver.name);
}
module_exit(spidev_exit);
866*4882a593Smuzhiyun 
MODULE_AUTHOR("Andrea Paterniani, <a.paterniani@swapp-eng.it>");
MODULE_DESCRIPTION("User mode SPI device interface");
MODULE_LICENSE("GPL");
/* Allow auto-loading when the SPI core reports a "spidev" modalias. */
MODULE_ALIAS("spi:spidev");
871