// SPDX-License-Identifier: GPL-2.0-or-later
/*
 * nosy - Snoop mode driver for TI PCILynx 1394 controllers
 * Copyright (C) 2002-2007 Kristian Høgsberg
 */

#include <linux/device.h>
#include <linux/errno.h>
#include <linux/fs.h>
#include <linux/init.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/kernel.h>
#include <linux/kref.h>
#include <linux/miscdevice.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/poll.h>
#include <linux/sched.h> /* required for linux/wait.h */
#include <linux/slab.h>
#include <linux/spinlock.h>
#include <linux/time64.h>
#include <linux/timex.h>
#include <linux/uaccess.h>
#include <linux/wait.h>
#include <linux/dma-mapping.h>
#include <linux/atomic.h>
#include <asm/byteorder.h>

#include "nosy.h"
#include "nosy-user.h"

#define TCODE_PHY_PACKET		0x10
#define PCI_DEVICE_ID_TI_PCILYNX	0x8000

static char driver_name[] = KBUILD_MODNAME;

/* this is the physical layout of a PCL, its size is 128 bytes */
struct pcl {
	__le32 next;
	__le32 async_error_next;
	u32 user_data;
	__le32 pcl_status;
	__le32 remaining_transfer_count;
	__le32 next_data_buffer;
	struct {
		__le32 control;
		__le32 pointer;
	} buffer[13];
};

struct packet {
	unsigned int length;
	char data[];
};

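/*
 * Per-client capture buffer: a byte ring of "capacity" bytes holding
 * variable-length struct packet records.  The interrupt handler appends at
 * "tail", the reader consumes from "head", and the atomic "size" is the only
 * state both sides touch.
 */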
struct packet_buffer {
	char *data;
	size_t capacity;
	long total_packet_count, lost_packet_count;
	atomic_t size;
	struct packet *head, *tail;
	wait_queue_head_t wait;
};

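/*
 * One instance per PCILynx card.  The structure is refcounted (kref) because
 * open file handles hold a reference and may outlive PCI device removal.
 */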
struct pcilynx {
	struct pci_dev *pci_device;
	__iomem char *registers;

	struct pcl *rcv_start_pcl, *rcv_pcl;
	__le32 *rcv_buffer;

	dma_addr_t rcv_start_pcl_bus, rcv_pcl_bus, rcv_buffer_bus;

	spinlock_t client_list_lock;
	struct list_head client_list;

	struct miscdevice misc;
	struct list_head link;
	struct kref kref;
};

static inline struct pcilynx *
lynx_get(struct pcilynx *lynx)
{
	kref_get(&lynx->kref);

	return lynx;
}

static void
lynx_release(struct kref *kref)
{
	kfree(container_of(kref, struct pcilynx, kref));
}

static inline void
lynx_put(struct pcilynx *lynx)
{
	kref_put(&lynx->kref, lynx_release);
}

struct client {
	struct pcilynx *lynx;
	u32 tcode_mask;
	struct packet_buffer buffer;
	struct list_head link;
};

static DEFINE_MUTEX(card_mutex);
static LIST_HEAD(card_list);

static int
packet_buffer_init(struct packet_buffer *buffer, size_t capacity)
{
	buffer->data = kmalloc(capacity, GFP_KERNEL);
	if (buffer->data == NULL)
		return -ENOMEM;
	buffer->head = (struct packet *) buffer->data;
	buffer->tail = (struct packet *) buffer->data;
	buffer->capacity = capacity;
	buffer->lost_packet_count = 0;
	atomic_set(&buffer->size, 0);
	init_waitqueue_head(&buffer->wait);

	return 0;
}

static void
packet_buffer_destroy(struct packet_buffer *buffer)
{
	kfree(buffer->data);
}

static int
packet_buffer_get(struct client *client, char __user *data, size_t user_length)
{
	struct packet_buffer *buffer = &client->buffer;
	size_t length;
	char *end;

	if (wait_event_interruptible(buffer->wait,
				     atomic_read(&buffer->size) > 0) ||
	    list_empty(&client->lynx->link))
		return -ERESTARTSYS;

	if (atomic_read(&buffer->size) == 0)
		return -ENODEV;

	/* FIXME: Check length <= user_length. */

	end = buffer->data + buffer->capacity;
	length = buffer->head->length;

	if (&buffer->head->data[length] < end) {
		if (copy_to_user(data, buffer->head->data, length))
			return -EFAULT;
		buffer->head = (struct packet *) &buffer->head->data[length];
	} else {
		size_t split = end - buffer->head->data;

		if (copy_to_user(data, buffer->head->data, split))
			return -EFAULT;
		if (copy_to_user(data + split, buffer->data, length - split))
			return -EFAULT;
		buffer->head = (struct packet *) &buffer->data[length - split];
	}

	/*
	 * Decrease buffer->size as the last thing, since this is what
	 * keeps the interrupt from overwriting the packet we are
	 * retrieving from the buffer.
	 */
	atomic_sub(sizeof(struct packet) + length, &buffer->size);

	return length;
}

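/*
 * Called from interrupt context: copy one record into the ring, wrapping
 * around the end of the buffer if needed.  If the record would not fit,
 * the packet is dropped and only lost_packet_count is bumped.
 */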
static void
packet_buffer_put(struct packet_buffer *buffer, void *data, size_t length)
{
	char *end;

	buffer->total_packet_count++;

	if (buffer->capacity <
	    atomic_read(&buffer->size) + sizeof(struct packet) + length) {
		buffer->lost_packet_count++;
		return;
	}

	end = buffer->data + buffer->capacity;
	buffer->tail->length = length;

	if (&buffer->tail->data[length] < end) {
		memcpy(buffer->tail->data, data, length);
		buffer->tail = (struct packet *) &buffer->tail->data[length];
	} else {
		size_t split = end - buffer->tail->data;

		memcpy(buffer->tail->data, data, split);
		memcpy(buffer->data, data + split, length - split);
		buffer->tail = (struct packet *) &buffer->data[length - split];
	}

	/* Finally, adjust buffer size and wake up userspace reader. */

	atomic_add(sizeof(struct packet) + length, &buffer->size);
	wake_up_interruptible(&buffer->wait);
}

static inline void
reg_write(struct pcilynx *lynx, int offset, u32 data)
{
	writel(data, lynx->registers + offset);
}

static inline u32
reg_read(struct pcilynx *lynx, int offset)
{
	return readl(lynx->registers + offset);
}

static inline void
reg_set_bits(struct pcilynx *lynx, int offset, u32 mask)
{
	reg_write(lynx, offset, (reg_read(lynx, offset) | mask));
}

/*
 * Maybe the pcl programs could be set up to just append data instead
 * of using a whole packet.
 */
static inline void
run_pcl(struct pcilynx *lynx, dma_addr_t pcl_bus,
	   int dmachan)
{
	reg_write(lynx, DMA0_CURRENT_PCL + dmachan * 0x20, pcl_bus);
	reg_write(lynx, DMA0_CHAN_CTRL + dmachan * 0x20,
		  DMA_CHAN_CTRL_ENABLE | DMA_CHAN_CTRL_LINK);
}

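/*
 * Write one PHY register through the link layer's LINK_PHY mailbox register.
 * Only register addresses 0..15 and 8-bit values are accepted.
 */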
static int
set_phy_reg(struct pcilynx *lynx, int addr, int val)
{
	if (addr > 15) {
		dev_err(&lynx->pci_device->dev,
			"PHY register address %d out of range\n", addr);
		return -1;
	}
	if (val > 0xff) {
		dev_err(&lynx->pci_device->dev,
			"PHY register value %d out of range\n", val);
		return -1;
	}
	reg_write(lynx, LINK_PHY, LINK_PHY_WRITE |
		  LINK_PHY_ADDR(addr) | LINK_PHY_WDATA(val));

	return 0;
}

static int
nosy_open(struct inode *inode, struct file *file)
{
	int minor = iminor(inode);
	struct client *client;
	struct pcilynx *tmp, *lynx = NULL;

	mutex_lock(&card_mutex);
	list_for_each_entry(tmp, &card_list, link)
		if (tmp->misc.minor == minor) {
			lynx = lynx_get(tmp);
			break;
		}
	mutex_unlock(&card_mutex);
	if (lynx == NULL)
		return -ENODEV;

	client = kmalloc(sizeof *client, GFP_KERNEL);
	if (client == NULL)
		goto fail;

	client->tcode_mask = ~0;
	client->lynx = lynx;
	INIT_LIST_HEAD(&client->link);

	if (packet_buffer_init(&client->buffer, 128 * 1024) < 0)
		goto fail;

	file->private_data = client;

	return stream_open(inode, file);
fail:
	kfree(client);
	lynx_put(lynx);

	return -ENOMEM;
}

static int
nosy_release(struct inode *inode, struct file *file)
{
	struct client *client = file->private_data;
	struct pcilynx *lynx = client->lynx;

	spin_lock_irq(&lynx->client_list_lock);
	list_del_init(&client->link);
	spin_unlock_irq(&lynx->client_list_lock);

	packet_buffer_destroy(&client->buffer);
	kfree(client);
	lynx_put(lynx);

	return 0;
}

static __poll_t
nosy_poll(struct file *file, poll_table *pt)
{
	struct client *client = file->private_data;
	__poll_t ret = 0;

	poll_wait(file, &client->buffer.wait, pt);

	if (atomic_read(&client->buffer.size) > 0)
		ret = EPOLLIN | EPOLLRDNORM;

	if (list_empty(&client->lynx->link))
		ret |= EPOLLHUP;

	return ret;
}

static ssize_t
nosy_read(struct file *file, char __user *buffer, size_t count, loff_t *offset)
{
	struct client *client = file->private_data;

	return packet_buffer_get(client, buffer, count);
}

static long
nosy_ioctl(struct file *file, unsigned int cmd, unsigned long arg)
{
	struct client *client = file->private_data;
	spinlock_t *client_list_lock = &client->lynx->client_list_lock;
	struct nosy_stats stats;
	int ret;

	switch (cmd) {
	case NOSY_IOC_GET_STATS:
		spin_lock_irq(client_list_lock);
		stats.total_packet_count = client->buffer.total_packet_count;
		stats.lost_packet_count = client->buffer.lost_packet_count;
		spin_unlock_irq(client_list_lock);

		if (copy_to_user((void __user *) arg, &stats, sizeof stats))
			return -EFAULT;
		else
			return 0;

	case NOSY_IOC_START:
		ret = -EBUSY;
		spin_lock_irq(client_list_lock);
		if (list_empty(&client->link)) {
			list_add_tail(&client->link, &client->lynx->client_list);
			ret = 0;
		}
		spin_unlock_irq(client_list_lock);

		return ret;

	case NOSY_IOC_STOP:
		spin_lock_irq(client_list_lock);
		list_del_init(&client->link);
		spin_unlock_irq(client_list_lock);

		return 0;

	case NOSY_IOC_FILTER:
		spin_lock_irq(client_list_lock);
		client->tcode_mask = arg;
		spin_unlock_irq(client_list_lock);

		return 0;

	default:
		return -EINVAL;
		/* Flush buffer, configure filter. */
	}
}

static const struct file_operations nosy_ops = {
	.owner =		THIS_MODULE,
	.read =			nosy_read,
	.unlocked_ioctl =	nosy_ioctl,
	.poll =			nosy_poll,
	.open =			nosy_open,
	.release =		nosy_release,
};

#define PHY_PACKET_SIZE 12 /* 1 payload, 1 inverse, 1 ack = 3 quadlets */

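/*
 * One snooped packet has landed in rcv_buffer.  Pull the byte count out of
 * the PCL status, derive the tcode (or flag a PHY packet by its fixed size),
 * overwrite the first quadlet with the sub-second time in microseconds, and
 * hand a copy to every client whose tcode_mask matches.
 */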
static void
packet_irq_handler(struct pcilynx *lynx)
{
	struct client *client;
	u32 tcode_mask, tcode, timestamp;
	size_t length;
	struct timespec64 ts64;

	/* FIXME: Also report rcv_speed. */

	length = __le32_to_cpu(lynx->rcv_pcl->pcl_status) & 0x00001fff;
	tcode = __le32_to_cpu(lynx->rcv_buffer[1]) >> 4 & 0xf;

	ktime_get_real_ts64(&ts64);
	timestamp = ts64.tv_nsec / NSEC_PER_USEC;
	lynx->rcv_buffer[0] = (__force __le32)timestamp;

	if (length == PHY_PACKET_SIZE)
		tcode_mask = 1 << TCODE_PHY_PACKET;
	else
		tcode_mask = 1 << tcode;

	spin_lock(&lynx->client_list_lock);

	list_for_each_entry(client, &lynx->client_list, link)
		if (client->tcode_mask & tcode_mask)
			packet_buffer_put(&client->buffer,
					  lynx->rcv_buffer, length + 4);

	spin_unlock(&lynx->client_list_lock);
}

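/*
 * A bus reset is reported to every listening client as a bare 4-byte
 * timestamp record with no packet payload.
 */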
static void
bus_reset_irq_handler(struct pcilynx *lynx)
{
	struct client *client;
	struct timespec64 ts64;
	u32 timestamp;

	ktime_get_real_ts64(&ts64);
	timestamp = ts64.tv_nsec / NSEC_PER_USEC;

	spin_lock(&lynx->client_list_lock);

	list_for_each_entry(client, &lynx->client_list, link)
		packet_buffer_put(&client->buffer, &timestamp, 4);

	spin_unlock(&lynx->client_list_lock);
}

static irqreturn_t
irq_handler(int irq, void *device)
{
	struct pcilynx *lynx = device;
	u32 pci_int_status;

	pci_int_status = reg_read(lynx, PCI_INT_STATUS);

	if (pci_int_status == ~0)
		/* Card was ejected. */
		return IRQ_NONE;

	if ((pci_int_status & PCI_INT_INT_PEND) == 0)
		/* Not our interrupt, bail out quickly. */
		return IRQ_NONE;

	if ((pci_int_status & PCI_INT_P1394_INT) != 0) {
		u32 link_int_status;

		link_int_status = reg_read(lynx, LINK_INT_STATUS);
		reg_write(lynx, LINK_INT_STATUS, link_int_status);

		if ((link_int_status & LINK_INT_PHY_BUSRESET) > 0)
			bus_reset_irq_handler(lynx);
	}

	/* Clear the PCI_INT_STATUS register only after clearing the
	 * LINK_INT_STATUS register; otherwise the PCI_INT_P1394 will
	 * be set again immediately. */

	reg_write(lynx, PCI_INT_STATUS, pci_int_status);

	if ((pci_int_status & PCI_INT_DMA0_HLT) > 0) {
		packet_irq_handler(lynx);
		run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);
	}

	return IRQ_HANDLED;
}

static void
remove_card(struct pci_dev *dev)
{
	struct pcilynx *lynx = pci_get_drvdata(dev);
	struct client *client;

	mutex_lock(&card_mutex);
	list_del_init(&lynx->link);
	misc_deregister(&lynx->misc);
	mutex_unlock(&card_mutex);

	reg_write(lynx, PCI_INT_ENABLE, 0);
	free_irq(lynx->pci_device->irq, lynx);

	spin_lock_irq(&lynx->client_list_lock);
	list_for_each_entry(client, &lynx->client_list, link)
		wake_up_interruptible(&client->buffer.wait);
	spin_unlock_irq(&lynx->client_list_lock);

	pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
			    lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
	pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
			    lynx->rcv_pcl, lynx->rcv_pcl_bus);
	pci_free_consistent(lynx->pci_device, PAGE_SIZE,
			    lynx->rcv_buffer, lynx->rcv_buffer_bus);

	iounmap(lynx->registers);
	pci_disable_device(dev);
	lynx_put(lynx);
}

#define RCV_BUFFER_SIZE (16 * 1024)

static int
add_card(struct pci_dev *dev, const struct pci_device_id *unused)
{
	struct pcilynx *lynx;
	u32 p, end;
	int ret, i;

	if (pci_set_dma_mask(dev, DMA_BIT_MASK(32))) {
		dev_err(&dev->dev,
		    "DMA address limits not supported for PCILynx hardware\n");
		return -ENXIO;
	}
	if (pci_enable_device(dev)) {
		dev_err(&dev->dev, "Failed to enable PCILynx hardware\n");
		return -ENXIO;
	}
	pci_set_master(dev);

	lynx = kzalloc(sizeof *lynx, GFP_KERNEL);
	if (lynx == NULL) {
		dev_err(&dev->dev, "Failed to allocate control structure\n");
		ret = -ENOMEM;
		goto fail_disable;
	}
	lynx->pci_device = dev;
	pci_set_drvdata(dev, lynx);

	spin_lock_init(&lynx->client_list_lock);
	INIT_LIST_HEAD(&lynx->client_list);
	kref_init(&lynx->kref);

	lynx->registers = ioremap(pci_resource_start(dev, 0),
				  PCILYNX_MAX_REGISTER);
	if (lynx->registers == NULL) {
		dev_err(&dev->dev, "Failed to map registers\n");
		ret = -ENOMEM;
		goto fail_deallocate_lynx;
	}

	lynx->rcv_start_pcl = pci_alloc_consistent(lynx->pci_device,
				sizeof(struct pcl), &lynx->rcv_start_pcl_bus);
	lynx->rcv_pcl = pci_alloc_consistent(lynx->pci_device,
				sizeof(struct pcl), &lynx->rcv_pcl_bus);
	lynx->rcv_buffer = pci_alloc_consistent(lynx->pci_device,
				RCV_BUFFER_SIZE, &lynx->rcv_buffer_bus);
	if (lynx->rcv_start_pcl == NULL ||
	    lynx->rcv_pcl == NULL ||
	    lynx->rcv_buffer == NULL) {
		dev_err(&dev->dev, "Failed to allocate receive buffer\n");
		ret = -ENOMEM;
		goto fail_deallocate_buffers;
	}
	lynx->rcv_start_pcl->next	= cpu_to_le32(lynx->rcv_pcl_bus);
	lynx->rcv_pcl->next		= cpu_to_le32(PCL_NEXT_INVALID);
	lynx->rcv_pcl->async_error_next	= cpu_to_le32(PCL_NEXT_INVALID);

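	/*
	 * The receive PCL scatters each packet into the 16 KiB receive buffer
	 * in 2048-byte chunks.  The first chunk starts 4 bytes in, leaving
	 * room for the timestamp quadlet that packet_irq_handler() writes at
	 * rcv_buffer[0] before the packet is handed to clients.
	 */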
	lynx->rcv_pcl->buffer[0].control =
		cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2044);
	lynx->rcv_pcl->buffer[0].pointer =
		cpu_to_le32(lynx->rcv_buffer_bus + 4);
	p = lynx->rcv_buffer_bus + 2048;
	end = lynx->rcv_buffer_bus + RCV_BUFFER_SIZE;
	for (i = 1; p < end; i++, p += 2048) {
		lynx->rcv_pcl->buffer[i].control =
			cpu_to_le32(PCL_CMD_RCV | PCL_BIGENDIAN | 2048);
		lynx->rcv_pcl->buffer[i].pointer = cpu_to_le32(p);
	}
	lynx->rcv_pcl->buffer[i - 1].control |= cpu_to_le32(PCL_LAST_BUFF);

	reg_set_bits(lynx, MISC_CONTROL, MISC_CONTROL_SWRESET);
	/* Fix buggy cards with autoboot pin not tied low: */
	reg_write(lynx, DMA0_CHAN_CTRL, 0);
	reg_write(lynx, DMA_GLOBAL_REGISTER, 0x00 << 24);

#if 0
	/* now, looking for PHY register set */
	if ((get_phy_reg(lynx, 2) & 0xe0) == 0xe0) {
		lynx->phyic.reg_1394a = 1;
		PRINT(KERN_INFO, lynx->id,
		      "found 1394a conform PHY (using extended register set)");
		lynx->phyic.vendor = get_phy_vendorid(lynx);
		lynx->phyic.product = get_phy_productid(lynx);
	} else {
		lynx->phyic.reg_1394a = 0;
		PRINT(KERN_INFO, lynx->id, "found old 1394 PHY");
	}
#endif

	/* Setup the general receive FIFO max size. */
	reg_write(lynx, FIFO_SIZES, 255);

	reg_set_bits(lynx, PCI_INT_ENABLE, PCI_INT_DMA_ALL);

	reg_write(lynx, LINK_INT_ENABLE,
		  LINK_INT_PHY_TIME_OUT | LINK_INT_PHY_REG_RCVD |
		  LINK_INT_PHY_BUSRESET | LINK_INT_IT_STUCK |
		  LINK_INT_AT_STUCK | LINK_INT_SNTRJ |
		  LINK_INT_TC_ERR | LINK_INT_GRF_OVER_FLOW |
		  LINK_INT_ITF_UNDER_FLOW | LINK_INT_ATF_UNDER_FLOW);

	/* Disable the L flag in self ID packets. */
	set_phy_reg(lynx, 4, 0);

	/* Put this baby into snoop mode */
	reg_set_bits(lynx, LINK_CONTROL, LINK_CONTROL_SNOOP_ENABLE);

	run_pcl(lynx, lynx->rcv_start_pcl_bus, 0);

	if (request_irq(dev->irq, irq_handler, IRQF_SHARED,
			driver_name, lynx)) {
		dev_err(&dev->dev,
			"Failed to allocate shared interrupt %d\n", dev->irq);
		ret = -EIO;
		goto fail_deallocate_buffers;
	}

	lynx->misc.parent = &dev->dev;
	lynx->misc.minor = MISC_DYNAMIC_MINOR;
	lynx->misc.name = "nosy";
	lynx->misc.fops = &nosy_ops;

	mutex_lock(&card_mutex);
	ret = misc_register(&lynx->misc);
	if (ret) {
		dev_err(&dev->dev, "Failed to register misc char device\n");
		mutex_unlock(&card_mutex);
		goto fail_free_irq;
	}
	list_add_tail(&lynx->link, &card_list);
	mutex_unlock(&card_mutex);

	dev_info(&dev->dev,
		 "Initialized PCILynx IEEE1394 card, irq=%d\n", dev->irq);

	return 0;

fail_free_irq:
	reg_write(lynx, PCI_INT_ENABLE, 0);
	free_irq(lynx->pci_device->irq, lynx);

fail_deallocate_buffers:
	if (lynx->rcv_start_pcl)
		pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
				lynx->rcv_start_pcl, lynx->rcv_start_pcl_bus);
	if (lynx->rcv_pcl)
		pci_free_consistent(lynx->pci_device, sizeof(struct pcl),
				lynx->rcv_pcl, lynx->rcv_pcl_bus);
	if (lynx->rcv_buffer)
		pci_free_consistent(lynx->pci_device, PAGE_SIZE,
				lynx->rcv_buffer, lynx->rcv_buffer_bus);
	iounmap(lynx->registers);

fail_deallocate_lynx:
	kfree(lynx);

fail_disable:
	pci_disable_device(dev);

	return ret;
}

static struct pci_device_id pci_table[] = {
	{
		.vendor =    PCI_VENDOR_ID_TI,
		.device =    PCI_DEVICE_ID_TI_PCILYNX,
		.subvendor = PCI_ANY_ID,
		.subdevice = PCI_ANY_ID,
	},
	{ }	/* Terminating entry */
};

MODULE_DEVICE_TABLE(pci, pci_table);

static struct pci_driver lynx_pci_driver = {
	.name =		driver_name,
	.id_table =	pci_table,
	.probe =	add_card,
	.remove =	remove_card,
};

module_pci_driver(lynx_pci_driver);

MODULE_AUTHOR("Kristian Hoegsberg");
MODULE_DESCRIPTION("Snoop mode driver for TI pcilynx 1394 controllers");
MODULE_LICENSE("GPL");