// SPDX-License-Identifier: GPL-2.0+
/*
 * Mellanox BlueField SoC TmFifo driver
 *
 * Copyright (C) 2019 Mellanox Technologies
 */
7*4882a593Smuzhiyun
8*4882a593Smuzhiyun #include <linux/acpi.h>
9*4882a593Smuzhiyun #include <linux/bitfield.h>
10*4882a593Smuzhiyun #include <linux/circ_buf.h>
11*4882a593Smuzhiyun #include <linux/efi.h>
12*4882a593Smuzhiyun #include <linux/irq.h>
13*4882a593Smuzhiyun #include <linux/module.h>
14*4882a593Smuzhiyun #include <linux/mutex.h>
15*4882a593Smuzhiyun #include <linux/platform_device.h>
16*4882a593Smuzhiyun #include <linux/types.h>
17*4882a593Smuzhiyun
18*4882a593Smuzhiyun #include <linux/virtio_config.h>
19*4882a593Smuzhiyun #include <linux/virtio_console.h>
20*4882a593Smuzhiyun #include <linux/virtio_ids.h>
21*4882a593Smuzhiyun #include <linux/virtio_net.h>
22*4882a593Smuzhiyun #include <linux/virtio_ring.h>
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun #include "mlxbf-tmfifo-regs.h"
25*4882a593Smuzhiyun
26*4882a593Smuzhiyun /* Vring size. */
27*4882a593Smuzhiyun #define MLXBF_TMFIFO_VRING_SIZE SZ_1K
28*4882a593Smuzhiyun
29*4882a593Smuzhiyun /* Console Tx buffer size. */
30*4882a593Smuzhiyun #define MLXBF_TMFIFO_CON_TX_BUF_SIZE SZ_32K
31*4882a593Smuzhiyun
32*4882a593Smuzhiyun /* Console Tx buffer reserved space. */
33*4882a593Smuzhiyun #define MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE 8
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun /* House-keeping timer interval. */
36*4882a593Smuzhiyun #define MLXBF_TMFIFO_TIMER_INTERVAL (HZ / 10)
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun /* Virtual devices sharing the TM FIFO. */
39*4882a593Smuzhiyun #define MLXBF_TMFIFO_VDEV_MAX (VIRTIO_ID_CONSOLE + 1)
40*4882a593Smuzhiyun
41*4882a593Smuzhiyun /*
42*4882a593Smuzhiyun * Reserve 1/16 of TmFifo space, so console messages are not starved by
43*4882a593Smuzhiyun * the networking traffic.
44*4882a593Smuzhiyun */
45*4882a593Smuzhiyun #define MLXBF_TMFIFO_RESERVE_RATIO 16
46*4882a593Smuzhiyun
47*4882a593Smuzhiyun /* Message with data needs at least two words (for header & data). */
48*4882a593Smuzhiyun #define MLXBF_TMFIFO_DATA_MIN_WORDS 2
49*4882a593Smuzhiyun
50*4882a593Smuzhiyun struct mlxbf_tmfifo;
51*4882a593Smuzhiyun
/**
 * struct mlxbf_tmfifo_vring - Structure of the TmFifo virtual ring
 * @va: virtual address of the ring
 * @dma: dma address of the ring
 * @vq: pointer to the virtio virtqueue
 * @desc: current descriptor of the pending packet
 * @desc_head: head descriptor of the pending packet
 * @cur_len: processed length of the current descriptor
 * @rem_len: remaining length of the pending packet
 * @pkt_len: total length of the pending packet
 * @next_avail: next avail descriptor id
 * @num: vring size (number of descriptors)
 * @align: vring alignment size
 * @index: vring index
 * @vdev_id: vring virtio id (VIRTIO_ID_xxx)
 * @fifo: pointer to the tmfifo structure
 */
struct mlxbf_tmfifo_vring {
	void *va;
	dma_addr_t dma;
	struct virtqueue *vq;
	struct vring_desc *desc;
	struct vring_desc *desc_head;
	int cur_len;
	int rem_len;
	u32 pkt_len;
	u16 next_avail;
	int num;
	int align;
	int index;
	int vdev_id;
	struct mlxbf_tmfifo *fifo;
};
85*4882a593Smuzhiyun
/* Interrupt types (bit index into mlxbf_tmfifo.pend_events). */
enum {
	MLXBF_TM_RX_LWM_IRQ,	/* Rx low-water-mark interrupt */
	MLXBF_TM_RX_HWM_IRQ,	/* Rx high-water-mark interrupt */
	MLXBF_TM_TX_LWM_IRQ,	/* Tx low-water-mark interrupt */
	MLXBF_TM_TX_HWM_IRQ,	/* Tx high-water-mark interrupt */
	MLXBF_TM_MAX_IRQ	/* number of interrupt types */
};
94*4882a593Smuzhiyun
/* Ring types (Rx & Tx), used as index into mlxbf_tmfifo_vdev.vrings. */
enum {
	MLXBF_TMFIFO_VRING_RX,
	MLXBF_TMFIFO_VRING_TX,
	MLXBF_TMFIFO_VRING_MAX
};
101*4882a593Smuzhiyun
/**
 * struct mlxbf_tmfifo_vdev - Structure of the TmFifo virtual device
 * @vdev: virtio device, in which the vdev.id.device field has the
 *        VIRTIO_ID_xxx id to distinguish the virtual device.
 * @status: status of the device
 * @features: supported features of the device
 * @vrings: array of tmfifo vrings of this device
 * @config.cons: virtual console config -
 *               select if vdev.id.device is VIRTIO_ID_CONSOLE
 * @config.net: virtual network config -
 *              select if vdev.id.device is VIRTIO_ID_NET
 * @tx_buf: tx buffer used to buffer data before writing into the FIFO
 */
struct mlxbf_tmfifo_vdev {
	struct virtio_device vdev;
	u8 status;
	u64 features;
	struct mlxbf_tmfifo_vring vrings[MLXBF_TMFIFO_VRING_MAX];
	union {
		struct virtio_console_config cons;
		struct virtio_net_config net;
	} config;
	struct circ_buf tx_buf;
};
126*4882a593Smuzhiyun
/**
 * struct mlxbf_tmfifo_irq_info - Structure of the interrupt information
 * @fifo: pointer to the tmfifo structure
 * @irq: interrupt number
 * @index: index into the interrupt array (MLXBF_TM_xxx_IRQ)
 */
struct mlxbf_tmfifo_irq_info {
	struct mlxbf_tmfifo *fifo;
	int irq;
	int index;
};
138*4882a593Smuzhiyun
/**
 * struct mlxbf_tmfifo - Structure of the TmFifo
 * @vdev: array of the virtual devices running over the TmFifo
 * @lock: lock to protect the TmFifo access
 * @rx_base: mapped register base address for the Rx FIFO
 * @tx_base: mapped register base address for the Tx FIFO
 * @rx_fifo_size: number of entries of the Rx FIFO
 * @tx_fifo_size: number of entries of the Tx FIFO
 * @pend_events: pending bits for deferred events
 * @irq_info: interrupt information
 * @work: work struct for deferred process
 * @timer: background timer
 * @vring: Tx/Rx ring currently being serviced, indexed by an is_rx flag
 *         (0 = Tx, 1 = Rx)
 * @spin_lock: Tx/Rx spin lock
 * @is_ready: ready flag
 */
struct mlxbf_tmfifo {
	struct mlxbf_tmfifo_vdev *vdev[MLXBF_TMFIFO_VDEV_MAX];
	struct mutex lock; /* TmFifo lock */
	void __iomem *rx_base;
	void __iomem *tx_base;
	int rx_fifo_size;
	int tx_fifo_size;
	unsigned long pend_events;
	struct mlxbf_tmfifo_irq_info irq_info[MLXBF_TM_MAX_IRQ];
	struct work_struct work;
	struct timer_list timer;
	struct mlxbf_tmfifo_vring *vring[2];
	spinlock_t spin_lock[2]; /* spin lock */
	bool is_ready;
};
170*4882a593Smuzhiyun
/**
 * struct mlxbf_tmfifo_msg_hdr - Structure of the TmFifo message header
 * @type: message type (VIRTIO_ID_NET or VIRTIO_ID_CONSOLE)
 * @len: payload length in network byte order. Messages sent into the FIFO
 *       will be read by the other side as data stream in the same byte order.
 *       The length needs to be encoded into network order so both sides
 *       could understand it.
 * @unused: padding so the header fills exactly one 8-byte FIFO word
 */
struct mlxbf_tmfifo_msg_hdr {
	u8 type;
	__be16 len;
	u8 unused[5];
} __packed __aligned(sizeof(u64));
184*4882a593Smuzhiyun
/*
 * Default MAC.
 * This MAC address will be read from EFI persistent variable if configured.
 * It can also be reconfigured with standard Linux tools.
 */
static u8 mlxbf_tmfifo_net_default_mac[ETH_ALEN] = {
	0x00, 0x1A, 0xCA, 0xFF, 0xFF, 0x01
};

/* EFI variable name of the MAC address. */
static efi_char16_t mlxbf_tmfifo_efi_name[] = L"RshimMacAddr";
196*4882a593Smuzhiyun
197*4882a593Smuzhiyun /* Maximum L2 header length. */
198*4882a593Smuzhiyun #define MLXBF_TMFIFO_NET_L2_OVERHEAD 36
199*4882a593Smuzhiyun
200*4882a593Smuzhiyun /* Supported virtio-net features. */
201*4882a593Smuzhiyun #define MLXBF_TMFIFO_NET_FEATURES \
202*4882a593Smuzhiyun (BIT_ULL(VIRTIO_NET_F_MTU) | BIT_ULL(VIRTIO_NET_F_STATUS) | \
203*4882a593Smuzhiyun BIT_ULL(VIRTIO_NET_F_MAC))
204*4882a593Smuzhiyun
205*4882a593Smuzhiyun #define mlxbf_vdev_to_tmfifo(d) container_of(d, struct mlxbf_tmfifo_vdev, vdev)
206*4882a593Smuzhiyun
207*4882a593Smuzhiyun /* Free vrings of the FIFO device. */
mlxbf_tmfifo_free_vrings(struct mlxbf_tmfifo * fifo,struct mlxbf_tmfifo_vdev * tm_vdev)208*4882a593Smuzhiyun static void mlxbf_tmfifo_free_vrings(struct mlxbf_tmfifo *fifo,
209*4882a593Smuzhiyun struct mlxbf_tmfifo_vdev *tm_vdev)
210*4882a593Smuzhiyun {
211*4882a593Smuzhiyun struct mlxbf_tmfifo_vring *vring;
212*4882a593Smuzhiyun int i, size;
213*4882a593Smuzhiyun
214*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
215*4882a593Smuzhiyun vring = &tm_vdev->vrings[i];
216*4882a593Smuzhiyun if (vring->va) {
217*4882a593Smuzhiyun size = vring_size(vring->num, vring->align);
218*4882a593Smuzhiyun dma_free_coherent(tm_vdev->vdev.dev.parent, size,
219*4882a593Smuzhiyun vring->va, vring->dma);
220*4882a593Smuzhiyun vring->va = NULL;
221*4882a593Smuzhiyun if (vring->vq) {
222*4882a593Smuzhiyun vring_del_virtqueue(vring->vq);
223*4882a593Smuzhiyun vring->vq = NULL;
224*4882a593Smuzhiyun }
225*4882a593Smuzhiyun }
226*4882a593Smuzhiyun }
227*4882a593Smuzhiyun }
228*4882a593Smuzhiyun
229*4882a593Smuzhiyun /* Allocate vrings for the FIFO. */
mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo * fifo,struct mlxbf_tmfifo_vdev * tm_vdev)230*4882a593Smuzhiyun static int mlxbf_tmfifo_alloc_vrings(struct mlxbf_tmfifo *fifo,
231*4882a593Smuzhiyun struct mlxbf_tmfifo_vdev *tm_vdev)
232*4882a593Smuzhiyun {
233*4882a593Smuzhiyun struct mlxbf_tmfifo_vring *vring;
234*4882a593Smuzhiyun struct device *dev;
235*4882a593Smuzhiyun dma_addr_t dma;
236*4882a593Smuzhiyun int i, size;
237*4882a593Smuzhiyun void *va;
238*4882a593Smuzhiyun
239*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
240*4882a593Smuzhiyun vring = &tm_vdev->vrings[i];
241*4882a593Smuzhiyun vring->fifo = fifo;
242*4882a593Smuzhiyun vring->num = MLXBF_TMFIFO_VRING_SIZE;
243*4882a593Smuzhiyun vring->align = SMP_CACHE_BYTES;
244*4882a593Smuzhiyun vring->index = i;
245*4882a593Smuzhiyun vring->vdev_id = tm_vdev->vdev.id.device;
246*4882a593Smuzhiyun dev = &tm_vdev->vdev.dev;
247*4882a593Smuzhiyun
248*4882a593Smuzhiyun size = vring_size(vring->num, vring->align);
249*4882a593Smuzhiyun va = dma_alloc_coherent(dev->parent, size, &dma, GFP_KERNEL);
250*4882a593Smuzhiyun if (!va) {
251*4882a593Smuzhiyun mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
252*4882a593Smuzhiyun dev_err(dev->parent, "dma_alloc_coherent failed\n");
253*4882a593Smuzhiyun return -ENOMEM;
254*4882a593Smuzhiyun }
255*4882a593Smuzhiyun
256*4882a593Smuzhiyun vring->va = va;
257*4882a593Smuzhiyun vring->dma = dma;
258*4882a593Smuzhiyun }
259*4882a593Smuzhiyun
260*4882a593Smuzhiyun return 0;
261*4882a593Smuzhiyun }
262*4882a593Smuzhiyun
263*4882a593Smuzhiyun /* Disable interrupts of the FIFO device. */
mlxbf_tmfifo_disable_irqs(struct mlxbf_tmfifo * fifo)264*4882a593Smuzhiyun static void mlxbf_tmfifo_disable_irqs(struct mlxbf_tmfifo *fifo)
265*4882a593Smuzhiyun {
266*4882a593Smuzhiyun int i, irq;
267*4882a593Smuzhiyun
268*4882a593Smuzhiyun for (i = 0; i < MLXBF_TM_MAX_IRQ; i++) {
269*4882a593Smuzhiyun irq = fifo->irq_info[i].irq;
270*4882a593Smuzhiyun fifo->irq_info[i].irq = 0;
271*4882a593Smuzhiyun disable_irq(irq);
272*4882a593Smuzhiyun }
273*4882a593Smuzhiyun }
274*4882a593Smuzhiyun
275*4882a593Smuzhiyun /* Interrupt handler. */
mlxbf_tmfifo_irq_handler(int irq,void * arg)276*4882a593Smuzhiyun static irqreturn_t mlxbf_tmfifo_irq_handler(int irq, void *arg)
277*4882a593Smuzhiyun {
278*4882a593Smuzhiyun struct mlxbf_tmfifo_irq_info *irq_info = arg;
279*4882a593Smuzhiyun
280*4882a593Smuzhiyun if (!test_and_set_bit(irq_info->index, &irq_info->fifo->pend_events))
281*4882a593Smuzhiyun schedule_work(&irq_info->fifo->work);
282*4882a593Smuzhiyun
283*4882a593Smuzhiyun return IRQ_HANDLED;
284*4882a593Smuzhiyun }
285*4882a593Smuzhiyun
/*
 * Get the next packet descriptor from the vring.
 * Returns NULL when no new buffer has been posted by the driver side.
 */
static struct vring_desc *
mlxbf_tmfifo_get_next_desc(struct mlxbf_tmfifo_vring *vring)
{
	const struct vring *vr = virtqueue_get_vring(vring->vq);
	struct virtio_device *vdev = vring->vq->vdev;
	unsigned int idx, head;

	/* Nothing new in the avail ring. */
	if (vring->next_avail == virtio16_to_cpu(vdev, vr->avail->idx))
		return NULL;

	/* Make sure 'avail->idx' is visible already. */
	virtio_rmb(false);

	idx = vring->next_avail % vr->num;
	head = virtio16_to_cpu(vdev, vr->avail->ring[idx]);
	/* A head beyond the descriptor table would be a corrupt ring. */
	if (WARN_ON(head >= vr->num))
		return NULL;

	vring->next_avail++;

	return &vr->desc[head];
}
309*4882a593Smuzhiyun
/*
 * Release virtio descriptor: publish the chain head id and the number of
 * bytes written ('len') into the used ring, then advance 'used->idx'.
 */
static void mlxbf_tmfifo_release_desc(struct mlxbf_tmfifo_vring *vring,
				      struct vring_desc *desc, u32 len)
{
	const struct vring *vr = virtqueue_get_vring(vring->vq);
	struct virtio_device *vdev = vring->vq->vdev;
	u16 idx, vr_idx;

	vr_idx = virtio16_to_cpu(vdev, vr->used->idx);
	idx = vr_idx % vr->num;
	/* 'desc - vr->desc' recovers the index of the chain head. */
	vr->used->ring[idx].id = cpu_to_virtio32(vdev, desc - vr->desc);
	vr->used->ring[idx].len = cpu_to_virtio32(vdev, len);

	/*
	 * Virtio could poll and check the 'idx' to decide whether the desc is
	 * done or not. Add a memory barrier here to make sure the update above
	 * completes before updating the idx.
	 */
	virtio_mb(false);
	vr->used->idx = cpu_to_virtio16(vdev, vr_idx + 1);
}
331*4882a593Smuzhiyun
332*4882a593Smuzhiyun /* Get the total length of the descriptor chain. */
mlxbf_tmfifo_get_pkt_len(struct mlxbf_tmfifo_vring * vring,struct vring_desc * desc)333*4882a593Smuzhiyun static u32 mlxbf_tmfifo_get_pkt_len(struct mlxbf_tmfifo_vring *vring,
334*4882a593Smuzhiyun struct vring_desc *desc)
335*4882a593Smuzhiyun {
336*4882a593Smuzhiyun const struct vring *vr = virtqueue_get_vring(vring->vq);
337*4882a593Smuzhiyun struct virtio_device *vdev = vring->vq->vdev;
338*4882a593Smuzhiyun u32 len = 0, idx;
339*4882a593Smuzhiyun
340*4882a593Smuzhiyun while (desc) {
341*4882a593Smuzhiyun len += virtio32_to_cpu(vdev, desc->len);
342*4882a593Smuzhiyun if (!(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT))
343*4882a593Smuzhiyun break;
344*4882a593Smuzhiyun idx = virtio16_to_cpu(vdev, desc->next);
345*4882a593Smuzhiyun desc = &vr->desc[idx];
346*4882a593Smuzhiyun }
347*4882a593Smuzhiyun
348*4882a593Smuzhiyun return len;
349*4882a593Smuzhiyun }
350*4882a593Smuzhiyun
mlxbf_tmfifo_release_pending_pkt(struct mlxbf_tmfifo_vring * vring)351*4882a593Smuzhiyun static void mlxbf_tmfifo_release_pending_pkt(struct mlxbf_tmfifo_vring *vring)
352*4882a593Smuzhiyun {
353*4882a593Smuzhiyun struct vring_desc *desc_head;
354*4882a593Smuzhiyun u32 len = 0;
355*4882a593Smuzhiyun
356*4882a593Smuzhiyun if (vring->desc_head) {
357*4882a593Smuzhiyun desc_head = vring->desc_head;
358*4882a593Smuzhiyun len = vring->pkt_len;
359*4882a593Smuzhiyun } else {
360*4882a593Smuzhiyun desc_head = mlxbf_tmfifo_get_next_desc(vring);
361*4882a593Smuzhiyun len = mlxbf_tmfifo_get_pkt_len(vring, desc_head);
362*4882a593Smuzhiyun }
363*4882a593Smuzhiyun
364*4882a593Smuzhiyun if (desc_head)
365*4882a593Smuzhiyun mlxbf_tmfifo_release_desc(vring, desc_head, len);
366*4882a593Smuzhiyun
367*4882a593Smuzhiyun vring->pkt_len = 0;
368*4882a593Smuzhiyun vring->desc = NULL;
369*4882a593Smuzhiyun vring->desc_head = NULL;
370*4882a593Smuzhiyun }
371*4882a593Smuzhiyun
mlxbf_tmfifo_init_net_desc(struct mlxbf_tmfifo_vring * vring,struct vring_desc * desc,bool is_rx)372*4882a593Smuzhiyun static void mlxbf_tmfifo_init_net_desc(struct mlxbf_tmfifo_vring *vring,
373*4882a593Smuzhiyun struct vring_desc *desc, bool is_rx)
374*4882a593Smuzhiyun {
375*4882a593Smuzhiyun struct virtio_device *vdev = vring->vq->vdev;
376*4882a593Smuzhiyun struct virtio_net_hdr *net_hdr;
377*4882a593Smuzhiyun
378*4882a593Smuzhiyun net_hdr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
379*4882a593Smuzhiyun memset(net_hdr, 0, sizeof(*net_hdr));
380*4882a593Smuzhiyun }
381*4882a593Smuzhiyun
382*4882a593Smuzhiyun /* Get and initialize the next packet. */
383*4882a593Smuzhiyun static struct vring_desc *
mlxbf_tmfifo_get_next_pkt(struct mlxbf_tmfifo_vring * vring,bool is_rx)384*4882a593Smuzhiyun mlxbf_tmfifo_get_next_pkt(struct mlxbf_tmfifo_vring *vring, bool is_rx)
385*4882a593Smuzhiyun {
386*4882a593Smuzhiyun struct vring_desc *desc;
387*4882a593Smuzhiyun
388*4882a593Smuzhiyun desc = mlxbf_tmfifo_get_next_desc(vring);
389*4882a593Smuzhiyun if (desc && is_rx && vring->vdev_id == VIRTIO_ID_NET)
390*4882a593Smuzhiyun mlxbf_tmfifo_init_net_desc(vring, desc, is_rx);
391*4882a593Smuzhiyun
392*4882a593Smuzhiyun vring->desc_head = desc;
393*4882a593Smuzhiyun vring->desc = desc;
394*4882a593Smuzhiyun
395*4882a593Smuzhiyun return desc;
396*4882a593Smuzhiyun }
397*4882a593Smuzhiyun
398*4882a593Smuzhiyun /* House-keeping timer. */
mlxbf_tmfifo_timer(struct timer_list * t)399*4882a593Smuzhiyun static void mlxbf_tmfifo_timer(struct timer_list *t)
400*4882a593Smuzhiyun {
401*4882a593Smuzhiyun struct mlxbf_tmfifo *fifo = container_of(t, struct mlxbf_tmfifo, timer);
402*4882a593Smuzhiyun int rx, tx;
403*4882a593Smuzhiyun
404*4882a593Smuzhiyun rx = !test_and_set_bit(MLXBF_TM_RX_HWM_IRQ, &fifo->pend_events);
405*4882a593Smuzhiyun tx = !test_and_set_bit(MLXBF_TM_TX_LWM_IRQ, &fifo->pend_events);
406*4882a593Smuzhiyun
407*4882a593Smuzhiyun if (rx || tx)
408*4882a593Smuzhiyun schedule_work(&fifo->work);
409*4882a593Smuzhiyun
410*4882a593Smuzhiyun mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);
411*4882a593Smuzhiyun }
412*4882a593Smuzhiyun
/*
 * Copy one console packet into the output buffer.
 * The caller (mlxbf_tmfifo_console_output) has already checked via
 * CIRC_SPACE that the whole packet fits, so the copies cannot overrun.
 */
static void mlxbf_tmfifo_console_output_one(struct mlxbf_tmfifo_vdev *cons,
					    struct mlxbf_tmfifo_vring *vring,
					    struct vring_desc *desc)
{
	const struct vring *vr = virtqueue_get_vring(vring->vq);
	struct virtio_device *vdev = &cons->vdev;
	u32 len, idx, seg;
	void *addr;

	while (desc) {
		addr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));
		len = virtio32_to_cpu(vdev, desc->len);

		/* Contiguous room up to the physical end of the ring buffer. */
		seg = CIRC_SPACE_TO_END(cons->tx_buf.head, cons->tx_buf.tail,
					MLXBF_TMFIFO_CON_TX_BUF_SIZE);
		if (len <= seg) {
			memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, len);
		} else {
			/* Wrap-around: split the copy at the buffer end. */
			memcpy(cons->tx_buf.buf + cons->tx_buf.head, addr, seg);
			addr += seg;
			memcpy(cons->tx_buf.buf, addr, len - seg);
		}
		cons->tx_buf.head = (cons->tx_buf.head + len) %
			MLXBF_TMFIFO_CON_TX_BUF_SIZE;

		/* Stop after the last descriptor of the chain. */
		if (!(virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT))
			break;
		idx = virtio16_to_cpu(vdev, desc->next);
		desc = &vr->desc[idx];
	}
}
445*4882a593Smuzhiyun
446*4882a593Smuzhiyun /* Copy console data into the output buffer. */
mlxbf_tmfifo_console_output(struct mlxbf_tmfifo_vdev * cons,struct mlxbf_tmfifo_vring * vring)447*4882a593Smuzhiyun static void mlxbf_tmfifo_console_output(struct mlxbf_tmfifo_vdev *cons,
448*4882a593Smuzhiyun struct mlxbf_tmfifo_vring *vring)
449*4882a593Smuzhiyun {
450*4882a593Smuzhiyun struct vring_desc *desc;
451*4882a593Smuzhiyun u32 len, avail;
452*4882a593Smuzhiyun
453*4882a593Smuzhiyun desc = mlxbf_tmfifo_get_next_desc(vring);
454*4882a593Smuzhiyun while (desc) {
455*4882a593Smuzhiyun /* Release the packet if not enough space. */
456*4882a593Smuzhiyun len = mlxbf_tmfifo_get_pkt_len(vring, desc);
457*4882a593Smuzhiyun avail = CIRC_SPACE(cons->tx_buf.head, cons->tx_buf.tail,
458*4882a593Smuzhiyun MLXBF_TMFIFO_CON_TX_BUF_SIZE);
459*4882a593Smuzhiyun if (len + MLXBF_TMFIFO_CON_TX_BUF_RSV_SIZE > avail) {
460*4882a593Smuzhiyun mlxbf_tmfifo_release_desc(vring, desc, len);
461*4882a593Smuzhiyun break;
462*4882a593Smuzhiyun }
463*4882a593Smuzhiyun
464*4882a593Smuzhiyun mlxbf_tmfifo_console_output_one(cons, vring, desc);
465*4882a593Smuzhiyun mlxbf_tmfifo_release_desc(vring, desc, len);
466*4882a593Smuzhiyun desc = mlxbf_tmfifo_get_next_desc(vring);
467*4882a593Smuzhiyun }
468*4882a593Smuzhiyun }
469*4882a593Smuzhiyun
470*4882a593Smuzhiyun /* Get the number of available words in Rx FIFO for receiving. */
mlxbf_tmfifo_get_rx_avail(struct mlxbf_tmfifo * fifo)471*4882a593Smuzhiyun static int mlxbf_tmfifo_get_rx_avail(struct mlxbf_tmfifo *fifo)
472*4882a593Smuzhiyun {
473*4882a593Smuzhiyun u64 sts;
474*4882a593Smuzhiyun
475*4882a593Smuzhiyun sts = readq(fifo->rx_base + MLXBF_TMFIFO_RX_STS);
476*4882a593Smuzhiyun return FIELD_GET(MLXBF_TMFIFO_RX_STS__COUNT_MASK, sts);
477*4882a593Smuzhiyun }
478*4882a593Smuzhiyun
479*4882a593Smuzhiyun /* Get the number of available words in the TmFifo for sending. */
mlxbf_tmfifo_get_tx_avail(struct mlxbf_tmfifo * fifo,int vdev_id)480*4882a593Smuzhiyun static int mlxbf_tmfifo_get_tx_avail(struct mlxbf_tmfifo *fifo, int vdev_id)
481*4882a593Smuzhiyun {
482*4882a593Smuzhiyun int tx_reserve;
483*4882a593Smuzhiyun u32 count;
484*4882a593Smuzhiyun u64 sts;
485*4882a593Smuzhiyun
486*4882a593Smuzhiyun /* Reserve some room in FIFO for console messages. */
487*4882a593Smuzhiyun if (vdev_id == VIRTIO_ID_NET)
488*4882a593Smuzhiyun tx_reserve = fifo->tx_fifo_size / MLXBF_TMFIFO_RESERVE_RATIO;
489*4882a593Smuzhiyun else
490*4882a593Smuzhiyun tx_reserve = 1;
491*4882a593Smuzhiyun
492*4882a593Smuzhiyun sts = readq(fifo->tx_base + MLXBF_TMFIFO_TX_STS);
493*4882a593Smuzhiyun count = FIELD_GET(MLXBF_TMFIFO_TX_STS__COUNT_MASK, sts);
494*4882a593Smuzhiyun return fifo->tx_fifo_size - tx_reserve - count;
495*4882a593Smuzhiyun }
496*4882a593Smuzhiyun
/*
 * Console Tx (move data from the output buffer into the TmFifo).
 * @avail: number of free 8-byte words in the Tx FIFO; the payload size is
 *         capped so header + data never exceed it, hence the FIFO writes
 *         below cannot overflow.
 */
static void mlxbf_tmfifo_console_tx(struct mlxbf_tmfifo *fifo, int avail)
{
	struct mlxbf_tmfifo_msg_hdr hdr;
	struct mlxbf_tmfifo_vdev *cons;
	unsigned long flags;
	int size, seg;
	void *addr;
	u64 data;

	/* Return if not enough space available. */
	if (avail < MLXBF_TMFIFO_DATA_MIN_WORDS)
		return;

	cons = fifo->vdev[VIRTIO_ID_CONSOLE];
	if (!cons || !cons->tx_buf.buf)
		return;

	/*
	 * Return if no data to send.
	 * NOTE(review): 'size' is sampled before taking spin_lock[0]; this is
	 * safe only if the tail is advanced exclusively in this function —
	 * confirm against the other users of spin_lock[0].
	 */
	size = CIRC_CNT(cons->tx_buf.head, cons->tx_buf.tail,
			MLXBF_TMFIFO_CON_TX_BUF_SIZE);
	if (size == 0)
		return;

	/* Adjust the size to available space. */
	if (size + sizeof(hdr) > avail * sizeof(u64))
		size = avail * sizeof(u64) - sizeof(hdr);

	/* Write header. */
	hdr.type = VIRTIO_ID_CONSOLE;
	hdr.len = htons(size);
	writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);

	/* Use spin-lock to protect the 'cons->tx_buf'. */
	spin_lock_irqsave(&fifo->spin_lock[0], flags);

	while (size > 0) {
		addr = cons->tx_buf.buf + cons->tx_buf.tail;

		/* Contiguous bytes until the physical end of the buffer. */
		seg = CIRC_CNT_TO_END(cons->tx_buf.head, cons->tx_buf.tail,
				      MLXBF_TMFIFO_CON_TX_BUF_SIZE);
		if (seg >= sizeof(u64)) {
			memcpy(&data, addr, sizeof(u64));
		} else {
			/* Word wraps around: stitch it from tail and start. */
			memcpy(&data, addr, seg);
			memcpy((u8 *)&data + seg, cons->tx_buf.buf,
			       sizeof(u64) - seg);
		}
		writeq(data, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);

		if (size >= sizeof(u64)) {
			cons->tx_buf.tail = (cons->tx_buf.tail + sizeof(u64)) %
				MLXBF_TMFIFO_CON_TX_BUF_SIZE;
			size -= sizeof(u64);
		} else {
			/* Final partial word: consume only 'size' bytes. */
			cons->tx_buf.tail = (cons->tx_buf.tail + size) %
				MLXBF_TMFIFO_CON_TX_BUF_SIZE;
			size = 0;
		}
	}

	spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
}
560*4882a593Smuzhiyun
/*
 * Rx/Tx one 8-byte word of the descriptor buffer.
 * @len: byte length to process for this descriptor; 'vring->cur_len' tracks
 *       the progress within it.
 */
static void mlxbf_tmfifo_rxtx_word(struct mlxbf_tmfifo_vring *vring,
				   struct vring_desc *desc,
				   bool is_rx, int len)
{
	struct virtio_device *vdev = vring->vq->vdev;
	struct mlxbf_tmfifo *fifo = vring->fifo;
	void *addr;
	u64 data;

	/* Get the buffer address of this desc. */
	addr = phys_to_virt(virtio64_to_cpu(vdev, desc->addr));

	/* Read a word from FIFO for Rx. */
	if (is_rx)
		data = readq(fifo->rx_base + MLXBF_TMFIFO_RX_DATA);

	if (vring->cur_len + sizeof(u64) <= len) {
		/* The whole word. */
		if (is_rx)
			memcpy(addr + vring->cur_len, &data, sizeof(u64));
		else
			memcpy(&data, addr + vring->cur_len, sizeof(u64));
		vring->cur_len += sizeof(u64);
	} else {
		/*
		 * Leftover bytes (partial word at the end).
		 * NOTE(review): on Tx only 'len - cur_len' bytes of 'data'
		 * are initialized here; the trailing pad bytes written to
		 * the FIFO are stale stack contents. The header length sent
		 * in mlxbf_tmfifo_rxtx_header bounds the payload so the peer
		 * should ignore the padding — confirm.
		 */
		if (is_rx)
			memcpy(addr + vring->cur_len, &data,
			       len - vring->cur_len);
		else
			memcpy(&data, addr + vring->cur_len,
			       len - vring->cur_len);
		vring->cur_len = len;
	}

	/* Write the word into FIFO for Tx. */
	if (!is_rx)
		writeq(data, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
}
600*4882a593Smuzhiyun
/*
 * Rx/Tx packet header.
 *
 * In Rx case, the packet might be found to belong to a different vring since
 * the TmFifo is shared by different services. In such case, the 'vring_change'
 * flag is set.
 */
static void mlxbf_tmfifo_rxtx_header(struct mlxbf_tmfifo_vring *vring,
				     struct vring_desc *desc,
				     bool is_rx, bool *vring_change)
{
	struct mlxbf_tmfifo *fifo = vring->fifo;
	struct virtio_net_config *config;
	struct mlxbf_tmfifo_msg_hdr hdr;
	int vdev_id, hdr_len;

	/* Read/Write packet header. */
	if (is_rx) {
		/* Drain one word from the FIFO. */
		*(u64 *)&hdr = readq(fifo->rx_base + MLXBF_TMFIFO_RX_DATA);

		/*
		 * Skip the length 0 packets (keepalive). A zero length is
		 * zero in any byte order, so no ntohs() is needed here.
		 */
		if (hdr.len == 0)
			return;

		/* Check packet type. */
		if (hdr.type == VIRTIO_ID_NET) {
			vdev_id = VIRTIO_ID_NET;
			hdr_len = sizeof(struct virtio_net_hdr);
			/*
			 * NOTE(review): fifo->vdev[vdev_id] is dereferenced
			 * here (for config->mtu below) before the NULL check
			 * on tm_dev2 further down. If a NET-typed packet can
			 * arrive while no net vdev is registered, this is a
			 * NULL dereference — confirm against probe ordering.
			 */
			config = &fifo->vdev[vdev_id]->config.net;
			/* A legacy-only interface for now. */
			if (ntohs(hdr.len) >
			    __virtio16_to_cpu(virtio_legacy_is_little_endian(),
					      config->mtu) +
			    MLXBF_TMFIFO_NET_L2_OVERHEAD)
				return;
		} else {
			vdev_id = VIRTIO_ID_CONSOLE;
			hdr_len = 0;
		}

		/*
		 * Check whether the new packet still belongs to this vring.
		 * If not, update the pkt_len of the new vring.
		 */
		if (vdev_id != vring->vdev_id) {
			struct mlxbf_tmfifo_vdev *tm_dev2 = fifo->vdev[vdev_id];

			if (!tm_dev2)
				return;
			vring->desc = desc;
			vring = &tm_dev2->vrings[MLXBF_TMFIFO_VRING_RX];
			*vring_change = true;
		}
		vring->pkt_len = ntohs(hdr.len) + hdr_len;
	} else {
		/* Network virtio has an extra header. */
		hdr_len = (vring->vdev_id == VIRTIO_ID_NET) ?
			sizeof(struct virtio_net_hdr) : 0;
		vring->pkt_len = mlxbf_tmfifo_get_pkt_len(vring, desc);
		hdr.type = (vring->vdev_id == VIRTIO_ID_NET) ?
			VIRTIO_ID_NET : VIRTIO_ID_CONSOLE;
		/* Wire length excludes the virtio_net header. */
		hdr.len = htons(vring->pkt_len - hdr_len);
		writeq(*(u64 *)&hdr, fifo->tx_base + MLXBF_TMFIFO_TX_DATA);
	}

	/* Header (if any) counts as already-processed payload bytes. */
	vring->cur_len = hdr_len;
	vring->rem_len = vring->pkt_len;
	fifo->vring[is_rx] = vring;
}
671*4882a593Smuzhiyun
672*4882a593Smuzhiyun /*
673*4882a593Smuzhiyun * Rx/Tx one descriptor.
674*4882a593Smuzhiyun *
675*4882a593Smuzhiyun * Return true to indicate more data available.
676*4882a593Smuzhiyun */
static bool mlxbf_tmfifo_rxtx_one_desc(struct mlxbf_tmfifo_vring *vring,
				       bool is_rx, int *avail)
{
	const struct vring *vr = virtqueue_get_vring(vring->vq);
	struct mlxbf_tmfifo *fifo = vring->fifo;
	struct virtio_device *vdev;
	bool vring_change = false;
	struct vring_desc *desc;
	unsigned long flags;
	u32 len, idx;

	vdev = &fifo->vdev[vring->vdev_id]->vdev;

	/* Get the descriptor of the next packet. */
	if (!vring->desc) {
		desc = mlxbf_tmfifo_get_next_pkt(vring, is_rx);
		if (!desc)
			return false;
	} else {
		/* Continue with the packet already in progress. */
		desc = vring->desc;
	}

	/* Beginning of a packet. Start to Rx/Tx packet header. */
	if (vring->pkt_len == 0) {
		mlxbf_tmfifo_rxtx_header(vring, desc, is_rx, &vring_change);
		/* The header consumed one FIFO word. */
		(*avail)--;

		/* Return if new packet is for another ring. */
		if (vring_change)
			return false;
		goto mlxbf_tmfifo_desc_done;
	}

	/* Get the length of this desc, clamped to the remaining bytes. */
	len = virtio32_to_cpu(vdev, desc->len);
	if (len > vring->rem_len)
		len = vring->rem_len;

	/* Rx/Tx one word (8 bytes) if not done. */
	if (vring->cur_len < len) {
		mlxbf_tmfifo_rxtx_word(vring, desc, is_rx, len);
		(*avail)--;
	}

	/* Check again whether it's done. */
	if (vring->cur_len == len) {
		vring->cur_len = 0;
		vring->rem_len -= len;

		/* Get the next desc on the chain. */
		if (vring->rem_len > 0 &&
		    (virtio16_to_cpu(vdev, desc->flags) & VRING_DESC_F_NEXT)) {
			idx = virtio16_to_cpu(vdev, desc->next);
			desc = &vr->desc[idx];
			goto mlxbf_tmfifo_desc_done;
		}

		/* Done and release the pending packet. */
		mlxbf_tmfifo_release_pending_pkt(vring);
		desc = NULL;
		fifo->vring[is_rx] = NULL;

		/*
		 * Make sure the load/store are in order before
		 * returning back to virtio.
		 */
		virtio_mb(false);

		/* Notify upper layer that packet is done. */
		spin_lock_irqsave(&fifo->spin_lock[is_rx], flags);
		vring_interrupt(0, vring->vq);
		spin_unlock_irqrestore(&fifo->spin_lock[is_rx], flags);
	}

mlxbf_tmfifo_desc_done:
	/* Save the current desc (NULL once the packet completed). */
	vring->desc = desc;

	return true;
}
757*4882a593Smuzhiyun
758*4882a593Smuzhiyun /* Rx & Tx processing of a queue. */
/* Rx & Tx processing of a queue. */
static void mlxbf_tmfifo_rxtx(struct mlxbf_tmfifo_vring *vring, bool is_rx)
{
	struct mlxbf_tmfifo *fifo = vring->fifo;
	int dev_id = vring->vdev_id;
	int words = 0;

	/* Nothing to do if the vdev has not been instantiated. */
	if (!fifo->vdev[dev_id])
		return;

	/* Defer if another vring currently owns this direction. */
	if (fifo->vring[is_rx] && fifo->vring[is_rx] != vring)
		return;

	/* Only handle console and network for now. */
	if (WARN_ON(dev_id != VIRTIO_ID_NET && dev_id != VIRTIO_ID_CONSOLE))
		return;

	for (;;) {
		/* Refresh the available FIFO word count when exhausted. */
		if (!words) {
			words = is_rx ?
				mlxbf_tmfifo_get_rx_avail(fifo) :
				mlxbf_tmfifo_get_tx_avail(fifo, dev_id);
			if (words <= 0)
				return;
		}

		/* Console output always comes from the Tx buffer. */
		if (!is_rx && dev_id == VIRTIO_ID_CONSOLE) {
			mlxbf_tmfifo_console_tx(fifo, words);
			return;
		}

		/* Handle one descriptor; stop when no more data. */
		if (!mlxbf_tmfifo_rxtx_one_desc(vring, is_rx, &words))
			return;
	}
}
800*4882a593Smuzhiyun
801*4882a593Smuzhiyun /* Handle Rx or Tx queues. */
/* Handle Rx or Tx queues. */
static void mlxbf_tmfifo_work_rxtx(struct mlxbf_tmfifo *fifo, int queue_id,
				   int irq_id, bool is_rx)
{
	struct mlxbf_tmfifo_vring *vring;
	struct mlxbf_tmfifo_vdev *tm_vdev;
	int id;

	/* Skip unless the event is pending and the IRQ is registered. */
	if (!test_and_clear_bit(irq_id, &fifo->pend_events) ||
	    !fifo->irq_info[irq_id].irq)
		return;

	/* Service the requested queue of every instantiated vdev. */
	for (id = 0; id < MLXBF_TMFIFO_VDEV_MAX; id++) {
		tm_vdev = fifo->vdev[id];
		if (!tm_vdev)
			continue;
		vring = &tm_vdev->vrings[queue_id];
		if (vring->vq)
			mlxbf_tmfifo_rxtx(vring, is_rx);
	}
}
822*4882a593Smuzhiyun
823*4882a593Smuzhiyun /* Work handler for Rx and Tx case. */
mlxbf_tmfifo_work_handler(struct work_struct * work)824*4882a593Smuzhiyun static void mlxbf_tmfifo_work_handler(struct work_struct *work)
825*4882a593Smuzhiyun {
826*4882a593Smuzhiyun struct mlxbf_tmfifo *fifo;
827*4882a593Smuzhiyun
828*4882a593Smuzhiyun fifo = container_of(work, struct mlxbf_tmfifo, work);
829*4882a593Smuzhiyun if (!fifo->is_ready)
830*4882a593Smuzhiyun return;
831*4882a593Smuzhiyun
832*4882a593Smuzhiyun mutex_lock(&fifo->lock);
833*4882a593Smuzhiyun
834*4882a593Smuzhiyun /* Tx (Send data to the TmFifo). */
835*4882a593Smuzhiyun mlxbf_tmfifo_work_rxtx(fifo, MLXBF_TMFIFO_VRING_TX,
836*4882a593Smuzhiyun MLXBF_TM_TX_LWM_IRQ, false);
837*4882a593Smuzhiyun
838*4882a593Smuzhiyun /* Rx (Receive data from the TmFifo). */
839*4882a593Smuzhiyun mlxbf_tmfifo_work_rxtx(fifo, MLXBF_TMFIFO_VRING_RX,
840*4882a593Smuzhiyun MLXBF_TM_RX_HWM_IRQ, true);
841*4882a593Smuzhiyun
842*4882a593Smuzhiyun mutex_unlock(&fifo->lock);
843*4882a593Smuzhiyun }
844*4882a593Smuzhiyun
845*4882a593Smuzhiyun /* The notify function is called when new buffers are posted. */
/* The notify function is called when new buffers are posted. */
static bool mlxbf_tmfifo_virtio_notify(struct virtqueue *vq)
{
	struct mlxbf_tmfifo_vring *vring = vq->priv;
	struct mlxbf_tmfifo_vdev *tm_vdev;
	struct mlxbf_tmfifo *fifo;
	unsigned long flags;

	fifo = vring->fifo;

	/*
	 * Virtio maintains vrings in pairs, even number ring for Rx
	 * and odd number ring for Tx.
	 */
	if (vring->index & BIT(0)) {
		/*
		 * Console could make blocking call with interrupts disabled.
		 * In such case, the vring needs to be served right away. For
		 * other cases, just set the TX LWM bit to start Tx in the
		 * worker handler.
		 */
		if (vring->vdev_id == VIRTIO_ID_CONSOLE) {
			spin_lock_irqsave(&fifo->spin_lock[0], flags);
			tm_vdev = fifo->vdev[VIRTIO_ID_CONSOLE];
			mlxbf_tmfifo_console_output(tm_vdev, vring);
			spin_unlock_irqrestore(&fifo->spin_lock[0], flags);
		} else if (test_and_set_bit(MLXBF_TM_TX_LWM_IRQ,
					    &fifo->pend_events)) {
			/* Event already pending; work is already scheduled. */
			return true;
		}
	} else {
		/* Rx vring: mark the HWM event unless already pending. */
		if (test_and_set_bit(MLXBF_TM_RX_HWM_IRQ, &fifo->pend_events))
			return true;
	}

	schedule_work(&fifo->work);

	return true;
}
884*4882a593Smuzhiyun
885*4882a593Smuzhiyun /* Get the array of feature bits for this device. */
mlxbf_tmfifo_virtio_get_features(struct virtio_device * vdev)886*4882a593Smuzhiyun static u64 mlxbf_tmfifo_virtio_get_features(struct virtio_device *vdev)
887*4882a593Smuzhiyun {
888*4882a593Smuzhiyun struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
889*4882a593Smuzhiyun
890*4882a593Smuzhiyun return tm_vdev->features;
891*4882a593Smuzhiyun }
892*4882a593Smuzhiyun
893*4882a593Smuzhiyun /* Confirm device features to use. */
mlxbf_tmfifo_virtio_finalize_features(struct virtio_device * vdev)894*4882a593Smuzhiyun static int mlxbf_tmfifo_virtio_finalize_features(struct virtio_device *vdev)
895*4882a593Smuzhiyun {
896*4882a593Smuzhiyun struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
897*4882a593Smuzhiyun
898*4882a593Smuzhiyun tm_vdev->features = vdev->features;
899*4882a593Smuzhiyun
900*4882a593Smuzhiyun return 0;
901*4882a593Smuzhiyun }
902*4882a593Smuzhiyun
903*4882a593Smuzhiyun /* Free virtqueues found by find_vqs(). */
mlxbf_tmfifo_virtio_del_vqs(struct virtio_device * vdev)904*4882a593Smuzhiyun static void mlxbf_tmfifo_virtio_del_vqs(struct virtio_device *vdev)
905*4882a593Smuzhiyun {
906*4882a593Smuzhiyun struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
907*4882a593Smuzhiyun struct mlxbf_tmfifo_vring *vring;
908*4882a593Smuzhiyun struct virtqueue *vq;
909*4882a593Smuzhiyun int i;
910*4882a593Smuzhiyun
911*4882a593Smuzhiyun for (i = 0; i < ARRAY_SIZE(tm_vdev->vrings); i++) {
912*4882a593Smuzhiyun vring = &tm_vdev->vrings[i];
913*4882a593Smuzhiyun
914*4882a593Smuzhiyun /* Release the pending packet. */
915*4882a593Smuzhiyun if (vring->desc)
916*4882a593Smuzhiyun mlxbf_tmfifo_release_pending_pkt(vring);
917*4882a593Smuzhiyun vq = vring->vq;
918*4882a593Smuzhiyun if (vq) {
919*4882a593Smuzhiyun vring->vq = NULL;
920*4882a593Smuzhiyun vring_del_virtqueue(vq);
921*4882a593Smuzhiyun }
922*4882a593Smuzhiyun }
923*4882a593Smuzhiyun }
924*4882a593Smuzhiyun
925*4882a593Smuzhiyun /* Create and initialize the virtual queues. */
/* Create and initialize the virtual queues. */
static int mlxbf_tmfifo_virtio_find_vqs(struct virtio_device *vdev,
					unsigned int nvqs,
					struct virtqueue *vqs[],
					vq_callback_t *callbacks[],
					const char * const names[],
					const bool *ctx,
					struct irq_affinity *desc)
{
	struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
	struct mlxbf_tmfifo_vring *vring;
	struct virtqueue *vq;
	int i, ret;

	if (nvqs > ARRAY_SIZE(tm_vdev->vrings))
		return -EINVAL;

	for (i = 0; i < nvqs; ++i) {
		if (!names[i]) {
			ret = -EINVAL;
			goto error;
		}

		vring = &tm_vdev->vrings[i];

		/* Start from a zeroed vring area. */
		memset(vring->va, 0, vring_size(vring->num, vring->align));

		vq = vring_new_virtqueue(i, vring->num, vring->align, vdev,
					 false, false, vring->va,
					 mlxbf_tmfifo_virtio_notify,
					 callbacks[i], names[i]);
		if (!vq) {
			dev_err(&vdev->dev, "vring_new_virtqueue failed\n");
			ret = -ENOMEM;
			goto error;
		}

		/* Cross-link the vq and our vring bookkeeping. */
		vq->priv = vring;
		vring->vq = vq;
		vqs[i] = vq;
	}

	return 0;

error:
	/* Tear down any virtqueues created so far. */
	mlxbf_tmfifo_virtio_del_vqs(vdev);
	return ret;
}
973*4882a593Smuzhiyun
974*4882a593Smuzhiyun /* Read the status byte. */
mlxbf_tmfifo_virtio_get_status(struct virtio_device * vdev)975*4882a593Smuzhiyun static u8 mlxbf_tmfifo_virtio_get_status(struct virtio_device *vdev)
976*4882a593Smuzhiyun {
977*4882a593Smuzhiyun struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
978*4882a593Smuzhiyun
979*4882a593Smuzhiyun return tm_vdev->status;
980*4882a593Smuzhiyun }
981*4882a593Smuzhiyun
982*4882a593Smuzhiyun /* Write the status byte. */
/* Write the status byte. */
static void mlxbf_tmfifo_virtio_set_status(struct virtio_device *vdev,
					   u8 status)
{
	mlxbf_vdev_to_tmfifo(vdev)->status = status;
}
990*4882a593Smuzhiyun
991*4882a593Smuzhiyun /* Reset the device. Not much here for now. */
mlxbf_tmfifo_virtio_reset(struct virtio_device * vdev)992*4882a593Smuzhiyun static void mlxbf_tmfifo_virtio_reset(struct virtio_device *vdev)
993*4882a593Smuzhiyun {
994*4882a593Smuzhiyun struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
995*4882a593Smuzhiyun
996*4882a593Smuzhiyun tm_vdev->status = 0;
997*4882a593Smuzhiyun }
998*4882a593Smuzhiyun
999*4882a593Smuzhiyun /* Read the value of a configuration field. */
mlxbf_tmfifo_virtio_get(struct virtio_device * vdev,unsigned int offset,void * buf,unsigned int len)1000*4882a593Smuzhiyun static void mlxbf_tmfifo_virtio_get(struct virtio_device *vdev,
1001*4882a593Smuzhiyun unsigned int offset,
1002*4882a593Smuzhiyun void *buf,
1003*4882a593Smuzhiyun unsigned int len)
1004*4882a593Smuzhiyun {
1005*4882a593Smuzhiyun struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
1006*4882a593Smuzhiyun
1007*4882a593Smuzhiyun if ((u64)offset + len > sizeof(tm_vdev->config))
1008*4882a593Smuzhiyun return;
1009*4882a593Smuzhiyun
1010*4882a593Smuzhiyun memcpy(buf, (u8 *)&tm_vdev->config + offset, len);
1011*4882a593Smuzhiyun }
1012*4882a593Smuzhiyun
1013*4882a593Smuzhiyun /* Write the value of a configuration field. */
mlxbf_tmfifo_virtio_set(struct virtio_device * vdev,unsigned int offset,const void * buf,unsigned int len)1014*4882a593Smuzhiyun static void mlxbf_tmfifo_virtio_set(struct virtio_device *vdev,
1015*4882a593Smuzhiyun unsigned int offset,
1016*4882a593Smuzhiyun const void *buf,
1017*4882a593Smuzhiyun unsigned int len)
1018*4882a593Smuzhiyun {
1019*4882a593Smuzhiyun struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
1020*4882a593Smuzhiyun
1021*4882a593Smuzhiyun if ((u64)offset + len > sizeof(tm_vdev->config))
1022*4882a593Smuzhiyun return;
1023*4882a593Smuzhiyun
1024*4882a593Smuzhiyun memcpy((u8 *)&tm_vdev->config + offset, buf, len);
1025*4882a593Smuzhiyun }
1026*4882a593Smuzhiyun
tmfifo_virtio_dev_release(struct device * device)1027*4882a593Smuzhiyun static void tmfifo_virtio_dev_release(struct device *device)
1028*4882a593Smuzhiyun {
1029*4882a593Smuzhiyun struct virtio_device *vdev =
1030*4882a593Smuzhiyun container_of(device, struct virtio_device, dev);
1031*4882a593Smuzhiyun struct mlxbf_tmfifo_vdev *tm_vdev = mlxbf_vdev_to_tmfifo(vdev);
1032*4882a593Smuzhiyun
1033*4882a593Smuzhiyun kfree(tm_vdev);
1034*4882a593Smuzhiyun }
1035*4882a593Smuzhiyun
1036*4882a593Smuzhiyun /* Virtio config operations. */
/* Virtio config operations backing every vdev created by this driver. */
static const struct virtio_config_ops mlxbf_tmfifo_virtio_config_ops = {
	.get_features = mlxbf_tmfifo_virtio_get_features,
	.finalize_features = mlxbf_tmfifo_virtio_finalize_features,
	.find_vqs = mlxbf_tmfifo_virtio_find_vqs,
	.del_vqs = mlxbf_tmfifo_virtio_del_vqs,
	.reset = mlxbf_tmfifo_virtio_reset,
	.set_status = mlxbf_tmfifo_virtio_set_status,
	.get_status = mlxbf_tmfifo_virtio_get_status,
	.get = mlxbf_tmfifo_virtio_get,
	.set = mlxbf_tmfifo_virtio_set,
};
1048*4882a593Smuzhiyun
1049*4882a593Smuzhiyun /* Create vdev for the FIFO. */
/*
 * Create vdev for the FIFO.
 *
 * Allocates a tmfifo vdev of type @vdev_id, copies in the optional
 * @config blob of @size bytes, allocates its vrings (and the console
 * Tx buffer for the console vdev), then registers it with virtio core.
 *
 * Returns 0 on success or a negative errno (-EEXIST if the vdev already
 * exists, -ENOMEM on allocation failure, or register_virtio_device()'s
 * error).
 */
static int mlxbf_tmfifo_create_vdev(struct device *dev,
				    struct mlxbf_tmfifo *fifo,
				    int vdev_id, u64 features,
				    void *config, u32 size)
{
	struct mlxbf_tmfifo_vdev *tm_vdev, *reg_dev = NULL;
	int ret;

	mutex_lock(&fifo->lock);

	tm_vdev = fifo->vdev[vdev_id];
	if (tm_vdev) {
		dev_err(dev, "vdev %d already exists\n", vdev_id);
		ret = -EEXIST;
		goto fail;
	}

	tm_vdev = kzalloc(sizeof(*tm_vdev), GFP_KERNEL);
	if (!tm_vdev) {
		ret = -ENOMEM;
		goto fail;
	}

	tm_vdev->vdev.id.device = vdev_id;
	tm_vdev->vdev.config = &mlxbf_tmfifo_virtio_config_ops;
	tm_vdev->vdev.dev.parent = dev;
	tm_vdev->vdev.dev.release = tmfifo_virtio_dev_release;
	tm_vdev->features = features;
	if (config)
		memcpy(&tm_vdev->config, config, size);

	if (mlxbf_tmfifo_alloc_vrings(fifo, tm_vdev)) {
		dev_err(dev, "unable to allocate vring\n");
		ret = -ENOMEM;
		goto vdev_fail;
	}

	/*
	 * Allocate an output buffer for the console device. Check the
	 * result: the console Tx path dereferences tx_buf.buf, so a
	 * missed -ENOMEM here would turn into a NULL dereference later.
	 */
	if (vdev_id == VIRTIO_ID_CONSOLE) {
		tm_vdev->tx_buf.buf = devm_kmalloc(dev,
						   MLXBF_TMFIFO_CON_TX_BUF_SIZE,
						   GFP_KERNEL);
		if (!tm_vdev->tx_buf.buf) {
			ret = -ENOMEM;
			goto vdev_fail;
		}
	}
	fifo->vdev[vdev_id] = tm_vdev;

	/* Register the virtio device. */
	ret = register_virtio_device(&tm_vdev->vdev);
	reg_dev = tm_vdev;
	if (ret) {
		dev_err(dev, "register_virtio_device failed\n");
		goto vdev_fail;
	}

	mutex_unlock(&fifo->lock);
	return 0;

vdev_fail:
	mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
	fifo->vdev[vdev_id] = NULL;
	/* After a failed register, the release callback does the kfree. */
	if (reg_dev)
		put_device(&tm_vdev->vdev.dev);
	else
		kfree(tm_vdev);
fail:
	mutex_unlock(&fifo->lock);
	return ret;
}
1116*4882a593Smuzhiyun
1117*4882a593Smuzhiyun /* Delete vdev for the FIFO. */
mlxbf_tmfifo_delete_vdev(struct mlxbf_tmfifo * fifo,int vdev_id)1118*4882a593Smuzhiyun static int mlxbf_tmfifo_delete_vdev(struct mlxbf_tmfifo *fifo, int vdev_id)
1119*4882a593Smuzhiyun {
1120*4882a593Smuzhiyun struct mlxbf_tmfifo_vdev *tm_vdev;
1121*4882a593Smuzhiyun
1122*4882a593Smuzhiyun mutex_lock(&fifo->lock);
1123*4882a593Smuzhiyun
1124*4882a593Smuzhiyun /* Unregister vdev. */
1125*4882a593Smuzhiyun tm_vdev = fifo->vdev[vdev_id];
1126*4882a593Smuzhiyun if (tm_vdev) {
1127*4882a593Smuzhiyun unregister_virtio_device(&tm_vdev->vdev);
1128*4882a593Smuzhiyun mlxbf_tmfifo_free_vrings(fifo, tm_vdev);
1129*4882a593Smuzhiyun fifo->vdev[vdev_id] = NULL;
1130*4882a593Smuzhiyun }
1131*4882a593Smuzhiyun
1132*4882a593Smuzhiyun mutex_unlock(&fifo->lock);
1133*4882a593Smuzhiyun
1134*4882a593Smuzhiyun return 0;
1135*4882a593Smuzhiyun }
1136*4882a593Smuzhiyun
1137*4882a593Smuzhiyun /* Read the configured network MAC address from efi variable. */
/* Read the configured network MAC address from efi variable. */
static void mlxbf_tmfifo_get_cfg_mac(u8 *mac)
{
	efi_guid_t guid = EFI_GLOBAL_VARIABLE_GUID;
	unsigned long len = ETH_ALEN;
	u8 addr[ETH_ALEN];
	efi_status_t status;

	status = efi.get_variable(mlxbf_tmfifo_efi_name, &guid, NULL,
				  &len, addr);
	/* Fall back to the built-in default MAC on any failure. */
	if (status != EFI_SUCCESS || len != ETH_ALEN)
		ether_addr_copy(mac, mlxbf_tmfifo_net_default_mac);
	else
		ether_addr_copy(mac, addr);
}
1151*4882a593Smuzhiyun
1152*4882a593Smuzhiyun /* Set TmFifo thresolds which is used to trigger interrupts. */
mlxbf_tmfifo_set_threshold(struct mlxbf_tmfifo * fifo)1153*4882a593Smuzhiyun static void mlxbf_tmfifo_set_threshold(struct mlxbf_tmfifo *fifo)
1154*4882a593Smuzhiyun {
1155*4882a593Smuzhiyun u64 ctl;
1156*4882a593Smuzhiyun
1157*4882a593Smuzhiyun /* Get Tx FIFO size and set the low/high watermark. */
1158*4882a593Smuzhiyun ctl = readq(fifo->tx_base + MLXBF_TMFIFO_TX_CTL);
1159*4882a593Smuzhiyun fifo->tx_fifo_size =
1160*4882a593Smuzhiyun FIELD_GET(MLXBF_TMFIFO_TX_CTL__MAX_ENTRIES_MASK, ctl);
1161*4882a593Smuzhiyun ctl = (ctl & ~MLXBF_TMFIFO_TX_CTL__LWM_MASK) |
1162*4882a593Smuzhiyun FIELD_PREP(MLXBF_TMFIFO_TX_CTL__LWM_MASK,
1163*4882a593Smuzhiyun fifo->tx_fifo_size / 2);
1164*4882a593Smuzhiyun ctl = (ctl & ~MLXBF_TMFIFO_TX_CTL__HWM_MASK) |
1165*4882a593Smuzhiyun FIELD_PREP(MLXBF_TMFIFO_TX_CTL__HWM_MASK,
1166*4882a593Smuzhiyun fifo->tx_fifo_size - 1);
1167*4882a593Smuzhiyun writeq(ctl, fifo->tx_base + MLXBF_TMFIFO_TX_CTL);
1168*4882a593Smuzhiyun
1169*4882a593Smuzhiyun /* Get Rx FIFO size and set the low/high watermark. */
1170*4882a593Smuzhiyun ctl = readq(fifo->rx_base + MLXBF_TMFIFO_RX_CTL);
1171*4882a593Smuzhiyun fifo->rx_fifo_size =
1172*4882a593Smuzhiyun FIELD_GET(MLXBF_TMFIFO_RX_CTL__MAX_ENTRIES_MASK, ctl);
1173*4882a593Smuzhiyun ctl = (ctl & ~MLXBF_TMFIFO_RX_CTL__LWM_MASK) |
1174*4882a593Smuzhiyun FIELD_PREP(MLXBF_TMFIFO_RX_CTL__LWM_MASK, 0);
1175*4882a593Smuzhiyun ctl = (ctl & ~MLXBF_TMFIFO_RX_CTL__HWM_MASK) |
1176*4882a593Smuzhiyun FIELD_PREP(MLXBF_TMFIFO_RX_CTL__HWM_MASK, 1);
1177*4882a593Smuzhiyun writeq(ctl, fifo->rx_base + MLXBF_TMFIFO_RX_CTL);
1178*4882a593Smuzhiyun }
1179*4882a593Smuzhiyun
mlxbf_tmfifo_cleanup(struct mlxbf_tmfifo * fifo)1180*4882a593Smuzhiyun static void mlxbf_tmfifo_cleanup(struct mlxbf_tmfifo *fifo)
1181*4882a593Smuzhiyun {
1182*4882a593Smuzhiyun int i;
1183*4882a593Smuzhiyun
1184*4882a593Smuzhiyun fifo->is_ready = false;
1185*4882a593Smuzhiyun del_timer_sync(&fifo->timer);
1186*4882a593Smuzhiyun mlxbf_tmfifo_disable_irqs(fifo);
1187*4882a593Smuzhiyun cancel_work_sync(&fifo->work);
1188*4882a593Smuzhiyun for (i = 0; i < MLXBF_TMFIFO_VDEV_MAX; i++)
1189*4882a593Smuzhiyun mlxbf_tmfifo_delete_vdev(fifo, i);
1190*4882a593Smuzhiyun }
1191*4882a593Smuzhiyun
1192*4882a593Smuzhiyun /* Probe the TMFIFO. */
mlxbf_tmfifo_probe(struct platform_device * pdev)1193*4882a593Smuzhiyun static int mlxbf_tmfifo_probe(struct platform_device *pdev)
1194*4882a593Smuzhiyun {
1195*4882a593Smuzhiyun struct virtio_net_config net_config;
1196*4882a593Smuzhiyun struct device *dev = &pdev->dev;
1197*4882a593Smuzhiyun struct mlxbf_tmfifo *fifo;
1198*4882a593Smuzhiyun int i, rc;
1199*4882a593Smuzhiyun
1200*4882a593Smuzhiyun fifo = devm_kzalloc(dev, sizeof(*fifo), GFP_KERNEL);
1201*4882a593Smuzhiyun if (!fifo)
1202*4882a593Smuzhiyun return -ENOMEM;
1203*4882a593Smuzhiyun
1204*4882a593Smuzhiyun spin_lock_init(&fifo->spin_lock[0]);
1205*4882a593Smuzhiyun spin_lock_init(&fifo->spin_lock[1]);
1206*4882a593Smuzhiyun INIT_WORK(&fifo->work, mlxbf_tmfifo_work_handler);
1207*4882a593Smuzhiyun mutex_init(&fifo->lock);
1208*4882a593Smuzhiyun
1209*4882a593Smuzhiyun /* Get the resource of the Rx FIFO. */
1210*4882a593Smuzhiyun fifo->rx_base = devm_platform_ioremap_resource(pdev, 0);
1211*4882a593Smuzhiyun if (IS_ERR(fifo->rx_base))
1212*4882a593Smuzhiyun return PTR_ERR(fifo->rx_base);
1213*4882a593Smuzhiyun
1214*4882a593Smuzhiyun /* Get the resource of the Tx FIFO. */
1215*4882a593Smuzhiyun fifo->tx_base = devm_platform_ioremap_resource(pdev, 1);
1216*4882a593Smuzhiyun if (IS_ERR(fifo->tx_base))
1217*4882a593Smuzhiyun return PTR_ERR(fifo->tx_base);
1218*4882a593Smuzhiyun
1219*4882a593Smuzhiyun platform_set_drvdata(pdev, fifo);
1220*4882a593Smuzhiyun
1221*4882a593Smuzhiyun timer_setup(&fifo->timer, mlxbf_tmfifo_timer, 0);
1222*4882a593Smuzhiyun
1223*4882a593Smuzhiyun for (i = 0; i < MLXBF_TM_MAX_IRQ; i++) {
1224*4882a593Smuzhiyun fifo->irq_info[i].index = i;
1225*4882a593Smuzhiyun fifo->irq_info[i].fifo = fifo;
1226*4882a593Smuzhiyun fifo->irq_info[i].irq = platform_get_irq(pdev, i);
1227*4882a593Smuzhiyun rc = devm_request_irq(dev, fifo->irq_info[i].irq,
1228*4882a593Smuzhiyun mlxbf_tmfifo_irq_handler, 0,
1229*4882a593Smuzhiyun "tmfifo", &fifo->irq_info[i]);
1230*4882a593Smuzhiyun if (rc) {
1231*4882a593Smuzhiyun dev_err(dev, "devm_request_irq failed\n");
1232*4882a593Smuzhiyun fifo->irq_info[i].irq = 0;
1233*4882a593Smuzhiyun return rc;
1234*4882a593Smuzhiyun }
1235*4882a593Smuzhiyun }
1236*4882a593Smuzhiyun
1237*4882a593Smuzhiyun mlxbf_tmfifo_set_threshold(fifo);
1238*4882a593Smuzhiyun
1239*4882a593Smuzhiyun /* Create the console vdev. */
1240*4882a593Smuzhiyun rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_CONSOLE, 0, NULL, 0);
1241*4882a593Smuzhiyun if (rc)
1242*4882a593Smuzhiyun goto fail;
1243*4882a593Smuzhiyun
1244*4882a593Smuzhiyun /* Create the network vdev. */
1245*4882a593Smuzhiyun memset(&net_config, 0, sizeof(net_config));
1246*4882a593Smuzhiyun
1247*4882a593Smuzhiyun /* A legacy-only interface for now. */
1248*4882a593Smuzhiyun net_config.mtu = __cpu_to_virtio16(virtio_legacy_is_little_endian(),
1249*4882a593Smuzhiyun ETH_DATA_LEN);
1250*4882a593Smuzhiyun net_config.status = __cpu_to_virtio16(virtio_legacy_is_little_endian(),
1251*4882a593Smuzhiyun VIRTIO_NET_S_LINK_UP);
1252*4882a593Smuzhiyun mlxbf_tmfifo_get_cfg_mac(net_config.mac);
1253*4882a593Smuzhiyun rc = mlxbf_tmfifo_create_vdev(dev, fifo, VIRTIO_ID_NET,
1254*4882a593Smuzhiyun MLXBF_TMFIFO_NET_FEATURES, &net_config,
1255*4882a593Smuzhiyun sizeof(net_config));
1256*4882a593Smuzhiyun if (rc)
1257*4882a593Smuzhiyun goto fail;
1258*4882a593Smuzhiyun
1259*4882a593Smuzhiyun mod_timer(&fifo->timer, jiffies + MLXBF_TMFIFO_TIMER_INTERVAL);
1260*4882a593Smuzhiyun
1261*4882a593Smuzhiyun fifo->is_ready = true;
1262*4882a593Smuzhiyun return 0;
1263*4882a593Smuzhiyun
1264*4882a593Smuzhiyun fail:
1265*4882a593Smuzhiyun mlxbf_tmfifo_cleanup(fifo);
1266*4882a593Smuzhiyun return rc;
1267*4882a593Smuzhiyun }
1268*4882a593Smuzhiyun
1269*4882a593Smuzhiyun /* Device remove function. */
/* Device remove function. */
static int mlxbf_tmfifo_remove(struct platform_device *pdev)
{
	mlxbf_tmfifo_cleanup(platform_get_drvdata(pdev));
	return 0;
}
1278*4882a593Smuzhiyun
/* ACPI IDs this driver binds to. */
static const struct acpi_device_id mlxbf_tmfifo_acpi_match[] = {
	{ "MLNXBF01", 0 },
	{}
};
MODULE_DEVICE_TABLE(acpi, mlxbf_tmfifo_acpi_match);

/* Platform driver glue. */
static struct platform_driver mlxbf_tmfifo_driver = {
	.probe = mlxbf_tmfifo_probe,
	.remove = mlxbf_tmfifo_remove,
	.driver = {
		.name = "bf-tmfifo",
		.acpi_match_table = mlxbf_tmfifo_acpi_match,
	},
};

module_platform_driver(mlxbf_tmfifo_driver);

MODULE_DESCRIPTION("Mellanox BlueField SoC TmFifo Driver");
MODULE_LICENSE("GPL v2");
MODULE_AUTHOR("Mellanox Technologies");
1299