/*
 * Copyright (c) 2016 Citrix Systems Inc.
 * Copyright (c) 2002-2005, K A Fraser
 *
 * This program is free software; you can redistribute it and/or
 * modify it under the terms of the GNU General Public License version 2
 * as published by the Free Software Foundation; or, when distributed
 * separately from the Linux kernel or incorporated into other
 * software packages, subject to the following license:
 *
 * Permission is hereby granted, free of charge, to any person obtaining a copy
 * of this source file (the "Software"), to deal in the Software without
 * restriction, including without limitation the rights to use, copy, modify,
 * merge, publish, distribute, sublicense, and/or sell copies of the Software,
 * and to permit persons to whom the Software is furnished to do so, subject to
 * the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
 * AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
 * LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
 * FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
 * IN THE SOFTWARE.
 */
#include "common.h"

#include <linux/kthread.h>

#include <xen/xen.h>
#include <xen/events.h>

/*
 * Update the needed ring page slots for the first SKB queued.
 * Note that any call sequence outside the RX thread calling this function
 * needs to wake up the RX thread via a call to xenvif_kick_thread()
 * afterwards in order to avoid a race with putting the thread to sleep.
 */
static void xenvif_update_needed_slots(struct xenvif_queue *queue,
				       const struct sk_buff *skb)
{
	unsigned int needed = 0;

	if (skb) {
		needed = DIV_ROUND_UP(skb->len, XEN_PAGE_SIZE);
		if (skb_is_gso(skb))
			needed++;
		if (skb->sw_hash)
			needed++;
	}

	WRITE_ONCE(queue->rx_slots_needed, needed);
}

static bool xenvif_rx_ring_slots_available(struct xenvif_queue *queue)
{
	RING_IDX prod, cons;
	unsigned int needed;

	needed = READ_ONCE(queue->rx_slots_needed);
	if (!needed)
		return false;

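	/*
	 * Not enough requests available yet: ask the frontend to send
	 * an event when more arrive, then re-check the producer so
	 * that requests posted in the meantime are not missed.
	 */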
	do {
		prod = queue->rx.sring->req_prod;
		cons = queue->rx.req_cons;

		if (prod - cons >= needed)
			return true;

		queue->rx.sring->req_event = prod + 1;

		/* Make sure event is visible before we check prod
		 * again.
		 */
		mb();
	} while (queue->rx.sring->req_prod != prod);

	return false;
}

bool xenvif_rx_queue_tail(struct xenvif_queue *queue, struct sk_buff *skb)
{
	unsigned long flags;
	bool ret = true;

	spin_lock_irqsave(&queue->rx_queue.lock, flags);

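	/*
	 * If the internal rx_queue has reached its limit, stop the
	 * matching netdev tx queue rather than queueing the skb; it is
	 * woken again in xenvif_rx_dequeue() once the backlog drops
	 * below the limit.
	 */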
	if (queue->rx_queue_len >= queue->rx_queue_max) {
		struct net_device *dev = queue->vif->dev;

		netif_tx_stop_queue(netdev_get_tx_queue(dev, queue->id));
		ret = false;
	} else {
		if (skb_queue_empty(&queue->rx_queue))
			xenvif_update_needed_slots(queue, skb);

		__skb_queue_tail(&queue->rx_queue, skb);

		queue->rx_queue_len += skb->len;
	}

	spin_unlock_irqrestore(&queue->rx_queue.lock, flags);

	return ret;
}

static struct sk_buff *xenvif_rx_dequeue(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	spin_lock_irq(&queue->rx_queue.lock);

	skb = __skb_dequeue(&queue->rx_queue);
	if (skb) {
		xenvif_update_needed_slots(queue, skb_peek(&queue->rx_queue));

		queue->rx_queue_len -= skb->len;
		if (queue->rx_queue_len < queue->rx_queue_max) {
			struct netdev_queue *txq;

			txq = netdev_get_tx_queue(queue->vif->dev, queue->id);
			netif_tx_wake_queue(txq);
		}
	}

	spin_unlock_irq(&queue->rx_queue.lock);

	return skb;
}

static void xenvif_rx_queue_purge(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

	while ((skb = xenvif_rx_dequeue(queue)) != NULL)
		kfree_skb(skb);
}

static void xenvif_rx_queue_drop_expired(struct xenvif_queue *queue)
{
	struct sk_buff *skb;

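	/*
	 * Each queued skb carries an expiry time in its control block;
	 * drop expired ones from the head of the queue and account
	 * them as rx_dropped.
	 */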
	for (;;) {
		skb = skb_peek(&queue->rx_queue);
		if (!skb)
			break;
		if (time_before(jiffies, XENVIF_RX_CB(skb)->expires))
			break;
		xenvif_rx_dequeue(queue);
		kfree_skb(skb);
		queue->vif->dev->stats.rx_dropped++;
	}
}

static void xenvif_rx_copy_flush(struct xenvif_queue *queue)
{
	unsigned int i;
	int notify;

	gnttab_batch_copy(queue->rx_copy.op, queue->rx_copy.num);

	for (i = 0; i < queue->rx_copy.num; i++) {
		struct gnttab_copy *op;

		op = &queue->rx_copy.op[i];

		/* If the copy failed, overwrite the status field in
		 * the corresponding response.
		 */
		if (unlikely(op->status != GNTST_okay)) {
			struct xen_netif_rx_response *rsp;

			rsp = RING_GET_RESPONSE(&queue->rx,
						queue->rx_copy.idx[i]);
			rsp->status = op->status;
		}
	}

	queue->rx_copy.num = 0;

	/* Push responses for all completed packets. */
	RING_PUSH_RESPONSES_AND_CHECK_NOTIFY(&queue->rx, notify);
	if (notify)
		notify_remote_via_irq(queue->rx_irq);

	__skb_queue_purge(queue->rx_copy.completed);
}

static void xenvif_rx_copy_add(struct xenvif_queue *queue,
			       struct xen_netif_rx_request *req,
			       unsigned int offset, void *data, size_t len)
{
	struct gnttab_copy *op;
	struct page *page;
	struct xen_page_foreign *foreign;

	if (queue->rx_copy.num == COPY_BATCH_SIZE)
		xenvif_rx_copy_flush(queue);

	op = &queue->rx_copy.op[queue->rx_copy.num];

	page = virt_to_page(data);

	op->flags = GNTCOPY_dest_gref;

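	/*
	 * If the source page was granted by another domain, copy
	 * directly from that domain via its grant reference; otherwise
	 * copy from the local frame.
	 */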
	foreign = xen_page_foreign(page);
	if (foreign) {
		op->source.domid = foreign->domid;
		op->source.u.ref = foreign->gref;
		op->flags |= GNTCOPY_source_gref;
	} else {
		op->source.u.gmfn = virt_to_gfn(data);
		op->source.domid = DOMID_SELF;
	}

	op->source.offset = xen_offset_in_page(data);
	op->dest.u.ref = req->gref;
	op->dest.domid = queue->vif->domid;
	op->dest.offset = offset;
	op->len = len;

	queue->rx_copy.idx[queue->rx_copy.num] = queue->rx.req_cons;
	queue->rx_copy.num++;
}

static unsigned int xenvif_gso_type(struct sk_buff *skb)
{
	if (skb_is_gso(skb)) {
		if (skb_shinfo(skb)->gso_type & SKB_GSO_TCPV4)
			return XEN_NETIF_GSO_TYPE_TCPV4;
		else
			return XEN_NETIF_GSO_TYPE_TCPV6;
	}
	return XEN_NETIF_GSO_TYPE_NONE;
}

struct xenvif_pkt_state {
	struct sk_buff *skb;
	size_t remaining_len;
	struct sk_buff *frag_iter;
	int frag; /* frag == -1 => frag_iter->head */
	unsigned int frag_offset;
	struct xen_netif_extra_info extras[XEN_NETIF_EXTRA_TYPE_MAX - 1];
	unsigned int extra_count;
	unsigned int slot;
};

static void xenvif_rx_next_skb(struct xenvif_queue *queue,
			       struct xenvif_pkt_state *pkt)
{
	struct sk_buff *skb;
	unsigned int gso_type;

	skb = xenvif_rx_dequeue(queue);

	queue->stats.tx_bytes += skb->len;
	queue->stats.tx_packets++;

	/* Reset packet state. */
	memset(pkt, 0, sizeof(struct xenvif_pkt_state));

	pkt->skb = skb;
	pkt->frag_iter = skb;
	pkt->remaining_len = skb->len;
	pkt->frag = -1;

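	/*
	 * Record any extra info segments (GSO, XDP headroom, hash) now;
	 * they are emitted as extra-info slots after the first data
	 * slot in xenvif_rx_skb().
	 */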
	gso_type = xenvif_gso_type(skb);
	if ((1 << gso_type) & queue->vif->gso_mask) {
		struct xen_netif_extra_info *extra;

		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_GSO - 1];

		extra->u.gso.type = gso_type;
		extra->u.gso.size = skb_shinfo(skb)->gso_size;
		extra->u.gso.pad = 0;
		extra->u.gso.features = 0;
		extra->type = XEN_NETIF_EXTRA_TYPE_GSO;
		extra->flags = 0;

		pkt->extra_count++;
	}

	if (queue->vif->xdp_headroom) {
		struct xen_netif_extra_info *extra;

		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_XDP - 1];

		memset(extra, 0, sizeof(struct xen_netif_extra_info));
		extra->u.xdp.headroom = queue->vif->xdp_headroom;
		extra->type = XEN_NETIF_EXTRA_TYPE_XDP;
		extra->flags = 0;

		pkt->extra_count++;
	}

	if (skb->sw_hash) {
		struct xen_netif_extra_info *extra;

		extra = &pkt->extras[XEN_NETIF_EXTRA_TYPE_HASH - 1];

		extra->u.hash.algorithm =
			XEN_NETIF_CTRL_HASH_ALGORITHM_TOEPLITZ;

		if (skb->l4_hash)
			extra->u.hash.type =
				skb->protocol == htons(ETH_P_IP) ?
				_XEN_NETIF_CTRL_HASH_TYPE_IPV4_TCP :
				_XEN_NETIF_CTRL_HASH_TYPE_IPV6_TCP;
		else
			extra->u.hash.type =
				skb->protocol == htons(ETH_P_IP) ?
				_XEN_NETIF_CTRL_HASH_TYPE_IPV4 :
				_XEN_NETIF_CTRL_HASH_TYPE_IPV6;

		*(uint32_t *)extra->u.hash.value = skb_get_hash_raw(skb);

		extra->type = XEN_NETIF_EXTRA_TYPE_HASH;
		extra->flags = 0;

		pkt->extra_count++;
	}
}

static void xenvif_rx_complete(struct xenvif_queue *queue,
			       struct xenvif_pkt_state *pkt)
{
	/* All responses are ready to be pushed. */
	queue->rx.rsp_prod_pvt = queue->rx.req_cons;

	__skb_queue_tail(queue->rx_copy.completed, pkt->skb);
}

static void xenvif_rx_next_frag(struct xenvif_pkt_state *pkt)
{
	struct sk_buff *frag_iter = pkt->frag_iter;
	unsigned int nr_frags = skb_shinfo(frag_iter)->nr_frags;

	pkt->frag++;
	pkt->frag_offset = 0;

	if (pkt->frag >= nr_frags) {
		if (frag_iter == pkt->skb)
			pkt->frag_iter = skb_shinfo(frag_iter)->frag_list;
		else
			pkt->frag_iter = frag_iter->next;

		pkt->frag = -1;
	}
}

static void xenvif_rx_next_chunk(struct xenvif_queue *queue,
				 struct xenvif_pkt_state *pkt,
				 unsigned int offset, void **data,
				 size_t *len)
{
	struct sk_buff *frag_iter = pkt->frag_iter;
	void *frag_data;
	size_t frag_len, chunk_len;

	BUG_ON(!frag_iter);

	if (pkt->frag == -1) {
		frag_data = frag_iter->data;
		frag_len = skb_headlen(frag_iter);
	} else {
		skb_frag_t *frag = &skb_shinfo(frag_iter)->frags[pkt->frag];

		frag_data = skb_frag_address(frag);
		frag_len = skb_frag_size(frag);
	}

	frag_data += pkt->frag_offset;
	frag_len -= pkt->frag_offset;

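	/*
	 * Bound the chunk by both the space left in the destination
	 * ring page and the distance to the end of the source page,
	 * as a single grant copy cannot cross a page boundary.
	 */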
	chunk_len = min_t(size_t, frag_len, XEN_PAGE_SIZE - offset);
	chunk_len = min_t(size_t, chunk_len, XEN_PAGE_SIZE -
					     xen_offset_in_page(frag_data));

	pkt->frag_offset += chunk_len;

	/* Advance to next frag? */
	if (frag_len == chunk_len)
		xenvif_rx_next_frag(pkt);

	*data = frag_data;
	*len = chunk_len;
}

static void xenvif_rx_data_slot(struct xenvif_queue *queue,
				struct xenvif_pkt_state *pkt,
				struct xen_netif_rx_request *req,
				struct xen_netif_rx_response *rsp)
{
	unsigned int offset = queue->vif->xdp_headroom;
	unsigned int flags;

	do {
		size_t len;
		void *data;

		xenvif_rx_next_chunk(queue, pkt, offset, &data, &len);
		xenvif_rx_copy_add(queue, req, offset, data, len);

		offset += len;
		pkt->remaining_len -= len;

	} while (offset < XEN_PAGE_SIZE && pkt->remaining_len > 0);

	if (pkt->remaining_len > 0)
		flags = XEN_NETRXF_more_data;
	else
		flags = 0;

	if (pkt->slot == 0) {
		struct sk_buff *skb = pkt->skb;

		if (skb->ip_summed == CHECKSUM_PARTIAL)
			flags |= XEN_NETRXF_csum_blank |
				 XEN_NETRXF_data_validated;
		else if (skb->ip_summed == CHECKSUM_UNNECESSARY)
			flags |= XEN_NETRXF_data_validated;

		if (pkt->extra_count != 0)
			flags |= XEN_NETRXF_extra_info;
	}

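	/*
	 * Fill in the response for this slot; status carries the
	 * number of bytes used in the destination page (XDP headroom
	 * plus copied data).
	 */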
	rsp->offset = 0;
	rsp->flags = flags;
	rsp->id = req->id;
	rsp->status = (s16)offset;
}

static void xenvif_rx_extra_slot(struct xenvif_queue *queue,
				 struct xenvif_pkt_state *pkt,
				 struct xen_netif_rx_request *req,
				 struct xen_netif_rx_response *rsp)
{
	struct xen_netif_extra_info *extra = (void *)rsp;
	unsigned int i;

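	/*
	 * Emit one pending extra-info segment per slot, setting
	 * XEN_NETIF_EXTRA_FLAG_MORE while further extras remain.
	 */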
	pkt->extra_count--;

	for (i = 0; i < ARRAY_SIZE(pkt->extras); i++) {
		if (pkt->extras[i].type) {
			*extra = pkt->extras[i];

			if (pkt->extra_count != 0)
				extra->flags |= XEN_NETIF_EXTRA_FLAG_MORE;

			pkt->extras[i].type = 0;
			return;
		}
	}
	BUG();
}

static void xenvif_rx_skb(struct xenvif_queue *queue)
{
	struct xenvif_pkt_state pkt;

	xenvif_rx_next_skb(queue, &pkt);

	queue->last_rx_time = jiffies;

	do {
		struct xen_netif_rx_request *req;
		struct xen_netif_rx_response *rsp;

		req = RING_GET_REQUEST(&queue->rx, queue->rx.req_cons);
		rsp = RING_GET_RESPONSE(&queue->rx, queue->rx.req_cons);

		/* Extras must go after the first data slot */
		if (pkt.slot != 0 && pkt.extra_count != 0)
			xenvif_rx_extra_slot(queue, &pkt, req, rsp);
		else
			xenvif_rx_data_slot(queue, &pkt, req, rsp);

		queue->rx.req_cons++;
		pkt.slot++;
	} while (pkt.remaining_len > 0 || pkt.extra_count != 0);

	xenvif_rx_complete(queue, &pkt);
}

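/* Maximum number of skbs processed in one xenvif_rx_action() pass. */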
#define RX_BATCH_SIZE 64

static void xenvif_rx_action(struct xenvif_queue *queue)
{
	struct sk_buff_head completed_skbs;
	unsigned int work_done = 0;

	__skb_queue_head_init(&completed_skbs);
	queue->rx_copy.completed = &completed_skbs;

	while (xenvif_rx_ring_slots_available(queue) &&
	       !skb_queue_empty(&queue->rx_queue) &&
	       work_done < RX_BATCH_SIZE) {
		xenvif_rx_skb(queue);
		work_done++;
	}

	/* Flush any pending copies and complete all skbs. */
	xenvif_rx_copy_flush(queue);
}

static RING_IDX xenvif_rx_queue_slots(const struct xenvif_queue *queue)
{
	RING_IDX prod, cons;

	prod = queue->rx.sring->req_prod;
	cons = queue->rx.req_cons;

	return prod - cons;
}

static bool xenvif_rx_queue_stalled(const struct xenvif_queue *queue)
{
	unsigned int needed = READ_ONCE(queue->rx_slots_needed);

	return !queue->stalled &&
		xenvif_rx_queue_slots(queue) < needed &&
		time_after(jiffies,
			   queue->last_rx_time + queue->vif->stall_timeout);
}

static bool xenvif_rx_queue_ready(struct xenvif_queue *queue)
{
	unsigned int needed = READ_ONCE(queue->rx_slots_needed);

	return queue->stalled && xenvif_rx_queue_slots(queue) >= needed;
}

bool xenvif_have_rx_work(struct xenvif_queue *queue, bool test_kthread)
{
	return xenvif_rx_ring_slots_available(queue) ||
		(queue->vif->stall_timeout &&
		 (xenvif_rx_queue_stalled(queue) ||
		  xenvif_rx_queue_ready(queue))) ||
		(test_kthread && kthread_should_stop()) ||
		queue->vif->disabled;
}

static long xenvif_rx_queue_timeout(struct xenvif_queue *queue)
{
	struct sk_buff *skb;
	long timeout;

	skb = skb_peek(&queue->rx_queue);
	if (!skb)
		return MAX_SCHEDULE_TIMEOUT;

	timeout = XENVIF_RX_CB(skb)->expires - jiffies;
	return timeout < 0 ? 0 : timeout;
}

/* Wait until the guest Rx thread has work.
 *
 * The timeout needs to be adjusted based on the current head of the
 * queue (and not just the head at the beginning). In particular, if
 * the queue is initially empty an infinite timeout is used and this
 * needs to be reduced when a skb is queued.
 *
 * This cannot be done with wait_event_timeout() because it only
 * calculates the timeout once.
 */
static void xenvif_wait_for_rx_work(struct xenvif_queue *queue)
{
	DEFINE_WAIT(wait);

	if (xenvif_have_rx_work(queue, true))
		return;

	for (;;) {
		long ret;

		prepare_to_wait(&queue->wq, &wait, TASK_INTERRUPTIBLE);
		if (xenvif_have_rx_work(queue, true))
			break;
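		/*
		 * No work yet: signal any pending end-of-interrupt for
		 * the RX or common event channel before sleeping so
		 * that further events can be raised.
		 */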
		if (atomic_fetch_andnot(NETBK_RX_EOI | NETBK_COMMON_EOI,
					&queue->eoi_pending) &
		    (NETBK_RX_EOI | NETBK_COMMON_EOI))
			xen_irq_lateeoi(queue->rx_irq, 0);

		ret = schedule_timeout(xenvif_rx_queue_timeout(queue));
		if (!ret)
			break;
	}
	finish_wait(&queue->wq, &wait);
}

static void xenvif_queue_carrier_off(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->stalled = true;

	/* At least one queue has stalled? Disable the carrier. */
	spin_lock(&vif->lock);
	if (vif->stalled_queues++ == 0) {
		netdev_info(vif->dev, "Guest Rx stalled");
		netif_carrier_off(vif->dev);
	}
	spin_unlock(&vif->lock);
}

static void xenvif_queue_carrier_on(struct xenvif_queue *queue)
{
	struct xenvif *vif = queue->vif;

	queue->last_rx_time = jiffies; /* Reset Rx stall detection. */
	queue->stalled = false;

	/* All queues are ready? Enable the carrier. */
	spin_lock(&vif->lock);
	if (--vif->stalled_queues == 0) {
		netdev_info(vif->dev, "Guest Rx ready");
		netif_carrier_on(vif->dev);
	}
	spin_unlock(&vif->lock);
}

int xenvif_kthread_guest_rx(void *data)
{
	struct xenvif_queue *queue = data;
	struct xenvif *vif = queue->vif;

	if (!vif->stall_timeout)
		xenvif_queue_carrier_on(queue);

	for (;;) {
		xenvif_wait_for_rx_work(queue);

		if (kthread_should_stop())
			break;

		/* This frontend is found to be rogue, disable it in
		 * kthread context. Currently this is only set when
		 * netback finds out the frontend sends malformed packets,
		 * but we cannot disable the interface in softirq
		 * context so we defer it here, if this thread is
		 * associated with queue 0.
		 */
		if (unlikely(vif->disabled && queue->id == 0)) {
			xenvif_carrier_off(vif);
			break;
		}

		if (!skb_queue_empty(&queue->rx_queue))
			xenvif_rx_action(queue);

		/* If the guest hasn't provided any Rx slots for a
		 * while it's probably not responsive, drop the
		 * carrier so packets are dropped earlier.
		 */
		if (vif->stall_timeout) {
			if (xenvif_rx_queue_stalled(queue))
				xenvif_queue_carrier_off(queue);
			else if (xenvif_rx_queue_ready(queue))
				xenvif_queue_carrier_on(queue);
		}

		/* Queued packets may have foreign pages from other
		 * domains. These cannot be queued indefinitely as
		 * this would starve guests of grant refs and transmit
		 * slots.
		 */
		xenvif_rx_queue_drop_expired(queue);

		cond_resched();
	}

	/* Bin any remaining skbs */
	xenvif_rx_queue_purge(queue);

	return 0;
}