// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2005-2019 Solarflare Communications Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "net_driver.h"
#include "ef100_rx.h"
#include "rx_common.h"
#include "efx.h"
#include "nic_common.h"
#include "mcdi_functions.h"
#include "ef100_regs.h"
#include "ef100_nic.h"
#include "io.h"

/* Get the value of a field in the RX prefix */
#define PREFIX_OFFSET_W(_f)	(ESF_GZ_RX_PREFIX_ ## _f ## _LBN / 32)
#define PREFIX_OFFSET_B(_f)	(ESF_GZ_RX_PREFIX_ ## _f ## _LBN % 32)
#define PREFIX_WIDTH_MASK(_f)	((1UL << ESF_GZ_RX_PREFIX_ ## _f ## _WIDTH) - 1)
#define PREFIX_WORD(_p, _f)	le32_to_cpu((__force __le32)(_p)[PREFIX_OFFSET_W(_f)])
#define PREFIX_FIELD(_p, _f)	((PREFIX_WORD(_p, _f) >> PREFIX_OFFSET_B(_f)) & \
				 PREFIX_WIDTH_MASK(_f))

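/* The NT_OR_INNER_L3_CLASS bits live inside the CLASS field of the RX prefix,
 * so compose a synthetic field definition located relative to the start of
 * the prefix, letting PREFIX_FIELD() extract it directly.
 */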
#define ESF_GZ_RX_PREFIX_NT_OR_INNER_L3_CLASS_LBN	\
		(ESF_GZ_RX_PREFIX_CLASS_LBN + ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_LBN)
#define ESF_GZ_RX_PREFIX_NT_OR_INNER_L3_CLASS_WIDTH	\
		ESF_GZ_RX_PREFIX_HCLASS_NT_OR_INNER_L3_CLASS_WIDTH

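/* Report whether the RX prefix flags the RSS hash for this packet as valid. */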
bool ef100_rx_buf_hash_valid(const u8 *prefix)
{
	return PREFIX_FIELD(prefix, RSS_HASH_VALID);
}

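/* Check the L2 status in the RX prefix classification bits.  Returns true if
 * the frame should be treated as bad (any status other than OK), counting
 * FCS (Ethernet CRC) errors on the channel as they are seen.
 */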
static bool ef100_has_fcs_error(struct efx_channel *channel, u32 *prefix)
{
	u16 rxclass;
	u8 l2status;

	rxclass = le16_to_cpu((__force __le16)PREFIX_FIELD(prefix, CLASS));
	l2status = PREFIX_FIELD(&rxclass, HCLASS_L2_STATUS);

	if (likely(l2status == ESE_GZ_RH_HCLASS_L2_STATUS_OK))
		/* Everything is ok */
		return false;

	if (l2status == ESE_GZ_RH_HCLASS_L2_STATUS_FCS_ERR)
		channel->n_rx_eth_crc_err++;
	return true;
}

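/* Deliver the packet stashed in channel->rx_pkt_index.  Validates the frame
 * against the RX prefix (FCS status, length, checksum) and then hands it up
 * via GRO, or discards it if the channel expects skb delivery.
 */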
void __ef100_rx_packet(struct efx_channel *channel)
{
	struct efx_rx_buffer *rx_buf = efx_rx_buffer(&channel->rx_queue, channel->rx_pkt_index);
	struct efx_nic *efx = channel->efx;
	u8 *eh = efx_rx_buf_va(rx_buf);
	__wsum csum = 0;
	u32 *prefix;

	prefix = (u32 *)(eh - ESE_GZ_RX_PKT_PREFIX_LEN);

	if (ef100_has_fcs_error(channel, prefix) &&
	    unlikely(!(efx->net_dev->features & NETIF_F_RXALL)))
		goto out;

	rx_buf->len = le16_to_cpu((__force __le16)PREFIX_FIELD(prefix, LENGTH));
	if (rx_buf->len <= sizeof(struct ethhdr)) {
		if (net_ratelimit())
			netif_err(channel->efx, rx_err, channel->efx->net_dev,
				  "RX packet too small (%d)\n", rx_buf->len);
		++channel->n_rx_frm_trunc;
		goto out;
	}

	if (likely(efx->net_dev->features & NETIF_F_RXCSUM)) {
		if (PREFIX_FIELD(prefix, NT_OR_INNER_L3_CLASS) == 1) {
			++channel->n_rx_ip_hdr_chksum_err;
		} else {
			u16 sum = be16_to_cpu((__force __be16)PREFIX_FIELD(prefix, CSUM_FRAME));

			csum = (__force __wsum) sum;
		}
	}

	if (channel->type->receive_skb) {
		struct efx_rx_queue *rx_queue =
			efx_channel_get_rx_queue(channel);

		/* no support for special channels yet, so just discard */
		WARN_ON_ONCE(1);
		efx_free_rx_buffers(rx_queue, rx_buf, 1);
		goto out;
	}

	efx_rx_packet_gro(channel, rx_buf, channel->rx_pkt_n_frags, eh, csum);

out:
	channel->rx_pkt_n_frags = 0;
}

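/* Process one completed RX descriptor: sync the DMA buffer, skip past the RX
 * prefix, recycle the page, flush any previously stashed packet and stash
 * this one for __ef100_rx_packet() to deliver.
 */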
static void ef100_rx_packet(struct efx_rx_queue *rx_queue, unsigned int index)
{
	struct efx_rx_buffer *rx_buf = efx_rx_buffer(rx_queue, index);
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	struct efx_nic *efx = rx_queue->efx;

	++rx_queue->rx_packets;

	netif_vdbg(efx, rx_status, efx->net_dev,
		   "RX queue %d received id %x\n",
		   efx_rx_queue_index(rx_queue), index);

	efx_sync_rx_buffer(efx, rx_buf, efx->rx_dma_len);

	prefetch(efx_rx_buf_va(rx_buf));

	rx_buf->page_offset += efx->rx_prefix_size;

	efx_recycle_rx_pages(channel, rx_buf, 1);

	efx_rx_flush_packet(channel);
	channel->rx_pkt_n_frags = 1;
	channel->rx_pkt_index = index;
}

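/* Handle an RX completion event, which may report several merged packet
 * completions; each one consumes the next buffer in ring order.
 */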
void efx_ef100_ev_rx(struct efx_channel *channel, const efx_qword_t *p_event)
{
	struct efx_rx_queue *rx_queue = efx_channel_get_rx_queue(channel);
	unsigned int n_packets =
		EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_RXPKTS_NUM_PKT);
	int i;

	WARN_ON_ONCE(!n_packets);
	if (n_packets > 1)
		++channel->n_rx_merge_events;

	channel->irq_mod_score += 2 * n_packets;

	for (i = 0; i < n_packets; ++i) {
		ef100_rx_packet(rx_queue,
				rx_queue->removed_count & rx_queue->ptr_mask);
		++rx_queue->removed_count;
	}
}

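/* Push freshly filled RX buffers to the hardware: write a descriptor for each
 * buffer added since the last notification, then ring the RX ring doorbell
 * with the new producer index.  The wmb() ensures the descriptor writes are
 * visible to the device before the doorbell write.
 */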
void ef100_rx_write(struct efx_rx_queue *rx_queue)
{
	struct efx_rx_buffer *rx_buf;
	unsigned int idx;
	efx_qword_t *rxd;
	efx_dword_t rxdb;

	while (rx_queue->notified_count != rx_queue->added_count) {
		idx = rx_queue->notified_count & rx_queue->ptr_mask;
		rx_buf = efx_rx_buffer(rx_queue, idx);
		rxd = efx_rx_desc(rx_queue, idx);

		EFX_POPULATE_QWORD_1(*rxd, ESF_GZ_RX_BUF_ADDR, rx_buf->dma_addr);

		++rx_queue->notified_count;
	}

	wmb();
	EFX_POPULATE_DWORD_1(rxdb, ERF_GZ_RX_RING_PIDX,
			     rx_queue->added_count & rx_queue->ptr_mask);
	efx_writed_page(rx_queue->efx, &rxdb,
			ER_GZ_RX_RING_DOORBELL, efx_rx_queue_index(rx_queue));
}