// SPDX-License-Identifier: GPL-2.0-only
/****************************************************************************
 * Driver for Solarflare network controllers and boards
 * Copyright 2018 Solarflare Communications Inc.
 * Copyright 2019-2020 Xilinx Inc.
 *
 * This program is free software; you can redistribute it and/or modify it
 * under the terms of the GNU General Public License version 2 as published
 * by the Free Software Foundation, incorporated herein by reference.
 */

#include "ef100_nic.h"
#include "efx_common.h"
#include "efx_channels.h"
#include "io.h"
#include "selftest.h"
#include "ef100_regs.h"
#include "mcdi.h"
#include "mcdi_pcol.h"
#include "mcdi_port_common.h"
#include "mcdi_functions.h"
#include "mcdi_filters.h"
#include "ef100_rx.h"
#include "ef100_tx.h"
#include "ef100_netdev.h"

#define EF100_MAX_VIS 4096
#define EF100_NUM_MCDI_BUFFERS 1
#define MCDI_BUF_LEN (8 + MCDI_CTL_SDU_LEN_MAX)

#define EF100_RESET_PORT ((ETH_RESET_MAC | ETH_RESET_PHY) << ETH_RESET_SHARED_SHIFT)

/* MCDI
 */
static u8 *ef100_mcdi_buf(struct efx_nic *efx, u8 bufid, dma_addr_t *dma_addr)
{
	struct ef100_nic_data *nic_data = efx->nic_data;

	if (dma_addr)
		*dma_addr = nic_data->mcdi_buf.dma_addr +
			    bufid * ALIGN(MCDI_BUF_LEN, 256);
	return nic_data->mcdi_buf.addr + bufid * ALIGN(MCDI_BUF_LEN, 256);
}

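/* The MC software status register carries the warm boot count in its low
 * word; the high word must read 0xb007 for that count to be valid. An
 * all-ones readback means the hardware is no longer accessible.
 */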
static int ef100_get_warm_boot_count(struct efx_nic *efx)
{
	efx_dword_t reg;

	efx_readd(efx, &reg, efx_reg(efx, ER_GZ_MC_SFT_STATUS));

	if (EFX_DWORD_FIELD(reg, EFX_DWORD_0) == 0xffffffff) {
		netif_err(efx, hw, efx->net_dev, "Hardware unavailable\n");
		efx->state = STATE_DISABLED;
		return -ENETDOWN;
	} else {
		return EFX_DWORD_FIELD(reg, EFX_WORD_1) == 0xb007 ?
			EFX_DWORD_FIELD(reg, EFX_WORD_0) : -EIO;
	}
}

static void ef100_mcdi_request(struct efx_nic *efx,
			       const efx_dword_t *hdr, size_t hdr_len,
			       const efx_dword_t *sdu, size_t sdu_len)
{
	dma_addr_t dma_addr;
	u8 *pdu = ef100_mcdi_buf(efx, 0, &dma_addr);

	memcpy(pdu, hdr, hdr_len);
	memcpy(pdu + hdr_len, sdu, sdu_len);
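	/* Make sure the request has fully reached the shared buffer
	 * before the doorbell write tells firmware to read it.
	 */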
	wmb();

	/* The hardware provides 'low' and 'high' (doorbell) registers
	 * for passing the 64-bit address of an MCDI request to
	 * firmware. However the dwords are swapped by firmware. The
	 * least significant bits of the doorbell are then 0 for all
	 * MCDI requests due to alignment.
	 */
	_efx_writed(efx, cpu_to_le32((u64)dma_addr >> 32), efx_reg(efx, ER_GZ_MC_DB_LWRD));
	_efx_writed(efx, cpu_to_le32((u32)dma_addr), efx_reg(efx, ER_GZ_MC_DB_HWRD));
}

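/* Firmware writes its response back into the same shared buffer, setting
 * the RESPONSE bit in the MCDI header once the reply is complete.
 */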
static bool ef100_mcdi_poll_response(struct efx_nic *efx)
{
	const efx_dword_t hdr =
		*(const efx_dword_t *)(ef100_mcdi_buf(efx, 0, NULL));

	rmb();
	return EFX_DWORD_FIELD(hdr, MCDI_HEADER_RESPONSE);
}

static void ef100_mcdi_read_response(struct efx_nic *efx,
				     efx_dword_t *outbuf, size_t offset,
				     size_t outlen)
{
	const u8 *pdu = ef100_mcdi_buf(efx, 0, NULL);

	memcpy(outbuf, pdu + offset, outlen);
}

static int ef100_mcdi_poll_reboot(struct efx_nic *efx)
{
	struct ef100_nic_data *nic_data = efx->nic_data;
	int rc;

	rc = ef100_get_warm_boot_count(efx);
	if (rc < 0) {
		/* The firmware is presumably in the process of
		 * rebooting. However, we are supposed to report each
		 * reboot just once, so we must only do that once we
		 * can read and store the updated warm boot count.
		 */
		return 0;
	}

	if (rc == nic_data->warm_boot_count)
		return 0;

	nic_data->warm_boot_count = rc;

	return -EIO;
}

static void ef100_mcdi_reboot_detected(struct efx_nic *efx)
{
}

/* MCDI calls
 */
static int ef100_get_mac_address(struct efx_nic *efx, u8 *mac_address)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_MAC_ADDRESSES_OUT_LEN);
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_MAC_ADDRESSES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_MAC_ADDRESSES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_MAC_ADDRESSES_OUT_LEN)
		return -EIO;

	ether_addr_copy(mac_address,
			MCDI_PTR(outbuf, GET_MAC_ADDRESSES_OUT_MAC_ADDR_BASE));
	return 0;
}

static int efx_ef100_init_datapath_caps(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_CAPABILITIES_V7_OUT_LEN);
	struct ef100_nic_data *nic_data = efx->nic_data;
	u8 vi_window_mode;
	size_t outlen;
	int rc;

	BUILD_BUG_ON(MC_CMD_GET_CAPABILITIES_IN_LEN != 0);

	rc = efx_mcdi_rpc(efx, MC_CMD_GET_CAPABILITIES, NULL, 0,
			  outbuf, sizeof(outbuf), &outlen);
	if (rc)
		return rc;
	if (outlen < MC_CMD_GET_CAPABILITIES_V4_OUT_LEN) {
		netif_err(efx, drv, efx->net_dev,
			  "unable to read datapath firmware capabilities\n");
		return -EIO;
	}

	nic_data->datapath_caps = MCDI_DWORD(outbuf,
					     GET_CAPABILITIES_OUT_FLAGS1);
	nic_data->datapath_caps2 = MCDI_DWORD(outbuf,
					      GET_CAPABILITIES_V2_OUT_FLAGS2);
	if (outlen < MC_CMD_GET_CAPABILITIES_V7_OUT_LEN)
		nic_data->datapath_caps3 = 0;
	else
		nic_data->datapath_caps3 = MCDI_DWORD(outbuf,
						      GET_CAPABILITIES_V7_OUT_FLAGS3);

	vi_window_mode = MCDI_BYTE(outbuf,
				   GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE);
	rc = efx_mcdi_window_mode_to_stride(efx, vi_window_mode);
	if (rc)
		return rc;

	if (efx_ef100_has_cap(nic_data->datapath_caps2, TX_TSO_V3))
		efx->net_dev->features |= NETIF_F_TSO | NETIF_F_TSO6;
	efx->num_mac_stats = MCDI_WORD(outbuf,
				       GET_CAPABILITIES_V4_OUT_MAC_STATS_NUM_STATS);
	netif_dbg(efx, probe, efx->net_dev,
		  "firmware reports num_mac_stats = %u\n",
		  efx->num_mac_stats);
	return 0;
}

/* Event handling
 */
static int ef100_ev_probe(struct efx_channel *channel)
{
	/* Allocate an extra descriptor for the QMDA status completion entry */
	return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
				    (channel->eventq_mask + 2) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}

static int ef100_ev_init(struct efx_channel *channel)
{
	struct ef100_nic_data *nic_data = channel->efx->nic_data;

	/* initial phase is 0 */
	clear_bit(channel->channel, nic_data->evq_phases);

	return efx_mcdi_ev_init(channel, false, false);
}

static void ef100_ev_read_ack(struct efx_channel *channel)
{
	efx_dword_t evq_prime;

	EFX_POPULATE_DWORD_2(evq_prime,
			     ERF_GZ_EVQ_ID, channel->channel,
			     ERF_GZ_IDX, channel->eventq_read_ptr &
					 channel->eventq_mask);

	efx_writed(channel->efx, &evq_prime,
		   efx_reg(channel->efx, ER_GZ_EVQ_INT_PRIME));
}

static int ef100_ev_process(struct efx_channel *channel, int quota)
{
	struct efx_nic *efx = channel->efx;
	struct ef100_nic_data *nic_data;
	bool evq_phase, old_evq_phase;
	unsigned int read_ptr;
	efx_qword_t *p_event;
	int spent = 0;
	bool ev_phase;
	int ev_type;

	if (unlikely(!channel->enabled))
		return 0;

	nic_data = efx->nic_data;
	evq_phase = test_bit(channel->channel, nic_data->evq_phases);
	old_evq_phase = evq_phase;
	read_ptr = channel->eventq_read_ptr;
	BUILD_BUG_ON(ESF_GZ_EV_RXPKTS_PHASE_LBN != ESF_GZ_EV_TXCMPL_PHASE_LBN);

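	/* Events are valid while their phase bit matches the phase we
	 * expect for this queue; the expected phase flips each time the
	 * event ring wraps.
	 */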
	while (spent < quota) {
		p_event = efx_event(channel, read_ptr);

		ev_phase = !!EFX_QWORD_FIELD(*p_event, ESF_GZ_EV_RXPKTS_PHASE);
		if (ev_phase != evq_phase)
			break;

		netif_vdbg(efx, drv, efx->net_dev,
			   "processing event on %d " EFX_QWORD_FMT "\n",
			   channel->channel, EFX_QWORD_VAL(*p_event));

		ev_type = EFX_QWORD_FIELD(*p_event, ESF_GZ_E_TYPE);

		switch (ev_type) {
		case ESE_GZ_EF100_EV_RX_PKTS:
			efx_ef100_ev_rx(channel, p_event);
			++spent;
			break;
		case ESE_GZ_EF100_EV_MCDI:
			efx_mcdi_process_event(channel, p_event);
			break;
		case ESE_GZ_EF100_EV_TX_COMPLETION:
			ef100_ev_tx(channel, p_event);
			break;
		case ESE_GZ_EF100_EV_DRIVER:
			netif_info(efx, drv, efx->net_dev,
				   "Driver initiated event " EFX_QWORD_FMT "\n",
				   EFX_QWORD_VAL(*p_event));
			break;
		default:
			netif_info(efx, drv, efx->net_dev,
				   "Unhandled event " EFX_QWORD_FMT "\n",
				   EFX_QWORD_VAL(*p_event));
		}

		++read_ptr;
		if ((read_ptr & channel->eventq_mask) == 0)
			evq_phase = !evq_phase;
	}

	channel->eventq_read_ptr = read_ptr;
	if (evq_phase != old_evq_phase)
		change_bit(channel->channel, nic_data->evq_phases);

	return spent;
}

static irqreturn_t ef100_msi_interrupt(int irq, void *dev_id)
{
	struct efx_msi_context *context = dev_id;
	struct efx_nic *efx = context->efx;

	netif_vdbg(efx, intr, efx->net_dev,
		   "IRQ %d on CPU %d\n", irq, raw_smp_processor_id());

	if (likely(READ_ONCE(efx->irq_soft_enabled))) {
		/* Note test interrupts */
		if (context->index == efx->irq_level)
			efx->last_irq_cpu = raw_smp_processor_id();

		/* Schedule processing of the channel */
		efx_schedule_channel_irq(efx->channel[context->index]);
	}

	return IRQ_HANDLED;
}

static int ef100_phy_probe(struct efx_nic *efx)
{
	struct efx_mcdi_phy_data *phy_data;
	int rc;

	/* Probe for the PHY */
	efx->phy_data = kzalloc(sizeof(struct efx_mcdi_phy_data), GFP_KERNEL);
	if (!efx->phy_data)
		return -ENOMEM;

	rc = efx_mcdi_get_phy_cfg(efx, efx->phy_data);
	if (rc)
		return rc;

	/* Populate driver and ethtool settings */
	phy_data = efx->phy_data;
	mcdi_to_ethtool_linkset(phy_data->media, phy_data->supported_cap,
				efx->link_advertising);
	efx->fec_config = mcdi_fec_caps_to_ethtool(phy_data->supported_cap,
						   false);

	/* Default to Autonegotiated flow control if the PHY supports it */
	efx->wanted_fc = EFX_FC_RX | EFX_FC_TX;
	if (phy_data->supported_cap & (1 << MC_CMD_PHY_CAP_AN_LBN))
		efx->wanted_fc |= EFX_FC_AUTO;
	efx_link_set_wanted_fc(efx, efx->wanted_fc);

	/* Push settings to the PHY. Failure is not fatal, the user can try to
	 * fix it using ethtool.
	 */
	rc = efx_mcdi_port_reconfigure(efx);
	if (rc && rc != -EPERM)
		netif_warn(efx, drv, efx->net_dev,
			   "could not initialise PHY settings\n");

	return 0;
}

static int ef100_filter_table_probe(struct efx_nic *efx)
{
	return efx_mcdi_filter_table_probe(efx, true);
}

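/* Bring the filter table up by adding the two default VLAN contexts:
 * the unspecified-VID context and VLAN 0. On failure, undo whatever was
 * already added before returning.
 */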
static int ef100_filter_table_up(struct efx_nic *efx)
{
	int rc;

	rc = efx_mcdi_filter_add_vlan(efx, EFX_FILTER_VID_UNSPEC);
	if (rc) {
		efx_mcdi_filter_table_down(efx);
		return rc;
	}

	rc = efx_mcdi_filter_add_vlan(efx, 0);
	if (rc) {
		efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
		efx_mcdi_filter_table_down(efx);
	}

	return rc;
}

static void ef100_filter_table_down(struct efx_nic *efx)
{
	efx_mcdi_filter_del_vlan(efx, 0);
	efx_mcdi_filter_del_vlan(efx, EFX_FILTER_VID_UNSPEC);
	efx_mcdi_filter_table_down(efx);
}

/* Other
 */
static int ef100_reconfigure_mac(struct efx_nic *efx, bool mtu_only)
{
	WARN_ON(!mutex_is_locked(&efx->mac_lock));

	efx_mcdi_filter_sync_rx_mode(efx);

	if (mtu_only && efx_has_cap(efx, SET_MAC_ENHANCED))
		return efx_mcdi_set_mtu(efx);
	return efx_mcdi_set_mac(efx);
}

static enum reset_type ef100_map_reset_reason(enum reset_type reason)
{
	if (reason == RESET_TYPE_TX_WATCHDOG)
		return reason;
	return RESET_TYPE_DISABLE;
}

static int ef100_map_reset_flags(u32 *flags)
{
	/* Only perform a RESET_TYPE_ALL because we don't support MC_REBOOTs */
	if ((*flags & EF100_RESET_PORT)) {
		*flags &= ~EF100_RESET_PORT;
		return RESET_TYPE_ALL;
	}
	if (*flags & ETH_RESET_MGMT) {
		*flags &= ~ETH_RESET_MGMT;
		return RESET_TYPE_DISABLE;
	}

	return -EINVAL;
}

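/* A TX watchdog reset just restarts the datapath by closing and reopening
 * the net device; RESET_TYPE_ALL additionally asks the MC to reset the
 * port first. Any other reset type leaves the device closed.
 */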
static int ef100_reset(struct efx_nic *efx, enum reset_type reset_type)
{
	int rc;

	dev_close(efx->net_dev);

	if (reset_type == RESET_TYPE_TX_WATCHDOG) {
		netif_device_attach(efx->net_dev);
		__clear_bit(reset_type, &efx->reset_pending);
		rc = dev_open(efx->net_dev, NULL);
	} else if (reset_type == RESET_TYPE_ALL) {
		rc = efx_mcdi_reset(efx, reset_type);
		if (rc)
			return rc;

		netif_device_attach(efx->net_dev);

		rc = dev_open(efx->net_dev, NULL);
	} else {
		rc = 1;	/* Leave the device closed */
	}
	return rc;
}

static void ef100_common_stat_mask(unsigned long *mask)
{
	__set_bit(EF100_STAT_port_rx_packets, mask);
	__set_bit(EF100_STAT_port_tx_packets, mask);
	__set_bit(EF100_STAT_port_rx_bytes, mask);
	__set_bit(EF100_STAT_port_tx_bytes, mask);
	__set_bit(EF100_STAT_port_rx_multicast, mask);
	__set_bit(EF100_STAT_port_rx_bad, mask);
	__set_bit(EF100_STAT_port_rx_align_error, mask);
	__set_bit(EF100_STAT_port_rx_overflow, mask);
}

static void ef100_ethtool_stat_mask(unsigned long *mask)
{
	__set_bit(EF100_STAT_port_tx_pause, mask);
	__set_bit(EF100_STAT_port_tx_unicast, mask);
	__set_bit(EF100_STAT_port_tx_multicast, mask);
	__set_bit(EF100_STAT_port_tx_broadcast, mask);
	__set_bit(EF100_STAT_port_tx_lt64, mask);
	__set_bit(EF100_STAT_port_tx_64, mask);
	__set_bit(EF100_STAT_port_tx_65_to_127, mask);
	__set_bit(EF100_STAT_port_tx_128_to_255, mask);
	__set_bit(EF100_STAT_port_tx_256_to_511, mask);
	__set_bit(EF100_STAT_port_tx_512_to_1023, mask);
	__set_bit(EF100_STAT_port_tx_1024_to_15xx, mask);
	__set_bit(EF100_STAT_port_tx_15xx_to_jumbo, mask);
	__set_bit(EF100_STAT_port_rx_good, mask);
	__set_bit(EF100_STAT_port_rx_pause, mask);
	__set_bit(EF100_STAT_port_rx_unicast, mask);
	__set_bit(EF100_STAT_port_rx_broadcast, mask);
	__set_bit(EF100_STAT_port_rx_lt64, mask);
	__set_bit(EF100_STAT_port_rx_64, mask);
	__set_bit(EF100_STAT_port_rx_65_to_127, mask);
	__set_bit(EF100_STAT_port_rx_128_to_255, mask);
	__set_bit(EF100_STAT_port_rx_256_to_511, mask);
	__set_bit(EF100_STAT_port_rx_512_to_1023, mask);
	__set_bit(EF100_STAT_port_rx_1024_to_15xx, mask);
	__set_bit(EF100_STAT_port_rx_15xx_to_jumbo, mask);
	__set_bit(EF100_STAT_port_rx_gtjumbo, mask);
	__set_bit(EF100_STAT_port_rx_bad_gtjumbo, mask);
	__set_bit(EF100_STAT_port_rx_length_error, mask);
	__set_bit(EF100_STAT_port_rx_nodesc_drops, mask);
	__set_bit(GENERIC_STAT_rx_nodesc_trunc, mask);
	__set_bit(GENERIC_STAT_rx_noskb_drops, mask);
}

#define EF100_DMA_STAT(ext_name, mcdi_name)			\
	[EF100_STAT_ ## ext_name] =				\
	{ #ext_name, 64, 8 * MC_CMD_MAC_ ## mcdi_name }

static const struct efx_hw_stat_desc ef100_stat_desc[EF100_STAT_COUNT] = {
	EF100_DMA_STAT(port_tx_bytes, TX_BYTES),
	EF100_DMA_STAT(port_tx_packets, TX_PKTS),
	EF100_DMA_STAT(port_tx_pause, TX_PAUSE_PKTS),
	EF100_DMA_STAT(port_tx_unicast, TX_UNICAST_PKTS),
	EF100_DMA_STAT(port_tx_multicast, TX_MULTICAST_PKTS),
	EF100_DMA_STAT(port_tx_broadcast, TX_BROADCAST_PKTS),
	EF100_DMA_STAT(port_tx_lt64, TX_LT64_PKTS),
	EF100_DMA_STAT(port_tx_64, TX_64_PKTS),
	EF100_DMA_STAT(port_tx_65_to_127, TX_65_TO_127_PKTS),
	EF100_DMA_STAT(port_tx_128_to_255, TX_128_TO_255_PKTS),
	EF100_DMA_STAT(port_tx_256_to_511, TX_256_TO_511_PKTS),
	EF100_DMA_STAT(port_tx_512_to_1023, TX_512_TO_1023_PKTS),
	EF100_DMA_STAT(port_tx_1024_to_15xx, TX_1024_TO_15XX_PKTS),
	EF100_DMA_STAT(port_tx_15xx_to_jumbo, TX_15XX_TO_JUMBO_PKTS),
	EF100_DMA_STAT(port_rx_bytes, RX_BYTES),
	EF100_DMA_STAT(port_rx_packets, RX_PKTS),
	EF100_DMA_STAT(port_rx_good, RX_GOOD_PKTS),
	EF100_DMA_STAT(port_rx_bad, RX_BAD_FCS_PKTS),
	EF100_DMA_STAT(port_rx_pause, RX_PAUSE_PKTS),
	EF100_DMA_STAT(port_rx_unicast, RX_UNICAST_PKTS),
	EF100_DMA_STAT(port_rx_multicast, RX_MULTICAST_PKTS),
	EF100_DMA_STAT(port_rx_broadcast, RX_BROADCAST_PKTS),
	EF100_DMA_STAT(port_rx_lt64, RX_UNDERSIZE_PKTS),
	EF100_DMA_STAT(port_rx_64, RX_64_PKTS),
	EF100_DMA_STAT(port_rx_65_to_127, RX_65_TO_127_PKTS),
	EF100_DMA_STAT(port_rx_128_to_255, RX_128_TO_255_PKTS),
	EF100_DMA_STAT(port_rx_256_to_511, RX_256_TO_511_PKTS),
	EF100_DMA_STAT(port_rx_512_to_1023, RX_512_TO_1023_PKTS),
	EF100_DMA_STAT(port_rx_1024_to_15xx, RX_1024_TO_15XX_PKTS),
	EF100_DMA_STAT(port_rx_15xx_to_jumbo, RX_15XX_TO_JUMBO_PKTS),
	EF100_DMA_STAT(port_rx_gtjumbo, RX_GTJUMBO_PKTS),
	EF100_DMA_STAT(port_rx_bad_gtjumbo, RX_JABBER_PKTS),
	EF100_DMA_STAT(port_rx_align_error, RX_ALIGN_ERROR_PKTS),
	EF100_DMA_STAT(port_rx_length_error, RX_LENGTH_ERROR_PKTS),
	EF100_DMA_STAT(port_rx_overflow, RX_OVERFLOW_PKTS),
	EF100_DMA_STAT(port_rx_nodesc_drops, RX_NODESC_DROPS),
	EFX_GENERIC_SW_STAT(rx_nodesc_trunc),
	EFX_GENERIC_SW_STAT(rx_noskb_drops),
};

static size_t ef100_describe_stats(struct efx_nic *efx, u8 *names)
{
	DECLARE_BITMAP(mask, EF100_STAT_COUNT) = {};

	ef100_ethtool_stat_mask(mask);
	return efx_nic_describe_stats(ef100_stat_desc, EF100_STAT_COUNT,
				      mask, names);
}

static size_t ef100_update_stats_common(struct efx_nic *efx, u64 *full_stats,
					struct rtnl_link_stats64 *core_stats)
{
	struct ef100_nic_data *nic_data = efx->nic_data;
	DECLARE_BITMAP(mask, EF100_STAT_COUNT) = {};
	size_t stats_count = 0, index;
	u64 *stats = nic_data->stats;

	ef100_ethtool_stat_mask(mask);

	if (full_stats) {
		for_each_set_bit(index, mask, EF100_STAT_COUNT) {
			if (ef100_stat_desc[index].name) {
				*full_stats++ = stats[index];
				++stats_count;
			}
		}
	}

	if (!core_stats)
		return stats_count;

	core_stats->rx_packets = stats[EF100_STAT_port_rx_packets];
	core_stats->tx_packets = stats[EF100_STAT_port_tx_packets];
	core_stats->rx_bytes = stats[EF100_STAT_port_rx_bytes];
	core_stats->tx_bytes = stats[EF100_STAT_port_tx_bytes];
	core_stats->rx_dropped = stats[EF100_STAT_port_rx_nodesc_drops] +
				 stats[GENERIC_STAT_rx_nodesc_trunc] +
				 stats[GENERIC_STAT_rx_noskb_drops];
	core_stats->multicast = stats[EF100_STAT_port_rx_multicast];
	core_stats->rx_length_errors =
			stats[EF100_STAT_port_rx_gtjumbo] +
			stats[EF100_STAT_port_rx_length_error];
	core_stats->rx_crc_errors = stats[EF100_STAT_port_rx_bad];
	core_stats->rx_frame_errors =
			stats[EF100_STAT_port_rx_align_error];
	core_stats->rx_fifo_errors = stats[EF100_STAT_port_rx_overflow];
	core_stats->rx_errors = (core_stats->rx_length_errors +
				 core_stats->rx_crc_errors +
				 core_stats->rx_frame_errors);

	return stats_count;
}

static size_t ef100_update_stats(struct efx_nic *efx,
				 u64 *full_stats,
				 struct rtnl_link_stats64 *core_stats)
{
	__le64 *mc_stats = kmalloc(array_size(efx->num_mac_stats, sizeof(__le64)), GFP_ATOMIC);
	struct ef100_nic_data *nic_data = efx->nic_data;
	DECLARE_BITMAP(mask, EF100_STAT_COUNT) = {};
	u64 *stats = nic_data->stats;

	ef100_common_stat_mask(mask);
	ef100_ethtool_stat_mask(mask);

	if (!mc_stats)
		return 0;

	efx_nic_copy_stats(efx, mc_stats);
	efx_nic_update_stats(ef100_stat_desc, EF100_STAT_COUNT, mask,
			     stats, mc_stats, false);

	kfree(mc_stats);

	return ef100_update_stats_common(efx, full_stats, core_stats);
}

static int efx_ef100_get_phys_port_id(struct efx_nic *efx,
				      struct netdev_phys_item_id *ppid)
{
	struct ef100_nic_data *nic_data = efx->nic_data;

	if (!is_valid_ether_addr(nic_data->port_id))
		return -EOPNOTSUPP;

	ppid->id_len = ETH_ALEN;
	memcpy(ppid->id, nic_data->port_id, ppid->id_len);

	return 0;
}

static int efx_ef100_irq_test_generate(struct efx_nic *efx)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_TRIGGER_INTERRUPT_IN_LEN);

	BUILD_BUG_ON(MC_CMD_TRIGGER_INTERRUPT_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, TRIGGER_INTERRUPT_IN_INTR_LEVEL, efx->irq_level);
	return efx_mcdi_rpc_quiet(efx, MC_CMD_TRIGGER_INTERRUPT,
				  inbuf, sizeof(inbuf), NULL, 0, NULL);
}

#define EFX_EF100_TEST 1

static void efx_ef100_ev_test_generate(struct efx_channel *channel)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_DRIVER_EVENT_IN_LEN);
	struct efx_nic *efx = channel->efx;
	efx_qword_t event;
	int rc;

	EFX_POPULATE_QWORD_2(event,
			     ESF_GZ_E_TYPE, ESE_GZ_EF100_EV_DRIVER,
			     ESF_GZ_DRIVER_DATA, EFX_EF100_TEST);

	MCDI_SET_DWORD(inbuf, DRIVER_EVENT_IN_EVQ, channel->channel);

	/* MCDI_SET_QWORD is not appropriate here since EFX_POPULATE_* has
	 * already swapped the data to little-endian order.
	 */
	memcpy(MCDI_PTR(inbuf, DRIVER_EVENT_IN_DATA), &event.u64[0],
	       sizeof(efx_qword_t));

	rc = efx_mcdi_rpc(efx, MC_CMD_DRIVER_EVENT, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc && (rc != -ENETDOWN))
		goto fail;

	return;

fail:
	WARN_ON(true);
	netif_err(efx, hw, efx->net_dev, "%s: failed rc=%d\n", __func__, rc);
}

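/* Look up a datapath capability flag in whichever GET_CAPABILITIES flags
 * word the caller's offset selects.
 */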
static unsigned int ef100_check_caps(const struct efx_nic *efx,
				     u8 flag, u32 offset)
{
	const struct ef100_nic_data *nic_data = efx->nic_data;

	switch (offset) {
	case MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS1_OFST:
		return nic_data->datapath_caps & BIT_ULL(flag);
	case MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS2_OFST:
		return nic_data->datapath_caps2 & BIT_ULL(flag);
	case MC_CMD_GET_CAPABILITIES_V8_OUT_FLAGS3_OFST:
		return nic_data->datapath_caps3 & BIT_ULL(flag);
	default:
		return 0;
	}
}

/* NIC level access functions
 */
#define EF100_OFFLOAD_FEATURES (NETIF_F_HW_CSUM | NETIF_F_RXCSUM |	\
	NETIF_F_HIGHDMA | NETIF_F_SG | NETIF_F_FRAGLIST | NETIF_F_NTUPLE | \
	NETIF_F_RXHASH | NETIF_F_RXFCS | NETIF_F_TSO_ECN | NETIF_F_RXALL | \
	NETIF_F_TSO_MANGLEID | NETIF_F_HW_VLAN_CTAG_TX)

const struct efx_nic_type ef100_pf_nic_type = {
	.revision = EFX_REV_EF100,
	.is_vf = false,
	.probe = ef100_probe_pf,
	.offload_features = EF100_OFFLOAD_FEATURES,
	.mcdi_max_ver = 2,
	.mcdi_request = ef100_mcdi_request,
	.mcdi_poll_response = ef100_mcdi_poll_response,
	.mcdi_read_response = ef100_mcdi_read_response,
	.mcdi_poll_reboot = ef100_mcdi_poll_reboot,
	.mcdi_reboot_detected = ef100_mcdi_reboot_detected,
	.irq_enable_master = efx_port_dummy_op_void,
	.irq_test_generate = efx_ef100_irq_test_generate,
	.irq_disable_non_ev = efx_port_dummy_op_void,
	.push_irq_moderation = efx_channel_dummy_op_void,
	.min_interrupt_mode = EFX_INT_MODE_MSIX,
	.map_reset_reason = ef100_map_reset_reason,
	.map_reset_flags = ef100_map_reset_flags,
	.reset = ef100_reset,

	.check_caps = ef100_check_caps,

	.ev_probe = ef100_ev_probe,
	.ev_init = ef100_ev_init,
	.ev_fini = efx_mcdi_ev_fini,
	.ev_remove = efx_mcdi_ev_remove,
	.irq_handle_msi = ef100_msi_interrupt,
	.ev_process = ef100_ev_process,
	.ev_read_ack = ef100_ev_read_ack,
	.ev_test_generate = efx_ef100_ev_test_generate,
	.tx_probe = ef100_tx_probe,
	.tx_init = ef100_tx_init,
	.tx_write = ef100_tx_write,
	.tx_enqueue = ef100_enqueue_skb,
	.rx_probe = efx_mcdi_rx_probe,
	.rx_init = efx_mcdi_rx_init,
	.rx_remove = efx_mcdi_rx_remove,
	.rx_write = ef100_rx_write,
	.rx_packet = __ef100_rx_packet,
	.rx_buf_hash_valid = ef100_rx_buf_hash_valid,
	.fini_dmaq = efx_fini_dmaq,
	.max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
	.filter_table_probe = ef100_filter_table_up,
	.filter_table_restore = efx_mcdi_filter_table_restore,
	.filter_table_remove = ef100_filter_table_down,
	.filter_insert = efx_mcdi_filter_insert,
	.filter_remove_safe = efx_mcdi_filter_remove_safe,
	.filter_get_safe = efx_mcdi_filter_get_safe,
	.filter_clear_rx = efx_mcdi_filter_clear_rx,
	.filter_count_rx_used = efx_mcdi_filter_count_rx_used,
	.filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
	.filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
	.filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
#endif

	.get_phys_port_id = efx_ef100_get_phys_port_id,

	.rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN,
	.rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8,
	.rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8,
	.rx_hash_key_size = 40,
	.rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
	.rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
	.rx_push_rss_context_config = efx_mcdi_rx_push_rss_context_config,
	.rx_pull_rss_context_config = efx_mcdi_rx_pull_rss_context_config,
	.rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,

	.reconfigure_mac = ef100_reconfigure_mac,
	.reconfigure_port = efx_mcdi_port_reconfigure,
	.test_nvram = efx_new_mcdi_nvram_test_all,
	.describe_stats = ef100_describe_stats,
	.start_stats = efx_mcdi_mac_start_stats,
	.update_stats = ef100_update_stats,
	.pull_stats = efx_mcdi_mac_pull_stats,
	.stop_stats = efx_mcdi_mac_stop_stats,

	/* Per-type bar/size configuration not used on ef100. Location of
	 * registers is defined by extended capabilities.
	 */
	.mem_bar = NULL,
	.mem_map_size = NULL,

};

const struct efx_nic_type ef100_vf_nic_type = {
	.revision = EFX_REV_EF100,
	.is_vf = true,
	.probe = ef100_probe_vf,
	.offload_features = EF100_OFFLOAD_FEATURES,
	.mcdi_max_ver = 2,
	.mcdi_request = ef100_mcdi_request,
	.mcdi_poll_response = ef100_mcdi_poll_response,
	.mcdi_read_response = ef100_mcdi_read_response,
	.mcdi_poll_reboot = ef100_mcdi_poll_reboot,
	.mcdi_reboot_detected = ef100_mcdi_reboot_detected,
	.irq_enable_master = efx_port_dummy_op_void,
	.irq_test_generate = efx_ef100_irq_test_generate,
	.irq_disable_non_ev = efx_port_dummy_op_void,
	.push_irq_moderation = efx_channel_dummy_op_void,
	.min_interrupt_mode = EFX_INT_MODE_MSIX,
	.map_reset_reason = ef100_map_reset_reason,
	.map_reset_flags = ef100_map_reset_flags,
	.reset = ef100_reset,
	.check_caps = ef100_check_caps,
	.ev_probe = ef100_ev_probe,
	.ev_init = ef100_ev_init,
	.ev_fini = efx_mcdi_ev_fini,
	.ev_remove = efx_mcdi_ev_remove,
	.irq_handle_msi = ef100_msi_interrupt,
	.ev_process = ef100_ev_process,
	.ev_read_ack = ef100_ev_read_ack,
	.ev_test_generate = efx_ef100_ev_test_generate,
	.tx_probe = ef100_tx_probe,
	.tx_init = ef100_tx_init,
	.tx_write = ef100_tx_write,
	.tx_enqueue = ef100_enqueue_skb,
	.rx_probe = efx_mcdi_rx_probe,
	.rx_init = efx_mcdi_rx_init,
	.rx_remove = efx_mcdi_rx_remove,
	.rx_write = ef100_rx_write,
	.rx_packet = __ef100_rx_packet,
	.rx_buf_hash_valid = ef100_rx_buf_hash_valid,
	.fini_dmaq = efx_fini_dmaq,
	.max_rx_ip_filters = EFX_MCDI_FILTER_TBL_ROWS,
	.filter_table_probe = ef100_filter_table_up,
	.filter_table_restore = efx_mcdi_filter_table_restore,
	.filter_table_remove = ef100_filter_table_down,
	.filter_insert = efx_mcdi_filter_insert,
	.filter_remove_safe = efx_mcdi_filter_remove_safe,
	.filter_get_safe = efx_mcdi_filter_get_safe,
	.filter_clear_rx = efx_mcdi_filter_clear_rx,
	.filter_count_rx_used = efx_mcdi_filter_count_rx_used,
	.filter_get_rx_id_limit = efx_mcdi_filter_get_rx_id_limit,
	.filter_get_rx_ids = efx_mcdi_filter_get_rx_ids,
#ifdef CONFIG_RFS_ACCEL
	.filter_rfs_expire_one = efx_mcdi_filter_rfs_expire_one,
#endif

	.rx_prefix_size = ESE_GZ_RX_PKT_PREFIX_LEN,
	.rx_hash_offset = ESF_GZ_RX_PREFIX_RSS_HASH_LBN / 8,
	.rx_ts_offset = ESF_GZ_RX_PREFIX_PARTIAL_TSTAMP_LBN / 8,
	.rx_hash_key_size = 40,
	.rx_pull_rss_config = efx_mcdi_rx_pull_rss_config,
	.rx_push_rss_config = efx_mcdi_pf_rx_push_rss_config,
	.rx_restore_rss_contexts = efx_mcdi_rx_restore_rss_contexts,

	.reconfigure_mac = ef100_reconfigure_mac,
	.test_nvram = efx_new_mcdi_nvram_test_all,
	.describe_stats = ef100_describe_stats,
	.start_stats = efx_mcdi_mac_start_stats,
	.update_stats = ef100_update_stats,
	.pull_stats = efx_mcdi_mac_pull_stats,
	.stop_stats = efx_mcdi_mac_stop_stats,

	.mem_bar = NULL,
	.mem_map_size = NULL,

};

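/* Compare two dotted-quad version strings. A string that parses as four
 * numeric fields sorts after one that does not; otherwise compare field
 * by field.
 */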
static int compare_versions(const char *a, const char *b)
{
	int a_major, a_minor, a_point, a_patch;
	int b_major, b_minor, b_point, b_patch;
	int a_matched, b_matched;

	a_matched = sscanf(a, "%d.%d.%d.%d", &a_major, &a_minor, &a_point, &a_patch);
	b_matched = sscanf(b, "%d.%d.%d.%d", &b_major, &b_minor, &b_point, &b_patch);

	if (a_matched == 4 && b_matched != 4)
		return +1;

	if (a_matched != 4 && b_matched == 4)
		return -1;

	if (a_matched != 4 && b_matched != 4)
		return 0;

	if (a_major != b_major)
		return a_major - b_major;

	if (a_minor != b_minor)
		return a_minor - b_minor;

	if (a_point != b_point)
		return a_point - b_point;

	return a_patch - b_patch;
}

enum ef100_tlv_state_machine {
	EF100_TLV_TYPE,
	EF100_TLV_TYPE_CONT,
	EF100_TLV_LENGTH,
	EF100_TLV_VALUE
};

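/* Design parameters arrive as a TLV byte stream: a 7-bit type with bit 7
 * acting as a continuation flag into a second type byte, a one-byte
 * length, then the value bytes in little-endian order (this driver only
 * handles values that fit in a u64).
 */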
struct ef100_tlv_state {
	enum ef100_tlv_state_machine state;
	u64 value;
	u32 value_offset;
	u16 type;
	u8 len;
};

static int ef100_tlv_feed(struct ef100_tlv_state *state, u8 byte)
{
	switch (state->state) {
	case EF100_TLV_TYPE:
		state->type = byte & 0x7f;
		state->state = (byte & 0x80) ? EF100_TLV_TYPE_CONT
					     : EF100_TLV_LENGTH;
		/* Clear ready to read in a new entry */
		state->value = 0;
		state->value_offset = 0;
		return 0;
	case EF100_TLV_TYPE_CONT:
		state->type |= byte << 7;
		state->state = EF100_TLV_LENGTH;
		return 0;
	case EF100_TLV_LENGTH:
		state->len = byte;
		/* We only handle TLVs that fit in a u64 */
		if (state->len > sizeof(state->value))
			return -EOPNOTSUPP;
		/* len may be zero, implying a value of zero */
		state->state = state->len ? EF100_TLV_VALUE : EF100_TLV_TYPE;
		return 0;
	case EF100_TLV_VALUE:
		state->value |= ((u64)byte) << (state->value_offset * 8);
		state->value_offset++;
		if (state->value_offset >= state->len)
			state->state = EF100_TLV_TYPE;
		return 0;
	default: /* state machine error, can't happen */
		WARN_ON_ONCE(1);
		return -EIO;
	}
}

static int ef100_process_design_param(struct efx_nic *efx,
				      const struct ef100_tlv_state *reader)
{
	struct ef100_nic_data *nic_data = efx->nic_data;

	switch (reader->type) {
	case ESE_EF100_DP_GZ_PAD: /* padding, skip it */
		return 0;
	case ESE_EF100_DP_GZ_PARTIAL_TSTAMP_SUB_NANO_BITS:
		/* Driver doesn't support timestamping yet, so we don't care */
		return 0;
	case ESE_EF100_DP_GZ_EVQ_UNSOL_CREDIT_SEQ_BITS:
		/* Driver doesn't support unsolicited-event credits yet, so
		 * we don't care
		 */
		return 0;
	case ESE_EF100_DP_GZ_NMMU_GROUP_SIZE:
		/* Driver doesn't manage the NMMU (so we don't care) */
		return 0;
	case ESE_EF100_DP_GZ_RX_L4_CSUM_PROTOCOLS:
		/* Driver uses CHECKSUM_COMPLETE, so we don't care about
		 * protocol checksum validation
		 */
		return 0;
	case ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN:
		nic_data->tso_max_hdr_len = min_t(u64, reader->value, 0xffff);
		return 0;
	case ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS:
		/* We always put HDR_NUM_SEGS=1 in our TSO descriptors */
		if (!reader->value) {
			netif_err(efx, probe, efx->net_dev,
				  "TSO_MAX_HDR_NUM_SEGS < 1\n");
			return -EOPNOTSUPP;
		}
		return 0;
	case ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY:
	case ESE_EF100_DP_GZ_TXQ_SIZE_GRANULARITY:
		/* Our TXQ and RXQ sizes are always power-of-two and thus divisible by
		 * EFX_MIN_DMAQ_SIZE, so we just need to check that
		 * EFX_MIN_DMAQ_SIZE is divisible by GRANULARITY.
		 * This is very unlikely to fail.
		 */
		if (!reader->value || reader->value > EFX_MIN_DMAQ_SIZE ||
		    EFX_MIN_DMAQ_SIZE % (u32)reader->value) {
			netif_err(efx, probe, efx->net_dev,
				  "%s size granularity is %llu, can't guarantee safety\n",
				  reader->type == ESE_EF100_DP_GZ_RXQ_SIZE_GRANULARITY ? "RXQ" : "TXQ",
				  reader->value);
			return -EOPNOTSUPP;
		}
		return 0;
	case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN:
		nic_data->tso_max_payload_len = min_t(u64, reader->value, GSO_MAX_SIZE);
		efx->net_dev->gso_max_size = nic_data->tso_max_payload_len;
		return 0;
	case ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS:
		nic_data->tso_max_payload_num_segs = min_t(u64, reader->value, 0xffff);
		efx->net_dev->gso_max_segs = nic_data->tso_max_payload_num_segs;
		return 0;
	case ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES:
		nic_data->tso_max_frames = min_t(u64, reader->value, 0xffff);
		return 0;
	case ESE_EF100_DP_GZ_COMPAT:
		if (reader->value) {
			netif_err(efx, probe, efx->net_dev,
				  "DP_COMPAT has unknown bits %#llx, driver not compatible with this hw\n",
				  reader->value);
			return -EOPNOTSUPP;
		}
		return 0;
	case ESE_EF100_DP_GZ_MEM2MEM_MAX_LEN:
		/* Driver doesn't use mem2mem transfers */
		return 0;
	case ESE_EF100_DP_GZ_EVQ_TIMER_TICK_NANOS:
		/* Driver doesn't currently use EVQ_TIMER */
		return 0;
	case ESE_EF100_DP_GZ_NMMU_PAGE_SIZES:
		/* Driver doesn't manage the NMMU (so we don't care) */
		return 0;
	case ESE_EF100_DP_GZ_VI_STRIDES:
		/* We never try to set the VI stride, and we don't rely on
		 * being able to find VIs past VI 0 until after we've learned
		 * the current stride from MC_CMD_GET_CAPABILITIES.
		 * So the value of this shouldn't matter.
		 */
		if (reader->value != ESE_EF100_DP_GZ_VI_STRIDES_DEFAULT)
			netif_dbg(efx, probe, efx->net_dev,
				  "NIC has other than default VI_STRIDES (mask "
				  "%#llx), early probing might use wrong one\n",
				  reader->value);
		return 0;
	case ESE_EF100_DP_GZ_RX_MAX_RUNT:
		/* Driver doesn't look at L2_STATUS:LEN_ERR bit, so we don't
		 * care whether it indicates runt or overlength for any given
		 * packet, so we don't care about this parameter.
		 */
		return 0;
	default:
		/* Host interface says "Drivers should ignore design parameters
		 * that they do not recognise."
		 */
		netif_dbg(efx, probe, efx->net_dev,
			  "Ignoring unrecognised design parameter %u\n",
			  reader->type);
		return 0;
	}
}

static int ef100_check_design_params(struct efx_nic *efx)
{
	struct ef100_tlv_state reader = {};
	u32 total_len, offset = 0;
	efx_dword_t reg;
	int rc = 0, i;
	u32 data;

	efx_readd(efx, &reg, ER_GZ_PARAMS_TLV_LEN);
	total_len = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
	netif_dbg(efx, probe, efx->net_dev, "%u bytes of design parameters\n",
		  total_len);
	while (offset < total_len) {
		efx_readd(efx, &reg, ER_GZ_PARAMS_TLV + offset);
		data = EFX_DWORD_FIELD(reg, EFX_DWORD_0);
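		/* Feed the dword into the TLV parser one byte at a time */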
1057*4882a593Smuzhiyun for (i = 0; i < sizeof(data); i++) {
1058*4882a593Smuzhiyun rc = ef100_tlv_feed(&reader, data);
1059*4882a593Smuzhiyun /* Got a complete value? */
1060*4882a593Smuzhiyun if (!rc && reader.state == EF100_TLV_TYPE)
1061*4882a593Smuzhiyun rc = ef100_process_design_param(efx, &reader);
1062*4882a593Smuzhiyun if (rc)
1063*4882a593Smuzhiyun goto out;
1064*4882a593Smuzhiyun data >>= 8;
1065*4882a593Smuzhiyun offset++;
1066*4882a593Smuzhiyun }
1067*4882a593Smuzhiyun }
1068*4882a593Smuzhiyun /* Check we didn't end halfway through a TLV entry, which could either
1069*4882a593Smuzhiyun * mean that the TLV stream is truncated or just that it's corrupted
1070*4882a593Smuzhiyun * and our state machine is out of sync.
1071*4882a593Smuzhiyun */
1072*4882a593Smuzhiyun if (reader.state != EF100_TLV_TYPE) {
1073*4882a593Smuzhiyun if (reader.state == EF100_TLV_TYPE_CONT)
1074*4882a593Smuzhiyun netif_err(efx, probe, efx->net_dev,
1075*4882a593Smuzhiyun "truncated design parameter (incomplete type %u)\n",
1076*4882a593Smuzhiyun reader.type);
1077*4882a593Smuzhiyun else
1078*4882a593Smuzhiyun netif_err(efx, probe, efx->net_dev,
1079*4882a593Smuzhiyun "truncated design parameter %u\n",
1080*4882a593Smuzhiyun reader.type);
1081*4882a593Smuzhiyun rc = -EIO;
1082*4882a593Smuzhiyun }
1083*4882a593Smuzhiyun out:
1084*4882a593Smuzhiyun return rc;
1085*4882a593Smuzhiyun }
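
/* Note: ef100_check_design_params() returns 0 when the whole stream parses
 * cleanly (unrecognised parameters are ignored), and a negative error - for
 * example -EIO if the stream ends part-way through an entry - when it does
 * not.  The probe path below treats any non-zero return as an unsupported
 * configuration and fails the probe.
 */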

/* NIC probe and remove
 */
static int ef100_probe_main(struct efx_nic *efx)
{
	unsigned int bar_size = resource_size(&efx->pci_dev->resource[efx->mem_bar]);
	struct net_device *net_dev = efx->net_dev;
	struct ef100_nic_data *nic_data;
	char fw_version[32];
	int i, rc;

	if (WARN_ON(bar_size == 0))
		return -EIO;

	nic_data = kzalloc(sizeof(*nic_data), GFP_KERNEL);
	if (!nic_data)
		return -ENOMEM;
	efx->nic_data = nic_data;
	nic_data->efx = efx;
	net_dev->features |= efx->type->offload_features;
	net_dev->hw_features |= efx->type->offload_features;

	/* Populate design-parameter defaults */
	nic_data->tso_max_hdr_len = ESE_EF100_DP_GZ_TSO_MAX_HDR_LEN_DEFAULT;
	nic_data->tso_max_frames = ESE_EF100_DP_GZ_TSO_MAX_NUM_FRAMES_DEFAULT;
	nic_data->tso_max_payload_num_segs = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_NUM_SEGS_DEFAULT;
	nic_data->tso_max_payload_len = ESE_EF100_DP_GZ_TSO_MAX_PAYLOAD_LEN_DEFAULT;
	net_dev->gso_max_segs = ESE_EF100_DP_GZ_TSO_MAX_HDR_NUM_SEGS_DEFAULT;
	/* Read design parameters */
	rc = ef100_check_design_params(efx);
	if (rc) {
		netif_err(efx, probe, efx->net_dev,
			  "Unsupported design parameters\n");
		goto fail;
	}
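
	/* The defaults populated above only stand if the NIC's design-parameter
	 * stream does not override them; entries that are present are expected
	 * to be applied by ef100_process_design_param() (the relevant cases sit
	 * earlier in this file).
	 */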

	/* we assume later that we can copy from this buffer in dwords */
	BUILD_BUG_ON(MCDI_CTL_SDU_LEN_MAX_V2 % 4);

	/* MCDI buffers must be 256 byte aligned. */
	rc = efx_nic_alloc_buffer(efx, &nic_data->mcdi_buf, MCDI_BUF_LEN,
				  GFP_KERNEL);
	if (rc)
		goto fail;

	/* Get the MC's warm boot count. In case it's rebooting right
	 * now, be prepared to retry.
	 */
	i = 0;
	for (;;) {
		rc = ef100_get_warm_boot_count(efx);
		if (rc >= 0)
			break;
		if (++i == 5)
			goto fail;
		ssleep(1);
	}
	nic_data->warm_boot_count = rc;
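	/* Cache the warm boot count read above; the rest of the driver is
	 * expected to compare later readings against this value to detect an
	 * MC reboot (that comparison is not part of this function).
	 */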

	/* In case we're recovering from a crash (kexec), we want to
	 * cancel any outstanding request by the previous user of this
	 * function. We send a special message using the least
	 * significant bits of the 'high' (doorbell) register.
	 */
	_efx_writed(efx, cpu_to_le32(1), efx_reg(efx, ER_GZ_MC_DB_HWRD));

	/* Post-IO section. */

	rc = efx_mcdi_init(efx);
	if (!rc && efx->mcdi->fn_flags &
		   (1 << MC_CMD_DRV_ATTACH_EXT_OUT_FLAG_NO_ACTIVE_PORT)) {
		netif_info(efx, probe, efx->net_dev,
			   "No network port on this PCI function");
		rc = -ENODEV;
	}
	if (rc)
		goto fail;
	/* Reset (most) configuration for this function */
	rc = efx_mcdi_reset(efx, RESET_TYPE_ALL);
	if (rc)
		goto fail;
	/* Enable event logging */
	rc = efx_mcdi_log_ctrl(efx, true, false, 0);
	if (rc)
		goto fail;

	rc = efx_get_pf_index(efx, &nic_data->pf_index);
	if (rc)
		goto fail;

	rc = efx_ef100_init_datapath_caps(efx);
	if (rc < 0)
		goto fail;

	efx->max_vis = EF100_MAX_VIS;

	rc = efx_mcdi_port_get_number(efx);
	if (rc < 0)
		goto fail;
	efx->port_num = rc;

	efx_mcdi_print_fwver(efx, fw_version, sizeof(fw_version));
	netif_dbg(efx, drv, efx->net_dev, "Firmware version %s\n", fw_version);

	if (compare_versions(fw_version, "1.1.0.1000") < 0) {
		netif_info(efx, drv, efx->net_dev, "Firmware uses old event descriptors\n");
		rc = -EINVAL;
		goto fail;
	}

	if (efx_has_cap(efx, UNSOL_EV_CREDIT_SUPPORTED)) {
		netif_info(efx, drv, efx->net_dev, "Firmware uses unsolicited-event credits\n");
		rc = -EINVAL;
		goto fail;
	}
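
	/* Both firmware checks above are hard probe failures rather than
	 * fallbacks: this driver supports neither the old event descriptor
	 * layout (firmware before 1.1.0.1000) nor unsolicited-event credits,
	 * so rc is forced to -EINVAL and the probe is abandoned.
	 */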

	rc = ef100_phy_probe(efx);
	if (rc)
		goto fail;

	down_write(&efx->filter_sem);
	rc = ef100_filter_table_probe(efx);
	up_write(&efx->filter_sem);
	if (rc)
		goto fail;

	netdev_rss_key_fill(efx->rss_context.rx_hash_key,
			    sizeof(efx->rss_context.rx_hash_key));

	/* Don't fail init if RSS setup doesn't work. */
	efx_mcdi_push_default_indir_table(efx, efx->n_rx_channels);

	rc = ef100_register_netdev(efx);
	if (rc)
		goto fail;

	return 0;
fail:
	return rc;
}
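
/* On any failure above, ef100_probe_main() simply returns rc from the 'fail'
 * label without unwinding what it has already set up; tearing down partially
 * initialised state is presumably left to the remove path (see ef100_remove()
 * below, whose 'if (nic_data)' guard tolerates an incomplete probe).
 */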

int ef100_probe_pf(struct efx_nic *efx)
{
	struct net_device *net_dev = efx->net_dev;
	struct ef100_nic_data *nic_data;
	int rc = ef100_probe_main(efx);

	if (rc)
		goto fail;

	nic_data = efx->nic_data;
	rc = ef100_get_mac_address(efx, net_dev->perm_addr);
	if (rc)
		goto fail;
	/* Assign MAC address */
	memcpy(net_dev->dev_addr, net_dev->perm_addr, ETH_ALEN);
	memcpy(nic_data->port_id, net_dev->perm_addr, ETH_ALEN);

	return 0;

fail:
	return rc;
}

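/* VF probe shares ef100_probe_main() with the PF path; the PF-only extra step
 * above is fetching the permanent MAC address from firmware and copying it
 * into both net_dev->dev_addr and nic_data->port_id.
 */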
int ef100_probe_vf(struct efx_nic *efx)
{
	return ef100_probe_main(efx);
}

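/* Tear-down runs in roughly the reverse order of ef100_probe_main():
 * unregister the netdev first, then remove the filter table, finalise the
 * channels, free the PHY data, detach and finalise MCDI, and finally release
 * the MCDI buffer and nic_data.  The 'if (nic_data)' check guards against
 * being called when nic_data was never allocated.
 */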
void ef100_remove(struct efx_nic *efx)
{
	struct ef100_nic_data *nic_data = efx->nic_data;

	ef100_unregister_netdev(efx);

	down_write(&efx->filter_sem);
	efx_mcdi_filter_table_remove(efx);
	up_write(&efx->filter_sem);
	efx_fini_channels(efx);
	kfree(efx->phy_data);
	efx->phy_data = NULL;
	efx_mcdi_detach(efx);
	efx_mcdi_fini(efx);
	if (nic_data)
		efx_nic_free_buffer(efx, &nic_data->mcdi_buf);
	kfree(nic_data);
	efx->nic_data = NULL;
}