1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /****************************************************************************
3*4882a593Smuzhiyun * Driver for Solarflare network controllers and boards
4*4882a593Smuzhiyun * Copyright 2019 Solarflare Communications Inc.
5*4882a593Smuzhiyun *
6*4882a593Smuzhiyun * This program is free software; you can redistribute it and/or modify it
7*4882a593Smuzhiyun * under the terms of the GNU General Public License version 2 as published
8*4882a593Smuzhiyun * by the Free Software Foundation, incorporated herein by reference.
9*4882a593Smuzhiyun */
10*4882a593Smuzhiyun
11*4882a593Smuzhiyun #include "net_driver.h"
12*4882a593Smuzhiyun #include "efx.h"
13*4882a593Smuzhiyun #include "nic.h"
14*4882a593Smuzhiyun #include "mcdi_functions.h"
15*4882a593Smuzhiyun #include "mcdi.h"
16*4882a593Smuzhiyun #include "mcdi_pcol.h"
17*4882a593Smuzhiyun
efx_mcdi_free_vis(struct efx_nic * efx)18*4882a593Smuzhiyun int efx_mcdi_free_vis(struct efx_nic *efx)
19*4882a593Smuzhiyun {
20*4882a593Smuzhiyun MCDI_DECLARE_BUF_ERR(outbuf);
21*4882a593Smuzhiyun size_t outlen;
22*4882a593Smuzhiyun int rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FREE_VIS, NULL, 0,
23*4882a593Smuzhiyun outbuf, sizeof(outbuf), &outlen);
24*4882a593Smuzhiyun
25*4882a593Smuzhiyun /* -EALREADY means nothing to free, so ignore */
26*4882a593Smuzhiyun if (rc == -EALREADY)
27*4882a593Smuzhiyun rc = 0;
28*4882a593Smuzhiyun if (rc)
29*4882a593Smuzhiyun efx_mcdi_display_error(efx, MC_CMD_FREE_VIS, 0, outbuf, outlen,
30*4882a593Smuzhiyun rc);
31*4882a593Smuzhiyun return rc;
32*4882a593Smuzhiyun }
33*4882a593Smuzhiyun
efx_mcdi_alloc_vis(struct efx_nic * efx,unsigned int min_vis,unsigned int max_vis,unsigned int * vi_base,unsigned int * allocated_vis)34*4882a593Smuzhiyun int efx_mcdi_alloc_vis(struct efx_nic *efx, unsigned int min_vis,
35*4882a593Smuzhiyun unsigned int max_vis, unsigned int *vi_base,
36*4882a593Smuzhiyun unsigned int *allocated_vis)
37*4882a593Smuzhiyun {
38*4882a593Smuzhiyun MCDI_DECLARE_BUF(outbuf, MC_CMD_ALLOC_VIS_OUT_LEN);
39*4882a593Smuzhiyun MCDI_DECLARE_BUF(inbuf, MC_CMD_ALLOC_VIS_IN_LEN);
40*4882a593Smuzhiyun size_t outlen;
41*4882a593Smuzhiyun int rc;
42*4882a593Smuzhiyun
43*4882a593Smuzhiyun MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MIN_VI_COUNT, min_vis);
44*4882a593Smuzhiyun MCDI_SET_DWORD(inbuf, ALLOC_VIS_IN_MAX_VI_COUNT, max_vis);
45*4882a593Smuzhiyun rc = efx_mcdi_rpc(efx, MC_CMD_ALLOC_VIS, inbuf, sizeof(inbuf),
46*4882a593Smuzhiyun outbuf, sizeof(outbuf), &outlen);
47*4882a593Smuzhiyun if (rc != 0)
48*4882a593Smuzhiyun return rc;
49*4882a593Smuzhiyun
50*4882a593Smuzhiyun if (outlen < MC_CMD_ALLOC_VIS_OUT_LEN)
51*4882a593Smuzhiyun return -EIO;
52*4882a593Smuzhiyun
53*4882a593Smuzhiyun netif_dbg(efx, drv, efx->net_dev, "base VI is A0x%03x\n",
54*4882a593Smuzhiyun MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE));
55*4882a593Smuzhiyun
56*4882a593Smuzhiyun if (vi_base)
57*4882a593Smuzhiyun *vi_base = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_BASE);
58*4882a593Smuzhiyun if (allocated_vis)
59*4882a593Smuzhiyun *allocated_vis = MCDI_DWORD(outbuf, ALLOC_VIS_OUT_VI_COUNT);
60*4882a593Smuzhiyun return 0;
61*4882a593Smuzhiyun }
62*4882a593Smuzhiyun
/* Allocate the DMA buffer backing @channel's event queue ring:
 * one efx_qword_t per event queue entry (eventq_mask + 1 entries).
 * Returns 0 or a negative error from efx_nic_alloc_buffer().
 */
int efx_mcdi_ev_probe(struct efx_channel *channel)
{
	return efx_nic_alloc_buffer(channel->efx, &channel->eventq.buf,
				    (channel->eventq_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}
70*4882a593Smuzhiyun
/* Create @channel's event queue on the MC (MC_CMD_INIT_EVQ).
 * @v1_cut_thru: value of the CUT_THRU flag when using the v1 request.
 * @v2: use the INIT_EVQ_V2 request format, letting firmware choose the
 *      queue options itself; the flags actually used appear in the reply.
 * Returns the MCDI return code (0 on success).
 */
int efx_mcdi_ev_init(struct efx_channel *channel, bool v1_cut_thru, bool v2)
{
	MCDI_DECLARE_BUF(inbuf,
			 MC_CMD_INIT_EVQ_V2_IN_LEN(EFX_MAX_EVQ_SIZE * 8 /
						   EFX_BUF_SIZE));
	MCDI_DECLARE_BUF(outbuf, MC_CMD_INIT_EVQ_V2_OUT_LEN);
	size_t entries = channel->eventq.buf.len / EFX_BUF_SIZE;
	struct efx_nic *efx = channel->efx;
	size_t inlen, outlen;
	dma_addr_t dma_addr;
	int rc, i;

	/* Fill event queue with all ones (i.e. empty events) */
	memset(channel->eventq.buf.addr, 0xff, channel->eventq.buf.len);

	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_SIZE, channel->eventq_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_INSTANCE, channel->channel);
	/* INIT_EVQ expects index in vector table, not absolute */
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_IRQ_NUM, channel->channel);
	/* Timer-based and event-count-based moderation both start disabled */
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_MODE,
		       MC_CMD_INIT_EVQ_IN_TMR_MODE_DIS);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_LOAD, 0);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_TMR_RELOAD, 0);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_MODE,
		       MC_CMD_INIT_EVQ_IN_COUNT_MODE_DIS);
	MCDI_SET_DWORD(inbuf, INIT_EVQ_IN_COUNT_THRSHLD, 0);

	if (v2) {
		/* Use the new generic approach to specifying event queue
		 * configuration, requesting lower latency or higher throughput.
		 * The options that actually get used appear in the output.
		 */
		MCDI_POPULATE_DWORD_2(inbuf, INIT_EVQ_V2_IN_FLAGS,
				      INIT_EVQ_V2_IN_FLAG_INTERRUPTING, 1,
				      INIT_EVQ_V2_IN_FLAG_TYPE,
				      MC_CMD_INIT_EVQ_V2_IN_FLAG_TYPE_AUTO);
	} else {
		MCDI_POPULATE_DWORD_4(inbuf, INIT_EVQ_IN_FLAGS,
				      INIT_EVQ_IN_FLAG_INTERRUPTING, 1,
				      INIT_EVQ_IN_FLAG_RX_MERGE, 1,
				      INIT_EVQ_IN_FLAG_TX_MERGE, 1,
				      INIT_EVQ_IN_FLAG_CUT_THRU, v1_cut_thru);
	}

	/* The queue buffer is described to the MC as a list of
	 * EFX_BUF_SIZE-sized DMA chunks.
	 */
	dma_addr = channel->eventq.buf.dma_addr;
	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_EVQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_EVQ_IN_LEN(entries);

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_EVQ, inbuf, inlen,
			  outbuf, sizeof(outbuf), &outlen);

	/* A v2-sized reply reports which flags firmware actually chose */
	if (outlen >= MC_CMD_INIT_EVQ_V2_OUT_LEN)
		netif_dbg(efx, drv, efx->net_dev,
			  "Channel %d using event queue flags %08x\n",
			  channel->channel,
			  MCDI_DWORD(outbuf, INIT_EVQ_V2_OUT_FLAGS));

	return rc;
}
134*4882a593Smuzhiyun
/* Free the DMA buffer backing @channel's event queue ring
 * (counterpart of efx_mcdi_ev_probe()).
 */
void efx_mcdi_ev_remove(struct efx_channel *channel)
{
	efx_nic_free_buffer(channel->efx, &channel->eventq.buf);
}
139*4882a593Smuzhiyun
efx_mcdi_ev_fini(struct efx_channel * channel)140*4882a593Smuzhiyun void efx_mcdi_ev_fini(struct efx_channel *channel)
141*4882a593Smuzhiyun {
142*4882a593Smuzhiyun MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_EVQ_IN_LEN);
143*4882a593Smuzhiyun MCDI_DECLARE_BUF_ERR(outbuf);
144*4882a593Smuzhiyun struct efx_nic *efx = channel->efx;
145*4882a593Smuzhiyun size_t outlen;
146*4882a593Smuzhiyun int rc;
147*4882a593Smuzhiyun
148*4882a593Smuzhiyun MCDI_SET_DWORD(inbuf, FINI_EVQ_IN_INSTANCE, channel->channel);
149*4882a593Smuzhiyun
150*4882a593Smuzhiyun rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_EVQ, inbuf, sizeof(inbuf),
151*4882a593Smuzhiyun outbuf, sizeof(outbuf), &outlen);
152*4882a593Smuzhiyun
153*4882a593Smuzhiyun if (rc && rc != -EALREADY)
154*4882a593Smuzhiyun goto fail;
155*4882a593Smuzhiyun
156*4882a593Smuzhiyun return;
157*4882a593Smuzhiyun
158*4882a593Smuzhiyun fail:
159*4882a593Smuzhiyun efx_mcdi_display_error(efx, MC_CMD_FINI_EVQ, MC_CMD_FINI_EVQ_IN_LEN,
160*4882a593Smuzhiyun outbuf, outlen, rc);
161*4882a593Smuzhiyun }
162*4882a593Smuzhiyun
/* Create a TX queue on the MC (MC_CMD_INIT_TXQ) for @tx_queue.
 * Checksum-offload flags are derived from the queue type.  If the MC
 * cannot allocate a TSOv2 context (-ENOSPC), the command is retried
 * with TSOv2 disabled.
 * Returns 0 on success or a negative MCDI error code.
 */
int efx_mcdi_tx_init(struct efx_tx_queue *tx_queue)
{
	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_TXQ_IN_LEN(EFX_MAX_DMAQ_SIZE * 8 /
						       EFX_BUF_SIZE));
	bool csum_offload = tx_queue->type & EFX_TXQ_TYPE_OUTER_CSUM;
	bool inner_csum = tx_queue->type & EFX_TXQ_TYPE_INNER_CSUM;
	size_t entries = tx_queue->txd.buf.len / EFX_BUF_SIZE;
	struct efx_channel *channel = tx_queue->channel;
	struct efx_nic *efx = tx_queue->efx;
	dma_addr_t dma_addr;
	size_t inlen;
	int rc, i;

	BUILD_BUG_ON(MC_CMD_INIT_TXQ_OUT_LEN != 0);

	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_SIZE, tx_queue->ptr_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_TARGET_EVQ, channel->channel);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_LABEL, tx_queue->label);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_INSTANCE, tx_queue->queue);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_OWNER_ID, 0);
	MCDI_SET_DWORD(inbuf, INIT_TXQ_IN_PORT_ID, efx->vport_id);

	dma_addr = tx_queue->txd.buf.dma_addr;

	netif_dbg(efx, hw, efx->net_dev, "pushing TXQ %d. %zu entries (%llx)\n",
		  tx_queue->queue, entries, (u64)dma_addr);

	/* Describe the descriptor ring as EFX_BUF_SIZE-sized DMA chunks */
	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_TXQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	inlen = MC_CMD_INIT_TXQ_IN_LEN(entries);

	/* Loops at most twice: a second pass only happens after the
	 * -ENOSPC retry clears tso_version below.
	 */
	do {
		bool tso_v2 = tx_queue->tso_version == 2;

		/* TSOv2 implies IP header checksum offload for TSO frames,
		 * so we can safely disable IP header checksum offload for
		 * everything else.  If we don't have TSOv2, then we have to
		 * enable IP header checksum offload, which is strictly
		 * incorrect but better than breaking TSO.
		 */
		MCDI_POPULATE_DWORD_6(inbuf, INIT_TXQ_IN_FLAGS,
				/* This flag was removed from mcdi_pcol.h for
				 * the non-_EXT version of INIT_TXQ.  However,
				 * firmware still honours it.
				 */
				INIT_TXQ_EXT_IN_FLAG_TSOV2_EN, tso_v2,
				INIT_TXQ_IN_FLAG_IP_CSUM_DIS, !(csum_offload && tso_v2),
				INIT_TXQ_IN_FLAG_TCP_CSUM_DIS, !csum_offload,
				INIT_TXQ_EXT_IN_FLAG_TIMESTAMP, tx_queue->timestamping,
				INIT_TXQ_IN_FLAG_INNER_IP_CSUM_EN, inner_csum && !tso_v2,
				INIT_TXQ_IN_FLAG_INNER_TCP_CSUM_EN, inner_csum);

		rc = efx_mcdi_rpc_quiet(efx, MC_CMD_INIT_TXQ, inbuf, inlen,
					NULL, 0, NULL);
		if (rc == -ENOSPC && tso_v2) {
			/* Retry without TSOv2 if we're short on contexts. */
			tx_queue->tso_version = 0;
			netif_warn(efx, probe, efx->net_dev,
				   "TSOv2 context not available to segment in "
				   "hardware. TCP performance may be reduced.\n"
				   );
		} else if (rc) {
			efx_mcdi_display_error(efx, MC_CMD_INIT_TXQ,
					       MC_CMD_INIT_TXQ_EXT_IN_LEN,
					       NULL, 0, rc);
			goto fail;
		}
	} while (rc);

	return 0;

fail:
	return rc;
}
240*4882a593Smuzhiyun
/* Free the DMA buffer backing @tx_queue's descriptor ring. */
void efx_mcdi_tx_remove(struct efx_tx_queue *tx_queue)
{
	efx_nic_free_buffer(tx_queue->efx, &tx_queue->txd.buf);
}
245*4882a593Smuzhiyun
efx_mcdi_tx_fini(struct efx_tx_queue * tx_queue)246*4882a593Smuzhiyun void efx_mcdi_tx_fini(struct efx_tx_queue *tx_queue)
247*4882a593Smuzhiyun {
248*4882a593Smuzhiyun MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_TXQ_IN_LEN);
249*4882a593Smuzhiyun MCDI_DECLARE_BUF_ERR(outbuf);
250*4882a593Smuzhiyun struct efx_nic *efx = tx_queue->efx;
251*4882a593Smuzhiyun size_t outlen;
252*4882a593Smuzhiyun int rc;
253*4882a593Smuzhiyun
254*4882a593Smuzhiyun MCDI_SET_DWORD(inbuf, FINI_TXQ_IN_INSTANCE,
255*4882a593Smuzhiyun tx_queue->queue);
256*4882a593Smuzhiyun
257*4882a593Smuzhiyun rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_TXQ, inbuf, sizeof(inbuf),
258*4882a593Smuzhiyun outbuf, sizeof(outbuf), &outlen);
259*4882a593Smuzhiyun
260*4882a593Smuzhiyun if (rc && rc != -EALREADY)
261*4882a593Smuzhiyun goto fail;
262*4882a593Smuzhiyun
263*4882a593Smuzhiyun return;
264*4882a593Smuzhiyun
265*4882a593Smuzhiyun fail:
266*4882a593Smuzhiyun efx_mcdi_display_error(efx, MC_CMD_FINI_TXQ, MC_CMD_FINI_TXQ_IN_LEN,
267*4882a593Smuzhiyun outbuf, outlen, rc);
268*4882a593Smuzhiyun }
269*4882a593Smuzhiyun
/* Allocate the DMA buffer backing @rx_queue's descriptor ring:
 * one efx_qword_t per ring entry (ptr_mask + 1 entries).
 * Returns 0 or a negative error from efx_nic_alloc_buffer().
 */
int efx_mcdi_rx_probe(struct efx_rx_queue *rx_queue)
{
	return efx_nic_alloc_buffer(rx_queue->efx, &rx_queue->rxd.buf,
				    (rx_queue->ptr_mask + 1) *
				    sizeof(efx_qword_t),
				    GFP_KERNEL);
}
277*4882a593Smuzhiyun
/* Create an RX queue on the MC (MC_CMD_INIT_RXQ) for @rx_queue.
 * Requests the RX prefix and timestamping; on EF100 the per-buffer
 * size is passed explicitly, otherwise 0 lets firmware use its default.
 * Failure is reported with netdev_WARN rather than returned.
 */
void efx_mcdi_rx_init(struct efx_rx_queue *rx_queue)
{
	struct efx_channel *channel = efx_rx_queue_channel(rx_queue);
	size_t entries = rx_queue->rxd.buf.len / EFX_BUF_SIZE;
	MCDI_DECLARE_BUF(inbuf, MC_CMD_INIT_RXQ_V4_IN_LEN);
	struct efx_nic *efx = rx_queue->efx;
	unsigned int buffer_size;
	dma_addr_t dma_addr;
	int rc;
	int i;
	BUILD_BUG_ON(MC_CMD_INIT_RXQ_OUT_LEN != 0);

	/* Reset software scatter state before the queue starts */
	rx_queue->scatter_n = 0;
	rx_queue->scatter_len = 0;
	/* Only EF100 needs an explicit RX buffer size; 0 means default */
	if (efx->type->revision == EFX_REV_EF100)
		buffer_size = efx->rx_page_buf_step;
	else
		buffer_size = 0;

	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_SIZE, rx_queue->ptr_mask + 1);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_TARGET_EVQ, channel->channel);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_LABEL, efx_rx_queue_index(rx_queue));
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_INSTANCE,
		       efx_rx_queue_index(rx_queue));
	MCDI_POPULATE_DWORD_2(inbuf, INIT_RXQ_IN_FLAGS,
			      INIT_RXQ_IN_FLAG_PREFIX, 1,
			      INIT_RXQ_IN_FLAG_TIMESTAMP, 1);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_OWNER_ID, 0);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_IN_PORT_ID, efx->vport_id);
	MCDI_SET_DWORD(inbuf, INIT_RXQ_V4_IN_BUFFER_SIZE_BYTES, buffer_size);

	dma_addr = rx_queue->rxd.buf.dma_addr;

	netif_dbg(efx, hw, efx->net_dev, "pushing RXQ %d. %zu entries (%llx)\n",
		  efx_rx_queue_index(rx_queue), entries, (u64)dma_addr);

	/* Describe the descriptor ring as EFX_BUF_SIZE-sized DMA chunks */
	for (i = 0; i < entries; ++i) {
		MCDI_SET_ARRAY_QWORD(inbuf, INIT_RXQ_IN_DMA_ADDR, i, dma_addr);
		dma_addr += EFX_BUF_SIZE;
	}

	rc = efx_mcdi_rpc(efx, MC_CMD_INIT_RXQ, inbuf, sizeof(inbuf),
			  NULL, 0, NULL);
	if (rc)
		netdev_WARN(efx->net_dev, "failed to initialise RXQ %d\n",
			    efx_rx_queue_index(rx_queue));
}
325*4882a593Smuzhiyun
/* Free the DMA buffer backing @rx_queue's descriptor ring. */
void efx_mcdi_rx_remove(struct efx_rx_queue *rx_queue)
{
	efx_nic_free_buffer(rx_queue->efx, &rx_queue->rxd.buf);
}
330*4882a593Smuzhiyun
efx_mcdi_rx_fini(struct efx_rx_queue * rx_queue)331*4882a593Smuzhiyun void efx_mcdi_rx_fini(struct efx_rx_queue *rx_queue)
332*4882a593Smuzhiyun {
333*4882a593Smuzhiyun MCDI_DECLARE_BUF(inbuf, MC_CMD_FINI_RXQ_IN_LEN);
334*4882a593Smuzhiyun MCDI_DECLARE_BUF_ERR(outbuf);
335*4882a593Smuzhiyun struct efx_nic *efx = rx_queue->efx;
336*4882a593Smuzhiyun size_t outlen;
337*4882a593Smuzhiyun int rc;
338*4882a593Smuzhiyun
339*4882a593Smuzhiyun MCDI_SET_DWORD(inbuf, FINI_RXQ_IN_INSTANCE,
340*4882a593Smuzhiyun efx_rx_queue_index(rx_queue));
341*4882a593Smuzhiyun
342*4882a593Smuzhiyun rc = efx_mcdi_rpc_quiet(efx, MC_CMD_FINI_RXQ, inbuf, sizeof(inbuf),
343*4882a593Smuzhiyun outbuf, sizeof(outbuf), &outlen);
344*4882a593Smuzhiyun
345*4882a593Smuzhiyun if (rc && rc != -EALREADY)
346*4882a593Smuzhiyun goto fail;
347*4882a593Smuzhiyun
348*4882a593Smuzhiyun return;
349*4882a593Smuzhiyun
350*4882a593Smuzhiyun fail:
351*4882a593Smuzhiyun efx_mcdi_display_error(efx, MC_CMD_FINI_RXQ, MC_CMD_FINI_RXQ_IN_LEN,
352*4882a593Smuzhiyun outbuf, outlen, rc);
353*4882a593Smuzhiyun }
354*4882a593Smuzhiyun
/* Tear down all RX and TX queues and wait for the resulting flushes to
 * complete.  Returns 0 on success or -ETIMEDOUT if some queues failed
 * to flush within EFX_MAX_FLUSH_TIME.
 */
int efx_fini_dmaq(struct efx_nic *efx)
{
	struct efx_tx_queue *tx_queue;
	struct efx_rx_queue *rx_queue;
	struct efx_channel *channel;
	int pending;

	/* If the MC has just rebooted, the TX/RX queues will have already been
	 * torn down, but efx->active_queues needs to be set to zero.
	 */
	if (efx->must_realloc_vis) {
		atomic_set(&efx->active_queues, 0);
		return 0;
	}

	/* Do not attempt to write to the NIC during EEH recovery */
	if (efx->state != STATE_RECOVERY) {
		efx_for_each_channel(channel, efx) {
			efx_for_each_channel_rx_queue(rx_queue, channel)
				efx_mcdi_rx_fini(rx_queue);
			efx_for_each_channel_tx_queue(tx_queue, channel)
				efx_mcdi_tx_fini(tx_queue);
		}

		/* Flush-complete events decrement active_queues and wake
		 * flush_wq; wait (bounded) for all of them to arrive.
		 */
		wait_event_timeout(efx->flush_wq,
				   atomic_read(&efx->active_queues) == 0,
				   msecs_to_jiffies(EFX_MAX_FLUSH_TIME));
		pending = atomic_read(&efx->active_queues);
		if (pending) {
			netif_err(efx, hw, efx->net_dev, "failed to flush %d queues\n",
				  pending);
			return -ETIMEDOUT;
		}
	}

	return 0;
}
392*4882a593Smuzhiyun
/* Translate a GET_CAPABILITIES VI window mode into a VI stride in bytes
 * and store it in efx->vi_stride.  Returns 0 on success or -EIO for an
 * unknown mode (efx->vi_stride is left unchanged in that case).
 */
int efx_mcdi_window_mode_to_stride(struct efx_nic *efx, u8 vi_window_mode)
{
	unsigned int stride;

	if (vi_window_mode == MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_8K)
		stride = 8192;
	else if (vi_window_mode ==
		 MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_16K)
		stride = 16384;
	else if (vi_window_mode ==
		 MC_CMD_GET_CAPABILITIES_V3_OUT_VI_WINDOW_MODE_64K)
		stride = 65536;
	else {
		netif_err(efx, probe, efx->net_dev,
			  "Unrecognised VI window mode %d\n",
			  vi_window_mode);
		return -EIO;
	}

	efx->vi_stride = stride;
	netif_dbg(efx, probe, efx->net_dev, "vi_stride = %u\n",
		  efx->vi_stride);
	return 0;
}
415*4882a593Smuzhiyun
efx_get_pf_index(struct efx_nic * efx,unsigned int * pf_index)416*4882a593Smuzhiyun int efx_get_pf_index(struct efx_nic *efx, unsigned int *pf_index)
417*4882a593Smuzhiyun {
418*4882a593Smuzhiyun MCDI_DECLARE_BUF(outbuf, MC_CMD_GET_FUNCTION_INFO_OUT_LEN);
419*4882a593Smuzhiyun size_t outlen;
420*4882a593Smuzhiyun int rc;
421*4882a593Smuzhiyun
422*4882a593Smuzhiyun rc = efx_mcdi_rpc(efx, MC_CMD_GET_FUNCTION_INFO, NULL, 0, outbuf,
423*4882a593Smuzhiyun sizeof(outbuf), &outlen);
424*4882a593Smuzhiyun if (rc)
425*4882a593Smuzhiyun return rc;
426*4882a593Smuzhiyun if (outlen < sizeof(outbuf))
427*4882a593Smuzhiyun return -EIO;
428*4882a593Smuzhiyun
429*4882a593Smuzhiyun *pf_index = MCDI_DWORD(outbuf, GET_FUNCTION_INFO_OUT_PF);
430*4882a593Smuzhiyun return 0;
431*4882a593Smuzhiyun }
432