/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 * Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT. See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/module.h>
#include <linux/interrupt.h>
#include <linux/pci.h>
#include <linux/firmware.h>
#include <net/vxlan.h>
#include <linux/kthread.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_nic.h"
#include "octeon_main.h"
#include "octeon_network.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_device.h"
#include "cn23xx_pf_device.h"
#include "liquidio_image.h"
#include "lio_vf_rep.h"

MODULE_AUTHOR("Cavium Networks, <support@cavium.com>");
MODULE_DESCRIPTION("Cavium LiquidIO Intelligent Server Adapter Driver");
MODULE_LICENSE("GPL");
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210SV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_210NV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_410NV_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);
MODULE_FIRMWARE(LIO_FW_DIR LIO_FW_BASE_NAME LIO_23XX_NAME
		"_" LIO_FW_NAME_TYPE_NIC LIO_FW_NAME_SUFFIX);

static int ddr_timeout = 10000;
module_param(ddr_timeout, int, 0644);
MODULE_PARM_DESC(ddr_timeout,
		 "Number of milliseconds to wait for DDR initialization. 0 waits for ddr_timeout to be set to non-zero value before starting to check");

#define DEFAULT_MSG_ENABLE (NETIF_MSG_DRV | NETIF_MSG_PROBE | NETIF_MSG_LINK)

static int debug = -1;
module_param(debug, int, 0644);
MODULE_PARM_DESC(debug, "NETIF_MSG debug bits");

static char fw_type[LIO_MAX_FW_TYPE_LEN] = LIO_FW_NAME_TYPE_AUTO;
module_param_string(fw_type, fw_type, sizeof(fw_type), 0444);
MODULE_PARM_DESC(fw_type, "Type of firmware to be loaded (default is \"auto\"), which uses firmware in flash, if present, else loads \"nic\".");

static u32 console_bitmask;
module_param(console_bitmask, int, 0644);
MODULE_PARM_DESC(console_bitmask,
		 "Bitmask indicating which consoles have debug output redirected to syslog.");

/**
 * octeon_console_debug_enabled - determines if a given console has debug enabled.
 * @console: console to check
 * Return: 1 = enabled. 0 otherwise
 */
static int octeon_console_debug_enabled(u32 console)
{
	return (console_bitmask >> (console)) & 0x1;
}
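/* Example: loading the module with console_bitmask=0x3 redirects debug
 * output from consoles 0 and 1 to syslog, since each console's enable
 * flag occupies one bit of the mask.
 */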

/* Polling interval for determining when NIC application is alive */
#define LIQUIDIO_STARTER_POLL_INTERVAL_MS 100

/* runtime link query interval */
#define LIQUIDIO_LINK_QUERY_INTERVAL_MS 1000
/* Update local time to octeon firmware every 60 seconds so that the
 * firmware uses the same time reference; this makes it easy to correlate
 * firmware-logged events/errors with host events, for debugging.
 */
#define LIO_SYNC_OCTEON_TIME_INTERVAL_MS 60000

/* time to wait for possible in-flight requests in milliseconds */
#define WAIT_INFLIGHT_REQUEST	msecs_to_jiffies(1000)

struct lio_trusted_vf_ctx {
	struct completion complete;
	int status;
};
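/* The completion/status pair above follows the driver's usual
 * synchronous-request pattern: a response callback (not shown in this
 * section) is expected to fill @status and signal @complete so the
 * requester can sleep until the firmware replies or the wait times out.
 */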

struct oct_link_status_resp {
	u64 rh;
	struct oct_link_info link_info;
	u64 status;
};

struct oct_timestamp_resp {
	u64 rh;
	u64 timestamp;
	u64 status;
};

#define OCT_TIMESTAMP_RESP_SIZE (sizeof(struct oct_timestamp_resp))

union tx_info {
	u64 u64;
	struct {
#ifdef __BIG_ENDIAN_BITFIELD
		u16 gso_size;
		u16 gso_segs;
		u32 reserved;
#else
		u32 reserved;
		u16 gso_segs;
		u16 gso_size;
#endif
	} s;
};
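/* The union lets the transmit path hand GSO parameters to the firmware as
 * a single 64-bit word: writing tx_info.s.gso_size and tx_info.s.gso_segs
 * and then sending tx_info.u64 yields the same wire layout on big- and
 * little-endian hosts, thanks to the mirrored field order above.
 */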

/* Octeon device properties to be used by the NIC module.
 * Each octeon device in the system will be represented
 * by this structure in the NIC module.
 */

#define OCTNIC_GSO_MAX_HEADER_SIZE 128
#define OCTNIC_GSO_MAX_SIZE \
	(CN23XX_DEFAULT_INPUT_JABBER - OCTNIC_GSO_MAX_HEADER_SIZE)

struct handshake {
	struct completion init;
	struct completion started;
	struct pci_dev *pci_dev;
	int init_ok;
	int started_ok;
};
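/* One handshake per Octeon device: liquidio_probe() initializes both
 * completions and signals @init on an early init failure; @started is
 * signaled once the NIC application on the card is up (completed
 * elsewhere in the driver). @init_ok and @started_ok record whether each
 * stage succeeded.
 */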

#ifdef CONFIG_PCI_IOV
static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs);
#endif

static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
				    char *prefix, char *suffix);

static int octeon_device_init(struct octeon_device *);
static int liquidio_stop(struct net_device *netdev);
static void liquidio_remove(struct pci_dev *pdev);
static int liquidio_probe(struct pci_dev *pdev,
			  const struct pci_device_id *ent);
static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
				      int linkstate);

static struct handshake handshake[MAX_OCTEON_DEVICES];
static struct completion first_stage;

static void octeon_droq_bh(struct tasklet_struct *t)
{
	int q_no;
	int reschedule = 0;
	struct octeon_device_priv *oct_priv = from_tasklet(oct_priv, t,
							   droq_tasklet);
	struct octeon_device *oct = oct_priv->dev;

	for (q_no = 0; q_no < MAX_OCTEON_OUTPUT_QUEUES(oct); q_no++) {
		if (!(oct->io_qmask.oq & BIT_ULL(q_no)))
			continue;
		reschedule |= octeon_droq_process_packets(oct, oct->droq[q_no],
							  MAX_PACKET_BUDGET);
		lio_enable_irq(oct->droq[q_no], NULL);

		if (OCTEON_CN23XX_PF(oct) && oct->msix_on) {
			/* set time and cnt interrupt thresholds for this DROQ
			 * for NAPI
			 */
			int adjusted_q_no = q_no + oct->sriov_info.pf_srn;

			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKT_INT_LEVELS(adjusted_q_no),
			    0x5700000040ULL);
			octeon_write_csr64(
			    oct, CN23XX_SLI_OQ_PKTS_SENT(adjusted_q_no), 0);
		}
	}

	if (reschedule)
		tasklet_schedule(&oct_priv->droq_tasklet);
}

static int lio_wait_for_oq_pkts(struct octeon_device *oct)
{
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	int retry = 100, pkt_cnt = 0, pending_pkts = 0;
	int i;

	do {
		pending_pkts = 0;

		for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			pkt_cnt += octeon_droq_check_hw_for_pkts(oct->droq[i]);
		}
		if (pkt_cnt > 0) {
			pending_pkts += pkt_cnt;
			tasklet_schedule(&oct_priv->droq_tasklet);
		}
		pkt_cnt = 0;
		schedule_timeout_uninterruptible(1);

	} while (retry-- && pending_pkts);

	return pkt_cnt;
}
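/* lio_wait_for_oq_pkts() polls the output queues: each pass counts the
 * packets still pending in hardware and kicks the DROQ tasklet to drain
 * them, sleeping one jiffy between passes, until the queues are empty or
 * roughly 100 retries have elapsed.
 */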

/**
 * force_io_queues_off - Forces all IO queues off on a given device
 * @oct: Pointer to Octeon device
 */
static void force_io_queues_off(struct octeon_device *oct)
{
	if ((oct->chip_id == OCTEON_CN66XX) ||
	    (oct->chip_id == OCTEON_CN68XX)) {
		/* Reset the Enable bits for Input Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_INSTR_ENB, 0);

		/* Reset the Enable bits for Output Queues. */
		octeon_write_csr(oct, CN6XXX_SLI_PKT_OUT_ENB, 0);
	}
}

/**
 * pcierror_quiesce_device - Cause device to go quiet so it can be safely removed/reset/etc
 * @oct: Pointer to Octeon device
 */
static inline void pcierror_quiesce_device(struct octeon_device *oct)
{
	int i;

	/* Disable the input and output queues now. No more packets will
	 * arrive from Octeon, but we should wait for all packet processing
	 * to finish.
	 */
	force_io_queues_off(oct);

	/* To allow for in-flight requests */
	schedule_timeout_uninterruptible(WAIT_INFLIGHT_REQUEST);

	if (wait_for_pending_requests(oct))
		dev_err(&oct->pci_dev->dev, "There were pending requests\n");

	/* Force all requests waiting to be fetched by OCTEON to complete. */
	for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
		struct octeon_instr_queue *iq;

		if (!(oct->io_qmask.iq & BIT_ULL(i)))
			continue;
		iq = oct->instr_queue[i];

		if (atomic_read(&iq->instr_pending)) {
			spin_lock_bh(&iq->lock);
			iq->fill_cnt = 0;
			iq->octeon_read_index = iq->host_write_index;
			iq->stats.instr_processed +=
				atomic_read(&iq->instr_pending);
			lio_process_iq_request_list(oct, iq, 0);
			spin_unlock_bh(&iq->lock);
		}
	}

	/* Force all pending ordered list requests to time out. */
	lio_process_ordered_list(oct, 1);

	/* We do not need to wait for output queue packets to be processed. */
}

/**
 * cleanup_aer_uncorrect_error_status - Cleanup PCI AER uncorrectable error status
 * @dev: Pointer to PCI device
 */
static void cleanup_aer_uncorrect_error_status(struct pci_dev *dev)
{
	int pos = 0x100;
	u32 status, mask;

	pr_info("%s :\n", __func__);

	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, &status);
	pci_read_config_dword(dev, pos + PCI_ERR_UNCOR_SEVER, &mask);
	if (dev->error_state == pci_channel_io_normal)
		status &= ~mask; /* Clear corresponding nonfatal bits */
	else
		status &= mask; /* Clear corresponding fatal bits */
	pci_write_config_dword(dev, pos + PCI_ERR_UNCOR_STATUS, status);
}
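/* Note: pos = 0x100 appears to assume the AER block is the first extended
 * capability in config space; a chip-agnostic lookup would instead be
 * pos = pci_find_ext_capability(dev, PCI_EXT_CAP_ID_ERR).
 */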

/**
 * stop_pci_io - Stop all PCI IO to a given device
 * @oct: Pointer to Octeon device
 */
static void stop_pci_io(struct octeon_device *oct)
{
	/* No more instructions will be forwarded. */
	atomic_set(&oct->status, OCT_DEV_IN_RESET);

	pci_disable_device(oct->pci_dev);

	/* Disable interrupts */
	oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);

	pcierror_quiesce_device(oct);

	/* Release the interrupt line */
	free_irq(oct->pci_dev->irq, oct);

	if (oct->flags & LIO_FLAG_MSI_ENABLED)
		pci_disable_msi(oct->pci_dev);

	dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
		lio_get_state_string(&oct->status));

	/* making it a common function for all OCTEON models */
	cleanup_aer_uncorrect_error_status(oct->pci_dev);
}

/**
 * liquidio_pcie_error_detected - called when PCI error is detected
 * @pdev: Pointer to PCI device
 * @state: The current pci connection state
 *
 * This function is called after a PCI bus error affecting
 * this device has been detected.
 */
static pci_ers_result_t liquidio_pcie_error_detected(struct pci_dev *pdev,
						     pci_channel_state_t state)
{
	struct octeon_device *oct = pci_get_drvdata(pdev);

	/* Non-correctable Non-fatal errors */
	if (state == pci_channel_io_normal) {
		dev_err(&oct->pci_dev->dev, "Non-correctable non-fatal error reported:\n");
		cleanup_aer_uncorrect_error_status(oct->pci_dev);
		return PCI_ERS_RESULT_CAN_RECOVER;
	}

	/* Non-correctable Fatal errors */
	dev_err(&oct->pci_dev->dev, "Non-correctable FATAL reported by PCI AER driver\n");
	stop_pci_io(oct);

	/* Always return a DISCONNECT. There is no support for recovery,
	 * only for a clean shutdown.
	 */
	return PCI_ERS_RESULT_DISCONNECT;
}

/**
 * liquidio_pcie_mmio_enabled - mmio handler
 * @pdev: Pointer to PCI device
 */
static pci_ers_result_t liquidio_pcie_mmio_enabled(struct pci_dev __maybe_unused *pdev)
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * liquidio_pcie_slot_reset - called after the pci bus has been reset.
 * @pdev: Pointer to PCI device
 *
 * Restart the card from scratch, as if from a cold-boot. Implementation
 * resembles the first-half of the octeon_resume routine.
 */
static pci_ers_result_t liquidio_pcie_slot_reset(struct pci_dev __maybe_unused *pdev)
{
	/* We should never hit this since we never ask for a reset for a Fatal
	 * Error. We always return DISCONNECT in io_error above.
	 * But play safe and return RECOVERED for now.
	 */
	return PCI_ERS_RESULT_RECOVERED;
}

/**
 * liquidio_pcie_resume - called when traffic can start flowing again.
 * @pdev: Pointer to PCI device
 *
 * This callback is called when the error recovery driver tells us that
 * it's OK to resume normal operation. Implementation resembles the
 * second-half of the octeon_resume routine.
 */
static void liquidio_pcie_resume(struct pci_dev __maybe_unused *pdev)
{
	/* Nothing to be done here. */
}

#define liquidio_suspend NULL
#define liquidio_resume NULL

/* For PCI-E Advanced Error Recovery (AER) Interface */
static const struct pci_error_handlers liquidio_err_handler = {
	.error_detected = liquidio_pcie_error_detected,
	.mmio_enabled = liquidio_pcie_mmio_enabled,
	.slot_reset = liquidio_pcie_slot_reset,
	.resume = liquidio_pcie_resume,
};

static const struct pci_device_id liquidio_pci_tbl[] = {
	{ /* 68xx */
		PCI_VENDOR_ID_CAVIUM, 0x91, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{ /* 66xx */
		PCI_VENDOR_ID_CAVIUM, 0x92, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{ /* 23xx pf */
		PCI_VENDOR_ID_CAVIUM, 0x9702, PCI_ANY_ID, PCI_ANY_ID, 0, 0, 0
	},
	{
		0, 0, 0, 0, 0, 0, 0
	}
};
MODULE_DEVICE_TABLE(pci, liquidio_pci_tbl);

static SIMPLE_DEV_PM_OPS(liquidio_pm_ops, liquidio_suspend, liquidio_resume);

static struct pci_driver liquidio_pci_driver = {
	.name = "LiquidIO",
	.id_table = liquidio_pci_tbl,
	.probe = liquidio_probe,
	.remove = liquidio_remove,
	.err_handler = &liquidio_err_handler, /* For AER */
	.driver.pm = &liquidio_pm_ops,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = liquidio_enable_sriov,
#endif
};

/**
 * liquidio_init_pci - register PCI driver
 */
static int liquidio_init_pci(void)
{
	return pci_register_driver(&liquidio_pci_driver);
}

/**
 * liquidio_deinit_pci - unregister PCI driver
 */
static void liquidio_deinit_pci(void)
{
	pci_unregister_driver(&liquidio_pci_driver);
}

/**
 * check_txq_status - Check Tx queue status, and take appropriate action
 * @lio: per-network private data
 * Return: number of subqueues woken up (0 if none)
 */
static inline int check_txq_status(struct lio *lio)
{
	int numqs = lio->netdev->real_num_tx_queues;
	int ret_val = 0;
	int q, iq;

	/* check each sub-queue state */
	for (q = 0; q < numqs; q++) {
		iq = lio->linfo.txpciq[q %
			lio->oct_dev->num_iqs].s.q_no;
		if (octnet_iq_is_full(lio->oct_dev, iq))
			continue;
		if (__netif_subqueue_stopped(lio->netdev, q)) {
			netif_wake_subqueue(lio->netdev, q);
			INCR_INSTRQUEUE_PKT_COUNT(lio->oct_dev, iq,
						  tx_restart, 1);
			ret_val++;
		}
	}

	return ret_val;
}
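/* Each netdev subqueue q maps onto instruction queue txpciq[q % num_iqs],
 * so several subqueues may share one IQ; a stopped subqueue is woken only
 * once the IQ backing it has room again.
 */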

/**
 * print_link_info - Print link information
 * @netdev: network device
 */
static void print_link_info(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (!ifstate_check(lio, LIO_IFSTATE_RESETTING) &&
	    ifstate_check(lio, LIO_IFSTATE_REGISTERED)) {
		struct oct_link_info *linfo = &lio->linfo;

		if (linfo->link.s.link_up) {
			netif_info(lio, link, lio->netdev, "%d Mbps %s Duplex UP\n",
				   linfo->link.s.speed,
				   (linfo->link.s.duplex) ? "Full" : "Half");
		} else {
			netif_info(lio, link, lio->netdev, "Link Down\n");
		}
	}
}

/**
 * octnet_link_status_change - Routine to notify MTU change
 * @work: work_struct data structure
 */
static void octnet_link_status_change(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

	/* lio->linfo.link.s.mtu always contains the max MTU of the lio
	 * interface. This work is invoked only when the new max MTU of the
	 * interface is smaller than the current MTU.
	 */
	rtnl_lock();
	dev_set_mtu(lio->netdev, lio->linfo.link.s.mtu);
	rtnl_unlock();
}

/**
 * setup_link_status_change_wq - Sets up the mtu status change work
 * @netdev: network device
 */
static inline int setup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->link_status_wq.wq = alloc_workqueue("link-status",
						 WQ_MEM_RECLAIM, 0);
	if (!lio->link_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium link status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->link_status_wq.wk.work,
			  octnet_link_status_change);
	lio->link_status_wq.wk.ctxptr = lio;

	return 0;
}

static inline void cleanup_link_status_change_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->link_status_wq.wq) {
		cancel_delayed_work_sync(&lio->link_status_wq.wk.work);
		destroy_workqueue(lio->link_status_wq.wq);
	}
}

/**
 * update_link_status - Update link status
 * @netdev: network device
 * @ls: link status structure
 *
 * Called on receipt of a link status response from the core application to
 * update each interface's link status.
 */
static inline void update_link_status(struct net_device *netdev,
				      union oct_link_status *ls)
{
	struct lio *lio = GET_LIO(netdev);
	int changed = (lio->linfo.link.u64 != ls->u64);
	int current_max_mtu = lio->linfo.link.s.mtu;
	struct octeon_device *oct = lio->oct_dev;

	dev_dbg(&oct->pci_dev->dev, "%s: lio->linfo.link.u64=%llx, ls->u64=%llx\n",
		__func__, lio->linfo.link.u64, ls->u64);
	lio->linfo.link.u64 = ls->u64;

	if ((lio->intf_open) && (changed)) {
		print_link_info(netdev);
		lio->link_changes++;

		if (lio->linfo.link.s.link_up) {
			dev_dbg(&oct->pci_dev->dev, "%s: link_up", __func__);
			netif_carrier_on(netdev);
			wake_txqs(netdev);
		} else {
			dev_dbg(&oct->pci_dev->dev, "%s: link_off", __func__);
			netif_carrier_off(netdev);
			stop_txqs(netdev);
		}
		if (lio->linfo.link.s.mtu != current_max_mtu) {
			netif_info(lio, probe, lio->netdev, "Max MTU changed from %d to %d\n",
				   current_max_mtu, lio->linfo.link.s.mtu);
			netdev->max_mtu = lio->linfo.link.s.mtu;
		}
		if (lio->linfo.link.s.mtu < netdev->mtu) {
			dev_warn(&oct->pci_dev->dev,
				 "Current MTU is higher than new max MTU; Reducing the current mtu from %d to %d\n",
				 netdev->mtu, lio->linfo.link.s.mtu);
			queue_delayed_work(lio->link_status_wq.wq,
					   &lio->link_status_wq.wk.work, 0);
		}
	}
}

/**
 * lio_sync_octeon_time - send latest localtime to octeon firmware so that
 * firmware will correct its time, in case there is a time skew
 *
 * @work: work scheduled to send time update to octeon firmware
 **/
static void lio_sync_octeon_time(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_soft_command *sc;
	struct timespec64 ts;
	struct lio_time *lt;
	int ret;

	sc = octeon_alloc_soft_command(oct, sizeof(struct lio_time), 16, 0);
	if (!sc) {
		dev_err(&oct->pci_dev->dev,
			"Failed to sync time to octeon: soft command allocation failed\n");
		return;
	}

	lt = (struct lio_time *)sc->virtdptr;

	/* Get time of the day */
	ktime_get_real_ts64(&ts);
	lt->sec = ts.tv_sec;
	lt->nsec = ts.tv_nsec;
	octeon_swap_8B_data((u64 *)lt, (sizeof(struct lio_time)) / 8);

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;
	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_SYNC_OCTEON_TIME, 0, 0, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	ret = octeon_send_soft_command(oct, sc);
	if (ret == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev,
			"Failed to sync time to octeon: failed to send soft command\n");
		octeon_free_soft_command(oct, sc);
	} else {
		WRITE_ONCE(sc->caller_is_done, true);
	}

	queue_delayed_work(lio->sync_octeon_time_wq.wq,
			   &lio->sync_octeon_time_wq.wk.work,
			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));
}
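/* The work item re-queues itself, so once armed by
 * setup_sync_octeon_time_wq() the host pushes its wall-clock time to the
 * firmware every LIO_SYNC_OCTEON_TIME_INTERVAL_MS (60 s);
 * octeon_swap_8B_data() puts the payload into the byte order the firmware
 * expects before the soft command is queued.
 */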

/**
 * setup_sync_octeon_time_wq - prepare work to periodically update local time to octeon firmware
 *
 * @netdev: network device which should send time update to firmware
 **/
static inline int setup_sync_octeon_time_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->sync_octeon_time_wq.wq =
		alloc_workqueue("update-octeon-time", WQ_MEM_RECLAIM, 0);
	if (!lio->sync_octeon_time_wq.wq) {
		dev_err(&oct->pci_dev->dev, "Unable to create wq to update octeon time\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->sync_octeon_time_wq.wk.work,
			  lio_sync_octeon_time);
	lio->sync_octeon_time_wq.wk.ctxptr = lio;
	queue_delayed_work(lio->sync_octeon_time_wq.wq,
			   &lio->sync_octeon_time_wq.wk.work,
			   msecs_to_jiffies(LIO_SYNC_OCTEON_TIME_INTERVAL_MS));

	return 0;
}

/**
 * cleanup_sync_octeon_time_wq - destroy wq
 *
 * @netdev: network device which should send time update to firmware
 *
 * Stop scheduling and destroy the work created to periodically update local
 * time to octeon firmware.
 **/
static inline void cleanup_sync_octeon_time_wq(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct cavium_wq *time_wq = &lio->sync_octeon_time_wq;

	if (time_wq->wq) {
		cancel_delayed_work_sync(&time_wq->wk.work);
		destroy_workqueue(time_wq->wq);
	}
}

static struct octeon_device *get_other_octeon_device(struct octeon_device *oct)
{
	struct octeon_device *other_oct;

	other_oct = lio_get_device(oct->octeon_id + 1);

	if (other_oct && other_oct->pci_dev) {
		int oct_busnum, other_oct_busnum;

		oct_busnum = oct->pci_dev->bus->number;
		other_oct_busnum = other_oct->pci_dev->bus->number;

		if (oct_busnum == other_oct_busnum) {
			int oct_slot, other_oct_slot;

			oct_slot = PCI_SLOT(oct->pci_dev->devfn);
			other_oct_slot = PCI_SLOT(other_oct->pci_dev->devfn);

			if (oct_slot == other_oct_slot)
				return other_oct;
		}
	}

	return NULL;
}
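/* get_other_octeon_device() assumes the two PFs of one physical adapter
 * are probed back-to-back and therefore hold consecutive octeon IDs; the
 * bus and slot comparison then confirms the candidate really sits on the
 * same card before it is returned.
 */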

static void disable_all_vf_links(struct octeon_device *oct)
{
	struct net_device *netdev;
	int max_vfs, vf, i;

	if (!oct)
		return;

	max_vfs = oct->sriov_info.max_vfs;

	for (i = 0; i < oct->ifcount; i++) {
		netdev = oct->props[i].netdev;
		if (!netdev)
			continue;

		for (vf = 0; vf < max_vfs; vf++)
			liquidio_set_vf_link_state(netdev, vf,
						   IFLA_VF_LINK_STATE_DISABLE);
	}
}

static int liquidio_watchdog(void *param)
{
	bool err_msg_was_printed[LIO_MAX_CORES];
	u16 mask_of_crashed_or_stuck_cores = 0;
	bool all_vf_links_are_disabled = false;
	struct octeon_device *oct = param;
	struct octeon_device *other_oct;
#ifdef CONFIG_MODULE_UNLOAD
	long refcount, vfs_referencing_pf;
	u64 vfs_mask1, vfs_mask2;
#endif
	int core;

	memset(err_msg_was_printed, 0, sizeof(err_msg_was_printed));

	while (!kthread_should_stop()) {
		/* sleep for a couple of seconds so that we don't hog the CPU */
		set_current_state(TASK_INTERRUPTIBLE);
		schedule_timeout(msecs_to_jiffies(2000));

		mask_of_crashed_or_stuck_cores =
		    (u16)octeon_read_csr64(oct, CN23XX_SLI_SCRATCH2);

		if (!mask_of_crashed_or_stuck_cores)
			continue;

		WRITE_ONCE(oct->cores_crashed, true);
		other_oct = get_other_octeon_device(oct);
		if (other_oct)
			WRITE_ONCE(other_oct->cores_crashed, true);

		for (core = 0; core < LIO_MAX_CORES; core++) {
			bool core_crashed_or_got_stuck;

			core_crashed_or_got_stuck =
						(mask_of_crashed_or_stuck_cores
						 >> core) & 1;

			if (core_crashed_or_got_stuck &&
			    !err_msg_was_printed[core]) {
				dev_err(&oct->pci_dev->dev,
					"ERROR: Octeon core %d crashed or got stuck! See oct-fwdump for details.\n",
					core);
				err_msg_was_printed[core] = true;
			}
		}

		if (all_vf_links_are_disabled)
			continue;

		disable_all_vf_links(oct);
		disable_all_vf_links(other_oct);
		all_vf_links_are_disabled = true;

#ifdef CONFIG_MODULE_UNLOAD
		vfs_mask1 = READ_ONCE(oct->sriov_info.vf_drv_loaded_mask);
		vfs_mask2 = READ_ONCE(other_oct->sriov_info.vf_drv_loaded_mask);

		vfs_referencing_pf = hweight64(vfs_mask1);
		vfs_referencing_pf += hweight64(vfs_mask2);

		refcount = module_refcount(THIS_MODULE);
		if (refcount >= vfs_referencing_pf) {
			while (vfs_referencing_pf) {
				module_put(THIS_MODULE);
				vfs_referencing_pf--;
			}
		}
#endif
	}

	return 0;
}
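/* On a crash the watchdog marks both PFs of the adapter, disables every
 * VF link once, and then drops the module references that loaded VF
 * drivers took on this PF, so the PF module can still be unloaded for
 * recovery even though its VFs are wedged.
 */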

/**
 * liquidio_probe - PCI probe handler
 * @pdev: PCI device structure
 * @ent: unused
 */
static int
liquidio_probe(struct pci_dev *pdev, const struct pci_device_id __maybe_unused *ent)
{
	struct octeon_device *oct_dev = NULL;
	struct handshake *hs;

	oct_dev = octeon_allocate_device(pdev->device,
					 sizeof(struct octeon_device_priv));
	if (!oct_dev) {
		dev_err(&pdev->dev, "Unable to allocate device\n");
		return -ENOMEM;
	}

	if (pdev->device == OCTEON_CN23XX_PF_VID)
		oct_dev->msix_on = LIO_FLAG_MSIX_ENABLED;

	/* Enable PTP for 6XXX Device */
	if (((pdev->device == OCTEON_CN66XX) ||
	     (pdev->device == OCTEON_CN68XX)))
		oct_dev->ptp_enable = true;
	else
		oct_dev->ptp_enable = false;

	dev_info(&pdev->dev, "Initializing device %x:%x.\n",
		 (u32)pdev->vendor, (u32)pdev->device);

	/* Assign octeon_device for this device to the private data area. */
	pci_set_drvdata(pdev, oct_dev);

	/* set linux specific device pointer */
	oct_dev->pci_dev = (void *)pdev;

	oct_dev->subsystem_id = pdev->subsystem_vendor |
		(pdev->subsystem_device << 16);

	hs = &handshake[oct_dev->octeon_id];
	init_completion(&hs->init);
	init_completion(&hs->started);
	hs->pci_dev = pdev;

	if (oct_dev->octeon_id == 0)
		/* first LiquidIO NIC is detected */
		complete(&first_stage);

	if (octeon_device_init(oct_dev)) {
		complete(&hs->init);
		liquidio_remove(pdev);
		return -ENOMEM;
	}

	if (OCTEON_CN23XX_PF(oct_dev)) {
		u8 bus, device, function;

		if (atomic_read(oct_dev->adapter_refcount) == 1) {
			/* Each NIC gets one watchdog kernel thread. The first
			 * PF (of each NIC) that gets pci_driver->probe()'d
			 * creates that thread.
			 */
			bus = pdev->bus->number;
			device = PCI_SLOT(pdev->devfn);
			function = PCI_FUNC(pdev->devfn);
			oct_dev->watchdog_task = kthread_create(
			    liquidio_watchdog, oct_dev,
			    "liowd/%02hhx:%02hhx.%hhx", bus, device, function);
			if (!IS_ERR(oct_dev->watchdog_task)) {
				wake_up_process(oct_dev->watchdog_task);
			} else {
				oct_dev->watchdog_task = NULL;
				dev_err(&oct_dev->pci_dev->dev,
					"failed to create kernel_thread\n");
				liquidio_remove(pdev);
				return -1;
			}
		}
	}

	oct_dev->rx_pause = 1;
	oct_dev->tx_pause = 1;

	dev_dbg(&oct_dev->pci_dev->dev, "Device is ready\n");

	return 0;
}

static bool fw_type_is_auto(void)
{
	return strncmp(fw_type, LIO_FW_NAME_TYPE_AUTO,
		       sizeof(LIO_FW_NAME_TYPE_AUTO)) == 0;
}

/**
 * octeon_pci_flr - PCI FLR for each Octeon device.
 * @oct: octeon device
 */
static void octeon_pci_flr(struct octeon_device *oct)
{
	int rc;

	pci_save_state(oct->pci_dev);

	pci_cfg_access_lock(oct->pci_dev);

	/* Quiesce the device completely */
	pci_write_config_word(oct->pci_dev, PCI_COMMAND,
			      PCI_COMMAND_INTX_DISABLE);

	rc = __pci_reset_function_locked(oct->pci_dev);

	if (rc != 0)
		dev_err(&oct->pci_dev->dev, "Error %d resetting PCI function %d\n",
			rc, oct->pf_num);

	pci_cfg_access_unlock(oct->pci_dev);

	pci_restore_state(oct->pci_dev);
}
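/* FLR (function level reset) is used instead of a soft reset when the
 * firmware was preloaded from flash (see the FW_IS_PRELOADED check in
 * octeon_destroy_resources() below): the image running on the card is
 * left intact while the PCI function itself is reset.
 */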
950*4882a593Smuzhiyun
951*4882a593Smuzhiyun /**
952*4882a593Smuzhiyun * octeon_destroy_resources - Destroy resources associated with octeon device
953*4882a593Smuzhiyun * @oct: octeon device
954*4882a593Smuzhiyun */
octeon_destroy_resources(struct octeon_device * oct)955*4882a593Smuzhiyun static void octeon_destroy_resources(struct octeon_device *oct)
956*4882a593Smuzhiyun {
957*4882a593Smuzhiyun int i, refcount;
958*4882a593Smuzhiyun struct msix_entry *msix_entries;
959*4882a593Smuzhiyun struct octeon_device_priv *oct_priv =
960*4882a593Smuzhiyun (struct octeon_device_priv *)oct->priv;
961*4882a593Smuzhiyun
962*4882a593Smuzhiyun struct handshake *hs;
963*4882a593Smuzhiyun
964*4882a593Smuzhiyun switch (atomic_read(&oct->status)) {
965*4882a593Smuzhiyun case OCT_DEV_RUNNING:
966*4882a593Smuzhiyun case OCT_DEV_CORE_OK:
967*4882a593Smuzhiyun
968*4882a593Smuzhiyun /* No more instructions will be forwarded. */
969*4882a593Smuzhiyun atomic_set(&oct->status, OCT_DEV_IN_RESET);
970*4882a593Smuzhiyun
971*4882a593Smuzhiyun oct->app_mode = CVM_DRV_INVALID_APP;
972*4882a593Smuzhiyun dev_dbg(&oct->pci_dev->dev, "Device state is now %s\n",
973*4882a593Smuzhiyun lio_get_state_string(&oct->status));
974*4882a593Smuzhiyun
975*4882a593Smuzhiyun schedule_timeout_uninterruptible(HZ / 10);
976*4882a593Smuzhiyun
977*4882a593Smuzhiyun fallthrough;
978*4882a593Smuzhiyun case OCT_DEV_HOST_OK:
979*4882a593Smuzhiyun
980*4882a593Smuzhiyun case OCT_DEV_CONSOLE_INIT_DONE:
981*4882a593Smuzhiyun /* Remove any consoles */
982*4882a593Smuzhiyun octeon_remove_consoles(oct);
983*4882a593Smuzhiyun
984*4882a593Smuzhiyun fallthrough;
985*4882a593Smuzhiyun case OCT_DEV_IO_QUEUES_DONE:
986*4882a593Smuzhiyun if (lio_wait_for_instr_fetch(oct))
987*4882a593Smuzhiyun dev_err(&oct->pci_dev->dev, "IQ had pending instructions\n");
988*4882a593Smuzhiyun
989*4882a593Smuzhiyun if (wait_for_pending_requests(oct))
990*4882a593Smuzhiyun dev_err(&oct->pci_dev->dev, "There were pending requests\n");
991*4882a593Smuzhiyun
992*4882a593Smuzhiyun /* Disable the input and output queues now. No more packets will
993*4882a593Smuzhiyun * arrive from Octeon, but we should wait for all packet
994*4882a593Smuzhiyun * processing to finish.
995*4882a593Smuzhiyun */
996*4882a593Smuzhiyun oct->fn_list.disable_io_queues(oct);
997*4882a593Smuzhiyun
998*4882a593Smuzhiyun if (lio_wait_for_oq_pkts(oct))
999*4882a593Smuzhiyun dev_err(&oct->pci_dev->dev, "OQ had pending packets\n");
1000*4882a593Smuzhiyun
1001*4882a593Smuzhiyun /* Force all requests waiting to be fetched by OCTEON to
1002*4882a593Smuzhiyun * complete.
1003*4882a593Smuzhiyun */
1004*4882a593Smuzhiyun for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
1005*4882a593Smuzhiyun struct octeon_instr_queue *iq;
1006*4882a593Smuzhiyun
1007*4882a593Smuzhiyun if (!(oct->io_qmask.iq & BIT_ULL(i)))
1008*4882a593Smuzhiyun continue;
1009*4882a593Smuzhiyun iq = oct->instr_queue[i];
1010*4882a593Smuzhiyun
1011*4882a593Smuzhiyun if (atomic_read(&iq->instr_pending)) {
1012*4882a593Smuzhiyun spin_lock_bh(&iq->lock);
1013*4882a593Smuzhiyun iq->fill_cnt = 0;
1014*4882a593Smuzhiyun iq->octeon_read_index = iq->host_write_index;
1015*4882a593Smuzhiyun iq->stats.instr_processed +=
1016*4882a593Smuzhiyun atomic_read(&iq->instr_pending);
1017*4882a593Smuzhiyun lio_process_iq_request_list(oct, iq, 0);
1018*4882a593Smuzhiyun spin_unlock_bh(&iq->lock);
1019*4882a593Smuzhiyun }
1020*4882a593Smuzhiyun }
1021*4882a593Smuzhiyun
1022*4882a593Smuzhiyun lio_process_ordered_list(oct, 1);
1023*4882a593Smuzhiyun octeon_free_sc_done_list(oct);
1024*4882a593Smuzhiyun octeon_free_sc_zombie_list(oct);
1025*4882a593Smuzhiyun
1026*4882a593Smuzhiyun fallthrough;
1027*4882a593Smuzhiyun case OCT_DEV_INTR_SET_DONE:
1028*4882a593Smuzhiyun /* Disable interrupts */
1029*4882a593Smuzhiyun oct->fn_list.disable_interrupt(oct, OCTEON_ALL_INTR);
1030*4882a593Smuzhiyun
1031*4882a593Smuzhiyun if (oct->msix_on) {
1032*4882a593Smuzhiyun msix_entries = (struct msix_entry *)oct->msix_entries;
1033*4882a593Smuzhiyun for (i = 0; i < oct->num_msix_irqs - 1; i++) {
1034*4882a593Smuzhiyun if (oct->ioq_vector[i].vector) {
1035*4882a593Smuzhiyun /* clear the affinity_cpumask */
1036*4882a593Smuzhiyun irq_set_affinity_hint(
1037*4882a593Smuzhiyun msix_entries[i].vector,
1038*4882a593Smuzhiyun NULL);
1039*4882a593Smuzhiyun free_irq(msix_entries[i].vector,
1040*4882a593Smuzhiyun &oct->ioq_vector[i]);
1041*4882a593Smuzhiyun oct->ioq_vector[i].vector = 0;
1042*4882a593Smuzhiyun }
1043*4882a593Smuzhiyun }
1044*4882a593Smuzhiyun /* non-iov vector's argument is oct struct */
1045*4882a593Smuzhiyun free_irq(msix_entries[i].vector, oct);
1046*4882a593Smuzhiyun
1047*4882a593Smuzhiyun pci_disable_msix(oct->pci_dev);
1048*4882a593Smuzhiyun kfree(oct->msix_entries);
1049*4882a593Smuzhiyun oct->msix_entries = NULL;
1050*4882a593Smuzhiyun } else {
1051*4882a593Smuzhiyun /* Release the interrupt line */
1052*4882a593Smuzhiyun free_irq(oct->pci_dev->irq, oct);
1053*4882a593Smuzhiyun
1054*4882a593Smuzhiyun if (oct->flags & LIO_FLAG_MSI_ENABLED)
1055*4882a593Smuzhiyun pci_disable_msi(oct->pci_dev);
1056*4882a593Smuzhiyun }
1057*4882a593Smuzhiyun
1058*4882a593Smuzhiyun kfree(oct->irq_name_storage);
1059*4882a593Smuzhiyun oct->irq_name_storage = NULL;
1060*4882a593Smuzhiyun
1061*4882a593Smuzhiyun fallthrough;
1062*4882a593Smuzhiyun case OCT_DEV_MSIX_ALLOC_VECTOR_DONE:
1063*4882a593Smuzhiyun if (OCTEON_CN23XX_PF(oct))
1064*4882a593Smuzhiyun octeon_free_ioq_vector(oct);
1065*4882a593Smuzhiyun
1066*4882a593Smuzhiyun fallthrough;
1067*4882a593Smuzhiyun case OCT_DEV_MBOX_SETUP_DONE:
1068*4882a593Smuzhiyun if (OCTEON_CN23XX_PF(oct))
1069*4882a593Smuzhiyun oct->fn_list.free_mbox(oct);
1070*4882a593Smuzhiyun
1071*4882a593Smuzhiyun fallthrough;
1072*4882a593Smuzhiyun case OCT_DEV_IN_RESET:
1073*4882a593Smuzhiyun case OCT_DEV_DROQ_INIT_DONE:
1074*4882a593Smuzhiyun /* Wait for any pending operations */
1075*4882a593Smuzhiyun mdelay(100);
1076*4882a593Smuzhiyun for (i = 0; i < MAX_OCTEON_OUTPUT_QUEUES(oct); i++) {
			if (!(oct->io_qmask.oq & BIT_ULL(i)))
				continue;
			octeon_delete_droq(oct, i);
		}

		/* Force any pending handshakes to complete */
		for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
			hs = &handshake[i];

			if (hs->pci_dev) {
				handshake[oct->octeon_id].init_ok = 0;
				complete(&handshake[oct->octeon_id].init);
				handshake[oct->octeon_id].started_ok = 0;
				complete(&handshake[oct->octeon_id].started);
			}
		}

		fallthrough;
	case OCT_DEV_RESP_LIST_INIT_DONE:
		octeon_delete_response_list(oct);

		fallthrough;
	case OCT_DEV_INSTR_QUEUE_INIT_DONE:
		for (i = 0; i < MAX_OCTEON_INSTR_QUEUES(oct); i++) {
			if (!(oct->io_qmask.iq & BIT_ULL(i)))
				continue;
			octeon_delete_instr_queue(oct, i);
		}
#ifdef CONFIG_PCI_IOV
		if (oct->sriov_info.sriov_enabled)
			pci_disable_sriov(oct->pci_dev);
#endif
		fallthrough;
	case OCT_DEV_SC_BUFF_POOL_INIT_DONE:
		octeon_free_sc_buffer_pool(oct);

		fallthrough;
	case OCT_DEV_DISPATCH_INIT_DONE:
		octeon_delete_dispatch_list(oct);
		cancel_delayed_work_sync(&oct->nic_poll_work.work);

		fallthrough;
	case OCT_DEV_PCI_MAP_DONE:
		refcount = octeon_deregister_device(oct);

		/* Soft reset the octeon device before exiting.
		 * However, if fw was loaded from card (i.e. autoboot),
		 * perform an FLR instead.
		 * Implementation note: only soft-reset the device
		 * if it is a CN6XXX OR the LAST CN23XX device.
		 */
		if (atomic_read(oct->adapter_fw_state) == FW_IS_PRELOADED)
			octeon_pci_flr(oct);
		else if (OCTEON_CN6XXX(oct) || !refcount)
			oct->fn_list.soft_reset(oct);

		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);

		fallthrough;
	case OCT_DEV_PCI_ENABLE_DONE:
		pci_clear_master(oct->pci_dev);
		/* Disable the device, releasing the PCI INT */
		pci_disable_device(oct->pci_dev);

		fallthrough;
	case OCT_DEV_BEGIN_STATE:
		/* Nothing to be done here either */
		break;
	} /* end switch (oct->status) */

	tasklet_kill(&oct_priv->droq_tasklet);
}

/**
 * send_rx_ctrl_cmd - Send Rx control command
 * @lio: per-network private data
 * @start_stop: whether to start or stop
 */
static int send_rx_ctrl_cmd(struct lio *lio, int start_stop)
{
	struct octeon_soft_command *sc;
	union octnet_cmd *ncmd;
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	int retval;

	if (oct->props[lio->ifidx].rx_on == start_stop)
		return 0;

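	/* Build the OCTNET_CMD_RX_CTL request in a soft command: OCTNET_CMD_SIZE
	 * bytes of command data plus a 16-byte response buffer that the
	 * firmware fills in when it acknowledges the request.
	 */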
	sc = (struct octeon_soft_command *)
		octeon_alloc_soft_command(oct, OCTNET_CMD_SIZE,
					  16, 0);
	if (!sc) {
		netif_info(lio, rx_err, lio->netdev,
			   "Failed to allocate octeon_soft_command struct\n");
		return -ENOMEM;
	}

	ncmd = (union octnet_cmd *)sc->virtdptr;

	ncmd->u64 = 0;
	ncmd->s.cmd = OCTNET_CMD_RX_CTL;
	ncmd->s.param1 = start_stop;

	octeon_swap_8B_data((u64 *)ncmd, (OCTNET_CMD_SIZE >> 3));

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_CMD, 0, 0, 0);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		netif_info(lio, rx_err, lio->netdev, "Failed to send RX Control message\n");
		octeon_free_soft_command(oct, sc);
	} else {
		/* Sleep on a wait queue till the cond flag indicates that the
		 * response arrived or timed-out.
		 */
		retval = wait_for_sc_completion_timeout(oct, sc, 0);
		if (retval)
			return retval;

		oct->props[lio->ifidx].rx_on = start_stop;
		WRITE_ONCE(sc->caller_is_done, true);
	}

	return retval;
}

/**
 * liquidio_destroy_nic_device - Destroy NIC device interface
 * @oct: octeon device
 * @ifidx: which interface to destroy
 *
 * Cleanup associated with each interface for an Octeon device when NIC
 * module is being unloaded or if initialization fails during load.
 */
static void liquidio_destroy_nic_device(struct octeon_device *oct, int ifidx)
{
	struct net_device *netdev = oct->props[ifidx].netdev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	struct napi_struct *napi, *n;
	struct lio *lio;

	if (!netdev) {
		dev_err(&oct->pci_dev->dev, "%s No netdevice ptr for index %d\n",
			__func__, ifidx);
		return;
	}

	lio = GET_LIO(netdev);

	dev_dbg(&oct->pci_dev->dev, "NIC device cleanup\n");

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING)
		liquidio_stop(netdev);

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 0;
	}

	/* Delete NAPI */
	list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
		netif_napi_del(napi);

	tasklet_enable(&oct_priv->droq_tasklet);

	if (atomic_read(&lio->ifstate) & LIO_IFSTATE_REGISTERED)
		unregister_netdev(netdev);

	cleanup_sync_octeon_time_wq(netdev);
	cleanup_link_status_change_wq(netdev);

	cleanup_rx_oom_poll_fn(netdev);

	lio_delete_glists(lio);

	free_netdev(netdev);

	oct->props[ifidx].gmxport = -1;

	oct->props[ifidx].netdev = NULL;
}

/**
 * liquidio_stop_nic_module - Stop complete NIC functionality
 * @oct: octeon device
 */
static int liquidio_stop_nic_module(struct octeon_device *oct)
{
	int i, j;
	struct lio *lio;

	dev_dbg(&oct->pci_dev->dev, "Stopping network interfaces\n");
	if (!oct->ifcount) {
		dev_err(&oct->pci_dev->dev, "Init for Octeon was not completed\n");
		return 1;
	}

	spin_lock_bh(&oct->cmd_resp_wqlock);
	oct->cmd_resp_state = OCT_DRV_OFFLINE;
	spin_unlock_bh(&oct->cmd_resp_wqlock);

	lio_vf_rep_destroy(oct);

	for (i = 0; i < oct->ifcount; i++) {
		lio = GET_LIO(oct->props[i].netdev);
		for (j = 0; j < oct->num_oqs; j++)
			octeon_unregister_droq_ops(oct,
						   lio->linfo.rxpciq[j].s.q_no);
	}

	for (i = 0; i < oct->ifcount; i++)
		liquidio_destroy_nic_device(oct, i);

	if (oct->devlink) {
		devlink_unregister(oct->devlink);
		devlink_free(oct->devlink);
		oct->devlink = NULL;
	}

	dev_dbg(&oct->pci_dev->dev, "Network interfaces stopped\n");
	return 0;
}

/**
 * liquidio_remove - Cleans up resources at unload time
 * @pdev: PCI device structure
 */
static void liquidio_remove(struct pci_dev *pdev)
{
	struct octeon_device *oct_dev = pci_get_drvdata(pdev);

	dev_dbg(&oct_dev->pci_dev->dev, "Stopping device\n");

	if (oct_dev->watchdog_task)
		kthread_stop(oct_dev->watchdog_task);

	if (!oct_dev->octeon_id &&
	    oct_dev->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP)
		lio_vf_rep_modexit();

	if (oct_dev->app_mode && (oct_dev->app_mode == CVM_DRV_NIC_APP))
		liquidio_stop_nic_module(oct_dev);

	/* Reset the octeon device and cleanup all memory allocated for
	 * the octeon device by driver.
	 */
	octeon_destroy_resources(oct_dev);

	dev_info(&oct_dev->pci_dev->dev, "Device removed\n");

	/* This octeon device has been removed. Update the global
	 * data structure to reflect this. Free the device structure.
	 */
	octeon_free_device_mem(oct_dev);
}

/**
 * octeon_chip_specific_setup - Identify the Octeon device and map the BAR address space
 * @oct: octeon device
 */
static int octeon_chip_specific_setup(struct octeon_device *oct)
{
	u32 dev_id, rev_id;
	int ret = 1;

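	/* Config-space dword 0 holds the vendor/device ID pair; the dword at
	 * offset 8 holds the class code and revision, with the revision in
	 * the low byte.
	 */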
	pci_read_config_dword(oct->pci_dev, 0, &dev_id);
	pci_read_config_dword(oct->pci_dev, 8, &rev_id);
	oct->rev_id = rev_id & 0xff;

	switch (dev_id) {
	case OCTEON_CN68XX_PCIID:
		oct->chip_id = OCTEON_CN68XX;
		ret = lio_setup_cn68xx_octeon_device(oct);
		break;

	case OCTEON_CN66XX_PCIID:
		oct->chip_id = OCTEON_CN66XX;
		ret = lio_setup_cn66xx_octeon_device(oct);
		break;

	case OCTEON_CN23XX_PCIID_PF:
		oct->chip_id = OCTEON_CN23XX_PF_VID;
		ret = setup_cn23xx_octeon_pf_device(oct);
		if (ret)
			break;
#ifdef CONFIG_PCI_IOV
		pci_sriov_set_totalvfs(oct->pci_dev,
				       oct->sriov_info.max_vfs);
#endif
		break;

	default:
		dev_err(&oct->pci_dev->dev, "Unknown device found (dev_id: %x)\n",
			dev_id);
	}

	return ret;
}

/**
 * octeon_pci_os_setup - PCI initialization for each Octeon device.
 * @oct: octeon device
 */
static int octeon_pci_os_setup(struct octeon_device *oct)
{
	/* setup PCI stuff first */
	if (pci_enable_device(oct->pci_dev)) {
		dev_err(&oct->pci_dev->dev, "pci_enable_device failed\n");
		return 1;
	}

	if (dma_set_mask_and_coherent(&oct->pci_dev->dev, DMA_BIT_MASK(64))) {
		dev_err(&oct->pci_dev->dev, "Unexpected DMA device capability\n");
		pci_disable_device(oct->pci_dev);
		return 1;
	}

	/* Enable PCI DMA Master. */
	pci_set_master(oct->pci_dev);

	return 0;
}

/**
 * free_netbuf - Unmap and free network buffer
 * @buf: buffer
 */
static void free_netbuf(void *buf)
{
	struct sk_buff *skb;
	struct octnet_buf_free_info *finfo;
	struct lio *lio;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev, finfo->dptr, skb->len,
			 DMA_TO_DEVICE);

	tx_buffer_free(skb);
}

/**
 * free_netsgbuf - Unmap and free gather buffer
 * @buf: buffer
 */
static void free_netsgbuf(void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	finfo = (struct octnet_buf_free_info *)buf;
	skb = finfo->skb;
	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

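	/* The linear part of the skb was unmapped from sg[0].ptr[0] above;
	 * each scatter/gather entry carries four pointers, so DMA address i
	 * (starting at i = 1 for the first page fragment) sits in
	 * g->sg[i >> 2].ptr[i & 3].
	 */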
	i = 1;
	while (frags--) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       skb_frag_size(frag), DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio->oct_dev, skb);
	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	tx_buffer_free(skb);
}

/**
 * free_netsgbuf_with_resp - Unmap and free gather buffer with response
 * @buf: buffer
 */
static void free_netsgbuf_with_resp(void *buf)
{
	struct octeon_soft_command *sc;
	struct octnet_buf_free_info *finfo;
	struct sk_buff *skb;
	struct lio *lio;
	struct octnic_gather *g;
	int i, frags, iq;

	sc = (struct octeon_soft_command *)buf;
	skb = (struct sk_buff *)sc->callback_arg;
	finfo = (struct octnet_buf_free_info *)&skb->cb;

	lio = finfo->lio;
	g = finfo->g;
	frags = skb_shinfo(skb)->nr_frags;

	dma_unmap_single(&lio->oct_dev->pci_dev->dev,
			 g->sg[0].ptr[0], (skb->len - skb->data_len),
			 DMA_TO_DEVICE);

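	/* Same gather-list layout as free_netsgbuf(): fragment DMA addresses
	 * are unmapped from g->sg[i >> 2].ptr[i & 3], starting at i = 1.
	 */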
	i = 1;
	while (frags--) {
		skb_frag_t *frag = &skb_shinfo(skb)->frags[i - 1];

		pci_unmap_page((lio->oct_dev)->pci_dev,
			       g->sg[(i >> 2)].ptr[(i & 3)],
			       skb_frag_size(frag), DMA_TO_DEVICE);
		i++;
	}

	iq = skb_iq(lio->oct_dev, skb);

	spin_lock(&lio->glist_lock[iq]);
	list_add_tail(&g->list, &lio->glist[iq]);
	spin_unlock(&lio->glist_lock[iq]);

	/* Don't free the skb yet */
}

/**
 * liquidio_ptp_adjfreq - Adjust ptp frequency
 * @ptp: PTP clock info
 * @ppb: how much to adjust by, in parts-per-billion
 */
static int liquidio_ptp_adjfreq(struct ptp_clock_info *ptp, s32 ppb)
{
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;
	u64 comp, delta;
	unsigned long flags;
	bool neg_adj = false;

	if (ppb < 0) {
		neg_adj = true;
		ppb = -ppb;
	}

	/* The hardware adds the clock compensation value to the
	 * PTP clock on every coprocessor clock cycle, so we
	 * compute the delta in terms of coprocessor clocks.
	 */
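	/* The compensation register is a 32.32 fixed-point value, so the
	 * correction for ppb parts-per-billion is (ppb << 32) / clock_rate.
	 * For example, with a 1 GHz coprocessor clock, ppb = 1000 (1 ppm)
	 * gives delta = (1000 << 32) / 1e9, roughly 4295, i.e. about 1e-6
	 * in 32.32 fixed point.
	 */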
	delta = (u64)ppb << 32;
	do_div(delta, oct->coproc_clock_rate);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	comp = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_COMP);
	if (neg_adj)
		comp -= delta;
	else
		comp += delta;
	lio_pci_writeq(oct, comp, CN6XXX_MIO_PTP_CLOCK_COMP);
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}

/**
 * liquidio_ptp_adjtime - Adjust ptp time
 * @ptp: PTP clock info
 * @delta: how much to adjust by, in nanosecs
 */
static int liquidio_ptp_adjtime(struct ptp_clock_info *ptp, s64 delta)
{
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio->ptp_adjust += delta;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}

/**
 * liquidio_ptp_gettime - Get hardware clock time, including any adjustment
 * @ptp: PTP clock info
 * @ts: timespec
 */
static int liquidio_ptp_gettime(struct ptp_clock_info *ptp,
				struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	spin_lock_irqsave(&lio->ptp_lock, flags);
	ns = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_HI);
	ns += lio->ptp_adjust;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	*ts = ns_to_timespec64(ns);

	return 0;
}

/**
 * liquidio_ptp_settime - Set hardware clock time and reset the adjustment
 * @ptp: PTP clock info
 * @ts: timespec
 */
static int liquidio_ptp_settime(struct ptp_clock_info *ptp,
				const struct timespec64 *ts)
{
	u64 ns;
	unsigned long flags;
	struct lio *lio = container_of(ptp, struct lio, ptp_info);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	ns = timespec64_to_ns(ts);

	spin_lock_irqsave(&lio->ptp_lock, flags);
	lio_pci_writeq(oct, ns, CN6XXX_MIO_PTP_CLOCK_HI);
	lio->ptp_adjust = 0;
	spin_unlock_irqrestore(&lio->ptp_lock, flags);

	return 0;
}

/**
 * liquidio_ptp_enable - Request an ancillary PTP feature
 * @ptp: PTP clock info
 * @rq: the requested feature
 * @on: whether to enable or disable the feature
 *
 * Ancillary PTP features are not supported, so this always fails.
 */
static int
liquidio_ptp_enable(struct ptp_clock_info __maybe_unused *ptp,
		    struct ptp_clock_request __maybe_unused *rq,
		    int __maybe_unused on)
{
	return -EOPNOTSUPP;
}

/**
 * oct_ptp_open - Open PTP clock source
 * @netdev: network device
 */
static void oct_ptp_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = (struct octeon_device *)lio->oct_dev;

	spin_lock_init(&lio->ptp_lock);

	snprintf(lio->ptp_info.name, 16, "%s", netdev->name);
	lio->ptp_info.owner = THIS_MODULE;
	lio->ptp_info.max_adj = 250000000;
	lio->ptp_info.n_alarm = 0;
	lio->ptp_info.n_ext_ts = 0;
	lio->ptp_info.n_per_out = 0;
	lio->ptp_info.pps = 0;
	lio->ptp_info.adjfreq = liquidio_ptp_adjfreq;
	lio->ptp_info.adjtime = liquidio_ptp_adjtime;
	lio->ptp_info.gettime64 = liquidio_ptp_gettime;
	lio->ptp_info.settime64 = liquidio_ptp_settime;
	lio->ptp_info.enable = liquidio_ptp_enable;

	lio->ptp_adjust = 0;

	lio->ptp_clock = ptp_clock_register(&lio->ptp_info,
					    &oct->pci_dev->dev);

	if (IS_ERR(lio->ptp_clock))
		lio->ptp_clock = NULL;
}

/**
 * liquidio_ptp_init - Init PTP clock
 * @oct: octeon device
 */
static void liquidio_ptp_init(struct octeon_device *oct)
{
	u64 clock_comp, cfg;

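	/* Program the compensation register with the nominal nanoseconds per
	 * coprocessor cycle in 32.32 fixed point; e.g. a 1 GHz clock gives
	 * (1e9 << 32) / 1e9 = 1 << 32, exactly 1 ns per tick.
	 */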
	clock_comp = (u64)NSEC_PER_SEC << 32;
	do_div(clock_comp, oct->coproc_clock_rate);
	lio_pci_writeq(oct, clock_comp, CN6XXX_MIO_PTP_CLOCK_COMP);

	/* Enable */
	cfg = lio_pci_readq(oct, CN6XXX_MIO_PTP_CLOCK_CFG);
	lio_pci_writeq(oct, cfg | 0x01, CN6XXX_MIO_PTP_CLOCK_CFG);
}

/**
 * load_firmware - Load firmware to device
 * @oct: octeon device
 *
 * Maps device to firmware filename, requests firmware, and downloads it
 */
static int load_firmware(struct octeon_device *oct)
{
	int ret = 0;
	const struct firmware *fw;
	char fw_name[LIO_MAX_FW_FILENAME_LEN];
	char *tmp_fw_type;

	if (fw_type_is_auto()) {
		tmp_fw_type = LIO_FW_NAME_TYPE_NIC;
		strncpy(fw_type, tmp_fw_type, sizeof(fw_type));
	} else {
		tmp_fw_type = fw_type;
	}

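	/* Compose the firmware path from the card name and image type; for a
	 * CN23xx NIC image this typically resolves to something like
	 * "liquidio/lio_23xx_nic.bin".
	 */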
	sprintf(fw_name, "%s%s%s_%s%s", LIO_FW_DIR, LIO_FW_BASE_NAME,
		octeon_get_conf(oct)->card_name, tmp_fw_type,
		LIO_FW_NAME_SUFFIX);

	ret = request_firmware(&fw, fw_name, &oct->pci_dev->dev);
	if (ret) {
		dev_err(&oct->pci_dev->dev, "Request firmware failed. Could not find file %s.\n",
			fw_name);
		release_firmware(fw);
		return ret;
	}

	ret = octeon_download_firmware(oct, fw->data, fw->size);

	release_firmware(fw);

	return ret;
}

/**
 * octnet_poll_check_txq_status - Poll routine for checking transmit queue status
 * @work: work_struct data structure
 */
static void octnet_poll_check_txq_status(struct work_struct *work)
{
	struct cavium_wk *wk = (struct cavium_wk *)work;
	struct lio *lio = (struct lio *)wk->ctxptr;

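	/* Re-arm ourselves every millisecond while the interface is running;
	 * returning without re-queueing ends the polling loop.
	 */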
	if (!ifstate_check(lio, LIO_IFSTATE_RUNNING))
		return;

	check_txq_status(lio);
	queue_delayed_work(lio->txq_status_wq.wq,
			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
}

/**
 * setup_tx_poll_fn - Sets up the txq poll check
 * @netdev: network device
 */
static inline int setup_tx_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	lio->txq_status_wq.wq = alloc_workqueue("txq-status",
						WQ_MEM_RECLAIM, 0);
	if (!lio->txq_status_wq.wq) {
		dev_err(&oct->pci_dev->dev, "unable to create cavium txq status wq\n");
		return -1;
	}
	INIT_DELAYED_WORK(&lio->txq_status_wq.wk.work,
			  octnet_poll_check_txq_status);
	lio->txq_status_wq.wk.ctxptr = lio;
	queue_delayed_work(lio->txq_status_wq.wq,
			   &lio->txq_status_wq.wk.work, msecs_to_jiffies(1));
	return 0;
}

static inline void cleanup_tx_poll_fn(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);

	if (lio->txq_status_wq.wq) {
		cancel_delayed_work_sync(&lio->txq_status_wq.wk.work);
		destroy_workqueue(lio->txq_status_wq.wq);
	}
}

/**
 * liquidio_open - Net device open for LiquidIO
 * @netdev: network device
 */
static int liquidio_open(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	struct napi_struct *napi, *n;
	int ret = 0;

	if (oct->props[lio->ifidx].napi_enabled == 0) {
		tasklet_disable(&oct_priv->droq_tasklet);

		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_enable(napi);

		oct->props[lio->ifidx].napi_enabled = 1;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 1;
	}

	if (oct->ptp_enable)
		oct_ptp_open(netdev);

	ifstate_set(lio, LIO_IFSTATE_RUNNING);

	if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on) {
		ret = setup_tx_poll_fn(netdev);
		if (ret)
			goto err_poll;
	}

	netif_tx_start_all_queues(netdev);

	/* Ready for link status updates */
	lio->intf_open = 1;

	netif_info(lio, ifup, lio->netdev, "Interface Open, ready for traffic\n");

	/* tell Octeon to start forwarding packets to host */
	ret = send_rx_ctrl_cmd(lio, 1);
	if (ret)
		goto err_rx_ctrl;

	/* start periodical statistics fetch */
	INIT_DELAYED_WORK(&lio->stats_wk.work, lio_fetch_stats);
	lio->stats_wk.ctxptr = lio;
	schedule_delayed_work(&lio->stats_wk.work, msecs_to_jiffies
			      (LIQUIDIO_NDEV_STATS_POLL_TIME_MS));

	dev_info(&oct->pci_dev->dev, "%s interface is opened\n",
		 netdev->name);

	return 0;

err_rx_ctrl:
	if (!OCTEON_CN23XX_PF(oct) || !oct->msix_on)
		cleanup_tx_poll_fn(netdev);
err_poll:
	if (lio->ptp_clock) {
		ptp_clock_unregister(lio->ptp_clock);
		lio->ptp_clock = NULL;
	}

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 0;
	}

	return ret;
}

/**
 * liquidio_stop - Net device stop for LiquidIO
 * @netdev: network device
 */
static int liquidio_stop(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_device_priv *oct_priv =
		(struct octeon_device_priv *)oct->priv;
	struct napi_struct *napi, *n;
	int ret = 0;

	ifstate_reset(lio, LIO_IFSTATE_RUNNING);

	/* Stop any link updates */
	lio->intf_open = 0;

	stop_txqs(netdev);

	/* Inform that netif carrier is down */
	netif_carrier_off(netdev);
	netif_tx_disable(netdev);

	lio->linfo.link.s.link_up = 0;
	lio->link_changes++;

	/* Tell Octeon that nic interface is down. */
	ret = send_rx_ctrl_cmd(lio, 0);
	if (ret)
		return ret;

	if (OCTEON_CN23XX_PF(oct)) {
		if (!oct->msix_on)
			cleanup_tx_poll_fn(netdev);
	} else {
		cleanup_tx_poll_fn(netdev);
	}

	cancel_delayed_work_sync(&lio->stats_wk.work);

	if (lio->ptp_clock) {
		ptp_clock_unregister(lio->ptp_clock);
		lio->ptp_clock = NULL;
	}

	/* Wait for any pending Rx descriptors */
	if (lio_wait_for_clean_oq(oct))
		netif_info(lio, rx_err, lio->netdev,
			   "Proceeding with stop interface after partial RX desc processing\n");

	if (oct->props[lio->ifidx].napi_enabled == 1) {
		list_for_each_entry_safe(napi, n, &netdev->napi_list, dev_list)
			napi_disable(napi);

		oct->props[lio->ifidx].napi_enabled = 0;

		if (OCTEON_CN23XX_PF(oct))
			oct->droq[0]->ops.poll_mode = 0;

		tasklet_enable(&oct_priv->droq_tasklet);
	}

	dev_info(&oct->pci_dev->dev, "%s interface is stopped\n", netdev->name);

	return ret;
}

/**
 * get_new_flags - Convert net device flags to an octnet_ifflags mask
 * @netdev: network device
 *
 * This routine generates an octnet_ifflags mask from the net device flags
 * received from the OS.
 */
static inline enum octnet_ifflags get_new_flags(struct net_device *netdev)
{
	enum octnet_ifflags f = OCTNET_IFFLAG_UNICAST;

	if (netdev->flags & IFF_PROMISC)
		f |= OCTNET_IFFLAG_PROMISC;

	if (netdev->flags & IFF_ALLMULTI)
		f |= OCTNET_IFFLAG_ALLMULTI;

	if (netdev->flags & IFF_MULTICAST) {
		f |= OCTNET_IFFLAG_MULTICAST;

		/* Accept all multicast addresses if there are more than we
		 * can handle
		 */
		if (netdev_mc_count(netdev) > MAX_OCTEON_MULTICAST_ADDR)
			f |= OCTNET_IFFLAG_ALLMULTI;
	}

	if (netdev->flags & IFF_BROADCAST)
		f |= OCTNET_IFFLAG_BROADCAST;

	return f;
}

/**
 * liquidio_set_mcast_list - Net device set_multicast_list
 * @netdev: network device
 */
static void liquidio_set_mcast_list(struct net_device *netdev)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	struct netdev_hw_addr *ha;
	u64 *mc;
	int ret;
	int mc_count = min(netdev_mc_count(netdev), MAX_OCTEON_MULTICAST_ADDR);

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	/* Create a ctrl pkt command to be sent to core app. */
	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_MULTI_LIST;
	nctrl.ncmd.s.param1 = get_new_flags(netdev);
	nctrl.ncmd.s.param2 = mc_count;
	nctrl.ncmd.s.more = mc_count;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

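	/* Each multicast MAC is packed into the low six bytes of a
	 * network-byte-order u64 in nctrl.udd[], the same layout used by the
	 * MAC address change command below.
	 */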
	/* copy all the addresses into the udd */
	mc = &nctrl.udd[0];
	netdev_for_each_mc_addr(ha, netdev) {
		*mc = 0;
		memcpy(((u8 *)mc) + 2, ha->addr, ETH_ALEN);
		/* no need to swap bytes */

		if (++mc > &nctrl.udd[mc_count])
			break;
	}

	/* Apparently, any activity in this call from the kernel has to
	 * be atomic. So we won't wait for response.
	 */

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret) {
		dev_err(&oct->pci_dev->dev, "DEVFLAGS change failed in core (ret: 0x%x)\n",
			ret);
	}
}

/**
 * liquidio_set_mac - Net device set_mac_address
 * @netdev: network device
 * @p: pointer to sockaddr
 */
static int liquidio_set_mac(struct net_device *netdev, void *p)
{
	int ret = 0;
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct sockaddr *addr = (struct sockaddr *)p;
	struct octnic_ctrl_pkt nctrl;

	if (!is_valid_ether_addr(addr->sa_data))
		return -EADDRNOTAVAIL;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
	nctrl.ncmd.s.param1 = 0;
	nctrl.ncmd.s.more = 1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;

	nctrl.udd[0] = 0;
	/* The MAC Address is presented in network byte order. */
	memcpy((u8 *)&nctrl.udd[0] + 2, addr->sa_data, ETH_ALEN);

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret < 0) {
		dev_err(&oct->pci_dev->dev, "MAC Address change failed\n");
		return -ENOMEM;
	}

	if (nctrl.sc_status) {
		dev_err(&oct->pci_dev->dev,
			"%s: MAC Address change failed. sc return=%x\n",
			__func__, nctrl.sc_status);
		return -EIO;
	}

	memcpy(netdev->dev_addr, addr->sa_data, netdev->addr_len);
	memcpy(((u8 *)&lio->linfo.hw_addr) + 2, addr->sa_data, ETH_ALEN);

	return 0;
}

static void
liquidio_get_stats64(struct net_device *netdev,
		     struct rtnl_link_stats64 *lstats)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct;
	u64 pkts = 0, drop = 0, bytes = 0;
	struct oct_droq_stats *oq_stats;
	struct oct_iq_stats *iq_stats;
	int i, iq_no, oq_no;

	oct = lio->oct_dev;

	if (ifstate_check(lio, LIO_IFSTATE_RESETTING))
		return;

	for (i = 0; i < oct->num_iqs; i++) {
		iq_no = lio->linfo.txpciq[i].s.q_no;
		iq_stats = &oct->instr_queue[iq_no]->stats;
		pkts += iq_stats->tx_done;
		drop += iq_stats->tx_dropped;
		bytes += iq_stats->tx_tot_bytes;
	}

	lstats->tx_packets = pkts;
	lstats->tx_bytes = bytes;
	lstats->tx_dropped = drop;

	pkts = 0;
	drop = 0;
	bytes = 0;

	for (i = 0; i < oct->num_oqs; i++) {
		oq_no = lio->linfo.rxpciq[i].s.q_no;
		oq_stats = &oct->droq[oq_no]->stats;
		pkts += oq_stats->rx_pkts_received;
		drop += (oq_stats->rx_dropped +
			 oq_stats->dropped_nodispatch +
			 oq_stats->dropped_toomany +
			 oq_stats->dropped_nomem);
		bytes += oq_stats->rx_bytes_received;
	}

	lstats->rx_bytes = bytes;
	lstats->rx_packets = pkts;
	lstats->rx_dropped = drop;

	lstats->multicast = oct->link_stats.fromwire.fw_total_mcast;
	lstats->collisions = oct->link_stats.fromhost.total_collisions;

	/* detailed rx_errors: */
	lstats->rx_length_errors = oct->link_stats.fromwire.l2_err;
	/* received packets with CRC error */
	lstats->rx_crc_errors = oct->link_stats.fromwire.fcs_err;
	/* received frame alignment errors */
	lstats->rx_frame_errors = oct->link_stats.fromwire.frame_err;
	/* receiver FIFO overruns */
	lstats->rx_fifo_errors = oct->link_stats.fromwire.fifo_err;

	lstats->rx_errors = lstats->rx_length_errors + lstats->rx_crc_errors +
			    lstats->rx_frame_errors + lstats->rx_fifo_errors;

	/* detailed tx_errors */
	lstats->tx_aborted_errors = oct->link_stats.fromhost.fw_err_pko;
	lstats->tx_carrier_errors = oct->link_stats.fromhost.fw_err_link;
	lstats->tx_fifo_errors = oct->link_stats.fromhost.fifo_err;

	lstats->tx_errors = lstats->tx_aborted_errors +
			    lstats->tx_carrier_errors +
			    lstats->tx_fifo_errors;
}

/**
 * hwtstamp_ioctl - Handler for SIOCSHWTSTAMP ioctl
 * @netdev: network device
 * @ifr: interface request
 */
static int hwtstamp_ioctl(struct net_device *netdev, struct ifreq *ifr)
{
	struct hwtstamp_config conf;
	struct lio *lio = GET_LIO(netdev);

	if (copy_from_user(&conf, ifr->ifr_data, sizeof(conf)))
		return -EFAULT;

	if (conf.flags)
		return -EINVAL;

	switch (conf.tx_type) {
	case HWTSTAMP_TX_ON:
	case HWTSTAMP_TX_OFF:
		break;
	default:
		return -ERANGE;
	}

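	/* The NIC timestamps either every received packet or none, so any of
	 * the finer-grained PTP filter requests below is upgraded to
	 * HWTSTAMP_FILTER_ALL and reported back to the caller.
	 */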
	switch (conf.rx_filter) {
	case HWTSTAMP_FILTER_NONE:
		break;
	case HWTSTAMP_FILTER_ALL:
	case HWTSTAMP_FILTER_SOME:
	case HWTSTAMP_FILTER_PTP_V1_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V1_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V1_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L4_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L4_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L4_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_L2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_L2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_L2_DELAY_REQ:
	case HWTSTAMP_FILTER_PTP_V2_EVENT:
	case HWTSTAMP_FILTER_PTP_V2_SYNC:
	case HWTSTAMP_FILTER_PTP_V2_DELAY_REQ:
	case HWTSTAMP_FILTER_NTP_ALL:
		conf.rx_filter = HWTSTAMP_FILTER_ALL;
		break;
	default:
		return -ERANGE;
	}

	if (conf.rx_filter == HWTSTAMP_FILTER_ALL)
		ifstate_set(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);
	else
		ifstate_reset(lio, LIO_IFSTATE_RX_TIMESTAMP_ENABLED);

	return copy_to_user(ifr->ifr_data, &conf, sizeof(conf)) ? -EFAULT : 0;
}

/**
 * liquidio_ioctl - ioctl handler
 * @netdev: network device
 * @ifr: interface request
 * @cmd: command
 */
static int liquidio_ioctl(struct net_device *netdev, struct ifreq *ifr, int cmd)
{
	struct lio *lio = GET_LIO(netdev);

	switch (cmd) {
	case SIOCSHWTSTAMP:
		if (lio->oct_dev->ptp_enable)
			return hwtstamp_ioctl(netdev, ifr);
		fallthrough;
	default:
		return -EOPNOTSUPP;
	}
}

/**
 * handle_timestamp - handle a Tx timestamp response
 * @oct: octeon device
 * @status: response status
 * @buf: pointer to skb
 */
static void handle_timestamp(struct octeon_device *oct,
			     u32 status,
			     void *buf)
{
	struct octnet_buf_free_info *finfo;
	struct octeon_soft_command *sc;
	struct oct_timestamp_resp *resp;
	struct lio *lio;
	struct sk_buff *skb = (struct sk_buff *)buf;

	finfo = (struct octnet_buf_free_info *)skb->cb;
	lio = finfo->lio;
	sc = finfo->sc;
	oct = lio->oct_dev;
	resp = (struct oct_timestamp_resp *)sc->virtrptr;

	if (status != OCTEON_REQUEST_DONE) {
		dev_err(&oct->pci_dev->dev, "Tx timestamp instruction failed. Status: %llx\n",
			CVM_CAST64(status));
		resp->timestamp = 0;
	}

	octeon_swap_8B_data(&resp->timestamp, 1);

	if (unlikely((skb_shinfo(skb)->tx_flags & SKBTX_IN_PROGRESS) != 0)) {
		struct skb_shared_hwtstamps ts;
		u64 ns = resp->timestamp;

		netif_info(lio, tx_done, lio->netdev,
			   "Got resulting SKBTX_HW_TSTAMP skb=%p ns=%016llu\n",
			   skb, (unsigned long long)ns);
		ts.hwtstamp = ns_to_ktime(ns + lio->ptp_adjust);
		skb_tstamp_tx(skb, &ts);
	}

	octeon_free_soft_command(oct, sc);
	tx_buffer_free(skb);
}

/**
 * send_nic_timestamp_pkt - Send a data packet that will be timestamped
 * @oct: octeon device
 * @ndata: pointer to network data
 * @finfo: pointer to private network data
 * @xmit_more: more is coming
 */
static inline int send_nic_timestamp_pkt(struct octeon_device *oct,
					 struct octnic_data_pkt *ndata,
					 struct octnet_buf_free_info *finfo,
					 int xmit_more)
{
	int retval;
	struct octeon_soft_command *sc;
	struct lio *lio;
	int ring_doorbell;
	u32 len;

	lio = finfo->lio;

	sc = octeon_alloc_soft_command_resp(oct, &ndata->cmd,
					    sizeof(struct oct_timestamp_resp));
	finfo->sc = sc;

	if (!sc) {
		dev_err(&oct->pci_dev->dev, "No memory for timestamped data packet\n");
		return IQ_SEND_FAILED;
	}

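	/* Switch the request to its response-carrying variant so the firmware
	 * returns the Tx timestamp in the soft command's response buffer.
	 */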
	if (ndata->reqtype == REQTYPE_NORESP_NET)
		ndata->reqtype = REQTYPE_RESP_NET;
	else if (ndata->reqtype == REQTYPE_NORESP_NET_SG)
		ndata->reqtype = REQTYPE_RESP_NET_SG;

	sc->callback = handle_timestamp;
	sc->callback_arg = finfo->skb;
	sc->iq_no = ndata->q_no;

	if (OCTEON_CN23XX_PF(oct))
		len = (u32)((struct octeon_instr_ih3 *)
			    (&sc->cmd.cmd3.ih3))->dlengsz;
	else
		len = (u32)((struct octeon_instr_ih2 *)
			    (&sc->cmd.cmd2.ih2))->dlengsz;

	ring_doorbell = !xmit_more;

	retval = octeon_send_command(oct, sc->iq_no, ring_doorbell, &sc->cmd,
				     sc, len, ndata->reqtype);

	if (retval == IQ_SEND_FAILED) {
		dev_err(&oct->pci_dev->dev, "timestamp data packet failed status: %x\n",
			retval);
		octeon_free_soft_command(oct, sc);
	} else {
		netif_info(lio, tx_queued, lio->netdev, "Queued timestamp packet\n");
	}

	return retval;
}

/**
 * liquidio_xmit - Transmit network packets to the Octeon interface
 * @skb: skbuff struct to be passed to network layer.
 * @netdev: pointer to network device
 *
 * Return: whether the packet was transmitted to the device okay or not
 * (NETDEV_TX_OK or NETDEV_TX_BUSY)
 */
static netdev_tx_t liquidio_xmit(struct sk_buff *skb, struct net_device *netdev)
{
	struct lio *lio;
	struct octnet_buf_free_info *finfo;
	union octnic_cmd_setup cmdsetup;
	struct octnic_data_pkt ndata;
	struct octeon_device *oct;
	struct oct_iq_stats *stats;
	struct octeon_instr_irh *irh;
	union tx_info *tx_info;
	int status = 0;
	int q_idx = 0, iq_no = 0;
	int j, xmit_more = 0;
	u64 dptr = 0;
	u32 tag = 0;

	lio = GET_LIO(netdev);
	oct = lio->oct_dev;

	q_idx = skb_iq(oct, skb);
	tag = q_idx;
	iq_no = lio->linfo.txpciq[q_idx].s.q_no;

	stats = &oct->instr_queue[iq_no]->stats;

	/* Check for all conditions in which the current packet cannot be
	 * transmitted.
	 */
	if (!(atomic_read(&lio->ifstate) & LIO_IFSTATE_RUNNING) ||
	    (!lio->linfo.link.s.link_up) ||
	    (skb->len <= 0)) {
		netif_info(lio, tx_err, lio->netdev,
			   "Transmit failed link_status : %d\n",
			   lio->linfo.link.s.link_up);
		goto lio_xmit_failed;
	}

	/* Use space in skb->cb to store info used to unmap and
	 * free the buffers.
	 */
	finfo = (struct octnet_buf_free_info *)skb->cb;
	finfo->lio = lio;
	finfo->skb = skb;
	finfo->sc = NULL;

	/* Prepare the attributes for the data to be passed to OSI. */
	memset(&ndata, 0, sizeof(struct octnic_data_pkt));

	ndata.buf = (void *)finfo;

	ndata.q_no = iq_no;

	if (octnet_iq_is_full(oct, ndata.q_no)) {
		/* defer sending if queue is full */
		netif_info(lio, tx_err, lio->netdev, "Transmit failed iq:%d full\n",
			   ndata.q_no);
		stats->tx_iq_busy++;
		return NETDEV_TX_BUSY;
	}

	/* pr_info(" XMIT - valid Qs: %d, 1st Q no: %d, cpu: %d, q_no:%d\n",
	 *	lio->linfo.num_txpciq, lio->txq, cpu, ndata.q_no);
	 */

	ndata.datasize = skb->len;

	cmdsetup.u64 = 0;
	cmdsetup.s.iq_no = iq_no;

	if (skb->ip_summed == CHECKSUM_PARTIAL) {
		if (skb->encapsulation) {
			cmdsetup.s.tnl_csum = 1;
			stats->tx_vxlan++;
		} else {
			cmdsetup.s.transport_csum = 1;
		}
	}
	if (unlikely(skb_shinfo(skb)->tx_flags & SKBTX_HW_TSTAMP)) {
		skb_shinfo(skb)->tx_flags |= SKBTX_IN_PROGRESS;
		cmdsetup.s.timestamp = 1;
	}

	if (skb_shinfo(skb)->nr_frags == 0) {
		cmdsetup.s.u.datasize = skb->len;
		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);

		/* Map the linear skb data for DMA to the device */
		dptr = dma_map_single(&oct->pci_dev->dev,
				      skb->data,
				      skb->len,
				      DMA_TO_DEVICE);
		if (dma_mapping_error(&oct->pci_dev->dev, dptr)) {
			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 1\n",
				__func__);
			stats->tx_dmamap_fail++;
			return NETDEV_TX_BUSY;
		}

		if (OCTEON_CN23XX_PF(oct))
			ndata.cmd.cmd3.dptr = dptr;
		else
			ndata.cmd.cmd2.dptr = dptr;
		finfo->dptr = dptr;
		ndata.reqtype = REQTYPE_NORESP_NET;

	} else {
		int i, frags;
		skb_frag_t *frag;
		struct octnic_gather *g;

		spin_lock(&lio->glist_lock[q_idx]);
		g = (struct octnic_gather *)
			lio_list_delete_head(&lio->glist[q_idx]);
		spin_unlock(&lio->glist_lock[q_idx]);

		if (!g) {
			netif_info(lio, tx_err, lio->netdev,
				   "Transmit scatter gather: glist null!\n");
			goto lio_xmit_failed;
		}

		cmdsetup.s.gather = 1;
		cmdsetup.s.u.gatherptrs = (skb_shinfo(skb)->nr_frags + 1);
		octnet_prepare_pci_cmd(oct, &ndata.cmd, &cmdsetup, tag);

		memset(g->sg, 0, g->sg_size);

		g->sg[0].ptr[0] = dma_map_single(&oct->pci_dev->dev,
						 skb->data,
						 (skb->len - skb->data_len),
						 DMA_TO_DEVICE);
		if (dma_mapping_error(&oct->pci_dev->dev, g->sg[0].ptr[0])) {
			dev_err(&oct->pci_dev->dev, "%s DMA mapping error 2\n",
				__func__);
			stats->tx_dmamap_fail++;
			return NETDEV_TX_BUSY;
		}
		add_sg_size(&g->sg[0], (skb->len - skb->data_len), 0);

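		/* Each gather-list entry holds four pointers, so fragment i
		 * lands in entry (i >> 2), slot (i & 3). Slot 0 of entry 0
		 * already holds the linear part of the skb mapped above.
		 */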
		frags = skb_shinfo(skb)->nr_frags;
		i = 1;
		while (frags--) {
			frag = &skb_shinfo(skb)->frags[i - 1];

			g->sg[(i >> 2)].ptr[(i & 3)] =
				skb_frag_dma_map(&oct->pci_dev->dev,
						 frag, 0, skb_frag_size(frag),
						 DMA_TO_DEVICE);

			if (dma_mapping_error(&oct->pci_dev->dev,
					      g->sg[i >> 2].ptr[i & 3])) {
				dma_unmap_single(&oct->pci_dev->dev,
						 g->sg[0].ptr[0],
						 skb->len - skb->data_len,
						 DMA_TO_DEVICE);
				for (j = 1; j < i; j++) {
					frag = &skb_shinfo(skb)->frags[j - 1];
					dma_unmap_page(&oct->pci_dev->dev,
						       g->sg[j >> 2].ptr[j & 3],
						       skb_frag_size(frag),
						       DMA_TO_DEVICE);
				}
				dev_err(&oct->pci_dev->dev, "%s DMA mapping error 3\n",
					__func__);
				return NETDEV_TX_BUSY;
			}

			add_sg_size(&g->sg[(i >> 2)], skb_frag_size(frag),
				    (i & 3));
			i++;
		}

		dptr = g->sg_dma_ptr;

		if (OCTEON_CN23XX_PF(oct))
			ndata.cmd.cmd3.dptr = dptr;
		else
			ndata.cmd.cmd2.dptr = dptr;
		finfo->dptr = dptr;
		finfo->g = g;

		ndata.reqtype = REQTYPE_NORESP_NET_SG;
	}

	if (OCTEON_CN23XX_PF(oct)) {
		irh = (struct octeon_instr_irh *)&ndata.cmd.cmd3.irh;
		tx_info = (union tx_info *)&ndata.cmd.cmd3.ossp[0];
	} else {
		irh = (struct octeon_instr_irh *)&ndata.cmd.cmd2.irh;
		tx_info = (union tx_info *)&ndata.cmd.cmd2.ossp[0];
	}

	if (skb_shinfo(skb)->gso_size) {
		tx_info->s.gso_size = skb_shinfo(skb)->gso_size;
		tx_info->s.gso_segs = skb_shinfo(skb)->gso_segs;
		stats->tx_gso++;
	}

	/* HW insert VLAN tag */
	if (skb_vlan_tag_present(skb)) {
		irh->priority = skb_vlan_tag_get(skb) >> 13;
		irh->vlan = skb_vlan_tag_get(skb) & 0xfff;
	}

	xmit_more = netdev_xmit_more();

	if (unlikely(cmdsetup.s.timestamp))
		status = send_nic_timestamp_pkt(oct, &ndata, finfo, xmit_more);
	else
		status = octnet_send_nic_data_pkt(oct, &ndata, xmit_more);
	if (status == IQ_SEND_FAILED)
		goto lio_xmit_failed;

	netif_info(lio, tx_queued, lio->netdev, "Transmit queued successfully\n");

	if (status == IQ_SEND_STOP)
		netif_stop_subqueue(netdev, q_idx);

	netif_trans_update(netdev);

	if (tx_info->s.gso_segs)
		stats->tx_done += tx_info->s.gso_segs;
	else
		stats->tx_done++;
	stats->tx_tot_bytes += ndata.datasize;

	return NETDEV_TX_OK;

lio_xmit_failed:
	stats->tx_dropped++;
	netif_info(lio, tx_err, lio->netdev, "IQ%d Transmit dropped:%llu\n",
		   iq_no, stats->tx_dropped);
	if (dptr)
		dma_unmap_single(&oct->pci_dev->dev, dptr,
				 ndata.datasize, DMA_TO_DEVICE);

	octeon_ring_doorbell_locked(oct, iq_no);

	tx_buffer_free(skb);
	return NETDEV_TX_OK;
}

/**
 * liquidio_tx_timeout - Network device Tx timeout
 * @netdev: pointer to network device
 * @txqueue: index of the hung transmit queue
 */
static void liquidio_tx_timeout(struct net_device *netdev, unsigned int txqueue)
{
	struct lio *lio;

	lio = GET_LIO(netdev);

	netif_info(lio, tx_err, lio->netdev,
		   "Transmit timeout tx_dropped:%ld, waking up queues now!!\n",
		   netdev->stats.tx_dropped);
	netif_trans_update(netdev);
	wake_txqs(netdev);
}

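/**
 * liquidio_vlan_rx_add_vid - Add a VLAN filter for the given VLAN id
 * @netdev: pointer to network device
 * @proto: VLAN protocol (unused)
 * @vid: VLAN id to add
 *
 * Return: 0 on success, negative errno on failure
 */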
static int liquidio_vlan_rx_add_vid(struct net_device *netdev,
				    __be16 proto __attribute__((unused)),
				    u16 vid)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
	nctrl.ncmd.s.param1 = vid;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret) {
		dev_err(&oct->pci_dev->dev, "Add VLAN filter failed in core (ret: 0x%x)\n",
			ret);
		if (ret > 0)
			ret = -EIO;
	}

	return ret;
}

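/**
 * liquidio_vlan_rx_kill_vid - Remove the VLAN filter for the given VLAN id
 * @netdev: pointer to network device
 * @proto: VLAN protocol (unused)
 * @vid: VLAN id to remove
 *
 * Return: 0 on success, negative errno on failure
 */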
static int liquidio_vlan_rx_kill_vid(struct net_device *netdev,
				     __be16 proto __attribute__((unused)),
				     u16 vid)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;
	nctrl.ncmd.s.param1 = vid;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret) {
		dev_err(&oct->pci_dev->dev, "Del VLAN filter failed in core (ret: 0x%x)\n",
			ret);
		if (ret > 0)
			ret = -EIO;
	}
	return ret;
}

/**
 * liquidio_set_rxcsum_command - Send command to enable/disable RX checksum offload
 * @netdev: pointer to network device
 * @command: OCTNET_CMD_TNL_RX_CSUM_CTL
 * @rx_cmd: OCTNET_CMD_RXCSUM_ENABLE/OCTNET_CMD_RXCSUM_DISABLE
 * Return: 0 on success, negative errno on failure
 */
static int liquidio_set_rxcsum_command(struct net_device *netdev, int command,
				       u8 rx_cmd)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = command;
	nctrl.ncmd.s.param1 = rx_cmd;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret) {
		dev_err(&oct->pci_dev->dev,
			"DEVFLAGS RXCSUM change failed in core(ret:0x%x)\n",
			ret);
		if (ret > 0)
			ret = -EIO;
	}
	return ret;
}

/**
 * liquidio_vxlan_port_command - Send command to add/delete VxLAN UDP port to firmware
 * @netdev: pointer to network device
 * @command: OCTNET_CMD_VXLAN_PORT_CONFIG
 * @vxlan_port: VxLAN port to be added or deleted
 * @vxlan_cmd_bit: OCTNET_CMD_VXLAN_PORT_ADD,
 *		   OCTNET_CMD_VXLAN_PORT_DEL
 * Return: 0 on success, negative errno on failure
 */
static int liquidio_vxlan_port_command(struct net_device *netdev, int command,
				       u16 vxlan_port, u8 vxlan_cmd_bit)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = command;
	nctrl.ncmd.s.more = vxlan_cmd_bit;
	nctrl.ncmd.s.param1 = vxlan_port;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;

	ret = octnet_send_nic_ctrl_pkt(lio->oct_dev, &nctrl);
	if (ret) {
		dev_err(&oct->pci_dev->dev,
			"VxLAN port add/delete failed in core (ret:0x%x)\n",
			ret);
		if (ret > 0)
			ret = -EIO;
	}
	return ret;
}

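/**
 * liquidio_udp_tunnel_set_port - Notify firmware of a new VxLAN UDP port
 * @netdev: pointer to network device
 * @table: index of the udp_tunnel_nic table (unused)
 * @entry: index of the entry within the table (unused)
 * @ti: tunnel info carrying the UDP port number
 *
 * Return: 0 on success, negative errno on failure
 */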
static int liquidio_udp_tunnel_set_port(struct net_device *netdev,
					unsigned int table, unsigned int entry,
					struct udp_tunnel_info *ti)
{
	return liquidio_vxlan_port_command(netdev,
					   OCTNET_CMD_VXLAN_PORT_CONFIG,
					   htons(ti->port),
					   OCTNET_CMD_VXLAN_PORT_ADD);
}

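/**
 * liquidio_udp_tunnel_unset_port - Tell firmware to forget a VxLAN UDP port
 * @netdev: pointer to network device
 * @table: index of the udp_tunnel_nic table (unused)
 * @entry: index of the entry within the table (unused)
 * @ti: tunnel info carrying the UDP port number
 *
 * Return: 0 on success, negative errno on failure
 */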
static int liquidio_udp_tunnel_unset_port(struct net_device *netdev,
					  unsigned int table,
					  unsigned int entry,
					  struct udp_tunnel_info *ti)
{
	return liquidio_vxlan_port_command(netdev,
					   OCTNET_CMD_VXLAN_PORT_CONFIG,
					   htons(ti->port),
					   OCTNET_CMD_VXLAN_PORT_DEL);
}

static const struct udp_tunnel_nic_info liquidio_udp_tunnels = {
	.set_port	= liquidio_udp_tunnel_set_port,
	.unset_port	= liquidio_udp_tunnel_unset_port,
	.tables		= {
		{ .n_entries = 1024, .tunnel_types = UDP_TUNNEL_TYPE_VXLAN, },
	},
};

/**
 * liquidio_fix_features - Net device fix features
 * @netdev: pointer to network device
 * @request: features requested
 * Return: updated features list
 */
static netdev_features_t liquidio_fix_features(struct net_device *netdev,
					       netdev_features_t request)
{
	struct lio *lio = netdev_priv(netdev);

	if ((request & NETIF_F_RXCSUM) &&
	    !(lio->dev_capability & NETIF_F_RXCSUM))
		request &= ~NETIF_F_RXCSUM;

	if ((request & NETIF_F_HW_CSUM) &&
	    !(lio->dev_capability & NETIF_F_HW_CSUM))
		request &= ~NETIF_F_HW_CSUM;

	if ((request & NETIF_F_TSO) && !(lio->dev_capability & NETIF_F_TSO))
		request &= ~NETIF_F_TSO;

	if ((request & NETIF_F_TSO6) && !(lio->dev_capability & NETIF_F_TSO6))
		request &= ~NETIF_F_TSO6;

	if ((request & NETIF_F_LRO) && !(lio->dev_capability & NETIF_F_LRO))
		request &= ~NETIF_F_LRO;

	/* Disable LRO if RXCSUM is off */
	if (!(request & NETIF_F_RXCSUM) && (netdev->features & NETIF_F_LRO) &&
	    (lio->dev_capability & NETIF_F_LRO))
		request &= ~NETIF_F_LRO;

	if ((request & NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    !(lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER))
		request &= ~NETIF_F_HW_VLAN_CTAG_FILTER;

	return request;
}

/**
 * liquidio_set_features - Net device set features
 * @netdev: pointer to network device
 * @features: features to enable/disable
 *
 * Return: 0 in all cases
 */
static int liquidio_set_features(struct net_device *netdev,
				 netdev_features_t features)
{
	struct lio *lio = netdev_priv(netdev);

	if ((features & NETIF_F_LRO) &&
	    (lio->dev_capability & NETIF_F_LRO) &&
	    !(netdev->features & NETIF_F_LRO))
		liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
	else if (!(features & NETIF_F_LRO) &&
		 (lio->dev_capability & NETIF_F_LRO) &&
		 (netdev->features & NETIF_F_LRO))
		liquidio_set_feature(netdev, OCTNET_CMD_LRO_DISABLE,
				     OCTNIC_LROIPV4 | OCTNIC_LROIPV6);

	/* Send command to firmware to enable/disable the RX checksum
	 * offload settings requested via ethtool
	 */
	if (!(netdev->features & NETIF_F_RXCSUM) &&
	    (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
	    (features & NETIF_F_RXCSUM))
		liquidio_set_rxcsum_command(netdev,
					    OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_ENABLE);
	else if ((netdev->features & NETIF_F_RXCSUM) &&
		 (lio->enc_dev_capability & NETIF_F_RXCSUM) &&
		 !(features & NETIF_F_RXCSUM))
		liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
					    OCTNET_CMD_RXCSUM_DISABLE);

	if ((features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
	    !(netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
				     OCTNET_CMD_VLAN_FILTER_ENABLE);
	else if (!(features & NETIF_F_HW_VLAN_CTAG_FILTER) &&
		 (lio->dev_capability & NETIF_F_HW_VLAN_CTAG_FILTER) &&
		 (netdev->features & NETIF_F_HW_VLAN_CTAG_FILTER))
		liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
				     OCTNET_CMD_VLAN_FILTER_DISABLE);

	return 0;
}

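/**
 * __liquidio_set_vf_mac - Program a VF's MAC address in the firmware
 * @netdev: pointer to network device
 * @vfidx: zero-based index of the VF
 * @mac: MAC address to assign
 * @is_admin_assigned: true if the address was assigned by the PF administrator
 *
 * Return: 0 on success, negative errno on failure
 */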
static int __liquidio_set_vf_mac(struct net_device *netdev, int vfidx,
				 u8 *mac, bool is_admin_assigned)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	if (!is_valid_ether_addr(mac))
		return -EINVAL;

	if (vfidx < 0 || vfidx >= oct->sriov_info.max_vfs)
		return -EINVAL;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	nctrl.ncmd.u64 = 0;
	nctrl.ncmd.s.cmd = OCTNET_CMD_CHANGE_MACADDR;
	/* vfidx is 0 based, but vf_num (param1) is 1 based */
	nctrl.ncmd.s.param1 = vfidx + 1;
	nctrl.ncmd.s.more = 1;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.netpndev = (u64)netdev;
	if (is_admin_assigned) {
		nctrl.ncmd.s.param2 = true;
		nctrl.cb_fn = liquidio_link_ctrl_cmd_completion;
	}

	nctrl.udd[0] = 0;
	/* The MAC Address is presented in network byte order. */
	ether_addr_copy((u8 *)&nctrl.udd[0] + 2, mac);

	oct->sriov_info.vf_macaddr[vfidx] = nctrl.udd[0];

	ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
	if (ret > 0)
		ret = -EIO;

	return ret;
}

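/**
 * liquidio_set_vf_mac - ndo handler to set a VF's MAC address
 * @netdev: pointer to network device
 * @vfidx: zero-based index of the VF
 * @mac: MAC address to assign
 *
 * Return: 0 on success, negative errno on failure
 */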
static int liquidio_set_vf_mac(struct net_device *netdev, int vfidx, u8 *mac)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	int retval;

	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
		return -EINVAL;

	retval = __liquidio_set_vf_mac(netdev, vfidx, mac, true);
	if (!retval)
		cn23xx_tell_vf_its_macaddr_changed(oct, vfidx, mac);

	return retval;
}

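/**
 * liquidio_set_vf_spoofchk - Enable/disable MAC anti-spoof checking for a VF
 * @netdev: pointer to network device
 * @vfidx: zero-based index of the VF
 * @enable: true to enable spoof checking, false to disable
 *
 * Return: 0 on success, negative value on failure
 */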
static int liquidio_set_vf_spoofchk(struct net_device *netdev, int vfidx,
				    bool enable)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int retval;

	if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SPOOFCHK_CAP)) {
		netif_info(lio, drv, lio->netdev,
			   "firmware does not support spoofchk\n");
		return -EOPNOTSUPP;
	}

	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
		netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
		return -EINVAL;
	}

	if (enable) {
		if (oct->sriov_info.vf_spoofchk[vfidx])
			return 0;
	} else {
		/* Clear */
		if (!oct->sriov_info.vf_spoofchk[vfidx])
			return 0;
	}

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
	nctrl.ncmd.s.cmdgroup = OCTNET_CMD_GROUP1;
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_SPOOFCHK;
	/* vfidx is 0 based, but vf_num (param1) is 1 based */
	nctrl.ncmd.s.param1 = vfidx + 1;
	nctrl.ncmd.s.param2 = enable;
	nctrl.ncmd.s.more = 0;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.cb_fn = NULL;

	retval = octnet_send_nic_ctrl_pkt(oct, &nctrl);

	if (retval) {
		netif_info(lio, drv, lio->netdev,
			   "Failed to set VF %d spoofchk %s\n", vfidx,
			   enable ? "on" : "off");
		return -1;
	}

	oct->sriov_info.vf_spoofchk[vfidx] = enable;
	netif_info(lio, drv, lio->netdev, "VF %u spoofchk is %s\n", vfidx,
		   enable ? "on" : "off");

	return 0;
}

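/**
 * liquidio_set_vf_vlan - Set the port VLAN and QoS priority for a VF
 * @netdev: pointer to network device
 * @vfidx: zero-based index of the VF
 * @vlan: VLAN id (0 clears the VLAN filter)
 * @qos: priority (0-7) placed in the VLAN TCI
 * @vlan_proto: VLAN protocol; only 802.1Q is supported
 *
 * Return: 0 on success, negative errno on failure
 */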
static int liquidio_set_vf_vlan(struct net_device *netdev, int vfidx,
				u16 vlan, u8 qos, __be16 vlan_proto)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	u16 vlantci;
	int ret = 0;

	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
		return -EINVAL;

	if (vlan_proto != htons(ETH_P_8021Q))
		return -EPROTONOSUPPORT;

	if (vlan >= VLAN_N_VID || qos > 7)
		return -EINVAL;

	if (vlan)
		vlantci = vlan | (u16)qos << VLAN_PRIO_SHIFT;
	else
		vlantci = 0;

	if (oct->sriov_info.vf_vlantci[vfidx] == vlantci)
		return 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));

	if (vlan)
		nctrl.ncmd.s.cmd = OCTNET_CMD_ADD_VLAN_FILTER;
	else
		nctrl.ncmd.s.cmd = OCTNET_CMD_DEL_VLAN_FILTER;

	nctrl.ncmd.s.param1 = vlantci;
	/* vfidx is 0 based, but vf_num (param2) is 1 based */
	nctrl.ncmd.s.param2 = vfidx + 1;
	nctrl.ncmd.s.more = 0;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.cb_fn = NULL;

	ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);
	if (ret) {
		if (ret > 0)
			ret = -EIO;
		return ret;
	}

	oct->sriov_info.vf_vlantci[vfidx] = vlantci;

	return ret;
}

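/**
 * liquidio_get_vf_config - Report a VF's configuration to the stack
 * @netdev: pointer to network device
 * @vfidx: zero-based index of the VF
 * @ivi: filled with the VF's MAC, VLAN, trust, spoofchk and link state
 *
 * Return: 0 on success, negative errno on failure
 */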
static int liquidio_get_vf_config(struct net_device *netdev, int vfidx,
				  struct ifla_vf_info *ivi)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	u8 *macaddr;

	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
		return -EINVAL;

	memset(ivi, 0, sizeof(struct ifla_vf_info));

	ivi->vf = vfidx;
	macaddr = 2 + (u8 *)&oct->sriov_info.vf_macaddr[vfidx];
	ether_addr_copy(&ivi->mac[0], macaddr);
	ivi->vlan = oct->sriov_info.vf_vlantci[vfidx] & VLAN_VID_MASK;
	ivi->qos = oct->sriov_info.vf_vlantci[vfidx] >> VLAN_PRIO_SHIFT;
	if (oct->sriov_info.trusted_vf.active &&
	    oct->sriov_info.trusted_vf.id == vfidx)
		ivi->trusted = true;
	else
		ivi->trusted = false;
	ivi->linkstate = oct->sriov_info.vf_linkstate[vfidx];
	ivi->spoofchk = oct->sriov_info.vf_spoofchk[vfidx];
	ivi->max_tx_rate = lio->linfo.link.s.speed;
	ivi->min_tx_rate = 0;

	return 0;
}

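/**
 * liquidio_send_vf_trust_cmd - Tell the firmware whether a VF is trusted
 * @lio: per-interface private data
 * @vfidx: zero-based index of the VF
 * @trusted: true to mark the VF as trusted
 *
 * Return: 0 on success, negative value on failure
 */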
static int liquidio_send_vf_trust_cmd(struct lio *lio, int vfidx, bool trusted)
{
	struct octeon_device *oct = lio->oct_dev;
	struct octeon_soft_command *sc;
	int retval;

	sc = octeon_alloc_soft_command(oct, 0, 16, 0);
	if (!sc)
		return -ENOMEM;

	sc->iq_no = lio->linfo.txpciq[0].s.q_no;

	/* vfidx is 0 based, but vf_num (param1) is 1 based */
	octeon_prepare_soft_command(oct, sc, OPCODE_NIC,
				    OPCODE_NIC_SET_TRUSTED_VF, 0, vfidx + 1,
				    trusted);

	init_completion(&sc->complete);
	sc->sc_status = OCTEON_REQUEST_PENDING;

	retval = octeon_send_soft_command(oct, sc);
	if (retval == IQ_SEND_FAILED) {
		octeon_free_soft_command(oct, sc);
		retval = -1;
	} else {
		/* Wait for response or timeout */
		retval = wait_for_sc_completion_timeout(oct, sc, 0);
		if (retval)
			return retval;

		WRITE_ONCE(sc->caller_is_done, true);
	}

	return retval;
}

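/**
 * liquidio_set_vf_trust - ndo handler to mark a VF as trusted/untrusted
 * @netdev: pointer to network device
 * @vfidx: zero-based index of the VF
 * @setting: true to trust the VF; only one VF may be trusted at a time
 *
 * Return: 0 on success, negative value on failure
 */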
static int liquidio_set_vf_trust(struct net_device *netdev, int vfidx,
				 bool setting)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;

	if (strcmp(oct->fw_info.liquidio_firmware_version, "1.7.1") < 0) {
		/* trusted vf is not supported by firmware older than 1.7.1 */
		return -EOPNOTSUPP;
	}

	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced) {
		netif_info(lio, drv, lio->netdev, "Invalid vfidx %d\n", vfidx);
		return -EINVAL;
	}

	if (setting) {
		/* Set */

		if (oct->sriov_info.trusted_vf.active &&
		    oct->sriov_info.trusted_vf.id == vfidx)
			return 0;

		if (oct->sriov_info.trusted_vf.active) {
			netif_info(lio, drv, lio->netdev, "More than one trusted VF is not allowed\n");
			return -EPERM;
		}
	} else {
		/* Clear */

		if (!oct->sriov_info.trusted_vf.active)
			return 0;
	}

	if (!liquidio_send_vf_trust_cmd(lio, vfidx, setting)) {
		if (setting) {
			oct->sriov_info.trusted_vf.id = vfidx;
			oct->sriov_info.trusted_vf.active = true;
		} else {
			oct->sriov_info.trusted_vf.active = false;
		}

		netif_info(lio, drv, lio->netdev, "VF %u is %strusted\n", vfidx,
			   setting ? "" : "not ");
	} else {
		netif_info(lio, drv, lio->netdev, "Failed to set VF trusted\n");
		return -1;
	}

	return 0;
}

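/**
 * liquidio_set_vf_link_state - ndo handler to set a VF's link state
 * @netdev: pointer to network device
 * @vfidx: zero-based index of the VF
 * @linkstate: IFLA_VF_LINK_STATE_AUTO/ENABLE/DISABLE
 *
 * Return: 0 on success, negative errno on failure
 */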
static int liquidio_set_vf_link_state(struct net_device *netdev, int vfidx,
				      int linkstate)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct octnic_ctrl_pkt nctrl;
	int ret = 0;

	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
		return -EINVAL;

	if (oct->sriov_info.vf_linkstate[vfidx] == linkstate)
		return 0;

	memset(&nctrl, 0, sizeof(struct octnic_ctrl_pkt));
	nctrl.ncmd.s.cmd = OCTNET_CMD_SET_VF_LINKSTATE;
	/* vfidx is 0 based, but vf_num (param1) is 1 based */
	nctrl.ncmd.s.param1 = vfidx + 1;
	nctrl.ncmd.s.param2 = linkstate;
	nctrl.ncmd.s.more = 0;
	nctrl.iq_no = lio->linfo.txpciq[0].s.q_no;
	nctrl.cb_fn = NULL;

	ret = octnet_send_nic_ctrl_pkt(oct, &nctrl);

	if (!ret)
		oct->sriov_info.vf_linkstate[vfidx] = linkstate;
	else if (ret > 0)
		ret = -EIO;

	return ret;
}

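/**
 * liquidio_eswitch_mode_get - devlink handler to report the eswitch mode
 * @devlink: devlink instance for this device
 * @mode: filled with the current eswitch mode
 *
 * Return: 0 in all cases
 */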
static int
liquidio_eswitch_mode_get(struct devlink *devlink, u16 *mode)
{
	struct lio_devlink_priv *priv;
	struct octeon_device *oct;

	priv = devlink_priv(devlink);
	oct = priv->oct;

	*mode = oct->eswitch_mode;

	return 0;
}

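/**
 * liquidio_eswitch_mode_set - devlink handler to switch the eswitch mode
 * @devlink: devlink instance for this device
 * @mode: DEVLINK_ESWITCH_MODE_SWITCHDEV or DEVLINK_ESWITCH_MODE_LEGACY
 * @extack: netlink extended ack (unused)
 *
 * Creates or destroys the VF representors accordingly.
 *
 * Return: 0 on success, negative errno on failure
 */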
static int
liquidio_eswitch_mode_set(struct devlink *devlink, u16 mode,
			  struct netlink_ext_ack *extack)
{
	struct lio_devlink_priv *priv;
	struct octeon_device *oct;
	int ret = 0;

	priv = devlink_priv(devlink);
	oct = priv->oct;

	if (!(oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP))
		return -EINVAL;

	if (oct->eswitch_mode == mode)
		return 0;

	switch (mode) {
	case DEVLINK_ESWITCH_MODE_SWITCHDEV:
		oct->eswitch_mode = mode;
		ret = lio_vf_rep_create(oct);
		break;

	case DEVLINK_ESWITCH_MODE_LEGACY:
		lio_vf_rep_destroy(oct);
		oct->eswitch_mode = mode;
		break;

	default:
		ret = -EINVAL;
	}

	return ret;
}

static const struct devlink_ops liquidio_devlink_ops = {
	.eswitch_mode_get = liquidio_eswitch_mode_get,
	.eswitch_mode_set = liquidio_eswitch_mode_set,
};

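/**
 * liquidio_get_port_parent_id - Report a switch-wide unique port parent id
 * @dev: pointer to network device
 * @ppid: filled with the parent id (derived from the interface MAC)
 *
 * Only valid while the eswitch is in switchdev mode.
 *
 * Return: 0 on success, negative errno on failure
 */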
static int
liquidio_get_port_parent_id(struct net_device *dev,
			    struct netdev_phys_item_id *ppid)
{
	struct lio *lio = GET_LIO(dev);
	struct octeon_device *oct = lio->oct_dev;

	if (oct->eswitch_mode != DEVLINK_ESWITCH_MODE_SWITCHDEV)
		return -EOPNOTSUPP;

	ppid->id_len = ETH_ALEN;
	ether_addr_copy(ppid->id, (void *)&lio->linfo.hw_addr + 2);

	return 0;
}

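/**
 * liquidio_get_vf_stats - Fetch a VF's traffic statistics from the device
 * @netdev: pointer to network device
 * @vfidx: zero-based index of the VF
 * @vf_stats: filled with the VF's packet/byte counters
 *
 * Return: 0 on success, negative errno on failure
 */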
static int liquidio_get_vf_stats(struct net_device *netdev, int vfidx,
				 struct ifla_vf_stats *vf_stats)
{
	struct lio *lio = GET_LIO(netdev);
	struct octeon_device *oct = lio->oct_dev;
	struct oct_vf_stats stats;
	int ret;

	if (vfidx < 0 || vfidx >= oct->sriov_info.num_vfs_alloced)
		return -EINVAL;

	memset(&stats, 0, sizeof(struct oct_vf_stats));
	ret = cn23xx_get_vf_stats(oct, vfidx, &stats);
	if (!ret) {
		vf_stats->rx_packets = stats.rx_packets;
		vf_stats->tx_packets = stats.tx_packets;
		vf_stats->rx_bytes = stats.rx_bytes;
		vf_stats->tx_bytes = stats.tx_bytes;
		vf_stats->broadcast = stats.broadcast;
		vf_stats->multicast = stats.multicast;
	}

	return ret;
}

static const struct net_device_ops lionetdevops = {
	.ndo_open		= liquidio_open,
	.ndo_stop		= liquidio_stop,
	.ndo_start_xmit		= liquidio_xmit,
	.ndo_get_stats64	= liquidio_get_stats64,
	.ndo_set_mac_address	= liquidio_set_mac,
	.ndo_set_rx_mode	= liquidio_set_mcast_list,
	.ndo_tx_timeout		= liquidio_tx_timeout,

	.ndo_vlan_rx_add_vid	= liquidio_vlan_rx_add_vid,
	.ndo_vlan_rx_kill_vid	= liquidio_vlan_rx_kill_vid,
	.ndo_change_mtu		= liquidio_change_mtu,
	.ndo_do_ioctl		= liquidio_ioctl,
	.ndo_fix_features	= liquidio_fix_features,
	.ndo_set_features	= liquidio_set_features,
	.ndo_udp_tunnel_add	= udp_tunnel_nic_add_port,
	.ndo_udp_tunnel_del	= udp_tunnel_nic_del_port,
	.ndo_set_vf_mac		= liquidio_set_vf_mac,
	.ndo_set_vf_vlan	= liquidio_set_vf_vlan,
	.ndo_get_vf_config	= liquidio_get_vf_config,
	.ndo_set_vf_spoofchk	= liquidio_set_vf_spoofchk,
	.ndo_set_vf_trust	= liquidio_set_vf_trust,
	.ndo_set_vf_link_state	= liquidio_set_vf_link_state,
	.ndo_get_vf_stats	= liquidio_get_vf_stats,
	.ndo_get_port_parent_id	= liquidio_get_port_parent_id,
};

/**
 * liquidio_init - Entry point for the liquidio module
 *
 * Return: 0 on success, negative errno on failure
 */
static int __init liquidio_init(void)
{
	int i;
	struct handshake *hs;

	init_completion(&first_stage);

	octeon_init_device_list(OCTEON_CONFIG_TYPE_DEFAULT);

	if (liquidio_init_pci())
		return -EINVAL;

	wait_for_completion_timeout(&first_stage, msecs_to_jiffies(1000));

	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
		hs = &handshake[i];
		if (hs->pci_dev) {
			wait_for_completion(&hs->init);
			if (!hs->init_ok) {
				/* init handshake failed */
				dev_err(&hs->pci_dev->dev,
					"Failed to init device\n");
				liquidio_deinit_pci();
				return -EIO;
			}
		}
	}

	for (i = 0; i < MAX_OCTEON_DEVICES; i++) {
		hs = &handshake[i];
		if (hs->pci_dev) {
			wait_for_completion_timeout(&hs->started,
						    msecs_to_jiffies(30000));
			if (!hs->started_ok) {
				/* starter handshake failed */
				dev_err(&hs->pci_dev->dev,
					"Firmware failed to start\n");
				liquidio_deinit_pci();
				return -EIO;
			}
		}
	}

	return 0;
}

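/**
 * lio_nic_info - Dispatch handler for link-status messages from firmware
 * @recv_info: receive info holding the message buffers
 * @buf: octeon device that received the message
 *
 * Return: 0 in all cases
 */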
static int lio_nic_info(struct octeon_recv_info *recv_info, void *buf)
{
	struct octeon_device *oct = (struct octeon_device *)buf;
	struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
	int gmxport = 0;
	union oct_link_status *ls;
	int i;

	if (recv_pkt->buffer_size[0] != (sizeof(*ls) + OCT_DROQ_INFO_SIZE)) {
		dev_err(&oct->pci_dev->dev, "Malformed NIC_INFO, len=%d, ifidx=%d\n",
			recv_pkt->buffer_size[0],
			recv_pkt->rh.r_nic_info.gmxport);
		goto nic_info_err;
	}

	gmxport = recv_pkt->rh.r_nic_info.gmxport;
	ls = (union oct_link_status *)(get_rbd(recv_pkt->buffer_ptr[0]) +
		OCT_DROQ_INFO_SIZE);

	octeon_swap_8B_data((u64 *)ls, (sizeof(union oct_link_status)) >> 3);
	for (i = 0; i < oct->ifcount; i++) {
		if (oct->props[i].gmxport == gmxport) {
			update_link_status(oct->props[i].netdev, ls);
			break;
		}
	}

nic_info_err:
	for (i = 0; i < recv_pkt->buffer_count; i++)
		recv_buffer_free(recv_pkt->buffer_ptr[i]);
	octeon_free_recv_info(recv_info);
	return 0;
}

/**
 * setup_nic_devices - Setup network interfaces
 * @octeon_dev: octeon device
 *
 * Called during init time for each device. It assumes the NIC
 * is already up and running. The link information for each
 * interface is passed in link_info.
 */
static int setup_nic_devices(struct octeon_device *octeon_dev)
{
	struct lio *lio = NULL;
	struct net_device *netdev;
	u8 mac[6], i, j, *fw_ver, *micro_ver;
	unsigned long micro;
	u32 cur_ver;
	struct octeon_soft_command *sc;
	struct liquidio_if_cfg_resp *resp;
	struct octdev_props *props;
	int retval, num_iqueues, num_oqueues;
	int max_num_queues = 0;
	union oct_nic_if_cfg if_cfg;
	unsigned int base_queue;
	unsigned int gmx_port_id;
	u32 resp_size, data_size;
	u32 ifidx_or_pfnum;
	struct lio_version *vdata;
	struct devlink *devlink;
	struct lio_devlink_priv *lio_devlink;

	/* This is to handle link status changes */
	octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
				    OPCODE_NIC_INFO,
				    lio_nic_info, octeon_dev);

	/* REQTYPE_RESP_NET and REQTYPE_SOFT_COMMAND do not have free functions.
	 * They are handled directly.
	 */
	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET,
					free_netbuf);

	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_NORESP_NET_SG,
					free_netsgbuf);

	octeon_register_reqtype_free_fn(octeon_dev, REQTYPE_RESP_NET_SG,
					free_netsgbuf_with_resp);

	for (i = 0; i < octeon_dev->ifcount; i++) {
		resp_size = sizeof(struct liquidio_if_cfg_resp);
		data_size = sizeof(struct lio_version);
		sc = (struct octeon_soft_command *)
			octeon_alloc_soft_command(octeon_dev, data_size,
						  resp_size, 0);
		/* sc->virtrptr/virtdptr must not be dereferenced if the
		 * allocation failed; bail out through the common error path.
		 */
		if (!sc) {
			dev_err(&octeon_dev->pci_dev->dev,
				"Failed to allocate soft command\n");
			goto setup_nic_dev_done;
		}

		resp = (struct liquidio_if_cfg_resp *)sc->virtrptr;
		vdata = (struct lio_version *)sc->virtdptr;
3396*4882a593Smuzhiyun
3397*4882a593Smuzhiyun *((u64 *)vdata) = 0;
3398*4882a593Smuzhiyun vdata->major = cpu_to_be16(LIQUIDIO_BASE_MAJOR_VERSION);
3399*4882a593Smuzhiyun vdata->minor = cpu_to_be16(LIQUIDIO_BASE_MINOR_VERSION);
3400*4882a593Smuzhiyun vdata->micro = cpu_to_be16(LIQUIDIO_BASE_MICRO_VERSION);
3401*4882a593Smuzhiyun
3402*4882a593Smuzhiyun if (OCTEON_CN23XX_PF(octeon_dev)) {
3403*4882a593Smuzhiyun num_iqueues = octeon_dev->sriov_info.num_pf_rings;
3404*4882a593Smuzhiyun num_oqueues = octeon_dev->sriov_info.num_pf_rings;
3405*4882a593Smuzhiyun base_queue = octeon_dev->sriov_info.pf_srn;
3406*4882a593Smuzhiyun
3407*4882a593Smuzhiyun gmx_port_id = octeon_dev->pf_num;
3408*4882a593Smuzhiyun ifidx_or_pfnum = octeon_dev->pf_num;
3409*4882a593Smuzhiyun } else {
3410*4882a593Smuzhiyun num_iqueues = CFG_GET_NUM_TXQS_NIC_IF(
3411*4882a593Smuzhiyun octeon_get_conf(octeon_dev), i);
3412*4882a593Smuzhiyun num_oqueues = CFG_GET_NUM_RXQS_NIC_IF(
3413*4882a593Smuzhiyun octeon_get_conf(octeon_dev), i);
3414*4882a593Smuzhiyun base_queue = CFG_GET_BASE_QUE_NIC_IF(
3415*4882a593Smuzhiyun octeon_get_conf(octeon_dev), i);
3416*4882a593Smuzhiyun gmx_port_id = CFG_GET_GMXID_NIC_IF(
3417*4882a593Smuzhiyun octeon_get_conf(octeon_dev), i);
3418*4882a593Smuzhiyun ifidx_or_pfnum = i;
3419*4882a593Smuzhiyun }
3420*4882a593Smuzhiyun
3421*4882a593Smuzhiyun dev_dbg(&octeon_dev->pci_dev->dev,
3422*4882a593Smuzhiyun "requesting config for interface %d, iqs %d, oqs %d\n",
3423*4882a593Smuzhiyun ifidx_or_pfnum, num_iqueues, num_oqueues);
3424*4882a593Smuzhiyun
3425*4882a593Smuzhiyun if_cfg.u64 = 0;
3426*4882a593Smuzhiyun if_cfg.s.num_iqueues = num_iqueues;
3427*4882a593Smuzhiyun if_cfg.s.num_oqueues = num_oqueues;
3428*4882a593Smuzhiyun if_cfg.s.base_queue = base_queue;
3429*4882a593Smuzhiyun if_cfg.s.gmx_port_id = gmx_port_id;
3430*4882a593Smuzhiyun
3431*4882a593Smuzhiyun sc->iq_no = 0;
3432*4882a593Smuzhiyun
3433*4882a593Smuzhiyun octeon_prepare_soft_command(octeon_dev, sc, OPCODE_NIC,
3434*4882a593Smuzhiyun OPCODE_NIC_IF_CFG, 0,
3435*4882a593Smuzhiyun if_cfg.u64, 0);
3436*4882a593Smuzhiyun
3437*4882a593Smuzhiyun init_completion(&sc->complete);
3438*4882a593Smuzhiyun sc->sc_status = OCTEON_REQUEST_PENDING;
3439*4882a593Smuzhiyun
3440*4882a593Smuzhiyun retval = octeon_send_soft_command(octeon_dev, sc);
3441*4882a593Smuzhiyun if (retval == IQ_SEND_FAILED) {
3442*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev,
3443*4882a593Smuzhiyun "iq/oq config failed status: %x\n",
3444*4882a593Smuzhiyun retval);
3445*4882a593Smuzhiyun /* Soft instr is freed by driver in case of failure. */
3446*4882a593Smuzhiyun octeon_free_soft_command(octeon_dev, sc);
3447*4882a593Smuzhiyun return(-EIO);
3448*4882a593Smuzhiyun }
3449*4882a593Smuzhiyun
3450*4882a593Smuzhiyun /* Sleep on a wait queue till the cond flag indicates that the
3451*4882a593Smuzhiyun * response arrived or timed-out.
3452*4882a593Smuzhiyun */
3453*4882a593Smuzhiyun retval = wait_for_sc_completion_timeout(octeon_dev, sc, 0);
3454*4882a593Smuzhiyun if (retval)
3455*4882a593Smuzhiyun return retval;
3456*4882a593Smuzhiyun
3457*4882a593Smuzhiyun retval = resp->status;
3458*4882a593Smuzhiyun if (retval) {
3459*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev, "iq/oq config failed\n");
3460*4882a593Smuzhiyun WRITE_ONCE(sc->caller_is_done, true);
3461*4882a593Smuzhiyun goto setup_nic_dev_done;
3462*4882a593Smuzhiyun }
3463*4882a593Smuzhiyun snprintf(octeon_dev->fw_info.liquidio_firmware_version,
3464*4882a593Smuzhiyun 32, "%s",
3465*4882a593Smuzhiyun resp->cfg_info.liquidio_firmware_version);
3466*4882a593Smuzhiyun
3467*4882a593Smuzhiyun /* Verify f/w version (in case of 'auto' loading from flash) */
3468*4882a593Smuzhiyun fw_ver = octeon_dev->fw_info.liquidio_firmware_version;
3469*4882a593Smuzhiyun if (memcmp(LIQUIDIO_BASE_VERSION,
3470*4882a593Smuzhiyun fw_ver,
3471*4882a593Smuzhiyun strlen(LIQUIDIO_BASE_VERSION))) {
3472*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev,
3473*4882a593Smuzhiyun "Unmatched firmware version. Expected %s.x, got %s.\n",
3474*4882a593Smuzhiyun LIQUIDIO_BASE_VERSION, fw_ver);
3475*4882a593Smuzhiyun WRITE_ONCE(sc->caller_is_done, true);
3476*4882a593Smuzhiyun goto setup_nic_dev_done;
3477*4882a593Smuzhiyun } else if (atomic_read(octeon_dev->adapter_fw_state) ==
3478*4882a593Smuzhiyun FW_IS_PRELOADED) {
3479*4882a593Smuzhiyun dev_info(&octeon_dev->pci_dev->dev,
3480*4882a593Smuzhiyun "Using auto-loaded firmware version %s.\n",
3481*4882a593Smuzhiyun fw_ver);
3482*4882a593Smuzhiyun }
3483*4882a593Smuzhiyun
3484*4882a593Smuzhiyun /* extract micro version field; point past '<maj>.<min>.' */
3485*4882a593Smuzhiyun micro_ver = fw_ver + strlen(LIQUIDIO_BASE_VERSION) + 1;
3486*4882a593Smuzhiyun 		if (kstrtoul(micro_ver, 10, &micro) != 0)
3487*4882a593Smuzhiyun micro = 0;
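		/* Example (assuming a base version of the form "<maj>.<min>",
		 * e.g. "1.7"): for fw_ver "1.7.2", micro_ver points at "2" and
		 * micro parses to 2, i.e. version 1.7.2 in the fields below.
		 */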
3488*4882a593Smuzhiyun octeon_dev->fw_info.ver.maj = LIQUIDIO_BASE_MAJOR_VERSION;
3489*4882a593Smuzhiyun octeon_dev->fw_info.ver.min = LIQUIDIO_BASE_MINOR_VERSION;
3490*4882a593Smuzhiyun octeon_dev->fw_info.ver.rev = micro;
3491*4882a593Smuzhiyun
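		/* Byte-swap each 64-bit word of the response so the queue
		 * masks below read correctly on little-endian hosts.
		 */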
3492*4882a593Smuzhiyun octeon_swap_8B_data((u64 *)(&resp->cfg_info),
3493*4882a593Smuzhiyun (sizeof(struct liquidio_if_cfg_info)) >> 3);
3494*4882a593Smuzhiyun
3495*4882a593Smuzhiyun num_iqueues = hweight64(resp->cfg_info.iqmask);
3496*4882a593Smuzhiyun num_oqueues = hweight64(resp->cfg_info.oqmask);
3497*4882a593Smuzhiyun
3498*4882a593Smuzhiyun 		if (!num_iqueues || !num_oqueues) {
3499*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev,
3500*4882a593Smuzhiyun "Got bad iqueues (%016llx) or oqueues (%016llx) from firmware.\n",
3501*4882a593Smuzhiyun resp->cfg_info.iqmask,
3502*4882a593Smuzhiyun resp->cfg_info.oqmask);
3503*4882a593Smuzhiyun WRITE_ONCE(sc->caller_is_done, true);
3504*4882a593Smuzhiyun goto setup_nic_dev_done;
3505*4882a593Smuzhiyun }
3506*4882a593Smuzhiyun
3507*4882a593Smuzhiyun if (OCTEON_CN6XXX(octeon_dev)) {
3508*4882a593Smuzhiyun max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3509*4882a593Smuzhiyun cn6xxx));
3510*4882a593Smuzhiyun } else if (OCTEON_CN23XX_PF(octeon_dev)) {
3511*4882a593Smuzhiyun max_num_queues = CFG_GET_IQ_MAX_Q(CHIP_CONF(octeon_dev,
3512*4882a593Smuzhiyun cn23xx_pf));
3513*4882a593Smuzhiyun }
3514*4882a593Smuzhiyun
3515*4882a593Smuzhiyun dev_dbg(&octeon_dev->pci_dev->dev,
3516*4882a593Smuzhiyun "interface %d, iqmask %016llx, oqmask %016llx, numiqueues %d, numoqueues %d max_num_queues: %d\n",
3517*4882a593Smuzhiyun i, resp->cfg_info.iqmask, resp->cfg_info.oqmask,
3518*4882a593Smuzhiyun num_iqueues, num_oqueues, max_num_queues);
3519*4882a593Smuzhiyun netdev = alloc_etherdev_mq(LIO_SIZE, max_num_queues);
3520*4882a593Smuzhiyun
3521*4882a593Smuzhiyun if (!netdev) {
3522*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev, "Device allocation failed\n");
3523*4882a593Smuzhiyun WRITE_ONCE(sc->caller_is_done, true);
3524*4882a593Smuzhiyun goto setup_nic_dev_done;
3525*4882a593Smuzhiyun }
3526*4882a593Smuzhiyun
3527*4882a593Smuzhiyun SET_NETDEV_DEV(netdev, &octeon_dev->pci_dev->dev);
3528*4882a593Smuzhiyun
3529*4882a593Smuzhiyun /* Associate the routines that will handle different
3530*4882a593Smuzhiyun * netdev tasks.
3531*4882a593Smuzhiyun */
3532*4882a593Smuzhiyun netdev->netdev_ops = &lionetdevops;
3533*4882a593Smuzhiyun
3534*4882a593Smuzhiyun retval = netif_set_real_num_rx_queues(netdev, num_oqueues);
3535*4882a593Smuzhiyun if (retval) {
3536*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev,
3537*4882a593Smuzhiyun "setting real number rx failed\n");
3538*4882a593Smuzhiyun WRITE_ONCE(sc->caller_is_done, true);
3539*4882a593Smuzhiyun goto setup_nic_dev_free;
3540*4882a593Smuzhiyun }
3541*4882a593Smuzhiyun
3542*4882a593Smuzhiyun retval = netif_set_real_num_tx_queues(netdev, num_iqueues);
3543*4882a593Smuzhiyun if (retval) {
3544*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev,
3545*4882a593Smuzhiyun "setting real number tx failed\n");
3546*4882a593Smuzhiyun WRITE_ONCE(sc->caller_is_done, true);
3547*4882a593Smuzhiyun goto setup_nic_dev_free;
3548*4882a593Smuzhiyun }
3549*4882a593Smuzhiyun
3550*4882a593Smuzhiyun lio = GET_LIO(netdev);
3551*4882a593Smuzhiyun
3552*4882a593Smuzhiyun memset(lio, 0, sizeof(struct lio));
3553*4882a593Smuzhiyun
3554*4882a593Smuzhiyun lio->ifidx = ifidx_or_pfnum;
3555*4882a593Smuzhiyun
3556*4882a593Smuzhiyun props = &octeon_dev->props[i];
3557*4882a593Smuzhiyun props->gmxport = resp->cfg_info.linfo.gmxport;
3558*4882a593Smuzhiyun props->netdev = netdev;
3559*4882a593Smuzhiyun
3560*4882a593Smuzhiyun lio->linfo.num_rxpciq = num_oqueues;
3561*4882a593Smuzhiyun lio->linfo.num_txpciq = num_iqueues;
3562*4882a593Smuzhiyun for (j = 0; j < num_oqueues; j++) {
3563*4882a593Smuzhiyun lio->linfo.rxpciq[j].u64 =
3564*4882a593Smuzhiyun resp->cfg_info.linfo.rxpciq[j].u64;
3565*4882a593Smuzhiyun }
3566*4882a593Smuzhiyun for (j = 0; j < num_iqueues; j++) {
3567*4882a593Smuzhiyun lio->linfo.txpciq[j].u64 =
3568*4882a593Smuzhiyun resp->cfg_info.linfo.txpciq[j].u64;
3569*4882a593Smuzhiyun }
3570*4882a593Smuzhiyun lio->linfo.hw_addr = resp->cfg_info.linfo.hw_addr;
3571*4882a593Smuzhiyun lio->linfo.gmxport = resp->cfg_info.linfo.gmxport;
3572*4882a593Smuzhiyun lio->linfo.link.u64 = resp->cfg_info.linfo.link.u64;
3573*4882a593Smuzhiyun
3574*4882a593Smuzhiyun WRITE_ONCE(sc->caller_is_done, true);
3575*4882a593Smuzhiyun
3576*4882a593Smuzhiyun lio->msg_enable = netif_msg_init(debug, DEFAULT_MSG_ENABLE);
3577*4882a593Smuzhiyun
3578*4882a593Smuzhiyun if (OCTEON_CN23XX_PF(octeon_dev) ||
3579*4882a593Smuzhiyun OCTEON_CN6XXX(octeon_dev)) {
3580*4882a593Smuzhiyun lio->dev_capability = NETIF_F_HIGHDMA
3581*4882a593Smuzhiyun | NETIF_F_IP_CSUM
3582*4882a593Smuzhiyun | NETIF_F_IPV6_CSUM
3583*4882a593Smuzhiyun | NETIF_F_SG | NETIF_F_RXCSUM
3584*4882a593Smuzhiyun | NETIF_F_GRO
3585*4882a593Smuzhiyun | NETIF_F_TSO | NETIF_F_TSO6
3586*4882a593Smuzhiyun | NETIF_F_LRO;
3587*4882a593Smuzhiyun }
3588*4882a593Smuzhiyun netif_set_gso_max_size(netdev, OCTNIC_GSO_MAX_SIZE);
3589*4882a593Smuzhiyun
3590*4882a593Smuzhiyun /* Copy of transmit encapsulation capabilities:
3591*4882a593Smuzhiyun * TSO, TSO6, Checksums for this device
3592*4882a593Smuzhiyun */
3593*4882a593Smuzhiyun lio->enc_dev_capability = NETIF_F_IP_CSUM
3594*4882a593Smuzhiyun | NETIF_F_IPV6_CSUM
3595*4882a593Smuzhiyun | NETIF_F_GSO_UDP_TUNNEL
3596*4882a593Smuzhiyun | NETIF_F_HW_CSUM | NETIF_F_SG
3597*4882a593Smuzhiyun | NETIF_F_RXCSUM
3598*4882a593Smuzhiyun | NETIF_F_TSO | NETIF_F_TSO6
3599*4882a593Smuzhiyun | NETIF_F_LRO;
3600*4882a593Smuzhiyun
3601*4882a593Smuzhiyun netdev->hw_enc_features = (lio->enc_dev_capability &
3602*4882a593Smuzhiyun ~NETIF_F_LRO);
3603*4882a593Smuzhiyun
3604*4882a593Smuzhiyun netdev->udp_tunnel_nic_info = &liquidio_udp_tunnels;
3605*4882a593Smuzhiyun
3606*4882a593Smuzhiyun lio->dev_capability |= NETIF_F_GSO_UDP_TUNNEL;
3607*4882a593Smuzhiyun
3608*4882a593Smuzhiyun netdev->vlan_features = lio->dev_capability;
3609*4882a593Smuzhiyun /* Add any unchangeable hw features */
3610*4882a593Smuzhiyun lio->dev_capability |= NETIF_F_HW_VLAN_CTAG_FILTER |
3611*4882a593Smuzhiyun NETIF_F_HW_VLAN_CTAG_RX |
3612*4882a593Smuzhiyun NETIF_F_HW_VLAN_CTAG_TX;
3613*4882a593Smuzhiyun
3614*4882a593Smuzhiyun netdev->features = (lio->dev_capability & ~NETIF_F_LRO);
3615*4882a593Smuzhiyun
3616*4882a593Smuzhiyun netdev->hw_features = lio->dev_capability;
3617*4882a593Smuzhiyun 		/* HW_VLAN_RX and HW_VLAN_FILTER are always on */
3618*4882a593Smuzhiyun netdev->hw_features = netdev->hw_features &
3619*4882a593Smuzhiyun ~NETIF_F_HW_VLAN_CTAG_RX;
3620*4882a593Smuzhiyun
3621*4882a593Smuzhiyun /* MTU range: 68 - 16000 */
3622*4882a593Smuzhiyun netdev->min_mtu = LIO_MIN_MTU_SIZE;
3623*4882a593Smuzhiyun netdev->max_mtu = LIO_MAX_MTU_SIZE;
3624*4882a593Smuzhiyun
3625*4882a593Smuzhiyun 		/* Point to the properties for the octeon device to which this
3626*4882a593Smuzhiyun 		 * interface belongs.
3627*4882a593Smuzhiyun */
3628*4882a593Smuzhiyun lio->oct_dev = octeon_dev;
3629*4882a593Smuzhiyun lio->octprops = props;
3630*4882a593Smuzhiyun lio->netdev = netdev;
3631*4882a593Smuzhiyun
3632*4882a593Smuzhiyun dev_dbg(&octeon_dev->pci_dev->dev,
3633*4882a593Smuzhiyun "if%d gmx: %d hw_addr: 0x%llx\n", i,
3634*4882a593Smuzhiyun lio->linfo.gmxport, CVM_CAST64(lio->linfo.hw_addr));
3635*4882a593Smuzhiyun
3636*4882a593Smuzhiyun for (j = 0; j < octeon_dev->sriov_info.max_vfs; j++) {
3637*4882a593Smuzhiyun u8 vfmac[ETH_ALEN];
3638*4882a593Smuzhiyun
3639*4882a593Smuzhiyun eth_random_addr(vfmac);
3640*4882a593Smuzhiyun if (__liquidio_set_vf_mac(netdev, j, vfmac, false)) {
3641*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev,
3642*4882a593Smuzhiyun "Error setting VF%d MAC address\n",
3643*4882a593Smuzhiyun j);
3644*4882a593Smuzhiyun goto setup_nic_dev_free;
3645*4882a593Smuzhiyun }
3646*4882a593Smuzhiyun }
3647*4882a593Smuzhiyun
3648*4882a593Smuzhiyun /* 64-bit swap required on LE machines */
3649*4882a593Smuzhiyun octeon_swap_8B_data(&lio->linfo.hw_addr, 1);
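		/* The 48-bit MAC sits in the low six bytes of the (big-endian)
		 * u64, i.e. at byte offsets 2..7, hence the +2 below.
		 */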
3650*4882a593Smuzhiyun for (j = 0; j < 6; j++)
3651*4882a593Smuzhiyun mac[j] = *((u8 *)(((u8 *)&lio->linfo.hw_addr) + 2 + j));
3652*4882a593Smuzhiyun
3653*4882a593Smuzhiyun /* Copy MAC Address to OS network device structure */
3654*4882a593Smuzhiyun
3655*4882a593Smuzhiyun ether_addr_copy(netdev->dev_addr, mac);
3656*4882a593Smuzhiyun
3657*4882a593Smuzhiyun 		/* By default, all interfaces on a single Octeon use the same
3658*4882a593Smuzhiyun 		 * tx and rx queues.
3659*4882a593Smuzhiyun 		 */
3660*4882a593Smuzhiyun lio->txq = lio->linfo.txpciq[0].s.q_no;
3661*4882a593Smuzhiyun lio->rxq = lio->linfo.rxpciq[0].s.q_no;
3662*4882a593Smuzhiyun if (liquidio_setup_io_queues(octeon_dev, i,
3663*4882a593Smuzhiyun lio->linfo.num_txpciq,
3664*4882a593Smuzhiyun lio->linfo.num_rxpciq)) {
3665*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev, "I/O queues creation failed\n");
3666*4882a593Smuzhiyun goto setup_nic_dev_free;
3667*4882a593Smuzhiyun }
3668*4882a593Smuzhiyun
3669*4882a593Smuzhiyun ifstate_set(lio, LIO_IFSTATE_DROQ_OPS);
3670*4882a593Smuzhiyun
3671*4882a593Smuzhiyun lio->tx_qsize = octeon_get_tx_qsize(octeon_dev, lio->txq);
3672*4882a593Smuzhiyun lio->rx_qsize = octeon_get_rx_qsize(octeon_dev, lio->rxq);
3673*4882a593Smuzhiyun
3674*4882a593Smuzhiyun if (lio_setup_glists(octeon_dev, lio, num_iqueues)) {
3675*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev,
3676*4882a593Smuzhiyun "Gather list allocation failed\n");
3677*4882a593Smuzhiyun goto setup_nic_dev_free;
3678*4882a593Smuzhiyun }
3679*4882a593Smuzhiyun
3680*4882a593Smuzhiyun /* Register ethtool support */
3681*4882a593Smuzhiyun liquidio_set_ethtool_ops(netdev);
3682*4882a593Smuzhiyun if (lio->oct_dev->chip_id == OCTEON_CN23XX_PF_VID)
3683*4882a593Smuzhiyun octeon_dev->priv_flags = OCT_PRIV_FLAG_DEFAULT;
3684*4882a593Smuzhiyun else
3685*4882a593Smuzhiyun octeon_dev->priv_flags = 0x0;
3686*4882a593Smuzhiyun
3687*4882a593Smuzhiyun if (netdev->features & NETIF_F_LRO)
3688*4882a593Smuzhiyun liquidio_set_feature(netdev, OCTNET_CMD_LRO_ENABLE,
3689*4882a593Smuzhiyun OCTNIC_LROIPV4 | OCTNIC_LROIPV6);
3690*4882a593Smuzhiyun
3691*4882a593Smuzhiyun liquidio_set_feature(netdev, OCTNET_CMD_VLAN_FILTER_CTL,
3692*4882a593Smuzhiyun OCTNET_CMD_VLAN_FILTER_ENABLE);
3693*4882a593Smuzhiyun
3694*4882a593Smuzhiyun if ((debug != -1) && (debug & NETIF_MSG_HW))
3695*4882a593Smuzhiyun liquidio_set_feature(netdev,
3696*4882a593Smuzhiyun OCTNET_CMD_VERBOSE_ENABLE, 0);
3697*4882a593Smuzhiyun
3698*4882a593Smuzhiyun if (setup_link_status_change_wq(netdev))
3699*4882a593Smuzhiyun goto setup_nic_dev_free;
3700*4882a593Smuzhiyun
3701*4882a593Smuzhiyun if ((octeon_dev->fw_info.app_cap_flags &
3702*4882a593Smuzhiyun LIQUIDIO_TIME_SYNC_CAP) &&
3703*4882a593Smuzhiyun setup_sync_octeon_time_wq(netdev))
3704*4882a593Smuzhiyun goto setup_nic_dev_free;
3705*4882a593Smuzhiyun
3706*4882a593Smuzhiyun if (setup_rx_oom_poll_fn(netdev))
3707*4882a593Smuzhiyun goto setup_nic_dev_free;
3708*4882a593Smuzhiyun
3709*4882a593Smuzhiyun /* Register the network device with the OS */
3710*4882a593Smuzhiyun if (register_netdev(netdev)) {
3711*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev, "Device registration failed\n");
3712*4882a593Smuzhiyun goto setup_nic_dev_free;
3713*4882a593Smuzhiyun }
3714*4882a593Smuzhiyun
3715*4882a593Smuzhiyun dev_dbg(&octeon_dev->pci_dev->dev,
3716*4882a593Smuzhiyun "Setup NIC ifidx:%d mac:%02x%02x%02x%02x%02x%02x\n",
3717*4882a593Smuzhiyun i, mac[0], mac[1], mac[2], mac[3], mac[4], mac[5]);
3718*4882a593Smuzhiyun netif_carrier_off(netdev);
3719*4882a593Smuzhiyun lio->link_changes++;
3720*4882a593Smuzhiyun
3721*4882a593Smuzhiyun ifstate_set(lio, LIO_IFSTATE_REGISTERED);
3722*4882a593Smuzhiyun
3723*4882a593Smuzhiyun 		/* Send a command to the firmware to enable Rx checksum
3724*4882a593Smuzhiyun 		 * offload by default at the time this device is set up
3725*4882a593Smuzhiyun 		 * by the LiquidIO driver.
3726*4882a593Smuzhiyun 		 */
3727*4882a593Smuzhiyun liquidio_set_rxcsum_command(netdev, OCTNET_CMD_TNL_RX_CSUM_CTL,
3728*4882a593Smuzhiyun OCTNET_CMD_RXCSUM_ENABLE);
3729*4882a593Smuzhiyun liquidio_set_feature(netdev, OCTNET_CMD_TNL_TX_CSUM_CTL,
3730*4882a593Smuzhiyun OCTNET_CMD_TXCSUM_ENABLE);
3731*4882a593Smuzhiyun
3732*4882a593Smuzhiyun dev_dbg(&octeon_dev->pci_dev->dev,
3733*4882a593Smuzhiyun "NIC ifidx:%d Setup successful\n", i);
3734*4882a593Smuzhiyun
3735*4882a593Smuzhiyun if (octeon_dev->subsystem_id ==
3736*4882a593Smuzhiyun OCTEON_CN2350_25GB_SUBSYS_ID ||
3737*4882a593Smuzhiyun octeon_dev->subsystem_id ==
3738*4882a593Smuzhiyun OCTEON_CN2360_25GB_SUBSYS_ID) {
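			/* OCT_FW_VER packs maj.min.rev into a single integer
			 * so firmware versions can be compared numerically.
			 */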
3739*4882a593Smuzhiyun cur_ver = OCT_FW_VER(octeon_dev->fw_info.ver.maj,
3740*4882a593Smuzhiyun octeon_dev->fw_info.ver.min,
3741*4882a593Smuzhiyun octeon_dev->fw_info.ver.rev);
3742*4882a593Smuzhiyun
3743*4882a593Smuzhiyun /* speed control unsupported in f/w older than 1.7.2 */
3744*4882a593Smuzhiyun if (cur_ver < OCT_FW_VER(1, 7, 2)) {
3745*4882a593Smuzhiyun dev_info(&octeon_dev->pci_dev->dev,
3746*4882a593Smuzhiyun 					 "speed setting not supported by f/w.\n");
3747*4882a593Smuzhiyun octeon_dev->speed_setting = 25;
3748*4882a593Smuzhiyun octeon_dev->no_speed_setting = 1;
3749*4882a593Smuzhiyun } else {
3750*4882a593Smuzhiyun liquidio_get_speed(lio);
3751*4882a593Smuzhiyun }
3752*4882a593Smuzhiyun
3753*4882a593Smuzhiyun if (octeon_dev->speed_setting == 0) {
3754*4882a593Smuzhiyun octeon_dev->speed_setting = 25;
3755*4882a593Smuzhiyun octeon_dev->no_speed_setting = 1;
3756*4882a593Smuzhiyun }
3757*4882a593Smuzhiyun } else {
3758*4882a593Smuzhiyun octeon_dev->no_speed_setting = 1;
3759*4882a593Smuzhiyun octeon_dev->speed_setting = 10;
3760*4882a593Smuzhiyun }
3761*4882a593Smuzhiyun octeon_dev->speed_boot = octeon_dev->speed_setting;
3762*4882a593Smuzhiyun
3763*4882a593Smuzhiyun /* don't read FEC setting if unsupported by f/w (see above) */
3764*4882a593Smuzhiyun if (octeon_dev->speed_boot == 25 &&
3765*4882a593Smuzhiyun !octeon_dev->no_speed_setting) {
3766*4882a593Smuzhiyun liquidio_get_fec(lio);
3767*4882a593Smuzhiyun octeon_dev->props[lio->ifidx].fec_boot =
3768*4882a593Smuzhiyun octeon_dev->props[lio->ifidx].fec;
3769*4882a593Smuzhiyun }
3770*4882a593Smuzhiyun }
3771*4882a593Smuzhiyun
3772*4882a593Smuzhiyun devlink = devlink_alloc(&liquidio_devlink_ops,
3773*4882a593Smuzhiyun sizeof(struct lio_devlink_priv));
3774*4882a593Smuzhiyun if (!devlink) {
3775*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev, "devlink alloc failed\n");
3776*4882a593Smuzhiyun goto setup_nic_dev_free;
3777*4882a593Smuzhiyun }
3778*4882a593Smuzhiyun
3779*4882a593Smuzhiyun lio_devlink = devlink_priv(devlink);
3780*4882a593Smuzhiyun lio_devlink->oct = octeon_dev;
3781*4882a593Smuzhiyun
3782*4882a593Smuzhiyun if (devlink_register(devlink, &octeon_dev->pci_dev->dev)) {
3783*4882a593Smuzhiyun devlink_free(devlink);
3784*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev,
3785*4882a593Smuzhiyun "devlink registration failed\n");
3786*4882a593Smuzhiyun goto setup_nic_dev_free;
3787*4882a593Smuzhiyun }
3788*4882a593Smuzhiyun
3789*4882a593Smuzhiyun octeon_dev->devlink = devlink;
3790*4882a593Smuzhiyun octeon_dev->eswitch_mode = DEVLINK_ESWITCH_MODE_LEGACY;
3791*4882a593Smuzhiyun
3792*4882a593Smuzhiyun return 0;
3793*4882a593Smuzhiyun
3794*4882a593Smuzhiyun setup_nic_dev_free:
3795*4882a593Smuzhiyun
3796*4882a593Smuzhiyun while (i--) {
3797*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev,
3798*4882a593Smuzhiyun "NIC ifidx:%d Setup failed\n", i);
3799*4882a593Smuzhiyun liquidio_destroy_nic_device(octeon_dev, i);
3800*4882a593Smuzhiyun }
3801*4882a593Smuzhiyun
3802*4882a593Smuzhiyun setup_nic_dev_done:
3803*4882a593Smuzhiyun
3804*4882a593Smuzhiyun return -ENODEV;
3805*4882a593Smuzhiyun }
3806*4882a593Smuzhiyun
3807*4882a593Smuzhiyun #ifdef CONFIG_PCI_IOV
3808*4882a593Smuzhiyun static int octeon_enable_sriov(struct octeon_device *oct)
3809*4882a593Smuzhiyun {
3810*4882a593Smuzhiyun unsigned int num_vfs_alloced = oct->sriov_info.num_vfs_alloced;
3811*4882a593Smuzhiyun struct pci_dev *vfdev;
3812*4882a593Smuzhiyun int err;
3813*4882a593Smuzhiyun u32 u;
3814*4882a593Smuzhiyun
3815*4882a593Smuzhiyun if (OCTEON_CN23XX_PF(oct) && num_vfs_alloced) {
3816*4882a593Smuzhiyun err = pci_enable_sriov(oct->pci_dev,
3817*4882a593Smuzhiyun oct->sriov_info.num_vfs_alloced);
3818*4882a593Smuzhiyun if (err) {
3819*4882a593Smuzhiyun dev_err(&oct->pci_dev->dev,
3820*4882a593Smuzhiyun "OCTEON: Failed to enable PCI sriov: %d\n",
3821*4882a593Smuzhiyun err);
3822*4882a593Smuzhiyun oct->sriov_info.num_vfs_alloced = 0;
3823*4882a593Smuzhiyun return err;
3824*4882a593Smuzhiyun }
3825*4882a593Smuzhiyun oct->sriov_info.sriov_enabled = 1;
3826*4882a593Smuzhiyun
3827*4882a593Smuzhiyun /* init lookup table that maps DPI ring number to VF pci_dev
3828*4882a593Smuzhiyun * struct pointer
3829*4882a593Smuzhiyun */
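		/* DPI rings are assigned to VFs in blocks of rings_per_vf, so
		 * only each VF's first ring number gets a LUT entry here.
		 */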
3830*4882a593Smuzhiyun u = 0;
3831*4882a593Smuzhiyun vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3832*4882a593Smuzhiyun OCTEON_CN23XX_VF_VID, NULL);
3833*4882a593Smuzhiyun while (vfdev) {
3834*4882a593Smuzhiyun if (vfdev->is_virtfn &&
3835*4882a593Smuzhiyun (vfdev->physfn == oct->pci_dev)) {
3836*4882a593Smuzhiyun oct->sriov_info.dpiring_to_vfpcidev_lut[u] =
3837*4882a593Smuzhiyun vfdev;
3838*4882a593Smuzhiyun u += oct->sriov_info.rings_per_vf;
3839*4882a593Smuzhiyun }
3840*4882a593Smuzhiyun vfdev = pci_get_device(PCI_VENDOR_ID_CAVIUM,
3841*4882a593Smuzhiyun OCTEON_CN23XX_VF_VID, vfdev);
3842*4882a593Smuzhiyun }
3843*4882a593Smuzhiyun }
3844*4882a593Smuzhiyun
3845*4882a593Smuzhiyun return num_vfs_alloced;
3846*4882a593Smuzhiyun }
3847*4882a593Smuzhiyun
3848*4882a593Smuzhiyun static int lio_pci_sriov_disable(struct octeon_device *oct)
3849*4882a593Smuzhiyun {
3850*4882a593Smuzhiyun int u;
3851*4882a593Smuzhiyun
3852*4882a593Smuzhiyun if (pci_vfs_assigned(oct->pci_dev)) {
3853*4882a593Smuzhiyun dev_err(&oct->pci_dev->dev, "VFs are still assigned to VMs.\n");
3854*4882a593Smuzhiyun return -EPERM;
3855*4882a593Smuzhiyun }
3856*4882a593Smuzhiyun
3857*4882a593Smuzhiyun pci_disable_sriov(oct->pci_dev);
3858*4882a593Smuzhiyun
3859*4882a593Smuzhiyun u = 0;
3860*4882a593Smuzhiyun while (u < MAX_POSSIBLE_VFS) {
3861*4882a593Smuzhiyun oct->sriov_info.dpiring_to_vfpcidev_lut[u] = NULL;
3862*4882a593Smuzhiyun u += oct->sriov_info.rings_per_vf;
3863*4882a593Smuzhiyun }
3864*4882a593Smuzhiyun
3865*4882a593Smuzhiyun oct->sriov_info.num_vfs_alloced = 0;
3866*4882a593Smuzhiyun dev_info(&oct->pci_dev->dev, "oct->pf_num:%d disabled VFs\n",
3867*4882a593Smuzhiyun oct->pf_num);
3868*4882a593Smuzhiyun
3869*4882a593Smuzhiyun return 0;
3870*4882a593Smuzhiyun }
3871*4882a593Smuzhiyun
3872*4882a593Smuzhiyun static int liquidio_enable_sriov(struct pci_dev *dev, int num_vfs)
3873*4882a593Smuzhiyun {
3874*4882a593Smuzhiyun struct octeon_device *oct = pci_get_drvdata(dev);
3875*4882a593Smuzhiyun int ret = 0;
3876*4882a593Smuzhiyun
3877*4882a593Smuzhiyun if ((num_vfs == oct->sriov_info.num_vfs_alloced) &&
3878*4882a593Smuzhiyun (oct->sriov_info.sriov_enabled)) {
3879*4882a593Smuzhiyun dev_info(&oct->pci_dev->dev, "oct->pf_num:%d already enabled num_vfs:%d\n",
3880*4882a593Smuzhiyun oct->pf_num, num_vfs);
3881*4882a593Smuzhiyun return 0;
3882*4882a593Smuzhiyun }
3883*4882a593Smuzhiyun
3884*4882a593Smuzhiyun if (!num_vfs) {
3885*4882a593Smuzhiyun lio_vf_rep_destroy(oct);
3886*4882a593Smuzhiyun ret = lio_pci_sriov_disable(oct);
3887*4882a593Smuzhiyun } else if (num_vfs > oct->sriov_info.max_vfs) {
3888*4882a593Smuzhiyun dev_err(&oct->pci_dev->dev,
3889*4882a593Smuzhiyun 			"OCTEON: Max allowed VFs:%d user requested:%d\n",
3890*4882a593Smuzhiyun oct->sriov_info.max_vfs, num_vfs);
3891*4882a593Smuzhiyun ret = -EPERM;
3892*4882a593Smuzhiyun } else {
3893*4882a593Smuzhiyun oct->sriov_info.num_vfs_alloced = num_vfs;
3894*4882a593Smuzhiyun ret = octeon_enable_sriov(oct);
3895*4882a593Smuzhiyun dev_info(&oct->pci_dev->dev, "oct->pf_num:%d num_vfs:%d\n",
3896*4882a593Smuzhiyun oct->pf_num, num_vfs);
3897*4882a593Smuzhiyun ret = lio_vf_rep_create(oct);
3898*4882a593Smuzhiyun if (ret)
3899*4882a593Smuzhiyun dev_info(&oct->pci_dev->dev,
3900*4882a593Smuzhiyun 				 "vf representor create failed\n");
3901*4882a593Smuzhiyun }
3902*4882a593Smuzhiyun
3903*4882a593Smuzhiyun return ret;
3904*4882a593Smuzhiyun }
3905*4882a593Smuzhiyun #endif
3906*4882a593Smuzhiyun
3907*4882a593Smuzhiyun /**
3908*4882a593Smuzhiyun * liquidio_init_nic_module - initialize the NIC
3909*4882a593Smuzhiyun * @oct: octeon device
3910*4882a593Smuzhiyun *
3911*4882a593Smuzhiyun * This initialization routine is called once the Octeon device application is
3912*4882a593Smuzhiyun * up and running
3913*4882a593Smuzhiyun */
3914*4882a593Smuzhiyun static int liquidio_init_nic_module(struct octeon_device *oct)
3915*4882a593Smuzhiyun {
3916*4882a593Smuzhiyun int i, retval = 0;
3917*4882a593Smuzhiyun int num_nic_ports = CFG_GET_NUM_NIC_PORTS(octeon_get_conf(oct));
3918*4882a593Smuzhiyun
3919*4882a593Smuzhiyun dev_dbg(&oct->pci_dev->dev, "Initializing network interfaces\n");
3920*4882a593Smuzhiyun
3921*4882a593Smuzhiyun 	/* Only the default iq and oq were initialized earlier;
3922*4882a593Smuzhiyun 	 * initialize the rest as well by running the port_config
3923*4882a593Smuzhiyun 	 * command for each port.
3924*4882a593Smuzhiyun 	 */
3925*4882a593Smuzhiyun oct->ifcount = num_nic_ports;
3926*4882a593Smuzhiyun
3927*4882a593Smuzhiyun memset(oct->props, 0, sizeof(struct octdev_props) * num_nic_ports);
3928*4882a593Smuzhiyun
3929*4882a593Smuzhiyun for (i = 0; i < MAX_OCTEON_LINKS; i++)
3930*4882a593Smuzhiyun oct->props[i].gmxport = -1;
3931*4882a593Smuzhiyun
3932*4882a593Smuzhiyun retval = setup_nic_devices(oct);
3933*4882a593Smuzhiyun if (retval) {
3934*4882a593Smuzhiyun dev_err(&oct->pci_dev->dev, "Setup NIC devices failed\n");
3935*4882a593Smuzhiyun goto octnet_init_failure;
3936*4882a593Smuzhiyun }
3937*4882a593Smuzhiyun
3938*4882a593Smuzhiyun /* Call vf_rep_modinit if the firmware is switchdev capable
3939*4882a593Smuzhiyun * and do it from the first liquidio function probed.
3940*4882a593Smuzhiyun */
3941*4882a593Smuzhiyun if (!oct->octeon_id &&
3942*4882a593Smuzhiyun oct->fw_info.app_cap_flags & LIQUIDIO_SWITCHDEV_CAP) {
3943*4882a593Smuzhiyun retval = lio_vf_rep_modinit();
3944*4882a593Smuzhiyun if (retval) {
3945*4882a593Smuzhiyun liquidio_stop_nic_module(oct);
3946*4882a593Smuzhiyun goto octnet_init_failure;
3947*4882a593Smuzhiyun }
3948*4882a593Smuzhiyun }
3949*4882a593Smuzhiyun
3950*4882a593Smuzhiyun liquidio_ptp_init(oct);
3951*4882a593Smuzhiyun
3952*4882a593Smuzhiyun dev_dbg(&oct->pci_dev->dev, "Network interfaces ready\n");
3953*4882a593Smuzhiyun
3954*4882a593Smuzhiyun return retval;
3955*4882a593Smuzhiyun
3956*4882a593Smuzhiyun octnet_init_failure:
3957*4882a593Smuzhiyun
3958*4882a593Smuzhiyun oct->ifcount = 0;
3959*4882a593Smuzhiyun
3960*4882a593Smuzhiyun return retval;
3961*4882a593Smuzhiyun }
3962*4882a593Smuzhiyun
3963*4882a593Smuzhiyun /**
3964*4882a593Smuzhiyun * nic_starter - finish init
3965*4882a593Smuzhiyun  * @work: work struct
3966*4882a593Smuzhiyun  *
3967*4882a593Smuzhiyun  * Starter callback that invokes the remaining initialization work after the NIC is up and running.
3968*4882a593Smuzhiyun */
3969*4882a593Smuzhiyun static void nic_starter(struct work_struct *work)
3970*4882a593Smuzhiyun {
3971*4882a593Smuzhiyun struct octeon_device *oct;
3972*4882a593Smuzhiyun struct cavium_wk *wk = (struct cavium_wk *)work;
3973*4882a593Smuzhiyun
3974*4882a593Smuzhiyun oct = (struct octeon_device *)wk->ctxptr;
3975*4882a593Smuzhiyun
3976*4882a593Smuzhiyun if (atomic_read(&oct->status) == OCT_DEV_RUNNING)
3977*4882a593Smuzhiyun return;
3978*4882a593Smuzhiyun
3979*4882a593Smuzhiyun /* If the status of the device is CORE_OK, the core
3980*4882a593Smuzhiyun * application has reported its application type. Call
3981*4882a593Smuzhiyun * any registered handlers now and move to the RUNNING
3982*4882a593Smuzhiyun * state.
3983*4882a593Smuzhiyun */
3984*4882a593Smuzhiyun if (atomic_read(&oct->status) != OCT_DEV_CORE_OK) {
3985*4882a593Smuzhiyun schedule_delayed_work(&oct->nic_poll_work.work,
3986*4882a593Smuzhiyun LIQUIDIO_STARTER_POLL_INTERVAL_MS);
3987*4882a593Smuzhiyun return;
3988*4882a593Smuzhiyun }
3989*4882a593Smuzhiyun
3990*4882a593Smuzhiyun atomic_set(&oct->status, OCT_DEV_RUNNING);
3991*4882a593Smuzhiyun
3992*4882a593Smuzhiyun if (oct->app_mode && oct->app_mode == CVM_DRV_NIC_APP) {
3993*4882a593Smuzhiyun dev_dbg(&oct->pci_dev->dev, "Starting NIC module\n");
3994*4882a593Smuzhiyun
3995*4882a593Smuzhiyun if (liquidio_init_nic_module(oct))
3996*4882a593Smuzhiyun dev_err(&oct->pci_dev->dev, "NIC initialization failed\n");
3997*4882a593Smuzhiyun else
3998*4882a593Smuzhiyun handshake[oct->octeon_id].started_ok = 1;
3999*4882a593Smuzhiyun } else {
4000*4882a593Smuzhiyun dev_err(&oct->pci_dev->dev,
4001*4882a593Smuzhiyun "Unexpected application running on NIC (%d). Check firmware.\n",
4002*4882a593Smuzhiyun oct->app_mode);
4003*4882a593Smuzhiyun }
4004*4882a593Smuzhiyun
4005*4882a593Smuzhiyun complete(&handshake[oct->octeon_id].started);
4006*4882a593Smuzhiyun }
4007*4882a593Smuzhiyun
4008*4882a593Smuzhiyun static int
4009*4882a593Smuzhiyun octeon_recv_vf_drv_notice(struct octeon_recv_info *recv_info, void *buf)
4010*4882a593Smuzhiyun {
4011*4882a593Smuzhiyun struct octeon_device *oct = (struct octeon_device *)buf;
4012*4882a593Smuzhiyun struct octeon_recv_pkt *recv_pkt = recv_info->recv_pkt;
4013*4882a593Smuzhiyun int i, notice, vf_idx;
4014*4882a593Smuzhiyun bool cores_crashed;
4015*4882a593Smuzhiyun u64 *data, vf_num;
4016*4882a593Smuzhiyun
4017*4882a593Smuzhiyun notice = recv_pkt->rh.r.ossp;
4018*4882a593Smuzhiyun data = (u64 *)(get_rbd(recv_pkt->buffer_ptr[0]) + OCT_DROQ_INFO_SIZE);
4019*4882a593Smuzhiyun
4020*4882a593Smuzhiyun /* the first 64-bit word of data is the vf_num */
4021*4882a593Smuzhiyun vf_num = data[0];
4022*4882a593Smuzhiyun octeon_swap_8B_data(&vf_num, 1);
4023*4882a593Smuzhiyun vf_idx = (int)vf_num - 1;
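	/* VF numbering from the firmware is 1-based; vf_idx is 0-based. */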
4024*4882a593Smuzhiyun
4025*4882a593Smuzhiyun cores_crashed = READ_ONCE(oct->cores_crashed);
4026*4882a593Smuzhiyun
4027*4882a593Smuzhiyun if (notice == VF_DRV_LOADED) {
4028*4882a593Smuzhiyun if (!(oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx))) {
4029*4882a593Smuzhiyun oct->sriov_info.vf_drv_loaded_mask |= BIT_ULL(vf_idx);
4030*4882a593Smuzhiyun dev_info(&oct->pci_dev->dev,
4031*4882a593Smuzhiyun "driver for VF%d was loaded\n", vf_idx);
4032*4882a593Smuzhiyun if (!cores_crashed)
4033*4882a593Smuzhiyun try_module_get(THIS_MODULE);
4034*4882a593Smuzhiyun }
4035*4882a593Smuzhiyun } else if (notice == VF_DRV_REMOVED) {
4036*4882a593Smuzhiyun if (oct->sriov_info.vf_drv_loaded_mask & BIT_ULL(vf_idx)) {
4037*4882a593Smuzhiyun oct->sriov_info.vf_drv_loaded_mask &= ~BIT_ULL(vf_idx);
4038*4882a593Smuzhiyun dev_info(&oct->pci_dev->dev,
4039*4882a593Smuzhiyun "driver for VF%d was removed\n", vf_idx);
4040*4882a593Smuzhiyun if (!cores_crashed)
4041*4882a593Smuzhiyun module_put(THIS_MODULE);
4042*4882a593Smuzhiyun }
4043*4882a593Smuzhiyun } else if (notice == VF_DRV_MACADDR_CHANGED) {
4044*4882a593Smuzhiyun u8 *b = (u8 *)&data[1];
4045*4882a593Smuzhiyun
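		/* The new MAC occupies the low six bytes of the big-endian
		 * data[1]; the b + 2 offset below skips the two pad bytes.
		 */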
4046*4882a593Smuzhiyun oct->sriov_info.vf_macaddr[vf_idx] = data[1];
4047*4882a593Smuzhiyun dev_info(&oct->pci_dev->dev,
4048*4882a593Smuzhiyun "VF driver changed VF%d's MAC address to %pM\n",
4049*4882a593Smuzhiyun vf_idx, b + 2);
4050*4882a593Smuzhiyun }
4051*4882a593Smuzhiyun
4052*4882a593Smuzhiyun for (i = 0; i < recv_pkt->buffer_count; i++)
4053*4882a593Smuzhiyun recv_buffer_free(recv_pkt->buffer_ptr[i]);
4054*4882a593Smuzhiyun octeon_free_recv_info(recv_info);
4055*4882a593Smuzhiyun
4056*4882a593Smuzhiyun return 0;
4057*4882a593Smuzhiyun }
4058*4882a593Smuzhiyun
4059*4882a593Smuzhiyun /**
4060*4882a593Smuzhiyun * octeon_device_init - Device initialization for each Octeon device that is probed
4061*4882a593Smuzhiyun * @octeon_dev: octeon device
4062*4882a593Smuzhiyun */
4063*4882a593Smuzhiyun static int octeon_device_init(struct octeon_device *octeon_dev)
4064*4882a593Smuzhiyun {
4065*4882a593Smuzhiyun int j, ret;
4066*4882a593Smuzhiyun char bootcmd[] = "\n";
4067*4882a593Smuzhiyun char *dbg_enb = NULL;
4068*4882a593Smuzhiyun enum lio_fw_state fw_state;
4069*4882a593Smuzhiyun struct octeon_device_priv *oct_priv =
4070*4882a593Smuzhiyun (struct octeon_device_priv *)octeon_dev->priv;
4071*4882a593Smuzhiyun atomic_set(&octeon_dev->status, OCT_DEV_BEGIN_STATE);
4072*4882a593Smuzhiyun
4073*4882a593Smuzhiyun /* Enable access to the octeon device and make its DMA capability
4074*4882a593Smuzhiyun * known to the OS.
4075*4882a593Smuzhiyun */
4076*4882a593Smuzhiyun if (octeon_pci_os_setup(octeon_dev))
4077*4882a593Smuzhiyun return 1;
4078*4882a593Smuzhiyun
4079*4882a593Smuzhiyun atomic_set(&octeon_dev->status, OCT_DEV_PCI_ENABLE_DONE);
4080*4882a593Smuzhiyun
4081*4882a593Smuzhiyun /* Identify the Octeon type and map the BAR address space. */
4082*4882a593Smuzhiyun if (octeon_chip_specific_setup(octeon_dev)) {
4083*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev, "Chip specific setup failed\n");
4084*4882a593Smuzhiyun return 1;
4085*4882a593Smuzhiyun }
4086*4882a593Smuzhiyun
4087*4882a593Smuzhiyun atomic_set(&octeon_dev->status, OCT_DEV_PCI_MAP_DONE);
4088*4882a593Smuzhiyun
4089*4882a593Smuzhiyun /* Only add a reference after setting status 'OCT_DEV_PCI_MAP_DONE',
4090*4882a593Smuzhiyun * since that is what is required for the reference to be removed
4091*4882a593Smuzhiyun * during de-initialization (see 'octeon_destroy_resources').
4092*4882a593Smuzhiyun */
4093*4882a593Smuzhiyun octeon_register_device(octeon_dev, octeon_dev->pci_dev->bus->number,
4094*4882a593Smuzhiyun PCI_SLOT(octeon_dev->pci_dev->devfn),
4095*4882a593Smuzhiyun PCI_FUNC(octeon_dev->pci_dev->devfn),
4096*4882a593Smuzhiyun true);
4097*4882a593Smuzhiyun
4098*4882a593Smuzhiyun octeon_dev->app_mode = CVM_DRV_INVALID_APP;
4099*4882a593Smuzhiyun
4100*4882a593Smuzhiyun /* CN23XX supports preloaded firmware if the following is true:
4101*4882a593Smuzhiyun *
4102*4882a593Smuzhiyun * The adapter indicates that firmware is currently running AND
4103*4882a593Smuzhiyun * 'fw_type' is 'auto'.
4104*4882a593Smuzhiyun *
4105*4882a593Smuzhiyun * (default state is NEEDS_TO_BE_LOADED, override it if appropriate).
4106*4882a593Smuzhiyun */
4107*4882a593Smuzhiyun if (OCTEON_CN23XX_PF(octeon_dev) &&
4108*4882a593Smuzhiyun cn23xx_fw_loaded(octeon_dev) && fw_type_is_auto()) {
4109*4882a593Smuzhiyun atomic_cmpxchg(octeon_dev->adapter_fw_state,
4110*4882a593Smuzhiyun FW_NEEDS_TO_BE_LOADED, FW_IS_PRELOADED);
4111*4882a593Smuzhiyun }
4112*4882a593Smuzhiyun
4113*4882a593Smuzhiyun /* If loading firmware, only first device of adapter needs to do so. */
4114*4882a593Smuzhiyun fw_state = atomic_cmpxchg(octeon_dev->adapter_fw_state,
4115*4882a593Smuzhiyun FW_NEEDS_TO_BE_LOADED,
4116*4882a593Smuzhiyun FW_IS_BEING_LOADED);
4117*4882a593Smuzhiyun
4118*4882a593Smuzhiyun /* Here, [local variable] 'fw_state' is set to one of:
4119*4882a593Smuzhiyun *
4120*4882a593Smuzhiyun * FW_IS_PRELOADED: No firmware is to be loaded (see above)
4121*4882a593Smuzhiyun * FW_NEEDS_TO_BE_LOADED: The driver's first instance will load
4122*4882a593Smuzhiyun * firmware to the adapter.
4123*4882a593Smuzhiyun * FW_IS_BEING_LOADED: The driver's second instance will not load
4124*4882a593Smuzhiyun * firmware to the adapter.
4125*4882a593Smuzhiyun */
4126*4882a593Smuzhiyun
4127*4882a593Smuzhiyun /* Prior to f/w load, perform a soft reset of the Octeon device;
4128*4882a593Smuzhiyun 	 * if the reset fails, return with an error.
4129*4882a593Smuzhiyun */
4130*4882a593Smuzhiyun if (fw_state == FW_NEEDS_TO_BE_LOADED)
4131*4882a593Smuzhiyun if (octeon_dev->fn_list.soft_reset(octeon_dev))
4132*4882a593Smuzhiyun return 1;
4133*4882a593Smuzhiyun
4134*4882a593Smuzhiyun /* Initialize the dispatch mechanism used to push packets arriving on
4135*4882a593Smuzhiyun * Octeon Output queues.
4136*4882a593Smuzhiyun */
4137*4882a593Smuzhiyun if (octeon_init_dispatch_list(octeon_dev))
4138*4882a593Smuzhiyun return 1;
4139*4882a593Smuzhiyun
4140*4882a593Smuzhiyun octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4141*4882a593Smuzhiyun OPCODE_NIC_CORE_DRV_ACTIVE,
4142*4882a593Smuzhiyun octeon_core_drv_init,
4143*4882a593Smuzhiyun octeon_dev);
4144*4882a593Smuzhiyun
4145*4882a593Smuzhiyun octeon_register_dispatch_fn(octeon_dev, OPCODE_NIC,
4146*4882a593Smuzhiyun OPCODE_NIC_VF_DRV_NOTICE,
4147*4882a593Smuzhiyun octeon_recv_vf_drv_notice, octeon_dev);
4148*4882a593Smuzhiyun INIT_DELAYED_WORK(&octeon_dev->nic_poll_work.work, nic_starter);
4149*4882a593Smuzhiyun octeon_dev->nic_poll_work.ctxptr = (void *)octeon_dev;
4150*4882a593Smuzhiyun schedule_delayed_work(&octeon_dev->nic_poll_work.work,
4151*4882a593Smuzhiyun LIQUIDIO_STARTER_POLL_INTERVAL_MS);
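	/* nic_starter (scheduled above) re-arms itself until the core app
	 * reports CORE_OK, then initializes the NIC module.
	 */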
4152*4882a593Smuzhiyun
4153*4882a593Smuzhiyun atomic_set(&octeon_dev->status, OCT_DEV_DISPATCH_INIT_DONE);
4154*4882a593Smuzhiyun
4155*4882a593Smuzhiyun if (octeon_set_io_queues_off(octeon_dev)) {
4156*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev, "setting io queues off failed\n");
4157*4882a593Smuzhiyun return 1;
4158*4882a593Smuzhiyun }
4159*4882a593Smuzhiyun
4160*4882a593Smuzhiyun if (OCTEON_CN23XX_PF(octeon_dev)) {
4161*4882a593Smuzhiyun ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4162*4882a593Smuzhiyun if (ret) {
4163*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Failed to configure device registers\n");
4164*4882a593Smuzhiyun return ret;
4165*4882a593Smuzhiyun }
4166*4882a593Smuzhiyun }
4167*4882a593Smuzhiyun
4168*4882a593Smuzhiyun /* Initialize soft command buffer pool
4169*4882a593Smuzhiyun */
4170*4882a593Smuzhiyun if (octeon_setup_sc_buffer_pool(octeon_dev)) {
4171*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev, "sc buffer pool allocation failed\n");
4172*4882a593Smuzhiyun return 1;
4173*4882a593Smuzhiyun }
4174*4882a593Smuzhiyun atomic_set(&octeon_dev->status, OCT_DEV_SC_BUFF_POOL_INIT_DONE);
4175*4882a593Smuzhiyun
4176*4882a593Smuzhiyun /* Setup the data structures that manage this Octeon's Input queues. */
4177*4882a593Smuzhiyun if (octeon_setup_instr_queues(octeon_dev)) {
4178*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev,
4179*4882a593Smuzhiyun "instruction queue initialization failed\n");
4180*4882a593Smuzhiyun return 1;
4181*4882a593Smuzhiyun }
4182*4882a593Smuzhiyun atomic_set(&octeon_dev->status, OCT_DEV_INSTR_QUEUE_INIT_DONE);
4183*4882a593Smuzhiyun
4184*4882a593Smuzhiyun /* Initialize lists to manage the requests of different types that
4185*4882a593Smuzhiyun * arrive from user & kernel applications for this octeon device.
4186*4882a593Smuzhiyun */
4187*4882a593Smuzhiyun if (octeon_setup_response_list(octeon_dev)) {
4188*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev, "Response list allocation failed\n");
4189*4882a593Smuzhiyun return 1;
4190*4882a593Smuzhiyun }
4191*4882a593Smuzhiyun atomic_set(&octeon_dev->status, OCT_DEV_RESP_LIST_INIT_DONE);
4192*4882a593Smuzhiyun
4193*4882a593Smuzhiyun if (octeon_setup_output_queues(octeon_dev)) {
4194*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev, "Output queue initialization failed\n");
4195*4882a593Smuzhiyun return 1;
4196*4882a593Smuzhiyun }
4197*4882a593Smuzhiyun
4198*4882a593Smuzhiyun atomic_set(&octeon_dev->status, OCT_DEV_DROQ_INIT_DONE);
4199*4882a593Smuzhiyun
4200*4882a593Smuzhiyun if (OCTEON_CN23XX_PF(octeon_dev)) {
4201*4882a593Smuzhiyun if (octeon_dev->fn_list.setup_mbox(octeon_dev)) {
4202*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev, "OCTEON: Mailbox setup failed\n");
4203*4882a593Smuzhiyun return 1;
4204*4882a593Smuzhiyun }
4205*4882a593Smuzhiyun atomic_set(&octeon_dev->status, OCT_DEV_MBOX_SETUP_DONE);
4206*4882a593Smuzhiyun
4207*4882a593Smuzhiyun if (octeon_allocate_ioq_vector
4208*4882a593Smuzhiyun (octeon_dev,
4209*4882a593Smuzhiyun octeon_dev->sriov_info.num_pf_rings)) {
4210*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev, "OCTEON: ioq vector allocation failed\n");
4211*4882a593Smuzhiyun return 1;
4212*4882a593Smuzhiyun }
4213*4882a593Smuzhiyun atomic_set(&octeon_dev->status, OCT_DEV_MSIX_ALLOC_VECTOR_DONE);
4214*4882a593Smuzhiyun
4215*4882a593Smuzhiyun } else {
4216*4882a593Smuzhiyun /* The input and output queue registers were setup earlier (the
4217*4882a593Smuzhiyun * queues were not enabled). Any additional registers
4218*4882a593Smuzhiyun * that need to be programmed should be done now.
4219*4882a593Smuzhiyun */
4220*4882a593Smuzhiyun ret = octeon_dev->fn_list.setup_device_regs(octeon_dev);
4221*4882a593Smuzhiyun if (ret) {
4222*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev,
4223*4882a593Smuzhiyun "Failed to configure device registers\n");
4224*4882a593Smuzhiyun return ret;
4225*4882a593Smuzhiyun }
4226*4882a593Smuzhiyun }
4227*4882a593Smuzhiyun
4228*4882a593Smuzhiyun 	/* Initialize the tasklet that handles output queue packet processing. */
4229*4882a593Smuzhiyun dev_dbg(&octeon_dev->pci_dev->dev, "Initializing droq tasklet\n");
4230*4882a593Smuzhiyun tasklet_setup(&oct_priv->droq_tasklet, octeon_droq_bh);
4231*4882a593Smuzhiyun
4232*4882a593Smuzhiyun /* Setup the interrupt handler and record the INT SUM register address
4233*4882a593Smuzhiyun */
4234*4882a593Smuzhiyun if (octeon_setup_interrupt(octeon_dev,
4235*4882a593Smuzhiyun octeon_dev->sriov_info.num_pf_rings))
4236*4882a593Smuzhiyun return 1;
4237*4882a593Smuzhiyun
4238*4882a593Smuzhiyun /* Enable Octeon device interrupts */
4239*4882a593Smuzhiyun octeon_dev->fn_list.enable_interrupt(octeon_dev, OCTEON_ALL_INTR);
4240*4882a593Smuzhiyun
4241*4882a593Smuzhiyun atomic_set(&octeon_dev->status, OCT_DEV_INTR_SET_DONE);
4242*4882a593Smuzhiyun
4243*4882a593Smuzhiyun /* Send Credit for Octeon Output queues. Credits are always sent BEFORE
4244*4882a593Smuzhiyun * the output queue is enabled.
4245*4882a593Smuzhiyun * This ensures that we'll receive the f/w CORE DRV_ACTIVE message in
4246*4882a593Smuzhiyun * case we've configured CN23XX_SLI_GBL_CONTROL[NOPTR_D] = 0.
4247*4882a593Smuzhiyun * Otherwise, it is possible that the DRV_ACTIVE message will be sent
4248*4882a593Smuzhiyun * before any credits have been issued, causing the ring to be reset
4249*4882a593Smuzhiyun * (and the f/w appear to never have started).
4250*4882a593Smuzhiyun */
4251*4882a593Smuzhiyun for (j = 0; j < octeon_dev->num_oqs; j++)
4252*4882a593Smuzhiyun writel(octeon_dev->droq[j]->max_count,
4253*4882a593Smuzhiyun octeon_dev->droq[j]->pkts_credit_reg);
4254*4882a593Smuzhiyun
4255*4882a593Smuzhiyun /* Enable the input and output queues for this Octeon device */
4256*4882a593Smuzhiyun ret = octeon_dev->fn_list.enable_io_queues(octeon_dev);
4257*4882a593Smuzhiyun if (ret) {
4258*4882a593Smuzhiyun 		dev_err(&octeon_dev->pci_dev->dev, "Failed to enable input/output queues\n");
4259*4882a593Smuzhiyun return ret;
4260*4882a593Smuzhiyun }
4261*4882a593Smuzhiyun
4262*4882a593Smuzhiyun atomic_set(&octeon_dev->status, OCT_DEV_IO_QUEUES_DONE);
4263*4882a593Smuzhiyun
4264*4882a593Smuzhiyun if (fw_state == FW_NEEDS_TO_BE_LOADED) {
4265*4882a593Smuzhiyun dev_dbg(&octeon_dev->pci_dev->dev, "Waiting for DDR initialization...\n");
4266*4882a593Smuzhiyun if (!ddr_timeout) {
4267*4882a593Smuzhiyun dev_info(&octeon_dev->pci_dev->dev,
4268*4882a593Smuzhiyun "WAITING. Set ddr_timeout to non-zero value to proceed with initialization.\n");
4269*4882a593Smuzhiyun }
4270*4882a593Smuzhiyun
4271*4882a593Smuzhiyun schedule_timeout_uninterruptible(HZ * LIO_RESET_SECS);
4272*4882a593Smuzhiyun
4273*4882a593Smuzhiyun 		/* Wait for the octeon to initialize DDR after the soft-reset. */
4274*4882a593Smuzhiyun while (!ddr_timeout) {
4275*4882a593Smuzhiyun set_current_state(TASK_INTERRUPTIBLE);
4276*4882a593Smuzhiyun if (schedule_timeout(HZ / 10)) {
4277*4882a593Smuzhiyun /* user probably pressed Control-C */
4278*4882a593Smuzhiyun return 1;
4279*4882a593Smuzhiyun }
4280*4882a593Smuzhiyun }
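		/* ddr_timeout is a writable (0644) module parameter, so the
		 * wait above can be released at runtime, e.g. via
		 * /sys/module/liquidio/parameters/ddr_timeout.
		 */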
4281*4882a593Smuzhiyun ret = octeon_wait_for_ddr_init(octeon_dev, &ddr_timeout);
4282*4882a593Smuzhiyun if (ret) {
4283*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev,
4284*4882a593Smuzhiyun "DDR not initialized. Please confirm that board is configured to boot from Flash, ret: %d\n",
4285*4882a593Smuzhiyun ret);
4286*4882a593Smuzhiyun return 1;
4287*4882a593Smuzhiyun }
4288*4882a593Smuzhiyun
4289*4882a593Smuzhiyun if (octeon_wait_for_bootloader(octeon_dev, 1000)) {
4290*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev, "Board not responding\n");
4291*4882a593Smuzhiyun return 1;
4292*4882a593Smuzhiyun }
4293*4882a593Smuzhiyun
4294*4882a593Smuzhiyun 		/* Divert U-Boot to take commands from the host instead. */
4295*4882a593Smuzhiyun ret = octeon_console_send_cmd(octeon_dev, bootcmd, 50);
4296*4882a593Smuzhiyun
4297*4882a593Smuzhiyun dev_dbg(&octeon_dev->pci_dev->dev, "Initializing consoles\n");
4298*4882a593Smuzhiyun ret = octeon_init_consoles(octeon_dev);
4299*4882a593Smuzhiyun if (ret) {
4300*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev, "Could not access board consoles\n");
4301*4882a593Smuzhiyun return 1;
4302*4882a593Smuzhiyun }
4303*4882a593Smuzhiyun 		/* If console debug is enabled, pass an empty string to use the
4304*4882a593Smuzhiyun 		 * default enablement; otherwise pass NULL for 'disabled'.
4305*4882a593Smuzhiyun 		 */
4306*4882a593Smuzhiyun dbg_enb = octeon_console_debug_enabled(0) ? "" : NULL;
4307*4882a593Smuzhiyun ret = octeon_add_console(octeon_dev, 0, dbg_enb);
4308*4882a593Smuzhiyun if (ret) {
4309*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev, "Could not access board console\n");
4310*4882a593Smuzhiyun return 1;
4311*4882a593Smuzhiyun } else if (octeon_console_debug_enabled(0)) {
4312*4882a593Smuzhiyun /* If console was added AND we're logging console output
4313*4882a593Smuzhiyun * then set our console print function.
4314*4882a593Smuzhiyun */
4315*4882a593Smuzhiyun octeon_dev->console[0].print = octeon_dbg_console_print;
4316*4882a593Smuzhiyun }
4317*4882a593Smuzhiyun
4318*4882a593Smuzhiyun atomic_set(&octeon_dev->status, OCT_DEV_CONSOLE_INIT_DONE);
4319*4882a593Smuzhiyun
4320*4882a593Smuzhiyun dev_dbg(&octeon_dev->pci_dev->dev, "Loading firmware\n");
4321*4882a593Smuzhiyun ret = load_firmware(octeon_dev);
4322*4882a593Smuzhiyun if (ret) {
4323*4882a593Smuzhiyun dev_err(&octeon_dev->pci_dev->dev, "Could not load firmware to board\n");
4324*4882a593Smuzhiyun return 1;
4325*4882a593Smuzhiyun }
4326*4882a593Smuzhiyun
4327*4882a593Smuzhiyun atomic_set(octeon_dev->adapter_fw_state, FW_HAS_BEEN_LOADED);
4328*4882a593Smuzhiyun }
4329*4882a593Smuzhiyun
4330*4882a593Smuzhiyun handshake[octeon_dev->octeon_id].init_ok = 1;
4331*4882a593Smuzhiyun complete(&handshake[octeon_dev->octeon_id].init);
4332*4882a593Smuzhiyun
4333*4882a593Smuzhiyun atomic_set(&octeon_dev->status, OCT_DEV_HOST_OK);
4334*4882a593Smuzhiyun oct_priv->dev = octeon_dev;
4335*4882a593Smuzhiyun
4336*4882a593Smuzhiyun return 0;
4337*4882a593Smuzhiyun }
4338*4882a593Smuzhiyun
4339*4882a593Smuzhiyun /**
4340*4882a593Smuzhiyun * octeon_dbg_console_print - Debug console print function
4341*4882a593Smuzhiyun * @oct: octeon device
4342*4882a593Smuzhiyun * @console_num: console number
4343*4882a593Smuzhiyun * @prefix: first portion of line to display
4344*4882a593Smuzhiyun * @suffix: second portion of line to display
4345*4882a593Smuzhiyun *
4346*4882a593Smuzhiyun * The OCTEON debug console outputs entire lines (excluding '\n').
4347*4882a593Smuzhiyun * Normally, the line will be passed in the 'prefix' parameter.
4348*4882a593Smuzhiyun * However, due to buffering, it is possible for a line to be split into two
4349*4882a593Smuzhiyun * parts, in which case they will be passed as the 'prefix' parameter and
4350*4882a593Smuzhiyun * 'suffix' parameter.
4351*4882a593Smuzhiyun */
4352*4882a593Smuzhiyun static int octeon_dbg_console_print(struct octeon_device *oct, u32 console_num,
4353*4882a593Smuzhiyun char *prefix, char *suffix)
4354*4882a593Smuzhiyun {
4355*4882a593Smuzhiyun if (prefix && suffix)
4356*4882a593Smuzhiyun dev_info(&oct->pci_dev->dev, "%u: %s%s\n", console_num, prefix,
4357*4882a593Smuzhiyun suffix);
4358*4882a593Smuzhiyun else if (prefix)
4359*4882a593Smuzhiyun dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, prefix);
4360*4882a593Smuzhiyun else if (suffix)
4361*4882a593Smuzhiyun dev_info(&oct->pci_dev->dev, "%u: %s\n", console_num, suffix);
4362*4882a593Smuzhiyun
4363*4882a593Smuzhiyun return 0;
4364*4882a593Smuzhiyun }
4365*4882a593Smuzhiyun
4366*4882a593Smuzhiyun /**
4367*4882a593Smuzhiyun * liquidio_exit - Exits the module
4368*4882a593Smuzhiyun */
4369*4882a593Smuzhiyun static void __exit liquidio_exit(void)
4370*4882a593Smuzhiyun {
4371*4882a593Smuzhiyun liquidio_deinit_pci();
4372*4882a593Smuzhiyun
4373*4882a593Smuzhiyun pr_info("LiquidIO network module is now unloaded\n");
4374*4882a593Smuzhiyun }
4375*4882a593Smuzhiyun
4376*4882a593Smuzhiyun module_init(liquidio_init);
4377*4882a593Smuzhiyun module_exit(liquidio_exit);