/**********************************************************************
 * Author: Cavium, Inc.
 *
 * Contact: support@cavium.com
 *          Please include "LiquidIO" in the subject.
 *
 * Copyright (c) 2003-2016 Cavium, Inc.
 *
 * This file is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License, Version 2, as
 * published by the Free Software Foundation.
 *
 * This file is distributed in the hope that it will be useful, but
 * AS-IS and WITHOUT ANY WARRANTY; without even the implied warranty
 * of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE, TITLE, or
 * NONINFRINGEMENT.  See the GNU General Public License for more details.
 ***********************************************************************/
#include <linux/pci.h>
#include <linux/netdevice.h>
#include "liquidio_common.h"
#include "octeon_droq.h"
#include "octeon_iq.h"
#include "response_manager.h"
#include "octeon_device.h"
#include "octeon_main.h"
#include "cn66xx_regs.h"
#include "cn66xx_device.h"
#include "cn68xx_regs.h"
#include "cn68xx_device.h"

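/* Program the DPI DMA engine registers: write the DMA control mask, size
 * the per-engine buffer FIFOs, and enable DPI. Called from
 * lio_cn68xx_soft_reset() once the common CN6XXX reset has completed.
 */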
static void lio_cn68xx_set_dpi_regs(struct octeon_device *oct)
{
	u32 i;
	u32 fifo_sizes[6] = { 3, 3, 1, 1, 1, 8 };

	lio_pci_writeq(oct, CN6XXX_DPI_DMA_CTL_MASK, CN6XXX_DPI_DMA_CONTROL);
	dev_dbg(&oct->pci_dev->dev, "DPI_DMA_CONTROL: 0x%016llx\n",
		lio_pci_readq(oct, CN6XXX_DPI_DMA_CONTROL));

	for (i = 0; i < 6; i++) {
		/* Prevent service of the instruction queue for all DMA
		 * engines. Engine 5 will remain 0. Engines 0 - 4 will be
		 * set up by the core.
		 */
		lio_pci_writeq(oct, 0, CN6XXX_DPI_DMA_ENG_ENB(i));
		lio_pci_writeq(oct, fifo_sizes[i], CN6XXX_DPI_DMA_ENG_BUF(i));
		dev_dbg(&oct->pci_dev->dev, "DPI_ENG_BUF%d: 0x%016llx\n", i,
			lio_pci_readq(oct, CN6XXX_DPI_DMA_ENG_BUF(i)));
	}

	/* DPI_SLI_PRT_CFG has MPS and MRRS settings that will be set
	 * separately.
	 */

	lio_pci_writeq(oct, 1, CN6XXX_DPI_CTL);
	dev_dbg(&oct->pci_dev->dev, "DPI_CTL: 0x%016llx\n",
		lio_pci_readq(oct, CN6XXX_DPI_CTL));
}

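/* CN68XX soft reset: perform the common CN6XXX reset, then reprogram the
 * DPI registers for this chip.
 */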
static int lio_cn68xx_soft_reset(struct octeon_device *oct)
{
	lio_cn6xxx_soft_reset(oct);
	lio_cn68xx_set_dpi_regs(oct);

	return 0;
}

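/* Configure SLI packet control for the CN68XX: advertise the maximum
 * number of output queues in the NUMP field of SLI_TX_PIPE, and enable or
 * disable per-port backpressure based on the loaded configuration.
 */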
static void lio_cn68xx_setup_pkt_ctl_regs(struct octeon_device *oct)
{
	struct octeon_cn6xxx *cn68xx = (struct octeon_cn6xxx *)oct->chip;
	u64 pktctl, tx_pipe, max_oqs;

	pktctl = octeon_read_csr64(oct, CN6XXX_SLI_PKT_CTL);

	/* 68XX specific */
	max_oqs = CFG_GET_OQ_MAX_Q(CHIP_CONF(oct, cn6xxx));
	tx_pipe  = octeon_read_csr64(oct, CN68XX_SLI_TX_PIPE);
	tx_pipe &= 0xffffffffff00ffffULL; /* clear out NUMP field */
	tx_pipe |= max_oqs << 16; /* put max_oqs in NUMP field */
	octeon_write_csr64(oct, CN68XX_SLI_TX_PIPE, tx_pipe);

	if (CFG_GET_IS_SLI_BP_ON(cn68xx->conf))
		pktctl |= 0xF;
	else
		/* Disable per-port backpressure. */
		pktctl &= ~0xF;
	octeon_write_csr64(oct, CN6XXX_SLI_PKT_CTL, pktctl);
}

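/* One-time register setup for the CN68XX: PCIe MPS/MRRS, error reporting,
 * global input/output queue registers, packet control, and the SLI window
 * access timeout.
 */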
static int lio_cn68xx_setup_device_regs(struct octeon_device *oct)
{
	lio_cn6xxx_setup_pcie_mps(oct, PCIE_MPS_DEFAULT);
	lio_cn6xxx_setup_pcie_mrrs(oct, PCIE_MRRS_256B);
	lio_cn6xxx_enable_error_reporting(oct);

	lio_cn6xxx_setup_global_input_regs(oct);
	lio_cn68xx_setup_pkt_ctl_regs(oct);
	lio_cn6xxx_setup_global_output_regs(oct);

	/* The default error timeout value should be 0x200000 to avoid a host
	 * hang when an invalid register is read.
	 */
	octeon_write_csr64(oct, CN6XXX_SLI_WINDOW_CTL, 0x200000ULL);

	return 0;
}

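/* Workaround: set the M_VEND0_DRP and M_VEND1_DRP bits in the PCIe
 * fault-mask register so vendor-defined messages are dropped rather than
 * reported as faults.
 */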
static inline void lio_cn68xx_vendor_message_fix(struct octeon_device *oct)
{
	u32 val = 0;

	/* Set M_VEND1_DRP and M_VEND0_DRP bits */
	pci_read_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, &val);
	val |= 0x3;
	pci_write_config_dword(oct->pci_dev, CN6XXX_PCIE_FLTMSK, val);
}

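/* Identify the card variant: a zeroed QLM4 configuration indicates the
 * 210NV card; otherwise the card is treated as a 410NV.
 */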
static int lio_is_210nv(struct octeon_device *oct)
{
	u64 mio_qlm4_cfg = lio_pci_readq(oct, CN6XXX_MIO_QLM4_CFG);

	return ((mio_qlm4_cfg & CN6XXX_MIO_QLM_CFG_MASK) == 0);
}

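/* Chip-specific initialization for CN68XX-based cards: map BAR0 and BAR1,
 * install the CN6XXX/CN68XX callbacks in oct->fn_list, select the
 * configuration matching the detected card variant, and record the
 * co-processor clock rate. Returns 0 on success, 1 on failure.
 */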
int lio_setup_cn68xx_octeon_device(struct octeon_device *oct)
{
	struct octeon_cn6xxx *cn68xx = (struct octeon_cn6xxx *)oct->chip;
	u16 card_type = LIO_410NV;

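	/* Map BAR0 (register access) and BAR1 (windowed access used by the
	 * bar1_idx_* handlers set up below).
	 */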
	if (octeon_map_pci_barx(oct, 0, 0))
		return 1;

	if (octeon_map_pci_barx(oct, 1, MAX_BAR1_IOREMAP_SIZE)) {
		dev_err(&oct->pci_dev->dev, "%s CN68XX BAR1 map failed\n",
			__func__);
		octeon_unmap_pci_barx(oct, 0);
		return 1;
	}

	spin_lock_init(&cn68xx->lock_for_droq_int_enb_reg);

	oct->fn_list.setup_iq_regs = lio_cn6xxx_setup_iq_regs;
	oct->fn_list.setup_oq_regs = lio_cn6xxx_setup_oq_regs;

	oct->fn_list.process_interrupt_regs = lio_cn6xxx_process_interrupt_regs;
	oct->fn_list.soft_reset = lio_cn68xx_soft_reset;
	oct->fn_list.setup_device_regs = lio_cn68xx_setup_device_regs;
	oct->fn_list.update_iq_read_idx = lio_cn6xxx_update_read_index;

	oct->fn_list.bar1_idx_setup = lio_cn6xxx_bar1_idx_setup;
	oct->fn_list.bar1_idx_write = lio_cn6xxx_bar1_idx_write;
	oct->fn_list.bar1_idx_read = lio_cn6xxx_bar1_idx_read;

	oct->fn_list.enable_interrupt = lio_cn6xxx_enable_interrupt;
	oct->fn_list.disable_interrupt = lio_cn6xxx_disable_interrupt;

	oct->fn_list.enable_io_queues = lio_cn6xxx_enable_io_queues;
	oct->fn_list.disable_io_queues = lio_cn6xxx_disable_io_queues;

	lio_cn6xxx_setup_reg_address(oct, oct->chip, &oct->reg_list);

	/* Determine variant of card */
	if (lio_is_210nv(oct))
		card_type = LIO_210NV;

	cn68xx->conf = (struct octeon_config *)
		       oct_get_config_info(oct, card_type);
	if (!cn68xx->conf) {
		dev_err(&oct->pci_dev->dev, "%s No Config found for CN68XX %s\n",
			__func__,
			(card_type == LIO_410NV) ? LIO_410NV_NAME :
			LIO_210NV_NAME);
		octeon_unmap_pci_barx(oct, 0);
		octeon_unmap_pci_barx(oct, 1);
		return 1;
	}

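	/* lio_cn6xxx_coprocessor_clock() reports the clock in MHz; store it
	 * in Hz.
	 */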
	oct->coproc_clock_rate = 1000000ULL * lio_cn6xxx_coprocessor_clock(oct);

	lio_cn68xx_vendor_message_fix(oct);

	return 0;
}