1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun #include <linux/delay.h>
3*4882a593Smuzhiyun
4*4882a593Smuzhiyun #include "nitrox_dev.h"
5*4882a593Smuzhiyun #include "nitrox_csr.h"
6*4882a593Smuzhiyun
7*4882a593Smuzhiyun #define PLL_REF_CLK 50
8*4882a593Smuzhiyun #define MAX_CSR_RETRIES 10
9*4882a593Smuzhiyun
10*4882a593Smuzhiyun /**
11*4882a593Smuzhiyun * emu_enable_cores - Enable EMU cluster cores.
12*4882a593Smuzhiyun * @ndev: NITROX device
13*4882a593Smuzhiyun */
emu_enable_cores(struct nitrox_device * ndev)14*4882a593Smuzhiyun static void emu_enable_cores(struct nitrox_device *ndev)
15*4882a593Smuzhiyun {
16*4882a593Smuzhiyun union emu_se_enable emu_se;
17*4882a593Smuzhiyun union emu_ae_enable emu_ae;
18*4882a593Smuzhiyun int i;
19*4882a593Smuzhiyun
20*4882a593Smuzhiyun /* AE cores 20 per cluster */
21*4882a593Smuzhiyun emu_ae.value = 0;
22*4882a593Smuzhiyun emu_ae.s.enable = 0xfffff;
23*4882a593Smuzhiyun
24*4882a593Smuzhiyun /* SE cores 16 per cluster */
25*4882a593Smuzhiyun emu_se.value = 0;
26*4882a593Smuzhiyun emu_se.s.enable = 0xffff;
27*4882a593Smuzhiyun
28*4882a593Smuzhiyun /* enable per cluster cores */
29*4882a593Smuzhiyun for (i = 0; i < NR_CLUSTERS; i++) {
30*4882a593Smuzhiyun nitrox_write_csr(ndev, EMU_AE_ENABLEX(i), emu_ae.value);
31*4882a593Smuzhiyun nitrox_write_csr(ndev, EMU_SE_ENABLEX(i), emu_se.value);
32*4882a593Smuzhiyun }
33*4882a593Smuzhiyun }
34*4882a593Smuzhiyun
35*4882a593Smuzhiyun /**
36*4882a593Smuzhiyun * nitrox_config_emu_unit - configure EMU unit.
37*4882a593Smuzhiyun * @ndev: NITROX device
38*4882a593Smuzhiyun */
nitrox_config_emu_unit(struct nitrox_device * ndev)39*4882a593Smuzhiyun void nitrox_config_emu_unit(struct nitrox_device *ndev)
40*4882a593Smuzhiyun {
41*4882a593Smuzhiyun union emu_wd_int_ena_w1s emu_wd_int;
42*4882a593Smuzhiyun union emu_ge_int_ena_w1s emu_ge_int;
43*4882a593Smuzhiyun u64 offset;
44*4882a593Smuzhiyun int i;
45*4882a593Smuzhiyun
46*4882a593Smuzhiyun /* enable cores */
47*4882a593Smuzhiyun emu_enable_cores(ndev);
48*4882a593Smuzhiyun
49*4882a593Smuzhiyun /* enable general error and watch dog interrupts */
50*4882a593Smuzhiyun emu_ge_int.value = 0;
51*4882a593Smuzhiyun emu_ge_int.s.se_ge = 0xffff;
52*4882a593Smuzhiyun emu_ge_int.s.ae_ge = 0xfffff;
53*4882a593Smuzhiyun emu_wd_int.value = 0;
54*4882a593Smuzhiyun emu_wd_int.s.se_wd = 1;
55*4882a593Smuzhiyun
56*4882a593Smuzhiyun for (i = 0; i < NR_CLUSTERS; i++) {
57*4882a593Smuzhiyun offset = EMU_WD_INT_ENA_W1SX(i);
58*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, emu_wd_int.value);
59*4882a593Smuzhiyun offset = EMU_GE_INT_ENA_W1SX(i);
60*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, emu_ge_int.value);
61*4882a593Smuzhiyun }
62*4882a593Smuzhiyun }
63*4882a593Smuzhiyun
reset_pkt_input_ring(struct nitrox_device * ndev,int ring)64*4882a593Smuzhiyun static void reset_pkt_input_ring(struct nitrox_device *ndev, int ring)
65*4882a593Smuzhiyun {
66*4882a593Smuzhiyun union nps_pkt_in_instr_ctl pkt_in_ctl;
67*4882a593Smuzhiyun union nps_pkt_in_done_cnts pkt_in_cnts;
68*4882a593Smuzhiyun int max_retries = MAX_CSR_RETRIES;
69*4882a593Smuzhiyun u64 offset;
70*4882a593Smuzhiyun
71*4882a593Smuzhiyun /* step 1: disable the ring, clear enable bit */
72*4882a593Smuzhiyun offset = NPS_PKT_IN_INSTR_CTLX(ring);
73*4882a593Smuzhiyun pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
74*4882a593Smuzhiyun pkt_in_ctl.s.enb = 0;
75*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, pkt_in_ctl.value);
76*4882a593Smuzhiyun
77*4882a593Smuzhiyun /* step 2: wait to clear [ENB] */
78*4882a593Smuzhiyun usleep_range(100, 150);
79*4882a593Smuzhiyun do {
80*4882a593Smuzhiyun pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
81*4882a593Smuzhiyun if (!pkt_in_ctl.s.enb)
82*4882a593Smuzhiyun break;
83*4882a593Smuzhiyun udelay(50);
84*4882a593Smuzhiyun } while (max_retries--);
85*4882a593Smuzhiyun
86*4882a593Smuzhiyun /* step 3: clear done counts */
87*4882a593Smuzhiyun offset = NPS_PKT_IN_DONE_CNTSX(ring);
88*4882a593Smuzhiyun pkt_in_cnts.value = nitrox_read_csr(ndev, offset);
89*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, pkt_in_cnts.value);
90*4882a593Smuzhiyun usleep_range(50, 100);
91*4882a593Smuzhiyun }
92*4882a593Smuzhiyun
enable_pkt_input_ring(struct nitrox_device * ndev,int ring)93*4882a593Smuzhiyun void enable_pkt_input_ring(struct nitrox_device *ndev, int ring)
94*4882a593Smuzhiyun {
95*4882a593Smuzhiyun union nps_pkt_in_instr_ctl pkt_in_ctl;
96*4882a593Smuzhiyun int max_retries = MAX_CSR_RETRIES;
97*4882a593Smuzhiyun u64 offset;
98*4882a593Smuzhiyun
99*4882a593Smuzhiyun /* 64-byte instruction size */
100*4882a593Smuzhiyun offset = NPS_PKT_IN_INSTR_CTLX(ring);
101*4882a593Smuzhiyun pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
102*4882a593Smuzhiyun pkt_in_ctl.s.is64b = 1;
103*4882a593Smuzhiyun pkt_in_ctl.s.enb = 1;
104*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, pkt_in_ctl.value);
105*4882a593Smuzhiyun
106*4882a593Smuzhiyun /* wait for set [ENB] */
107*4882a593Smuzhiyun do {
108*4882a593Smuzhiyun pkt_in_ctl.value = nitrox_read_csr(ndev, offset);
109*4882a593Smuzhiyun if (pkt_in_ctl.s.enb)
110*4882a593Smuzhiyun break;
111*4882a593Smuzhiyun udelay(50);
112*4882a593Smuzhiyun } while (max_retries--);
113*4882a593Smuzhiyun }
114*4882a593Smuzhiyun
115*4882a593Smuzhiyun /**
116*4882a593Smuzhiyun * nitrox_config_pkt_input_rings - configure Packet Input Rings
117*4882a593Smuzhiyun * @ndev: NITROX device
118*4882a593Smuzhiyun */
nitrox_config_pkt_input_rings(struct nitrox_device * ndev)119*4882a593Smuzhiyun void nitrox_config_pkt_input_rings(struct nitrox_device *ndev)
120*4882a593Smuzhiyun {
121*4882a593Smuzhiyun int i;
122*4882a593Smuzhiyun
123*4882a593Smuzhiyun for (i = 0; i < ndev->nr_queues; i++) {
124*4882a593Smuzhiyun struct nitrox_cmdq *cmdq = &ndev->pkt_inq[i];
125*4882a593Smuzhiyun union nps_pkt_in_instr_rsize pkt_in_rsize;
126*4882a593Smuzhiyun union nps_pkt_in_instr_baoff_dbell pkt_in_dbell;
127*4882a593Smuzhiyun u64 offset;
128*4882a593Smuzhiyun
129*4882a593Smuzhiyun reset_pkt_input_ring(ndev, i);
130*4882a593Smuzhiyun
131*4882a593Smuzhiyun /**
132*4882a593Smuzhiyun * step 4:
133*4882a593Smuzhiyun * configure ring base address 16-byte aligned,
134*4882a593Smuzhiyun * size and interrupt threshold.
135*4882a593Smuzhiyun */
136*4882a593Smuzhiyun offset = NPS_PKT_IN_INSTR_BADDRX(i);
137*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, cmdq->dma);
138*4882a593Smuzhiyun
139*4882a593Smuzhiyun /* configure ring size */
140*4882a593Smuzhiyun offset = NPS_PKT_IN_INSTR_RSIZEX(i);
141*4882a593Smuzhiyun pkt_in_rsize.value = 0;
142*4882a593Smuzhiyun pkt_in_rsize.s.rsize = ndev->qlen;
143*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, pkt_in_rsize.value);
144*4882a593Smuzhiyun
145*4882a593Smuzhiyun /* set high threshold for pkt input ring interrupts */
146*4882a593Smuzhiyun offset = NPS_PKT_IN_INT_LEVELSX(i);
147*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, 0xffffffff);
148*4882a593Smuzhiyun
149*4882a593Smuzhiyun /* step 5: clear off door bell counts */
150*4882a593Smuzhiyun offset = NPS_PKT_IN_INSTR_BAOFF_DBELLX(i);
151*4882a593Smuzhiyun pkt_in_dbell.value = 0;
152*4882a593Smuzhiyun pkt_in_dbell.s.dbell = 0xffffffff;
153*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, pkt_in_dbell.value);
154*4882a593Smuzhiyun
155*4882a593Smuzhiyun /* enable the ring */
156*4882a593Smuzhiyun enable_pkt_input_ring(ndev, i);
157*4882a593Smuzhiyun }
158*4882a593Smuzhiyun }
159*4882a593Smuzhiyun
reset_pkt_solicit_port(struct nitrox_device * ndev,int port)160*4882a593Smuzhiyun static void reset_pkt_solicit_port(struct nitrox_device *ndev, int port)
161*4882a593Smuzhiyun {
162*4882a593Smuzhiyun union nps_pkt_slc_ctl pkt_slc_ctl;
163*4882a593Smuzhiyun union nps_pkt_slc_cnts pkt_slc_cnts;
164*4882a593Smuzhiyun int max_retries = MAX_CSR_RETRIES;
165*4882a593Smuzhiyun u64 offset;
166*4882a593Smuzhiyun
167*4882a593Smuzhiyun /* step 1: disable slc port */
168*4882a593Smuzhiyun offset = NPS_PKT_SLC_CTLX(port);
169*4882a593Smuzhiyun pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
170*4882a593Smuzhiyun pkt_slc_ctl.s.enb = 0;
171*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, pkt_slc_ctl.value);
172*4882a593Smuzhiyun
173*4882a593Smuzhiyun /* step 2 */
174*4882a593Smuzhiyun usleep_range(100, 150);
175*4882a593Smuzhiyun /* wait to clear [ENB] */
176*4882a593Smuzhiyun do {
177*4882a593Smuzhiyun pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
178*4882a593Smuzhiyun if (!pkt_slc_ctl.s.enb)
179*4882a593Smuzhiyun break;
180*4882a593Smuzhiyun udelay(50);
181*4882a593Smuzhiyun } while (max_retries--);
182*4882a593Smuzhiyun
183*4882a593Smuzhiyun /* step 3: clear slc counters */
184*4882a593Smuzhiyun offset = NPS_PKT_SLC_CNTSX(port);
185*4882a593Smuzhiyun pkt_slc_cnts.value = nitrox_read_csr(ndev, offset);
186*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, pkt_slc_cnts.value);
187*4882a593Smuzhiyun usleep_range(50, 100);
188*4882a593Smuzhiyun }
189*4882a593Smuzhiyun
enable_pkt_solicit_port(struct nitrox_device * ndev,int port)190*4882a593Smuzhiyun void enable_pkt_solicit_port(struct nitrox_device *ndev, int port)
191*4882a593Smuzhiyun {
192*4882a593Smuzhiyun union nps_pkt_slc_ctl pkt_slc_ctl;
193*4882a593Smuzhiyun int max_retries = MAX_CSR_RETRIES;
194*4882a593Smuzhiyun u64 offset;
195*4882a593Smuzhiyun
196*4882a593Smuzhiyun offset = NPS_PKT_SLC_CTLX(port);
197*4882a593Smuzhiyun pkt_slc_ctl.value = 0;
198*4882a593Smuzhiyun pkt_slc_ctl.s.enb = 1;
199*4882a593Smuzhiyun /*
200*4882a593Smuzhiyun * 8 trailing 0x00 bytes will be added
201*4882a593Smuzhiyun * to the end of the outgoing packet.
202*4882a593Smuzhiyun */
203*4882a593Smuzhiyun pkt_slc_ctl.s.z = 1;
204*4882a593Smuzhiyun /* enable response header */
205*4882a593Smuzhiyun pkt_slc_ctl.s.rh = 1;
206*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, pkt_slc_ctl.value);
207*4882a593Smuzhiyun
208*4882a593Smuzhiyun /* wait to set [ENB] */
209*4882a593Smuzhiyun do {
210*4882a593Smuzhiyun pkt_slc_ctl.value = nitrox_read_csr(ndev, offset);
211*4882a593Smuzhiyun if (pkt_slc_ctl.s.enb)
212*4882a593Smuzhiyun break;
213*4882a593Smuzhiyun udelay(50);
214*4882a593Smuzhiyun } while (max_retries--);
215*4882a593Smuzhiyun }
216*4882a593Smuzhiyun
config_pkt_solicit_port(struct nitrox_device * ndev,int port)217*4882a593Smuzhiyun static void config_pkt_solicit_port(struct nitrox_device *ndev, int port)
218*4882a593Smuzhiyun {
219*4882a593Smuzhiyun union nps_pkt_slc_int_levels pkt_slc_int;
220*4882a593Smuzhiyun u64 offset;
221*4882a593Smuzhiyun
222*4882a593Smuzhiyun reset_pkt_solicit_port(ndev, port);
223*4882a593Smuzhiyun
224*4882a593Smuzhiyun /* step 4: configure interrupt levels */
225*4882a593Smuzhiyun offset = NPS_PKT_SLC_INT_LEVELSX(port);
226*4882a593Smuzhiyun pkt_slc_int.value = 0;
227*4882a593Smuzhiyun /* time interrupt threshold */
228*4882a593Smuzhiyun pkt_slc_int.s.timet = 0x3fffff;
229*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, pkt_slc_int.value);
230*4882a593Smuzhiyun
231*4882a593Smuzhiyun /* enable the solicit port */
232*4882a593Smuzhiyun enable_pkt_solicit_port(ndev, port);
233*4882a593Smuzhiyun }
234*4882a593Smuzhiyun
nitrox_config_pkt_solicit_ports(struct nitrox_device * ndev)235*4882a593Smuzhiyun void nitrox_config_pkt_solicit_ports(struct nitrox_device *ndev)
236*4882a593Smuzhiyun {
237*4882a593Smuzhiyun int i;
238*4882a593Smuzhiyun
239*4882a593Smuzhiyun for (i = 0; i < ndev->nr_queues; i++)
240*4882a593Smuzhiyun config_pkt_solicit_port(ndev, i);
241*4882a593Smuzhiyun }
242*4882a593Smuzhiyun
243*4882a593Smuzhiyun /**
244*4882a593Smuzhiyun * enable_nps_core_interrupts - enable NPS core interrutps
245*4882a593Smuzhiyun * @ndev: NITROX device.
246*4882a593Smuzhiyun *
247*4882a593Smuzhiyun * This includes NPS core interrupts.
248*4882a593Smuzhiyun */
enable_nps_core_interrupts(struct nitrox_device * ndev)249*4882a593Smuzhiyun static void enable_nps_core_interrupts(struct nitrox_device *ndev)
250*4882a593Smuzhiyun {
251*4882a593Smuzhiyun union nps_core_int_ena_w1s core_int;
252*4882a593Smuzhiyun
253*4882a593Smuzhiyun /* NPS core interrutps */
254*4882a593Smuzhiyun core_int.value = 0;
255*4882a593Smuzhiyun core_int.s.host_wr_err = 1;
256*4882a593Smuzhiyun core_int.s.host_wr_timeout = 1;
257*4882a593Smuzhiyun core_int.s.exec_wr_timeout = 1;
258*4882a593Smuzhiyun core_int.s.npco_dma_malform = 1;
259*4882a593Smuzhiyun core_int.s.host_nps_wr_err = 1;
260*4882a593Smuzhiyun nitrox_write_csr(ndev, NPS_CORE_INT_ENA_W1S, core_int.value);
261*4882a593Smuzhiyun }
262*4882a593Smuzhiyun
nitrox_config_nps_core_unit(struct nitrox_device * ndev)263*4882a593Smuzhiyun void nitrox_config_nps_core_unit(struct nitrox_device *ndev)
264*4882a593Smuzhiyun {
265*4882a593Smuzhiyun union nps_core_gbl_vfcfg core_gbl_vfcfg;
266*4882a593Smuzhiyun
267*4882a593Smuzhiyun /* endian control information */
268*4882a593Smuzhiyun nitrox_write_csr(ndev, NPS_CORE_CONTROL, 1ULL);
269*4882a593Smuzhiyun
270*4882a593Smuzhiyun /* disable ILK interface */
271*4882a593Smuzhiyun core_gbl_vfcfg.value = 0;
272*4882a593Smuzhiyun core_gbl_vfcfg.s.ilk_disable = 1;
273*4882a593Smuzhiyun core_gbl_vfcfg.s.cfg = __NDEV_MODE_PF;
274*4882a593Smuzhiyun nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, core_gbl_vfcfg.value);
275*4882a593Smuzhiyun
276*4882a593Smuzhiyun /* enable nps core interrupts */
277*4882a593Smuzhiyun enable_nps_core_interrupts(ndev);
278*4882a593Smuzhiyun }
279*4882a593Smuzhiyun
280*4882a593Smuzhiyun /**
281*4882a593Smuzhiyun * enable_nps_pkt_interrupts - enable NPS packet interrutps
282*4882a593Smuzhiyun * @ndev: NITROX device.
283*4882a593Smuzhiyun *
284*4882a593Smuzhiyun * This includes NPS packet in and slc interrupts.
285*4882a593Smuzhiyun */
enable_nps_pkt_interrupts(struct nitrox_device * ndev)286*4882a593Smuzhiyun static void enable_nps_pkt_interrupts(struct nitrox_device *ndev)
287*4882a593Smuzhiyun {
288*4882a593Smuzhiyun /* NPS packet in ring interrupts */
289*4882a593Smuzhiyun nitrox_write_csr(ndev, NPS_PKT_IN_RERR_LO_ENA_W1S, (~0ULL));
290*4882a593Smuzhiyun nitrox_write_csr(ndev, NPS_PKT_IN_RERR_HI_ENA_W1S, (~0ULL));
291*4882a593Smuzhiyun nitrox_write_csr(ndev, NPS_PKT_IN_ERR_TYPE_ENA_W1S, (~0ULL));
292*4882a593Smuzhiyun /* NPS packet slc port interrupts */
293*4882a593Smuzhiyun nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_HI_ENA_W1S, (~0ULL));
294*4882a593Smuzhiyun nitrox_write_csr(ndev, NPS_PKT_SLC_RERR_LO_ENA_W1S, (~0ULL));
295*4882a593Smuzhiyun nitrox_write_csr(ndev, NPS_PKT_SLC_ERR_TYPE_ENA_W1S, (~0uLL));
296*4882a593Smuzhiyun }
297*4882a593Smuzhiyun
void nitrox_config_nps_pkt_unit(struct nitrox_device *ndev)
{
	/* configure the packet input rings and solicit ports */
	nitrox_config_pkt_input_rings(ndev);
	nitrox_config_pkt_solicit_ports(ndev);

	/* turn on the NPS packet error interrupts */
	enable_nps_pkt_interrupts(ndev);
}
307*4882a593Smuzhiyun
reset_aqm_ring(struct nitrox_device * ndev,int ring)308*4882a593Smuzhiyun static void reset_aqm_ring(struct nitrox_device *ndev, int ring)
309*4882a593Smuzhiyun {
310*4882a593Smuzhiyun union aqmq_en aqmq_en_reg;
311*4882a593Smuzhiyun union aqmq_activity_stat activity_stat;
312*4882a593Smuzhiyun union aqmq_cmp_cnt cmp_cnt;
313*4882a593Smuzhiyun int max_retries = MAX_CSR_RETRIES;
314*4882a593Smuzhiyun u64 offset;
315*4882a593Smuzhiyun
316*4882a593Smuzhiyun /* step 1: disable the queue */
317*4882a593Smuzhiyun offset = AQMQ_ENX(ring);
318*4882a593Smuzhiyun aqmq_en_reg.value = 0;
319*4882a593Smuzhiyun aqmq_en_reg.queue_enable = 0;
320*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, aqmq_en_reg.value);
321*4882a593Smuzhiyun
322*4882a593Smuzhiyun /* step 2: wait for AQMQ_ACTIVITY_STATX[QUEUE_ACTIVE] to clear */
323*4882a593Smuzhiyun usleep_range(100, 150);
324*4882a593Smuzhiyun offset = AQMQ_ACTIVITY_STATX(ring);
325*4882a593Smuzhiyun do {
326*4882a593Smuzhiyun activity_stat.value = nitrox_read_csr(ndev, offset);
327*4882a593Smuzhiyun if (!activity_stat.queue_active)
328*4882a593Smuzhiyun break;
329*4882a593Smuzhiyun udelay(50);
330*4882a593Smuzhiyun } while (max_retries--);
331*4882a593Smuzhiyun
332*4882a593Smuzhiyun /* step 3: clear commands completed count */
333*4882a593Smuzhiyun offset = AQMQ_CMP_CNTX(ring);
334*4882a593Smuzhiyun cmp_cnt.value = nitrox_read_csr(ndev, offset);
335*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, cmp_cnt.value);
336*4882a593Smuzhiyun usleep_range(50, 100);
337*4882a593Smuzhiyun }
338*4882a593Smuzhiyun
enable_aqm_ring(struct nitrox_device * ndev,int ring)339*4882a593Smuzhiyun void enable_aqm_ring(struct nitrox_device *ndev, int ring)
340*4882a593Smuzhiyun {
341*4882a593Smuzhiyun union aqmq_en aqmq_en_reg;
342*4882a593Smuzhiyun u64 offset;
343*4882a593Smuzhiyun
344*4882a593Smuzhiyun offset = AQMQ_ENX(ring);
345*4882a593Smuzhiyun aqmq_en_reg.value = 0;
346*4882a593Smuzhiyun aqmq_en_reg.queue_enable = 1;
347*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, aqmq_en_reg.value);
348*4882a593Smuzhiyun usleep_range(50, 100);
349*4882a593Smuzhiyun }
350*4882a593Smuzhiyun
nitrox_config_aqm_rings(struct nitrox_device * ndev)351*4882a593Smuzhiyun void nitrox_config_aqm_rings(struct nitrox_device *ndev)
352*4882a593Smuzhiyun {
353*4882a593Smuzhiyun int ring;
354*4882a593Smuzhiyun
355*4882a593Smuzhiyun for (ring = 0; ring < ndev->nr_queues; ring++) {
356*4882a593Smuzhiyun struct nitrox_cmdq *cmdq = ndev->aqmq[ring];
357*4882a593Smuzhiyun union aqmq_drbl drbl;
358*4882a593Smuzhiyun union aqmq_qsz qsize;
359*4882a593Smuzhiyun union aqmq_cmp_thr cmp_thr;
360*4882a593Smuzhiyun u64 offset;
361*4882a593Smuzhiyun
362*4882a593Smuzhiyun /* steps 1 - 3 */
363*4882a593Smuzhiyun reset_aqm_ring(ndev, ring);
364*4882a593Smuzhiyun
365*4882a593Smuzhiyun /* step 4: clear doorbell count of ring */
366*4882a593Smuzhiyun offset = AQMQ_DRBLX(ring);
367*4882a593Smuzhiyun drbl.value = 0;
368*4882a593Smuzhiyun drbl.dbell_count = 0xFFFFFFFF;
369*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, drbl.value);
370*4882a593Smuzhiyun
371*4882a593Smuzhiyun /* step 5: configure host ring details */
372*4882a593Smuzhiyun
373*4882a593Smuzhiyun /* set host address for next command of ring */
374*4882a593Smuzhiyun offset = AQMQ_NXT_CMDX(ring);
375*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, 0ULL);
376*4882a593Smuzhiyun
377*4882a593Smuzhiyun /* set host address of ring base */
378*4882a593Smuzhiyun offset = AQMQ_BADRX(ring);
379*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, cmdq->dma);
380*4882a593Smuzhiyun
381*4882a593Smuzhiyun /* set ring size */
382*4882a593Smuzhiyun offset = AQMQ_QSZX(ring);
383*4882a593Smuzhiyun qsize.value = 0;
384*4882a593Smuzhiyun qsize.host_queue_size = ndev->qlen;
385*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, qsize.value);
386*4882a593Smuzhiyun
387*4882a593Smuzhiyun /* set command completion threshold */
388*4882a593Smuzhiyun offset = AQMQ_CMP_THRX(ring);
389*4882a593Smuzhiyun cmp_thr.value = 0;
390*4882a593Smuzhiyun cmp_thr.commands_completed_threshold = 1;
391*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, cmp_thr.value);
392*4882a593Smuzhiyun
393*4882a593Smuzhiyun /* step 6: enable the queue */
394*4882a593Smuzhiyun enable_aqm_ring(ndev, ring);
395*4882a593Smuzhiyun }
396*4882a593Smuzhiyun }
397*4882a593Smuzhiyun
/* Enable all AQM error interrupts: writing ~0 to these W1S
 * (write-1-to-set) registers SETS every enable bit.
 */
static void enable_aqm_interrupts(struct nitrox_device *ndev)
{
	/* set all interrupt enable bits (W1S registers) */
	nitrox_write_csr(ndev, AQM_DBELL_OVF_LO_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, AQM_DBELL_OVF_HI_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, AQM_DMA_RD_ERR_LO_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, AQM_DMA_RD_ERR_HI_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, AQM_EXEC_NA_LO_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, AQM_EXEC_NA_HI_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, AQM_EXEC_ERR_LO_ENA_W1S, (~0ULL));
	nitrox_write_csr(ndev, AQM_EXEC_ERR_HI_ENA_W1S, (~0ULL));
}
410*4882a593Smuzhiyun
void nitrox_config_aqm_unit(struct nitrox_device *ndev)
{
	/* bring up the AQM command queues */
	nitrox_config_aqm_rings(ndev);

	/* turn on the AQM error interrupts */
	enable_aqm_interrupts(ndev);
}
419*4882a593Smuzhiyun
nitrox_config_pom_unit(struct nitrox_device * ndev)420*4882a593Smuzhiyun void nitrox_config_pom_unit(struct nitrox_device *ndev)
421*4882a593Smuzhiyun {
422*4882a593Smuzhiyun union pom_int_ena_w1s pom_int;
423*4882a593Smuzhiyun int i;
424*4882a593Smuzhiyun
425*4882a593Smuzhiyun /* enable pom interrupts */
426*4882a593Smuzhiyun pom_int.value = 0;
427*4882a593Smuzhiyun pom_int.s.illegal_dport = 1;
428*4882a593Smuzhiyun nitrox_write_csr(ndev, POM_INT_ENA_W1S, pom_int.value);
429*4882a593Smuzhiyun
430*4882a593Smuzhiyun /* enable perf counters */
431*4882a593Smuzhiyun for (i = 0; i < ndev->hw.se_cores; i++)
432*4882a593Smuzhiyun nitrox_write_csr(ndev, POM_PERF_CTL, BIT_ULL(i));
433*4882a593Smuzhiyun }
434*4882a593Smuzhiyun
435*4882a593Smuzhiyun /**
436*4882a593Smuzhiyun * nitrox_config_rand_unit - enable NITROX random number unit
437*4882a593Smuzhiyun * @ndev: NITROX device
438*4882a593Smuzhiyun */
nitrox_config_rand_unit(struct nitrox_device * ndev)439*4882a593Smuzhiyun void nitrox_config_rand_unit(struct nitrox_device *ndev)
440*4882a593Smuzhiyun {
441*4882a593Smuzhiyun union efl_rnm_ctl_status efl_rnm_ctl;
442*4882a593Smuzhiyun u64 offset;
443*4882a593Smuzhiyun
444*4882a593Smuzhiyun offset = EFL_RNM_CTL_STATUS;
445*4882a593Smuzhiyun efl_rnm_ctl.value = nitrox_read_csr(ndev, offset);
446*4882a593Smuzhiyun efl_rnm_ctl.s.ent_en = 1;
447*4882a593Smuzhiyun efl_rnm_ctl.s.rng_en = 1;
448*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, efl_rnm_ctl.value);
449*4882a593Smuzhiyun }
450*4882a593Smuzhiyun
nitrox_config_efl_unit(struct nitrox_device * ndev)451*4882a593Smuzhiyun void nitrox_config_efl_unit(struct nitrox_device *ndev)
452*4882a593Smuzhiyun {
453*4882a593Smuzhiyun int i;
454*4882a593Smuzhiyun
455*4882a593Smuzhiyun for (i = 0; i < NR_CLUSTERS; i++) {
456*4882a593Smuzhiyun union efl_core_int_ena_w1s efl_core_int;
457*4882a593Smuzhiyun u64 offset;
458*4882a593Smuzhiyun
459*4882a593Smuzhiyun /* EFL core interrupts */
460*4882a593Smuzhiyun offset = EFL_CORE_INT_ENA_W1SX(i);
461*4882a593Smuzhiyun efl_core_int.value = 0;
462*4882a593Smuzhiyun efl_core_int.s.len_ovr = 1;
463*4882a593Smuzhiyun efl_core_int.s.d_left = 1;
464*4882a593Smuzhiyun efl_core_int.s.epci_decode_err = 1;
465*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, efl_core_int.value);
466*4882a593Smuzhiyun
467*4882a593Smuzhiyun offset = EFL_CORE_VF_ERR_INT0_ENA_W1SX(i);
468*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, (~0ULL));
469*4882a593Smuzhiyun offset = EFL_CORE_VF_ERR_INT1_ENA_W1SX(i);
470*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, (~0ULL));
471*4882a593Smuzhiyun }
472*4882a593Smuzhiyun }
473*4882a593Smuzhiyun
nitrox_config_bmi_unit(struct nitrox_device * ndev)474*4882a593Smuzhiyun void nitrox_config_bmi_unit(struct nitrox_device *ndev)
475*4882a593Smuzhiyun {
476*4882a593Smuzhiyun union bmi_ctl bmi_ctl;
477*4882a593Smuzhiyun union bmi_int_ena_w1s bmi_int_ena;
478*4882a593Smuzhiyun u64 offset;
479*4882a593Smuzhiyun
480*4882a593Smuzhiyun /* no threshold limits for PCIe */
481*4882a593Smuzhiyun offset = BMI_CTL;
482*4882a593Smuzhiyun bmi_ctl.value = nitrox_read_csr(ndev, offset);
483*4882a593Smuzhiyun bmi_ctl.s.max_pkt_len = 0xff;
484*4882a593Smuzhiyun bmi_ctl.s.nps_free_thrsh = 0xff;
485*4882a593Smuzhiyun bmi_ctl.s.nps_hdrq_thrsh = 0x7a;
486*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, bmi_ctl.value);
487*4882a593Smuzhiyun
488*4882a593Smuzhiyun /* enable interrupts */
489*4882a593Smuzhiyun offset = BMI_INT_ENA_W1S;
490*4882a593Smuzhiyun bmi_int_ena.value = 0;
491*4882a593Smuzhiyun bmi_int_ena.s.max_len_err_nps = 1;
492*4882a593Smuzhiyun bmi_int_ena.s.pkt_rcv_err_nps = 1;
493*4882a593Smuzhiyun bmi_int_ena.s.fpf_undrrn = 1;
494*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, bmi_int_ena.value);
495*4882a593Smuzhiyun }
496*4882a593Smuzhiyun
nitrox_config_bmo_unit(struct nitrox_device * ndev)497*4882a593Smuzhiyun void nitrox_config_bmo_unit(struct nitrox_device *ndev)
498*4882a593Smuzhiyun {
499*4882a593Smuzhiyun union bmo_ctl2 bmo_ctl2;
500*4882a593Smuzhiyun u64 offset;
501*4882a593Smuzhiyun
502*4882a593Smuzhiyun /* no threshold limits for PCIe */
503*4882a593Smuzhiyun offset = BMO_CTL2;
504*4882a593Smuzhiyun bmo_ctl2.value = nitrox_read_csr(ndev, offset);
505*4882a593Smuzhiyun bmo_ctl2.s.nps_slc_buf_thrsh = 0xff;
506*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, bmo_ctl2.value);
507*4882a593Smuzhiyun }
508*4882a593Smuzhiyun
invalidate_lbc(struct nitrox_device * ndev)509*4882a593Smuzhiyun void invalidate_lbc(struct nitrox_device *ndev)
510*4882a593Smuzhiyun {
511*4882a593Smuzhiyun union lbc_inval_ctl lbc_ctl;
512*4882a593Smuzhiyun union lbc_inval_status lbc_stat;
513*4882a593Smuzhiyun int max_retries = MAX_CSR_RETRIES;
514*4882a593Smuzhiyun u64 offset;
515*4882a593Smuzhiyun
516*4882a593Smuzhiyun /* invalidate LBC */
517*4882a593Smuzhiyun offset = LBC_INVAL_CTL;
518*4882a593Smuzhiyun lbc_ctl.value = nitrox_read_csr(ndev, offset);
519*4882a593Smuzhiyun lbc_ctl.s.cam_inval_start = 1;
520*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, lbc_ctl.value);
521*4882a593Smuzhiyun
522*4882a593Smuzhiyun offset = LBC_INVAL_STATUS;
523*4882a593Smuzhiyun do {
524*4882a593Smuzhiyun lbc_stat.value = nitrox_read_csr(ndev, offset);
525*4882a593Smuzhiyun if (lbc_stat.s.done)
526*4882a593Smuzhiyun break;
527*4882a593Smuzhiyun udelay(50);
528*4882a593Smuzhiyun } while (max_retries--);
529*4882a593Smuzhiyun }
530*4882a593Smuzhiyun
nitrox_config_lbc_unit(struct nitrox_device * ndev)531*4882a593Smuzhiyun void nitrox_config_lbc_unit(struct nitrox_device *ndev)
532*4882a593Smuzhiyun {
533*4882a593Smuzhiyun union lbc_int_ena_w1s lbc_int_ena;
534*4882a593Smuzhiyun u64 offset;
535*4882a593Smuzhiyun
536*4882a593Smuzhiyun invalidate_lbc(ndev);
537*4882a593Smuzhiyun
538*4882a593Smuzhiyun /* enable interrupts */
539*4882a593Smuzhiyun offset = LBC_INT_ENA_W1S;
540*4882a593Smuzhiyun lbc_int_ena.value = 0;
541*4882a593Smuzhiyun lbc_int_ena.s.dma_rd_err = 1;
542*4882a593Smuzhiyun lbc_int_ena.s.over_fetch_err = 1;
543*4882a593Smuzhiyun lbc_int_ena.s.cam_inval_abort = 1;
544*4882a593Smuzhiyun lbc_int_ena.s.cam_hard_err = 1;
545*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, lbc_int_ena.value);
546*4882a593Smuzhiyun
547*4882a593Smuzhiyun offset = LBC_PLM_VF1_64_INT_ENA_W1S;
548*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, (~0ULL));
549*4882a593Smuzhiyun offset = LBC_PLM_VF65_128_INT_ENA_W1S;
550*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, (~0ULL));
551*4882a593Smuzhiyun
552*4882a593Smuzhiyun offset = LBC_ELM_VF1_64_INT_ENA_W1S;
553*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, (~0ULL));
554*4882a593Smuzhiyun offset = LBC_ELM_VF65_128_INT_ENA_W1S;
555*4882a593Smuzhiyun nitrox_write_csr(ndev, offset, (~0ULL));
556*4882a593Smuzhiyun }
557*4882a593Smuzhiyun
config_nps_core_vfcfg_mode(struct nitrox_device * ndev,enum vf_mode mode)558*4882a593Smuzhiyun void config_nps_core_vfcfg_mode(struct nitrox_device *ndev, enum vf_mode mode)
559*4882a593Smuzhiyun {
560*4882a593Smuzhiyun union nps_core_gbl_vfcfg vfcfg;
561*4882a593Smuzhiyun
562*4882a593Smuzhiyun vfcfg.value = nitrox_read_csr(ndev, NPS_CORE_GBL_VFCFG);
563*4882a593Smuzhiyun vfcfg.s.cfg = mode & 0x7;
564*4882a593Smuzhiyun
565*4882a593Smuzhiyun nitrox_write_csr(ndev, NPS_CORE_GBL_VFCFG, vfcfg.value);
566*4882a593Smuzhiyun }
567*4882a593Smuzhiyun
/* Map the available SE/AE core counts to the part-number core option. */
static const char *get_core_option(u8 se_cores, u8 ae_cores)
{
	if (ae_cores == AE_MAX_CORES) {
		if (se_cores == SE_MAX_CORES)
			return "60";
		if (se_cores == 40)
			return "60s";
		/* unrecognised SE count with full AE complement */
		return "";
	}

	return (ae_cores == (AE_MAX_CORES / 2)) ? "30" : "60i";
}
589*4882a593Smuzhiyun
/* Map ZIP core count and core frequency to the part-number feature option. */
static const char *get_feature_option(u8 zip_cores, int core_freq)
{
	if (zip_cores == 0)
		return "";
	if (zip_cores < ZIP_MAX_CORES)
		return "-C15";

	/* full ZIP complement: graded by core frequency */
	if (core_freq >= 850)
		return "-C45";
	if (core_freq >= 750)
		return "-C35";
	if (core_freq >= 550)
		return "-C25";

	return "";
}
606*4882a593Smuzhiyun
/* Read fuse and boot registers to populate ndev->hw: core frequency,
 * usable AE/SE/ZIP core counts, and the derived part name string.
 */
void nitrox_get_hwinfo(struct nitrox_device *ndev)
{
	union emu_fuse_map emu_fuse;
	union rst_boot rst_boot;
	union fus_dat1 fus_dat1;
	unsigned char name[IFNAMSIZ * 2] = {};
	int i, dead_cores;
	u64 offset;

	/* get core frequency */
	offset = RST_BOOT;
	rst_boot.value = nitrox_read_csr(ndev, offset);
	ndev->hw.freq = (rst_boot.pnr_mul + 3) * PLL_REF_CLK;

	/* count usable cores per cluster: each set fuse bit marks a
	 * dead core, so subtract the popcount from the per-cluster max
	 */
	for (i = 0; i < NR_CLUSTERS; i++) {
		offset = EMU_FUSE_MAPX(i);
		emu_fuse.value = nitrox_read_csr(ndev, offset);
		if (emu_fuse.s.valid) {
			dead_cores = hweight32(emu_fuse.s.ae_fuse);
			ndev->hw.ae_cores += AE_CORES_PER_CLUSTER - dead_cores;
			dead_cores = hweight16(emu_fuse.s.se_fuse);
			ndev->hw.se_cores += SE_CORES_PER_CLUSTER - dead_cores;
		}
	}
	/* find zip hardware availability */
	offset = FUS_DAT1;
	fus_dat1.value = nitrox_read_csr(ndev, offset);
	if (!fus_dat1.nozip) {
		dead_cores = hweight8(fus_dat1.zip_info);
		ndev->hw.zip_cores = ZIP_MAX_CORES - dead_cores;
	}

	/* determine the partname
	 * CNN55<core option>-<freq><pincount>-<feature option>-<rev>
	 */
	snprintf(name, sizeof(name), "CNN55%s-%3dBG676%s-1.%u",
		 get_core_option(ndev->hw.se_cores, ndev->hw.ae_cores),
		 ndev->hw.freq,
		 get_feature_option(ndev->hw.zip_cores, ndev->hw.freq),
		 ndev->hw.revision_id);

	/* copy partname */
	/* NOTE(review): strncpy() does not NUL-terminate when the source
	 * is longer than the destination; kernel style prefers strscpy().
	 * Verify sizeof(ndev->hw.partname) covers the longest name built
	 * above before relying on termination.
	 */
	strncpy(ndev->hw.partname, name, sizeof(ndev->hw.partname));
}
651*4882a593Smuzhiyun
enable_pf2vf_mbox_interrupts(struct nitrox_device * ndev)652*4882a593Smuzhiyun void enable_pf2vf_mbox_interrupts(struct nitrox_device *ndev)
653*4882a593Smuzhiyun {
654*4882a593Smuzhiyun u64 value = ~0ULL;
655*4882a593Smuzhiyun u64 reg_addr;
656*4882a593Smuzhiyun
657*4882a593Smuzhiyun /* Mailbox interrupt low enable set register */
658*4882a593Smuzhiyun reg_addr = NPS_PKT_MBOX_INT_LO_ENA_W1S;
659*4882a593Smuzhiyun nitrox_write_csr(ndev, reg_addr, value);
660*4882a593Smuzhiyun
661*4882a593Smuzhiyun /* Mailbox interrupt high enable set register */
662*4882a593Smuzhiyun reg_addr = NPS_PKT_MBOX_INT_HI_ENA_W1S;
663*4882a593Smuzhiyun nitrox_write_csr(ndev, reg_addr, value);
664*4882a593Smuzhiyun }
665*4882a593Smuzhiyun
disable_pf2vf_mbox_interrupts(struct nitrox_device * ndev)666*4882a593Smuzhiyun void disable_pf2vf_mbox_interrupts(struct nitrox_device *ndev)
667*4882a593Smuzhiyun {
668*4882a593Smuzhiyun u64 value = ~0ULL;
669*4882a593Smuzhiyun u64 reg_addr;
670*4882a593Smuzhiyun
671*4882a593Smuzhiyun /* Mailbox interrupt low enable clear register */
672*4882a593Smuzhiyun reg_addr = NPS_PKT_MBOX_INT_LO_ENA_W1C;
673*4882a593Smuzhiyun nitrox_write_csr(ndev, reg_addr, value);
674*4882a593Smuzhiyun
675*4882a593Smuzhiyun /* Mailbox interrupt high enable clear register */
676*4882a593Smuzhiyun reg_addr = NPS_PKT_MBOX_INT_HI_ENA_W1C;
677*4882a593Smuzhiyun nitrox_write_csr(ndev, reg_addr, value);
678*4882a593Smuzhiyun }
679