// SPDX-License-Identifier: GPL-2.0-only
#include <linux/aer.h>
#include <linux/delay.h>
#include <linux/firmware.h>
#include <linux/list.h>
#include <linux/module.h>
#include <linux/mutex.h>
#include <linux/pci.h>
#include <linux/pci_ids.h>

#include "nitrox_dev.h"
#include "nitrox_common.h"
#include "nitrox_csr.h"
#include "nitrox_hal.h"
#include "nitrox_isr.h"
#include "nitrox_debugfs.h"

#define CNN55XX_DEV_ID 0x12
#define UCODE_HLEN 48
#define DEFAULT_SE_GROUP 0
#define DEFAULT_AE_GROUP 0

#define DRIVER_VERSION "1.2"
#define CNN55XX_UCD_BLOCK_SIZE 32768
#define CNN55XX_MAX_UCODE_SIZE (CNN55XX_UCD_BLOCK_SIZE * 2)
#define FW_DIR "cavium/"
/* SE microcode */
#define SE_FW FW_DIR "cnn55xx_se.fw"
/* AE microcode */
#define AE_FW FW_DIR "cnn55xx_ae.fw"

static const char nitrox_driver_name[] = "CNN55XX";

static LIST_HEAD(ndevlist);
static DEFINE_MUTEX(devlist_lock);
static unsigned int num_devices;

/**
 * nitrox_pci_tbl - PCI Device ID Table
 */
static const struct pci_device_id nitrox_pci_tbl[] = {
	{PCI_VDEVICE(CAVIUM, CNN55XX_DEV_ID), 0},
	/* required last entry */
	{0, }
};
MODULE_DEVICE_TABLE(pci, nitrox_pci_tbl);

static unsigned int qlen = DEFAULT_CMD_QLEN;
module_param(qlen, uint, 0644);
MODULE_PARM_DESC(qlen, "Command queue length - default 2048");

#ifdef CONFIG_PCI_IOV
int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs);
#else
int nitrox_sriov_configure(struct pci_dev *pdev, int num_vfs)
{
	return 0;
}
#endif

/**
 * struct ucode - Firmware Header
 * @id: microcode ID
 * @version: firmware version
 * @code_size: code section size
 * @raz: alignment
 * @code: code section
 */
struct ucode {
	u8 id;
	char version[VERSION_LEN - 1];
	__be32 code_size;
	u8 raz[12];
	u64 code[];
};

/**
 * write_to_ucd_unit - Write Firmware to NITROX UCD unit
 * @ndev: NITROX device
 * @ucode_size: microcode size
 * @ucode_data: microcode data
 * @block_num: UCD block number
 */
static void write_to_ucd_unit(struct nitrox_device *ndev, u32 ucode_size,
			      u64 *ucode_data, int block_num)
{
	u32 code_size;
	u64 offset, data;
	int i = 0;

	/*
	 * UCD structure
	 *
	 *  -------------
	 *  |    BLK 7  |
	 *  -------------
	 *  |    BLK 6  |
	 *  -------------
	 *  |    ...    |
	 *  -------------
	 *  |    BLK 0  |
	 *  -------------
	 *  Total of 8 blocks, each size 32KB
	 */

	/* set the block number */
	offset = UCD_UCODE_LOAD_BLOCK_NUM;
	nitrox_write_csr(ndev, offset, block_num);

	code_size = roundup(ucode_size, 16);
	while (code_size) {
		data = ucode_data[i];
		/* write 8 bytes at a time */
		offset = UCD_UCODE_LOAD_IDX_DATAX(i);
		nitrox_write_csr(ndev, offset, data);
		code_size -= 8;
		i++;
	}

	usleep_range(300, 400);
}

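/**
 * nitrox_load_fw - Load SE and AE microcode to the UCD unit
 * @ndev: NITROX device
 *
 * Writes the SE image to UCD block 0 and the AE image to UCD block 2,
 * places all SE/AE cores in their default groups and points each core
 * at its microcode block.
 */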
static int nitrox_load_fw(struct nitrox_device *ndev)
{
	const struct firmware *fw;
	const char *fw_name;
	struct ucode *ucode;
	u64 *ucode_data;
	u64 offset;
	union ucd_core_eid_ucode_block_num core_2_eid_val;
	union aqm_grp_execmsk_lo aqm_grp_execmask_lo;
	union aqm_grp_execmsk_hi aqm_grp_execmask_hi;
	u32 ucode_size;
	int ret, i = 0;

	fw_name = SE_FW;
	dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name);

	ret = request_firmware(&fw, fw_name, DEV(ndev));
	if (ret < 0) {
		dev_err(DEV(ndev), "failed to get firmware %s\n", fw_name);
		return ret;
	}

	ucode = (struct ucode *)fw->data;

	ucode_size = be32_to_cpu(ucode->code_size) * 2;
	if (!ucode_size || ucode_size > CNN55XX_MAX_UCODE_SIZE) {
		dev_err(DEV(ndev), "Invalid ucode size: %u for firmware %s\n",
			ucode_size, fw_name);
		release_firmware(fw);
		return -EINVAL;
	}
	ucode_data = ucode->code;

	/* copy the firmware version */
	memcpy(&ndev->hw.fw_name[0][0], ucode->version, (VERSION_LEN - 2));
	ndev->hw.fw_name[0][VERSION_LEN - 1] = '\0';

	/* Load SE Firmware on UCD Block 0 */
	write_to_ucd_unit(ndev, ucode_size, ucode_data, 0);

	release_firmware(fw);

	/* put all SE cores in DEFAULT_SE_GROUP */
	offset = POM_GRP_EXECMASKX(DEFAULT_SE_GROUP);
	nitrox_write_csr(ndev, offset, (~0ULL));

	/* write block number and firmware length
	 * bit:<2:0> block number
	 * bit:3 is set SE uses 32KB microcode
	 * bit:3 is clear SE uses 64KB microcode
	 */
	core_2_eid_val.value = 0ULL;
	core_2_eid_val.ucode_blk = 0;
	if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE)
		core_2_eid_val.ucode_len = 1;
	else
		core_2_eid_val.ucode_len = 0;

	for (i = 0; i < ndev->hw.se_cores; i++) {
		offset = UCD_SE_EID_UCODE_BLOCK_NUMX(i);
		nitrox_write_csr(ndev, offset, core_2_eid_val.value);
	}

	fw_name = AE_FW;
	dev_info(DEV(ndev), "Loading firmware \"%s\"\n", fw_name);

	ret = request_firmware(&fw, fw_name, DEV(ndev));
	if (ret < 0) {
		dev_err(DEV(ndev), "failed to get firmware %s\n", fw_name);
		return ret;
	}

	ucode = (struct ucode *)fw->data;

	ucode_size = be32_to_cpu(ucode->code_size) * 2;
	if (!ucode_size || ucode_size > CNN55XX_MAX_UCODE_SIZE) {
		dev_err(DEV(ndev), "Invalid ucode size: %u for firmware %s\n",
			ucode_size, fw_name);
		release_firmware(fw);
		return -EINVAL;
	}
	ucode_data = ucode->code;

	/* copy the firmware version */
	memcpy(&ndev->hw.fw_name[1][0], ucode->version, (VERSION_LEN - 2));
	ndev->hw.fw_name[1][VERSION_LEN - 1] = '\0';

	/* Load AE Firmware on UCD Block 2 */
	write_to_ucd_unit(ndev, ucode_size, ucode_data, 2);

	release_firmware(fw);

	/* put all AE cores in DEFAULT_AE_GROUP */
	offset = AQM_GRP_EXECMSK_LOX(DEFAULT_AE_GROUP);
	aqm_grp_execmask_lo.exec_0_to_39 = 0xFFFFFFFFFFULL;
	nitrox_write_csr(ndev, offset, aqm_grp_execmask_lo.value);
	offset = AQM_GRP_EXECMSK_HIX(DEFAULT_AE_GROUP);
	aqm_grp_execmask_hi.exec_40_to_79 = 0xFFFFFFFFFFULL;
	nitrox_write_csr(ndev, offset, aqm_grp_execmask_hi.value);

	/* write block number and firmware length
	 * bit:<2:0> block number
	 * bit:3 is set AE uses 32KB microcode
	 * bit:3 is clear AE uses 64KB microcode
	 */
	core_2_eid_val.value = 0ULL;
	core_2_eid_val.ucode_blk = 2;
	if (ucode_size <= CNN55XX_UCD_BLOCK_SIZE)
		core_2_eid_val.ucode_len = 1;
	else
		core_2_eid_val.ucode_len = 0;

	for (i = 0; i < ndev->hw.ae_cores; i++) {
		offset = UCD_AE_EID_UCODE_BLOCK_NUMX(i);
		nitrox_write_csr(ndev, offset, core_2_eid_val.value);
	}

	return 0;
}

/**
 * nitrox_add_to_devlist - add NITROX device to global device list
 * @ndev: NITROX device
 */
static int nitrox_add_to_devlist(struct nitrox_device *ndev)
{
	struct nitrox_device *dev;
	int ret = 0;

	INIT_LIST_HEAD(&ndev->list);
	refcount_set(&ndev->refcnt, 1);

	mutex_lock(&devlist_lock);
	list_for_each_entry(dev, &ndevlist, list) {
		if (dev == ndev) {
			ret = -EEXIST;
			goto unlock;
		}
	}
	ndev->idx = num_devices++;
	list_add_tail(&ndev->list, &ndevlist);
unlock:
	mutex_unlock(&devlist_lock);
	return ret;
}

/**
 * nitrox_remove_from_devlist - remove NITROX device from
 *   global device list
 * @ndev: NITROX device
 */
static void nitrox_remove_from_devlist(struct nitrox_device *ndev)
{
	mutex_lock(&devlist_lock);
	list_del(&ndev->list);
	num_devices--;
	mutex_unlock(&devlist_lock);
}

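/**
 * nitrox_get_first_device - get the first NITROX device in ready state
 *
 * Return: first ready device with its reference count incremented, or
 * NULL if no device is ready.
 */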
struct nitrox_device *nitrox_get_first_device(void)
{
	struct nitrox_device *ndev;

	mutex_lock(&devlist_lock);
	list_for_each_entry(ndev, &ndevlist, list) {
		if (nitrox_ready(ndev))
			break;
	}
	mutex_unlock(&devlist_lock);
	if (&ndev->list == &ndevlist)
		return NULL;

	refcount_inc(&ndev->refcnt);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();
	return ndev;
}

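/**
 * nitrox_put_device - drop a reference to a NITROX device
 * @ndev: NITROX device
 */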
void nitrox_put_device(struct nitrox_device *ndev)
{
	if (!ndev)
		return;

	refcount_dec(&ndev->refcnt);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();
}

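/**
 * nitrox_device_flr - perform a Function Level Reset of the device
 * @pdev: PCI device information struct
 *
 * Saves the PCI state, issues an FLR if the device supports it and
 * restores the saved state afterwards.
 */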
static int nitrox_device_flr(struct pci_dev *pdev)
{
	int pos = 0;

	pos = pci_save_state(pdev);
	if (pos) {
		dev_err(&pdev->dev, "Failed to save pci state\n");
		return -ENOMEM;
	}

	/* check flr support */
	if (pcie_has_flr(pdev))
		pcie_flr(pdev);

	pci_restore_state(pdev);

	return 0;
}

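/**
 * nitrox_pf_sw_init - initialize PF software state and register interrupts
 * @ndev: NITROX device
 */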
static int nitrox_pf_sw_init(struct nitrox_device *ndev)
{
	int err;

	err = nitrox_common_sw_init(ndev);
	if (err)
		return err;

	err = nitrox_register_interrupts(ndev);
	if (err)
		nitrox_common_sw_cleanup(ndev);

	return err;
}

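/**
 * nitrox_pf_sw_cleanup - unregister interrupts and free PF software state
 * @ndev: NITROX device
 */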
static void nitrox_pf_sw_cleanup(struct nitrox_device *ndev)
{
	nitrox_unregister_interrupts(ndev);
	nitrox_common_sw_cleanup(ndev);
}

/**
 * nitrox_bist_check - Check NITROX BIST registers status
 * @ndev: NITROX device
 */
static int nitrox_bist_check(struct nitrox_device *ndev)
{
	u64 value = 0;
	int i;

	for (i = 0; i < NR_CLUSTERS; i++) {
		value += nitrox_read_csr(ndev, EMU_BIST_STATUSX(i));
		value += nitrox_read_csr(ndev, EFL_CORE_BIST_REGX(i));
	}
	value += nitrox_read_csr(ndev, UCD_BIST_STATUS);
	value += nitrox_read_csr(ndev, NPS_CORE_BIST_REG);
	value += nitrox_read_csr(ndev, NPS_CORE_NPC_BIST_REG);
	value += nitrox_read_csr(ndev, NPS_PKT_SLC_BIST_REG);
	value += nitrox_read_csr(ndev, NPS_PKT_IN_BIST_REG);
	value += nitrox_read_csr(ndev, POM_BIST_REG);
	value += nitrox_read_csr(ndev, BMI_BIST_REG);
	value += nitrox_read_csr(ndev, EFL_TOP_BIST_STAT);
	value += nitrox_read_csr(ndev, BMO_BIST_REG);
	value += nitrox_read_csr(ndev, LBC_BIST_STATUS);
	value += nitrox_read_csr(ndev, PEM_BIST_STATUSX(0));
	if (value)
		return -EIO;
	return 0;
}

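/**
 * nitrox_pf_hw_init - configure PF hardware units and load firmware
 * @ndev: NITROX device
 */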
static int nitrox_pf_hw_init(struct nitrox_device *ndev)
{
	int err;

	err = nitrox_bist_check(ndev);
	if (err) {
		dev_err(&ndev->pdev->dev, "BIST check failed\n");
		return err;
	}
	/* get cores information */
	nitrox_get_hwinfo(ndev);

	nitrox_config_nps_core_unit(ndev);
	nitrox_config_aqm_unit(ndev);
	nitrox_config_nps_pkt_unit(ndev);
	nitrox_config_pom_unit(ndev);
	nitrox_config_efl_unit(ndev);
	/* configure IO units */
	nitrox_config_bmi_unit(ndev);
	nitrox_config_bmo_unit(ndev);
	/* configure Local Buffer Cache */
	nitrox_config_lbc_unit(ndev);
	nitrox_config_rand_unit(ndev);

	/* load firmware on cores */
	err = nitrox_load_fw(ndev);
	if (err)
		return err;

	nitrox_config_emu_unit(ndev);

	return 0;
}

/**
 * nitrox_probe - NITROX Initialization function.
 * @pdev: PCI device information struct
 * @id: entry in nitrox_pci_tbl
 *
 * Return: 0, if the driver is bound to the device, or
 *         a negative error if there is failure.
 */
static int nitrox_probe(struct pci_dev *pdev,
			const struct pci_device_id *id)
{
	struct nitrox_device *ndev;
	int err;

	dev_info_once(&pdev->dev, "%s driver version %s\n",
		      nitrox_driver_name, DRIVER_VERSION);

	err = pci_enable_device_mem(pdev);
	if (err)
		return err;

	/* do FLR */
	err = nitrox_device_flr(pdev);
	if (err) {
		dev_err(&pdev->dev, "FLR failed\n");
		pci_disable_device(pdev);
		return err;
	}

	if (!dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64))) {
		dev_dbg(&pdev->dev, "DMA to 64-BIT address\n");
	} else {
		err = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(32));
		if (err) {
			dev_err(&pdev->dev, "DMA configuration failed\n");
			pci_disable_device(pdev);
			return err;
		}
	}

	err = pci_request_mem_regions(pdev, nitrox_driver_name);
	if (err) {
		pci_disable_device(pdev);
		return err;
	}
	pci_set_master(pdev);

	ndev = kzalloc(sizeof(*ndev), GFP_KERNEL);
	if (!ndev) {
		err = -ENOMEM;
		goto ndev_fail;
	}

	pci_set_drvdata(pdev, ndev);
	ndev->pdev = pdev;

	/* add to device list */
	nitrox_add_to_devlist(ndev);

	ndev->hw.vendor_id = pdev->vendor;
	ndev->hw.device_id = pdev->device;
	ndev->hw.revision_id = pdev->revision;
	/* command timeout in jiffies */
	ndev->timeout = msecs_to_jiffies(CMD_TIMEOUT);
	ndev->node = dev_to_node(&pdev->dev);
	if (ndev->node == NUMA_NO_NODE)
		ndev->node = 0;

	ndev->bar_addr = ioremap(pci_resource_start(pdev, 0),
				 pci_resource_len(pdev, 0));
	if (!ndev->bar_addr) {
		err = -EIO;
		goto ioremap_err;
	}
	/* allocate command queues based on cpus, max queues are 64 */
	ndev->nr_queues = min_t(u32, MAX_PF_QUEUES, num_online_cpus());
	ndev->qlen = qlen;

	err = nitrox_pf_sw_init(ndev);
	if (err)
		goto ioremap_err;

	err = nitrox_pf_hw_init(ndev);
	if (err)
		goto pf_hw_fail;

	nitrox_debugfs_init(ndev);

	/* clear the statistics */
	atomic64_set(&ndev->stats.posted, 0);
	atomic64_set(&ndev->stats.completed, 0);
	atomic64_set(&ndev->stats.dropped, 0);

	atomic_set(&ndev->state, __NDEV_READY);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();

	err = nitrox_crypto_register();
	if (err)
		goto crypto_fail;

	return 0;

crypto_fail:
	nitrox_debugfs_exit(ndev);
	atomic_set(&ndev->state, __NDEV_NOT_READY);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();
pf_hw_fail:
	nitrox_pf_sw_cleanup(ndev);
ioremap_err:
	nitrox_remove_from_devlist(ndev);
	kfree(ndev);
	pci_set_drvdata(pdev, NULL);
ndev_fail:
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
	return err;
}

/**
 * nitrox_remove - Unbind the driver from the device.
 * @pdev: PCI device information struct
 */
static void nitrox_remove(struct pci_dev *pdev)
{
	struct nitrox_device *ndev = pci_get_drvdata(pdev);

	if (!ndev)
		return;

	if (!refcount_dec_and_test(&ndev->refcnt)) {
		dev_err(DEV(ndev), "Device refcnt not zero (%d)\n",
			refcount_read(&ndev->refcnt));
		return;
	}

	dev_info(DEV(ndev), "Removing Device %x:%x\n",
		 ndev->hw.vendor_id, ndev->hw.device_id);

	atomic_set(&ndev->state, __NDEV_NOT_READY);
	/* barrier to sync with other cpus */
	smp_mb__after_atomic();

	nitrox_remove_from_devlist(ndev);

#ifdef CONFIG_PCI_IOV
	/* disable SR-IOV */
	nitrox_sriov_configure(pdev, 0);
#endif
	nitrox_crypto_unregister();
	nitrox_debugfs_exit(ndev);
	nitrox_pf_sw_cleanup(ndev);

	iounmap(ndev->bar_addr);
	kfree(ndev);

	pci_set_drvdata(pdev, NULL);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

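/**
 * nitrox_shutdown - release device resources on system shutdown/reboot
 * @pdev: PCI device information struct
 */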
static void nitrox_shutdown(struct pci_dev *pdev)
{
	pci_set_drvdata(pdev, NULL);
	pci_release_mem_regions(pdev);
	pci_disable_device(pdev);
}

static struct pci_driver nitrox_driver = {
	.name = nitrox_driver_name,
	.id_table = nitrox_pci_tbl,
	.probe = nitrox_probe,
	.remove	= nitrox_remove,
	.shutdown = nitrox_shutdown,
#ifdef CONFIG_PCI_IOV
	.sriov_configure = nitrox_sriov_configure,
#endif
};

module_pci_driver(nitrox_driver);

MODULE_AUTHOR("Srikanth Jampala <Jampala.Srikanth@cavium.com>");
MODULE_DESCRIPTION("Cavium CNN55XX PF Driver " DRIVER_VERSION);
MODULE_LICENSE("GPL");
MODULE_VERSION(DRIVER_VERSION);
MODULE_FIRMWARE(SE_FW);
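/* Added here: the AE image is also requested in nitrox_load_fw(), so
 * declare it as well for firmware packaging tools.
 */
MODULE_FIRMWARE(AE_FW);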