Lines matching refs: cpt
27 static void cpt_disable_cores(struct cpt_device *cpt, u64 coremask, in cpt_disable_cores() argument
33 struct device *dev = &cpt->pdev->dev; in cpt_disable_cores()
36 coremask = (coremask << cpt->max_se_cores); in cpt_disable_cores()
39 grpmask = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp)); in cpt_disable_cores()
40 cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp), in cpt_disable_cores()
43 grp = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0)); in cpt_disable_cores()
46 grp = cpt_read_csr64(cpt->reg_base, in cpt_disable_cores()
55 pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0)); in cpt_disable_cores()
56 cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0), in cpt_disable_cores()
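Read in order, the cpt_disable_cores() lines above describe a three-step quiesce: detach the cores from their engine group (GX_EN), poll EXEC_BUSY until they drain, then clear their enable bits in EXE_CTL. A minimal sketch of that sequence follows; the AE_TYPES guard around the coremask shift, the poll bound, and the udelay() between polls are assumptions filled in around the listed lines, not the driver's exact code.

static void cpt_disable_cores(struct cpt_device *cpt, u64 coremask,
                              u8 type, u8 grp)
{
        struct device *dev = &cpt->pdev->dev;
        u32 timeout = 100;                      /* assumed poll bound */
        u64 grpmask, pf_exe_ctl, busy;

        /* AE cores sit above the SE cores in the per-core bitmask. */
        if (type == AE_TYPES)
                coremask = (coremask << cpt->max_se_cores);

        /* Step 1: detach the cores from microcode group 'grp'. */
        grpmask = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp));
        cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp),
                        grpmask & ~coremask);

        /* Step 2: wait for in-flight work on those cores to drain. */
        busy = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0));
        while ((busy & coremask) && timeout--) {
                dev_dbg(dev, "cores still busy: 0x%llx\n", busy & coremask);
                udelay(10);                     /* assumed settle delay */
                busy = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0));
        }

        /* Step 3: clear the enable bits so the cores stop fetching work. */
        pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0));
        cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0),
                        pf_exe_ctl & ~coremask);
}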
64 static void cpt_enable_cores(struct cpt_device *cpt, u64 coremask, in cpt_enable_cores() argument
70 coremask = (coremask << cpt->max_se_cores); in cpt_enable_cores()
72 pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0)); in cpt_enable_cores()
73 cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0), in cpt_enable_cores()
78 static void cpt_configure_group(struct cpt_device *cpt, u8 grp, in cpt_configure_group() argument
84 coremask = (coremask << cpt->max_se_cores); in cpt_configure_group()
86 pf_gx_en = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp)); in cpt_configure_group()
87 cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp), in cpt_configure_group()
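cpt_enable_cores() and cpt_configure_group() are the mirror image: the same shift when the mask describes AE cores, then a read-modify-write that ORs the mask into EXE_CTL (turning the engines on) or into GX_EN (attaching them to group grp). A sketch, with the AE_TYPES guard again assumed from the pattern above:

static void cpt_enable_cores(struct cpt_device *cpt, u64 coremask, u8 type)
{
        u64 pf_exe_ctl;

        if (type == AE_TYPES)
                coremask = (coremask << cpt->max_se_cores);

        pf_exe_ctl = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0));
        cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0),
                        pf_exe_ctl | coremask);
}

static void cpt_configure_group(struct cpt_device *cpt, u8 grp,
                                u64 coremask, u8 type)
{
        u64 pf_gx_en;

        if (type == AE_TYPES)
                coremask = (coremask << cpt->max_se_cores);

        pf_gx_en = cpt_read_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp));
        cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp),
                        pf_gx_en | coremask);
}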
92 static void cpt_disable_mbox_interrupts(struct cpt_device *cpt) in cpt_disable_mbox_interrupts() argument
95 cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1CX(0, 0), ~0ull); in cpt_disable_mbox_interrupts()
98 static void cpt_disable_ecc_interrupts(struct cpt_device *cpt) in cpt_disable_ecc_interrupts() argument
101 cpt_write_csr64(cpt->reg_base, CPTX_PF_ECC0_ENA_W1C(0), ~0ull); in cpt_disable_ecc_interrupts()
104 static void cpt_disable_exec_interrupts(struct cpt_device *cpt) in cpt_disable_exec_interrupts() argument
107 cpt_write_csr64(cpt->reg_base, CPTX_PF_EXEC_ENA_W1C(0), ~0ull); in cpt_disable_exec_interrupts()
110 static void cpt_disable_all_interrupts(struct cpt_device *cpt) in cpt_disable_all_interrupts() argument
112 cpt_disable_mbox_interrupts(cpt); in cpt_disable_all_interrupts()
113 cpt_disable_ecc_interrupts(cpt); in cpt_disable_all_interrupts()
114 cpt_disable_exec_interrupts(cpt); in cpt_disable_all_interrupts()
117 static void cpt_enable_mbox_interrupts(struct cpt_device *cpt) in cpt_enable_mbox_interrupts() argument
120 cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1SX(0, 0), ~0ull); in cpt_enable_mbox_interrupts()
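The interrupt helpers rely on the hardware's paired write-1-to-clear/write-1-to-set enable registers: a 1 written to an ENA_W1C bit clears that enable, a 1 written to the matching ENA_W1S bit sets it, so masking or unmasking every source is a single ~0ull store with no read-modify-write. A sketch of the pattern, collapsing the three per-source disable helpers into one for brevity:

static void cpt_disable_all_interrupts(struct cpt_device *cpt)
{
        /* W1C registers: each 1 written clears that enable bit. */
        cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1CX(0, 0), ~0ull);
        cpt_write_csr64(cpt->reg_base, CPTX_PF_ECC0_ENA_W1C(0), ~0ull);
        cpt_write_csr64(cpt->reg_base, CPTX_PF_EXEC_ENA_W1C(0), ~0ull);
}

static void cpt_enable_mbox_interrupts(struct cpt_device *cpt)
{
        /* W1S counterpart: each 1 written sets that enable bit. */
        cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_ENA_W1SX(0, 0), ~0ull);
}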
123 static int cpt_load_microcode(struct cpt_device *cpt, struct microcode *mcode) in cpt_load_microcode() argument
127 struct device *dev = &cpt->pdev->dev; in cpt_load_microcode()
153 cpt_write_csr64(cpt->reg_base, in cpt_load_microcode()
161 static int do_cpt_init(struct cpt_device *cpt, struct microcode *mcode) in do_cpt_init() argument
164 struct device *dev = &cpt->pdev->dev; in do_cpt_init()
167 cpt->flags &= ~CPT_FLAG_DEVICE_READY; in do_cpt_init()
169 cpt_disable_all_interrupts(cpt); in do_cpt_init()
172 if (mcode->num_cores > cpt->max_ae_cores) { in do_cpt_init()
178 if (cpt->next_group >= CPT_MAX_CORE_GROUPS) { in do_cpt_init()
183 mcode->group = cpt->next_group; in do_cpt_init()
186 cpt_disable_cores(cpt, mcode->core_mask, AE_TYPES, in do_cpt_init()
189 ret = cpt_load_microcode(cpt, mcode); in do_cpt_init()
195 cpt->next_group++; in do_cpt_init()
197 cpt_configure_group(cpt, mcode->group, mcode->core_mask, in do_cpt_init()
200 cpt_enable_cores(cpt, mcode->core_mask, AE_TYPES); in do_cpt_init()
202 if (mcode->num_cores > cpt->max_se_cores) { in do_cpt_init()
207 if (cpt->next_group >= CPT_MAX_CORE_GROUPS) { in do_cpt_init()
212 mcode->group = cpt->next_group; in do_cpt_init()
215 cpt_disable_cores(cpt, mcode->core_mask, SE_TYPES, in do_cpt_init()
218 ret = cpt_load_microcode(cpt, mcode); in do_cpt_init()
224 cpt->next_group++; in do_cpt_init()
226 cpt_configure_group(cpt, mcode->group, mcode->core_mask, in do_cpt_init()
229 cpt_enable_cores(cpt, mcode->core_mask, SE_TYPES); in do_cpt_init()
233 cpt_enable_mbox_interrupts(cpt); in do_cpt_init()
234 cpt->flags |= CPT_FLAG_DEVICE_READY; in do_cpt_init()
240 cpt_enable_mbox_interrupts(cpt); in do_cpt_init()
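do_cpt_init() is the heart of a microcode load: mark the device not ready, mask interrupts, sanity-check the requested core count and the free-group budget, detach the target cores, program the new image, then regroup and re-enable the cores before flagging the device ready again. The sketch below condenses one branch (the SE path); whether the AE and SE paths hang off an is_ae flag, the exact error codes, and the message text are assumptions, with the AE path assumed to mirror this one using AE_TYPES and max_ae_cores.

static int do_cpt_init(struct cpt_device *cpt, struct microcode *mcode)
{
        struct device *dev = &cpt->pdev->dev;
        int ret = 0;

        /* Quiesce: nothing may use the device while groups are rewritten. */
        cpt->flags &= ~CPT_FLAG_DEVICE_READY;
        cpt_disable_all_interrupts(cpt);

        if (mcode->num_cores > cpt->max_se_cores) {
                dev_err(dev, "Requested more cores than available SE cores\n");
                ret = -EINVAL;
                goto init_fail;
        }
        if (cpt->next_group >= CPT_MAX_CORE_GROUPS) {
                dev_err(dev, "No free microcode group left\n");
                ret = -ENFILE;
                goto init_fail;
        }
        mcode->group = cpt->next_group;

        /* Detach the cores, point them at the new image, then put them in
         * the fresh group and turn them back on. */
        cpt_disable_cores(cpt, mcode->core_mask, SE_TYPES, mcode->group);
        ret = cpt_load_microcode(cpt, mcode);
        if (ret)
                goto init_fail;
        cpt->next_group++;
        cpt_configure_group(cpt, mcode->group, mcode->core_mask, SE_TYPES);
        cpt_enable_cores(cpt, mcode->core_mask, SE_TYPES);

        cpt_enable_mbox_interrupts(cpt);
        cpt->flags |= CPT_FLAG_DEVICE_READY;
        return 0;

init_fail:
        /* Keep the mailbox alive so VFs can still reach the PF. */
        cpt_enable_mbox_interrupts(cpt);
        return ret;
}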
252 static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae) in cpt_ucode_load_fw() argument
255 struct device *dev = &cpt->pdev->dev; in cpt_ucode_load_fw()
266 mcode = &cpt->mcode[cpt->next_mc_idx]; in cpt_ucode_load_fw()
280 mcode->code = dma_alloc_coherent(&cpt->pdev->dev, mcode->code_size, in cpt_ucode_load_fw()
304 ret = do_cpt_init(cpt, mcode); in cpt_ucode_load_fw()
312 cpt->next_mc_idx++; in cpt_ucode_load_fw()
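cpt_ucode_load_fw() sits between the firmware loader and do_cpt_init(): it fetches the named image with request_firmware(), stages it in a DMA-coherent buffer (the engines fetch microcode by DMA), hands it to do_cpt_init(), and only then claims the next cpt->mcode[] slot. The sketch below assumes the field names is_ae and phys_base and skips the image-header parsing the real loader would do before sizing the buffer.

static int cpt_ucode_load_fw(struct cpt_device *cpt, const u8 *fw, bool is_ae)
{
        struct device *dev = &cpt->pdev->dev;
        const struct firmware *fw_entry;
        struct microcode *mcode;
        int ret;

        ret = request_firmware(&fw_entry, fw, dev);
        if (ret)
                return ret;

        /* Each loaded image occupies the next free slot in cpt->mcode[]. */
        mcode = &cpt->mcode[cpt->next_mc_idx];
        mcode->is_ae = is_ae;                   /* assumed field name */
        mcode->code_size = fw_entry->size;      /* header parsing omitted */

        /* Coherent memory so the engines can DMA the image directly;
         * phys_base is what later gets programmed into the hardware. */
        mcode->code = dma_alloc_coherent(&cpt->pdev->dev, mcode->code_size,
                                         &mcode->phys_base, GFP_KERNEL);
        if (!mcode->code) {
                ret = -ENOMEM;
                goto fw_release;
        }
        memcpy(mcode->code, fw_entry->data, mcode->code_size);

        ret = do_cpt_init(cpt, mcode);
        if (ret) {
                dma_free_coherent(&cpt->pdev->dev, mcode->code_size,
                                  mcode->code, mcode->phys_base);
                goto fw_release;
        }
        cpt->next_mc_idx++;

fw_release:
        release_firmware(fw_entry);
        return ret;
}

As the cpt_ucode_load() lines that follow show, the PF loads the AE image ("cpt8x-mc-ae.out", is_ae = true) first and the SE image ("cpt8x-mc-se.out", is_ae = false) second.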
320 static int cpt_ucode_load(struct cpt_device *cpt) in cpt_ucode_load() argument
323 struct device *dev = &cpt->pdev->dev; in cpt_ucode_load()
325 ret = cpt_ucode_load_fw(cpt, "cpt8x-mc-ae.out", true); in cpt_ucode_load()
330 ret = cpt_ucode_load_fw(cpt, "cpt8x-mc-se.out", false); in cpt_ucode_load()
341 struct cpt_device *cpt = (struct cpt_device *)cpt_irq; in cpt_mbx0_intr_handler() local
343 cpt_mbox_intr_handler(cpt, 0); in cpt_mbx0_intr_handler()
348 static void cpt_reset(struct cpt_device *cpt) in cpt_reset() argument
350 cpt_write_csr64(cpt->reg_base, CPTX_PF_RESET(0), 1); in cpt_reset()
353 static void cpt_find_max_enabled_cores(struct cpt_device *cpt) in cpt_find_max_enabled_cores() argument
357 pf_cnsts.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_CONSTANTS(0)); in cpt_find_max_enabled_cores()
358 cpt->max_se_cores = pf_cnsts.s.se; in cpt_find_max_enabled_cores()
359 cpt->max_ae_cores = pf_cnsts.s.ae; in cpt_find_max_enabled_cores()
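cpt_find_max_enabled_cores() reads the read-only CONSTANTS register, whose SE and AE fields report how many engines of each kind this part exposes; those counts later drive both the core-count checks in do_cpt_init() and the AE mask shift. A sketch; the union type name is an assumption:

static void cpt_find_max_enabled_cores(struct cpt_device *cpt)
{
        union cptx_pf_constants pf_cnsts = {0}; /* union name assumed */

        pf_cnsts.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_CONSTANTS(0));
        cpt->max_se_cores = pf_cnsts.s.se;
        cpt->max_ae_cores = pf_cnsts.s.ae;
}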
362 static u32 cpt_check_bist_status(struct cpt_device *cpt) in cpt_check_bist_status() argument
366 bist_sts.u = cpt_read_csr64(cpt->reg_base, in cpt_check_bist_status()
372 static u64 cpt_check_exe_bist_status(struct cpt_device *cpt) in cpt_check_exe_bist_status() argument
376 bist_sts.u = cpt_read_csr64(cpt->reg_base, in cpt_check_exe_bist_status()
382 static void cpt_disable_all_cores(struct cpt_device *cpt) in cpt_disable_all_cores() argument
385 struct device *dev = &cpt->pdev->dev; in cpt_disable_all_cores()
389 cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp), 0); in cpt_disable_all_cores()
393 grp = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0)); in cpt_disable_all_cores()
396 grp = cpt_read_csr64(cpt->reg_base, in cpt_disable_all_cores()
404 cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0), 0); in cpt_disable_all_cores()
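cpt_disable_all_cores() is the bulk version of the quiesce above: clear every group's GX_EN so no core is attached to any microcode group, poll EXEC_BUSY until in-flight work drains, then zero EXE_CTL to switch all engines off. A sketch with an assumed poll bound and delay:

static void cpt_disable_all_cores(struct cpt_device *cpt)
{
        struct device *dev = &cpt->pdev->dev;
        u32 grp, timeout = 100;                 /* assumed poll bound */
        u64 busy;

        /* Detach every core from every microcode group... */
        for (grp = 0; grp < CPT_MAX_CORE_GROUPS; grp++)
                cpt_write_csr64(cpt->reg_base, CPTX_PF_GX_EN(0, grp), 0);

        /* ...wait for outstanding work to drain... */
        busy = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0));
        while (busy && timeout--) {
                dev_dbg(dev, "cores still busy\n");
                udelay(10);                     /* assumed settle delay */
                busy = cpt_read_csr64(cpt->reg_base, CPTX_PF_EXEC_BUSY(0));
        }

        /* ...then switch every engine off in one store. */
        cpt_write_csr64(cpt->reg_base, CPTX_PF_EXE_CTL(0), 0);
}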
412 static void cpt_unload_microcode(struct cpt_device *cpt) in cpt_unload_microcode() argument
418 struct microcode *mcode = &cpt->mcode[grp]; in cpt_unload_microcode()
420 if (cpt->mcode[grp].code) in cpt_unload_microcode()
421 dma_free_coherent(&cpt->pdev->dev, mcode->code_size, in cpt_unload_microcode()
427 cpt_write_csr64(cpt->reg_base, in cpt_unload_microcode()
431 static int cpt_device_init(struct cpt_device *cpt) in cpt_device_init() argument
434 struct device *dev = &cpt->pdev->dev; in cpt_device_init()
437 cpt_reset(cpt); in cpt_device_init()
441 bist = (u64)cpt_check_bist_status(cpt); in cpt_device_init()
447 bist = cpt_check_exe_bist_status(cpt); in cpt_device_init()
455 cpt_find_max_enabled_cores(cpt); in cpt_device_init()
457 cpt_disable_all_cores(cpt); in cpt_device_init()
459 cpt->next_mc_idx = 0; in cpt_device_init()
460 cpt->next_group = 0; in cpt_device_init()
462 cpt->flags |= CPT_FLAG_DEVICE_READY; in cpt_device_init()
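cpt_device_init() strings the probe-time helpers together: reset the block, refuse to continue if either the RAM or the per-engine BIST reports a failure, discover the engine population, park all cores, and reset the microcode bookkeeping before declaring the device ready. A sketch; the post-reset delay, the error codes, and the message text are assumptions:

static int cpt_device_init(struct cpt_device *cpt)
{
        struct device *dev = &cpt->pdev->dev;
        u64 bist;

        cpt_reset(cpt);
        msleep(100);                            /* assumed post-reset settle */

        /* Any set bit means a failed built-in self test: bail out. */
        bist = (u64)cpt_check_bist_status(cpt);
        if (bist) {
                dev_err(dev, "RAM BIST failed: 0x%llx\n", bist);
                return -ENODEV;
        }
        bist = cpt_check_exe_bist_status(cpt);
        if (bist) {
                dev_err(dev, "Engine BIST failed: 0x%llx\n", bist);
                return -ENODEV;
        }

        /* Discover the engine counts, park everything, and reset the
         * bookkeeping consumed by later microcode loads. */
        cpt_find_max_enabled_cores(cpt);
        cpt_disable_all_cores(cpt);
        cpt->next_mc_idx = 0;
        cpt->next_group = 0;
        cpt->flags |= CPT_FLAG_DEVICE_READY;

        return 0;
}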
467 static int cpt_register_interrupts(struct cpt_device *cpt) in cpt_register_interrupts() argument
470 struct device *dev = &cpt->pdev->dev; in cpt_register_interrupts()
473 ret = pci_alloc_irq_vectors(cpt->pdev, CPT_PF_MSIX_VECTORS, in cpt_register_interrupts()
476 dev_err(&cpt->pdev->dev, "Request for #%d msix vectors failed\n", in cpt_register_interrupts()
482 ret = request_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)), in cpt_register_interrupts()
483 cpt_mbx0_intr_handler, 0, "CPT Mbox0", cpt); in cpt_register_interrupts()
488 cpt_enable_mbox_interrupts(cpt); in cpt_register_interrupts()
493 pci_disable_msix(cpt->pdev); in cpt_register_interrupts()
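cpt_register_interrupts() allocates the PF's MSI-X block, wires the mailbox vector to cpt_mbx0_intr_handler(), and only then unmasks the mailbox interrupt; on failure it tears the vectors back down with pci_disable_msix(), as the last line above shows. A sketch, with the failure message assumed:

static int cpt_register_interrupts(struct cpt_device *cpt)
{
        struct device *dev = &cpt->pdev->dev;
        int ret;

        ret = pci_alloc_irq_vectors(cpt->pdev, CPT_PF_MSIX_VECTORS,
                                    CPT_PF_MSIX_VECTORS, PCI_IRQ_MSIX);
        if (ret < 0) {
                dev_err(&cpt->pdev->dev, "Request for #%d msix vectors failed\n",
                        CPT_PF_MSIX_VECTORS);
                return ret;
        }

        /* Only the VF-to-PF mailbox vector is serviced by the PF itself. */
        ret = request_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)),
                          cpt_mbx0_intr_handler, 0, "CPT Mbox0", cpt);
        if (ret) {
                dev_err(dev, "Request for mailbox irq failed\n");
                pci_disable_msix(cpt->pdev);
                return ret;
        }

        cpt_enable_mbox_interrupts(cpt);
        return 0;
}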
497 static void cpt_unregister_interrupts(struct cpt_device *cpt) in cpt_unregister_interrupts() argument
499 free_irq(pci_irq_vector(cpt->pdev, CPT_PF_INT_VEC_E_MBOXX(0)), cpt); in cpt_unregister_interrupts()
500 pci_disable_msix(cpt->pdev); in cpt_unregister_interrupts()
503 static int cpt_sriov_init(struct cpt_device *cpt, int num_vfs) in cpt_sriov_init() argument
508 struct pci_dev *pdev = cpt->pdev; in cpt_sriov_init()
516 cpt->num_vf_en = num_vfs; /* User requested VFs */ in cpt_sriov_init()
518 if (total_vf_cnt < cpt->num_vf_en) in cpt_sriov_init()
519 cpt->num_vf_en = total_vf_cnt; in cpt_sriov_init()
525 err = pci_enable_sriov(pdev, cpt->num_vf_en); in cpt_sriov_init()
528 cpt->num_vf_en); in cpt_sriov_init()
529 cpt->num_vf_en = 0; in cpt_sriov_init()
536 cpt->num_vf_en); in cpt_sriov_init()
538 cpt->flags |= CPT_FLAG_SRIOV_ENABLED; in cpt_sriov_init()
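cpt_sriov_init() clamps the requested VF count to what the device actually advertises, enables SR-IOV, and records the result in cpt->num_vf_en and the flags word. The sketch below uses pci_sriov_get_totalvfs() as a stand-in for however the driver reads the total VF count, and the error codes and message text are assumptions:

static int cpt_sriov_init(struct cpt_device *cpt, int num_vfs)
{
        struct pci_dev *pdev = cpt->pdev;
        int total_vf_cnt;
        int err;

        total_vf_cnt = pci_sriov_get_totalvfs(pdev);
        if (!total_vf_cnt) {
                dev_err(&pdev->dev, "SR-IOV is disabled on this device\n");
                return -EPERM;
        }

        /* Never enable more VFs than the device exposes. */
        cpt->num_vf_en = num_vfs;               /* user-requested VFs */
        if (total_vf_cnt < cpt->num_vf_en)
                cpt->num_vf_en = total_vf_cnt;

        err = pci_enable_sriov(pdev, cpt->num_vf_en);
        if (err) {
                dev_err(&pdev->dev, "SR-IOV enable failed for %d VFs\n",
                        cpt->num_vf_en);
                cpt->num_vf_en = 0;
                return err;
        }

        dev_info(&pdev->dev, "SR-IOV enabled, %d VFs available\n",
                 cpt->num_vf_en);
        cpt->flags |= CPT_FLAG_SRIOV_ENABLED;
        return 0;
}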
546 struct cpt_device *cpt; in cpt_probe() local
555 cpt = devm_kzalloc(dev, sizeof(*cpt), GFP_KERNEL); in cpt_probe()
556 if (!cpt) in cpt_probe()
559 pci_set_drvdata(pdev, cpt); in cpt_probe()
560 cpt->pdev = pdev; in cpt_probe()
587 cpt->reg_base = pcim_iomap(pdev, 0, 0); in cpt_probe()
588 if (!cpt->reg_base) { in cpt_probe()
595 cpt_device_init(cpt); in cpt_probe()
598 err = cpt_register_interrupts(cpt); in cpt_probe()
602 err = cpt_ucode_load(cpt); in cpt_probe()
607 err = cpt_sriov_init(cpt, num_vfs); in cpt_probe()
614 cpt_unregister_interrupts(cpt); in cpt_probe()
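cpt_probe() ties the pieces together in a fixed order: allocate and register the per-device state, map BAR 0 (all the CSRs above go through cpt->reg_base), initialise the hardware, register interrupts, load both microcode images, and only then enable SR-IOV so VFs never see a half-initialised PF. The sketch below is a condensed reconstruction: the managed-PCI setup details, the region name, the DMA-mask configuration (omitted here), the num_vfs module parameter, and the unwind labels are assumptions.

static unsigned int num_vfs;    /* assumed module parameter */

static int cpt_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
        struct device *dev = &pdev->dev;
        struct cpt_device *cpt;
        int err;

        cpt = devm_kzalloc(dev, sizeof(*cpt), GFP_KERNEL);
        if (!cpt)
                return -ENOMEM;

        pci_set_drvdata(pdev, cpt);
        cpt->pdev = pdev;

        err = pcim_enable_device(pdev);
        if (err)
                return err;
        err = pci_request_regions(pdev, "cpt");         /* name assumed */
        if (err)
                return err;

        /* BAR 0 holds every CSR touched through cpt->reg_base. */
        cpt->reg_base = pcim_iomap(pdev, 0, 0);
        if (!cpt->reg_base) {
                err = -ENOMEM;
                goto err_release_regions;
        }

        cpt_device_init(cpt);

        err = cpt_register_interrupts(cpt);
        if (err)
                goto err_release_regions;
        err = cpt_ucode_load(cpt);
        if (err)
                goto err_unregister_interrupts;
        err = cpt_sriov_init(cpt, num_vfs);
        if (err)
                goto err_unregister_interrupts;

        return 0;

err_unregister_interrupts:
        cpt_unregister_interrupts(cpt);
err_release_regions:
        pci_release_regions(pdev);
        pci_set_drvdata(pdev, NULL);
        return err;
}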
625 struct cpt_device *cpt = pci_get_drvdata(pdev); in cpt_remove() local
628 cpt_disable_all_cores(cpt); in cpt_remove()
630 cpt_unload_microcode(cpt); in cpt_remove()
631 cpt_unregister_interrupts(cpt); in cpt_remove()
640 struct cpt_device *cpt = pci_get_drvdata(pdev); in cpt_shutdown() local
642 if (!cpt) in cpt_shutdown()
648 cpt_unregister_interrupts(cpt); in cpt_shutdown()