// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (C) 2016 Cavium, Inc.
 */
#include <linux/module.h>
#include "cptpf.h"

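/*
 * Send a mailbox message from the PF to a VF.
 *
 * Each VF has a pair of 64-bit mailbox registers: MBOX(0) carries the
 * message and MBOX(1) carries its data. MBOX(1) is written first so the
 * data is in place before the MBOX(0) write, which raises the mailbox
 * interrupt on the VF side.
 */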
static void cpt_send_msg_to_vf(struct cpt_device *cpt, int vf,
                               struct cpt_mbox *mbx)
{
        /* Writing mbox(0) causes interrupt */
        cpt_write_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 1),
                        mbx->data);
        cpt_write_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 0), mbx->msg);
}

/*
 * ACK a VF's mailbox message
 * @vf: VF to which the ACK is to be sent
 */
static void cpt_mbox_send_ack(struct cpt_device *cpt, int vf,
                              struct cpt_mbox *mbx)
{
        mbx->data = 0ull;
        mbx->msg = CPT_MBOX_MSG_TYPE_ACK;
        cpt_send_msg_to_vf(cpt, vf, mbx);
}

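/*
 * Clear the pending mailbox interrupt for a VF. The interrupt register
 * is write-one-to-clear (W1C), so only @vf's bit is affected.
 */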
static void cpt_clear_mbox_intr(struct cpt_device *cpt, u32 vf)
{
        /* W1C for the VF */
        cpt_write_csr64(cpt->reg_base, CPTX_PF_MBOX_INTX(0, 0), (1 << vf));
}

/*
 * Configure QLEN/Chunk sizes for VF
 */
static void cpt_cfg_qlen_for_vf(struct cpt_device *cpt, int vf, u32 size)
{
        union cptx_pf_qx_ctl pf_qx_ctl;

        pf_qx_ctl.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf));
        pf_qx_ctl.s.size = size;
        pf_qx_ctl.s.cont_err = true;
        cpt_write_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf), pf_qx_ctl.u);
}

/*
 * Configure VQ priority
 */
static void cpt_cfg_vq_priority(struct cpt_device *cpt, int vf, u32 pri)
{
        union cptx_pf_qx_ctl pf_qx_ctl;

        pf_qx_ctl.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf));
        pf_qx_ctl.s.pri = pri;
        cpt_write_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, vf), pf_qx_ctl.u);
}

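/*
 * Bind virtqueue @q to the engine group loaded at microcode slot @grp
 * and return the engine type of that group (AE_TYPES or SE_TYPES), or
 * a negative errno if the queue or group number is out of range.
 */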
static int cpt_bind_vq_to_grp(struct cpt_device *cpt, u8 q, u8 grp)
{
        struct microcode *mcode = cpt->mcode;
        union cptx_pf_qx_ctl pf_qx_ctl;
        struct device *dev = &cpt->pdev->dev;

        if (q >= CPT_MAX_VF_NUM) {
                dev_err(dev, "Requested queue %d exceeds the maximum VF count\n", q);
                return -EINVAL;
        }
        if (grp >= CPT_MAX_CORE_GROUPS) {
                dev_err(dev, "Requested group %d exceeds the maximum core group count\n", grp);
                return -EINVAL;
        }
        if (grp >= cpt->next_mc_idx) {
                dev_err(dev, "Requested group %d exceeds the available functional groups\n", grp);
                return -EINVAL;
        }
        pf_qx_ctl.u = cpt_read_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, q));
        pf_qx_ctl.s.grp = mcode[grp].group;
        cpt_write_csr64(cpt->reg_base, CPTX_PF_QX_CTL(0, q), pf_qx_ctl.u);
        dev_dbg(dev, "VF %d TYPE %s\n", q, (mcode[grp].is_ae ? "AE" : "SE"));

        return mcode[grp].is_ae ? AE_TYPES : SE_TYPES;
}

/* Interrupt handler to handle mailbox messages from VFs */
static void cpt_handle_mbox_intr(struct cpt_device *cpt, int vf)
{
        struct cpt_vf_info *vfx = &cpt->vfinfo[vf];
        struct cpt_mbox mbx = {};
        int vftype;
        struct device *dev = &cpt->pdev->dev;
        /*
         * MBOX[0] contains msg
         * MBOX[1] contains data
         */
        mbx.msg = cpt_read_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 0));
        mbx.data = cpt_read_csr64(cpt->reg_base, CPTX_PF_VFX_MBOXX(0, vf, 1));
        dev_dbg(dev, "%s: Mailbox msg 0x%llx from VF%d\n", __func__, mbx.msg, vf);
        switch (mbx.msg) {
        case CPT_MSG_VF_UP:
                /* VF is coming up; hold a module reference while it is active */
                vfx->state = VF_STATE_UP;
                try_module_get(THIS_MODULE);
                cpt_mbox_send_ack(cpt, vf, &mbx);
                break;
        case CPT_MSG_READY:
                /* VF queries whether the PF is ready; reply with its VF number */
                mbx.msg = CPT_MSG_READY;
                mbx.data = vf;
                cpt_send_msg_to_vf(cpt, vf, &mbx);
                break;
        case CPT_MSG_VF_DOWN:
                /* First msg in VF teardown sequence */
                vfx->state = VF_STATE_DOWN;
                module_put(THIS_MODULE);
                cpt_mbox_send_ack(cpt, vf, &mbx);
                break;
        case CPT_MSG_QLEN:
                vfx->qlen = mbx.data;
                cpt_cfg_qlen_for_vf(cpt, vf, vfx->qlen);
                cpt_mbox_send_ack(cpt, vf, &mbx);
                break;
        case CPT_MSG_QBIND_GRP:
                vftype = cpt_bind_vq_to_grp(cpt, vf, (u8)mbx.data);
                if ((vftype != AE_TYPES) && (vftype != SE_TYPES)) {
                        dev_err(dev, "Queue %d binding to group %llu failed\n",
                                vf, mbx.data);
                } else {
                        dev_dbg(dev, "Queue %d binding to group %llu successful\n",
                                vf, mbx.data);
                        mbx.msg = CPT_MSG_QBIND_GRP;
                        mbx.data = vftype;
                        cpt_send_msg_to_vf(cpt, vf, &mbx);
                }
                break;
        case CPT_MSG_VQ_PRIORITY:
                vfx->priority = mbx.data;
                cpt_cfg_vq_priority(cpt, vf, vfx->priority);
                cpt_mbox_send_ack(cpt, vf, &mbx);
                break;
        default:
                dev_err(dev, "Invalid msg from VF%d, msg 0x%llx\n",
                        vf, mbx.msg);
                break;
        }
}

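/*
 * PF mailbox interrupt handler: scan the mailbox interrupt register,
 * service every VF whose pending bit is set, then clear that bit (W1C).
 */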
void cpt_mbox_intr_handler(struct cpt_device *cpt, int mbx)
{
        u64 intr;
        u8 vf;

        intr = cpt_read_csr64(cpt->reg_base, CPTX_PF_MBOX_INTX(0, 0));
        dev_dbg(&cpt->pdev->dev, "PF interrupt Mbox%d 0x%llx\n", mbx, intr);
        for (vf = 0; vf < CPT_MAX_VF_NUM; vf++) {
                if (intr & (1ULL << vf)) {
                        dev_dbg(&cpt->pdev->dev, "Intr from VF %d\n", vf);
                        cpt_handle_mbox_intr(cpt, vf);
                        cpt_clear_mbox_intr(cpt, vf);
                }
        }
}