// SPDX-License-Identifier: GPL-2.0
/* Marvell OcteonTX CPT driver
 *
 * Copyright (C) 2019 Marvell International Ltd.
 *
 * This program is free software; you can redistribute it and/or modify
 * it under the terms of the GNU General Public License version 2 as
 * published by the Free Software Foundation.
 */

#include <linux/ctype.h>
#include <linux/firmware.h>
#include "otx_cpt_common.h"
#include "otx_cptpf_ucode.h"
#include "otx_cptpf.h"

#define CSR_DELAY 30
/* Tar archive defines */
#define TAR_MAGIC "ustar"
#define TAR_MAGIC_LEN 6
#define TAR_BLOCK_LEN 512
#define REGTYPE '0'
#define AREGTYPE '\0'
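/*
 * Typeflag values for the tar entries handled below: '0' marks a regular
 * file and '\0' a regular file written by old (pre-POSIX) tar. Numeric
 * header fields such as size are ASCII octal strings, and file data is
 * padded out to a multiple of TAR_BLOCK_LEN.
 */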

/* tar header as defined in POSIX 1003.1-1990. */
struct tar_hdr_t {
        char name[100];
        char mode[8];
        char uid[8];
        char gid[8];
        char size[12];
        char mtime[12];
        char chksum[8];
        char typeflag;
        char linkname[100];
        char magic[6];
        char version[2];
        char uname[32];
        char gname[32];
        char devmajor[8];
        char devminor[8];
        char prefix[155];
};

struct tar_blk_t {
        union {
                struct tar_hdr_t hdr;
                char block[TAR_BLOCK_LEN];
        };
};

struct tar_arch_info_t {
        struct list_head ucodes;
        const struct firmware *fw;
};

static struct otx_cpt_bitmap get_cores_bmap(struct device *dev,
                                            struct otx_cpt_eng_grp_info *eng_grp)
{
        struct otx_cpt_bitmap bmap = { {0} };
        bool found = false;
        int i;

        if (eng_grp->g->engs_num > OTX_CPT_MAX_ENGINES) {
                dev_err(dev, "unsupported number of engines %d on octeontx\n",
                        eng_grp->g->engs_num);
                return bmap;
        }

        for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
                if (eng_grp->engs[i].type) {
                        bitmap_or(bmap.bits, bmap.bits,
                                  eng_grp->engs[i].bmap,
                                  eng_grp->g->engs_num);
                        bmap.size = eng_grp->g->engs_num;
                        found = true;
                }
        }

        if (!found)
                dev_err(dev, "No engines reserved for engine group %d\n",
                        eng_grp->idx);
        return bmap;
}

static int is_eng_type(int val, int eng_type)
{
        return val & (1 << eng_type);
}

static int dev_supports_eng_type(struct otx_cpt_eng_grps *eng_grps,
                                 int eng_type)
{
        return is_eng_type(eng_grps->eng_types_supported, eng_type);
}

static void set_ucode_filename(struct otx_cpt_ucode *ucode,
                               const char *filename)
{
        strlcpy(ucode->filename, filename, OTX_CPT_UCODE_NAME_LENGTH);
}

static char *get_eng_type_str(int eng_type)
{
        char *str = "unknown";

        switch (eng_type) {
        case OTX_CPT_SE_TYPES:
                str = "SE";
                break;

        case OTX_CPT_AE_TYPES:
                str = "AE";
                break;
        }
        return str;
}

static char *get_ucode_type_str(int ucode_type)
{
        char *str = "unknown";

        switch (ucode_type) {
        case (1 << OTX_CPT_SE_TYPES):
                str = "SE";
                break;

        case (1 << OTX_CPT_AE_TYPES):
                str = "AE";
                break;
        }
        return str;
}

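/*
 * Work out the microcode type from its header: the lowercased version
 * string must contain an "se-" or "ae" tag and the ver_num.nn field must
 * match one of the known SE/AE type values. Returns -EINVAL if no type,
 * or an ambiguous (both SE and AE) type, is detected.
 */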
static int get_ucode_type(struct otx_cpt_ucode_hdr *ucode_hdr, int *ucode_type)
{
        char tmp_ver_str[OTX_CPT_UCODE_VER_STR_SZ];
        u32 i, val = 0;
        u8 nn;

        strlcpy(tmp_ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
        for (i = 0; i < strlen(tmp_ver_str); i++)
                tmp_ver_str[i] = tolower(tmp_ver_str[i]);

        nn = ucode_hdr->ver_num.nn;
        if (strnstr(tmp_ver_str, "se-", OTX_CPT_UCODE_VER_STR_SZ) &&
            (nn == OTX_CPT_SE_UC_TYPE1 || nn == OTX_CPT_SE_UC_TYPE2 ||
             nn == OTX_CPT_SE_UC_TYPE3))
                val |= 1 << OTX_CPT_SE_TYPES;
        if (strnstr(tmp_ver_str, "ae", OTX_CPT_UCODE_VER_STR_SZ) &&
            nn == OTX_CPT_AE_UC_TYPE)
                val |= 1 << OTX_CPT_AE_TYPES;

        *ucode_type = val;

        if (!val)
                return -EINVAL;
        if (is_eng_type(val, OTX_CPT_AE_TYPES) &&
            is_eng_type(val, OTX_CPT_SE_TYPES))
                return -EINVAL;
        return 0;
}

static int is_mem_zero(const char *ptr, int size)
{
        int i;

        for (i = 0; i < size; i++) {
                if (ptr[i])
                        return 0;
        }
        return 1;
}

static int cpt_set_ucode_base(struct otx_cpt_eng_grp_info *eng_grp, void *obj)
{
        struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
        dma_addr_t dma_addr;
        struct otx_cpt_bitmap bmap;
        int i;

        bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
        if (!bmap.size)
                return -EINVAL;

        if (eng_grp->mirror.is_ena)
                dma_addr =
                        eng_grp->g->grp[eng_grp->mirror.idx].ucode[0].align_dma;
        else
                dma_addr = eng_grp->ucode[0].align_dma;

        /*
         * Set UCODE_BASE only for the cores which are not used;
         * the other cores should already have a valid UCODE_BASE set.
         */
        for_each_set_bit(i, bmap.bits, bmap.size)
                if (!eng_grp->g->eng_ref_cnt[i])
                        writeq((u64) dma_addr, cpt->reg_base +
                               OTX_CPT_PF_ENGX_UCODE_BASE(i));
        return 0;
}

static int cpt_detach_and_disable_cores(struct otx_cpt_eng_grp_info *eng_grp,
                                        void *obj)
{
        struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
        struct otx_cpt_bitmap bmap = { {0} };
        int timeout = 10;
        int i, busy;
        u64 reg;

        bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
        if (!bmap.size)
                return -EINVAL;

        /* Detach the cores from group */
        reg = readq(cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));
        for_each_set_bit(i, bmap.bits, bmap.size) {
                if (reg & (1ull << i)) {
                        eng_grp->g->eng_ref_cnt[i]--;
                        reg &= ~(1ull << i);
                }
        }
        writeq(reg, cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));

        /* Wait for cores to become idle */
        do {
                busy = 0;
                usleep_range(10000, 20000);
                if (timeout-- < 0)
                        return -EBUSY;

                reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
                for_each_set_bit(i, bmap.bits, bmap.size)
                        if (reg & (1ull << i)) {
                                busy = 1;
                                break;
                        }
        } while (busy);

        /* Disable the cores only if they are not used anymore */
        reg = readq(cpt->reg_base + OTX_CPT_PF_EXE_CTL);
        for_each_set_bit(i, bmap.bits, bmap.size)
                if (!eng_grp->g->eng_ref_cnt[i])
                        reg &= ~(1ull << i);
        writeq(reg, cpt->reg_base + OTX_CPT_PF_EXE_CTL);

        return 0;
}

static int cpt_attach_and_enable_cores(struct otx_cpt_eng_grp_info *eng_grp,
                                       void *obj)
{
        struct otx_cpt_device *cpt = (struct otx_cpt_device *) obj;
        struct otx_cpt_bitmap bmap;
        u64 reg;
        int i;

        bmap = get_cores_bmap(&cpt->pdev->dev, eng_grp);
        if (!bmap.size)
                return -EINVAL;

        /* Attach the cores to the group */
        reg = readq(cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));
        for_each_set_bit(i, bmap.bits, bmap.size) {
                if (!(reg & (1ull << i))) {
                        eng_grp->g->eng_ref_cnt[i]++;
                        reg |= 1ull << i;
                }
        }
        writeq(reg, cpt->reg_base + OTX_CPT_PF_GX_EN(eng_grp->idx));

        /* Enable the cores */
        reg = readq(cpt->reg_base + OTX_CPT_PF_EXE_CTL);
        for_each_set_bit(i, bmap.bits, bmap.size)
                reg |= 1ull << i;
        writeq(reg, cpt->reg_base + OTX_CPT_PF_EXE_CTL);

        return 0;
}

static int process_tar_file(struct device *dev,
                            struct tar_arch_info_t *tar_arch, char *filename,
                            const u8 *data, u32 size)
{
        struct tar_ucode_info_t *tar_info;
        struct otx_cpt_ucode_hdr *ucode_hdr;
        int ucode_type, ucode_size;
        unsigned int code_length;

        /*
         * If the size is less than the microcode header size then don't
         * report an error because it might not be a microcode file; just
         * process the next file from the archive.
         */
        if (size < sizeof(struct otx_cpt_ucode_hdr))
                return 0;

        ucode_hdr = (struct otx_cpt_ucode_hdr *) data;
        /*
         * If the microcode version can't be found don't report an error
         * because it might not be a microcode file; just process the next
         * file.
         */
        if (get_ucode_type(ucode_hdr, &ucode_type))
                return 0;

        code_length = ntohl(ucode_hdr->code_length);
        if (code_length >= INT_MAX / 2) {
                dev_err(dev, "Invalid code_length %u\n", code_length);
                return -EINVAL;
        }

        ucode_size = code_length * 2;
        if (!ucode_size || (size < round_up(ucode_size, 16) +
            sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
                dev_err(dev, "Ucode %s invalid size\n", filename);
                return -EINVAL;
        }

        tar_info = kzalloc(sizeof(struct tar_ucode_info_t), GFP_KERNEL);
        if (!tar_info)
                return -ENOMEM;

        tar_info->ucode_ptr = data;
        set_ucode_filename(&tar_info->ucode, filename);
        memcpy(tar_info->ucode.ver_str, ucode_hdr->ver_str,
               OTX_CPT_UCODE_VER_STR_SZ);
        tar_info->ucode.ver_num = ucode_hdr->ver_num;
        tar_info->ucode.type = ucode_type;
        tar_info->ucode.size = ucode_size;
        list_add_tail(&tar_info->list, &tar_arch->ucodes);

        return 0;
}

static void release_tar_archive(struct tar_arch_info_t *tar_arch)
{
        struct tar_ucode_info_t *curr, *temp;

        if (!tar_arch)
                return;

        list_for_each_entry_safe(curr, temp, &tar_arch->ucodes, list) {
                list_del(&curr->list);
                kfree(curr);
        }

        if (tar_arch->fw)
                release_firmware(tar_arch->fw);
        kfree(tar_arch);
}

static struct tar_ucode_info_t *get_uc_from_tar_archive(
                                        struct tar_arch_info_t *tar_arch,
                                        int ucode_type)
{
        struct tar_ucode_info_t *curr, *uc_found = NULL;

        list_for_each_entry(curr, &tar_arch->ucodes, list) {
                if (!is_eng_type(curr->ucode.type, ucode_type))
                        continue;

                if (!uc_found) {
                        uc_found = curr;
                        continue;
                }

                switch (ucode_type) {
                case OTX_CPT_AE_TYPES:
                        break;

                case OTX_CPT_SE_TYPES:
                        if (uc_found->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE2 ||
                            (uc_found->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE3
                             && curr->ucode.ver_num.nn == OTX_CPT_SE_UC_TYPE1))
                                uc_found = curr;
                        break;
                }
        }

        return uc_found;
}

static void print_tar_dbg_info(struct tar_arch_info_t *tar_arch,
                               char *tar_filename)
{
        struct tar_ucode_info_t *curr;

        pr_debug("Tar archive filename %s\n", tar_filename);
        pr_debug("Tar archive pointer %p, size %ld\n", tar_arch->fw->data,
                 tar_arch->fw->size);
        list_for_each_entry(curr, &tar_arch->ucodes, list) {
                pr_debug("Ucode filename %s\n", curr->ucode.filename);
                pr_debug("Ucode version string %s\n", curr->ucode.ver_str);
                pr_debug("Ucode version %d.%d.%d.%d\n",
                         curr->ucode.ver_num.nn, curr->ucode.ver_num.xx,
                         curr->ucode.ver_num.yy, curr->ucode.ver_num.zz);
                pr_debug("Ucode type (%d) %s\n", curr->ucode.type,
                         get_ucode_type_str(curr->ucode.type));
                pr_debug("Ucode size %d\n", curr->ucode.size);
                pr_debug("Ucode ptr %p\n", curr->ucode_ptr);
        }
}

static struct tar_arch_info_t *load_tar_archive(struct device *dev,
                                                char *tar_filename)
{
        struct tar_arch_info_t *tar_arch = NULL;
        struct tar_blk_t *tar_blk;
        unsigned int cur_size;
        size_t tar_offs = 0;
        size_t tar_size;
        int ret;

        tar_arch = kzalloc(sizeof(struct tar_arch_info_t), GFP_KERNEL);
        if (!tar_arch)
                return NULL;

        INIT_LIST_HEAD(&tar_arch->ucodes);

        /* Load tar archive */
        ret = request_firmware(&tar_arch->fw, tar_filename, dev);
        if (ret)
                goto release_tar_arch;

        if (tar_arch->fw->size < TAR_BLOCK_LEN) {
                dev_err(dev, "Invalid tar archive %s\n", tar_filename);
                goto release_tar_arch;
        }

        tar_size = tar_arch->fw->size;
        tar_blk = (struct tar_blk_t *) tar_arch->fw->data;
        if (strncmp(tar_blk->hdr.magic, TAR_MAGIC, TAR_MAGIC_LEN - 1)) {
                dev_err(dev, "Unsupported format of tar archive %s\n",
                        tar_filename);
                goto release_tar_arch;
        }

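        /*
         * Walk the archive one TAR_BLOCK_LEN header at a time: each file's
         * data follows its header, padded up to a block boundary, and the
         * archive is terminated by two all-zero blocks.
         */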
        while (1) {
                /* Read current file size */
                ret = kstrtouint(tar_blk->hdr.size, 8, &cur_size);
                if (ret)
                        goto release_tar_arch;

                if (tar_offs + cur_size > tar_size ||
                    tar_offs + 2*TAR_BLOCK_LEN > tar_size) {
                        dev_err(dev, "Invalid tar archive %s\n", tar_filename);
                        goto release_tar_arch;
                }

                tar_offs += TAR_BLOCK_LEN;
                if (tar_blk->hdr.typeflag == REGTYPE ||
                    tar_blk->hdr.typeflag == AREGTYPE) {
                        ret = process_tar_file(dev, tar_arch,
                                               tar_blk->hdr.name,
                                               &tar_arch->fw->data[tar_offs],
                                               cur_size);
                        if (ret)
                                goto release_tar_arch;
                }

                tar_offs += (cur_size/TAR_BLOCK_LEN) * TAR_BLOCK_LEN;
                if (cur_size % TAR_BLOCK_LEN)
                        tar_offs += TAR_BLOCK_LEN;

                /* Check for the end of the archive */
                if (tar_offs + 2*TAR_BLOCK_LEN > tar_size) {
                        dev_err(dev, "Invalid tar archive %s\n", tar_filename);
                        goto release_tar_arch;
                }

                if (is_mem_zero(&tar_arch->fw->data[tar_offs],
                    2*TAR_BLOCK_LEN))
                        break;

                /* Read next block from tar archive */
                tar_blk = (struct tar_blk_t *) &tar_arch->fw->data[tar_offs];
        }

        print_tar_dbg_info(tar_arch, tar_filename);
        return tar_arch;
release_tar_arch:
        release_tar_archive(tar_arch);
        return NULL;
}

static struct otx_cpt_engs_rsvd *find_engines_by_type(
                                        struct otx_cpt_eng_grp_info *eng_grp,
                                        int eng_type)
{
        int i;

        for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
                if (!eng_grp->engs[i].type)
                        continue;

                if (eng_grp->engs[i].type == eng_type)
                        return &eng_grp->engs[i];
        }
        return NULL;
}

int otx_cpt_uc_supports_eng_type(struct otx_cpt_ucode *ucode, int eng_type)
{
        return is_eng_type(ucode->type, eng_type);
}
EXPORT_SYMBOL_GPL(otx_cpt_uc_supports_eng_type);

int otx_cpt_eng_grp_has_eng_type(struct otx_cpt_eng_grp_info *eng_grp,
                                 int eng_type)
{
        struct otx_cpt_engs_rsvd *engs;

        engs = find_engines_by_type(eng_grp, eng_type);

        return (engs != NULL ? 1 : 0);
}
EXPORT_SYMBOL_GPL(otx_cpt_eng_grp_has_eng_type);

static void print_ucode_info(struct otx_cpt_eng_grp_info *eng_grp,
                             char *buf, int size)
{
        if (eng_grp->mirror.is_ena) {
                scnprintf(buf, size, "%s (shared with engine_group%d)",
                          eng_grp->g->grp[eng_grp->mirror.idx].ucode[0].ver_str,
                          eng_grp->mirror.idx);
        } else {
                scnprintf(buf, size, "%s", eng_grp->ucode[0].ver_str);
        }
}

static void print_engs_info(struct otx_cpt_eng_grp_info *eng_grp,
                            char *buf, int size, int idx)
{
        struct otx_cpt_engs_rsvd *mirrored_engs = NULL;
        struct otx_cpt_engs_rsvd *engs;
        int len, i;

        buf[0] = '\0';
        for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
                engs = &eng_grp->engs[i];
                if (!engs->type)
                        continue;
                if (idx != -1 && idx != i)
                        continue;

                if (eng_grp->mirror.is_ena)
                        mirrored_engs = find_engines_by_type(
                                        &eng_grp->g->grp[eng_grp->mirror.idx],
                                        engs->type);
                if (i > 0 && idx == -1) {
                        len = strlen(buf);
                        scnprintf(buf+len, size-len, ", ");
                }

                len = strlen(buf);
                scnprintf(buf+len, size-len, "%d %s ", mirrored_engs ?
                          engs->count + mirrored_engs->count : engs->count,
                          get_eng_type_str(engs->type));
                if (mirrored_engs) {
                        len = strlen(buf);
                        scnprintf(buf+len, size-len,
                                  "(%d shared with engine_group%d) ",
                                  engs->count <= 0 ? engs->count +
                                  mirrored_engs->count : mirrored_engs->count,
                                  eng_grp->mirror.idx);
                }
        }
}

static void print_ucode_dbg_info(struct otx_cpt_ucode *ucode)
{
        pr_debug("Ucode info\n");
        pr_debug("Ucode version string %s\n", ucode->ver_str);
        pr_debug("Ucode version %d.%d.%d.%d\n", ucode->ver_num.nn,
                 ucode->ver_num.xx, ucode->ver_num.yy, ucode->ver_num.zz);
        pr_debug("Ucode type %s\n", get_ucode_type_str(ucode->type));
        pr_debug("Ucode size %d\n", ucode->size);
        pr_debug("Ucode virt address %16.16llx\n", (u64)ucode->align_va);
        pr_debug("Ucode phys address %16.16llx\n", ucode->align_dma);
}

static void cpt_print_engines_mask(struct otx_cpt_eng_grp_info *eng_grp,
                                   struct device *dev, char *buf, int size)
{
        struct otx_cpt_bitmap bmap;
        u32 mask[2];

        bmap = get_cores_bmap(dev, eng_grp);
        if (!bmap.size) {
                scnprintf(buf, size, "unknown");
                return;
        }
        bitmap_to_arr32(mask, bmap.bits, bmap.size);
        scnprintf(buf, size, "%8.8x %8.8x", mask[1], mask[0]);
}


static void print_dbg_info(struct device *dev,
                           struct otx_cpt_eng_grps *eng_grps)
{
        char engs_info[2*OTX_CPT_UCODE_NAME_LENGTH];
        struct otx_cpt_eng_grp_info *mirrored_grp;
        char engs_mask[OTX_CPT_UCODE_NAME_LENGTH];
        struct otx_cpt_eng_grp_info *grp;
        struct otx_cpt_engs_rsvd *engs;
        u32 mask[4];
        int i, j;

        pr_debug("Engine groups global info\n");
        pr_debug("max SE %d, max AE %d\n",
                 eng_grps->avail.max_se_cnt, eng_grps->avail.max_ae_cnt);
        pr_debug("free SE %d\n", eng_grps->avail.se_cnt);
        pr_debug("free AE %d\n", eng_grps->avail.ae_cnt);

        for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
                grp = &eng_grps->grp[i];
                pr_debug("engine_group%d, state %s\n", i, grp->is_enabled ?
                         "enabled" : "disabled");
                if (grp->is_enabled) {
                        mirrored_grp = &eng_grps->grp[grp->mirror.idx];
                        pr_debug("Ucode0 filename %s, version %s\n",
                                 grp->mirror.is_ena ?
                                 mirrored_grp->ucode[0].filename :
                                 grp->ucode[0].filename,
                                 grp->mirror.is_ena ?
                                 mirrored_grp->ucode[0].ver_str :
                                 grp->ucode[0].ver_str);
                }

                for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
                        engs = &grp->engs[j];
                        if (engs->type) {
                                print_engs_info(grp, engs_info,
                                                2*OTX_CPT_UCODE_NAME_LENGTH, j);
                                pr_debug("Slot%d: %s\n", j, engs_info);
                                bitmap_to_arr32(mask, engs->bmap,
                                                eng_grps->engs_num);
                                pr_debug("Mask: %8.8x %8.8x %8.8x %8.8x\n",
                                         mask[3], mask[2], mask[1], mask[0]);
                        } else
                                pr_debug("Slot%d not used\n", j);
                }
                if (grp->is_enabled) {
                        cpt_print_engines_mask(grp, dev, engs_mask,
                                               OTX_CPT_UCODE_NAME_LENGTH);
                        pr_debug("Cmask: %s\n", engs_mask);
                }
        }
}

static int update_engines_avail_count(struct device *dev,
                                      struct otx_cpt_engs_available *avail,
                                      struct otx_cpt_engs_rsvd *engs, int val)
{
        switch (engs->type) {
        case OTX_CPT_SE_TYPES:
                avail->se_cnt += val;
                break;

        case OTX_CPT_AE_TYPES:
                avail->ae_cnt += val;
                break;

        default:
                dev_err(dev, "Invalid engine type %d\n", engs->type);
                return -EINVAL;
        }

        return 0;
}

static int update_engines_offset(struct device *dev,
                                 struct otx_cpt_engs_available *avail,
                                 struct otx_cpt_engs_rsvd *engs)
{
        switch (engs->type) {
        case OTX_CPT_SE_TYPES:
                engs->offset = 0;
                break;

        case OTX_CPT_AE_TYPES:
                engs->offset = avail->max_se_cnt;
                break;

        default:
                dev_err(dev, "Invalid engine type %d\n", engs->type);
                return -EINVAL;
        }

        return 0;
}

static int release_engines(struct device *dev, struct otx_cpt_eng_grp_info *grp)
{
        int i, ret = 0;

        for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
                if (!grp->engs[i].type)
                        continue;

                if (grp->engs[i].count > 0) {
                        ret = update_engines_avail_count(dev, &grp->g->avail,
                                                         &grp->engs[i],
                                                         grp->engs[i].count);
                        if (ret)
                                return ret;
                }

                grp->engs[i].type = 0;
                grp->engs[i].count = 0;
                grp->engs[i].offset = 0;
                grp->engs[i].ucode = NULL;
                bitmap_zero(grp->engs[i].bmap, grp->g->engs_num);
        }

        return 0;
}

static int do_reserve_engines(struct device *dev,
                              struct otx_cpt_eng_grp_info *grp,
                              struct otx_cpt_engines *req_engs)
{
        struct otx_cpt_engs_rsvd *engs = NULL;
        int i, ret;

        for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
                if (!grp->engs[i].type) {
                        engs = &grp->engs[i];
                        break;
                }
        }

        if (!engs)
                return -ENOMEM;

        engs->type = req_engs->type;
        engs->count = req_engs->count;

        ret = update_engines_offset(dev, &grp->g->avail, engs);
        if (ret)
                return ret;

        if (engs->count > 0) {
                ret = update_engines_avail_count(dev, &grp->g->avail, engs,
                                                 -engs->count);
                if (ret)
                        return ret;
        }

        return 0;
}

static int check_engines_availability(struct device *dev,
                                      struct otx_cpt_eng_grp_info *grp,
                                      struct otx_cpt_engines *req_eng)
{
        int avail_cnt = 0;

        switch (req_eng->type) {
        case OTX_CPT_SE_TYPES:
                avail_cnt = grp->g->avail.se_cnt;
                break;

        case OTX_CPT_AE_TYPES:
                avail_cnt = grp->g->avail.ae_cnt;
                break;

        default:
                dev_err(dev, "Invalid engine type %d\n", req_eng->type);
                return -EINVAL;
        }

        if (avail_cnt < req_eng->count) {
                dev_err(dev,
                        "Error available %s engines %d < than requested %d\n",
                        get_eng_type_str(req_eng->type),
                        avail_cnt, req_eng->count);
                return -EBUSY;
        }

        return 0;
}

static int reserve_engines(struct device *dev, struct otx_cpt_eng_grp_info *grp,
                           struct otx_cpt_engines *req_engs, int req_cnt)
{
        int i, ret;

        /* Validate that the requested number of engines is available */
        for (i = 0; i < req_cnt; i++) {
                ret = check_engines_availability(dev, grp, &req_engs[i]);
                if (ret)
                        return ret;
        }

        /* Reserve the requested engines for this engine group */
        for (i = 0; i < req_cnt; i++) {
                ret = do_reserve_engines(dev, grp, &req_engs[i]);
                if (ret)
                        return ret;
        }
        return 0;
}

static ssize_t eng_grp_info_show(struct device *dev,
                                 struct device_attribute *attr,
                                 char *buf)
{
        char ucode_info[2*OTX_CPT_UCODE_NAME_LENGTH];
        char engs_info[2*OTX_CPT_UCODE_NAME_LENGTH];
        char engs_mask[OTX_CPT_UCODE_NAME_LENGTH];
        struct otx_cpt_eng_grp_info *eng_grp;
        int ret;

        eng_grp = container_of(attr, struct otx_cpt_eng_grp_info, info_attr);
        mutex_lock(&eng_grp->g->lock);

        print_engs_info(eng_grp, engs_info, 2*OTX_CPT_UCODE_NAME_LENGTH, -1);
        print_ucode_info(eng_grp, ucode_info, 2*OTX_CPT_UCODE_NAME_LENGTH);
        cpt_print_engines_mask(eng_grp, dev, engs_mask,
                               OTX_CPT_UCODE_NAME_LENGTH);
        ret = scnprintf(buf, PAGE_SIZE,
                        "Microcode : %s\nEngines: %s\nEngines mask: %s\n",
                        ucode_info, engs_info, engs_mask);

        mutex_unlock(&eng_grp->g->lock);
        return ret;
}

static int create_sysfs_eng_grps_info(struct device *dev,
                                      struct otx_cpt_eng_grp_info *eng_grp)
{
        eng_grp->info_attr.show = eng_grp_info_show;
        eng_grp->info_attr.store = NULL;
        eng_grp->info_attr.attr.name = eng_grp->sysfs_info_name;
        eng_grp->info_attr.attr.mode = 0440;
        sysfs_attr_init(&eng_grp->info_attr.attr);
        return device_create_file(dev, &eng_grp->info_attr);
}

static void ucode_unload(struct device *dev, struct otx_cpt_ucode *ucode)
{
        if (ucode->va) {
                dma_free_coherent(dev, ucode->size + OTX_CPT_UCODE_ALIGNMENT,
                                  ucode->va, ucode->dma);
                ucode->va = NULL;
                ucode->align_va = NULL;
                ucode->dma = 0;
                ucode->align_dma = 0;
                ucode->size = 0;
        }

        memset(&ucode->ver_str, 0, OTX_CPT_UCODE_VER_STR_SZ);
        memset(&ucode->ver_num, 0, sizeof(struct otx_cpt_ucode_ver_num));
        set_ucode_filename(ucode, "");
        ucode->type = 0;
}

static int copy_ucode_to_dma_mem(struct device *dev,
                                 struct otx_cpt_ucode *ucode,
                                 const u8 *ucode_data)
{
        u32 i;

        /* Allocate DMAable space */
        ucode->va = dma_alloc_coherent(dev, ucode->size +
                                       OTX_CPT_UCODE_ALIGNMENT,
                                       &ucode->dma, GFP_KERNEL);
        if (!ucode->va) {
                dev_err(dev, "Unable to allocate space for microcode\n");
                return -ENOMEM;
        }
        ucode->align_va = PTR_ALIGN(ucode->va, OTX_CPT_UCODE_ALIGNMENT);
        ucode->align_dma = PTR_ALIGN(ucode->dma, OTX_CPT_UCODE_ALIGNMENT);

        memcpy((void *) ucode->align_va, (void *) ucode_data +
               sizeof(struct otx_cpt_ucode_hdr), ucode->size);

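        /*
         * On a little-endian host the two passes below combine to reverse
         * the order of the 16-bit halfwords within each 64-bit word while
         * keeping the byte order inside each halfword; on a big-endian host
         * both passes are no-ops.
         */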
        /* Byte swap 64-bit */
        for (i = 0; i < (ucode->size / 8); i++)
                ((__be64 *)ucode->align_va)[i] =
                        cpu_to_be64(((u64 *)ucode->align_va)[i]);
        /* Ucode needs 16-bit swap */
        for (i = 0; i < (ucode->size / 2); i++)
                ((__be16 *)ucode->align_va)[i] =
                        cpu_to_be16(((u16 *)ucode->align_va)[i]);
        return 0;
}

static int ucode_load(struct device *dev, struct otx_cpt_ucode *ucode,
                      const char *ucode_filename)
{
        struct otx_cpt_ucode_hdr *ucode_hdr;
        const struct firmware *fw;
        unsigned int code_length;
        int ret;

        set_ucode_filename(ucode, ucode_filename);
        ret = request_firmware(&fw, ucode->filename, dev);
        if (ret)
                return ret;

        ucode_hdr = (struct otx_cpt_ucode_hdr *) fw->data;
        memcpy(ucode->ver_str, ucode_hdr->ver_str, OTX_CPT_UCODE_VER_STR_SZ);
        ucode->ver_num = ucode_hdr->ver_num;
        code_length = ntohl(ucode_hdr->code_length);
        if (code_length >= INT_MAX / 2) {
                dev_err(dev, "Ucode invalid code_length %u\n", code_length);
                ret = -EINVAL;
                goto release_fw;
        }
        ucode->size = code_length * 2;
        if (!ucode->size || (fw->size < round_up(ucode->size, 16)
            + sizeof(struct otx_cpt_ucode_hdr) + OTX_CPT_UCODE_SIGN_LEN)) {
                dev_err(dev, "Ucode %s invalid size\n", ucode_filename);
                ret = -EINVAL;
                goto release_fw;
        }

        ret = get_ucode_type(ucode_hdr, &ucode->type);
        if (ret) {
                dev_err(dev, "Microcode %s unknown type 0x%x\n",
                        ucode->filename, ucode->type);
                goto release_fw;
        }

        ret = copy_ucode_to_dma_mem(dev, ucode, fw->data);
        if (ret)
                goto release_fw;

        print_ucode_dbg_info(ucode);
release_fw:
        release_firmware(fw);
        return ret;
}

static int enable_eng_grp(struct otx_cpt_eng_grp_info *eng_grp,
                          void *obj)
{
        int ret;

        ret = cpt_set_ucode_base(eng_grp, obj);
        if (ret)
                return ret;

        ret = cpt_attach_and_enable_cores(eng_grp, obj);
        return ret;
}

static int disable_eng_grp(struct device *dev,
                           struct otx_cpt_eng_grp_info *eng_grp,
                           void *obj)
{
        int i, ret;

        ret = cpt_detach_and_disable_cores(eng_grp, obj);
        if (ret)
                return ret;

        /* Unload ucode used by this engine group */
        ucode_unload(dev, &eng_grp->ucode[0]);

        for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
                if (!eng_grp->engs[i].type)
                        continue;

                eng_grp->engs[i].ucode = &eng_grp->ucode[0];
        }

        ret = cpt_set_ucode_base(eng_grp, obj);

        return ret;
}

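/*
 * Engine group mirroring: when a newly created group uses the same
 * microcode image as an already enabled group, it can share that group's
 * engines and ucode DMA buffer instead of reserving a full set of its own.
 * The helpers below maintain the is_ena/idx/ref_count bookkeeping for both
 * sides of such a pairing.
 */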
static void setup_eng_grp_mirroring(struct otx_cpt_eng_grp_info *dst_grp,
                                    struct otx_cpt_eng_grp_info *src_grp)
{
        /* Setup fields for engine group which is mirrored */
        src_grp->mirror.is_ena = false;
        src_grp->mirror.idx = 0;
        src_grp->mirror.ref_count++;

        /* Setup fields for mirroring engine group */
        dst_grp->mirror.is_ena = true;
        dst_grp->mirror.idx = src_grp->idx;
        dst_grp->mirror.ref_count = 0;
}

static void remove_eng_grp_mirroring(struct otx_cpt_eng_grp_info *dst_grp)
{
        struct otx_cpt_eng_grp_info *src_grp;

        if (!dst_grp->mirror.is_ena)
                return;

        src_grp = &dst_grp->g->grp[dst_grp->mirror.idx];

        src_grp->mirror.ref_count--;
        dst_grp->mirror.is_ena = false;
        dst_grp->mirror.idx = 0;
        dst_grp->mirror.ref_count = 0;
}

static void update_requested_engs(struct otx_cpt_eng_grp_info *mirrored_eng_grp,
                                  struct otx_cpt_engines *engs, int engs_cnt)
{
        struct otx_cpt_engs_rsvd *mirrored_engs;
        int i;

        for (i = 0; i < engs_cnt; i++) {
                mirrored_engs = find_engines_by_type(mirrored_eng_grp,
                                                     engs[i].type);
                if (!mirrored_engs)
                        continue;

                /*
                 * If the mirrored group has this type of engines attached then
                 * there are 3 scenarios possible:
                 * 1) mirrored_engs.count == engs[i].count then all engines
                 * from the mirrored engine group will be shared with this
                 * engine group
                 * 2) mirrored_engs.count > engs[i].count then only a subset of
                 * engines from the mirrored engine group will be shared with
                 * this engine group
                 * 3) mirrored_engs.count < engs[i].count then all engines
                 * from the mirrored engine group will be shared with this
                 * group and additional engines will be reserved for exclusive
                 * use by this engine group
                 */
                engs[i].count -= mirrored_engs->count;
        }
}

static struct otx_cpt_eng_grp_info *find_mirrored_eng_grp(
                                        struct otx_cpt_eng_grp_info *grp)
{
        struct otx_cpt_eng_grps *eng_grps = grp->g;
        int i;

        for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
                if (!eng_grps->grp[i].is_enabled)
                        continue;
                if (eng_grps->grp[i].ucode[0].type)
                        continue;
                if (grp->idx == i)
                        continue;
                if (!strncasecmp(eng_grps->grp[i].ucode[0].ver_str,
                                 grp->ucode[0].ver_str,
                                 OTX_CPT_UCODE_VER_STR_SZ))
                        return &eng_grps->grp[i];
        }

        return NULL;
}

static struct otx_cpt_eng_grp_info *find_unused_eng_grp(
                                        struct otx_cpt_eng_grps *eng_grps)
{
        int i;

        for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
                if (!eng_grps->grp[i].is_enabled)
                        return &eng_grps->grp[i];
        }
        return NULL;
}

static int eng_grp_update_masks(struct device *dev,
                                struct otx_cpt_eng_grp_info *eng_grp)
{
        struct otx_cpt_engs_rsvd *engs, *mirrored_engs;
        struct otx_cpt_bitmap tmp_bmap = { {0} };
        int i, j, cnt, max_cnt;
        int bit;

        for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
                engs = &eng_grp->engs[i];
                if (!engs->type)
                        continue;
                if (engs->count <= 0)
                        continue;

                switch (engs->type) {
                case OTX_CPT_SE_TYPES:
                        max_cnt = eng_grp->g->avail.max_se_cnt;
                        break;

                case OTX_CPT_AE_TYPES:
                        max_cnt = eng_grp->g->avail.max_ae_cnt;
                        break;

                default:
                        dev_err(dev, "Invalid engine type %d\n", engs->type);
                        return -EINVAL;
                }

                cnt = engs->count;
                WARN_ON(engs->offset + max_cnt > OTX_CPT_MAX_ENGINES);
                bitmap_zero(tmp_bmap.bits, eng_grp->g->engs_num);
                for (j = engs->offset; j < engs->offset + max_cnt; j++) {
                        if (!eng_grp->g->eng_ref_cnt[j]) {
                                bitmap_set(tmp_bmap.bits, j, 1);
                                cnt--;
                                if (!cnt)
                                        break;
                        }
                }

                if (cnt)
                        return -ENOSPC;

                bitmap_copy(engs->bmap, tmp_bmap.bits, eng_grp->g->engs_num);
        }

        if (!eng_grp->mirror.is_ena)
                return 0;

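        /*
         * For a mirrored group, update_requested_engs() already subtracted
         * the mirrored group's engine count, so a negative engs->count here
         * means the mirrored group has more engines of this type than this
         * group needs; bitmap_clear() below drops -count engines from the
         * start of the mirrored bitmap before it is OR-ed in.
         */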
        for (i = 0; i < OTX_CPT_MAX_ETYPES_PER_GRP; i++) {
                engs = &eng_grp->engs[i];
                if (!engs->type)
                        continue;

                mirrored_engs = find_engines_by_type(
                                        &eng_grp->g->grp[eng_grp->mirror.idx],
                                        engs->type);
                WARN_ON(!mirrored_engs && engs->count <= 0);
                if (!mirrored_engs)
                        continue;

                bitmap_copy(tmp_bmap.bits, mirrored_engs->bmap,
                            eng_grp->g->engs_num);
                if (engs->count < 0) {
                        bit = find_first_bit(mirrored_engs->bmap,
                                             eng_grp->g->engs_num);
                        bitmap_clear(tmp_bmap.bits, bit, -engs->count);
                }
                bitmap_or(engs->bmap, engs->bmap, tmp_bmap.bits,
                          eng_grp->g->engs_num);
        }
        return 0;
}

static int delete_engine_group(struct device *dev,
                               struct otx_cpt_eng_grp_info *eng_grp)
{
        int i, ret;

        if (!eng_grp->is_enabled)
                return -EINVAL;

        if (eng_grp->mirror.ref_count) {
                dev_err(dev, "Can't delete engine_group%d as it is used by engine_group(s):",
                        eng_grp->idx);
                for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
                        if (eng_grp->g->grp[i].mirror.is_ena &&
                            eng_grp->g->grp[i].mirror.idx == eng_grp->idx)
                                pr_cont(" %d", i);
                }
                pr_cont("\n");
                return -EINVAL;
        }

        /* Remove engine group mirroring if enabled */
        remove_eng_grp_mirroring(eng_grp);

        /* Disable engine group */
        ret = disable_eng_grp(dev, eng_grp, eng_grp->g->obj);
        if (ret)
                return ret;

        /* Release all engines held by this engine group */
        ret = release_engines(dev, eng_grp);
        if (ret)
                return ret;

        device_remove_file(dev, &eng_grp->info_attr);
        eng_grp->is_enabled = false;

        return 0;
}
1182*4882a593Smuzhiyun
validate_1_ucode_scenario(struct device * dev,struct otx_cpt_eng_grp_info * eng_grp,struct otx_cpt_engines * engs,int engs_cnt)1183*4882a593Smuzhiyun static int validate_1_ucode_scenario(struct device *dev,
1184*4882a593Smuzhiyun struct otx_cpt_eng_grp_info *eng_grp,
1185*4882a593Smuzhiyun struct otx_cpt_engines *engs, int engs_cnt)
1186*4882a593Smuzhiyun {
1187*4882a593Smuzhiyun int i;
1188*4882a593Smuzhiyun
1189*4882a593Smuzhiyun /* Verify that the loaded ucode supports the requested engine types */
1190*4882a593Smuzhiyun for (i = 0; i < engs_cnt; i++) {
1191*4882a593Smuzhiyun if (!otx_cpt_uc_supports_eng_type(&eng_grp->ucode[0],
1192*4882a593Smuzhiyun engs[i].type)) {
1193*4882a593Smuzhiyun dev_err(dev,
1194*4882a593Smuzhiyun "Microcode %s does not support %s engines\n",
1195*4882a593Smuzhiyun eng_grp->ucode[0].filename,
1196*4882a593Smuzhiyun get_eng_type_str(engs[i].type));
1197*4882a593Smuzhiyun return -EINVAL;
1198*4882a593Smuzhiyun }
1199*4882a593Smuzhiyun }
1200*4882a593Smuzhiyun return 0;
1201*4882a593Smuzhiyun }
1202*4882a593Smuzhiyun
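/*
 * Point the group's engines at the microcode they will run: the mirrored
 * group's image when mirroring is enabled, otherwise this group's own
 * image.
 */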
1203*4882a593Smuzhiyun static void update_ucode_ptrs(struct otx_cpt_eng_grp_info *eng_grp)
1204*4882a593Smuzhiyun {
1205*4882a593Smuzhiyun struct otx_cpt_ucode *ucode;
1206*4882a593Smuzhiyun
1207*4882a593Smuzhiyun if (eng_grp->mirror.is_ena)
1208*4882a593Smuzhiyun ucode = &eng_grp->g->grp[eng_grp->mirror.idx].ucode[0];
1209*4882a593Smuzhiyun else
1210*4882a593Smuzhiyun ucode = &eng_grp->ucode[0];
1211*4882a593Smuzhiyun WARN_ON(!eng_grp->engs[0].type);
1212*4882a593Smuzhiyun eng_grp->engs[0].ucode = ucode;
1213*4882a593Smuzhiyun }
1214*4882a593Smuzhiyun
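/*
 * Create and enable an engine group: load the microcode (either taken
 * from an already parsed tar archive or loaded by firmware file name),
 * set up mirroring if an identical group already exists, reserve
 * engines, program the engine masks, create the sysfs info entry and
 * finally enable the group in hardware.  Failures unwind through
 * release_engines()/ucode_unload().
 */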
1215*4882a593Smuzhiyun static int create_engine_group(struct device *dev,
1216*4882a593Smuzhiyun struct otx_cpt_eng_grps *eng_grps,
1217*4882a593Smuzhiyun struct otx_cpt_engines *engs, int engs_cnt,
1218*4882a593Smuzhiyun void *ucode_data[], int ucodes_cnt,
1219*4882a593Smuzhiyun bool use_uc_from_tar_arch)
1220*4882a593Smuzhiyun {
1221*4882a593Smuzhiyun struct otx_cpt_eng_grp_info *mirrored_eng_grp;
1222*4882a593Smuzhiyun struct tar_ucode_info_t *tar_info;
1223*4882a593Smuzhiyun struct otx_cpt_eng_grp_info *eng_grp;
1224*4882a593Smuzhiyun int i, ret = 0;
1225*4882a593Smuzhiyun
1226*4882a593Smuzhiyun if (ucodes_cnt > OTX_CPT_MAX_ETYPES_PER_GRP)
1227*4882a593Smuzhiyun return -EINVAL;
1228*4882a593Smuzhiyun
1229*4882a593Smuzhiyun /* Validate if requested engine types are supported by this device */
1230*4882a593Smuzhiyun for (i = 0; i < engs_cnt; i++)
1231*4882a593Smuzhiyun if (!dev_supports_eng_type(eng_grps, engs[i].type)) {
1232*4882a593Smuzhiyun dev_err(dev, "Device does not support %s engines\n",
1233*4882a593Smuzhiyun get_eng_type_str(engs[i].type));
1234*4882a593Smuzhiyun return -EPERM;
1235*4882a593Smuzhiyun }
1236*4882a593Smuzhiyun
1237*4882a593Smuzhiyun /* Find engine group which is not used */
1238*4882a593Smuzhiyun eng_grp = find_unused_eng_grp(eng_grps);
1239*4882a593Smuzhiyun if (!eng_grp) {
1240*4882a593Smuzhiyun dev_err(dev, "Error all engine groups are being used\n");
1241*4882a593Smuzhiyun return -ENOSPC;
1242*4882a593Smuzhiyun }
1243*4882a593Smuzhiyun
1244*4882a593Smuzhiyun /* Load ucode */
1245*4882a593Smuzhiyun for (i = 0; i < ucodes_cnt; i++) {
1246*4882a593Smuzhiyun if (use_uc_from_tar_arch) {
1247*4882a593Smuzhiyun tar_info = (struct tar_ucode_info_t *) ucode_data[i];
1248*4882a593Smuzhiyun eng_grp->ucode[i] = tar_info->ucode;
1249*4882a593Smuzhiyun ret = copy_ucode_to_dma_mem(dev, &eng_grp->ucode[i],
1250*4882a593Smuzhiyun tar_info->ucode_ptr);
1251*4882a593Smuzhiyun } else
1252*4882a593Smuzhiyun ret = ucode_load(dev, &eng_grp->ucode[i],
1253*4882a593Smuzhiyun (char *) ucode_data[i]);
1254*4882a593Smuzhiyun if (ret)
1255*4882a593Smuzhiyun goto err_ucode_unload;
1256*4882a593Smuzhiyun }
1257*4882a593Smuzhiyun
1258*4882a593Smuzhiyun /* Validate scenario where 1 ucode is used */
1259*4882a593Smuzhiyun ret = validate_1_ucode_scenario(dev, eng_grp, engs, engs_cnt);
1260*4882a593Smuzhiyun if (ret)
1261*4882a593Smuzhiyun goto err_ucode_unload;
1262*4882a593Smuzhiyun
1263*4882a593Smuzhiyun /* Check if this group mirrors another existing engine group */
1264*4882a593Smuzhiyun mirrored_eng_grp = find_mirrored_eng_grp(eng_grp);
1265*4882a593Smuzhiyun if (mirrored_eng_grp) {
1266*4882a593Smuzhiyun /* Setup mirroring */
1267*4882a593Smuzhiyun setup_eng_grp_mirroring(eng_grp, mirrored_eng_grp);
1268*4882a593Smuzhiyun
1269*4882a593Smuzhiyun /*
1270*4882a593Smuzhiyun * Update count of requested engines because some
1271*4882a593Smuzhiyun * of them might be shared with mirrored group
1272*4882a593Smuzhiyun */
1273*4882a593Smuzhiyun update_requested_engs(mirrored_eng_grp, engs, engs_cnt);
1274*4882a593Smuzhiyun }
1275*4882a593Smuzhiyun
1276*4882a593Smuzhiyun /* Reserve engines */
1277*4882a593Smuzhiyun ret = reserve_engines(dev, eng_grp, engs, engs_cnt);
1278*4882a593Smuzhiyun if (ret)
1279*4882a593Smuzhiyun goto err_ucode_unload;
1280*4882a593Smuzhiyun
1281*4882a593Smuzhiyun /* Update ucode pointers used by engines */
1282*4882a593Smuzhiyun update_ucode_ptrs(eng_grp);
1283*4882a593Smuzhiyun
1284*4882a593Smuzhiyun /* Update engine masks used by this group */
1285*4882a593Smuzhiyun ret = eng_grp_update_masks(dev, eng_grp);
1286*4882a593Smuzhiyun if (ret)
1287*4882a593Smuzhiyun goto err_release_engs;
1288*4882a593Smuzhiyun
1289*4882a593Smuzhiyun /* Create sysfs entry for engine group info */
1290*4882a593Smuzhiyun ret = create_sysfs_eng_grps_info(dev, eng_grp);
1291*4882a593Smuzhiyun if (ret)
1292*4882a593Smuzhiyun goto err_release_engs;
1293*4882a593Smuzhiyun
1294*4882a593Smuzhiyun /* Enable engine group */
1295*4882a593Smuzhiyun ret = enable_eng_grp(eng_grp, eng_grps->obj);
1296*4882a593Smuzhiyun if (ret)
1297*4882a593Smuzhiyun goto err_release_engs;
1298*4882a593Smuzhiyun
1299*4882a593Smuzhiyun /*
1300*4882a593Smuzhiyun * If this engine group mirrors another engine group
1301*4882a593Smuzhiyun * then we need to unload ucode as we will use ucode
1302*4882a593Smuzhiyun * from mirrored engine group
1303*4882a593Smuzhiyun */
1304*4882a593Smuzhiyun if (eng_grp->mirror.is_ena)
1305*4882a593Smuzhiyun ucode_unload(dev, &eng_grp->ucode[0]);
1306*4882a593Smuzhiyun
1307*4882a593Smuzhiyun eng_grp->is_enabled = true;
1308*4882a593Smuzhiyun if (eng_grp->mirror.is_ena)
1309*4882a593Smuzhiyun dev_info(dev,
1310*4882a593Smuzhiyun "Engine_group%d: reuse microcode %s from group %d\n",
1311*4882a593Smuzhiyun eng_grp->idx, mirrored_eng_grp->ucode[0].ver_str,
1312*4882a593Smuzhiyun mirrored_eng_grp->idx);
1313*4882a593Smuzhiyun else
1314*4882a593Smuzhiyun dev_info(dev, "Engine_group%d: microcode loaded %s\n",
1315*4882a593Smuzhiyun eng_grp->idx, eng_grp->ucode[0].ver_str);
1316*4882a593Smuzhiyun
1317*4882a593Smuzhiyun return 0;
1318*4882a593Smuzhiyun
1319*4882a593Smuzhiyun err_release_engs:
1320*4882a593Smuzhiyun release_engines(dev, eng_grp);
1321*4882a593Smuzhiyun err_ucode_unload:
1322*4882a593Smuzhiyun ucode_unload(dev, &eng_grp->ucode[0]);
1323*4882a593Smuzhiyun return ret;
1324*4882a593Smuzhiyun }
1325*4882a593Smuzhiyun
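/*
 * sysfs "ucode_load" write handler.  The accepted format, as parsed
 * below, is a ';' separated list of tokens:
 *   "se:<count>" / "ae:<count>"  - engine types and counts to reserve
 *   "<ucode file name>"          - microcode image(s) to load
 *   "engine_group<N>:null"       - delete engine group N instead
 * Illustrative sketch only (the file name and sysfs path below are
 * hypothetical examples, not taken from this driver):
 *   echo "se:48;ucode-se.out"   > /sys/bus/pci/devices/<BDF>/ucode_load
 *   echo "engine_group0:null"   > /sys/bus/pci/devices/<BDF>/ucode_load
 */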
1326*4882a593Smuzhiyun static ssize_t ucode_load_store(struct device *dev,
1327*4882a593Smuzhiyun struct device_attribute *attr,
1328*4882a593Smuzhiyun const char *buf, size_t count)
1329*4882a593Smuzhiyun {
1330*4882a593Smuzhiyun struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = { {0} };
1331*4882a593Smuzhiyun char *ucode_filename[OTX_CPT_MAX_ETYPES_PER_GRP];
1332*4882a593Smuzhiyun char tmp_buf[OTX_CPT_UCODE_NAME_LENGTH] = { 0 };
1333*4882a593Smuzhiyun char *start, *val, *err_msg, *tmp;
1334*4882a593Smuzhiyun struct otx_cpt_eng_grps *eng_grps;
1335*4882a593Smuzhiyun int grp_idx = 0, ret = -EINVAL;
1336*4882a593Smuzhiyun bool has_se, has_ie, has_ae;
1337*4882a593Smuzhiyun int del_grp_idx = -1;
1338*4882a593Smuzhiyun int ucode_idx = 0;
1339*4882a593Smuzhiyun
1340*4882a593Smuzhiyun if (strlen(buf) > OTX_CPT_UCODE_NAME_LENGTH)
1341*4882a593Smuzhiyun return -EINVAL;
1342*4882a593Smuzhiyun
1343*4882a593Smuzhiyun eng_grps = container_of(attr, struct otx_cpt_eng_grps, ucode_load_attr);
1344*4882a593Smuzhiyun err_msg = "Invalid engine group format";
1345*4882a593Smuzhiyun strlcpy(tmp_buf, buf, OTX_CPT_UCODE_NAME_LENGTH);
1346*4882a593Smuzhiyun start = tmp_buf;
1347*4882a593Smuzhiyun
1348*4882a593Smuzhiyun has_se = has_ie = has_ae = false;
1349*4882a593Smuzhiyun
1350*4882a593Smuzhiyun for (;;) {
1351*4882a593Smuzhiyun val = strsep(&start, ";");
1352*4882a593Smuzhiyun if (!val)
1353*4882a593Smuzhiyun break;
1354*4882a593Smuzhiyun val = strim(val);
1355*4882a593Smuzhiyun if (!*val)
1356*4882a593Smuzhiyun continue;
1357*4882a593Smuzhiyun
1358*4882a593Smuzhiyun if (!strncasecmp(val, "engine_group", 12)) {
1359*4882a593Smuzhiyun if (del_grp_idx != -1)
1360*4882a593Smuzhiyun goto err_print;
1361*4882a593Smuzhiyun tmp = strim(strsep(&val, ":"));
1362*4882a593Smuzhiyun if (!val)
1363*4882a593Smuzhiyun goto err_print;
1364*4882a593Smuzhiyun if (strlen(tmp) != 13)
1365*4882a593Smuzhiyun goto err_print;
1366*4882a593Smuzhiyun if (kstrtoint((tmp + 12), 10, &del_grp_idx))
1367*4882a593Smuzhiyun goto err_print;
1368*4882a593Smuzhiyun val = strim(val);
1369*4882a593Smuzhiyun if (strncasecmp(val, "null", 4))
1370*4882a593Smuzhiyun goto err_print;
1371*4882a593Smuzhiyun if (strlen(val) != 4)
1372*4882a593Smuzhiyun goto err_print;
1373*4882a593Smuzhiyun } else if (!strncasecmp(val, "se", 2) && strchr(val, ':')) {
1374*4882a593Smuzhiyun if (has_se || ucode_idx)
1375*4882a593Smuzhiyun goto err_print;
1376*4882a593Smuzhiyun tmp = strim(strsep(&val, ":"));
1377*4882a593Smuzhiyun if (!val)
1378*4882a593Smuzhiyun goto err_print;
1379*4882a593Smuzhiyun if (strlen(tmp) != 2)
1380*4882a593Smuzhiyun goto err_print;
1381*4882a593Smuzhiyun if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
1382*4882a593Smuzhiyun goto err_print;
1383*4882a593Smuzhiyun engs[grp_idx++].type = OTX_CPT_SE_TYPES;
1384*4882a593Smuzhiyun has_se = true;
1385*4882a593Smuzhiyun } else if (!strncasecmp(val, "ae", 2) && strchr(val, ':')) {
1386*4882a593Smuzhiyun if (has_ae || ucode_idx)
1387*4882a593Smuzhiyun goto err_print;
1388*4882a593Smuzhiyun tmp = strim(strsep(&val, ":"));
1389*4882a593Smuzhiyun if (!val)
1390*4882a593Smuzhiyun goto err_print;
1391*4882a593Smuzhiyun if (strlen(tmp) != 2)
1392*4882a593Smuzhiyun goto err_print;
1393*4882a593Smuzhiyun if (kstrtoint(strim(val), 10, &engs[grp_idx].count))
1394*4882a593Smuzhiyun goto err_print;
1395*4882a593Smuzhiyun engs[grp_idx++].type = OTX_CPT_AE_TYPES;
1396*4882a593Smuzhiyun has_ae = true;
1397*4882a593Smuzhiyun } else {
1398*4882a593Smuzhiyun if (ucode_idx > 1)
1399*4882a593Smuzhiyun goto err_print;
1400*4882a593Smuzhiyun if (!strlen(val))
1401*4882a593Smuzhiyun goto err_print;
1402*4882a593Smuzhiyun if (strnstr(val, " ", strlen(val)))
1403*4882a593Smuzhiyun goto err_print;
1404*4882a593Smuzhiyun ucode_filename[ucode_idx++] = val;
1405*4882a593Smuzhiyun }
1406*4882a593Smuzhiyun }
1407*4882a593Smuzhiyun
1408*4882a593Smuzhiyun /* Validate input parameters */
1409*4882a593Smuzhiyun if (del_grp_idx == -1) {
1410*4882a593Smuzhiyun if (!(grp_idx && ucode_idx))
1411*4882a593Smuzhiyun goto err_print;
1412*4882a593Smuzhiyun
1413*4882a593Smuzhiyun if (ucode_idx > 1 && grp_idx < 2)
1414*4882a593Smuzhiyun goto err_print;
1415*4882a593Smuzhiyun
1416*4882a593Smuzhiyun if (grp_idx > OTX_CPT_MAX_ETYPES_PER_GRP) {
1417*4882a593Smuzhiyun err_msg = "Error max 2 engine types can be attached";
1418*4882a593Smuzhiyun goto err_print;
1419*4882a593Smuzhiyun }
1420*4882a593Smuzhiyun
1421*4882a593Smuzhiyun } else {
1422*4882a593Smuzhiyun if (del_grp_idx < 0 ||
1423*4882a593Smuzhiyun del_grp_idx >= OTX_CPT_MAX_ENGINE_GROUPS) {
1424*4882a593Smuzhiyun dev_err(dev, "Invalid engine group index %d\n",
1425*4882a593Smuzhiyun del_grp_idx);
1426*4882a593Smuzhiyun ret = -EINVAL;
1427*4882a593Smuzhiyun return ret;
1428*4882a593Smuzhiyun }
1429*4882a593Smuzhiyun
1430*4882a593Smuzhiyun if (!eng_grps->grp[del_grp_idx].is_enabled) {
1431*4882a593Smuzhiyun dev_err(dev, "Error engine_group%d is not configured\n",
1432*4882a593Smuzhiyun del_grp_idx);
1433*4882a593Smuzhiyun ret = -EINVAL;
1434*4882a593Smuzhiyun return ret;
1435*4882a593Smuzhiyun }
1436*4882a593Smuzhiyun
1437*4882a593Smuzhiyun if (grp_idx || ucode_idx)
1438*4882a593Smuzhiyun goto err_print;
1439*4882a593Smuzhiyun }
1440*4882a593Smuzhiyun
1441*4882a593Smuzhiyun mutex_lock(&eng_grps->lock);
1442*4882a593Smuzhiyun
1443*4882a593Smuzhiyun if (eng_grps->is_rdonly) {
1444*4882a593Smuzhiyun dev_err(dev, "Disable VFs before modifying engine groups\n");
1445*4882a593Smuzhiyun ret = -EACCES;
1446*4882a593Smuzhiyun goto err_unlock;
1447*4882a593Smuzhiyun }
1448*4882a593Smuzhiyun
1449*4882a593Smuzhiyun if (del_grp_idx == -1)
1450*4882a593Smuzhiyun /* create engine group */
1451*4882a593Smuzhiyun ret = create_engine_group(dev, eng_grps, engs, grp_idx,
1452*4882a593Smuzhiyun (void **) ucode_filename,
1453*4882a593Smuzhiyun ucode_idx, false);
1454*4882a593Smuzhiyun else
1455*4882a593Smuzhiyun /* delete engine group */
1456*4882a593Smuzhiyun ret = delete_engine_group(dev, &eng_grps->grp[del_grp_idx]);
1457*4882a593Smuzhiyun if (ret)
1458*4882a593Smuzhiyun goto err_unlock;
1459*4882a593Smuzhiyun
1460*4882a593Smuzhiyun print_dbg_info(dev, eng_grps);
1461*4882a593Smuzhiyun err_unlock:
1462*4882a593Smuzhiyun mutex_unlock(&eng_grps->lock);
1463*4882a593Smuzhiyun return ret ? ret : count;
1464*4882a593Smuzhiyun err_print:
1465*4882a593Smuzhiyun dev_err(dev, "%s\n", err_msg);
1466*4882a593Smuzhiyun
1467*4882a593Smuzhiyun return ret;
1468*4882a593Smuzhiyun }
1469*4882a593Smuzhiyun
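/*
 * Invoked when the user first enables VFs: if no engine groups have been
 * configured yet, try to build default groups for kernel crypto from the
 * microcode tar archive shipped as firmware.
 */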
1470*4882a593Smuzhiyun int otx_cpt_try_create_default_eng_grps(struct pci_dev *pdev,
1471*4882a593Smuzhiyun struct otx_cpt_eng_grps *eng_grps,
1472*4882a593Smuzhiyun int pf_type)
1473*4882a593Smuzhiyun {
1474*4882a593Smuzhiyun struct tar_ucode_info_t *tar_info[OTX_CPT_MAX_ETYPES_PER_GRP] = {};
1475*4882a593Smuzhiyun struct otx_cpt_engines engs[OTX_CPT_MAX_ETYPES_PER_GRP] = {};
1476*4882a593Smuzhiyun struct tar_arch_info_t *tar_arch = NULL;
1477*4882a593Smuzhiyun char *tar_filename;
1478*4882a593Smuzhiyun int i, ret = 0;
1479*4882a593Smuzhiyun
1480*4882a593Smuzhiyun mutex_lock(&eng_grps->lock);
1481*4882a593Smuzhiyun
1482*4882a593Smuzhiyun /*
1483*4882a593Smuzhiyun * We don't create an engine group for kernel crypto if an attempt to
1484*4882a593Smuzhiyun * create it was already made (when the user enabled VFs for the first time)
1485*4882a593Smuzhiyun */
1486*4882a593Smuzhiyun if (eng_grps->is_first_try)
1487*4882a593Smuzhiyun goto unlock_mutex;
1488*4882a593Smuzhiyun eng_grps->is_first_try = true;
1489*4882a593Smuzhiyun
1490*4882a593Smuzhiyun /* We create a group for kernel crypto only if no groups are configured */
1491*4882a593Smuzhiyun for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
1492*4882a593Smuzhiyun if (eng_grps->grp[i].is_enabled)
1493*4882a593Smuzhiyun goto unlock_mutex;
1494*4882a593Smuzhiyun
1495*4882a593Smuzhiyun switch (pf_type) {
1496*4882a593Smuzhiyun case OTX_CPT_AE:
1497*4882a593Smuzhiyun case OTX_CPT_SE:
1498*4882a593Smuzhiyun tar_filename = OTX_CPT_UCODE_TAR_FILE_NAME;
1499*4882a593Smuzhiyun break;
1500*4882a593Smuzhiyun
1501*4882a593Smuzhiyun default:
1502*4882a593Smuzhiyun dev_err(&pdev->dev, "Unknown PF type %d\n", pf_type);
1503*4882a593Smuzhiyun ret = -EINVAL;
1504*4882a593Smuzhiyun goto unlock_mutex;
1505*4882a593Smuzhiyun }
1506*4882a593Smuzhiyun
1507*4882a593Smuzhiyun tar_arch = load_tar_archive(&pdev->dev, tar_filename);
1508*4882a593Smuzhiyun if (!tar_arch)
1509*4882a593Smuzhiyun goto unlock_mutex;
1510*4882a593Smuzhiyun
1511*4882a593Smuzhiyun /*
1512*4882a593Smuzhiyun * If the device supports SE engines and there is SE microcode in the
1513*4882a593Smuzhiyun * tar archive, try to create an engine group with SE engines for
1514*4882a593Smuzhiyun * kernel crypto functionality (symmetric crypto)
1515*4882a593Smuzhiyun */
1516*4882a593Smuzhiyun tar_info[0] = get_uc_from_tar_archive(tar_arch, OTX_CPT_SE_TYPES);
1517*4882a593Smuzhiyun if (tar_info[0] &&
1518*4882a593Smuzhiyun dev_supports_eng_type(eng_grps, OTX_CPT_SE_TYPES)) {
1519*4882a593Smuzhiyun
1520*4882a593Smuzhiyun engs[0].type = OTX_CPT_SE_TYPES;
1521*4882a593Smuzhiyun engs[0].count = eng_grps->avail.max_se_cnt;
1522*4882a593Smuzhiyun
1523*4882a593Smuzhiyun ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
1524*4882a593Smuzhiyun (void **) tar_info, 1, true);
1525*4882a593Smuzhiyun if (ret)
1526*4882a593Smuzhiyun goto release_tar_arch;
1527*4882a593Smuzhiyun }
1528*4882a593Smuzhiyun /*
1529*4882a593Smuzhiyun * If the device supports AE engines and there is AE microcode in the
1530*4882a593Smuzhiyun * tar archive, try to create an engine group with AE engines for
1531*4882a593Smuzhiyun * asymmetric crypto functionality.
1532*4882a593Smuzhiyun */
1533*4882a593Smuzhiyun tar_info[0] = get_uc_from_tar_archive(tar_arch, OTX_CPT_AE_TYPES);
1534*4882a593Smuzhiyun if (tar_info[0] &&
1535*4882a593Smuzhiyun dev_supports_eng_type(eng_grps, OTX_CPT_AE_TYPES)) {
1536*4882a593Smuzhiyun
1537*4882a593Smuzhiyun engs[0].type = OTX_CPT_AE_TYPES;
1538*4882a593Smuzhiyun engs[0].count = eng_grps->avail.max_ae_cnt;
1539*4882a593Smuzhiyun
1540*4882a593Smuzhiyun ret = create_engine_group(&pdev->dev, eng_grps, engs, 1,
1541*4882a593Smuzhiyun (void **) tar_info, 1, true);
1542*4882a593Smuzhiyun if (ret)
1543*4882a593Smuzhiyun goto release_tar_arch;
1544*4882a593Smuzhiyun }
1545*4882a593Smuzhiyun
1546*4882a593Smuzhiyun print_dbg_info(&pdev->dev, eng_grps);
1547*4882a593Smuzhiyun release_tar_arch:
1548*4882a593Smuzhiyun release_tar_archive(tar_arch);
1549*4882a593Smuzhiyun unlock_mutex:
1550*4882a593Smuzhiyun mutex_unlock(&eng_grps->lock);
1551*4882a593Smuzhiyun return ret;
1552*4882a593Smuzhiyun }
1553*4882a593Smuzhiyun
1554*4882a593Smuzhiyun void otx_cpt_set_eng_grps_is_rdonly(struct otx_cpt_eng_grps *eng_grps,
1555*4882a593Smuzhiyun bool is_rdonly)
1556*4882a593Smuzhiyun {
1557*4882a593Smuzhiyun mutex_lock(&eng_grps->lock);
1558*4882a593Smuzhiyun
1559*4882a593Smuzhiyun eng_grps->is_rdonly = is_rdonly;
1560*4882a593Smuzhiyun
1561*4882a593Smuzhiyun mutex_unlock(&eng_grps->lock);
1562*4882a593Smuzhiyun }
1563*4882a593Smuzhiyun
1564*4882a593Smuzhiyun void otx_cpt_disable_all_cores(struct otx_cpt_device *cpt)
1565*4882a593Smuzhiyun {
1566*4882a593Smuzhiyun int grp, timeout = 100;
1567*4882a593Smuzhiyun u64 reg;
1568*4882a593Smuzhiyun
1569*4882a593Smuzhiyun /* Disengage the cores from groups */
1570*4882a593Smuzhiyun for (grp = 0; grp < OTX_CPT_MAX_ENGINE_GROUPS; grp++) {
1571*4882a593Smuzhiyun writeq(0, cpt->reg_base + OTX_CPT_PF_GX_EN(grp));
1572*4882a593Smuzhiyun udelay(CSR_DELAY);
1573*4882a593Smuzhiyun }
1574*4882a593Smuzhiyun
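/* Poll EXEC_BUSY until all cores go idle or the retry budget runs out */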
1575*4882a593Smuzhiyun reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
1576*4882a593Smuzhiyun while (reg) {
1577*4882a593Smuzhiyun udelay(CSR_DELAY);
1578*4882a593Smuzhiyun reg = readq(cpt->reg_base + OTX_CPT_PF_EXEC_BUSY);
1579*4882a593Smuzhiyun if (!timeout--) {
1580*4882a593Smuzhiyun dev_warn(&cpt->pdev->dev, "Cores still busy\n");
1581*4882a593Smuzhiyun break;
1582*4882a593Smuzhiyun }
1583*4882a593Smuzhiyun }
1584*4882a593Smuzhiyun
1585*4882a593Smuzhiyun /* Disable the cores */
1586*4882a593Smuzhiyun writeq(0, cpt->reg_base + OTX_CPT_PF_EXE_CTL);
1587*4882a593Smuzhiyun }
1588*4882a593Smuzhiyun
1589*4882a593Smuzhiyun void otx_cpt_cleanup_eng_grps(struct pci_dev *pdev,
1590*4882a593Smuzhiyun struct otx_cpt_eng_grps *eng_grps)
1591*4882a593Smuzhiyun {
1592*4882a593Smuzhiyun struct otx_cpt_eng_grp_info *grp;
1593*4882a593Smuzhiyun int i, j;
1594*4882a593Smuzhiyun
1595*4882a593Smuzhiyun mutex_lock(&eng_grps->lock);
1596*4882a593Smuzhiyun if (eng_grps->is_ucode_load_created) {
1597*4882a593Smuzhiyun device_remove_file(&pdev->dev,
1598*4882a593Smuzhiyun &eng_grps->ucode_load_attr);
1599*4882a593Smuzhiyun eng_grps->is_ucode_load_created = false;
1600*4882a593Smuzhiyun }
1601*4882a593Smuzhiyun
1602*4882a593Smuzhiyun /* First delete all mirroring engine groups */
1603*4882a593Smuzhiyun for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
1604*4882a593Smuzhiyun if (eng_grps->grp[i].mirror.is_ena)
1605*4882a593Smuzhiyun delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
1606*4882a593Smuzhiyun
1607*4882a593Smuzhiyun /* Delete remaining engine groups */
1608*4882a593Smuzhiyun for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++)
1609*4882a593Smuzhiyun delete_engine_group(&pdev->dev, &eng_grps->grp[i]);
1610*4882a593Smuzhiyun
1611*4882a593Smuzhiyun /* Release memory */
1612*4882a593Smuzhiyun for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
1613*4882a593Smuzhiyun grp = &eng_grps->grp[i];
1614*4882a593Smuzhiyun for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
1615*4882a593Smuzhiyun kfree(grp->engs[j].bmap);
1616*4882a593Smuzhiyun grp->engs[j].bmap = NULL;
1617*4882a593Smuzhiyun }
1618*4882a593Smuzhiyun }
1619*4882a593Smuzhiyun
1620*4882a593Smuzhiyun mutex_unlock(&eng_grps->lock);
1621*4882a593Smuzhiyun }
1622*4882a593Smuzhiyun
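/*
 * One-time setup of the engine group bookkeeping: per-group engine
 * bitmaps are sized for all SE + AE engines of this PF, the supported
 * engine-type mask is derived from the PF type, and the write-only
 * "ucode_load" sysfs file is created as the user interface for managing
 * engine groups.
 */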
1623*4882a593Smuzhiyun int otx_cpt_init_eng_grps(struct pci_dev *pdev,
1624*4882a593Smuzhiyun struct otx_cpt_eng_grps *eng_grps, int pf_type)
1625*4882a593Smuzhiyun {
1626*4882a593Smuzhiyun struct otx_cpt_eng_grp_info *grp;
1627*4882a593Smuzhiyun int i, j, ret = 0;
1628*4882a593Smuzhiyun
1629*4882a593Smuzhiyun mutex_init(&eng_grps->lock);
1630*4882a593Smuzhiyun eng_grps->obj = pci_get_drvdata(pdev);
1631*4882a593Smuzhiyun eng_grps->avail.se_cnt = eng_grps->avail.max_se_cnt;
1632*4882a593Smuzhiyun eng_grps->avail.ae_cnt = eng_grps->avail.max_ae_cnt;
1633*4882a593Smuzhiyun
1634*4882a593Smuzhiyun eng_grps->engs_num = eng_grps->avail.max_se_cnt +
1635*4882a593Smuzhiyun eng_grps->avail.max_ae_cnt;
1636*4882a593Smuzhiyun if (eng_grps->engs_num > OTX_CPT_MAX_ENGINES) {
1637*4882a593Smuzhiyun dev_err(&pdev->dev,
1638*4882a593Smuzhiyun "Number of engines %d > than max supported %d\n",
1639*4882a593Smuzhiyun eng_grps->engs_num, OTX_CPT_MAX_ENGINES);
1640*4882a593Smuzhiyun ret = -EINVAL;
1641*4882a593Smuzhiyun goto err;
1642*4882a593Smuzhiyun }
1643*4882a593Smuzhiyun
1644*4882a593Smuzhiyun for (i = 0; i < OTX_CPT_MAX_ENGINE_GROUPS; i++) {
1645*4882a593Smuzhiyun grp = &eng_grps->grp[i];
1646*4882a593Smuzhiyun grp->g = eng_grps;
1647*4882a593Smuzhiyun grp->idx = i;
1648*4882a593Smuzhiyun
1649*4882a593Smuzhiyun snprintf(grp->sysfs_info_name, OTX_CPT_UCODE_NAME_LENGTH,
1650*4882a593Smuzhiyun "engine_group%d", i);
1651*4882a593Smuzhiyun for (j = 0; j < OTX_CPT_MAX_ETYPES_PER_GRP; j++) {
1652*4882a593Smuzhiyun grp->engs[j].bmap =
1653*4882a593Smuzhiyun kcalloc(BITS_TO_LONGS(eng_grps->engs_num),
1654*4882a593Smuzhiyun sizeof(long), GFP_KERNEL);
1655*4882a593Smuzhiyun if (!grp->engs[j].bmap) {
1656*4882a593Smuzhiyun ret = -ENOMEM;
1657*4882a593Smuzhiyun goto err;
1658*4882a593Smuzhiyun }
1659*4882a593Smuzhiyun }
1660*4882a593Smuzhiyun }
1661*4882a593Smuzhiyun
1662*4882a593Smuzhiyun switch (pf_type) {
1663*4882a593Smuzhiyun case OTX_CPT_SE:
1664*4882a593Smuzhiyun /* OcteonTX 83XX SE CPT PF has only SE engines attached */
1665*4882a593Smuzhiyun eng_grps->eng_types_supported = 1 << OTX_CPT_SE_TYPES;
1666*4882a593Smuzhiyun break;
1667*4882a593Smuzhiyun
1668*4882a593Smuzhiyun case OTX_CPT_AE:
1669*4882a593Smuzhiyun /* OcteonTX 83XX AE CPT PF has only AE engines attached */
1670*4882a593Smuzhiyun eng_grps->eng_types_supported = 1 << OTX_CPT_AE_TYPES;
1671*4882a593Smuzhiyun break;
1672*4882a593Smuzhiyun
1673*4882a593Smuzhiyun default:
1674*4882a593Smuzhiyun dev_err(&pdev->dev, "Unknown PF type %d\n", pf_type);
1675*4882a593Smuzhiyun ret = -EINVAL;
1676*4882a593Smuzhiyun goto err;
1677*4882a593Smuzhiyun }
1678*4882a593Smuzhiyun
1679*4882a593Smuzhiyun eng_grps->ucode_load_attr.show = NULL;
1680*4882a593Smuzhiyun eng_grps->ucode_load_attr.store = ucode_load_store;
1681*4882a593Smuzhiyun eng_grps->ucode_load_attr.attr.name = "ucode_load";
1682*4882a593Smuzhiyun eng_grps->ucode_load_attr.attr.mode = 0220;
1683*4882a593Smuzhiyun sysfs_attr_init(&eng_grps->ucode_load_attr.attr);
1684*4882a593Smuzhiyun ret = device_create_file(&pdev->dev,
1685*4882a593Smuzhiyun &eng_grps->ucode_load_attr);
1686*4882a593Smuzhiyun if (ret)
1687*4882a593Smuzhiyun goto err;
1688*4882a593Smuzhiyun eng_grps->is_ucode_load_created = true;
1689*4882a593Smuzhiyun
1690*4882a593Smuzhiyun print_dbg_info(&pdev->dev, eng_grps);
1691*4882a593Smuzhiyun return ret;
1692*4882a593Smuzhiyun err:
1693*4882a593Smuzhiyun otx_cpt_cleanup_eng_grps(pdev, eng_grps);
1694*4882a593Smuzhiyun return ret;
1695*4882a593Smuzhiyun }
1696