// SPDX-License-Identifier: (BSD-3-Clause OR GPL-2.0-only)
/* Copyright(c) 2014 - 2020 Intel Corporation */
#include <linux/module.h>
#include <linux/slab.h>
#include "adf_accel_devices.h"
#include "adf_common_drv.h"
#include "adf_transport.h"
#include "adf_transport_access_macros.h"
#include "adf_cfg.h"
#include "adf_cfg_strings.h"
#include "qat_crypto.h"
#include "icp_qat_fw.h"

#define SEC ADF_KERNEL_SEC

static struct service_hndl qat_crypto;

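/* Drop a reference to a crypto instance and to its owning accel device. */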
void qat_crypto_put_instance(struct qat_crypto_instance *inst)
{
	atomic_dec(&inst->refctr);
	adf_dev_put(inst->accel_dev);
}

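/*
 * Release any outstanding references, tear down the transport rings and
 * free every crypto instance attached to the device.
 */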
static int qat_crypto_free_instances(struct adf_accel_dev *accel_dev)
{
	struct qat_crypto_instance *inst, *tmp;
	int i;

	list_for_each_entry_safe(inst, tmp, &accel_dev->crypto_list, list) {
		for (i = 0; i < atomic_read(&inst->refctr); i++)
			qat_crypto_put_instance(inst);

		if (inst->sym_tx)
			adf_remove_ring(inst->sym_tx);

		if (inst->sym_rx)
			adf_remove_ring(inst->sym_rx);

		if (inst->pke_tx)
			adf_remove_ring(inst->pke_tx);

		if (inst->pke_rx)
			adf_remove_ring(inst->pke_rx);

		list_del(&inst->list);
		kfree(inst);
	}
	return 0;
}

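/*
 * Pick the least-referenced crypto instance on a started device local to
 * @node (or node-agnostic), falling back to any started device, and take
 * a reference on both the instance and the device.
 */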
struct qat_crypto_instance *qat_crypto_get_instance_node(int node)
{
	struct adf_accel_dev *accel_dev = NULL, *tmp_dev;
	struct qat_crypto_instance *inst = NULL, *tmp_inst;
	unsigned long best = ~0;

	list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
		unsigned long ctr;

		if ((node == dev_to_node(&GET_DEV(tmp_dev)) ||
		     dev_to_node(&GET_DEV(tmp_dev)) < 0) &&
		    adf_dev_started(tmp_dev) &&
		    !list_empty(&tmp_dev->crypto_list)) {
			ctr = atomic_read(&tmp_dev->ref_count);
			if (best > ctr) {
				accel_dev = tmp_dev;
				best = ctr;
			}
		}
	}

	if (!accel_dev) {
		pr_info("QAT: Could not find a device on node %d\n", node);
		/* Get any started device */
		list_for_each_entry(tmp_dev, adf_devmgr_get_head(), list) {
			if (adf_dev_started(tmp_dev) &&
			    !list_empty(&tmp_dev->crypto_list)) {
				accel_dev = tmp_dev;
				break;
			}
		}
	}

	if (!accel_dev)
		return NULL;

	best = ~0;
	list_for_each_entry(tmp_inst, &accel_dev->crypto_list, list) {
		unsigned long ctr;

		ctr = atomic_read(&tmp_inst->refctr);
		if (best > ctr) {
			inst = tmp_inst;
			best = ctr;
		}
	}
	if (inst) {
		if (adf_dev_get(accel_dev)) {
			dev_err(&GET_DEV(accel_dev), "Could not increment dev refctr\n");
			return NULL;
		}
		atomic_inc(&inst->refctr);
	}
	return inst;
}

/**
 * qat_crypto_dev_config() - create dev config required to create crypto inst.
 *
 * @accel_dev: Pointer to acceleration device.
 *
 * Function creates the device configuration required to create crypto
 * instances.
 *
 * Return: 0 on success, error code otherwise.
 */
int qat_crypto_dev_config(struct adf_accel_dev *accel_dev)
{
	int cpus = num_online_cpus();
	int banks = GET_MAX_BANKS(accel_dev);
	int instances = min(cpus, banks);
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	int i;
	unsigned long val;

	if (adf_cfg_section_add(accel_dev, ADF_KERNEL_SEC))
		goto err;
	if (adf_cfg_section_add(accel_dev, "Accelerator0"))
		goto err;

	/* Temporarily set the number of crypto instances to zero to avoid
	 * registering the crypto algorithms.
	 * This will be removed once the algorithms support the
	 * CRYPTO_TFM_REQ_MAY_BACKLOG flag.
	 */
	instances = 0;

	for (i = 0; i < instances; i++) {
		val = i;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_ETRMGR_CORE_AFFINITY,
			 i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
		val = 128;
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 512;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

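		/* Fixed ring numbers within the bank: asym TX on ring 0,
		 * sym TX on ring 2, asym RX on ring 8, sym RX on ring 10.
		 */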
		val = 0;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 2;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 8;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = 10;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
		if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
						key, (void *)&val, ADF_DEC))
			goto err;

		val = ADF_COALESCING_DEF_TIME;
		snprintf(key, sizeof(key), ADF_ETRMGR_COALESCE_TIMER_FORMAT, i);
		if (adf_cfg_add_key_value_param(accel_dev, "Accelerator0",
						key, (void *)&val, ADF_DEC))
			goto err;
	}

	val = i;
	if (adf_cfg_add_key_value_param(accel_dev, ADF_KERNEL_SEC,
					ADF_NUM_CY, (void *)&val, ADF_DEC))
		goto err;

	set_bit(ADF_STATUS_CONFIGURED, &accel_dev->status);
	return 0;
err:
	dev_err(&GET_DEV(accel_dev), "Failed to start QAT accel dev\n");
	return -EINVAL;
}
EXPORT_SYMBOL_GPL(qat_crypto_dev_config);

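/*
 * Allocate one crypto instance per configured "Cy" entry and create its
 * four transport rings (sym/asym, TX/RX) on the configured ring bank.
 */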
static int qat_crypto_create_instances(struct adf_accel_dev *accel_dev)
{
	int i;
	unsigned long bank;
	unsigned long num_inst, num_msg_sym, num_msg_asym;
	int msg_size;
	struct qat_crypto_instance *inst;
	char key[ADF_CFG_MAX_KEY_LEN_IN_BYTES];
	char val[ADF_CFG_MAX_VAL_LEN_IN_BYTES];

	INIT_LIST_HEAD(&accel_dev->crypto_list);
	if (adf_cfg_get_param_value(accel_dev, SEC, ADF_NUM_CY, val))
		return -EFAULT;

	if (kstrtoul(val, 0, &num_inst))
		return -EFAULT;

	for (i = 0; i < num_inst; i++) {
		inst = kzalloc_node(sizeof(*inst), GFP_KERNEL,
				    dev_to_node(&GET_DEV(accel_dev)));
		if (!inst)
			goto err;

		list_add_tail(&inst->list, &accel_dev->crypto_list);
		inst->id = i;
		atomic_set(&inst->refctr, 0);
		inst->accel_dev = accel_dev;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_BANK_NUM, i);
		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
			goto err;

		if (kstrtoul(val, 10, &bank))
			goto err;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_SIZE, i);
		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
			goto err;

		if (kstrtoul(val, 10, &num_msg_sym))
			goto err;

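		/* Each ring gets half the configured size (assumption: the
		 * configured value is sized for the request/response pair).
		 */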
		num_msg_sym = num_msg_sym >> 1;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_SIZE, i);
		if (adf_cfg_get_param_value(accel_dev, SEC, key, val))
			goto err;

		if (kstrtoul(val, 10, &num_msg_asym))
			goto err;
		num_msg_asym = num_msg_asym >> 1;

		msg_size = ICP_QAT_FW_REQ_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_TX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
				    msg_size, key, NULL, 0, &inst->sym_tx))
			goto err;

		msg_size = msg_size >> 1;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_TX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
				    msg_size, key, NULL, 0, &inst->pke_tx))
			goto err;

		msg_size = ICP_QAT_FW_RESP_DEFAULT_SZ;
		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_SYM_RX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_sym,
				    msg_size, key, qat_alg_callback, 0,
				    &inst->sym_rx))
			goto err;

		snprintf(key, sizeof(key), ADF_CY "%d" ADF_RING_ASYM_RX, i);
		if (adf_create_ring(accel_dev, SEC, bank, num_msg_asym,
				    msg_size, key, qat_alg_asym_callback, 0,
				    &inst->pke_rx))
			goto err;
	}
	return 0;
err:
	qat_crypto_free_instances(accel_dev);
	return -ENOMEM;
}

static int qat_crypto_init(struct adf_accel_dev *accel_dev)
{
	if (qat_crypto_create_instances(accel_dev))
		return -EFAULT;

	return 0;
}

static int qat_crypto_shutdown(struct adf_accel_dev *accel_dev)
{
	return qat_crypto_free_instances(accel_dev);
}

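/* Service event handler: only init and shutdown require any work here. */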
static int qat_crypto_event_handler(struct adf_accel_dev *accel_dev,
				    enum adf_event event)
{
	int ret;

	switch (event) {
	case ADF_EVENT_INIT:
		ret = qat_crypto_init(accel_dev);
		break;
	case ADF_EVENT_SHUTDOWN:
		ret = qat_crypto_shutdown(accel_dev);
		break;
	case ADF_EVENT_RESTARTING:
	case ADF_EVENT_RESTARTED:
	case ADF_EVENT_START:
	case ADF_EVENT_STOP:
	default:
		ret = 0;
	}
	return ret;
}

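/* Register/unregister the crypto service handler with the ADF framework. */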
int qat_crypto_register(void)
{
	memset(&qat_crypto, 0, sizeof(qat_crypto));
	qat_crypto.event_hld = qat_crypto_event_handler;
	qat_crypto.name = "qat_crypto";
	return adf_service_register(&qat_crypto);
}

int qat_crypto_unregister(void)
{
	return adf_service_unregister(&qat_crypto);
}