1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /* Copyright (C) 2012-2019 ARM Limited or its affiliates. */
3*4882a593Smuzhiyun
4*4882a593Smuzhiyun #include <linux/kernel.h>
5*4882a593Smuzhiyun #include <linux/module.h>
6*4882a593Smuzhiyun
7*4882a593Smuzhiyun #include <linux/crypto.h>
8*4882a593Smuzhiyun #include <linux/moduleparam.h>
9*4882a593Smuzhiyun #include <linux/types.h>
10*4882a593Smuzhiyun #include <linux/interrupt.h>
11*4882a593Smuzhiyun #include <linux/platform_device.h>
12*4882a593Smuzhiyun #include <linux/slab.h>
13*4882a593Smuzhiyun #include <linux/spinlock.h>
14*4882a593Smuzhiyun #include <linux/of.h>
15*4882a593Smuzhiyun #include <linux/clk.h>
16*4882a593Smuzhiyun #include <linux/of_address.h>
17*4882a593Smuzhiyun #include <linux/of_device.h>
18*4882a593Smuzhiyun #include <linux/pm_runtime.h>
19*4882a593Smuzhiyun
20*4882a593Smuzhiyun #include "cc_driver.h"
21*4882a593Smuzhiyun #include "cc_request_mgr.h"
22*4882a593Smuzhiyun #include "cc_buffer_mgr.h"
23*4882a593Smuzhiyun #include "cc_debugfs.h"
24*4882a593Smuzhiyun #include "cc_cipher.h"
25*4882a593Smuzhiyun #include "cc_aead.h"
26*4882a593Smuzhiyun #include "cc_hash.h"
27*4882a593Smuzhiyun #include "cc_sram_mgr.h"
28*4882a593Smuzhiyun #include "cc_pm.h"
29*4882a593Smuzhiyun #include "cc_fips.h"
30*4882a593Smuzhiyun
31*4882a593Smuzhiyun bool cc_dump_desc;
32*4882a593Smuzhiyun module_param_named(dump_desc, cc_dump_desc, bool, 0600);
33*4882a593Smuzhiyun MODULE_PARM_DESC(cc_dump_desc, "Dump descriptors to kernel log as debugging aid");
34*4882a593Smuzhiyun bool cc_dump_bytes;
35*4882a593Smuzhiyun module_param_named(dump_bytes, cc_dump_bytes, bool, 0600);
36*4882a593Smuzhiyun MODULE_PARM_DESC(cc_dump_bytes, "Dump buffers to kernel log as debugging aid");
37*4882a593Smuzhiyun
38*4882a593Smuzhiyun static bool cc_sec_disable;
39*4882a593Smuzhiyun module_param_named(sec_disable, cc_sec_disable, bool, 0600);
40*4882a593Smuzhiyun MODULE_PARM_DESC(cc_sec_disable, "Disable security functions");
41*4882a593Smuzhiyun
/*
 * Static identification data for one CryptoCell hardware revision,
 * matched against the device via the OF compatible table below.
 */
struct cc_hw_data {
	char *name;		/* human-readable revision name, e.g. "712" */
	enum cc_hw_rev rev;	/* driver-internal revision enum */
	u32 sig;		/* expected HOST_SIGNATURE (rev <= 712 parts) */
	u32 cidr_0123;		/* expected COMPONENT_ID_0..3 word (713/703) */
	u32 pidr_0124;		/* expected PERIPHERAL_ID_0,1,2,4 word (713/703) */
	int std_bodies;		/* supported standards bodies (CC_STD_* mask) */
};
50*4882a593Smuzhiyun
/* Number of one-byte ID registers folded into a single 32-bit identity word */
#define CC_NUM_IDRS 4
/* Max polls of NVM_IS_IDLE while waiting for HW reset completion */
#define CC_HW_RESET_LOOP_COUNT 10

/* Note: PIDR3 holds CMOD/Rev so ignored for HW identification purposes */
static const u32 pidr_0124_offsets[CC_NUM_IDRS] = {
	CC_REG(PERIPHERAL_ID_0), CC_REG(PERIPHERAL_ID_1),
	CC_REG(PERIPHERAL_ID_2), CC_REG(PERIPHERAL_ID_4)
};

/* Component ID registers, read in order by cc_read_idr() */
static const u32 cidr_0123_offsets[CC_NUM_IDRS] = {
	CC_REG(COMPONENT_ID_0), CC_REG(COMPONENT_ID_1),
	CC_REG(COMPONENT_ID_2), CC_REG(COMPONENT_ID_3)
};
64*4882a593Smuzhiyun
/* Hardware revisions defs. */

/* The 703 is an OSCCA-only variant of the 713 */
static const struct cc_hw_data cc703_hw = {
	.name = "703", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU,
	.pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_OSCCA
};

static const struct cc_hw_data cc713_hw = {
	.name = "713", .rev = CC_HW_REV_713, .cidr_0123 = 0xB105F00DU,
	.pidr_0124 = 0x040BB0D0U, .std_bodies = CC_STD_ALL
};

static const struct cc_hw_data cc712_hw = {
	.name = "712", .rev = CC_HW_REV_712, .sig = 0xDCC71200U,
	.std_bodies = CC_STD_ALL
};

static const struct cc_hw_data cc710_hw = {
	.name = "710", .rev = CC_HW_REV_710, .sig = 0xDCC63200U,
	.std_bodies = CC_STD_ALL
};

static const struct cc_hw_data cc630p_hw = {
	.name = "630P", .rev = CC_HW_REV_630, .sig = 0xDCC63000U,
	.std_bodies = CC_STD_ALL
};

/* Map device-tree compatible strings to the matching HW description */
static const struct of_device_id arm_ccree_dev_of_match[] = {
	{ .compatible = "arm,cryptocell-703-ree", .data = &cc703_hw },
	{ .compatible = "arm,cryptocell-713-ree", .data = &cc713_hw },
	{ .compatible = "arm,cryptocell-712-ree", .data = &cc712_hw },
	{ .compatible = "arm,cryptocell-710-ree", .data = &cc710_hw },
	{ .compatible = "arm,cryptocell-630p-ree", .data = &cc630p_hw },
	{} /* sentinel */
};
MODULE_DEVICE_TABLE(of, arm_ccree_dev_of_match);
102*4882a593Smuzhiyun
cc_read_idr(struct cc_drvdata * drvdata,const u32 * idr_offsets)103*4882a593Smuzhiyun static u32 cc_read_idr(struct cc_drvdata *drvdata, const u32 *idr_offsets)
104*4882a593Smuzhiyun {
105*4882a593Smuzhiyun int i;
106*4882a593Smuzhiyun union {
107*4882a593Smuzhiyun u8 regs[CC_NUM_IDRS];
108*4882a593Smuzhiyun __le32 val;
109*4882a593Smuzhiyun } idr;
110*4882a593Smuzhiyun
111*4882a593Smuzhiyun for (i = 0; i < CC_NUM_IDRS; ++i)
112*4882a593Smuzhiyun idr.regs[i] = cc_ioread(drvdata, idr_offsets[i]);
113*4882a593Smuzhiyun
114*4882a593Smuzhiyun return le32_to_cpu(idr.val);
115*4882a593Smuzhiyun }
116*4882a593Smuzhiyun
/*
 * Hex-dump @len bytes of @buf to the kernel log at KERN_DEBUG level,
 * each line prefixed with "name[len]: ". A NULL buffer is ignored.
 */
void __dump_byte_array(const char *name, const u8 *buf, size_t len)
{
	char header[64];

	if (!buf)
		return;

	snprintf(header, sizeof(header), "%s[%zu]: ", name, len);
	print_hex_dump(KERN_DEBUG, header, DUMP_PREFIX_ADDRESS, 16, 1,
		       buf, len, false);
}
129*4882a593Smuzhiyun
/*
 * cc_isr() - CryptoCell interrupt handler (may share its IRQ line).
 *
 * Reads the pending causes from HOST_IRR, acknowledges them via HOST_ICR
 * before processing, then dispatches: request-completion causes to the
 * request manager, the TEE FIPS GPR0 cause to the FIPS handler (FIPS
 * builds only), and AXI bus errors to the debug log. Completion and FIPS
 * causes are masked here and re-enabled by their deferred handlers.
 * Returns IRQ_NONE when the device is runtime-suspended or reports no
 * pending cause, so a sharing device can claim the interrupt.
 */
static irqreturn_t cc_isr(int irq, void *dev_id)
{
	struct cc_drvdata *drvdata = (struct cc_drvdata *)dev_id;
	struct device *dev = drvdata_to_dev(drvdata);
	u32 irr;
	u32 imr;

	/* STAT_OP_TYPE_GENERIC STAT_PHASE_0: Interrupt */
	/* if driver suspended return, probably shared interrupt */
	if (pm_runtime_suspended(dev))
		return IRQ_NONE;

	/* read the interrupt status */
	irr = cc_ioread(drvdata, CC_REG(HOST_IRR));
	dev_dbg(dev, "Got IRR=0x%08X\n", irr);

	if (irr == 0) /* Probably shared interrupt line */
		return IRQ_NONE;

	imr = cc_ioread(drvdata, CC_REG(HOST_IMR));

	/* clear interrupt - must be before processing events */
	cc_iowrite(drvdata, CC_REG(HOST_ICR), irr);

	/* stash the raw causes for the deferred handlers */
	drvdata->irq = irr;
	/* Completion interrupt - most probable */
	if (irr & drvdata->comp_mask) {
		/* Mask all completion interrupts - will be unmasked in
		 * deferred service handler
		 */
		cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | drvdata->comp_mask);
		irr &= ~drvdata->comp_mask;
		complete_request(drvdata);
	}
#ifdef CONFIG_CRYPTO_FIPS
	/* TEE FIPS interrupt */
	if (irr & CC_GPR0_IRQ_MASK) {
		/* Mask interrupt - will be unmasked in Deferred service
		 * handler
		 */
		cc_iowrite(drvdata, CC_REG(HOST_IMR), imr | CC_GPR0_IRQ_MASK);
		irr &= ~CC_GPR0_IRQ_MASK;
		fips_handler(drvdata);
	}
#endif
	/* AXI error interrupt */
	if (irr & CC_AXI_ERR_IRQ_MASK) {
		u32 axi_err;

		/* Read the AXI error ID */
		axi_err = cc_ioread(drvdata, CC_REG(AXIM_MON_ERR));
		dev_dbg(dev, "AXI completion error: axim_mon_err=0x%08X\n",
			axi_err);

		irr &= ~CC_AXI_ERR_IRQ_MASK;
	}

	/* any cause bits left over are unrecognized by this driver */
	if (irr) {
		dev_dbg_ratelimited(dev, "IRR includes unknown cause bits (0x%08X)\n",
				    irr);
		/* Just warning */
	}

	return IRQ_HANDLED;
}
195*4882a593Smuzhiyun
cc_wait_for_reset_completion(struct cc_drvdata * drvdata)196*4882a593Smuzhiyun bool cc_wait_for_reset_completion(struct cc_drvdata *drvdata)
197*4882a593Smuzhiyun {
198*4882a593Smuzhiyun unsigned int val;
199*4882a593Smuzhiyun unsigned int i;
200*4882a593Smuzhiyun
201*4882a593Smuzhiyun /* 712/710/63 has no reset completion indication, always return true */
202*4882a593Smuzhiyun if (drvdata->hw_rev <= CC_HW_REV_712)
203*4882a593Smuzhiyun return true;
204*4882a593Smuzhiyun
205*4882a593Smuzhiyun for (i = 0; i < CC_HW_RESET_LOOP_COUNT; i++) {
206*4882a593Smuzhiyun /* in cc7x3 NVM_IS_IDLE indicates that CC reset is
207*4882a593Smuzhiyun * completed and device is fully functional
208*4882a593Smuzhiyun */
209*4882a593Smuzhiyun val = cc_ioread(drvdata, CC_REG(NVM_IS_IDLE));
210*4882a593Smuzhiyun if (val & CC_NVM_IS_IDLE_MASK) {
211*4882a593Smuzhiyun /* hw indicate reset completed */
212*4882a593Smuzhiyun return true;
213*4882a593Smuzhiyun }
214*4882a593Smuzhiyun /* allow scheduling other process on the processor */
215*4882a593Smuzhiyun schedule();
216*4882a593Smuzhiyun }
217*4882a593Smuzhiyun /* reset not completed */
218*4882a593Smuzhiyun return false;
219*4882a593Smuzhiyun }
220*4882a593Smuzhiyun
/*
 * init_cc_regs() - Bring the CryptoCell control registers to an
 * operational state: unmask AXI interrupt sources (pre-713 parts only),
 * clear any pending host interrupts, unmask the causes this driver
 * handles, and program the AXI cache parameters per DMA coherency.
 *
 * @drvdata: driver private context
 * @is_probe: true on first-time probe; enables extra debug logging of
 *            the cache-parameter transition
 *
 * Return: always 0.
 */
int init_cc_regs(struct cc_drvdata *drvdata, bool is_probe)
{
	unsigned int val, cache_params;
	struct device *dev = drvdata_to_dev(drvdata);

	/* Unmask all AXI interrupt sources AXI_CFG1 register */
	/* AXI interrupt config are obsoleted starting at cc7x3 */
	if (drvdata->hw_rev <= CC_HW_REV_712) {
		val = cc_ioread(drvdata, CC_REG(AXIM_CFG));
		cc_iowrite(drvdata, CC_REG(AXIM_CFG), val & ~CC_AXI_IRQ_MASK);
		dev_dbg(dev, "AXIM_CFG=0x%08X\n",
			cc_ioread(drvdata, CC_REG(AXIM_CFG)));
	}

	/* Clear all pending interrupts */
	val = cc_ioread(drvdata, CC_REG(HOST_IRR));
	dev_dbg(dev, "IRR=0x%08X\n", val);
	cc_iowrite(drvdata, CC_REG(HOST_ICR), val);

	/* Unmask relevant interrupt cause */
	val = drvdata->comp_mask | CC_AXI_ERR_IRQ_MASK;

	/* GPR0 (TEE FIPS) interrupts exist on 712 and later parts */
	if (drvdata->hw_rev >= CC_HW_REV_712)
		val |= CC_GPR0_IRQ_MASK;

	/* IMR is a mask register: 0 bits are the enabled causes */
	cc_iowrite(drvdata, CC_REG(HOST_IMR), ~val);

	cache_params = (drvdata->coherent ? CC_COHERENT_CACHE_PARAMS : 0x0);

	val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));

	if (is_probe)
		dev_dbg(dev, "Cache params previous: 0x%08X\n", val);

	/* read back after write so the log shows what the HW accepted */
	cc_iowrite(drvdata, CC_REG(AXIM_CACHE_PARAMS), cache_params);
	val = cc_ioread(drvdata, CC_REG(AXIM_CACHE_PARAMS));

	if (is_probe)
		dev_dbg(dev, "Cache params current: 0x%08X (expect: 0x%08X)\n",
			val, cache_params);

	return 0;
}
264*4882a593Smuzhiyun
/*
 * init_cc_resources() - Probe-time setup for a CryptoCell device.
 *
 * Allocates the driver context, maps the register space, acquires the
 * IRQ, clock and DMA masks, enables runtime PM, verifies the hardware
 * identity registers against the device-tree-selected revision, and
 * initializes every driver sub-module (debugfs, FIPS, SRAM, request and
 * buffer managers, cipher/hash/AEAD algorithm registration).
 *
 * On any failure, already-initialized sub-modules are torn down in
 * reverse order via the goto ladder at the bottom — the label order
 * mirrors the init order exactly; keep them in sync when editing.
 *
 * Return: 0 on success or a negative errno.
 */
static int init_cc_resources(struct platform_device *plat_dev)
{
	struct resource *req_mem_cc_regs = NULL;
	struct cc_drvdata *new_drvdata;
	struct device *dev = &plat_dev->dev;
	struct device_node *np = dev->of_node;
	u32 val, hw_rev_pidr, sig_cidr;
	u64 dma_mask;
	const struct cc_hw_data *hw_rev;
	struct clk *clk;
	int irq;
	int rc = 0;

	new_drvdata = devm_kzalloc(dev, sizeof(*new_drvdata), GFP_KERNEL);
	if (!new_drvdata)
		return -ENOMEM;

	hw_rev = of_device_get_match_data(dev);
	new_drvdata->hw_rev_name = hw_rev->name;
	new_drvdata->hw_rev = hw_rev->rev;
	new_drvdata->std_bodies = hw_rev->std_bodies;

	/* register offsets differ between pre-712 and 712-or-later parts */
	if (hw_rev->rev >= CC_HW_REV_712) {
		new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP);
		new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_712);
		new_drvdata->ver_offset = CC_REG(HOST_VERSION_712);
	} else {
		new_drvdata->axim_mon_offset = CC_REG(AXIM_MON_COMP8);
		new_drvdata->sig_offset = CC_REG(HOST_SIGNATURE_630);
		new_drvdata->ver_offset = CC_REG(HOST_VERSION_630);
	}

	new_drvdata->comp_mask = CC_COMP_IRQ_MASK;

	platform_set_drvdata(plat_dev, new_drvdata);
	new_drvdata->plat_dev = plat_dev;

	/* the clock is optional: a NULL clk is valid and clk ops become no-ops */
	clk = devm_clk_get_optional(dev, NULL);
	if (IS_ERR(clk))
		return dev_err_probe(dev, PTR_ERR(clk), "Error getting clock\n");
	new_drvdata->clk = clk;

	new_drvdata->coherent = of_dma_is_coherent(np);

	/* Get device resources */
	/* First CC registers space */
	req_mem_cc_regs = platform_get_resource(plat_dev, IORESOURCE_MEM, 0);
	/* Map registers space */
	new_drvdata->cc_base = devm_ioremap_resource(dev, req_mem_cc_regs);
	if (IS_ERR(new_drvdata->cc_base)) {
		dev_err(dev, "Failed to ioremap registers");
		return PTR_ERR(new_drvdata->cc_base);
	}

	dev_dbg(dev, "Got MEM resource (%s): %pR\n", req_mem_cc_regs->name,
		req_mem_cc_regs);
	dev_dbg(dev, "CC registers mapped from %pa to 0x%p\n",
		&req_mem_cc_regs->start, new_drvdata->cc_base);

	/* Then IRQ */
	irq = platform_get_irq(plat_dev, 0);
	if (irq < 0)
		return irq;

	init_completion(&new_drvdata->hw_queue_avail);

	if (!dev->dma_mask)
		dev->dma_mask = &dev->coherent_dma_mask;

	/* start from the widest mask the HW supports and narrow until the
	 * platform accepts it
	 */
	dma_mask = DMA_BIT_MASK(DMA_BIT_MASK_LEN);
	while (dma_mask > 0x7fffffffUL) {
		if (dma_supported(dev, dma_mask)) {
			rc = dma_set_coherent_mask(dev, dma_mask);
			if (!rc)
				break;
		}
		dma_mask >>= 1;
	}

	if (rc) {
		dev_err(dev, "Failed in dma_set_mask, mask=%llx\n", dma_mask);
		return rc;
	}

	rc = clk_prepare_enable(new_drvdata->clk);
	if (rc) {
		dev_err(dev, "Failed to enable clock");
		return rc;
	}

	new_drvdata->sec_disabled = cc_sec_disable;

	pm_runtime_set_autosuspend_delay(dev, CC_SUSPEND_TIMEOUT);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_active(dev);
	pm_runtime_enable(dev);
	rc = pm_runtime_get_sync(dev);
	if (rc < 0) {
		dev_err(dev, "pm_runtime_get_sync() failed: %d\n", rc);
		goto post_pm_err;
	}

	/* Wait for Cryptocell reset completion */
	/* NOTE(review): failure here is only logged, not treated as fatal —
	 * presumably the identity checks below will catch a dead device;
	 * confirm against HW behavior.
	 */
	if (!cc_wait_for_reset_completion(new_drvdata)) {
		dev_err(dev, "Cryptocell reset not completed");
	}

	if (hw_rev->rev <= CC_HW_REV_712) {
		/* Verify correct mapping */
		val = cc_ioread(new_drvdata, new_drvdata->sig_offset);
		if (val != hw_rev->sig) {
			dev_err(dev, "Invalid CC signature: SIGNATURE=0x%08X != expected=0x%08X\n",
				val, hw_rev->sig);
			rc = -EINVAL;
			goto post_pm_err;
		}
		sig_cidr = val;
		hw_rev_pidr = cc_ioread(new_drvdata, new_drvdata->ver_offset);
	} else {
		/* Verify correct mapping */
		val = cc_read_idr(new_drvdata, pidr_0124_offsets);
		if (val != hw_rev->pidr_0124) {
			dev_err(dev, "Invalid CC PIDR: PIDR0124=0x%08X != expected=0x%08X\n",
				val, hw_rev->pidr_0124);
			rc = -EINVAL;
			goto post_pm_err;
		}
		hw_rev_pidr = val;

		val = cc_read_idr(new_drvdata, cidr_0123_offsets);
		if (val != hw_rev->cidr_0123) {
			dev_err(dev, "Invalid CC CIDR: CIDR0123=0x%08X != expected=0x%08X\n",
				val, hw_rev->cidr_0123);
			rc = -EINVAL;
			goto post_pm_err;
		}
		sig_cidr = val;

		/* Check HW engine configuration */
		val = cc_ioread(new_drvdata, CC_REG(HOST_REMOVE_INPUT_PINS));
		switch (val) {
		case CC_PINS_FULL:
			/* This is fine */
			break;
		case CC_PINS_SLIM:
			/* slim pinout lacks the NIST engines: degrade to OSCCA */
			if (new_drvdata->std_bodies & CC_STD_NIST) {
				dev_warn(dev, "703 mode forced due to HW configuration.\n");
				new_drvdata->std_bodies = CC_STD_OSCCA;
			}
			break;
		default:
			dev_err(dev, "Unsupported engines configuration.\n");
			rc = -EINVAL;
			goto post_pm_err;
		}

		/* Check security disable state */
		val = cc_ioread(new_drvdata, CC_REG(SECURITY_DISABLED));
		val &= CC_SECURITY_DISABLED_MASK;
		new_drvdata->sec_disabled |= !!val;

		/* CPP abort interrupts are only meaningful with security on */
		if (!new_drvdata->sec_disabled) {
			new_drvdata->comp_mask |= CC_CPP_SM4_ABORT_MASK;
			if (new_drvdata->std_bodies & CC_STD_NIST)
				new_drvdata->comp_mask |= CC_CPP_AES_ABORT_MASK;
		}
	}

	if (new_drvdata->sec_disabled)
		dev_info(dev, "Security Disabled mode is in effect. Security functions disabled.\n");

	/* Display HW versions */
	dev_info(dev, "ARM CryptoCell %s Driver: HW version 0x%08X/0x%8X, Driver version %s\n",
		 hw_rev->name, hw_rev_pidr, sig_cidr, DRV_MODULE_VERSION);
	/* register the driver isr function */
	rc = devm_request_irq(dev, irq, cc_isr, IRQF_SHARED, "ccree",
			      new_drvdata);
	if (rc) {
		dev_err(dev, "Could not register to interrupt %d\n", irq);
		goto post_pm_err;
	}
	dev_dbg(dev, "Registered to IRQ: %d\n", irq);

	rc = init_cc_regs(new_drvdata, true);
	if (rc) {
		dev_err(dev, "init_cc_regs failed\n");
		goto post_pm_err;
	}

	rc = cc_debugfs_init(new_drvdata);
	if (rc) {
		dev_err(dev, "Failed registering debugfs interface\n");
		goto post_regs_err;
	}

	rc = cc_fips_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_fips_init failed 0x%x\n", rc);
		goto post_debugfs_err;
	}
	rc = cc_sram_mgr_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_sram_mgr_init failed\n");
		goto post_fips_init_err;
	}

	/* reserve SRAM for the MLLI (scatter/gather) descriptor tables */
	new_drvdata->mlli_sram_addr =
		cc_sram_alloc(new_drvdata, MAX_MLLI_BUFF_SIZE);
	if (new_drvdata->mlli_sram_addr == NULL_SRAM_ADDR) {
		rc = -ENOMEM;
		goto post_fips_init_err;
	}

	rc = cc_req_mgr_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_req_mgr_init failed\n");
		goto post_fips_init_err;
	}

	rc = cc_buffer_mgr_init(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_buffer_mgr_init failed\n");
		goto post_req_mgr_err;
	}

	/* Allocate crypto algs */
	rc = cc_cipher_alloc(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_cipher_alloc failed\n");
		goto post_buf_mgr_err;
	}

	/* hash must be allocated before aead since hash exports APIs */
	rc = cc_hash_alloc(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_hash_alloc failed\n");
		goto post_cipher_err;
	}

	rc = cc_aead_alloc(new_drvdata);
	if (rc) {
		dev_err(dev, "cc_aead_alloc failed\n");
		goto post_hash_err;
	}

	/* If we got here and FIPS mode is enabled
	 * it means all FIPS test passed, so let TEE
	 * know we're good.
	 */
	cc_set_ree_fips_status(new_drvdata, true);

	/* drop the probe-time PM reference; autosuspend may now kick in */
	pm_runtime_put(dev);
	return 0;

post_hash_err:
	cc_hash_free(new_drvdata);
post_cipher_err:
	cc_cipher_free(new_drvdata);
post_buf_mgr_err:
	cc_buffer_mgr_fini(new_drvdata);
post_req_mgr_err:
	cc_req_mgr_fini(new_drvdata);
post_fips_init_err:
	cc_fips_fini(new_drvdata);
post_debugfs_err:
	cc_debugfs_fini(new_drvdata);
post_regs_err:
	fini_cc_regs(new_drvdata);
post_pm_err:
	pm_runtime_put_noidle(dev);
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
	clk_disable_unprepare(new_drvdata->clk);
	return rc;
}
540*4882a593Smuzhiyun
/*
 * fini_cc_regs() - Quiesce the device by masking every host interrupt
 * cause; counterpart of init_cc_regs().
 */
void fini_cc_regs(struct cc_drvdata *drvdata)
{
	/* Mask all interrupts */
	cc_iowrite(drvdata, CC_REG(HOST_IMR), 0xFFFFFFFF);
}
546*4882a593Smuzhiyun
/*
 * cleanup_cc_resources() - Tear down everything init_cc_resources() set
 * up, in strict reverse order of initialization (algs, managers, FIPS,
 * debugfs, registers, runtime PM, clock).
 */
static void cleanup_cc_resources(struct platform_device *plat_dev)
{
	struct device *dev = &plat_dev->dev;
	struct cc_drvdata *drvdata =
		(struct cc_drvdata *)platform_get_drvdata(plat_dev);

	cc_aead_free(drvdata);
	cc_hash_free(drvdata);
	cc_cipher_free(drvdata);
	cc_buffer_mgr_fini(drvdata);
	cc_req_mgr_fini(drvdata);
	cc_fips_fini(drvdata);
	cc_debugfs_fini(drvdata);
	fini_cc_regs(drvdata);
	pm_runtime_put_noidle(dev);
	pm_runtime_disable(dev);
	pm_runtime_set_suspended(dev);
	clk_disable_unprepare(drvdata->clk);
}
566*4882a593Smuzhiyun
cc_get_default_hash_len(struct cc_drvdata * drvdata)567*4882a593Smuzhiyun unsigned int cc_get_default_hash_len(struct cc_drvdata *drvdata)
568*4882a593Smuzhiyun {
569*4882a593Smuzhiyun if (drvdata->hw_rev >= CC_HW_REV_712)
570*4882a593Smuzhiyun return HASH_LEN_SIZE_712;
571*4882a593Smuzhiyun else
572*4882a593Smuzhiyun return HASH_LEN_SIZE_630;
573*4882a593Smuzhiyun }
574*4882a593Smuzhiyun
ccree_probe(struct platform_device * plat_dev)575*4882a593Smuzhiyun static int ccree_probe(struct platform_device *plat_dev)
576*4882a593Smuzhiyun {
577*4882a593Smuzhiyun int rc;
578*4882a593Smuzhiyun struct device *dev = &plat_dev->dev;
579*4882a593Smuzhiyun
580*4882a593Smuzhiyun /* Map registers space */
581*4882a593Smuzhiyun rc = init_cc_resources(plat_dev);
582*4882a593Smuzhiyun if (rc)
583*4882a593Smuzhiyun return rc;
584*4882a593Smuzhiyun
585*4882a593Smuzhiyun dev_info(dev, "ARM ccree device initialized\n");
586*4882a593Smuzhiyun
587*4882a593Smuzhiyun return 0;
588*4882a593Smuzhiyun }
589*4882a593Smuzhiyun
ccree_remove(struct platform_device * plat_dev)590*4882a593Smuzhiyun static int ccree_remove(struct platform_device *plat_dev)
591*4882a593Smuzhiyun {
592*4882a593Smuzhiyun struct device *dev = &plat_dev->dev;
593*4882a593Smuzhiyun
594*4882a593Smuzhiyun dev_dbg(dev, "Releasing ccree resources...\n");
595*4882a593Smuzhiyun
596*4882a593Smuzhiyun cleanup_cc_resources(plat_dev);
597*4882a593Smuzhiyun
598*4882a593Smuzhiyun dev_info(dev, "ARM ccree device terminated\n");
599*4882a593Smuzhiyun
600*4882a593Smuzhiyun return 0;
601*4882a593Smuzhiyun }
602*4882a593Smuzhiyun
/* Platform driver glue: OF match table plus optional runtime-PM ops. */
static struct platform_driver ccree_driver = {
	.driver = {
		   .name = "ccree",
		   .of_match_table = arm_ccree_dev_of_match,
#ifdef CONFIG_PM
		   .pm = &ccree_pm,
#endif
	},
	.probe = ccree_probe,
	.remove = ccree_remove,
};
614*4882a593Smuzhiyun
/*
 * Module init: set up the shared debugfs root before any device can
 * probe, then register the platform driver.
 */
static int __init ccree_init(void)
{
	cc_debugfs_global_init();

	return platform_driver_register(&ccree_driver);
}
module_init(ccree_init);
622*4882a593Smuzhiyun
/* Module exit: unregister the driver, then drop the shared debugfs root. */
static void __exit ccree_exit(void)
{
	platform_driver_unregister(&ccree_driver);
	cc_debugfs_global_fini();
}
module_exit(ccree_exit);
629*4882a593Smuzhiyun
630*4882a593Smuzhiyun /* Module description */
631*4882a593Smuzhiyun MODULE_DESCRIPTION("ARM TrustZone CryptoCell REE Driver");
632*4882a593Smuzhiyun MODULE_VERSION(DRV_MODULE_VERSION);
633*4882a593Smuzhiyun MODULE_AUTHOR("ARM");
634*4882a593Smuzhiyun MODULE_LICENSE("GPL v2");
635