// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) 2017 Marvell
 *
 * Antoine Tenart <antoine.tenart@free-electrons.com>
 */

#include <linux/dma-mapping.h>
#include <linux/spinlock.h>

#include "safexcel.h"

int safexcel_init_ring_descriptors(struct safexcel_crypto_priv *priv,
				   struct safexcel_desc_ring *cdr,
				   struct safexcel_desc_ring *rdr)
{
	int i;
	struct safexcel_command_desc *cdesc;
	dma_addr_t atok;

	/* Actual command descriptor ring */
	cdr->offset = priv->config.cd_offset;
	cdr->base = dmam_alloc_coherent(priv->dev,
					cdr->offset * EIP197_DEFAULT_RING_SIZE,
					&cdr->base_dma, GFP_KERNEL);
	if (!cdr->base)
		return -ENOMEM;
	cdr->write = cdr->base;
	cdr->base_end = cdr->base + cdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
	cdr->read = cdr->base;

	/* Command descriptor shadow ring for storing additional token data */
	cdr->shoffset = priv->config.cdsh_offset;
	cdr->shbase = dmam_alloc_coherent(priv->dev,
					  cdr->shoffset *
					  EIP197_DEFAULT_RING_SIZE,
					  &cdr->shbase_dma, GFP_KERNEL);
	if (!cdr->shbase)
		return -ENOMEM;
	cdr->shwrite = cdr->shbase;
	cdr->shbase_end = cdr->shbase + cdr->shoffset *
					(EIP197_DEFAULT_RING_SIZE - 1);

	/*
	 * Populate command descriptors with physical pointers to shadow descs.
	 * Note that we only need to do this once if we don't overwrite them.
	 */
	cdesc = cdr->base;
	atok = cdr->shbase_dma;
	for (i = 0; i < EIP197_DEFAULT_RING_SIZE; i++) {
		cdesc->atok_lo = lower_32_bits(atok);
		cdesc->atok_hi = upper_32_bits(atok);
		cdesc = (void *)cdesc + cdr->offset;
		atok += cdr->shoffset;
	}

	rdr->offset = priv->config.rd_offset;
	/* Use shoffset for result token offset here */
	rdr->shoffset = priv->config.res_offset;
	rdr->base = dmam_alloc_coherent(priv->dev,
					rdr->offset * EIP197_DEFAULT_RING_SIZE,
					&rdr->base_dma, GFP_KERNEL);
	if (!rdr->base)
		return -ENOMEM;
	rdr->write = rdr->base;
	rdr->base_end = rdr->base + rdr->offset * (EIP197_DEFAULT_RING_SIZE - 1);
	rdr->read = rdr->base;

	return 0;
}
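
/*
 * Layout note (editorial sketch, not from the original source): each ring
 * holds EIP197_DEFAULT_RING_SIZE slots of ->offset bytes, and ->base_end
 * points at the START of the last slot rather than one past it:
 *
 *	slot i:    base + i * offset,  for 0 <= i < EIP197_DEFAULT_RING_SIZE
 *	base_end:  base + offset * (EIP197_DEFAULT_RING_SIZE - 1)
 *
 * which is why the pointer-advance helpers below test for equality with
 * base_end when deciding to wrap.
 */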

inline int safexcel_select_ring(struct safexcel_crypto_priv *priv)
{
	return (atomic_inc_return(&priv->ring_used) % priv->config.rings);
}
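
/*
 * Usage sketch (hypothetical caller, for illustration only): requests are
 * spread across rings round-robin before being enqueued:
 *
 *	int ring = safexcel_select_ring(priv);
 *
 *	// enqueue the request on priv->ring[ring], under that ring's lock
 *
 * atomic_inc_return() keeps the counter coherent between concurrent
 * submitters; the modulo folds it onto a valid ring index.
 */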

static void *safexcel_ring_next_cwptr(struct safexcel_crypto_priv *priv,
				      struct safexcel_desc_ring *ring,
				      bool first,
				      struct safexcel_token **atoken)
{
	void *ptr = ring->write;

	if (first)
		*atoken = ring->shwrite;

	if ((ring->write == ring->read - ring->offset) ||
	    (ring->read == ring->base && ring->write == ring->base_end))
		return ERR_PTR(-ENOMEM);

	if (ring->write == ring->base_end) {
		ring->write = ring->base;
		ring->shwrite = ring->shbase;
	} else {
		ring->write += ring->offset;
		ring->shwrite += ring->shoffset;
	}

	return ptr;
}
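
/*
 * Worked example of the fullness check above (editorial note): one slot is
 * deliberately sacrificed so that write == read can unambiguously mean
 * "empty". With read at slot 3, the ring is full once write reaches slot 2
 * (write == read - offset); with read at slot 0, it is full once write
 * reaches the last slot (read == base && write == base_end).
 */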

static void *safexcel_ring_next_rwptr(struct safexcel_crypto_priv *priv,
				      struct safexcel_desc_ring *ring,
				      struct result_data_desc **rtoken)
{
	void *ptr = ring->write;

	/* Result token at relative offset shoffset */
	*rtoken = ring->write + ring->shoffset;

	if ((ring->write == ring->read - ring->offset) ||
	    (ring->read == ring->base && ring->write == ring->base_end))
		return ERR_PTR(-ENOMEM);

	if (ring->write == ring->base_end)
		ring->write = ring->base;
	else
		ring->write += ring->offset;

	return ptr;
}

void *safexcel_ring_next_rptr(struct safexcel_crypto_priv *priv,
			      struct safexcel_desc_ring *ring)
{
	void *ptr = ring->read;

	if (ring->write == ring->read)
		return ERR_PTR(-ENOENT);

	if (ring->read == ring->base_end)
		ring->read = ring->base;
	else
		ring->read += ring->offset;

	return ptr;
}
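
/*
 * Usage sketch (hypothetical, for illustration; handle_result() is not a
 * real function in this driver): reaping completed descriptors until the
 * ring reports empty:
 *
 *	struct safexcel_result_desc *rdesc;
 *
 *	while (!IS_ERR(rdesc = safexcel_ring_next_rptr(priv, rdr)))
 *		handle_result(rdesc);	// loop ends on ERR_PTR(-ENOENT)
 */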

inline void *safexcel_ring_curr_rptr(struct safexcel_crypto_priv *priv,
				     int ring)
{
	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;

	return rdr->read;
}

inline int safexcel_ring_first_rdr_index(struct safexcel_crypto_priv *priv,
					 int ring)
{
	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;

	return (rdr->read - rdr->base) / rdr->offset;
}

inline int safexcel_ring_rdr_rdesc_index(struct safexcel_crypto_priv *priv,
					 int ring,
					 struct safexcel_result_desc *rdesc)
{
	struct safexcel_desc_ring *rdr = &priv->ring[ring].rdr;

	return ((void *)rdesc - rdr->base) / rdr->offset;
}
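
/*
 * Worked example (editorial): with rdr->offset == 64 bytes and rdesc
 * pointing 192 bytes past rdr->base, safexcel_ring_rdr_rdesc_index()
 * returns 192 / 64 = 3, i.e. the descriptor lives in slot 3 of the RDR.
 */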

void safexcel_ring_rollback_wptr(struct safexcel_crypto_priv *priv,
				 struct safexcel_desc_ring *ring)
{
	if (ring->write == ring->read)
		return;

	if (ring->write == ring->base) {
		ring->write = ring->base_end;
		ring->shwrite = ring->shbase_end;
	} else {
		ring->write -= ring->offset;
		ring->shwrite -= ring->shoffset;
	}
}
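
/*
 * Usage sketch (hypothetical error path, for illustration): if a request
 * fails after n command descriptors were already queued, the caller can
 * unwind the CDR one slot at a time:
 *
 *	for (i = 0; i < n; i++)
 *		safexcel_ring_rollback_wptr(priv, &priv->ring[ring].cdr);
 *
 * The early return above makes rolling back an empty ring a no-op.
 */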

struct safexcel_command_desc *safexcel_add_cdesc(struct safexcel_crypto_priv *priv,
						 int ring_id,
						 bool first, bool last,
						 dma_addr_t data, u32 data_len,
						 u32 full_data_len,
						 dma_addr_t context,
						 struct safexcel_token **atoken)
{
	struct safexcel_command_desc *cdesc;

	cdesc = safexcel_ring_next_cwptr(priv, &priv->ring[ring_id].cdr,
					 first, atoken);
	if (IS_ERR(cdesc))
		return cdesc;

	cdesc->particle_size = data_len;
	cdesc->rsvd0 = 0;
	cdesc->last_seg = last;
	cdesc->first_seg = first;
	cdesc->additional_cdata_size = 0;
	cdesc->rsvd1 = 0;
	cdesc->data_lo = lower_32_bits(data);
	cdesc->data_hi = upper_32_bits(data);

	if (first) {
		/*
		 * Note that the length here MUST be >0 or else the EIP(1)97
		 * may hang. Newer EIP197 firmware actually incorporates this
		 * fix already, but that doesn't help the EIP97 and we may
		 * also be running older firmware.
		 */
		cdesc->control_data.packet_length = full_data_len ?: 1;
		cdesc->control_data.options = EIP197_OPTION_MAGIC_VALUE |
					      EIP197_OPTION_64BIT_CTX |
					      EIP197_OPTION_CTX_CTRL_IN_CMD |
					      EIP197_OPTION_RC_AUTO;
		cdesc->control_data.type = EIP197_TYPE_BCLA;
		cdesc->control_data.context_lo = lower_32_bits(context) |
						 EIP197_CONTEXT_SMALL;
		cdesc->control_data.context_hi = upper_32_bits(context);
	}

	return cdesc;
}
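
/*
 * Usage sketch (illustrative; src_dma, len and ctxr_dma are hypothetical
 * caller state): a payload that fits a single segment uses one descriptor
 * with both first and last set:
 *
 *	struct safexcel_token *atoken;
 *	struct safexcel_command_desc *cdesc;
 *
 *	cdesc = safexcel_add_cdesc(priv, ring, true, true, src_dma, len,
 *				   len, ctxr_dma, &atoken);
 *	if (IS_ERR(cdesc))
 *		return PTR_ERR(cdesc);
 *	// engine instruction tokens are then written through *atoken
 */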

struct safexcel_result_desc *safexcel_add_rdesc(struct safexcel_crypto_priv *priv,
						int ring_id,
						bool first, bool last,
						dma_addr_t data, u32 len)
{
	struct safexcel_result_desc *rdesc;
	struct result_data_desc *rtoken;

	rdesc = safexcel_ring_next_rwptr(priv, &priv->ring[ring_id].rdr,
					 &rtoken);
	if (IS_ERR(rdesc))
		return rdesc;

	rdesc->particle_size = len;
	rdesc->rsvd0 = 0;
	rdesc->descriptor_overflow = 1; /* assume error */
	rdesc->buffer_overflow = 1;     /* assume error */
	rdesc->last_seg = last;
	rdesc->first_seg = first;
	rdesc->result_size = EIP197_RD64_RESULT_SIZE;
	rdesc->rsvd1 = 0;
	rdesc->data_lo = lower_32_bits(data);
	rdesc->data_hi = upper_32_bits(data);

	/* Clear length in result token */
	rtoken->packet_length = 0;
	/* Assume errors - HW will clear if not the case */
	rtoken->error_code = 0x7fff;

	return rdesc;
}
255