xref: /OK3568_Linux_fs/kernel/drivers/crypto/qce/core.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/* SPDX-License-Identifier: GPL-2.0-only */
/*
 * Copyright (c) 2010-2014, The Linux Foundation. All rights reserved.
 */

#ifndef _CORE_H_
#define _CORE_H_

#include "dma.h"

/**
 * struct qce_device - crypto engine device structure
 * @queue: crypto request queue
 * @lock: spinlock protecting @queue and @req
 * @done_tasklet: done tasklet object
 * @req: current active request
 * @result: result of current transform
 * @base: virtual IO base
 * @dev: pointer to device structure
 * @core: core device clock
 * @iface: interface clock
 * @bus: bus clock
 * @dma: per-device DMA data
 * @burst_size: the crypto burst size
 * @pipe_pair_id: id of the pipe pair used by the device
 * @async_req_enqueue: invoked by every algorithm to enqueue a request
 * @async_req_done: invoked by every algorithm to finish its request
 */
struct qce_device {
	struct crypto_queue queue;
	spinlock_t lock;
	struct tasklet_struct done_tasklet;
	struct crypto_async_request *req;
	int result;
	void __iomem *base;
	struct device *dev;
	struct clk *core, *iface, *bus;
	struct qce_dma_data dma;
	int burst_size;
	unsigned int pipe_pair_id;
	int (*async_req_enqueue)(struct qce_device *qce,
				 struct crypto_async_request *req);
	void (*async_req_done)(struct qce_device *qce, int ret);
};
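
/*
 * Illustrative sketch, not part of the original header: a rough picture of
 * how an algorithm implementation is expected to hand work to the core
 * through the callbacks above; the example_enqueue() name is a placeholder.
 *
 *	static int example_enqueue(struct qce_device *qce,
 *				   struct crypto_async_request *req)
 *	{
 *		return qce->async_req_enqueue(qce, req);
 *	}
 *
 * The request is queued under @lock and handled asynchronously; once the
 * transform completes, the core is notified through
 * qce->async_req_done(qce, error), which completes the crypto request.
 */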

/**
 * struct qce_algo_ops - algorithm operations per crypto type
 * @type: should be CRYPTO_ALG_TYPE_XXX
 * @register_algs: invoked by core to register the algorithms
 * @unregister_algs: invoked by core to unregister the algorithms
 * @async_req_handle: invoked by core to handle enqueued request
 */
struct qce_algo_ops {
	u32 type;
	int (*register_algs)(struct qce_device *qce);
	void (*unregister_algs)(struct qce_device *qce);
	int (*async_req_handle)(struct crypto_async_request *async_req);
};
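
/*
 * Illustrative sketch, not part of the original header: how a per-type ops
 * table is typically defined by an algorithm file and consumed by the core;
 * the example_* names are placeholders.
 *
 *	const struct qce_algo_ops example_ops = {
 *		.type			= CRYPTO_ALG_TYPE_SKCIPHER,
 *		.register_algs		= example_register,
 *		.unregister_algs	= example_unregister,
 *		.async_req_handle	= example_async_req_handle,
 *	};
 *
 * The core keeps an array of such tables: it calls ->register_algs() at
 * probe time, dispatches each dequeued request to the ->async_req_handle()
 * whose ->type matches the request's algorithm type, and calls
 * ->unregister_algs() on remove.
 */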

#endif /* _CORE_H_ */