xref: /OK3568_Linux_fs/kernel/drivers/media/platform/sti/hva/hva-hw.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (C) STMicroelectronics SA 2015
 * Authors: Yannick Fertre <yannick.fertre@st.com>
 *          Hugues Fruchet <hugues.fruchet@st.com>
 */

#include <linux/clk.h>
#include <linux/interrupt.h>
#include <linux/platform_device.h>
#include <linux/pm_runtime.h>
#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
#include <linux/seq_file.h>
#endif

#include "hva.h"
#include "hva-hw.h"

/* HVA register offsets */
#define HVA_HIF_REG_RST                 0x0100U
#define HVA_HIF_REG_RST_ACK             0x0104U
#define HVA_HIF_REG_MIF_CFG             0x0108U
#define HVA_HIF_REG_HEC_MIF_CFG         0x010CU
#define HVA_HIF_REG_CFL                 0x0110U
#define HVA_HIF_FIFO_CMD                0x0114U
#define HVA_HIF_FIFO_STS                0x0118U
#define HVA_HIF_REG_SFL                 0x011CU
#define HVA_HIF_REG_IT_ACK              0x0120U
#define HVA_HIF_REG_ERR_IT_ACK          0x0124U
#define HVA_HIF_REG_LMI_ERR             0x0128U
#define HVA_HIF_REG_EMI_ERR             0x012CU
#define HVA_HIF_REG_HEC_MIF_ERR         0x0130U
#define HVA_HIF_REG_HEC_STS             0x0134U
#define HVA_HIF_REG_HVC_STS             0x0138U
#define HVA_HIF_REG_HJE_STS             0x013CU
#define HVA_HIF_REG_CNT                 0x0140U
#define HVA_HIF_REG_HEC_CHKSYN_DIS      0x0144U
#define HVA_HIF_REG_CLK_GATING          0x0148U
#define HVA_HIF_REG_VERSION             0x014CU
#define HVA_HIF_REG_BSM                 0x0150U

/* define value for version id register (HVA_HIF_REG_VERSION) */
#define VERSION_ID_MASK	0x0000FFFF

/* define values for BSM register (HVA_HIF_REG_BSM) */
#define BSM_CFG_VAL1	0x0003F000
#define BSM_CFG_VAL2	0x003F0000

/* define values for memory interface register (HVA_HIF_REG_MIF_CFG) */
#define MIF_CFG_VAL1	0x04460446
#define MIF_CFG_VAL2	0x04460806
#define MIF_CFG_VAL3	0x00000000

/* define value for HEC memory interface register (HVA_HIF_REG_HEC_MIF_CFG) */
#define HEC_MIF_CFG_VAL	0x000000C4

/* bit definitions for clock gating register (HVA_HIF_REG_CLK_GATING) */
#define CLK_GATING_HVC	BIT(0)
#define CLK_GATING_HEC	BIT(1)
#define CLK_GATING_HJE	BIT(2)

/* fixed HVA clock rate (Hz) */
#define CLK_RATE		300000000

/* fixed autosuspend delay for runtime PM (ms) */
#define AUTOSUSPEND_DELAY_MS	3

/*
 * hw encode error values
 * NO_ERROR: Success, Task OK
 * H264_BITSTREAM_OVERSIZE: VECH264 Bitstream size > bitstream buffer
 * H264_FRAME_SKIPPED: VECH264 Frame skipped (refers to CPB Buffer Size)
 * H264_SLICE_LIMIT_SIZE: VECH264 MB > slice limit size
 * H264_MAX_SLICE_NUMBER: VECH264 max slice number reached
 * H264_SLICE_READY: VECH264 Slice ready
 * TASK_LIST_FULL: HVA/FPC task list full
 *		   (discard latest transform command)
 * UNKNOWN_COMMAND: Transform command not known by HVA/FPC
 * WRONG_CODEC_OR_RESOLUTION: Wrong Codec or Resolution Selection
 * NO_INT_COMPLETION: Time-out on interrupt completion
 * LMI_ERR: Local Memory Interface Error
 * EMI_ERR: External Memory Interface Error
 * HECMI_ERR: HEC Memory Interface Error
 */
enum hva_hw_error {
	NO_ERROR = 0x0,
	H264_BITSTREAM_OVERSIZE = 0x2,
	H264_FRAME_SKIPPED = 0x4,
	H264_SLICE_LIMIT_SIZE = 0x5,
	H264_MAX_SLICE_NUMBER = 0x7,
	H264_SLICE_READY = 0x8,
	TASK_LIST_FULL = 0xF0,
	UNKNOWN_COMMAND = 0xF1,
	WRONG_CODEC_OR_RESOLUTION = 0xF4,
	NO_INT_COMPLETION = 0x100,
	LMI_ERR = 0x101,
	EMI_ERR = 0x102,
	HECMI_ERR = 0x103,
};

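/*
 * Status interrupt handling is split in two halves: the hard IRQ handler
 * below only latches the status and FIFO-level registers and acknowledges
 * the interrupt, while the threaded handler decodes the status word and
 * flags the corresponding encoding context.
 */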
static irqreturn_t hva_hw_its_interrupt(int irq, void *data)
{
	struct hva_dev *hva = data;

	/* read status registers */
	hva->sts_reg = readl_relaxed(hva->regs + HVA_HIF_FIFO_STS);
	hva->sfl_reg = readl_relaxed(hva->regs + HVA_HIF_REG_SFL);

	/* acknowledge the interrupt */
	writel_relaxed(0x1, hva->regs + HVA_HIF_REG_IT_ACK);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t hva_hw_its_irq_thread(int irq, void *arg)
{
	struct hva_dev *hva = arg;
	struct device *dev = hva_to_dev(hva);
	u32 status = hva->sts_reg & 0xFF;
	u8 ctx_id = 0;
	struct hva_ctx *ctx = NULL;

	dev_dbg(dev, "%s     %s: status: 0x%02x fifo level: 0x%02x\n",
		HVA_PREFIX, __func__, hva->sts_reg & 0xFF, hva->sfl_reg & 0xF);

	/*
	 * status: task_id[31:16] client_id[15:8] status[7:0]
	 * the context identifier is retrieved from the client identifier
	 */
	ctx_id = (hva->sts_reg & 0xFF00) >> 8;
	if (ctx_id >= HVA_MAX_INSTANCES) {
		dev_err(dev, "%s     %s: bad context identifier: %d\n",
			HVA_PREFIX, __func__, ctx_id);
		goto out;
	}

	ctx = hva->instances[ctx_id];
	if (!ctx)
		goto out;

	switch (status) {
	case NO_ERROR:
		dev_dbg(dev, "%s     %s: no error\n",
			ctx->name, __func__);
		ctx->hw_err = false;
		break;
	case H264_SLICE_READY:
		dev_dbg(dev, "%s     %s: h264 slice ready\n",
			ctx->name, __func__);
		ctx->hw_err = false;
		break;
	case H264_FRAME_SKIPPED:
		dev_dbg(dev, "%s     %s: h264 frame skipped\n",
			ctx->name, __func__);
		ctx->hw_err = false;
		break;
	case H264_BITSTREAM_OVERSIZE:
		dev_err(dev, "%s     %s: h264 bitstream oversize\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	case H264_SLICE_LIMIT_SIZE:
		dev_err(dev, "%s     %s: h264 slice limit size is reached\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	case H264_MAX_SLICE_NUMBER:
		dev_err(dev, "%s     %s: h264 max slice number is reached\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	case TASK_LIST_FULL:
		dev_err(dev, "%s     %s: task list full\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	case UNKNOWN_COMMAND:
		dev_err(dev, "%s     %s: command not known\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	case WRONG_CODEC_OR_RESOLUTION:
		dev_err(dev, "%s     %s: wrong codec or resolution\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	default:
		dev_err(dev, "%s     %s: status not recognized\n",
			ctx->name, __func__);
		ctx->hw_err = true;
		break;
	}
out:
	complete(&hva->interrupt);

	return IRQ_HANDLED;
}

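/*
 * The error interrupt follows the same hard/threaded split: the hard IRQ
 * handler latches the status, FIFO-level and memory-interface error
 * registers, and the threaded handler reports the faulting interface and
 * marks the context as in error.
 */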
static irqreturn_t hva_hw_err_interrupt(int irq, void *data)
{
	struct hva_dev *hva = data;

	/* read status registers */
	hva->sts_reg = readl_relaxed(hva->regs + HVA_HIF_FIFO_STS);
	hva->sfl_reg = readl_relaxed(hva->regs + HVA_HIF_REG_SFL);

	/* read error registers */
	hva->lmi_err_reg = readl_relaxed(hva->regs + HVA_HIF_REG_LMI_ERR);
	hva->emi_err_reg = readl_relaxed(hva->regs + HVA_HIF_REG_EMI_ERR);
	hva->hec_mif_err_reg = readl_relaxed(hva->regs +
					     HVA_HIF_REG_HEC_MIF_ERR);

	/* acknowledge the interrupt */
	writel_relaxed(0x1, hva->regs + HVA_HIF_REG_IT_ACK);

	return IRQ_WAKE_THREAD;
}

static irqreturn_t hva_hw_err_irq_thread(int irq, void *arg)
{
	struct hva_dev *hva = arg;
	struct device *dev = hva_to_dev(hva);
	u8 ctx_id = 0;
	struct hva_ctx *ctx;

	dev_dbg(dev, "%s     status: 0x%02x fifo level: 0x%02x\n",
		HVA_PREFIX, hva->sts_reg & 0xFF, hva->sfl_reg & 0xF);

	/*
	 * status: task_id[31:16] client_id[15:8] status[7:0]
	 * the context identifier is retrieved from the client identifier
	 */
	ctx_id = (hva->sts_reg & 0xFF00) >> 8;
	if (ctx_id >= HVA_MAX_INSTANCES) {
		dev_err(dev, "%s     bad context identifier: %d\n", HVA_PREFIX,
			ctx_id);
		goto out;
	}

	ctx = hva->instances[ctx_id];
	if (!ctx)
		goto out;

	if (hva->lmi_err_reg) {
		dev_err(dev, "%s     local memory interface error: 0x%08x\n",
			ctx->name, hva->lmi_err_reg);
		ctx->hw_err = true;
	}

	if (hva->emi_err_reg) {
		dev_err(dev, "%s     external memory interface error: 0x%08x\n",
			ctx->name, hva->emi_err_reg);
		ctx->hw_err = true;
	}

	if (hva->hec_mif_err_reg) {
		dev_err(dev, "%s     hec memory interface error: 0x%08x\n",
			ctx->name, hva->hec_mif_err_reg);
		ctx->hw_err = true;
	}
out:
	complete(&hva->interrupt);

	return IRQ_HANDLED;
}

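/*
 * Read the IP version register with the device resumed through runtime PM;
 * any value other than HVA_VERSION_V400 is reported as HVA_VERSION_UNKNOWN.
 */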
static unsigned long int hva_hw_get_ip_version(struct hva_dev *hva)
{
	struct device *dev = hva_to_dev(hva);
	unsigned long int version;

	if (pm_runtime_get_sync(dev) < 0) {
		dev_err(dev, "%s     failed to get pm_runtime\n", HVA_PREFIX);
		pm_runtime_put_noidle(dev);
		mutex_unlock(&hva->protect_mutex);
		return -EFAULT;
	}

	version = readl_relaxed(hva->regs + HVA_HIF_REG_VERSION) &
				VERSION_ID_MASK;

	pm_runtime_put_autosuspend(dev);

	switch (version) {
	case HVA_VERSION_V400:
		dev_dbg(dev, "%s     IP hardware version 0x%lx\n",
			HVA_PREFIX, version);
		break;
	default:
		dev_err(dev, "%s     unknown IP hardware version 0x%lx\n",
			HVA_PREFIX, version);
		version = HVA_VERSION_UNKNOWN;
		break;
	}

	return version;
}

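/*
 * Probe the HVA hardware: map the register bank, reserve the ESRAM area,
 * get and prepare the HVA clock, request the status and error IRQs (kept
 * disabled until a task is executed), set up runtime PM with autosuspend
 * and finally check the IP version.
 */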
int hva_hw_probe(struct platform_device *pdev, struct hva_dev *hva)
{
	struct device *dev = &pdev->dev;
	struct resource *regs;
	struct resource *esram;
	int ret;

	WARN_ON(!hva);

	/* get memory for registers */
	regs = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hva->regs = devm_ioremap_resource(dev, regs);
	if (IS_ERR(hva->regs)) {
		dev_err(dev, "%s     failed to get regs\n", HVA_PREFIX);
		return PTR_ERR(hva->regs);
	}

	/* get memory for esram */
	esram = platform_get_resource(pdev, IORESOURCE_MEM, 1);
	if (!esram) {
		dev_err(dev, "%s     failed to get esram\n", HVA_PREFIX);
		return -ENODEV;
	}
	hva->esram_addr = esram->start;
	hva->esram_size = resource_size(esram);

	dev_info(dev, "%s     esram reserved for address: 0x%x size:%d\n",
		 HVA_PREFIX, hva->esram_addr, hva->esram_size);

	/* get clock resource */
	hva->clk = devm_clk_get(dev, "clk_hva");
	if (IS_ERR(hva->clk)) {
		dev_err(dev, "%s     failed to get clock\n", HVA_PREFIX);
		return PTR_ERR(hva->clk);
	}

	ret = clk_prepare(hva->clk);
	if (ret < 0) {
		dev_err(dev, "%s     failed to prepare clock\n", HVA_PREFIX);
		hva->clk = ERR_PTR(-EINVAL);
		return ret;
	}

	/* get status interrupt resource */
	ret = platform_get_irq(pdev, 0);
	if (ret < 0)
		goto err_clk;
	hva->irq_its = ret;

	ret = devm_request_threaded_irq(dev, hva->irq_its, hva_hw_its_interrupt,
					hva_hw_its_irq_thread,
					IRQF_ONESHOT,
					"hva_its_irq", hva);
	if (ret) {
		dev_err(dev, "%s     failed to install status IRQ 0x%x\n",
			HVA_PREFIX, hva->irq_its);
		goto err_clk;
	}
	disable_irq(hva->irq_its);

	/* get error interrupt resource */
	ret = platform_get_irq(pdev, 1);
	if (ret < 0)
		goto err_clk;
	hva->irq_err = ret;

	ret = devm_request_threaded_irq(dev, hva->irq_err, hva_hw_err_interrupt,
					hva_hw_err_irq_thread,
					IRQF_ONESHOT,
					"hva_err_irq", hva);
	if (ret) {
		dev_err(dev, "%s     failed to install error IRQ 0x%x\n",
			HVA_PREFIX, hva->irq_err);
		goto err_clk;
	}
	disable_irq(hva->irq_err);

	/* initialise protection mutex */
	mutex_init(&hva->protect_mutex);

	/* initialise completion signal */
	init_completion(&hva->interrupt);

	/* initialise runtime power management */
	pm_runtime_set_autosuspend_delay(dev, AUTOSUSPEND_DELAY_MS);
	pm_runtime_use_autosuspend(dev);
	pm_runtime_set_suspended(dev);
	pm_runtime_enable(dev);

	ret = pm_runtime_get_sync(dev);
	if (ret < 0) {
		dev_err(dev, "%s     failed to set PM\n", HVA_PREFIX);
		goto err_pm;
	}

	/* check IP hardware version */
	hva->ip_version = hva_hw_get_ip_version(hva);

	if (hva->ip_version == HVA_VERSION_UNKNOWN) {
		ret = -EINVAL;
		goto err_pm;
	}

	dev_info(dev, "%s     found hva device (version 0x%lx)\n", HVA_PREFIX,
		 hva->ip_version);

	return 0;

err_pm:
	pm_runtime_put(dev);
err_clk:
	if (hva->clk)
		clk_unprepare(hva->clk);

	return ret;
}

void hva_hw_remove(struct hva_dev *hva)
{
	struct device *dev = hva_to_dev(hva);

	disable_irq(hva->irq_its);
	disable_irq(hva->irq_err);

	pm_runtime_put_autosuspend(dev);
	pm_runtime_disable(dev);
}

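/*
 * Runtime PM callbacks: suspend gates the HVA clock, resume re-enables it
 * and programs the fixed CLK_RATE frequency.
 */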
int hva_hw_runtime_suspend(struct device *dev)
{
	struct hva_dev *hva = dev_get_drvdata(dev);

	clk_disable_unprepare(hva->clk);

	return 0;
}

int hva_hw_runtime_resume(struct device *dev)
{
	struct hva_dev *hva = dev_get_drvdata(dev);

	if (clk_prepare_enable(hva->clk)) {
		dev_err(hva->dev, "%s     failed to prepare hva clk\n",
			HVA_PREFIX);
		return -EINVAL;
	}

	if (clk_set_rate(hva->clk, CLK_RATE)) {
		dev_err(dev, "%s     failed to set clock frequency\n",
			HVA_PREFIX);
		/* undo clk_prepare_enable() so the clock is not leaked */
		clk_disable_unprepare(hva->clk);
		return -EINVAL;
	}

	return 0;
}

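/*
 * Execute one encoding task: tasks are serialized by protect_mutex, the
 * status and error IRQs are only enabled while a task is running, the
 * relevant codec clock gating is turned on, and completion is awaited with
 * a 2-second timeout before the result is taken from ctx->hw_err.
 */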
int hva_hw_execute_task(struct hva_ctx *ctx, enum hva_hw_cmd_type cmd,
			struct hva_buffer *task)
{
	struct hva_dev *hva = ctx_to_hdev(ctx);
	struct device *dev = hva_to_dev(hva);
	u8 client_id = ctx->id;
	int ret;
	u32 reg = 0;

	mutex_lock(&hva->protect_mutex);

	/* enable irqs */
	enable_irq(hva->irq_its);
	enable_irq(hva->irq_err);

	if (pm_runtime_get_sync(dev) < 0) {
		dev_err(dev, "%s     failed to get pm_runtime\n", ctx->name);
		ctx->sys_errors++;
		ret = -EFAULT;
		goto out;
	}

	reg = readl_relaxed(hva->regs + HVA_HIF_REG_CLK_GATING);
	switch (cmd) {
	case H264_ENC:
		reg |= CLK_GATING_HVC;
		break;
	default:
		dev_dbg(dev, "%s     unknown command 0x%x\n", ctx->name, cmd);
		ctx->encode_errors++;
		ret = -EFAULT;
		goto out;
	}
	writel_relaxed(reg, hva->regs + HVA_HIF_REG_CLK_GATING);

	dev_dbg(dev, "%s     %s: write configuration registers\n", ctx->name,
		__func__);

	/* byte swap config */
	writel_relaxed(BSM_CFG_VAL1, hva->regs + HVA_HIF_REG_BSM);

	/* define Max Opcode Size and Max Message Size for LMI and EMI */
	writel_relaxed(MIF_CFG_VAL3, hva->regs + HVA_HIF_REG_MIF_CFG);
	writel_relaxed(HEC_MIF_CFG_VAL, hva->regs + HVA_HIF_REG_HEC_MIF_CFG);

	/*
	 * command FIFO: task_id[31:16] client_id[15:8] command_type[7:0]
	 * the context identifier is provided as client identifier to the
	 * hardware, and is retrieved in the interrupt functions from the
	 * status register
	 */
	dev_dbg(dev, "%s     %s: send task (cmd: %d, task_desc: %pad)\n",
		ctx->name, __func__, cmd + (client_id << 8), &task->paddr);
	writel_relaxed(cmd + (client_id << 8), hva->regs + HVA_HIF_FIFO_CMD);
	writel_relaxed(task->paddr, hva->regs + HVA_HIF_FIFO_CMD);

	if (!wait_for_completion_timeout(&hva->interrupt,
					 msecs_to_jiffies(2000))) {
		dev_err(dev, "%s     %s: time out on completion\n", ctx->name,
			__func__);
		ctx->encode_errors++;
		ret = -EFAULT;
		goto out;
	}

	/* get encoding status */
	ret = ctx->hw_err ? -EFAULT : 0;

	ctx->encode_errors += ctx->hw_err ? 1 : 0;

out:
	disable_irq(hva->irq_its);
	disable_irq(hva->irq_err);

	switch (cmd) {
	case H264_ENC:
		reg &= ~CLK_GATING_HVC;
		writel_relaxed(reg, hva->regs + HVA_HIF_REG_CLK_GATING);
		break;
	default:
		dev_dbg(dev, "%s     unknown command 0x%x\n", ctx->name, cmd);
	}

	pm_runtime_put_autosuspend(dev);
	mutex_unlock(&hva->protect_mutex);

	return ret;
}

#ifdef CONFIG_VIDEO_STI_HVA_DEBUGFS
#define DUMP(reg) seq_printf(s, "%-30s: 0x%08X\n",\
			     #reg, readl_relaxed(hva->regs + reg))

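/*
 * Debugfs helper: wake the IP through runtime PM and dump the HIF
 * registers into the given seq_file.
 */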
void hva_hw_dump_regs(struct hva_dev *hva, struct seq_file *s)
{
	struct device *dev = hva_to_dev(hva);

	mutex_lock(&hva->protect_mutex);

	if (pm_runtime_get_sync(dev) < 0) {
		seq_puts(s, "Cannot wake up IP\n");
		pm_runtime_put_noidle(dev);
		mutex_unlock(&hva->protect_mutex);
		return;
	}

	seq_printf(s, "Registers:\nReg @ = 0x%p\n", hva->regs);

	DUMP(HVA_HIF_REG_RST);
	DUMP(HVA_HIF_REG_RST_ACK);
	DUMP(HVA_HIF_REG_MIF_CFG);
	DUMP(HVA_HIF_REG_HEC_MIF_CFG);
	DUMP(HVA_HIF_REG_CFL);
	DUMP(HVA_HIF_REG_SFL);
	DUMP(HVA_HIF_REG_LMI_ERR);
	DUMP(HVA_HIF_REG_EMI_ERR);
	DUMP(HVA_HIF_REG_HEC_MIF_ERR);
	DUMP(HVA_HIF_REG_HEC_STS);
	DUMP(HVA_HIF_REG_HVC_STS);
	DUMP(HVA_HIF_REG_HJE_STS);
	DUMP(HVA_HIF_REG_CNT);
	DUMP(HVA_HIF_REG_HEC_CHKSYN_DIS);
	DUMP(HVA_HIF_REG_CLK_GATING);
	DUMP(HVA_HIF_REG_VERSION);

	pm_runtime_put_autosuspend(dev);
	mutex_unlock(&hva->protect_mutex);
}
#endif