xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/imx/dcss/dcss-ctxld.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright 2019 NXP.
4*4882a593Smuzhiyun  */
5*4882a593Smuzhiyun 
6*4882a593Smuzhiyun #include <linux/delay.h>
7*4882a593Smuzhiyun #include <linux/dma-mapping.h>
8*4882a593Smuzhiyun #include <linux/interrupt.h>
9*4882a593Smuzhiyun #include <linux/platform_device.h>
10*4882a593Smuzhiyun #include <linux/slab.h>
11*4882a593Smuzhiyun 
12*4882a593Smuzhiyun #include "dcss-dev.h"
13*4882a593Smuzhiyun 
14*4882a593Smuzhiyun #define DCSS_CTXLD_CONTROL_STATUS	0x0
15*4882a593Smuzhiyun #define   CTXLD_ENABLE			BIT(0)
16*4882a593Smuzhiyun #define   ARB_SEL			BIT(1)
17*4882a593Smuzhiyun #define   RD_ERR_EN			BIT(2)
18*4882a593Smuzhiyun #define   DB_COMP_EN			BIT(3)
19*4882a593Smuzhiyun #define   SB_HP_COMP_EN			BIT(4)
20*4882a593Smuzhiyun #define   SB_LP_COMP_EN			BIT(5)
21*4882a593Smuzhiyun #define   DB_PEND_SB_REC_EN		BIT(6)
22*4882a593Smuzhiyun #define   SB_PEND_DISP_ACTIVE_EN	BIT(7)
23*4882a593Smuzhiyun #define   AHB_ERR_EN			BIT(8)
24*4882a593Smuzhiyun #define   RD_ERR			BIT(16)
25*4882a593Smuzhiyun #define   DB_COMP			BIT(17)
26*4882a593Smuzhiyun #define   SB_HP_COMP			BIT(18)
27*4882a593Smuzhiyun #define   SB_LP_COMP			BIT(19)
28*4882a593Smuzhiyun #define   DB_PEND_SB_REC		BIT(20)
29*4882a593Smuzhiyun #define   SB_PEND_DISP_ACTIVE		BIT(21)
30*4882a593Smuzhiyun #define   AHB_ERR			BIT(22)
31*4882a593Smuzhiyun #define DCSS_CTXLD_DB_BASE_ADDR		0x10
32*4882a593Smuzhiyun #define DCSS_CTXLD_DB_COUNT		0x14
33*4882a593Smuzhiyun #define DCSS_CTXLD_SB_BASE_ADDR		0x18
34*4882a593Smuzhiyun #define DCSS_CTXLD_SB_COUNT		0x1C
35*4882a593Smuzhiyun #define   SB_HP_COUNT_POS		0
36*4882a593Smuzhiyun #define   SB_HP_COUNT_MASK		0xffff
37*4882a593Smuzhiyun #define   SB_LP_COUNT_POS		16
38*4882a593Smuzhiyun #define   SB_LP_COUNT_MASK		0xffff0000
39*4882a593Smuzhiyun #define DCSS_AHB_ERR_ADDR		0x20
40*4882a593Smuzhiyun 
41*4882a593Smuzhiyun #define CTXLD_IRQ_COMPLETION		(DB_COMP | SB_HP_COMP | SB_LP_COMP)
42*4882a593Smuzhiyun #define CTXLD_IRQ_ERROR			(RD_ERR | DB_PEND_SB_REC | AHB_ERR)
43*4882a593Smuzhiyun 
44*4882a593Smuzhiyun /* The following sizes are in context loader entries, 8 bytes each. */
45*4882a593Smuzhiyun #define CTXLD_DB_CTX_ENTRIES		1024	/* max 65536 */
46*4882a593Smuzhiyun #define CTXLD_SB_LP_CTX_ENTRIES		10240	/* max 65536 */
47*4882a593Smuzhiyun #define CTXLD_SB_HP_CTX_ENTRIES		20000	/* max 65536 */
48*4882a593Smuzhiyun #define CTXLD_SB_CTX_ENTRIES		(CTXLD_SB_LP_CTX_ENTRIES + \
49*4882a593Smuzhiyun 					 CTXLD_SB_HP_CTX_ENTRIES)
50*4882a593Smuzhiyun 
/*
 * Sizes, in entries, of the DB, SB_HP and SB_LP context regions.
 * Indexed by context type (CTX_DB, CTX_SB_HP, CTX_SB_LP) and used as the
 * hard upper bound when queuing writes in dcss_ctxld_write_irqsafe().
 */
static u16 dcss_ctxld_ctx_size[3] = {
	CTXLD_DB_CTX_ENTRIES,
	CTXLD_SB_HP_CTX_ENTRIES,
	CTXLD_SB_LP_CTX_ENTRIES
};
57*4882a593Smuzhiyun 
/* this represents an entry in the context loader map */
struct dcss_ctxld_item {
	u32 val;	/* value to program */
	u32 ofs;	/* register offset (reg_ofs) the value is destined for */
};
63*4882a593Smuzhiyun 
64*4882a593Smuzhiyun #define CTX_ITEM_SIZE			sizeof(struct dcss_ctxld_item)
65*4882a593Smuzhiyun 
struct dcss_ctxld {
	struct device *dev;
	void __iomem *ctxld_reg;	/* mapped CTXLD register block */
	int irq;
	bool irq_en;	/* tracks enable_irq()/disable_irq() state across PM */

	/*
	 * Double-buffered context areas; index [0]/[1] is selected by
	 * current_ctx and toggled on every flush. sb_lp[] points into the
	 * sb_hp[] allocation (see dcss_ctxld_alloc_ctx()).
	 */
	struct dcss_ctxld_item *db[2];
	struct dcss_ctxld_item *sb_hp[2];
	struct dcss_ctxld_item *sb_lp[2];

	dma_addr_t db_paddr[2];
	dma_addr_t sb_paddr[2];	/* one allocation backs both SB_HP and SB_LP */

	u16 ctx_size[2][3]; /* holds the sizes of DB, SB_HP and SB_LP ctx */
	u8 current_ctx;	/* index of the context currently being built */

	bool in_use;	/* a context load is in flight; cleared by the IRQ handler */
	bool armed;	/* a flush was requested; consumed by dcss_ctxld_kick() */

	spinlock_t lock; /* protects concurent access to private data */
};
87*4882a593Smuzhiyun 
/*
 * CTXLD interrupt: raised for both context-load completion and loader
 * error conditions.
 */
static irqreturn_t dcss_ctxld_irq_handler(int irq, void *data)
{
	struct dcss_ctxld *ctxld = data;
	struct dcss_dev *dcss = dcss_drv_dev_to_dcss(ctxld->dev);
	u32 irq_status;

	irq_status = dcss_readl(ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);

	/*
	 * Only treat this as a completed load if the loader is no longer
	 * enabled (CTXLD_ENABLE dropped) and we actually had a load in
	 * flight.
	 */
	if (irq_status & CTXLD_IRQ_COMPLETION &&
	    !(irq_status & CTXLD_ENABLE) && ctxld->in_use) {
		ctxld->in_use = false;

		if (dcss && dcss->disable_callback)
			dcss->disable_callback(dcss);
	} else if (irq_status & CTXLD_IRQ_ERROR) {
		/*
		 * Except for throwing an error message and clearing the status
		 * register, there's not much we can do here.
		 */
		dev_err(ctxld->dev, "ctxld: error encountered: %08x\n",
			irq_status);
		/*
		 * current_ctx ^ 1 holds the sizes of the last context
		 * submitted to the hardware (see dcss_ctxld_enable_locked()).
		 */
		dev_err(ctxld->dev, "ctxld: db=%d, sb_hp=%d, sb_lp=%d\n",
			ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_DB],
			ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_SB_HP],
			ctxld->ctx_size[ctxld->current_ctx ^ 1][CTX_SB_LP]);
	}

	/* acknowledge only the completion/error bits we just handled */
	dcss_clr(irq_status & (CTXLD_IRQ_ERROR | CTXLD_IRQ_COMPLETION),
		 ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);

	return IRQ_HANDLED;
}
120*4882a593Smuzhiyun 
/*
 * Look up the "ctxld" interrupt of the platform device and install
 * dcss_ctxld_irq_handler() on it.
 *
 * Returns 0 on success, or a negative errno from
 * platform_get_irq_byname()/request_irq() on failure.
 */
static int dcss_ctxld_irq_config(struct dcss_ctxld *ctxld,
				 struct platform_device *pdev)
{
	int ret;

	ctxld->irq = platform_get_irq_byname(pdev, "ctxld");
	if (ctxld->irq < 0)
		return ctxld->irq;

	ret = request_irq(ctxld->irq, dcss_ctxld_irq_handler,
			  0, "dcss_ctxld", ctxld);
	if (ret) {
		dev_err(ctxld->dev, "ctxld: irq request failed.\n");
		return ret;
	}

	/* request_irq() leaves the line enabled */
	ctxld->irq_en = true;

	return 0;
}
141*4882a593Smuzhiyun 
/*
 * Program the CTXLD control/status register: enable the read-error,
 * SB_HP-completion, DB-pending-SB-reclaim and AHB-error interrupt sources.
 * The RD_ERR and AHB_ERR status bits are written too — presumably
 * write-1-to-clear to discard stale error status; confirm against the
 * DCSS reference manual.
 */
static void dcss_ctxld_hw_cfg(struct dcss_ctxld *ctxld)
{
	dcss_writel(RD_ERR_EN | SB_HP_COMP_EN |
		    DB_PEND_SB_REC_EN | AHB_ERR_EN | RD_ERR | AHB_ERR,
		    ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);
}
148*4882a593Smuzhiyun 
dcss_ctxld_free_ctx(struct dcss_ctxld * ctxld)149*4882a593Smuzhiyun static void dcss_ctxld_free_ctx(struct dcss_ctxld *ctxld)
150*4882a593Smuzhiyun {
151*4882a593Smuzhiyun 	struct dcss_ctxld_item *ctx;
152*4882a593Smuzhiyun 	int i;
153*4882a593Smuzhiyun 
154*4882a593Smuzhiyun 	for (i = 0; i < 2; i++) {
155*4882a593Smuzhiyun 		if (ctxld->db[i]) {
156*4882a593Smuzhiyun 			dma_free_coherent(ctxld->dev,
157*4882a593Smuzhiyun 					  CTXLD_DB_CTX_ENTRIES * sizeof(*ctx),
158*4882a593Smuzhiyun 					  ctxld->db[i], ctxld->db_paddr[i]);
159*4882a593Smuzhiyun 			ctxld->db[i] = NULL;
160*4882a593Smuzhiyun 			ctxld->db_paddr[i] = 0;
161*4882a593Smuzhiyun 		}
162*4882a593Smuzhiyun 
163*4882a593Smuzhiyun 		if (ctxld->sb_hp[i]) {
164*4882a593Smuzhiyun 			dma_free_coherent(ctxld->dev,
165*4882a593Smuzhiyun 					  CTXLD_SB_CTX_ENTRIES * sizeof(*ctx),
166*4882a593Smuzhiyun 					  ctxld->sb_hp[i], ctxld->sb_paddr[i]);
167*4882a593Smuzhiyun 			ctxld->sb_hp[i] = NULL;
168*4882a593Smuzhiyun 			ctxld->sb_paddr[i] = 0;
169*4882a593Smuzhiyun 		}
170*4882a593Smuzhiyun 	}
171*4882a593Smuzhiyun }
172*4882a593Smuzhiyun 
/*
 * Allocate the two (double-buffered) sets of DMA-coherent context areas:
 * one DB area plus one combined SB area per set. The SB allocation holds
 * the SB_HP entries first, immediately followed by the SB_LP entries, so
 * sb_lp[] simply points CTXLD_SB_HP_CTX_ENTRIES past sb_hp[].
 *
 * Returns 0 on success or -ENOMEM; on failure, areas already allocated
 * are left in place for the caller (dcss_ctxld_init()) to release via
 * dcss_ctxld_free_ctx().
 */
static int dcss_ctxld_alloc_ctx(struct dcss_ctxld *ctxld)
{
	struct dcss_ctxld_item *ctx;
	int i;

	for (i = 0; i < 2; i++) {
		ctx = dma_alloc_coherent(ctxld->dev,
					 CTXLD_DB_CTX_ENTRIES * sizeof(*ctx),
					 &ctxld->db_paddr[i], GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		ctxld->db[i] = ctx;

		/* one allocation backs both the SB_HP and SB_LP regions */
		ctx = dma_alloc_coherent(ctxld->dev,
					 CTXLD_SB_CTX_ENTRIES * sizeof(*ctx),
					 &ctxld->sb_paddr[i], GFP_KERNEL);
		if (!ctx)
			return -ENOMEM;

		ctxld->sb_hp[i] = ctx;
		ctxld->sb_lp[i] = ctx + CTXLD_SB_HP_CTX_ENTRIES;
	}

	return 0;
}
199*4882a593Smuzhiyun 
dcss_ctxld_init(struct dcss_dev * dcss,unsigned long ctxld_base)200*4882a593Smuzhiyun int dcss_ctxld_init(struct dcss_dev *dcss, unsigned long ctxld_base)
201*4882a593Smuzhiyun {
202*4882a593Smuzhiyun 	struct dcss_ctxld *ctxld;
203*4882a593Smuzhiyun 	int ret;
204*4882a593Smuzhiyun 
205*4882a593Smuzhiyun 	ctxld = kzalloc(sizeof(*ctxld), GFP_KERNEL);
206*4882a593Smuzhiyun 	if (!ctxld)
207*4882a593Smuzhiyun 		return -ENOMEM;
208*4882a593Smuzhiyun 
209*4882a593Smuzhiyun 	dcss->ctxld = ctxld;
210*4882a593Smuzhiyun 	ctxld->dev = dcss->dev;
211*4882a593Smuzhiyun 
212*4882a593Smuzhiyun 	spin_lock_init(&ctxld->lock);
213*4882a593Smuzhiyun 
214*4882a593Smuzhiyun 	ret = dcss_ctxld_alloc_ctx(ctxld);
215*4882a593Smuzhiyun 	if (ret) {
216*4882a593Smuzhiyun 		dev_err(dcss->dev, "ctxld: cannot allocate context memory.\n");
217*4882a593Smuzhiyun 		goto err;
218*4882a593Smuzhiyun 	}
219*4882a593Smuzhiyun 
220*4882a593Smuzhiyun 	ctxld->ctxld_reg = ioremap(ctxld_base, SZ_4K);
221*4882a593Smuzhiyun 	if (!ctxld->ctxld_reg) {
222*4882a593Smuzhiyun 		dev_err(dcss->dev, "ctxld: unable to remap ctxld base\n");
223*4882a593Smuzhiyun 		ret = -ENOMEM;
224*4882a593Smuzhiyun 		goto err;
225*4882a593Smuzhiyun 	}
226*4882a593Smuzhiyun 
227*4882a593Smuzhiyun 	ret = dcss_ctxld_irq_config(ctxld, to_platform_device(dcss->dev));
228*4882a593Smuzhiyun 	if (ret)
229*4882a593Smuzhiyun 		goto err_irq;
230*4882a593Smuzhiyun 
231*4882a593Smuzhiyun 	dcss_ctxld_hw_cfg(ctxld);
232*4882a593Smuzhiyun 
233*4882a593Smuzhiyun 	return 0;
234*4882a593Smuzhiyun 
235*4882a593Smuzhiyun err_irq:
236*4882a593Smuzhiyun 	iounmap(ctxld->ctxld_reg);
237*4882a593Smuzhiyun 
238*4882a593Smuzhiyun err:
239*4882a593Smuzhiyun 	dcss_ctxld_free_ctx(ctxld);
240*4882a593Smuzhiyun 	kfree(ctxld);
241*4882a593Smuzhiyun 
242*4882a593Smuzhiyun 	return ret;
243*4882a593Smuzhiyun }
244*4882a593Smuzhiyun 
/*
 * Tear down the context loader: release the IRQ first so the handler can
 * no longer run against state we are about to free, then unmap the
 * registers and free the context memory.
 */
void dcss_ctxld_exit(struct dcss_ctxld *ctxld)
{
	free_irq(ctxld->irq, ctxld);

	if (ctxld->ctxld_reg)
		iounmap(ctxld->ctxld_reg);

	dcss_ctxld_free_ctx(ctxld);
	kfree(ctxld);
}
255*4882a593Smuzhiyun 
/*
 * Program the current context's base addresses and entry counts into the
 * hardware and start the context loader, then switch to the alternate
 * context for subsequent updates. Caller must hold ctxld->lock.
 */
static int dcss_ctxld_enable_locked(struct dcss_ctxld *ctxld)
{
	int curr_ctx = ctxld->current_ctx;
	u32 db_base, sb_base, sb_count;
	u32 sb_hp_cnt, sb_lp_cnt, db_cnt;
	struct dcss_dev *dcss = dcss_drv_dev_to_dcss(ctxld->dev);

	if (!dcss)
		return 0;

	/* let DPR and scaler queue their control-register writes first */
	dcss_dpr_write_sysctrl(dcss->dpr);

	dcss_scaler_write_sclctrl(dcss->scaler);

	sb_hp_cnt = ctxld->ctx_size[curr_ctx][CTX_SB_HP];
	sb_lp_cnt = ctxld->ctx_size[curr_ctx][CTX_SB_LP];
	db_cnt = ctxld->ctx_size[curr_ctx][CTX_DB];

	/* make sure SB_LP context area comes after SB_HP */
	if (sb_lp_cnt &&
	    ctxld->sb_lp[curr_ctx] != ctxld->sb_hp[curr_ctx] + sb_hp_cnt) {
		struct dcss_ctxld_item *sb_lp_adjusted;

		sb_lp_adjusted = ctxld->sb_hp[curr_ctx] + sb_hp_cnt;

		memcpy(sb_lp_adjusted, ctxld->sb_lp[curr_ctx],
		       sb_lp_cnt * CTX_ITEM_SIZE);
	}

	db_base = db_cnt ? ctxld->db_paddr[curr_ctx] : 0;

	dcss_writel(db_base, ctxld->ctxld_reg + DCSS_CTXLD_DB_BASE_ADDR);
	dcss_writel(db_cnt, ctxld->ctxld_reg + DCSS_CTXLD_DB_COUNT);

	/*
	 * With no HP entries, the LP count is submitted through the HP
	 * counter field instead — presumably so the loader still processes
	 * the LP entries; confirm with the DCSS reference manual.
	 */
	if (sb_hp_cnt)
		sb_count = ((sb_hp_cnt << SB_HP_COUNT_POS) & SB_HP_COUNT_MASK) |
			   ((sb_lp_cnt << SB_LP_COUNT_POS) & SB_LP_COUNT_MASK);
	else
		sb_count = (sb_lp_cnt << SB_HP_COUNT_POS) & SB_HP_COUNT_MASK;

	sb_base = sb_count ? ctxld->sb_paddr[curr_ctx] : 0;

	dcss_writel(sb_base, ctxld->ctxld_reg + DCSS_CTXLD_SB_BASE_ADDR);
	dcss_writel(sb_count, ctxld->ctxld_reg + DCSS_CTXLD_SB_COUNT);

	/* enable the context loader */
	dcss_set(CTXLD_ENABLE, ctxld->ctxld_reg + DCSS_CTXLD_CONTROL_STATUS);

	/* cleared again by the IRQ handler once the load completes */
	ctxld->in_use = true;

	/*
	 * Toggle the current context to the alternate one so that any updates
	 * in the modules' settings take place there.
	 */
	ctxld->current_ctx ^= 1;

	ctxld->ctx_size[ctxld->current_ctx][CTX_DB] = 0;
	ctxld->ctx_size[ctxld->current_ctx][CTX_SB_HP] = 0;
	ctxld->ctx_size[ctxld->current_ctx][CTX_SB_LP] = 0;

	return 0;
}
318*4882a593Smuzhiyun 
/*
 * Arm the context loader: mark that the context built so far should be
 * flushed to the hardware on the next dcss_ctxld_kick(). Always returns 0.
 */
int dcss_ctxld_enable(struct dcss_ctxld *ctxld)
{
	spin_lock_irq(&ctxld->lock);
	ctxld->armed = true;
	spin_unlock_irq(&ctxld->lock);

	return 0;
}
327*4882a593Smuzhiyun 
dcss_ctxld_kick(struct dcss_ctxld * ctxld)328*4882a593Smuzhiyun void dcss_ctxld_kick(struct dcss_ctxld *ctxld)
329*4882a593Smuzhiyun {
330*4882a593Smuzhiyun 	unsigned long flags;
331*4882a593Smuzhiyun 
332*4882a593Smuzhiyun 	spin_lock_irqsave(&ctxld->lock, flags);
333*4882a593Smuzhiyun 	if (ctxld->armed && !ctxld->in_use) {
334*4882a593Smuzhiyun 		ctxld->armed = false;
335*4882a593Smuzhiyun 		dcss_ctxld_enable_locked(ctxld);
336*4882a593Smuzhiyun 	}
337*4882a593Smuzhiyun 	spin_unlock_irqrestore(&ctxld->lock, flags);
338*4882a593Smuzhiyun }
339*4882a593Smuzhiyun 
/*
 * Queue a single {val, reg_ofs} pair into the context currently being
 * built, in the region selected by ctx_id (CTX_DB/CTX_SB_HP/CTX_SB_LP).
 * Performs no locking of its own; dcss_ctxld_write() is the locked
 * wrapper.
 */
void dcss_ctxld_write_irqsafe(struct dcss_ctxld *ctxld, u32 ctx_id, u32 val,
			      u32 reg_ofs)
{
	int curr = ctxld->current_ctx;
	struct dcss_ctxld_item *area;
	int entry = ctxld->ctx_size[curr][ctx_id];

	/* refuse to overflow the fixed-size context region */
	if (WARN_ON(entry >= dcss_ctxld_ctx_size[ctx_id]))
		return;

	switch (ctx_id) {
	case CTX_DB:
		area = ctxld->db[curr];
		break;
	case CTX_SB_HP:
		area = ctxld->sb_hp[curr];
		break;
	default:
		area = ctxld->sb_lp[curr];
		break;
	}

	area[entry].val = val;
	area[entry].ofs = reg_ofs;
	ctxld->ctx_size[curr][ctx_id] += 1;
}
360*4882a593Smuzhiyun 
/*
 * Locked wrapper around dcss_ctxld_write_irqsafe(). Uses spin_lock_irq,
 * which unconditionally re-enables IRQs on unlock — so this must not be
 * called with IRQs already disabled or while holding ctxld->lock; use
 * dcss_ctxld_write_irqsafe() in those contexts.
 */
void dcss_ctxld_write(struct dcss_ctxld *ctxld, u32 ctx_id,
		      u32 val, u32 reg_ofs)
{
	spin_lock_irq(&ctxld->lock);
	dcss_ctxld_write_irqsafe(ctxld, ctx_id, val, reg_ofs);
	spin_unlock_irq(&ctxld->lock);
}
368*4882a593Smuzhiyun 
dcss_ctxld_is_flushed(struct dcss_ctxld * ctxld)369*4882a593Smuzhiyun bool dcss_ctxld_is_flushed(struct dcss_ctxld *ctxld)
370*4882a593Smuzhiyun {
371*4882a593Smuzhiyun 	return ctxld->ctx_size[ctxld->current_ctx][CTX_DB] == 0 &&
372*4882a593Smuzhiyun 		ctxld->ctx_size[ctxld->current_ctx][CTX_SB_HP] == 0 &&
373*4882a593Smuzhiyun 		ctxld->ctx_size[ctxld->current_ctx][CTX_SB_LP] == 0;
374*4882a593Smuzhiyun }
375*4882a593Smuzhiyun 
/*
 * Resume the context loader: re-program the interrupt/arbitration setup
 * (lost across suspend) and re-enable the IRQ line if suspend disabled
 * it. Always returns 0.
 */
int dcss_ctxld_resume(struct dcss_ctxld *ctxld)
{
	dcss_ctxld_hw_cfg(ctxld);

	if (!ctxld->irq_en) {
		enable_irq(ctxld->irq);
		ctxld->irq_en = true;
	}

	return 0;
}
387*4882a593Smuzhiyun 
dcss_ctxld_suspend(struct dcss_ctxld * ctxld)388*4882a593Smuzhiyun int dcss_ctxld_suspend(struct dcss_ctxld *ctxld)
389*4882a593Smuzhiyun {
390*4882a593Smuzhiyun 	int ret = 0;
391*4882a593Smuzhiyun 	unsigned long timeout = jiffies + msecs_to_jiffies(500);
392*4882a593Smuzhiyun 
393*4882a593Smuzhiyun 	if (!dcss_ctxld_is_flushed(ctxld)) {
394*4882a593Smuzhiyun 		dcss_ctxld_kick(ctxld);
395*4882a593Smuzhiyun 
396*4882a593Smuzhiyun 		while (!time_after(jiffies, timeout) && ctxld->in_use)
397*4882a593Smuzhiyun 			msleep(20);
398*4882a593Smuzhiyun 
399*4882a593Smuzhiyun 		if (time_after(jiffies, timeout))
400*4882a593Smuzhiyun 			return -ETIMEDOUT;
401*4882a593Smuzhiyun 	}
402*4882a593Smuzhiyun 
403*4882a593Smuzhiyun 	spin_lock_irq(&ctxld->lock);
404*4882a593Smuzhiyun 
405*4882a593Smuzhiyun 	if (ctxld->irq_en) {
406*4882a593Smuzhiyun 		disable_irq_nosync(ctxld->irq);
407*4882a593Smuzhiyun 		ctxld->irq_en = false;
408*4882a593Smuzhiyun 	}
409*4882a593Smuzhiyun 
410*4882a593Smuzhiyun 	/* reset context region and sizes */
411*4882a593Smuzhiyun 	ctxld->current_ctx = 0;
412*4882a593Smuzhiyun 	ctxld->ctx_size[0][CTX_DB] = 0;
413*4882a593Smuzhiyun 	ctxld->ctx_size[0][CTX_SB_HP] = 0;
414*4882a593Smuzhiyun 	ctxld->ctx_size[0][CTX_SB_LP] = 0;
415*4882a593Smuzhiyun 
416*4882a593Smuzhiyun 	spin_unlock_irq(&ctxld->lock);
417*4882a593Smuzhiyun 
418*4882a593Smuzhiyun 	return ret;
419*4882a593Smuzhiyun }
420*4882a593Smuzhiyun 
/* Lockdep helper: assert that the caller holds the ctxld spinlock. */
void dcss_ctxld_assert_locked(struct dcss_ctxld *ctxld)
{
	lockdep_assert_held(&ctxld->lock);
}
425