xref: /OK3568_Linux_fs/kernel/drivers/gpu/drm/sun4i/sun4i_backend.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-or-later
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun  * Copyright (C) 2015 Free Electrons
4*4882a593Smuzhiyun  * Copyright (C) 2015 NextThing Co
5*4882a593Smuzhiyun  *
6*4882a593Smuzhiyun  * Maxime Ripard <maxime.ripard@free-electrons.com>
7*4882a593Smuzhiyun  */
8*4882a593Smuzhiyun 
9*4882a593Smuzhiyun #include <linux/component.h>
10*4882a593Smuzhiyun #include <linux/list.h>
11*4882a593Smuzhiyun #include <linux/module.h>
12*4882a593Smuzhiyun #include <linux/of_device.h>
13*4882a593Smuzhiyun #include <linux/of_graph.h>
14*4882a593Smuzhiyun #include <linux/dma-mapping.h>
15*4882a593Smuzhiyun #include <linux/platform_device.h>
16*4882a593Smuzhiyun #include <linux/reset.h>
17*4882a593Smuzhiyun 
18*4882a593Smuzhiyun #include <drm/drm_atomic.h>
19*4882a593Smuzhiyun #include <drm/drm_atomic_helper.h>
20*4882a593Smuzhiyun #include <drm/drm_crtc.h>
21*4882a593Smuzhiyun #include <drm/drm_fb_cma_helper.h>
22*4882a593Smuzhiyun #include <drm/drm_fourcc.h>
23*4882a593Smuzhiyun #include <drm/drm_gem_cma_helper.h>
24*4882a593Smuzhiyun #include <drm/drm_plane_helper.h>
25*4882a593Smuzhiyun #include <drm/drm_probe_helper.h>
26*4882a593Smuzhiyun 
27*4882a593Smuzhiyun #include "sun4i_backend.h"
28*4882a593Smuzhiyun #include "sun4i_drv.h"
29*4882a593Smuzhiyun #include "sun4i_frontend.h"
30*4882a593Smuzhiyun #include "sun4i_layer.h"
31*4882a593Smuzhiyun #include "sunxi_engine.h"
32*4882a593Smuzhiyun 
/*
 * Per-SoC quirk flags selected through the OF match data, describing
 * how a given generation of the display backend behaves.
 */
struct sun4i_backend_quirks {
	/* backend <-> TCON muxing selection done in backend */
	bool needs_output_muxing;

	/* alpha at the lowest z position is not always supported */
	bool supports_lowest_plane_alpha;
};
40*4882a593Smuzhiyun 
/*
 * RGB-to-YUV conversion matrix loaded into the backend's output color
 * correction coefficient registers (three rows of three coefficients
 * plus an offset each).
 * NOTE(review): presumably BT.601 limited-range coefficients — confirm
 * against the hardware documentation.
 */
static const u32 sunxi_rgb2yuv_coef[12] = {
	0x00000107, 0x00000204, 0x00000064, 0x00000108,
	0x00003f69, 0x00003ed6, 0x000001c1, 0x00000808,
	0x000001c1, 0x00003e88, 0x00003fb8, 0x00000808
};
46*4882a593Smuzhiyun 
sun4i_backend_apply_color_correction(struct sunxi_engine * engine)47*4882a593Smuzhiyun static void sun4i_backend_apply_color_correction(struct sunxi_engine *engine)
48*4882a593Smuzhiyun {
49*4882a593Smuzhiyun 	int i;
50*4882a593Smuzhiyun 
51*4882a593Smuzhiyun 	DRM_DEBUG_DRIVER("Applying RGB to YUV color correction\n");
52*4882a593Smuzhiyun 
53*4882a593Smuzhiyun 	/* Set color correction */
54*4882a593Smuzhiyun 	regmap_write(engine->regs, SUN4I_BACKEND_OCCTL_REG,
55*4882a593Smuzhiyun 		     SUN4I_BACKEND_OCCTL_ENABLE);
56*4882a593Smuzhiyun 
57*4882a593Smuzhiyun 	for (i = 0; i < 12; i++)
58*4882a593Smuzhiyun 		regmap_write(engine->regs, SUN4I_BACKEND_OCRCOEF_REG(i),
59*4882a593Smuzhiyun 			     sunxi_rgb2yuv_coef[i]);
60*4882a593Smuzhiyun }
61*4882a593Smuzhiyun 
/* Turn the output color correction block back off (RGB passthrough). */
static void sun4i_backend_disable_color_correction(struct sunxi_engine *engine)
{
	DRM_DEBUG_DRIVER("Disabling color correction\n");

	/* Disable color correction */
	regmap_update_bits(engine->regs, SUN4I_BACKEND_OCCTL_REG,
			   SUN4I_BACKEND_OCCTL_ENABLE, 0);
}
70*4882a593Smuzhiyun 
/*
 * Latch all pending register writes into the hardware: request a
 * one-shot load of the shadow registers (LOADCTL) with automatic
 * reloading disabled.
 */
static void sun4i_backend_commit(struct sunxi_engine *engine)
{
	DRM_DEBUG_DRIVER("Committing changes\n");

	regmap_write(engine->regs, SUN4I_BACKEND_REGBUFFCTL_REG,
		     SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS |
		     SUN4I_BACKEND_REGBUFFCTL_LOADCTL);
}
79*4882a593Smuzhiyun 
/*
 * Enable or disable a single backend layer by toggling its enable bit
 * in the module control register.
 */
void sun4i_backend_layer_enable(struct sun4i_backend *backend,
				int layer, bool enable)
{
	u32 val = enable ? SUN4I_BACKEND_MODCTL_LAY_EN(layer) : 0;

	DRM_DEBUG_DRIVER("%sabling layer %d\n", enable ? "En" : "Dis",
			 layer);

	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG,
			   SUN4I_BACKEND_MODCTL_LAY_EN(layer), val);
}
96*4882a593Smuzhiyun 
/*
 * Translate a DRM fourcc code into the backend's layer framebuffer
 * format field. Returns -EINVAL (leaving *mode untouched) for formats
 * the layer cannot scan out.
 */
static int sun4i_backend_drm_format_to_layer(u32 format, u32 *mode)
{
	u32 fbfmt;

	switch (format) {
	case DRM_FORMAT_ARGB8888:
		fbfmt = SUN4I_BACKEND_LAY_FBFMT_ARGB8888;
		break;
	case DRM_FORMAT_ARGB4444:
		fbfmt = SUN4I_BACKEND_LAY_FBFMT_ARGB4444;
		break;
	case DRM_FORMAT_ARGB1555:
		fbfmt = SUN4I_BACKEND_LAY_FBFMT_ARGB1555;
		break;
	case DRM_FORMAT_RGBA5551:
		fbfmt = SUN4I_BACKEND_LAY_FBFMT_RGBA5551;
		break;
	case DRM_FORMAT_RGBA4444:
		fbfmt = SUN4I_BACKEND_LAY_FBFMT_RGBA4444;
		break;
	case DRM_FORMAT_XRGB8888:
		fbfmt = SUN4I_BACKEND_LAY_FBFMT_XRGB8888;
		break;
	case DRM_FORMAT_RGB888:
		fbfmt = SUN4I_BACKEND_LAY_FBFMT_RGB888;
		break;
	case DRM_FORMAT_RGB565:
		fbfmt = SUN4I_BACKEND_LAY_FBFMT_RGB565;
		break;
	default:
		return -EINVAL;
	}

	*mode = fbfmt;

	return 0;
}
138*4882a593Smuzhiyun 
/*
 * Pixel formats the backend can scan out on its own (see
 * sun4i_backend_format_is_supported(), which additionally restricts
 * buffers to the linear modifier).
 */
static const uint32_t sun4i_backend_formats[] = {
	DRM_FORMAT_ARGB1555,
	DRM_FORMAT_ARGB4444,
	DRM_FORMAT_ARGB8888,
	DRM_FORMAT_RGB565,
	DRM_FORMAT_RGB888,
	DRM_FORMAT_RGBA4444,
	DRM_FORMAT_RGBA5551,
	DRM_FORMAT_UYVY,
	DRM_FORMAT_VYUY,
	DRM_FORMAT_XRGB8888,
	DRM_FORMAT_YUYV,
	DRM_FORMAT_YVYU,
};
153*4882a593Smuzhiyun 
sun4i_backend_format_is_supported(uint32_t fmt,uint64_t modifier)154*4882a593Smuzhiyun bool sun4i_backend_format_is_supported(uint32_t fmt, uint64_t modifier)
155*4882a593Smuzhiyun {
156*4882a593Smuzhiyun 	unsigned int i;
157*4882a593Smuzhiyun 
158*4882a593Smuzhiyun 	if (modifier != DRM_FORMAT_MOD_LINEAR)
159*4882a593Smuzhiyun 		return false;
160*4882a593Smuzhiyun 
161*4882a593Smuzhiyun 	for (i = 0; i < ARRAY_SIZE(sun4i_backend_formats); i++)
162*4882a593Smuzhiyun 		if (sun4i_backend_formats[i] == fmt)
163*4882a593Smuzhiyun 			return true;
164*4882a593Smuzhiyun 
165*4882a593Smuzhiyun 	return false;
166*4882a593Smuzhiyun }
167*4882a593Smuzhiyun 
/*
 * Program a layer's on-screen geometry: its size and base coordinates.
 * For the primary plane the global display size register is updated to
 * the plane's CRTC size as well.
 */
int sun4i_backend_update_layer_coord(struct sun4i_backend *backend,
				     int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;

	DRM_DEBUG_DRIVER("Updating layer %d\n", layer);

	if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
		DRM_DEBUG_DRIVER("Primary layer, updating global size W: %u H: %u\n",
				 state->crtc_w, state->crtc_h);
		regmap_write(backend->engine.regs, SUN4I_BACKEND_DISSIZE_REG,
			     SUN4I_BACKEND_DISSIZE(state->crtc_w,
						   state->crtc_h));
	}

	/* Set height and width */
	DRM_DEBUG_DRIVER("Layer size W: %u H: %u\n",
			 state->crtc_w, state->crtc_h);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_LAYSIZE_REG(layer),
		     SUN4I_BACKEND_LAYSIZE(state->crtc_w,
					   state->crtc_h));

	/* Set base coordinates */
	DRM_DEBUG_DRIVER("Layer coordinates X: %d Y: %d\n",
			 state->crtc_x, state->crtc_y);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_LAYCOOR_REG(layer),
		     SUN4I_BACKEND_LAYCOOR(state->crtc_x,
					   state->crtc_y));

	return 0;
}
199*4882a593Smuzhiyun 
/*
 * Configure a layer to scan out a packed YUV framebuffer: load the
 * BT.601 YUV-to-RGB coefficients, flag the layer as YUV, and program
 * the input format and pixel sequence registers. Always returns 0.
 */
static int sun4i_backend_update_yuv_format(struct sun4i_backend *backend,
					   int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct drm_framebuffer *fb = state->fb;
	const struct drm_format_info *format = fb->format;
	const uint32_t fmt = format->format;
	u32 val = SUN4I_BACKEND_IYUVCTL_EN;
	int i;

	/* Load the YUV-to-RGB conversion matrix. */
	for (i = 0; i < ARRAY_SIZE(sunxi_bt601_yuv2rgb_coef); i++)
		regmap_write(backend->engine.regs,
			     SUN4I_BACKEND_YGCOEF_REG(i),
			     sunxi_bt601_yuv2rgb_coef[i]);

	/*
	 * We should do that only for a single plane, but the
	 * framebuffer's atomic_check has our back on this.
	 */
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN,
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN);

	/* TODO: Add support for the multi-planar YUV formats */
	if (drm_format_info_is_yuv_packed(format) &&
	    drm_format_info_is_yuv_sampling_422(format))
		val |= SUN4I_BACKEND_IYUVCTL_FBFMT_PACKED_YUV422;
	else
		DRM_DEBUG_DRIVER("Unsupported YUV format (0x%x)\n", fmt);

	/*
	 * Allwinner seems to list the pixel sequence from right to left, while
	 * DRM lists it from left to right.
	 */
	switch (fmt) {
	case DRM_FORMAT_YUYV:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_VYUY;
		break;
	case DRM_FORMAT_YVYU:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_UYVY;
		break;
	case DRM_FORMAT_UYVY:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_YVYU;
		break;
	case DRM_FORMAT_VYUY:
		val |= SUN4I_BACKEND_IYUVCTL_FBPS_YUYV;
		break;
	default:
		/* Falls through with val unchanged; only a debug message. */
		DRM_DEBUG_DRIVER("Unsupported YUV pixel sequence (0x%x)\n",
				 fmt);
	}

	regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVCTL_REG, val);

	return 0;
}
256*4882a593Smuzhiyun 
/*
 * Program a layer's pixel format and related attributes: interlaced
 * output mode, global alpha, and either the YUV input path (for YUV
 * framebuffers) or the RGB framebuffer format field.
 *
 * Returns 0 on success or -EINVAL if the format cannot be mapped.
 */
int sun4i_backend_update_layer_formats(struct sun4i_backend *backend,
				       int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct drm_framebuffer *fb = state->fb;
	bool interlaced = false;
	u32 val;
	int ret;

	/* Clear the YUV mode */
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0);

	if (plane->state->crtc)
		interlaced = plane->state->crtc->state->adjusted_mode.flags
			& DRM_MODE_FLAG_INTERLACE;

	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG,
			   SUN4I_BACKEND_MODCTL_ITLMOD_EN,
			   interlaced ? SUN4I_BACKEND_MODCTL_ITLMOD_EN : 0);

	DRM_DEBUG_DRIVER("Switching display backend interlaced mode %s\n",
			 interlaced ? "on" : "off");

	/* Global alpha uses the top 8 bits of the 16-bit plane alpha. */
	val = SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA(state->alpha >> 8);
	if (state->alpha != DRM_BLEND_ALPHA_OPAQUE)
		val |= SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN;
	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_MASK |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_GLBALPHA_EN,
			   val);

	/* YUV framebuffers take a dedicated configuration path. */
	if (fb->format->is_yuv)
		return sun4i_backend_update_yuv_format(backend, layer, plane);

	ret = sun4i_backend_drm_format_to_layer(fb->format->format, &val);
	if (ret) {
		DRM_DEBUG_DRIVER("Invalid format\n");
		return ret;
	}

	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG1(layer),
			   SUN4I_BACKEND_ATTCTL_REG1_LAY_FBFMT, val);

	return 0;
}
305*4882a593Smuzhiyun 
/*
 * Switch a layer's input to the frontend path (VDOEN) and program the
 * framebuffer format of the data the frontend will feed it.
 *
 * Returns 0 on success or -EINVAL if the format cannot be mapped.
 */
int sun4i_backend_update_layer_frontend(struct sun4i_backend *backend,
					int layer, uint32_t fmt)
{
	u32 val;
	int ret;

	ret = sun4i_backend_drm_format_to_layer(fmt, &val);
	if (ret) {
		DRM_DEBUG_DRIVER("Invalid format\n");
		return ret;
	}

	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN,
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN);

	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG1(layer),
			   SUN4I_BACKEND_ATTCTL_REG1_LAY_FBFMT, val);

	return 0;
}
329*4882a593Smuzhiyun 
/*
 * Program the scanout address and line width (expressed in bits) for a
 * packed YUV framebuffer. Only plane 0 of the buffer is used.
 * Always returns 0.
 */
static int sun4i_backend_update_yuv_buffer(struct sun4i_backend *backend,
					   struct drm_framebuffer *fb,
					   dma_addr_t paddr)
{
	/* TODO: Add support for the multi-planar YUV formats */
	DRM_DEBUG_DRIVER("Setting packed YUV buffer address to %pad\n", &paddr);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVADD_REG(0), paddr);

	DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
	regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVLINEWIDTH_REG(0),
		     fb->pitches[0] * 8);

	return 0;
}
344*4882a593Smuzhiyun 
/*
 * Program a layer's scanout buffer: the line width and the DMA address
 * of the framebuffer. The address is written in bit units, split across
 * a low-32-bit register and a per-layer high-bits field; YUV buffers
 * are delegated to sun4i_backend_update_yuv_buffer().
 */
int sun4i_backend_update_layer_buffer(struct sun4i_backend *backend,
				      int layer, struct drm_plane *plane)
{
	struct drm_plane_state *state = plane->state;
	struct drm_framebuffer *fb = state->fb;
	u32 lo_paddr, hi_paddr;
	dma_addr_t paddr;

	/* Set the line width */
	DRM_DEBUG_DRIVER("Layer line width: %d bits\n", fb->pitches[0] * 8);
	regmap_write(backend->engine.regs,
		     SUN4I_BACKEND_LAYLINEWIDTH_REG(layer),
		     fb->pitches[0] * 8);

	/* Get the start of the displayed memory */
	paddr = drm_fb_cma_get_gem_addr(fb, state, 0);
	DRM_DEBUG_DRIVER("Setting buffer address to %pad\n", &paddr);

	if (fb->format->is_yuv)
		return sun4i_backend_update_yuv_buffer(backend, fb, paddr);

	/* Write the 32 lower bits of the address (in bits) */
	lo_paddr = paddr << 3;
	DRM_DEBUG_DRIVER("Setting address lower bits to 0x%x\n", lo_paddr);
	regmap_write(backend->engine.regs,
		     SUN4I_BACKEND_LAYFB_L32ADD_REG(layer),
		     lo_paddr);

	/* And the upper bits (bit address >> 32, i.e. byte address >> 29) */
	hi_paddr = paddr >> 29;
	DRM_DEBUG_DRIVER("Setting address high bits to 0x%x\n", hi_paddr);
	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_LAYFB_H4ADD_REG,
			   SUN4I_BACKEND_LAYFB_H4ADD_MSK(layer),
			   SUN4I_BACKEND_LAYFB_H4ADD(layer, hi_paddr));

	return 0;
}
382*4882a593Smuzhiyun 
sun4i_backend_update_layer_zpos(struct sun4i_backend * backend,int layer,struct drm_plane * plane)383*4882a593Smuzhiyun int sun4i_backend_update_layer_zpos(struct sun4i_backend *backend, int layer,
384*4882a593Smuzhiyun 				    struct drm_plane *plane)
385*4882a593Smuzhiyun {
386*4882a593Smuzhiyun 	struct drm_plane_state *state = plane->state;
387*4882a593Smuzhiyun 	struct sun4i_layer_state *p_state = state_to_sun4i_layer_state(state);
388*4882a593Smuzhiyun 	unsigned int priority = state->normalized_zpos;
389*4882a593Smuzhiyun 	unsigned int pipe = p_state->pipe;
390*4882a593Smuzhiyun 
391*4882a593Smuzhiyun 	DRM_DEBUG_DRIVER("Setting layer %d's priority to %d and pipe %d\n",
392*4882a593Smuzhiyun 			 layer, priority, pipe);
393*4882a593Smuzhiyun 	regmap_update_bits(backend->engine.regs, SUN4I_BACKEND_ATTCTL_REG0(layer),
394*4882a593Smuzhiyun 			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL_MASK |
395*4882a593Smuzhiyun 			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL_MASK,
396*4882a593Smuzhiyun 			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PIPESEL(p_state->pipe) |
397*4882a593Smuzhiyun 			   SUN4I_BACKEND_ATTCTL_REG0_LAY_PRISEL(priority));
398*4882a593Smuzhiyun 
399*4882a593Smuzhiyun 	return 0;
400*4882a593Smuzhiyun }
401*4882a593Smuzhiyun 
/* Reset a layer's input routing: clear its frontend (VDOEN) and YUV bits. */
void sun4i_backend_cleanup_layer(struct sun4i_backend *backend,
				 int layer)
{
	regmap_update_bits(backend->engine.regs,
			   SUN4I_BACKEND_ATTCTL_REG0(layer),
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_VDOEN |
			   SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN, 0);
}
410*4882a593Smuzhiyun 
sun4i_backend_plane_uses_scaler(struct drm_plane_state * state)411*4882a593Smuzhiyun static bool sun4i_backend_plane_uses_scaler(struct drm_plane_state *state)
412*4882a593Smuzhiyun {
413*4882a593Smuzhiyun 	u16 src_h = state->src_h >> 16;
414*4882a593Smuzhiyun 	u16 src_w = state->src_w >> 16;
415*4882a593Smuzhiyun 
416*4882a593Smuzhiyun 	DRM_DEBUG_DRIVER("Input size %dx%d, output size %dx%d\n",
417*4882a593Smuzhiyun 			 src_w, src_h, state->crtc_w, state->crtc_h);
418*4882a593Smuzhiyun 
419*4882a593Smuzhiyun 	if ((state->crtc_h != src_h) || (state->crtc_w != src_w))
420*4882a593Smuzhiyun 		return true;
421*4882a593Smuzhiyun 
422*4882a593Smuzhiyun 	return false;
423*4882a593Smuzhiyun }
424*4882a593Smuzhiyun 
/*
 * Decide whether a plane has to be routed through the frontend (the
 * scaler / format converter) instead of being scanned out directly by
 * the backend. The frontend must exist and support the format; it is
 * then used when the backend alone cannot handle the format, or when
 * scaling is required.
 */
static bool sun4i_backend_plane_uses_frontend(struct drm_plane_state *state)
{
	struct sun4i_layer *layer = plane_to_sun4i_layer(state->plane);
	struct sun4i_backend *backend = layer->backend;
	uint32_t format = state->fb->format->format;
	uint64_t modifier = state->fb->modifier;

	/* No frontend probed for this backend: it can never be used. */
	if (IS_ERR(backend->frontend))
		return false;

	if (!sun4i_frontend_format_is_supported(format, modifier))
		return false;

	if (!sun4i_backend_format_is_supported(format, modifier))
		return true;

	/*
	 * TODO: The backend alone allows 2x and 4x integer scaling, including
	 * support for an alpha component (which the frontend doesn't support).
	 * Use the backend directly instead of the frontend in this case, with
	 * another test to return false.
	 */

	if (sun4i_backend_plane_uses_scaler(state))
		return true;

	/*
	 * Here the format is supported by both the frontend and the backend
	 * and no frontend scaling is required, so use the backend directly.
	 */
	return false;
}
457*4882a593Smuzhiyun 
/*
 * Check whether a plane state can be displayed at all, reporting via
 * *uses_frontend whether the frontend path must be taken. A plane that
 * needs scaling but cannot use the frontend is unsupported.
 */
static bool sun4i_backend_plane_is_supported(struct drm_plane_state *state,
					     bool *uses_frontend)
{
	*uses_frontend = sun4i_backend_plane_uses_frontend(state);
	if (*uses_frontend)
		return true;

	/* Scaling is not supported without the frontend. */
	return !sun4i_backend_plane_uses_scaler(state);
}
474*4882a593Smuzhiyun 
/*
 * Before touching any registers for a new commit, wait (up to 50 ms)
 * for the LOADCTL bit set by the previous sun4i_backend_commit() to
 * clear, i.e. for the previous shadow-register load to complete.
 */
static void sun4i_backend_atomic_begin(struct sunxi_engine *engine,
				       struct drm_crtc_state *old_state)
{
	u32 val;

	WARN_ON(regmap_read_poll_timeout(engine->regs,
					 SUN4I_BACKEND_REGBUFFCTL_REG,
					 val, !(val & SUN4I_BACKEND_REGBUFFCTL_LOADCTL),
					 100, 50000));
}
485*4882a593Smuzhiyun 
/*
 * Validate the set of planes assigned to this engine's CRTC against the
 * backend's hardware limits (alpha plane count, YUV plane count, planes
 * routed through the frontend), and assign each plane to one of the two
 * hardware pipes based on its normalized zpos.
 *
 * Returns 0 when the configuration is acceptable, -EINVAL otherwise.
 */
static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
				      struct drm_crtc_state *crtc_state)
{
	struct drm_plane_state *plane_states[SUN4I_BACKEND_NUM_LAYERS] = { 0 };
	struct sun4i_backend *backend = engine_to_sun4i_backend(engine);
	struct drm_atomic_state *state = crtc_state->state;
	struct drm_device *drm = state->dev;
	struct drm_plane *plane;
	unsigned int num_planes = 0;
	unsigned int num_alpha_planes = 0;
	unsigned int num_frontend_planes = 0;
	unsigned int num_alpha_planes_max = 1;
	unsigned int num_yuv_planes = 0;
	unsigned int current_pipe = 0;
	unsigned int i;

	DRM_DEBUG_DRIVER("Starting checking our planes\n");

	/* Nothing to re-validate if no plane changed. */
	if (!crtc_state->planes_changed)
		return 0;

	/* First pass: classify each enabled plane and sort them by zpos. */
	drm_for_each_plane_mask(plane, drm, crtc_state->plane_mask) {
		struct drm_plane_state *plane_state =
			drm_atomic_get_plane_state(state, plane);
		struct sun4i_layer_state *layer_state =
			state_to_sun4i_layer_state(plane_state);
		struct drm_framebuffer *fb = plane_state->fb;
		struct drm_format_name_buf format_name;

		if (!sun4i_backend_plane_is_supported(plane_state,
						      &layer_state->uses_frontend))
			return -EINVAL;

		if (layer_state->uses_frontend) {
			DRM_DEBUG_DRIVER("Using the frontend for plane %d\n",
					 plane->index);
			num_frontend_planes++;
		} else {
			if (fb->format->is_yuv) {
				DRM_DEBUG_DRIVER("Plane FB format is YUV\n");
				num_yuv_planes++;
			}
		}

		DRM_DEBUG_DRIVER("Plane FB format is %s\n",
				 drm_get_format_name(fb->format->format,
						     &format_name));
		/* Per-pixel alpha or a non-opaque global alpha both count. */
		if (fb->format->has_alpha || (plane_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
			num_alpha_planes++;

		DRM_DEBUG_DRIVER("Plane zpos is %d\n",
				 plane_state->normalized_zpos);

		/* Sort our planes by Zpos */
		plane_states[plane_state->normalized_zpos] = plane_state;

		num_planes++;
	}

	/* All our planes were disabled, bail out */
	if (!num_planes)
		return 0;

	/*
	 * The hardware is a bit unusual here.
	 *
	 * Even though it supports 4 layers, it does the composition
	 * in two separate steps.
	 *
	 * The first one is assigning a layer to one of its two
	 * pipes. If more that 1 layer is assigned to the same pipe,
	 * and if pixels overlaps, the pipe will take the pixel from
	 * the layer with the highest priority.
	 *
	 * The second step is the actual alpha blending, that takes
	 * the two pipes as input, and uses the potential alpha
	 * component to do the transparency between the two.
	 *
	 * This two-step scenario makes us unable to guarantee a
	 * robust alpha blending between the 4 layers in all
	 * situations, since this means that we need to have one layer
	 * with alpha at the lowest position of our two pipes.
	 *
	 * However, we cannot even do that on every platform, since
	 * the hardware has a bug where the lowest plane of the lowest
	 * pipe (pipe 0, priority 0), if it has any alpha, will
	 * discard the pixel data entirely and just display the pixels
	 * in the background color (black by default).
	 *
	 * This means that on the affected platforms, we effectively
	 * have only three valid configurations with alpha, all of
	 * them with the alpha being on pipe1 with the lowest
	 * position, which can be 1, 2 or 3 depending on the number of
	 * planes and their zpos.
	 */

	/* For platforms that are not affected by the issue described above. */
	if (backend->quirks->supports_lowest_plane_alpha)
		num_alpha_planes_max++;

	if (num_alpha_planes > num_alpha_planes_max) {
		DRM_DEBUG_DRIVER("Too many planes with alpha, rejecting...\n");
		return -EINVAL;
	}

	/* We can't have an alpha plane at the lowest position */
	if (!backend->quirks->supports_lowest_plane_alpha &&
	    (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE))
		return -EINVAL;

	/*
	 * Second pass (in zpos order, skipping the lowest plane): bump
	 * to pipe 1 at the first plane using alpha, so alpha blending
	 * happens between the two pipes.
	 */
	for (i = 1; i < num_planes; i++) {
		struct drm_plane_state *p_state = plane_states[i];
		struct drm_framebuffer *fb = p_state->fb;
		struct sun4i_layer_state *s_state = state_to_sun4i_layer_state(p_state);

		/*
		 * The only alpha position is the lowest plane of the
		 * second pipe.
		 */
		if (fb->format->has_alpha || (p_state->alpha != DRM_BLEND_ALPHA_OPAQUE))
			current_pipe++;

		s_state->pipe = current_pipe;
	}

	/* We can only have a single YUV plane at a time */
	if (num_yuv_planes > SUN4I_BACKEND_NUM_YUV_PLANES) {
		DRM_DEBUG_DRIVER("Too many planes with YUV, rejecting...\n");
		return -EINVAL;
	}

	if (num_frontend_planes > SUN4I_BACKEND_NUM_FRONTEND_LAYERS) {
		DRM_DEBUG_DRIVER("Too many planes going through the frontend, rejecting\n");
		return -EINVAL;
	}

	DRM_DEBUG_DRIVER("State valid with %u planes, %u alpha, %u video, %u YUV\n",
			 num_planes, num_alpha_planes, num_frontend_planes,
			 num_yuv_planes);

	return 0;
}
628*4882a593Smuzhiyun 
sun4i_backend_vblank_quirk(struct sunxi_engine * engine)629*4882a593Smuzhiyun static void sun4i_backend_vblank_quirk(struct sunxi_engine *engine)
630*4882a593Smuzhiyun {
631*4882a593Smuzhiyun 	struct sun4i_backend *backend = engine_to_sun4i_backend(engine);
632*4882a593Smuzhiyun 	struct sun4i_frontend *frontend = backend->frontend;
633*4882a593Smuzhiyun 
634*4882a593Smuzhiyun 	if (!frontend)
635*4882a593Smuzhiyun 		return;
636*4882a593Smuzhiyun 
637*4882a593Smuzhiyun 	/*
638*4882a593Smuzhiyun 	 * In a teardown scenario with the frontend involved, we have
639*4882a593Smuzhiyun 	 * to keep the frontend enabled until the next vblank, and
640*4882a593Smuzhiyun 	 * only then disable it.
641*4882a593Smuzhiyun 	 *
642*4882a593Smuzhiyun 	 * This is due to the fact that the backend will not take into
643*4882a593Smuzhiyun 	 * account the new configuration (with the plane that used to
644*4882a593Smuzhiyun 	 * be fed by the frontend now disabled) until we write to the
645*4882a593Smuzhiyun 	 * commit bit and the hardware fetches the new configuration
646*4882a593Smuzhiyun 	 * during the next vblank.
647*4882a593Smuzhiyun 	 *
648*4882a593Smuzhiyun 	 * So we keep the frontend around in order to prevent any
649*4882a593Smuzhiyun 	 * visual artifacts.
650*4882a593Smuzhiyun 	 */
651*4882a593Smuzhiyun 	spin_lock(&backend->frontend_lock);
652*4882a593Smuzhiyun 	if (backend->frontend_teardown) {
653*4882a593Smuzhiyun 		sun4i_frontend_exit(frontend);
654*4882a593Smuzhiyun 		backend->frontend_teardown = false;
655*4882a593Smuzhiyun 	}
656*4882a593Smuzhiyun 	spin_unlock(&backend->frontend_lock);
657*4882a593Smuzhiyun };
658*4882a593Smuzhiyun 
sun4i_backend_init_sat(struct device * dev)659*4882a593Smuzhiyun static int sun4i_backend_init_sat(struct device *dev) {
660*4882a593Smuzhiyun 	struct sun4i_backend *backend = dev_get_drvdata(dev);
661*4882a593Smuzhiyun 	int ret;
662*4882a593Smuzhiyun 
663*4882a593Smuzhiyun 	backend->sat_reset = devm_reset_control_get(dev, "sat");
664*4882a593Smuzhiyun 	if (IS_ERR(backend->sat_reset)) {
665*4882a593Smuzhiyun 		dev_err(dev, "Couldn't get the SAT reset line\n");
666*4882a593Smuzhiyun 		return PTR_ERR(backend->sat_reset);
667*4882a593Smuzhiyun 	}
668*4882a593Smuzhiyun 
669*4882a593Smuzhiyun 	ret = reset_control_deassert(backend->sat_reset);
670*4882a593Smuzhiyun 	if (ret) {
671*4882a593Smuzhiyun 		dev_err(dev, "Couldn't deassert the SAT reset line\n");
672*4882a593Smuzhiyun 		return ret;
673*4882a593Smuzhiyun 	}
674*4882a593Smuzhiyun 
675*4882a593Smuzhiyun 	backend->sat_clk = devm_clk_get(dev, "sat");
676*4882a593Smuzhiyun 	if (IS_ERR(backend->sat_clk)) {
677*4882a593Smuzhiyun 		dev_err(dev, "Couldn't get our SAT clock\n");
678*4882a593Smuzhiyun 		ret = PTR_ERR(backend->sat_clk);
679*4882a593Smuzhiyun 		goto err_assert_reset;
680*4882a593Smuzhiyun 	}
681*4882a593Smuzhiyun 
682*4882a593Smuzhiyun 	ret = clk_prepare_enable(backend->sat_clk);
683*4882a593Smuzhiyun 	if (ret) {
684*4882a593Smuzhiyun 		dev_err(dev, "Couldn't enable the SAT clock\n");
685*4882a593Smuzhiyun 		return ret;
686*4882a593Smuzhiyun 	}
687*4882a593Smuzhiyun 
688*4882a593Smuzhiyun 	return 0;
689*4882a593Smuzhiyun 
690*4882a593Smuzhiyun err_assert_reset:
691*4882a593Smuzhiyun 	reset_control_assert(backend->sat_reset);
692*4882a593Smuzhiyun 	return ret;
693*4882a593Smuzhiyun }
694*4882a593Smuzhiyun 
sun4i_backend_free_sat(struct device * dev)695*4882a593Smuzhiyun static int sun4i_backend_free_sat(struct device *dev) {
696*4882a593Smuzhiyun 	struct sun4i_backend *backend = dev_get_drvdata(dev);
697*4882a593Smuzhiyun 
698*4882a593Smuzhiyun 	clk_disable_unprepare(backend->sat_clk);
699*4882a593Smuzhiyun 	reset_control_assert(backend->sat_reset);
700*4882a593Smuzhiyun 
701*4882a593Smuzhiyun 	return 0;
702*4882a593Smuzhiyun }
703*4882a593Smuzhiyun 
704*4882a593Smuzhiyun /*
705*4882a593Smuzhiyun  * The display backend can take video output from the display frontend, or
706*4882a593Smuzhiyun  * the display enhancement unit on the A80, as input for one it its layers.
707*4882a593Smuzhiyun  * This relationship within the display pipeline is encoded in the device
708*4882a593Smuzhiyun  * tree with of_graph, and we use it here to figure out which backend, if
709*4882a593Smuzhiyun  * there are 2 or more, we are currently probing. The number would be in
710*4882a593Smuzhiyun  * the "reg" property of the upstream output port endpoint.
711*4882a593Smuzhiyun  */
sun4i_backend_of_get_id(struct device_node * node)712*4882a593Smuzhiyun static int sun4i_backend_of_get_id(struct device_node *node)
713*4882a593Smuzhiyun {
714*4882a593Smuzhiyun 	struct device_node *ep, *remote;
715*4882a593Smuzhiyun 	struct of_endpoint of_ep;
716*4882a593Smuzhiyun 
717*4882a593Smuzhiyun 	/* Input port is 0, and we want the first endpoint. */
718*4882a593Smuzhiyun 	ep = of_graph_get_endpoint_by_regs(node, 0, -1);
719*4882a593Smuzhiyun 	if (!ep)
720*4882a593Smuzhiyun 		return -EINVAL;
721*4882a593Smuzhiyun 
722*4882a593Smuzhiyun 	remote = of_graph_get_remote_endpoint(ep);
723*4882a593Smuzhiyun 	of_node_put(ep);
724*4882a593Smuzhiyun 	if (!remote)
725*4882a593Smuzhiyun 		return -EINVAL;
726*4882a593Smuzhiyun 
727*4882a593Smuzhiyun 	of_graph_parse_endpoint(remote, &of_ep);
728*4882a593Smuzhiyun 	of_node_put(remote);
729*4882a593Smuzhiyun 	return of_ep.id;
730*4882a593Smuzhiyun }
731*4882a593Smuzhiyun 
732*4882a593Smuzhiyun /* TODO: This needs to take multiple pipelines into account */
sun4i_backend_find_frontend(struct sun4i_drv * drv,struct device_node * node)733*4882a593Smuzhiyun static struct sun4i_frontend *sun4i_backend_find_frontend(struct sun4i_drv *drv,
734*4882a593Smuzhiyun 							  struct device_node *node)
735*4882a593Smuzhiyun {
736*4882a593Smuzhiyun 	struct device_node *port, *ep, *remote;
737*4882a593Smuzhiyun 	struct sun4i_frontend *frontend;
738*4882a593Smuzhiyun 
739*4882a593Smuzhiyun 	port = of_graph_get_port_by_id(node, 0);
740*4882a593Smuzhiyun 	if (!port)
741*4882a593Smuzhiyun 		return ERR_PTR(-EINVAL);
742*4882a593Smuzhiyun 
743*4882a593Smuzhiyun 	for_each_available_child_of_node(port, ep) {
744*4882a593Smuzhiyun 		remote = of_graph_get_remote_port_parent(ep);
745*4882a593Smuzhiyun 		if (!remote)
746*4882a593Smuzhiyun 			continue;
747*4882a593Smuzhiyun 		of_node_put(remote);
748*4882a593Smuzhiyun 
749*4882a593Smuzhiyun 		/* does this node match any registered engines? */
750*4882a593Smuzhiyun 		list_for_each_entry(frontend, &drv->frontend_list, list) {
751*4882a593Smuzhiyun 			if (remote == frontend->node) {
752*4882a593Smuzhiyun 				of_node_put(port);
753*4882a593Smuzhiyun 				of_node_put(ep);
754*4882a593Smuzhiyun 				return frontend;
755*4882a593Smuzhiyun 			}
756*4882a593Smuzhiyun 		}
757*4882a593Smuzhiyun 	}
758*4882a593Smuzhiyun 	of_node_put(port);
759*4882a593Smuzhiyun 	return ERR_PTR(-EINVAL);
760*4882a593Smuzhiyun }
761*4882a593Smuzhiyun 
762*4882a593Smuzhiyun static const struct sunxi_engine_ops sun4i_backend_engine_ops = {
763*4882a593Smuzhiyun 	.atomic_begin			= sun4i_backend_atomic_begin,
764*4882a593Smuzhiyun 	.atomic_check			= sun4i_backend_atomic_check,
765*4882a593Smuzhiyun 	.commit				= sun4i_backend_commit,
766*4882a593Smuzhiyun 	.layers_init			= sun4i_layers_init,
767*4882a593Smuzhiyun 	.apply_color_correction		= sun4i_backend_apply_color_correction,
768*4882a593Smuzhiyun 	.disable_color_correction	= sun4i_backend_disable_color_correction,
769*4882a593Smuzhiyun 	.vblank_quirk			= sun4i_backend_vblank_quirk,
770*4882a593Smuzhiyun };
771*4882a593Smuzhiyun 
772*4882a593Smuzhiyun static const struct regmap_config sun4i_backend_regmap_config = {
773*4882a593Smuzhiyun 	.reg_bits	= 32,
774*4882a593Smuzhiyun 	.val_bits	= 32,
775*4882a593Smuzhiyun 	.reg_stride	= 4,
776*4882a593Smuzhiyun 	.max_register	= 0x5800,
777*4882a593Smuzhiyun };
778*4882a593Smuzhiyun 
sun4i_backend_bind(struct device * dev,struct device * master,void * data)779*4882a593Smuzhiyun static int sun4i_backend_bind(struct device *dev, struct device *master,
780*4882a593Smuzhiyun 			      void *data)
781*4882a593Smuzhiyun {
782*4882a593Smuzhiyun 	struct platform_device *pdev = to_platform_device(dev);
783*4882a593Smuzhiyun 	struct drm_device *drm = data;
784*4882a593Smuzhiyun 	struct sun4i_drv *drv = drm->dev_private;
785*4882a593Smuzhiyun 	struct sun4i_backend *backend;
786*4882a593Smuzhiyun 	const struct sun4i_backend_quirks *quirks;
787*4882a593Smuzhiyun 	struct resource *res;
788*4882a593Smuzhiyun 	void __iomem *regs;
789*4882a593Smuzhiyun 	int i, ret;
790*4882a593Smuzhiyun 
791*4882a593Smuzhiyun 	backend = devm_kzalloc(dev, sizeof(*backend), GFP_KERNEL);
792*4882a593Smuzhiyun 	if (!backend)
793*4882a593Smuzhiyun 		return -ENOMEM;
794*4882a593Smuzhiyun 	dev_set_drvdata(dev, backend);
795*4882a593Smuzhiyun 	spin_lock_init(&backend->frontend_lock);
796*4882a593Smuzhiyun 
797*4882a593Smuzhiyun 	if (of_find_property(dev->of_node, "interconnects", NULL)) {
798*4882a593Smuzhiyun 		/*
799*4882a593Smuzhiyun 		 * This assume we have the same DMA constraints for all our the
800*4882a593Smuzhiyun 		 * devices in our pipeline (all the backends, but also the
801*4882a593Smuzhiyun 		 * frontends). This sounds bad, but it has always been the case
802*4882a593Smuzhiyun 		 * for us, and DRM doesn't do per-device allocation either, so
803*4882a593Smuzhiyun 		 * we would need to fix DRM first...
804*4882a593Smuzhiyun 		 */
805*4882a593Smuzhiyun 		ret = of_dma_configure(drm->dev, dev->of_node, true);
806*4882a593Smuzhiyun 		if (ret)
807*4882a593Smuzhiyun 			return ret;
808*4882a593Smuzhiyun 	} else {
809*4882a593Smuzhiyun 		/*
810*4882a593Smuzhiyun 		 * If we don't have the interconnect property, most likely
811*4882a593Smuzhiyun 		 * because of an old DT, we need to set the DMA offset by hand
812*4882a593Smuzhiyun 		 * on our device since the RAM mapping is at 0 for the DMA bus,
813*4882a593Smuzhiyun 		 * unlike the CPU.
814*4882a593Smuzhiyun 		 *
815*4882a593Smuzhiyun 		 * XXX(hch): this has no business in a driver and needs to move
816*4882a593Smuzhiyun 		 * to the device tree.
817*4882a593Smuzhiyun 		 *
818*4882a593Smuzhiyun 		 * If we have two subsequent calls to dma_direct_set_offset
819*4882a593Smuzhiyun 		 * returns -EINVAL. Unfortunately, this happens when we have two
820*4882a593Smuzhiyun 		 * backends in the system, and will result in the driver
821*4882a593Smuzhiyun 		 * reporting an error while it has been setup properly before.
822*4882a593Smuzhiyun 		 * Ignore EINVAL, but it should really be removed eventually.
823*4882a593Smuzhiyun 		 */
824*4882a593Smuzhiyun 		ret = dma_direct_set_offset(drm->dev, PHYS_OFFSET, 0, SZ_4G);
825*4882a593Smuzhiyun 		if (ret && ret != -EINVAL)
826*4882a593Smuzhiyun 			return ret;
827*4882a593Smuzhiyun 	}
828*4882a593Smuzhiyun 
829*4882a593Smuzhiyun 	backend->engine.node = dev->of_node;
830*4882a593Smuzhiyun 	backend->engine.ops = &sun4i_backend_engine_ops;
831*4882a593Smuzhiyun 	backend->engine.id = sun4i_backend_of_get_id(dev->of_node);
832*4882a593Smuzhiyun 	if (backend->engine.id < 0)
833*4882a593Smuzhiyun 		return backend->engine.id;
834*4882a593Smuzhiyun 
835*4882a593Smuzhiyun 	backend->frontend = sun4i_backend_find_frontend(drv, dev->of_node);
836*4882a593Smuzhiyun 	if (IS_ERR(backend->frontend))
837*4882a593Smuzhiyun 		dev_warn(dev, "Couldn't find matching frontend, frontend features disabled\n");
838*4882a593Smuzhiyun 
839*4882a593Smuzhiyun 	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
840*4882a593Smuzhiyun 	regs = devm_ioremap_resource(dev, res);
841*4882a593Smuzhiyun 	if (IS_ERR(regs))
842*4882a593Smuzhiyun 		return PTR_ERR(regs);
843*4882a593Smuzhiyun 
844*4882a593Smuzhiyun 	backend->reset = devm_reset_control_get(dev, NULL);
845*4882a593Smuzhiyun 	if (IS_ERR(backend->reset)) {
846*4882a593Smuzhiyun 		dev_err(dev, "Couldn't get our reset line\n");
847*4882a593Smuzhiyun 		return PTR_ERR(backend->reset);
848*4882a593Smuzhiyun 	}
849*4882a593Smuzhiyun 
850*4882a593Smuzhiyun 	ret = reset_control_deassert(backend->reset);
851*4882a593Smuzhiyun 	if (ret) {
852*4882a593Smuzhiyun 		dev_err(dev, "Couldn't deassert our reset line\n");
853*4882a593Smuzhiyun 		return ret;
854*4882a593Smuzhiyun 	}
855*4882a593Smuzhiyun 
856*4882a593Smuzhiyun 	backend->bus_clk = devm_clk_get(dev, "ahb");
857*4882a593Smuzhiyun 	if (IS_ERR(backend->bus_clk)) {
858*4882a593Smuzhiyun 		dev_err(dev, "Couldn't get the backend bus clock\n");
859*4882a593Smuzhiyun 		ret = PTR_ERR(backend->bus_clk);
860*4882a593Smuzhiyun 		goto err_assert_reset;
861*4882a593Smuzhiyun 	}
862*4882a593Smuzhiyun 	clk_prepare_enable(backend->bus_clk);
863*4882a593Smuzhiyun 
864*4882a593Smuzhiyun 	backend->mod_clk = devm_clk_get(dev, "mod");
865*4882a593Smuzhiyun 	if (IS_ERR(backend->mod_clk)) {
866*4882a593Smuzhiyun 		dev_err(dev, "Couldn't get the backend module clock\n");
867*4882a593Smuzhiyun 		ret = PTR_ERR(backend->mod_clk);
868*4882a593Smuzhiyun 		goto err_disable_bus_clk;
869*4882a593Smuzhiyun 	}
870*4882a593Smuzhiyun 
871*4882a593Smuzhiyun 	ret = clk_set_rate_exclusive(backend->mod_clk, 300000000);
872*4882a593Smuzhiyun 	if (ret) {
873*4882a593Smuzhiyun 		dev_err(dev, "Couldn't set the module clock frequency\n");
874*4882a593Smuzhiyun 		goto err_disable_bus_clk;
875*4882a593Smuzhiyun 	}
876*4882a593Smuzhiyun 
877*4882a593Smuzhiyun 	clk_prepare_enable(backend->mod_clk);
878*4882a593Smuzhiyun 
879*4882a593Smuzhiyun 	backend->ram_clk = devm_clk_get(dev, "ram");
880*4882a593Smuzhiyun 	if (IS_ERR(backend->ram_clk)) {
881*4882a593Smuzhiyun 		dev_err(dev, "Couldn't get the backend RAM clock\n");
882*4882a593Smuzhiyun 		ret = PTR_ERR(backend->ram_clk);
883*4882a593Smuzhiyun 		goto err_disable_mod_clk;
884*4882a593Smuzhiyun 	}
885*4882a593Smuzhiyun 	clk_prepare_enable(backend->ram_clk);
886*4882a593Smuzhiyun 
887*4882a593Smuzhiyun 	if (of_device_is_compatible(dev->of_node,
888*4882a593Smuzhiyun 				    "allwinner,sun8i-a33-display-backend")) {
889*4882a593Smuzhiyun 		ret = sun4i_backend_init_sat(dev);
890*4882a593Smuzhiyun 		if (ret) {
891*4882a593Smuzhiyun 			dev_err(dev, "Couldn't init SAT resources\n");
892*4882a593Smuzhiyun 			goto err_disable_ram_clk;
893*4882a593Smuzhiyun 		}
894*4882a593Smuzhiyun 	}
895*4882a593Smuzhiyun 
896*4882a593Smuzhiyun 	backend->engine.regs = devm_regmap_init_mmio(dev, regs,
897*4882a593Smuzhiyun 						     &sun4i_backend_regmap_config);
898*4882a593Smuzhiyun 	if (IS_ERR(backend->engine.regs)) {
899*4882a593Smuzhiyun 		dev_err(dev, "Couldn't create the backend regmap\n");
900*4882a593Smuzhiyun 		return PTR_ERR(backend->engine.regs);
901*4882a593Smuzhiyun 	}
902*4882a593Smuzhiyun 
903*4882a593Smuzhiyun 	list_add_tail(&backend->engine.list, &drv->engine_list);
904*4882a593Smuzhiyun 
905*4882a593Smuzhiyun 	/*
906*4882a593Smuzhiyun 	 * Many of the backend's layer configuration registers have
907*4882a593Smuzhiyun 	 * undefined default values. This poses a risk as we use
908*4882a593Smuzhiyun 	 * regmap_update_bits in some places, and don't overwrite
909*4882a593Smuzhiyun 	 * the whole register.
910*4882a593Smuzhiyun 	 *
911*4882a593Smuzhiyun 	 * Clear the registers here to have something predictable.
912*4882a593Smuzhiyun 	 */
913*4882a593Smuzhiyun 	for (i = 0x800; i < 0x1000; i += 4)
914*4882a593Smuzhiyun 		regmap_write(backend->engine.regs, i, 0);
915*4882a593Smuzhiyun 
916*4882a593Smuzhiyun 	/* Disable registers autoloading */
917*4882a593Smuzhiyun 	regmap_write(backend->engine.regs, SUN4I_BACKEND_REGBUFFCTL_REG,
918*4882a593Smuzhiyun 		     SUN4I_BACKEND_REGBUFFCTL_AUTOLOAD_DIS);
919*4882a593Smuzhiyun 
920*4882a593Smuzhiyun 	/* Enable the backend */
921*4882a593Smuzhiyun 	regmap_write(backend->engine.regs, SUN4I_BACKEND_MODCTL_REG,
922*4882a593Smuzhiyun 		     SUN4I_BACKEND_MODCTL_DEBE_EN |
923*4882a593Smuzhiyun 		     SUN4I_BACKEND_MODCTL_START_CTL);
924*4882a593Smuzhiyun 
925*4882a593Smuzhiyun 	/* Set output selection if needed */
926*4882a593Smuzhiyun 	quirks = of_device_get_match_data(dev);
927*4882a593Smuzhiyun 	if (quirks->needs_output_muxing) {
928*4882a593Smuzhiyun 		/*
929*4882a593Smuzhiyun 		 * We assume there is no dynamic muxing of backends
930*4882a593Smuzhiyun 		 * and TCONs, so we select the backend with same ID.
931*4882a593Smuzhiyun 		 *
932*4882a593Smuzhiyun 		 * While dynamic selection might be interesting, since
933*4882a593Smuzhiyun 		 * the CRTC is tied to the TCON, while the layers are
934*4882a593Smuzhiyun 		 * tied to the backends, this means, we will need to
935*4882a593Smuzhiyun 		 * switch between groups of layers. There might not be
936*4882a593Smuzhiyun 		 * a way to represent this constraint in DRM.
937*4882a593Smuzhiyun 		 */
938*4882a593Smuzhiyun 		regmap_update_bits(backend->engine.regs,
939*4882a593Smuzhiyun 				   SUN4I_BACKEND_MODCTL_REG,
940*4882a593Smuzhiyun 				   SUN4I_BACKEND_MODCTL_OUT_SEL,
941*4882a593Smuzhiyun 				   (backend->engine.id
942*4882a593Smuzhiyun 				    ? SUN4I_BACKEND_MODCTL_OUT_LCD1
943*4882a593Smuzhiyun 				    : SUN4I_BACKEND_MODCTL_OUT_LCD0));
944*4882a593Smuzhiyun 	}
945*4882a593Smuzhiyun 
946*4882a593Smuzhiyun 	backend->quirks = quirks;
947*4882a593Smuzhiyun 
948*4882a593Smuzhiyun 	return 0;
949*4882a593Smuzhiyun 
950*4882a593Smuzhiyun err_disable_ram_clk:
951*4882a593Smuzhiyun 	clk_disable_unprepare(backend->ram_clk);
952*4882a593Smuzhiyun err_disable_mod_clk:
953*4882a593Smuzhiyun 	clk_rate_exclusive_put(backend->mod_clk);
954*4882a593Smuzhiyun 	clk_disable_unprepare(backend->mod_clk);
955*4882a593Smuzhiyun err_disable_bus_clk:
956*4882a593Smuzhiyun 	clk_disable_unprepare(backend->bus_clk);
957*4882a593Smuzhiyun err_assert_reset:
958*4882a593Smuzhiyun 	reset_control_assert(backend->reset);
959*4882a593Smuzhiyun 	return ret;
960*4882a593Smuzhiyun }
961*4882a593Smuzhiyun 
/*
 * Component unbind callback: tear down what sun4i_backend_bind() set
 * up, in reverse order — unlink the engine, free the A33 SAT resources
 * if present, gate the clocks and assert the reset line.
 */
static void sun4i_backend_unbind(struct device *dev, struct device *master,
				 void *data)
{
	struct sun4i_backend *backend = dev_get_drvdata(dev);

	list_del(&backend->engine.list);

	/* Only the A33 backend has the saturation sub-block. */
	if (of_device_is_compatible(dev->of_node,
				    "allwinner,sun8i-a33-display-backend"))
		sun4i_backend_free_sat(dev);

	clk_disable_unprepare(backend->ram_clk);
	/* Release the exclusive rate claim taken on the module clock in bind. */
	clk_rate_exclusive_put(backend->mod_clk);
	clk_disable_unprepare(backend->mod_clk);
	clk_disable_unprepare(backend->bus_clk);
	reset_control_assert(backend->reset);
}
979*4882a593Smuzhiyun 
980*4882a593Smuzhiyun static const struct component_ops sun4i_backend_ops = {
981*4882a593Smuzhiyun 	.bind	= sun4i_backend_bind,
982*4882a593Smuzhiyun 	.unbind	= sun4i_backend_unbind,
983*4882a593Smuzhiyun };
984*4882a593Smuzhiyun 
sun4i_backend_probe(struct platform_device * pdev)985*4882a593Smuzhiyun static int sun4i_backend_probe(struct platform_device *pdev)
986*4882a593Smuzhiyun {
987*4882a593Smuzhiyun 	return component_add(&pdev->dev, &sun4i_backend_ops);
988*4882a593Smuzhiyun }
989*4882a593Smuzhiyun 
sun4i_backend_remove(struct platform_device * pdev)990*4882a593Smuzhiyun static int sun4i_backend_remove(struct platform_device *pdev)
991*4882a593Smuzhiyun {
992*4882a593Smuzhiyun 	component_del(&pdev->dev, &sun4i_backend_ops);
993*4882a593Smuzhiyun 
994*4882a593Smuzhiyun 	return 0;
995*4882a593Smuzhiyun }
996*4882a593Smuzhiyun 
997*4882a593Smuzhiyun static const struct sun4i_backend_quirks sun4i_backend_quirks = {
998*4882a593Smuzhiyun 	.needs_output_muxing = true,
999*4882a593Smuzhiyun };
1000*4882a593Smuzhiyun 
1001*4882a593Smuzhiyun static const struct sun4i_backend_quirks sun5i_backend_quirks = {
1002*4882a593Smuzhiyun };
1003*4882a593Smuzhiyun 
1004*4882a593Smuzhiyun static const struct sun4i_backend_quirks sun6i_backend_quirks = {
1005*4882a593Smuzhiyun };
1006*4882a593Smuzhiyun 
1007*4882a593Smuzhiyun static const struct sun4i_backend_quirks sun7i_backend_quirks = {
1008*4882a593Smuzhiyun 	.needs_output_muxing = true,
1009*4882a593Smuzhiyun };
1010*4882a593Smuzhiyun 
1011*4882a593Smuzhiyun static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = {
1012*4882a593Smuzhiyun 	.supports_lowest_plane_alpha = true,
1013*4882a593Smuzhiyun };
1014*4882a593Smuzhiyun 
1015*4882a593Smuzhiyun static const struct sun4i_backend_quirks sun9i_backend_quirks = {
1016*4882a593Smuzhiyun };
1017*4882a593Smuzhiyun 
1018*4882a593Smuzhiyun static const struct of_device_id sun4i_backend_of_table[] = {
1019*4882a593Smuzhiyun 	{
1020*4882a593Smuzhiyun 		.compatible = "allwinner,sun4i-a10-display-backend",
1021*4882a593Smuzhiyun 		.data = &sun4i_backend_quirks,
1022*4882a593Smuzhiyun 	},
1023*4882a593Smuzhiyun 	{
1024*4882a593Smuzhiyun 		.compatible = "allwinner,sun5i-a13-display-backend",
1025*4882a593Smuzhiyun 		.data = &sun5i_backend_quirks,
1026*4882a593Smuzhiyun 	},
1027*4882a593Smuzhiyun 	{
1028*4882a593Smuzhiyun 		.compatible = "allwinner,sun6i-a31-display-backend",
1029*4882a593Smuzhiyun 		.data = &sun6i_backend_quirks,
1030*4882a593Smuzhiyun 	},
1031*4882a593Smuzhiyun 	{
1032*4882a593Smuzhiyun 		.compatible = "allwinner,sun7i-a20-display-backend",
1033*4882a593Smuzhiyun 		.data = &sun7i_backend_quirks,
1034*4882a593Smuzhiyun 	},
1035*4882a593Smuzhiyun 	{
1036*4882a593Smuzhiyun 		.compatible = "allwinner,sun8i-a23-display-backend",
1037*4882a593Smuzhiyun 		.data = &sun8i_a33_backend_quirks,
1038*4882a593Smuzhiyun 	},
1039*4882a593Smuzhiyun 	{
1040*4882a593Smuzhiyun 		.compatible = "allwinner,sun8i-a33-display-backend",
1041*4882a593Smuzhiyun 		.data = &sun8i_a33_backend_quirks,
1042*4882a593Smuzhiyun 	},
1043*4882a593Smuzhiyun 	{
1044*4882a593Smuzhiyun 		.compatible = "allwinner,sun9i-a80-display-backend",
1045*4882a593Smuzhiyun 		.data = &sun9i_backend_quirks,
1046*4882a593Smuzhiyun 	},
1047*4882a593Smuzhiyun 	{ }
1048*4882a593Smuzhiyun };
1049*4882a593Smuzhiyun MODULE_DEVICE_TABLE(of, sun4i_backend_of_table);
1050*4882a593Smuzhiyun 
1051*4882a593Smuzhiyun static struct platform_driver sun4i_backend_platform_driver = {
1052*4882a593Smuzhiyun 	.probe		= sun4i_backend_probe,
1053*4882a593Smuzhiyun 	.remove		= sun4i_backend_remove,
1054*4882a593Smuzhiyun 	.driver		= {
1055*4882a593Smuzhiyun 		.name		= "sun4i-backend",
1056*4882a593Smuzhiyun 		.of_match_table	= sun4i_backend_of_table,
1057*4882a593Smuzhiyun 	},
1058*4882a593Smuzhiyun };
1059*4882a593Smuzhiyun module_platform_driver(sun4i_backend_platform_driver);
1060*4882a593Smuzhiyun 
1061*4882a593Smuzhiyun MODULE_AUTHOR("Maxime Ripard <maxime.ripard@free-electrons.com>");
1062*4882a593Smuzhiyun MODULE_DESCRIPTION("Allwinner A10 Display Backend Driver");
1063*4882a593Smuzhiyun MODULE_LICENSE("GPL");
1064