// SPDX-License-Identifier: GPL-2.0-only
/*
 * (C) COPYRIGHT 2016 ARM Limited. All rights reserved.
 * Author: Liviu Dudau <Liviu.Dudau@arm.com>
 *
 * ARM Mali DP500/DP550/DP650 KMS/DRM driver
 */

#include <linux/module.h>
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/of_device.h>
#include <linux/of_graph.h>
#include <linux/of_reserved_mem.h>
#include <linux/pm_runtime.h>
#include <linux/debugfs.h>

#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc.h>
#include <drm/drm_drv.h>
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_fourcc.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
#include <drm/drm_modeset_helper.h>
#include <drm/drm_of.h>
#include <drm/drm_probe_helper.h>
#include <drm/drm_vblank.h>

#include "malidp_drv.h"
#include "malidp_mw.h"
#include "malidp_regs.h"
#include "malidp_hw.h"

#define MALIDP_CONF_VALID_TIMEOUT	250
#define AFBC_HEADER_SIZE		16
#define AFBC_SUPERBLK_ALIGNMENT		128

static void malidp_write_gamma_table(struct malidp_hw_device *hwdev,
				     u32 data[MALIDP_COEFFTAB_NUM_COEFFS])
{
	int i;
	/* Update all channels with a single gamma curve. */
	const u32 gamma_write_mask = GENMASK(18, 16);
	/*
	 * Always write an entire table, so the address field in
	 * DE_COEFFTAB_ADDR is 0 and we can use the gamma_write_mask bitmask
	 * directly.
	 */
	malidp_hw_write(hwdev, gamma_write_mask,
			hwdev->hw->map.coeffs_base + MALIDP_COEF_TABLE_ADDR);
	for (i = 0; i < MALIDP_COEFFTAB_NUM_COEFFS; ++i)
		malidp_hw_write(hwdev, data[i],
				hwdev->hw->map.coeffs_base +
				MALIDP_COEF_TABLE_DATA);
}

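/*
 * Apply the gamma LUT attached to the new CRTC state, or disable the gamma
 * stage when no LUT is set. The table is only rewritten when the LUT blob
 * has changed.
 */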
static void malidp_atomic_commit_update_gamma(struct drm_crtc *crtc,
					      struct drm_crtc_state *old_state)
{
	struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
	struct malidp_hw_device *hwdev = malidp->dev;

	if (!crtc->state->color_mgmt_changed)
		return;

	if (!crtc->state->gamma_lut) {
		malidp_hw_clearbits(hwdev,
				    MALIDP_DISP_FUNC_GAMMA,
				    MALIDP_DE_DISPLAY_FUNC);
	} else {
		struct malidp_crtc_state *mc =
			to_malidp_crtc_state(crtc->state);

		if (!old_state->gamma_lut || (crtc->state->gamma_lut->base.id !=
					      old_state->gamma_lut->base.id))
			malidp_write_gamma_table(hwdev, mc->gamma_coeffs);

		malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_GAMMA,
				  MALIDP_DE_DISPLAY_FUNC);
	}
}

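/*
 * Apply the colour transformation matrix (CTM) from the new CRTC state, or
 * disable the colour adjustment stage when no CTM is set. Coefficients are
 * only rewritten when the CTM blob has changed.
 */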
static
void malidp_atomic_commit_update_coloradj(struct drm_crtc *crtc,
					  struct drm_crtc_state *old_state)
{
	struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
	struct malidp_hw_device *hwdev = malidp->dev;
	int i;

	if (!crtc->state->color_mgmt_changed)
		return;

	if (!crtc->state->ctm) {
		malidp_hw_clearbits(hwdev, MALIDP_DISP_FUNC_CADJ,
				    MALIDP_DE_DISPLAY_FUNC);
	} else {
		struct malidp_crtc_state *mc =
			to_malidp_crtc_state(crtc->state);

		if (!old_state->ctm || (crtc->state->ctm->base.id !=
					old_state->ctm->base.id))
			for (i = 0; i < MALIDP_COLORADJ_NUM_COEFFS; ++i)
				malidp_hw_write(hwdev,
						mc->coloradj_coeffs[i],
						hwdev->hw->map.coeffs_base +
						MALIDP_COLOR_ADJ_COEF + 4 * i);

		malidp_hw_setbits(hwdev, MALIDP_DISP_FUNC_CADJ,
				  MALIDP_DE_DISPLAY_FUNC);
	}
}

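/*
 * Program the scaling engine for the CRTC: disable scaling if it is not
 * requested, otherwise write the scaler coefficients, input/output sizes
 * and the initial/delta phase registers from the precomputed scaler config.
 */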
static void malidp_atomic_commit_se_config(struct drm_crtc *crtc,
					   struct drm_crtc_state *old_state)
{
	struct malidp_crtc_state *cs = to_malidp_crtc_state(crtc->state);
	struct malidp_crtc_state *old_cs = to_malidp_crtc_state(old_state);
	struct malidp_drm *malidp = crtc_to_malidp_device(crtc);
	struct malidp_hw_device *hwdev = malidp->dev;
	struct malidp_se_config *s = &cs->scaler_config;
	struct malidp_se_config *old_s = &old_cs->scaler_config;
	u32 se_control = hwdev->hw->map.se_base +
			 ((hwdev->hw->map.features & MALIDP_REGMAP_HAS_CLEARIRQ) ?
			 0x10 : 0xC);
	u32 layer_control = se_control + MALIDP_SE_LAYER_CONTROL;
	u32 scr = se_control + MALIDP_SE_SCALING_CONTROL;
	u32 val;

	/* Set SE_CONTROL */
	if (!s->scale_enable) {
		val = malidp_hw_read(hwdev, se_control);
		val &= ~MALIDP_SE_SCALING_EN;
		malidp_hw_write(hwdev, val, se_control);
		return;
	}

	hwdev->hw->se_set_scaling_coeffs(hwdev, s, old_s);
	val = malidp_hw_read(hwdev, se_control);
	val |= MALIDP_SE_SCALING_EN | MALIDP_SE_ALPHA_EN;

	val &= ~MALIDP_SE_ENH(MALIDP_SE_ENH_MASK);
	val |= s->enhancer_enable ? MALIDP_SE_ENH(3) : 0;

	val |= MALIDP_SE_RGBO_IF_EN;
	malidp_hw_write(hwdev, val, se_control);

	/* Set IN_SIZE & OUT_SIZE. */
	val = MALIDP_SE_SET_V_SIZE(s->input_h) |
	      MALIDP_SE_SET_H_SIZE(s->input_w);
	malidp_hw_write(hwdev, val, layer_control + MALIDP_SE_L0_IN_SIZE);
	val = MALIDP_SE_SET_V_SIZE(s->output_h) |
	      MALIDP_SE_SET_H_SIZE(s->output_w);
	malidp_hw_write(hwdev, val, layer_control + MALIDP_SE_L0_OUT_SIZE);

	/* Set phase regs. */
	malidp_hw_write(hwdev, s->h_init_phase, scr + MALIDP_SE_H_INIT_PH);
	malidp_hw_write(hwdev, s->h_delta_phase, scr + MALIDP_SE_H_DELTA_PH);
	malidp_hw_write(hwdev, s->v_init_phase, scr + MALIDP_SE_V_INIT_PH);
	malidp_hw_write(hwdev, s->v_delta_phase, scr + MALIDP_SE_V_DELTA_PH);
}

/*
 * set the "config valid" bit and wait until the hardware acts on it
 */
static int malidp_set_and_wait_config_valid(struct drm_device *drm)
{
	struct malidp_drm *malidp = drm->dev_private;
	struct malidp_hw_device *hwdev = malidp->dev;
	int ret;

	hwdev->hw->set_config_valid(hwdev, 1);
	/* don't wait for config_valid flag if we are in config mode */
	if (hwdev->hw->in_config_mode(hwdev)) {
		atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_DONE);
		return 0;
	}

	ret = wait_event_interruptible_timeout(malidp->wq,
			atomic_read(&malidp->config_valid) == MALIDP_CONFIG_VALID_DONE,
			msecs_to_jiffies(MALIDP_CONF_VALID_TIMEOUT));

	return (ret > 0) ? 0 : -ETIMEDOUT;
}

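/*
 * Latch the new configuration into hardware: stash the pending vblank event,
 * set the "config valid" bit (retrying a few times on timeout) while the
 * CRTC is active, or deliver the event immediately when the CRTC is off.
 */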
static void malidp_atomic_commit_hw_done(struct drm_atomic_state *state)
{
	struct drm_device *drm = state->dev;
	struct malidp_drm *malidp = drm->dev_private;
	int loop = 5;

	malidp->event = malidp->crtc.state->event;
	malidp->crtc.state->event = NULL;

	if (malidp->crtc.state->active) {
		/*
		 * if we have an event to deliver to userspace, make sure
		 * the vblank is enabled as we are sending it from the IRQ
		 * handler.
		 */
		if (malidp->event)
			drm_crtc_vblank_get(&malidp->crtc);

		/* only set config_valid if the CRTC is enabled */
		if (malidp_set_and_wait_config_valid(drm) < 0) {
			/*
			 * make a loop around the second CVAL setting and
			 * try 5 times before giving up.
			 */
			while (loop--) {
				if (!malidp_set_and_wait_config_valid(drm))
					break;
			}
			DRM_DEBUG_DRIVER("timed out waiting for updated configuration\n");
		}

	} else if (malidp->event) {
		/* CRTC inactive means vblank IRQ is disabled, send event directly */
		spin_lock_irq(&drm->event_lock);
		drm_crtc_send_vblank_event(&malidp->crtc, malidp->event);
		malidp->event = NULL;
		spin_unlock_irq(&drm->event_lock);
	}
	drm_atomic_helper_commit_hw_done(state);
}

static void malidp_atomic_commit_tail(struct drm_atomic_state *state)
{
	struct drm_device *drm = state->dev;
	struct malidp_drm *malidp = drm->dev_private;
	struct drm_crtc *crtc;
	struct drm_crtc_state *old_crtc_state;
	int i;

	pm_runtime_get_sync(drm->dev);

	/*
	 * set config_valid to a special value to let IRQ handlers
	 * know that we are updating registers
	 */
	atomic_set(&malidp->config_valid, MALIDP_CONFIG_START);
	malidp->dev->hw->set_config_valid(malidp->dev, 0);

	drm_atomic_helper_commit_modeset_disables(drm, state);

	for_each_old_crtc_in_state(state, crtc, old_crtc_state, i) {
		malidp_atomic_commit_update_gamma(crtc, old_crtc_state);
		malidp_atomic_commit_update_coloradj(crtc, old_crtc_state);
		malidp_atomic_commit_se_config(crtc, old_crtc_state);
	}

	drm_atomic_helper_commit_planes(drm, state, DRM_PLANE_COMMIT_ACTIVE_ONLY);

	malidp_mw_atomic_commit(drm, state);

	drm_atomic_helper_commit_modeset_enables(drm, state);

	malidp_atomic_commit_hw_done(state);

	pm_runtime_put(drm->dev);

	drm_atomic_helper_cleanup_planes(drm, state);
}

static const struct drm_mode_config_helper_funcs malidp_mode_config_helpers = {
	.atomic_commit_tail = malidp_atomic_commit_tail,
};

static bool
malidp_verify_afbc_framebuffer_caps(struct drm_device *dev,
				    const struct drm_mode_fb_cmd2 *mode_cmd)
{
	if (malidp_format_mod_supported(dev, mode_cmd->pixel_format,
					mode_cmd->modifier[0]) == false)
		return false;

	if (mode_cmd->offsets[0] != 0) {
		DRM_DEBUG_KMS("AFBC buffers' plane offset should be 0\n");
		return false;
	}

	switch (mode_cmd->modifier[0] & AFBC_SIZE_MASK) {
	case AFBC_SIZE_16X16:
		if ((mode_cmd->width % 16) || (mode_cmd->height % 16)) {
			DRM_DEBUG_KMS("AFBC buffers must be aligned to 16 pixels\n");
			return false;
		}
		break;
	default:
		DRM_DEBUG_KMS("Unsupported AFBC block size\n");
		return false;
	}

	return true;
}

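/*
 * Check that the GEM object backing an AFBC framebuffer is large enough:
 * the expected size is the aligned header area (AFBC_HEADER_SIZE bytes per
 * superblock) plus one aligned superblock payload per 16x16 block.
 */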
static bool
malidp_verify_afbc_framebuffer_size(struct drm_device *dev,
				    struct drm_file *file,
				    const struct drm_mode_fb_cmd2 *mode_cmd)
{
	int n_superblocks = 0;
	const struct drm_format_info *info;
	struct drm_gem_object *objs = NULL;
	u32 afbc_superblock_size = 0, afbc_superblock_height = 0;
	u32 afbc_superblock_width = 0, afbc_size = 0;
	int bpp = 0;

	switch (mode_cmd->modifier[0] & AFBC_SIZE_MASK) {
	case AFBC_SIZE_16X16:
		afbc_superblock_height = 16;
		afbc_superblock_width = 16;
		break;
	default:
		DRM_DEBUG_KMS("AFBC superblock size is not supported\n");
		return false;
	}

	info = drm_get_format_info(dev, mode_cmd);

	n_superblocks = (mode_cmd->width / afbc_superblock_width) *
			(mode_cmd->height / afbc_superblock_height);

	bpp = malidp_format_get_bpp(info->format);

	afbc_superblock_size = (bpp * afbc_superblock_width * afbc_superblock_height)
				/ BITS_PER_BYTE;

	afbc_size = ALIGN(n_superblocks * AFBC_HEADER_SIZE, AFBC_SUPERBLK_ALIGNMENT);
	afbc_size += n_superblocks * ALIGN(afbc_superblock_size, AFBC_SUPERBLK_ALIGNMENT);

	if ((mode_cmd->width * bpp) != (mode_cmd->pitches[0] * BITS_PER_BYTE)) {
		DRM_DEBUG_KMS("Invalid value of (pitch * BITS_PER_BYTE) (=%u) "
			      "should be same as width (=%u) * bpp (=%u)\n",
			      (mode_cmd->pitches[0] * BITS_PER_BYTE),
			      mode_cmd->width, bpp);
		return false;
	}

	objs = drm_gem_object_lookup(file, mode_cmd->handles[0]);
	if (!objs) {
		DRM_DEBUG_KMS("Failed to lookup GEM object\n");
		return false;
	}

	if (objs->size < afbc_size) {
		DRM_DEBUG_KMS("buffer size (%zu) too small for AFBC buffer size = %u\n",
			      objs->size, afbc_size);
		drm_gem_object_put(objs);
		return false;
	}

	drm_gem_object_put(objs);

	return true;
}

static bool
malidp_verify_afbc_framebuffer(struct drm_device *dev, struct drm_file *file,
			       const struct drm_mode_fb_cmd2 *mode_cmd)
{
	if (malidp_verify_afbc_framebuffer_caps(dev, mode_cmd))
		return malidp_verify_afbc_framebuffer_size(dev, file, mode_cmd);

	return false;
}

static struct drm_framebuffer *
malidp_fb_create(struct drm_device *dev, struct drm_file *file,
		 const struct drm_mode_fb_cmd2 *mode_cmd)
{
	if (mode_cmd->modifier[0]) {
		if (!malidp_verify_afbc_framebuffer(dev, file, mode_cmd))
			return ERR_PTR(-EINVAL);
	}

	return drm_gem_fb_create(dev, file, mode_cmd);
}

static const struct drm_mode_config_funcs malidp_mode_config_funcs = {
	.fb_create = malidp_fb_create,
	.atomic_check = drm_atomic_helper_check,
	.atomic_commit = drm_atomic_helper_commit,
};

static int malidp_init(struct drm_device *drm)
{
	int ret;
	struct malidp_drm *malidp = drm->dev_private;
	struct malidp_hw_device *hwdev = malidp->dev;

	drm_mode_config_init(drm);

	drm->mode_config.min_width = hwdev->min_line_size;
	drm->mode_config.min_height = hwdev->min_line_size;
	drm->mode_config.max_width = hwdev->max_line_size;
	drm->mode_config.max_height = hwdev->max_line_size;
	drm->mode_config.funcs = &malidp_mode_config_funcs;
	drm->mode_config.helper_private = &malidp_mode_config_helpers;
	drm->mode_config.allow_fb_modifiers = true;

	ret = malidp_crtc_init(drm);
	if (ret)
		goto crtc_fail;

	ret = malidp_mw_connector_init(drm);
	if (ret)
		goto crtc_fail;

	return 0;

crtc_fail:
	drm_mode_config_cleanup(drm);
	return ret;
}

static void malidp_fini(struct drm_device *drm)
{
	drm_mode_config_cleanup(drm);
}

static int malidp_irq_init(struct platform_device *pdev)
{
	int irq_de, irq_se, ret = 0;
	struct drm_device *drm = dev_get_drvdata(&pdev->dev);
	struct malidp_drm *malidp = drm->dev_private;
	struct malidp_hw_device *hwdev = malidp->dev;

	/* fetch the interrupts from DT */
	irq_de = platform_get_irq_byname(pdev, "DE");
	if (irq_de < 0) {
		DRM_ERROR("no 'DE' IRQ specified!\n");
		return irq_de;
	}
	irq_se = platform_get_irq_byname(pdev, "SE");
	if (irq_se < 0) {
		DRM_ERROR("no 'SE' IRQ specified!\n");
		return irq_se;
	}

	ret = malidp_de_irq_init(drm, irq_de);
	if (ret)
		return ret;

	ret = malidp_se_irq_init(drm, irq_se);
	if (ret) {
		malidp_de_irq_fini(hwdev);
		return ret;
	}

	return 0;
}

DEFINE_DRM_GEM_CMA_FOPS(fops);

static int malidp_dumb_create(struct drm_file *file_priv,
			      struct drm_device *drm,
			      struct drm_mode_create_dumb *args)
{
	struct malidp_drm *malidp = drm->dev_private;
	/* allocate for the worst case scenario, i.e. rotated buffers */
	u8 alignment = malidp_hw_get_pitch_align(malidp->dev, 1);

	args->pitch = ALIGN(DIV_ROUND_UP(args->width * args->bpp, 8), alignment);

	return drm_gem_cma_dumb_create_internal(file_priv, drm, args);
}

#ifdef CONFIG_DEBUG_FS

static void malidp_error_stats_init(struct malidp_error_stats *error_stats)
{
	error_stats->num_errors = 0;
	error_stats->last_error_status = 0;
	error_stats->last_error_vblank = -1;
}

void malidp_error(struct malidp_drm *malidp,
		  struct malidp_error_stats *error_stats, u32 status,
		  u64 vblank)
{
	unsigned long irqflags;

	spin_lock_irqsave(&malidp->errors_lock, irqflags);
	error_stats->last_error_status = status;
	error_stats->last_error_vblank = vblank;
	error_stats->num_errors++;
	spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
}

static void malidp_error_stats_dump(const char *prefix,
				    struct malidp_error_stats error_stats,
				    struct seq_file *m)
{
	seq_printf(m, "[%s] num_errors : %d\n", prefix,
		   error_stats.num_errors);
	seq_printf(m, "[%s] last_error_status : 0x%08x\n", prefix,
		   error_stats.last_error_status);
	seq_printf(m, "[%s] last_error_vblank : %lld\n", prefix,
		   error_stats.last_error_vblank);
}

static int malidp_show_stats(struct seq_file *m, void *arg)
{
	struct drm_device *drm = m->private;
	struct malidp_drm *malidp = drm->dev_private;
	unsigned long irqflags;
	struct malidp_error_stats de_errors, se_errors;

	spin_lock_irqsave(&malidp->errors_lock, irqflags);
	de_errors = malidp->de_errors;
	se_errors = malidp->se_errors;
	spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
	malidp_error_stats_dump("DE", de_errors, m);
	malidp_error_stats_dump("SE", se_errors, m);
	return 0;
}

static int malidp_debugfs_open(struct inode *inode, struct file *file)
{
	return single_open(file, malidp_show_stats, inode->i_private);
}

static ssize_t malidp_debugfs_write(struct file *file, const char __user *ubuf,
				    size_t len, loff_t *offp)
{
	struct seq_file *m = file->private_data;
	struct drm_device *drm = m->private;
	struct malidp_drm *malidp = drm->dev_private;
	unsigned long irqflags;

	spin_lock_irqsave(&malidp->errors_lock, irqflags);
	malidp_error_stats_init(&malidp->de_errors);
	malidp_error_stats_init(&malidp->se_errors);
	spin_unlock_irqrestore(&malidp->errors_lock, irqflags);
	return len;
}

static const struct file_operations malidp_debugfs_fops = {
	.owner = THIS_MODULE,
	.open = malidp_debugfs_open,
	.read = seq_read,
	.write = malidp_debugfs_write,
	.llseek = seq_lseek,
	.release = single_release,
};

static void malidp_debugfs_init(struct drm_minor *minor)
{
	struct malidp_drm *malidp = minor->dev->dev_private;

	malidp_error_stats_init(&malidp->de_errors);
	malidp_error_stats_init(&malidp->se_errors);
	spin_lock_init(&malidp->errors_lock);
	debugfs_create_file("debug", S_IRUGO | S_IWUSR, minor->debugfs_root,
			    minor->dev, &malidp_debugfs_fops);
}

#endif //CONFIG_DEBUG_FS

static struct drm_driver malidp_driver = {
	.driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC,
	DRM_GEM_CMA_DRIVER_OPS_WITH_DUMB_CREATE(malidp_dumb_create),
#ifdef CONFIG_DEBUG_FS
	.debugfs_init = malidp_debugfs_init,
#endif
	.fops = &fops,
	.name = "mali-dp",
	.desc = "ARM Mali Display Processor driver",
	.date = "20160106",
	.major = 1,
	.minor = 0,
};

static const struct of_device_id malidp_drm_of_match[] = {
	{
		.compatible = "arm,mali-dp500",
		.data = &malidp_device[MALIDP_500]
	},
	{
		.compatible = "arm,mali-dp550",
		.data = &malidp_device[MALIDP_550]
	},
	{
		.compatible = "arm,mali-dp650",
		.data = &malidp_device[MALIDP_650]
	},
	{},
};
MODULE_DEVICE_TABLE(of, malidp_drm_of_match);

static bool malidp_is_compatible_hw_id(struct malidp_hw_device *hwdev,
				       const struct of_device_id *dev_id)
{
	u32 core_id;
	const char *compatstr_dp500 = "arm,mali-dp500";
	bool is_dp500;
	bool dt_is_dp500;

	/*
	 * The DP500 CORE_ID register is in a different location, so check it
	 * first. If the product id field matches, then this is DP500, otherwise
	 * check the DP550/650 CORE_ID register.
	 */
	core_id = malidp_hw_read(hwdev, MALIDP500_DC_BASE + MALIDP_DE_CORE_ID);
	/* Offset 0x18 will never read 0x500 on products other than DP500. */
	is_dp500 = (MALIDP_PRODUCT_ID(core_id) == 0x500);
	dt_is_dp500 = strnstr(dev_id->compatible, compatstr_dp500,
			      sizeof(dev_id->compatible)) != NULL;
	if (is_dp500 != dt_is_dp500) {
		DRM_ERROR("Device-tree expects %s, but hardware %s DP500.\n",
			  dev_id->compatible, is_dp500 ? "is" : "is not");
		return false;
	} else if (!dt_is_dp500) {
		u16 product_id;
		char buf[32];

		core_id = malidp_hw_read(hwdev,
					 MALIDP550_DC_BASE + MALIDP_DE_CORE_ID);
		product_id = MALIDP_PRODUCT_ID(core_id);
		snprintf(buf, sizeof(buf), "arm,mali-dp%X", product_id);
		if (!strnstr(dev_id->compatible, buf,
			     sizeof(dev_id->compatible))) {
			DRM_ERROR("Device-tree expects %s, but hardware is DP%03X.\n",
				  dev_id->compatible, product_id);
			return false;
		}
	}
	return true;
}

static bool malidp_has_sufficient_address_space(const struct resource *res,
						const struct of_device_id *dev_id)
{
	resource_size_t res_size = resource_size(res);
	const char *compatstr_dp500 = "arm,mali-dp500";

	if (!strnstr(dev_id->compatible, compatstr_dp500,
		     sizeof(dev_id->compatible)))
		return res_size >= MALIDP550_ADDR_SPACE_SIZE;
	else if (res_size < MALIDP500_ADDR_SPACE_SIZE)
		return false;
	return true;
}

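/* sysfs attribute exposing the CORE_ID register value read at bind time */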
static ssize_t core_id_show(struct device *dev, struct device_attribute *attr,
			    char *buf)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct malidp_drm *malidp = drm->dev_private;

	return snprintf(buf, PAGE_SIZE, "%08x\n", malidp->core_id);
}

static DEVICE_ATTR_RO(core_id);

static struct attribute *mali_dp_attrs[] = {
	&dev_attr_core_id.attr,
	NULL,
};
ATTRIBUTE_GROUPS(mali_dp);

#define MAX_OUTPUT_CHANNELS	3

static int malidp_runtime_pm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct malidp_drm *malidp = drm->dev_private;
	struct malidp_hw_device *hwdev = malidp->dev;

	/* we can only suspend if the hardware is in config mode */
	WARN_ON(!hwdev->hw->in_config_mode(hwdev));

	malidp_se_irq_fini(hwdev);
	malidp_de_irq_fini(hwdev);
	hwdev->pm_suspended = true;
	clk_disable_unprepare(hwdev->mclk);
	clk_disable_unprepare(hwdev->aclk);
	clk_disable_unprepare(hwdev->pclk);

	return 0;
}

static int malidp_runtime_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct malidp_drm *malidp = drm->dev_private;
	struct malidp_hw_device *hwdev = malidp->dev;

	clk_prepare_enable(hwdev->pclk);
	clk_prepare_enable(hwdev->aclk);
	clk_prepare_enable(hwdev->mclk);
	hwdev->pm_suspended = false;
	malidp_de_irq_hw_init(hwdev);
	malidp_se_irq_hw_init(hwdev);

	return 0;
}

static int malidp_bind(struct device *dev)
{
	struct resource *res;
	struct drm_device *drm;
	struct malidp_drm *malidp;
	struct malidp_hw_device *hwdev;
	struct platform_device *pdev = to_platform_device(dev);
	struct of_device_id const *dev_id;
	struct drm_encoder *encoder;
	/* number of lines for the R, G and B output */
	u8 output_width[MAX_OUTPUT_CHANNELS];
	int ret = 0, i;
	u32 version, out_depth = 0;

	malidp = devm_kzalloc(dev, sizeof(*malidp), GFP_KERNEL);
	if (!malidp)
		return -ENOMEM;

	hwdev = devm_kzalloc(dev, sizeof(*hwdev), GFP_KERNEL);
	if (!hwdev)
		return -ENOMEM;

	hwdev->hw = (struct malidp_hw *)of_device_get_match_data(dev);
	malidp->dev = hwdev;

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hwdev->regs = devm_ioremap_resource(dev, res);
	if (IS_ERR(hwdev->regs))
		return PTR_ERR(hwdev->regs);

	hwdev->pclk = devm_clk_get(dev, "pclk");
	if (IS_ERR(hwdev->pclk))
		return PTR_ERR(hwdev->pclk);

	hwdev->aclk = devm_clk_get(dev, "aclk");
	if (IS_ERR(hwdev->aclk))
		return PTR_ERR(hwdev->aclk);

	hwdev->mclk = devm_clk_get(dev, "mclk");
	if (IS_ERR(hwdev->mclk))
		return PTR_ERR(hwdev->mclk);

	hwdev->pxlclk = devm_clk_get(dev, "pxlclk");
	if (IS_ERR(hwdev->pxlclk))
		return PTR_ERR(hwdev->pxlclk);

	/* Get the optional framebuffer memory resource */
	ret = of_reserved_mem_device_init(dev);
	if (ret && ret != -ENODEV)
		return ret;

	drm = drm_dev_alloc(&malidp_driver, dev);
	if (IS_ERR(drm)) {
		ret = PTR_ERR(drm);
		goto alloc_fail;
	}

	drm->dev_private = malidp;
	dev_set_drvdata(dev, drm);

	/* Enable power management */
	pm_runtime_enable(dev);

	/* Resume device to enable the clocks */
	if (pm_runtime_enabled(dev))
		pm_runtime_get_sync(dev);
	else
		malidp_runtime_pm_resume(dev);

	dev_id = of_match_device(malidp_drm_of_match, dev);
	if (!dev_id) {
		ret = -EINVAL;
		goto query_hw_fail;
	}

	if (!malidp_has_sufficient_address_space(res, dev_id)) {
		DRM_ERROR("Insufficient address space in device-tree.\n");
		ret = -EINVAL;
		goto query_hw_fail;
	}

	if (!malidp_is_compatible_hw_id(hwdev, dev_id)) {
		ret = -EINVAL;
		goto query_hw_fail;
	}

	ret = hwdev->hw->query_hw(hwdev);
	if (ret) {
		DRM_ERROR("Invalid HW configuration\n");
		goto query_hw_fail;
	}

	version = malidp_hw_read(hwdev, hwdev->hw->map.dc_base + MALIDP_DE_CORE_ID);
	DRM_INFO("found ARM Mali-DP%3x version r%dp%d\n", version >> 16,
		 (version >> 12) & 0xf, (version >> 8) & 0xf);

	malidp->core_id = version;

	ret = of_property_read_u32(dev->of_node,
				   "arm,malidp-arqos-value",
				   &hwdev->arqos_value);
	if (ret)
		hwdev->arqos_value = 0x0;

	/* set the number of lines used for output of RGB data */
	ret = of_property_read_u8_array(dev->of_node,
					"arm,malidp-output-port-lines",
					output_width, MAX_OUTPUT_CHANNELS);
	if (ret)
		goto query_hw_fail;

	for (i = 0; i < MAX_OUTPUT_CHANNELS; i++)
		out_depth = (out_depth << 8) | (output_width[i] & 0xf);
	malidp_hw_write(hwdev, out_depth, hwdev->hw->map.out_depth_base);
	hwdev->output_color_depth = out_depth;

	atomic_set(&malidp->config_valid, MALIDP_CONFIG_VALID_INIT);
	init_waitqueue_head(&malidp->wq);

	ret = malidp_init(drm);
	if (ret < 0)
		goto query_hw_fail;

	/* Set the CRTC's port so that the encoder component can find it */
	malidp->crtc.port = of_graph_get_port_by_id(dev->of_node, 0);

	ret = component_bind_all(dev, drm);
	if (ret) {
		DRM_ERROR("Failed to bind all components\n");
		goto bind_fail;
	}

	/* We expect to have a maximum of two encoders one for the actual
	 * display and a virtual one for the writeback connector
	 */
	WARN_ON(drm->mode_config.num_encoder > 2);
	list_for_each_entry(encoder, &drm->mode_config.encoder_list, head) {
		encoder->possible_clones =
				(1 << drm->mode_config.num_encoder) - 1;
	}

	ret = malidp_irq_init(pdev);
	if (ret < 0)
		goto irq_init_fail;

	drm->irq_enabled = true;

	ret = drm_vblank_init(drm, drm->mode_config.num_crtc);
	if (ret < 0) {
		DRM_ERROR("failed to initialise vblank\n");
		goto vblank_fail;
	}
	pm_runtime_put(dev);

	drm_mode_config_reset(drm);

	drm_kms_helper_poll_init(drm);

	ret = drm_dev_register(drm, 0);
	if (ret)
		goto register_fail;

	drm_fbdev_generic_setup(drm, 32);

	return 0;

register_fail:
	drm_kms_helper_poll_fini(drm);
	pm_runtime_get_sync(dev);
vblank_fail:
	malidp_se_irq_fini(hwdev);
	malidp_de_irq_fini(hwdev);
	drm->irq_enabled = false;
irq_init_fail:
	drm_atomic_helper_shutdown(drm);
	component_unbind_all(dev, drm);
bind_fail:
	of_node_put(malidp->crtc.port);
	malidp->crtc.port = NULL;
	malidp_fini(drm);
query_hw_fail:
	pm_runtime_put(dev);
	if (pm_runtime_enabled(dev))
		pm_runtime_disable(dev);
	else
		malidp_runtime_pm_suspend(dev);
	drm->dev_private = NULL;
	dev_set_drvdata(dev, NULL);
	drm_dev_put(drm);
alloc_fail:
	of_reserved_mem_device_release(dev);

	return ret;
}

static void malidp_unbind(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);
	struct malidp_drm *malidp = drm->dev_private;
	struct malidp_hw_device *hwdev = malidp->dev;

	drm_dev_unregister(drm);
	drm_kms_helper_poll_fini(drm);
	pm_runtime_get_sync(dev);
	drm_atomic_helper_shutdown(drm);
	malidp_se_irq_fini(hwdev);
	malidp_de_irq_fini(hwdev);
	drm->irq_enabled = false;
	component_unbind_all(dev, drm);
	of_node_put(malidp->crtc.port);
	malidp->crtc.port = NULL;
	malidp_fini(drm);
	pm_runtime_put(dev);
	if (pm_runtime_enabled(dev))
		pm_runtime_disable(dev);
	else
		malidp_runtime_pm_suspend(dev);
	drm->dev_private = NULL;
	dev_set_drvdata(dev, NULL);
	drm_dev_put(drm);
	of_reserved_mem_device_release(dev);
}

static const struct component_master_ops malidp_master_ops = {
	.bind = malidp_bind,
	.unbind = malidp_unbind,
};

static int malidp_compare_dev(struct device *dev, void *data)
{
	struct device_node *np = data;

	return dev->of_node == np;
}

static int malidp_platform_probe(struct platform_device *pdev)
{
	struct device_node *port;
	struct component_match *match = NULL;

	if (!pdev->dev.of_node)
		return -ENODEV;

	/* there is only one output port inside each device, find it */
	port = of_graph_get_remote_node(pdev->dev.of_node, 0, 0);
	if (!port)
		return -ENODEV;

	drm_of_component_match_add(&pdev->dev, &match, malidp_compare_dev,
				   port);
	of_node_put(port);
	return component_master_add_with_match(&pdev->dev, &malidp_master_ops,
					       match);
}

static int malidp_platform_remove(struct platform_device *pdev)
{
	component_master_del(&pdev->dev, &malidp_master_ops);
	return 0;
}

static int __maybe_unused malidp_pm_suspend(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	return drm_mode_config_helper_suspend(drm);
}

static int __maybe_unused malidp_pm_resume(struct device *dev)
{
	struct drm_device *drm = dev_get_drvdata(dev);

	drm_mode_config_helper_resume(drm);

	return 0;
}

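/*
 * Late suspend/early resume handlers make sure the hardware is powered down
 * (and brought back up) even when runtime PM has not already suspended it.
 */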
static int __maybe_unused malidp_pm_suspend_late(struct device *dev)
{
	if (!pm_runtime_status_suspended(dev)) {
		malidp_runtime_pm_suspend(dev);
		pm_runtime_set_suspended(dev);
	}
	return 0;
}

static int __maybe_unused malidp_pm_resume_early(struct device *dev)
{
	malidp_runtime_pm_resume(dev);
	pm_runtime_set_active(dev);
	return 0;
}

static const struct dev_pm_ops malidp_pm_ops = {
	SET_SYSTEM_SLEEP_PM_OPS(malidp_pm_suspend, malidp_pm_resume) \
	SET_LATE_SYSTEM_SLEEP_PM_OPS(malidp_pm_suspend_late, malidp_pm_resume_early) \
	SET_RUNTIME_PM_OPS(malidp_runtime_pm_suspend, malidp_runtime_pm_resume, NULL)
};

static struct platform_driver malidp_platform_driver = {
	.probe		= malidp_platform_probe,
	.remove		= malidp_platform_remove,
	.driver	= {
		.name = "mali-dp",
		.pm = &malidp_pm_ops,
		.of_match_table	= malidp_drm_of_match,
		.dev_groups = mali_dp_groups,
	},
};

module_platform_driver(malidp_platform_driver);

MODULE_AUTHOR("Liviu Dudau <Liviu.Dudau@arm.com>");
MODULE_DESCRIPTION("ARM Mali DP DRM driver");
MODULE_LICENSE("GPL v2");