1*4882a593Smuzhiyun // SPDX-License-Identifier: GPL-2.0-only
2*4882a593Smuzhiyun /*
3*4882a593Smuzhiyun * Copyright (C) 2012 Samsung Electronics Co.Ltd
4*4882a593Smuzhiyun * Authors: Joonyoung Shim <jy0922.shim@samsung.com>
5*4882a593Smuzhiyun */
6*4882a593Smuzhiyun
7*4882a593Smuzhiyun #include <linux/clk.h>
8*4882a593Smuzhiyun #include <linux/component.h>
9*4882a593Smuzhiyun #include <linux/delay.h>
10*4882a593Smuzhiyun #include <linux/dma-mapping.h>
11*4882a593Smuzhiyun #include <linux/err.h>
12*4882a593Smuzhiyun #include <linux/interrupt.h>
13*4882a593Smuzhiyun #include <linux/io.h>
14*4882a593Smuzhiyun #include <linux/kernel.h>
15*4882a593Smuzhiyun #include <linux/of.h>
16*4882a593Smuzhiyun #include <linux/platform_device.h>
17*4882a593Smuzhiyun #include <linux/pm_runtime.h>
18*4882a593Smuzhiyun #include <linux/slab.h>
19*4882a593Smuzhiyun #include <linux/uaccess.h>
20*4882a593Smuzhiyun #include <linux/workqueue.h>
21*4882a593Smuzhiyun
22*4882a593Smuzhiyun #include <drm/drm_file.h>
23*4882a593Smuzhiyun #include <drm/exynos_drm.h>
24*4882a593Smuzhiyun
25*4882a593Smuzhiyun #include "exynos_drm_drv.h"
26*4882a593Smuzhiyun #include "exynos_drm_g2d.h"
27*4882a593Smuzhiyun #include "exynos_drm_gem.h"
28*4882a593Smuzhiyun
29*4882a593Smuzhiyun #define G2D_HW_MAJOR_VER 4
30*4882a593Smuzhiyun #define G2D_HW_MINOR_VER 1
31*4882a593Smuzhiyun
/* valid register range set from user: 0x0104 ~ 0x0880 */
33*4882a593Smuzhiyun #define G2D_VALID_START 0x0104
34*4882a593Smuzhiyun #define G2D_VALID_END 0x0880
35*4882a593Smuzhiyun
36*4882a593Smuzhiyun /* general registers */
37*4882a593Smuzhiyun #define G2D_SOFT_RESET 0x0000
38*4882a593Smuzhiyun #define G2D_INTEN 0x0004
39*4882a593Smuzhiyun #define G2D_INTC_PEND 0x000C
40*4882a593Smuzhiyun #define G2D_DMA_SFR_BASE_ADDR 0x0080
41*4882a593Smuzhiyun #define G2D_DMA_COMMAND 0x0084
42*4882a593Smuzhiyun #define G2D_DMA_STATUS 0x008C
43*4882a593Smuzhiyun #define G2D_DMA_HOLD_CMD 0x0090
44*4882a593Smuzhiyun
45*4882a593Smuzhiyun /* command registers */
46*4882a593Smuzhiyun #define G2D_BITBLT_START 0x0100
47*4882a593Smuzhiyun
48*4882a593Smuzhiyun /* registers for base address */
49*4882a593Smuzhiyun #define G2D_SRC_BASE_ADDR 0x0304
50*4882a593Smuzhiyun #define G2D_SRC_STRIDE 0x0308
51*4882a593Smuzhiyun #define G2D_SRC_COLOR_MODE 0x030C
52*4882a593Smuzhiyun #define G2D_SRC_LEFT_TOP 0x0310
53*4882a593Smuzhiyun #define G2D_SRC_RIGHT_BOTTOM 0x0314
54*4882a593Smuzhiyun #define G2D_SRC_PLANE2_BASE_ADDR 0x0318
55*4882a593Smuzhiyun #define G2D_DST_BASE_ADDR 0x0404
56*4882a593Smuzhiyun #define G2D_DST_STRIDE 0x0408
57*4882a593Smuzhiyun #define G2D_DST_COLOR_MODE 0x040C
58*4882a593Smuzhiyun #define G2D_DST_LEFT_TOP 0x0410
59*4882a593Smuzhiyun #define G2D_DST_RIGHT_BOTTOM 0x0414
60*4882a593Smuzhiyun #define G2D_DST_PLANE2_BASE_ADDR 0x0418
61*4882a593Smuzhiyun #define G2D_PAT_BASE_ADDR 0x0500
62*4882a593Smuzhiyun #define G2D_MSK_BASE_ADDR 0x0520
63*4882a593Smuzhiyun
64*4882a593Smuzhiyun /* G2D_SOFT_RESET */
65*4882a593Smuzhiyun #define G2D_SFRCLEAR (1 << 1)
66*4882a593Smuzhiyun #define G2D_R (1 << 0)
67*4882a593Smuzhiyun
68*4882a593Smuzhiyun /* G2D_INTEN */
69*4882a593Smuzhiyun #define G2D_INTEN_ACF (1 << 3)
70*4882a593Smuzhiyun #define G2D_INTEN_UCF (1 << 2)
71*4882a593Smuzhiyun #define G2D_INTEN_GCF (1 << 1)
72*4882a593Smuzhiyun #define G2D_INTEN_SCF (1 << 0)
73*4882a593Smuzhiyun
74*4882a593Smuzhiyun /* G2D_INTC_PEND */
75*4882a593Smuzhiyun #define G2D_INTP_ACMD_FIN (1 << 3)
76*4882a593Smuzhiyun #define G2D_INTP_UCMD_FIN (1 << 2)
77*4882a593Smuzhiyun #define G2D_INTP_GCMD_FIN (1 << 1)
78*4882a593Smuzhiyun #define G2D_INTP_SCMD_FIN (1 << 0)
79*4882a593Smuzhiyun
80*4882a593Smuzhiyun /* G2D_DMA_COMMAND */
81*4882a593Smuzhiyun #define G2D_DMA_HALT (1 << 2)
82*4882a593Smuzhiyun #define G2D_DMA_CONTINUE (1 << 1)
83*4882a593Smuzhiyun #define G2D_DMA_START (1 << 0)
84*4882a593Smuzhiyun
85*4882a593Smuzhiyun /* G2D_DMA_STATUS */
86*4882a593Smuzhiyun #define G2D_DMA_LIST_DONE_COUNT (0xFF << 17)
87*4882a593Smuzhiyun #define G2D_DMA_BITBLT_DONE_COUNT (0xFFFF << 1)
88*4882a593Smuzhiyun #define G2D_DMA_DONE (1 << 0)
89*4882a593Smuzhiyun #define G2D_DMA_LIST_DONE_COUNT_OFFSET 17
90*4882a593Smuzhiyun
91*4882a593Smuzhiyun /* G2D_DMA_HOLD_CMD */
92*4882a593Smuzhiyun #define G2D_USER_HOLD (1 << 2)
93*4882a593Smuzhiyun #define G2D_LIST_HOLD (1 << 1)
94*4882a593Smuzhiyun #define G2D_BITBLT_HOLD (1 << 0)
95*4882a593Smuzhiyun
96*4882a593Smuzhiyun /* G2D_BITBLT_START */
97*4882a593Smuzhiyun #define G2D_START_CASESEL (1 << 2)
98*4882a593Smuzhiyun #define G2D_START_NHOLT (1 << 1)
99*4882a593Smuzhiyun #define G2D_START_BITBLT (1 << 0)
100*4882a593Smuzhiyun
101*4882a593Smuzhiyun /* buffer color format */
102*4882a593Smuzhiyun #define G2D_FMT_XRGB8888 0
103*4882a593Smuzhiyun #define G2D_FMT_ARGB8888 1
104*4882a593Smuzhiyun #define G2D_FMT_RGB565 2
105*4882a593Smuzhiyun #define G2D_FMT_XRGB1555 3
106*4882a593Smuzhiyun #define G2D_FMT_ARGB1555 4
107*4882a593Smuzhiyun #define G2D_FMT_XRGB4444 5
108*4882a593Smuzhiyun #define G2D_FMT_ARGB4444 6
109*4882a593Smuzhiyun #define G2D_FMT_PACKED_RGB888 7
110*4882a593Smuzhiyun #define G2D_FMT_A8 11
111*4882a593Smuzhiyun #define G2D_FMT_L8 12
112*4882a593Smuzhiyun
113*4882a593Smuzhiyun /* buffer valid length */
114*4882a593Smuzhiyun #define G2D_LEN_MIN 1
115*4882a593Smuzhiyun #define G2D_LEN_MAX 8000
116*4882a593Smuzhiyun
117*4882a593Smuzhiyun #define G2D_CMDLIST_SIZE (PAGE_SIZE / 4)
118*4882a593Smuzhiyun #define G2D_CMDLIST_NUM 64
119*4882a593Smuzhiyun #define G2D_CMDLIST_POOL_SIZE (G2D_CMDLIST_SIZE * G2D_CMDLIST_NUM)
120*4882a593Smuzhiyun #define G2D_CMDLIST_DATA_NUM (G2D_CMDLIST_SIZE / sizeof(u32) - 2)
121*4882a593Smuzhiyun
122*4882a593Smuzhiyun /* maximum buffer pool size of userptr is 64MB as default */
123*4882a593Smuzhiyun #define MAX_POOL (64 * 1024 * 1024)
124*4882a593Smuzhiyun
125*4882a593Smuzhiyun enum {
126*4882a593Smuzhiyun BUF_TYPE_GEM = 1,
127*4882a593Smuzhiyun BUF_TYPE_USERPTR,
128*4882a593Smuzhiyun };
129*4882a593Smuzhiyun
130*4882a593Smuzhiyun enum g2d_reg_type {
131*4882a593Smuzhiyun REG_TYPE_NONE = -1,
132*4882a593Smuzhiyun REG_TYPE_SRC,
133*4882a593Smuzhiyun REG_TYPE_SRC_PLANE2,
134*4882a593Smuzhiyun REG_TYPE_DST,
135*4882a593Smuzhiyun REG_TYPE_DST_PLANE2,
136*4882a593Smuzhiyun REG_TYPE_PAT,
137*4882a593Smuzhiyun REG_TYPE_MSK,
138*4882a593Smuzhiyun MAX_REG_TYPE_NR
139*4882a593Smuzhiyun };
140*4882a593Smuzhiyun
141*4882a593Smuzhiyun enum g2d_flag_bits {
142*4882a593Smuzhiyun /*
143*4882a593Smuzhiyun * If set, suspends the runqueue worker after the currently
144*4882a593Smuzhiyun * processed node is finished.
145*4882a593Smuzhiyun */
146*4882a593Smuzhiyun G2D_BIT_SUSPEND_RUNQUEUE,
147*4882a593Smuzhiyun /*
148*4882a593Smuzhiyun * If set, indicates that the engine is currently busy.
149*4882a593Smuzhiyun */
150*4882a593Smuzhiyun G2D_BIT_ENGINE_BUSY,
151*4882a593Smuzhiyun };
152*4882a593Smuzhiyun
153*4882a593Smuzhiyun /* cmdlist data structure */
/*
 * cmdlist data structure
 *
 * One slot of the DMA-coherent cmdlist pool.  @data holds the
 * (register offset, value) pairs executed by the G2D DMA engine, and
 * data[last] is patched to chain the next cmdlist in the in-use list
 * (see g2d_add_cmdlist_to_inuse()).
 */
struct g2d_cmdlist {
	u32 head;	/* NOTE(review): not set in this chunk — presumably the
			 * entry count handed to the engine; confirm. */
	unsigned long data[G2D_CMDLIST_DATA_NUM];
	u32 last;	/* last data offset */
};
159*4882a593Smuzhiyun
/*
 * A structure of buffer description
 *
 * @format: color format (one of the G2D_FMT_* values)
 * @stride: buffer stride/pitch in bytes
 * @left_x: the x coordinate of the left top corner
 * @top_y: the y coordinate of the left top corner
 * @right_x: the x coordinate of the right bottom corner
 * @bottom_y: the y coordinate of the right bottom corner
 *
 * Filled from userspace-supplied cmdlist values and validated in
 * g2d_check_buf_desc_is_valid() before the buffer is mapped.
 */
struct g2d_buf_desc {
	unsigned int format;
	unsigned int stride;
	unsigned int left_x;
	unsigned int top_y;
	unsigned int right_x;
	unsigned int bottom_y;
};
179*4882a593Smuzhiyun
/*
 * A structure of buffer information
 *
 * @map_nr: manages the number of mapped buffers
 * @reg_types: stores register type in the order of requested command
 * @obj: stores the mapped buffer object — a struct exynos_drm_gem
 *	(BUF_TYPE_GEM) or a struct g2d_cmdlist_userptr (BUF_TYPE_USERPTR) —
 *	in its reg_type position
 * @types: stores buffer type (BUF_TYPE_GEM/BUF_TYPE_USERPTR) in its
 *	reg_type position
 * @descs: stores buffer description in its reg_type position
 *
 */
struct g2d_buf_info {
	unsigned int map_nr;
	enum g2d_reg_type reg_types[MAX_REG_TYPE_NR];
	void *obj[MAX_REG_TYPE_NR];
	unsigned int types[MAX_REG_TYPE_NR];
	struct g2d_buf_desc descs[MAX_REG_TYPE_NR];
};
197*4882a593Smuzhiyun
/*
 * DRM pending event attached to a cmdlist node and queued on the file's
 * event_list (see g2d_add_cmdlist_to_inuse()); presumably delivered to
 * userspace on cmdlist completion — the send path is outside this chunk.
 */
struct drm_exynos_pending_g2d_event {
	struct drm_pending_event base;
	struct drm_exynos_g2d_event event;
};
202*4882a593Smuzhiyun
/*
 * Bookkeeping for a userspace buffer pinned and mapped for G2D DMA.
 *
 * @list: link into the owning file's userptr_list
 * @dma_addr: DMA address of the first scatterlist entry of the mapping
 * @userptr: original userspace virtual address
 * @size: size of the user buffer in bytes
 * @vec: frame vector holding the pinned user pages
 * @sgt: scatter/gather table mapped for DMA
 * @refcount: number of active users of this mapping
 * @in_pool: true if accounted against the userptr pool (cached mapping,
 *	only released with force, see g2d_userptr_free_all())
 * @out_of_list: true once unlinked from userptr_list; such a stale entry
 *	stays alive only until its last reference is dropped
 */
struct g2d_cmdlist_userptr {
	struct list_head list;
	dma_addr_t dma_addr;
	unsigned long userptr;
	unsigned long size;
	struct frame_vector *vec;
	struct sg_table *sgt;
	atomic_t refcount;
	bool in_pool;
	bool out_of_list;
};
/*
 * One entry of the cmdlist pool.
 *
 * @list: link into either g2d->free_cmdlist or a file's inuse_cmdlist
 * @cmdlist: CPU address of this node's slot in the cmdlist DMA pool
 * @dma_addr: DMA address of the same slot (what the engine consumes)
 * @buf_info: buffers referenced by this cmdlist
 * @event: optional completion event for userspace, may be NULL
 */
struct g2d_cmdlist_node {
	struct list_head list;
	struct g2d_cmdlist *cmdlist;
	dma_addr_t dma_addr;
	struct g2d_buf_info buf_info;

	struct drm_exynos_pending_g2d_event *event;
};
222*4882a593Smuzhiyun
/*
 * A unit of work on the runqueue: cmdlists submitted together, plus
 * their completion events and submitting client.
 *
 * NOTE(review): these members are consumed by the runqueue worker,
 * which is outside this chunk — field meanings follow their names;
 * confirm against the worker implementation.
 */
struct g2d_runqueue_node {
	struct list_head list;
	struct list_head run_cmdlist;
	struct list_head event_list;
	struct drm_file *filp;
	pid_t pid;
	struct completion complete;
	int async;
};
232*4882a593Smuzhiyun
/* Driver-wide state for one G2D hardware instance. */
struct g2d_data {
	struct device *dev;
	void *dma_priv;
	struct clk *gate_clk;
	void __iomem *regs;	/* MMIO base; offsets are the G2D_* defines */
	int irq;
	struct workqueue_struct *g2d_workq;
	struct work_struct runqueue_work;
	struct drm_device *drm_dev;
	unsigned long flags;	/* bits from enum g2d_flag_bits */

	/* cmdlist */
	struct g2d_cmdlist_node *cmdlist_node;	/* array of G2D_CMDLIST_NUM nodes */
	struct list_head free_cmdlist;
	struct mutex cmdlist_mutex;	/* protects free_cmdlist */
	dma_addr_t cmdlist_pool;	/* DMA address of the cmdlist pool */
	void *cmdlist_pool_virt;	/* CPU address of the cmdlist pool */
	unsigned long cmdlist_dma_attrs;

	/* runqueue */
	struct g2d_runqueue_node *runqueue_node;
	struct list_head runqueue;
	struct mutex runqueue_mutex;
	struct kmem_cache *runqueue_slab;

	unsigned long current_pool;	/* bytes of userptr mappings cached in the pool */
	unsigned long max_pool;		/* userptr pool limit — set outside this chunk */
};
261*4882a593Smuzhiyun
/*
 * Soft-reset the G2D core (G2D_R) and clear its SFR registers
 * (G2D_SFRCLEAR), then mark the engine as idle.
 */
static inline void g2d_hw_reset(struct g2d_data *g2d)
{
	writel(G2D_R | G2D_SFRCLEAR, g2d->regs + G2D_SOFT_RESET);
	clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
}
267*4882a593Smuzhiyun
g2d_init_cmdlist(struct g2d_data * g2d)268*4882a593Smuzhiyun static int g2d_init_cmdlist(struct g2d_data *g2d)
269*4882a593Smuzhiyun {
270*4882a593Smuzhiyun struct device *dev = g2d->dev;
271*4882a593Smuzhiyun struct g2d_cmdlist_node *node;
272*4882a593Smuzhiyun int nr;
273*4882a593Smuzhiyun int ret;
274*4882a593Smuzhiyun struct g2d_buf_info *buf_info;
275*4882a593Smuzhiyun
276*4882a593Smuzhiyun g2d->cmdlist_dma_attrs = DMA_ATTR_WRITE_COMBINE;
277*4882a593Smuzhiyun
278*4882a593Smuzhiyun g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(g2d->drm_dev),
279*4882a593Smuzhiyun G2D_CMDLIST_POOL_SIZE,
280*4882a593Smuzhiyun &g2d->cmdlist_pool, GFP_KERNEL,
281*4882a593Smuzhiyun g2d->cmdlist_dma_attrs);
282*4882a593Smuzhiyun if (!g2d->cmdlist_pool_virt) {
283*4882a593Smuzhiyun dev_err(dev, "failed to allocate dma memory\n");
284*4882a593Smuzhiyun return -ENOMEM;
285*4882a593Smuzhiyun }
286*4882a593Smuzhiyun
287*4882a593Smuzhiyun node = kcalloc(G2D_CMDLIST_NUM, sizeof(*node), GFP_KERNEL);
288*4882a593Smuzhiyun if (!node) {
289*4882a593Smuzhiyun ret = -ENOMEM;
290*4882a593Smuzhiyun goto err;
291*4882a593Smuzhiyun }
292*4882a593Smuzhiyun
293*4882a593Smuzhiyun for (nr = 0; nr < G2D_CMDLIST_NUM; nr++) {
294*4882a593Smuzhiyun unsigned int i;
295*4882a593Smuzhiyun
296*4882a593Smuzhiyun node[nr].cmdlist =
297*4882a593Smuzhiyun g2d->cmdlist_pool_virt + nr * G2D_CMDLIST_SIZE;
298*4882a593Smuzhiyun node[nr].dma_addr =
299*4882a593Smuzhiyun g2d->cmdlist_pool + nr * G2D_CMDLIST_SIZE;
300*4882a593Smuzhiyun
301*4882a593Smuzhiyun buf_info = &node[nr].buf_info;
302*4882a593Smuzhiyun for (i = 0; i < MAX_REG_TYPE_NR; i++)
303*4882a593Smuzhiyun buf_info->reg_types[i] = REG_TYPE_NONE;
304*4882a593Smuzhiyun
305*4882a593Smuzhiyun list_add_tail(&node[nr].list, &g2d->free_cmdlist);
306*4882a593Smuzhiyun }
307*4882a593Smuzhiyun
308*4882a593Smuzhiyun return 0;
309*4882a593Smuzhiyun
310*4882a593Smuzhiyun err:
311*4882a593Smuzhiyun dma_free_attrs(to_dma_dev(g2d->drm_dev), G2D_CMDLIST_POOL_SIZE,
312*4882a593Smuzhiyun g2d->cmdlist_pool_virt,
313*4882a593Smuzhiyun g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
314*4882a593Smuzhiyun return ret;
315*4882a593Smuzhiyun }
316*4882a593Smuzhiyun
/*
 * Release the cmdlist node array and the DMA pool allocated by
 * g2d_init_cmdlist().
 */
static void g2d_fini_cmdlist(struct g2d_data *g2d)
{
	/* kfree(NULL) is a no-op, so this is safe if init failed early */
	kfree(g2d->cmdlist_node);

	if (g2d->cmdlist_pool_virt && g2d->cmdlist_pool) {
		dma_free_attrs(to_dma_dev(g2d->drm_dev),
				G2D_CMDLIST_POOL_SIZE,
				g2d->cmdlist_pool_virt,
				g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
	}
}
328*4882a593Smuzhiyun
g2d_get_cmdlist(struct g2d_data * g2d)329*4882a593Smuzhiyun static struct g2d_cmdlist_node *g2d_get_cmdlist(struct g2d_data *g2d)
330*4882a593Smuzhiyun {
331*4882a593Smuzhiyun struct device *dev = g2d->dev;
332*4882a593Smuzhiyun struct g2d_cmdlist_node *node;
333*4882a593Smuzhiyun
334*4882a593Smuzhiyun mutex_lock(&g2d->cmdlist_mutex);
335*4882a593Smuzhiyun if (list_empty(&g2d->free_cmdlist)) {
336*4882a593Smuzhiyun dev_err(dev, "there is no free cmdlist\n");
337*4882a593Smuzhiyun mutex_unlock(&g2d->cmdlist_mutex);
338*4882a593Smuzhiyun return NULL;
339*4882a593Smuzhiyun }
340*4882a593Smuzhiyun
341*4882a593Smuzhiyun node = list_first_entry(&g2d->free_cmdlist, struct g2d_cmdlist_node,
342*4882a593Smuzhiyun list);
343*4882a593Smuzhiyun list_del_init(&node->list);
344*4882a593Smuzhiyun mutex_unlock(&g2d->cmdlist_mutex);
345*4882a593Smuzhiyun
346*4882a593Smuzhiyun return node;
347*4882a593Smuzhiyun }
348*4882a593Smuzhiyun
/* Return a cmdlist node to the free list, under the cmdlist lock. */
static void g2d_put_cmdlist(struct g2d_data *g2d, struct g2d_cmdlist_node *node)
{
	mutex_lock(&g2d->cmdlist_mutex);
	list_move_tail(&node->list, &g2d->free_cmdlist);
	mutex_unlock(&g2d->cmdlist_mutex);
}
355*4882a593Smuzhiyun
g2d_add_cmdlist_to_inuse(struct drm_exynos_file_private * file_priv,struct g2d_cmdlist_node * node)356*4882a593Smuzhiyun static void g2d_add_cmdlist_to_inuse(struct drm_exynos_file_private *file_priv,
357*4882a593Smuzhiyun struct g2d_cmdlist_node *node)
358*4882a593Smuzhiyun {
359*4882a593Smuzhiyun struct g2d_cmdlist_node *lnode;
360*4882a593Smuzhiyun
361*4882a593Smuzhiyun if (list_empty(&file_priv->inuse_cmdlist))
362*4882a593Smuzhiyun goto add_to_list;
363*4882a593Smuzhiyun
364*4882a593Smuzhiyun /* this links to base address of new cmdlist */
365*4882a593Smuzhiyun lnode = list_entry(file_priv->inuse_cmdlist.prev,
366*4882a593Smuzhiyun struct g2d_cmdlist_node, list);
367*4882a593Smuzhiyun lnode->cmdlist->data[lnode->cmdlist->last] = node->dma_addr;
368*4882a593Smuzhiyun
369*4882a593Smuzhiyun add_to_list:
370*4882a593Smuzhiyun list_add_tail(&node->list, &file_priv->inuse_cmdlist);
371*4882a593Smuzhiyun
372*4882a593Smuzhiyun if (node->event)
373*4882a593Smuzhiyun list_add_tail(&node->event->base.link, &file_priv->event_list);
374*4882a593Smuzhiyun }
375*4882a593Smuzhiyun
/*
 * Drop a reference on a mapped userptr buffer and tear the mapping down
 * once it is no longer used.
 *
 * @obj: opaque pointer to a struct g2d_cmdlist_userptr, may be NULL
 * @force: tear down unconditionally, ignoring the refcount and pool
 *	residency (used by g2d_userptr_free_all())
 */
static void g2d_userptr_put_dma_addr(struct g2d_data *g2d,
					void *obj,
					bool force)
{
	struct g2d_cmdlist_userptr *g2d_userptr = obj;
	struct page **pages;

	if (!obj)
		return;

	if (force)
		goto out;

	atomic_dec(&g2d_userptr->refcount);

	/*
	 * NOTE(review): the dec + read pair below is not atomic as a
	 * whole; presumably callers serialize on a higher-level lock —
	 * confirm against the submit/teardown paths.
	 */
	if (atomic_read(&g2d_userptr->refcount) > 0)
		return;

	/* pooled mappings stay cached; they are only released via @force */
	if (g2d_userptr->in_pool)
		return;

out:
	dma_unmap_sgtable(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt,
			DMA_BIDIRECTIONAL, 0);

	/* mark pages dirty — the engine may have written to them */
	pages = frame_vector_pages(g2d_userptr->vec);
	if (!IS_ERR(pages)) {
		int i;

		for (i = 0; i < frame_vector_count(g2d_userptr->vec); i++)
			set_page_dirty_lock(pages[i]);
	}
	put_vaddr_frames(g2d_userptr->vec);
	frame_vector_destroy(g2d_userptr->vec);

	/* stale entries were already unlinked in g2d_userptr_get_dma_addr() */
	if (!g2d_userptr->out_of_list)
		list_del_init(&g2d_userptr->list);

	sg_free_table(g2d_userptr->sgt);
	kfree(g2d_userptr->sgt);
	kfree(g2d_userptr);
}
418*4882a593Smuzhiyun
/*
 * Pin a userspace buffer and map it for G2D DMA, or reuse an existing
 * mapping of the same (address, size) pair.
 *
 * @userptr: userspace virtual address of the buffer
 * @size: buffer size in bytes, must be non-zero
 * @obj: out parameter, set to the struct g2d_cmdlist_userptr tracking
 *	the mapping (pass it back to g2d_userptr_put_dma_addr())
 *
 * Returns a pointer to the mapping's dma_addr field, or an ERR_PTR().
 */
static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
					unsigned long userptr,
					unsigned long size,
					struct drm_file *filp,
					void **obj)
{
	struct drm_exynos_file_private *file_priv = filp->driver_priv;
	struct g2d_cmdlist_userptr *g2d_userptr;
	struct sg_table *sgt;
	unsigned long start, end;
	unsigned int npages, offset;
	int ret;

	if (!size) {
		DRM_DEV_ERROR(g2d->dev, "invalid userptr size.\n");
		return ERR_PTR(-EINVAL);
	}

	/* check if userptr already exists in userptr_list. */
	list_for_each_entry(g2d_userptr, &file_priv->userptr_list, list) {
		if (g2d_userptr->userptr == userptr) {
			/*
			 * also check size because there could be same address
			 * and different size.
			 */
			if (g2d_userptr->size == size) {
				atomic_inc(&g2d_userptr->refcount);
				*obj = g2d_userptr;

				return &g2d_userptr->dma_addr;
			}

			/*
			 * at this moment, maybe g2d dma is accessing this
			 * g2d_userptr memory region so just remove this
			 * g2d_userptr object from userptr_list not to be
			 * referred again and also except it the userptr
			 * pool to be released after the dma access completion.
			 * A fresh mapping is created below instead.
			 */
			g2d_userptr->out_of_list = true;
			g2d_userptr->in_pool = false;
			list_del_init(&g2d_userptr->list);

			break;
		}
	}

	g2d_userptr = kzalloc(sizeof(*g2d_userptr), GFP_KERNEL);
	if (!g2d_userptr)
		return ERR_PTR(-ENOMEM);

	atomic_set(&g2d_userptr->refcount, 1);
	g2d_userptr->size = size;

	/* pin whole pages covering [userptr, userptr + size) */
	start = userptr & PAGE_MASK;
	offset = userptr & ~PAGE_MASK;
	end = PAGE_ALIGN(userptr + size);
	npages = (end - start) >> PAGE_SHIFT;
	g2d_userptr->vec = frame_vector_create(npages);
	if (!g2d_userptr->vec) {
		ret = -ENOMEM;
		goto err_free;
	}

	ret = get_vaddr_frames(start, npages, FOLL_FORCE | FOLL_WRITE,
		g2d_userptr->vec);
	if (ret != npages) {
		DRM_DEV_ERROR(g2d->dev,
			      "failed to get user pages from userptr.\n");
		if (ret < 0)
			goto err_destroy_framevec;
		/* partial pin: pages were acquired, so release them too */
		ret = -EFAULT;
		goto err_put_framevec;
	}
	if (frame_vector_to_pages(g2d_userptr->vec) < 0) {
		ret = -EFAULT;
		goto err_put_framevec;
	}

	sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
	if (!sgt) {
		ret = -ENOMEM;
		goto err_put_framevec;
	}

	ret = sg_alloc_table_from_pages(sgt,
					frame_vector_pages(g2d_userptr->vec),
					npages, offset, size, GFP_KERNEL);
	if (ret < 0) {
		DRM_DEV_ERROR(g2d->dev, "failed to get sgt from pages.\n");
		goto err_free_sgt;
	}

	g2d_userptr->sgt = sgt;

	ret = dma_map_sgtable(to_dma_dev(g2d->drm_dev), sgt,
			      DMA_BIDIRECTIONAL, 0);
	if (ret) {
		DRM_DEV_ERROR(g2d->dev, "failed to map sgt with dma region.\n");
		goto err_sg_free_table;
	}

	g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
	g2d_userptr->userptr = userptr;

	list_add_tail(&g2d_userptr->list, &file_priv->userptr_list);

	/* keep the mapping cached while the pool limit is not exceeded */
	if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
		g2d->current_pool += npages << PAGE_SHIFT;
		g2d_userptr->in_pool = true;
	}

	*obj = g2d_userptr;

	return &g2d_userptr->dma_addr;

err_sg_free_table:
	sg_free_table(sgt);

err_free_sgt:
	kfree(sgt);

err_put_framevec:
	put_vaddr_frames(g2d_userptr->vec);

err_destroy_framevec:
	frame_vector_destroy(g2d_userptr->vec);

err_free:
	kfree(g2d_userptr);

	return ERR_PTR(ret);
}
552*4882a593Smuzhiyun
g2d_userptr_free_all(struct g2d_data * g2d,struct drm_file * filp)553*4882a593Smuzhiyun static void g2d_userptr_free_all(struct g2d_data *g2d, struct drm_file *filp)
554*4882a593Smuzhiyun {
555*4882a593Smuzhiyun struct drm_exynos_file_private *file_priv = filp->driver_priv;
556*4882a593Smuzhiyun struct g2d_cmdlist_userptr *g2d_userptr, *n;
557*4882a593Smuzhiyun
558*4882a593Smuzhiyun list_for_each_entry_safe(g2d_userptr, n, &file_priv->userptr_list, list)
559*4882a593Smuzhiyun if (g2d_userptr->in_pool)
560*4882a593Smuzhiyun g2d_userptr_put_dma_addr(g2d, g2d_userptr, true);
561*4882a593Smuzhiyun
562*4882a593Smuzhiyun g2d->current_pool = 0;
563*4882a593Smuzhiyun }
564*4882a593Smuzhiyun
g2d_get_reg_type(struct g2d_data * g2d,int reg_offset)565*4882a593Smuzhiyun static enum g2d_reg_type g2d_get_reg_type(struct g2d_data *g2d, int reg_offset)
566*4882a593Smuzhiyun {
567*4882a593Smuzhiyun enum g2d_reg_type reg_type;
568*4882a593Smuzhiyun
569*4882a593Smuzhiyun switch (reg_offset) {
570*4882a593Smuzhiyun case G2D_SRC_BASE_ADDR:
571*4882a593Smuzhiyun case G2D_SRC_STRIDE:
572*4882a593Smuzhiyun case G2D_SRC_COLOR_MODE:
573*4882a593Smuzhiyun case G2D_SRC_LEFT_TOP:
574*4882a593Smuzhiyun case G2D_SRC_RIGHT_BOTTOM:
575*4882a593Smuzhiyun reg_type = REG_TYPE_SRC;
576*4882a593Smuzhiyun break;
577*4882a593Smuzhiyun case G2D_SRC_PLANE2_BASE_ADDR:
578*4882a593Smuzhiyun reg_type = REG_TYPE_SRC_PLANE2;
579*4882a593Smuzhiyun break;
580*4882a593Smuzhiyun case G2D_DST_BASE_ADDR:
581*4882a593Smuzhiyun case G2D_DST_STRIDE:
582*4882a593Smuzhiyun case G2D_DST_COLOR_MODE:
583*4882a593Smuzhiyun case G2D_DST_LEFT_TOP:
584*4882a593Smuzhiyun case G2D_DST_RIGHT_BOTTOM:
585*4882a593Smuzhiyun reg_type = REG_TYPE_DST;
586*4882a593Smuzhiyun break;
587*4882a593Smuzhiyun case G2D_DST_PLANE2_BASE_ADDR:
588*4882a593Smuzhiyun reg_type = REG_TYPE_DST_PLANE2;
589*4882a593Smuzhiyun break;
590*4882a593Smuzhiyun case G2D_PAT_BASE_ADDR:
591*4882a593Smuzhiyun reg_type = REG_TYPE_PAT;
592*4882a593Smuzhiyun break;
593*4882a593Smuzhiyun case G2D_MSK_BASE_ADDR:
594*4882a593Smuzhiyun reg_type = REG_TYPE_MSK;
595*4882a593Smuzhiyun break;
596*4882a593Smuzhiyun default:
597*4882a593Smuzhiyun reg_type = REG_TYPE_NONE;
598*4882a593Smuzhiyun DRM_DEV_ERROR(g2d->dev, "Unknown register offset![%d]\n",
599*4882a593Smuzhiyun reg_offset);
600*4882a593Smuzhiyun break;
601*4882a593Smuzhiyun }
602*4882a593Smuzhiyun
603*4882a593Smuzhiyun return reg_type;
604*4882a593Smuzhiyun }
605*4882a593Smuzhiyun
g2d_get_buf_bpp(unsigned int format)606*4882a593Smuzhiyun static unsigned long g2d_get_buf_bpp(unsigned int format)
607*4882a593Smuzhiyun {
608*4882a593Smuzhiyun unsigned long bpp;
609*4882a593Smuzhiyun
610*4882a593Smuzhiyun switch (format) {
611*4882a593Smuzhiyun case G2D_FMT_XRGB8888:
612*4882a593Smuzhiyun case G2D_FMT_ARGB8888:
613*4882a593Smuzhiyun bpp = 4;
614*4882a593Smuzhiyun break;
615*4882a593Smuzhiyun case G2D_FMT_RGB565:
616*4882a593Smuzhiyun case G2D_FMT_XRGB1555:
617*4882a593Smuzhiyun case G2D_FMT_ARGB1555:
618*4882a593Smuzhiyun case G2D_FMT_XRGB4444:
619*4882a593Smuzhiyun case G2D_FMT_ARGB4444:
620*4882a593Smuzhiyun bpp = 2;
621*4882a593Smuzhiyun break;
622*4882a593Smuzhiyun case G2D_FMT_PACKED_RGB888:
623*4882a593Smuzhiyun bpp = 3;
624*4882a593Smuzhiyun break;
625*4882a593Smuzhiyun default:
626*4882a593Smuzhiyun bpp = 1;
627*4882a593Smuzhiyun break;
628*4882a593Smuzhiyun }
629*4882a593Smuzhiyun
630*4882a593Smuzhiyun return bpp;
631*4882a593Smuzhiyun }
632*4882a593Smuzhiyun
/*
 * Validate a source/destination buffer description against the size of
 * the underlying buffer.
 *
 * @buf_desc: clip rectangle, stride and format supplied by userspace
 * @reg_type: which buffer slot the description belongs to
 * @size: size of the backing buffer in bytes
 *
 * Returns true when the rectangle is within hardware limits and the
 * last byte the engine will touch lies inside the buffer.  Non-SRC/DST
 * descriptions are accepted unchecked.
 */
static bool g2d_check_buf_desc_is_valid(struct g2d_data *g2d,
					struct g2d_buf_desc *buf_desc,
					enum g2d_reg_type reg_type,
					unsigned long size)
{
	int width, height;
	unsigned long bpp, last_pos;

	/*
	 * check source and destination buffers only.
	 * so the others are always valid.
	 */
	if (reg_type != REG_TYPE_SRC && reg_type != REG_TYPE_DST)
		return true;

	/* This check also makes sure that right_x > left_x. */
	width = (int)buf_desc->right_x - (int)buf_desc->left_x;
	if (width < G2D_LEN_MIN || width > G2D_LEN_MAX) {
		DRM_DEV_ERROR(g2d->dev, "width[%d] is out of range!\n", width);
		return false;
	}

	/* This check also makes sure that bottom_y > top_y. */
	height = (int)buf_desc->bottom_y - (int)buf_desc->top_y;
	if (height < G2D_LEN_MIN || height > G2D_LEN_MAX) {
		DRM_DEV_ERROR(g2d->dev,
			      "height[%d] is out of range!\n", height);
		return false;
	}

	bpp = g2d_get_buf_bpp(buf_desc->format);

	/* Compute the position of the last byte that the engine accesses. */
	last_pos = ((unsigned long)buf_desc->bottom_y - 1) *
		(unsigned long)buf_desc->stride +
		(unsigned long)buf_desc->right_x * bpp - 1;

	/*
	 * Since right_x > left_x and bottom_y > top_y we already know
	 * that the first_pos < last_pos (first_pos being the position
	 * of the first byte the engine accesses), it just remains to
	 * check if last_pos is smaller then the buffer size.
	 */

	if (last_pos >= size) {
		DRM_DEV_ERROR(g2d->dev, "last engine access position [%lu] "
			      "is out of range [%lu]!\n", last_pos, size);
		return false;
	}

	return true;
}
685*4882a593Smuzhiyun
/*
 * g2d_map_cmdlist_gem - resolve the buffer handles of a command list
 *
 * Walks the (offset, handle) pairs at the tail of the node's command list
 * (buf_info->map_nr of them) and replaces each handle with the DMA address
 * of the underlying buffer. GEM handles are looked up through the DRM
 * file; userptr entries are copied from userspace and pinned via
 * g2d_userptr_get_dma_addr(). Each buffer descriptor is bounds-checked
 * against the buffer size before its address is patched in.
 *
 * Returns 0 on success or a negative error code. On failure, map_nr is
 * trimmed to the number of successfully mapped entries so that
 * g2d_unmap_cmdlist_gem() releases exactly those.
 */
static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
			       struct g2d_cmdlist_node *node,
			       struct drm_device *drm_dev,
			       struct drm_file *file)
{
	struct g2d_cmdlist *cmdlist = node->cmdlist;
	struct g2d_buf_info *buf_info = &node->buf_info;
	int offset;
	int ret;
	int i;

	for (i = 0; i < buf_info->map_nr; i++) {
		struct g2d_buf_desc *buf_desc;
		enum g2d_reg_type reg_type;
		int reg_pos;
		unsigned long handle;
		dma_addr_t *addr;

		/* The address command pairs sit at the end of the list. */
		reg_pos = cmdlist->last - 2 * (i + 1);

		offset = cmdlist->data[reg_pos];
		handle = cmdlist->data[reg_pos + 1];

		reg_type = g2d_get_reg_type(g2d, offset);
		if (reg_type == REG_TYPE_NONE) {
			ret = -EFAULT;
			goto err;
		}

		buf_desc = &buf_info->descs[reg_type];

		if (buf_info->types[reg_type] == BUF_TYPE_GEM) {
			struct exynos_drm_gem *exynos_gem;

			exynos_gem = exynos_drm_gem_get(file, handle);
			if (!exynos_gem) {
				ret = -EFAULT;
				goto err;
			}

			/* Reject descriptors that would access out of bounds. */
			if (!g2d_check_buf_desc_is_valid(g2d, buf_desc,
							 reg_type, exynos_gem->size)) {
				exynos_drm_gem_put(exynos_gem);
				ret = -EFAULT;
				goto err;
			}

			addr = &exynos_gem->dma_addr;
			buf_info->obj[reg_type] = exynos_gem;
		} else {
			struct drm_exynos_g2d_userptr g2d_userptr;

			/* 'handle' is a user pointer to a userptr descriptor. */
			if (copy_from_user(&g2d_userptr, (void __user *)handle,
					   sizeof(struct drm_exynos_g2d_userptr))) {
				ret = -EFAULT;
				goto err;
			}

			if (!g2d_check_buf_desc_is_valid(g2d, buf_desc,
							 reg_type,
							 g2d_userptr.size)) {
				ret = -EFAULT;
				goto err;
			}

			addr = g2d_userptr_get_dma_addr(g2d,
							g2d_userptr.userptr,
							g2d_userptr.size,
							file,
							&buf_info->obj[reg_type]);
			if (IS_ERR(addr)) {
				ret = -EFAULT;
				goto err;
			}
		}

		/* Patch the command list in place with the DMA address. */
		cmdlist->data[reg_pos + 1] = *addr;
		buf_info->reg_types[i] = reg_type;
	}

	return 0;

err:
	/* Record how many entries were mapped so unmap can undo them. */
	buf_info->map_nr = i;
	return ret;
}
772*4882a593Smuzhiyun
/*
 * g2d_unmap_cmdlist_gem - release all buffers mapped for a command list
 *
 * Undoes g2d_map_cmdlist_gem(): for every entry recorded in
 * buf_info->reg_types it drops the GEM reference or unpins the userptr
 * mapping, then clears the per-register bookkeeping so the cmdlist node
 * can be reused.
 *
 * NOTE(review): @filp is currently unused here — the userptr release path
 * identifies the mapping via the stored obj pointer only.
 */
static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
				  struct g2d_cmdlist_node *node,
				  struct drm_file *filp)
{
	struct g2d_buf_info *buf_info = &node->buf_info;
	int i;

	for (i = 0; i < buf_info->map_nr; i++) {
		struct g2d_buf_desc *buf_desc;
		enum g2d_reg_type reg_type;
		void *obj;

		reg_type = buf_info->reg_types[i];

		buf_desc = &buf_info->descs[reg_type];
		obj = buf_info->obj[reg_type];

		if (buf_info->types[reg_type] == BUF_TYPE_GEM)
			exynos_drm_gem_put(obj);
		else
			g2d_userptr_put_dma_addr(g2d, obj, false);

		/* Reset the slot for the next use of this cmdlist node. */
		buf_info->reg_types[i] = REG_TYPE_NONE;
		buf_info->obj[reg_type] = NULL;
		buf_info->types[reg_type] = 0;
		memset(buf_desc, 0x00, sizeof(*buf_desc));
	}

	buf_info->map_nr = 0;
}
803*4882a593Smuzhiyun
g2d_dma_start(struct g2d_data * g2d,struct g2d_runqueue_node * runqueue_node)804*4882a593Smuzhiyun static void g2d_dma_start(struct g2d_data *g2d,
805*4882a593Smuzhiyun struct g2d_runqueue_node *runqueue_node)
806*4882a593Smuzhiyun {
807*4882a593Smuzhiyun struct g2d_cmdlist_node *node =
808*4882a593Smuzhiyun list_first_entry(&runqueue_node->run_cmdlist,
809*4882a593Smuzhiyun struct g2d_cmdlist_node, list);
810*4882a593Smuzhiyun
811*4882a593Smuzhiyun set_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
812*4882a593Smuzhiyun writel_relaxed(node->dma_addr, g2d->regs + G2D_DMA_SFR_BASE_ADDR);
813*4882a593Smuzhiyun writel_relaxed(G2D_DMA_START, g2d->regs + G2D_DMA_COMMAND);
814*4882a593Smuzhiyun }
815*4882a593Smuzhiyun
g2d_get_runqueue_node(struct g2d_data * g2d)816*4882a593Smuzhiyun static struct g2d_runqueue_node *g2d_get_runqueue_node(struct g2d_data *g2d)
817*4882a593Smuzhiyun {
818*4882a593Smuzhiyun struct g2d_runqueue_node *runqueue_node;
819*4882a593Smuzhiyun
820*4882a593Smuzhiyun if (list_empty(&g2d->runqueue))
821*4882a593Smuzhiyun return NULL;
822*4882a593Smuzhiyun
823*4882a593Smuzhiyun runqueue_node = list_first_entry(&g2d->runqueue,
824*4882a593Smuzhiyun struct g2d_runqueue_node, list);
825*4882a593Smuzhiyun list_del_init(&runqueue_node->list);
826*4882a593Smuzhiyun return runqueue_node;
827*4882a593Smuzhiyun }
828*4882a593Smuzhiyun
/*
 * g2d_free_runqueue_node - release a runqueue node and its command lists
 *
 * Unmaps the buffers of every command list belonging to the node, returns
 * the command lists to the free pool and frees the node itself.
 * Takes cmdlist_mutex internally.
 */
static void g2d_free_runqueue_node(struct g2d_data *g2d,
				   struct g2d_runqueue_node *runqueue_node)
{
	struct g2d_cmdlist_node *node;

	mutex_lock(&g2d->cmdlist_mutex);
	/*
	 * commands in run_cmdlist have been completed so unmap all gem
	 * objects in each command node so that they are unreferenced.
	 */
	list_for_each_entry(node, &runqueue_node->run_cmdlist, list)
		g2d_unmap_cmdlist_gem(g2d, node, runqueue_node->filp);
	list_splice_tail_init(&runqueue_node->run_cmdlist, &g2d->free_cmdlist);
	mutex_unlock(&g2d->cmdlist_mutex);

	kmem_cache_free(g2d->runqueue_slab, runqueue_node);
}
846*4882a593Smuzhiyun
847*4882a593Smuzhiyun /**
848*4882a593Smuzhiyun * g2d_remove_runqueue_nodes - remove items from the list of runqueue nodes
849*4882a593Smuzhiyun * @g2d: G2D state object
850*4882a593Smuzhiyun * @file: if not zero, only remove items with this DRM file
851*4882a593Smuzhiyun *
852*4882a593Smuzhiyun * Has to be called under runqueue lock.
853*4882a593Smuzhiyun */
g2d_remove_runqueue_nodes(struct g2d_data * g2d,struct drm_file * file)854*4882a593Smuzhiyun static void g2d_remove_runqueue_nodes(struct g2d_data *g2d, struct drm_file *file)
855*4882a593Smuzhiyun {
856*4882a593Smuzhiyun struct g2d_runqueue_node *node, *n;
857*4882a593Smuzhiyun
858*4882a593Smuzhiyun if (list_empty(&g2d->runqueue))
859*4882a593Smuzhiyun return;
860*4882a593Smuzhiyun
861*4882a593Smuzhiyun list_for_each_entry_safe(node, n, &g2d->runqueue, list) {
862*4882a593Smuzhiyun if (file && node->filp != file)
863*4882a593Smuzhiyun continue;
864*4882a593Smuzhiyun
865*4882a593Smuzhiyun list_del_init(&node->list);
866*4882a593Smuzhiyun g2d_free_runqueue_node(g2d, node);
867*4882a593Smuzhiyun }
868*4882a593Smuzhiyun }
869*4882a593Smuzhiyun
/*
 * g2d_runqueue_worker - advance the runqueue after the engine went idle
 *
 * Completes the node the engine has just finished (dropping the PM
 * runtime reference taken when it was started) and, unless the runqueue
 * is suspended, fetches the next node and starts the hardware on it.
 */
static void g2d_runqueue_worker(struct work_struct *work)
{
	struct g2d_data *g2d = container_of(work, struct g2d_data,
					    runqueue_work);
	struct g2d_runqueue_node *runqueue_node;

	/*
	 * The engine is busy and the completion of the current node is going
	 * to poke the runqueue worker, so nothing to do here.
	 */
	if (test_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags))
		return;

	mutex_lock(&g2d->runqueue_mutex);

	runqueue_node = g2d->runqueue_node;
	g2d->runqueue_node = NULL;

	if (runqueue_node) {
		/* Balance the pm_runtime_get_sync() done at start time. */
		pm_runtime_mark_last_busy(g2d->dev);
		pm_runtime_put_autosuspend(g2d->dev);

		complete(&runqueue_node->complete);
		/* Async nodes are not waited for by anyone; free them here. */
		if (runqueue_node->async)
			g2d_free_runqueue_node(g2d, runqueue_node);
	}

	if (!test_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags)) {
		g2d->runqueue_node = g2d_get_runqueue_node(g2d);

		if (g2d->runqueue_node) {
			/* Keep the device powered while the engine runs. */
			pm_runtime_get_sync(g2d->dev);
			g2d_dma_start(g2d, g2d->runqueue_node);
		}
	}

	mutex_unlock(&g2d->runqueue_mutex);
}
908*4882a593Smuzhiyun
/*
 * g2d_finish_event - deliver the completion event for one command list
 *
 * Takes the oldest pending event of the currently running runqueue node,
 * stamps it with the current time and the hardware's completed-list
 * counter, and sends it to userspace. Does nothing when no event is
 * pending.
 */
static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
{
	struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
	struct drm_exynos_pending_g2d_event *e;
	struct timespec64 now;

	e = list_first_entry_or_null(&runqueue_node->event_list,
				     struct drm_exynos_pending_g2d_event,
				     base.link);
	if (!e)
		return;

	ktime_get_ts64(&now);
	e->event.tv_sec = now.tv_sec;
	e->event.tv_usec = now.tv_nsec / NSEC_PER_USEC;
	e->event.cmdlist_no = cmdlist_no;

	drm_send_event(g2d->drm_dev, &e->base);
}
929*4882a593Smuzhiyun
/*
 * g2d_irq_handler - G2D interrupt handler
 *
 * Acknowledges all pending interrupts. On completion of a single command
 * list (GCMD) it sends the associated DRM event and resumes the DMA; on
 * completion of all command lists (ACMD) it marks the engine idle and
 * kicks the runqueue worker.
 */
static irqreturn_t g2d_irq_handler(int irq, void *dev_id)
{
	struct g2d_data *g2d = dev_id;
	u32 pending;

	pending = readl_relaxed(g2d->regs + G2D_INTC_PEND);
	if (pending)
		writel_relaxed(pending, g2d->regs + G2D_INTC_PEND); /* ack */

	if (pending & G2D_INTP_GCMD_FIN) {
		/* Extract the number of completed command lists. */
		u32 cmdlist_no = readl_relaxed(g2d->regs + G2D_DMA_STATUS);

		cmdlist_no = (cmdlist_no & G2D_DMA_LIST_DONE_COUNT) >>
						G2D_DMA_LIST_DONE_COUNT_OFFSET;

		g2d_finish_event(g2d, cmdlist_no);

		/* Release the list hold; continue DMA unless fully done. */
		writel_relaxed(0, g2d->regs + G2D_DMA_HOLD_CMD);
		if (!(pending & G2D_INTP_ACMD_FIN)) {
			writel_relaxed(G2D_DMA_CONTINUE,
					g2d->regs + G2D_DMA_COMMAND);
		}
	}

	if (pending & G2D_INTP_ACMD_FIN) {
		clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
		queue_work(g2d->g2d_workq, &g2d->runqueue_work);
	}

	return IRQ_HANDLED;
}
961*4882a593Smuzhiyun
/**
 * g2d_wait_finish - wait for the G2D engine to finish the current runqueue node
 * @g2d: G2D state object
 * @file: if not zero, only wait if the current runqueue node belongs
 *        to the DRM file
 *
 * Should the engine not become idle after a 100ms timeout, a hardware
 * reset is issued.
 */
static void g2d_wait_finish(struct g2d_data *g2d, struct drm_file *file)
{
	struct device *dev = g2d->dev;

	struct g2d_runqueue_node *runqueue_node = NULL;
	unsigned int tries = 10;

	mutex_lock(&g2d->runqueue_mutex);

	/* If no node is currently processed, we have nothing to do. */
	if (!g2d->runqueue_node)
		goto out;

	runqueue_node = g2d->runqueue_node;

	/* Check if the currently processed item belongs to us. */
	if (file && runqueue_node->filp != file)
		goto out;

	mutex_unlock(&g2d->runqueue_mutex);

	/* Wait for the G2D engine to finish. */
	while (tries-- && (g2d->runqueue_node == runqueue_node))
		mdelay(10);

	mutex_lock(&g2d->runqueue_mutex);

	/* The engine completed on its own while we were polling. */
	if (g2d->runqueue_node != runqueue_node)
		goto out;

	dev_err(dev, "wait timed out, resetting engine...\n");
	g2d_hw_reset(g2d);

	/*
	 * After the hardware reset of the engine we are going to loose
	 * the IRQ which triggers the PM runtime put().
	 * So do this manually here.
	 */
	pm_runtime_mark_last_busy(dev);
	pm_runtime_put_autosuspend(dev);

	/* Complete the node ourselves, as the IRQ will never arrive. */
	complete(&runqueue_node->complete);
	if (runqueue_node->async)
		g2d_free_runqueue_node(g2d, runqueue_node);

out:
	mutex_unlock(&g2d->runqueue_mutex);
}
1019*4882a593Smuzhiyun
/*
 * g2d_check_reg_offset - validate the register writes of a command list
 * @g2d: G2D state object
 * @node: cmdlist node whose last @nr (offset, value) pairs are checked
 * @nr: number of command pairs to check, counted from the list tail
 * @for_addr: true when checking the buffer-address command batch, false
 *            when checking the plain register batch
 *
 * Ensures each register offset lies within the user-writable window and
 * is word aligned, and that base-address registers only occur in the
 * address batch (and vice versa). Also captures buffer geometry (stride,
 * color format, corner coordinates) into the per-register buffer
 * descriptors for the later bounds check in g2d_map_cmdlist_gem().
 *
 * Returns 0 on success, -EINVAL on a bad register offset.
 */
static int g2d_check_reg_offset(struct g2d_data *g2d,
				struct g2d_cmdlist_node *node,
				int nr, bool for_addr)
{
	struct g2d_cmdlist *cmdlist = node->cmdlist;
	int reg_offset;
	int index;
	int i;

	for (i = 0; i < nr; i++) {
		struct g2d_buf_info *buf_info = &node->buf_info;
		struct g2d_buf_desc *buf_desc;
		enum g2d_reg_type reg_type;
		unsigned long value;

		index = cmdlist->last - 2 * (i + 1);

		/* Mask off everything but the register offset bits. */
		reg_offset = cmdlist->data[index] & ~0xfffff000;
		if (reg_offset < G2D_VALID_START || reg_offset > G2D_VALID_END)
			goto err;
		if (reg_offset % 4)
			goto err;

		switch (reg_offset) {
		case G2D_SRC_BASE_ADDR:
		case G2D_SRC_PLANE2_BASE_ADDR:
		case G2D_DST_BASE_ADDR:
		case G2D_DST_PLANE2_BASE_ADDR:
		case G2D_PAT_BASE_ADDR:
		case G2D_MSK_BASE_ADDR:
			/* Address registers only allowed in the address batch. */
			if (!for_addr)
				goto err;

			reg_type = g2d_get_reg_type(g2d, reg_offset);

			/* check userptr buffer type. */
			if ((cmdlist->data[index] & ~0x7fffffff) >> 31) {
				buf_info->types[reg_type] = BUF_TYPE_USERPTR;
				cmdlist->data[index] &= ~G2D_BUF_USERPTR;
			} else
				buf_info->types[reg_type] = BUF_TYPE_GEM;
			break;
		case G2D_SRC_STRIDE:
		case G2D_DST_STRIDE:
			if (for_addr)
				goto err;

			reg_type = g2d_get_reg_type(g2d, reg_offset);

			buf_desc = &buf_info->descs[reg_type];
			buf_desc->stride = cmdlist->data[index + 1];
			break;
		case G2D_SRC_COLOR_MODE:
		case G2D_DST_COLOR_MODE:
			if (for_addr)
				goto err;

			reg_type = g2d_get_reg_type(g2d, reg_offset);

			buf_desc = &buf_info->descs[reg_type];
			value = cmdlist->data[index + 1];

			/* Only the low nibble encodes the color format. */
			buf_desc->format = value & 0xf;
			break;
		case G2D_SRC_LEFT_TOP:
		case G2D_DST_LEFT_TOP:
			if (for_addr)
				goto err;

			reg_type = g2d_get_reg_type(g2d, reg_offset);

			buf_desc = &buf_info->descs[reg_type];
			value = cmdlist->data[index + 1];

			/* 13-bit x in the low half, 13-bit y in the high half. */
			buf_desc->left_x = value & 0x1fff;
			buf_desc->top_y = (value & 0x1fff0000) >> 16;
			break;
		case G2D_SRC_RIGHT_BOTTOM:
		case G2D_DST_RIGHT_BOTTOM:
			if (for_addr)
				goto err;

			reg_type = g2d_get_reg_type(g2d, reg_offset);

			buf_desc = &buf_info->descs[reg_type];
			value = cmdlist->data[index + 1];

			buf_desc->right_x = value & 0x1fff;
			buf_desc->bottom_y = (value & 0x1fff0000) >> 16;
			break;
		default:
			/* Any other register is only valid in the plain batch. */
			if (for_addr)
				goto err;
			break;
		}
	}

	return 0;

err:
	dev_err(g2d->dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
	return -EINVAL;
}
1123*4882a593Smuzhiyun
1124*4882a593Smuzhiyun /* ioctl functions */
exynos_g2d_get_ver_ioctl(struct drm_device * drm_dev,void * data,struct drm_file * file)1125*4882a593Smuzhiyun int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
1126*4882a593Smuzhiyun struct drm_file *file)
1127*4882a593Smuzhiyun {
1128*4882a593Smuzhiyun struct drm_exynos_g2d_get_ver *ver = data;
1129*4882a593Smuzhiyun
1130*4882a593Smuzhiyun ver->major = G2D_HW_MAJOR_VER;
1131*4882a593Smuzhiyun ver->minor = G2D_HW_MINOR_VER;
1132*4882a593Smuzhiyun
1133*4882a593Smuzhiyun return 0;
1134*4882a593Smuzhiyun }
1135*4882a593Smuzhiyun
exynos_g2d_set_cmdlist_ioctl(struct drm_device * drm_dev,void * data,struct drm_file * file)1136*4882a593Smuzhiyun int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
1137*4882a593Smuzhiyun struct drm_file *file)
1138*4882a593Smuzhiyun {
1139*4882a593Smuzhiyun struct drm_exynos_file_private *file_priv = file->driver_priv;
1140*4882a593Smuzhiyun struct exynos_drm_private *priv = drm_dev->dev_private;
1141*4882a593Smuzhiyun struct g2d_data *g2d = dev_get_drvdata(priv->g2d_dev);
1142*4882a593Smuzhiyun struct drm_exynos_g2d_set_cmdlist *req = data;
1143*4882a593Smuzhiyun struct drm_exynos_g2d_cmd *cmd;
1144*4882a593Smuzhiyun struct drm_exynos_pending_g2d_event *e;
1145*4882a593Smuzhiyun struct g2d_cmdlist_node *node;
1146*4882a593Smuzhiyun struct g2d_cmdlist *cmdlist;
1147*4882a593Smuzhiyun int size;
1148*4882a593Smuzhiyun int ret;
1149*4882a593Smuzhiyun
1150*4882a593Smuzhiyun node = g2d_get_cmdlist(g2d);
1151*4882a593Smuzhiyun if (!node)
1152*4882a593Smuzhiyun return -ENOMEM;
1153*4882a593Smuzhiyun
1154*4882a593Smuzhiyun /*
1155*4882a593Smuzhiyun * To avoid an integer overflow for the later size computations, we
1156*4882a593Smuzhiyun * enforce a maximum number of submitted commands here. This limit is
1157*4882a593Smuzhiyun * sufficient for all conceivable usage cases of the G2D.
1158*4882a593Smuzhiyun */
1159*4882a593Smuzhiyun if (req->cmd_nr > G2D_CMDLIST_DATA_NUM ||
1160*4882a593Smuzhiyun req->cmd_buf_nr > G2D_CMDLIST_DATA_NUM) {
1161*4882a593Smuzhiyun dev_err(g2d->dev, "number of submitted G2D commands exceeds limit\n");
1162*4882a593Smuzhiyun return -EINVAL;
1163*4882a593Smuzhiyun }
1164*4882a593Smuzhiyun
1165*4882a593Smuzhiyun node->event = NULL;
1166*4882a593Smuzhiyun
1167*4882a593Smuzhiyun if (req->event_type != G2D_EVENT_NOT) {
1168*4882a593Smuzhiyun e = kzalloc(sizeof(*node->event), GFP_KERNEL);
1169*4882a593Smuzhiyun if (!e) {
1170*4882a593Smuzhiyun ret = -ENOMEM;
1171*4882a593Smuzhiyun goto err;
1172*4882a593Smuzhiyun }
1173*4882a593Smuzhiyun
1174*4882a593Smuzhiyun e->event.base.type = DRM_EXYNOS_G2D_EVENT;
1175*4882a593Smuzhiyun e->event.base.length = sizeof(e->event);
1176*4882a593Smuzhiyun e->event.user_data = req->user_data;
1177*4882a593Smuzhiyun
1178*4882a593Smuzhiyun ret = drm_event_reserve_init(drm_dev, file, &e->base, &e->event.base);
1179*4882a593Smuzhiyun if (ret) {
1180*4882a593Smuzhiyun kfree(e);
1181*4882a593Smuzhiyun goto err;
1182*4882a593Smuzhiyun }
1183*4882a593Smuzhiyun
1184*4882a593Smuzhiyun node->event = e;
1185*4882a593Smuzhiyun }
1186*4882a593Smuzhiyun
1187*4882a593Smuzhiyun cmdlist = node->cmdlist;
1188*4882a593Smuzhiyun
1189*4882a593Smuzhiyun cmdlist->last = 0;
1190*4882a593Smuzhiyun
1191*4882a593Smuzhiyun /*
1192*4882a593Smuzhiyun * If don't clear SFR registers, the cmdlist is affected by register
1193*4882a593Smuzhiyun * values of previous cmdlist. G2D hw executes SFR clear command and
1194*4882a593Smuzhiyun * a next command at the same time then the next command is ignored and
1195*4882a593Smuzhiyun * is executed rightly from next next command, so needs a dummy command
1196*4882a593Smuzhiyun * to next command of SFR clear command.
1197*4882a593Smuzhiyun */
1198*4882a593Smuzhiyun cmdlist->data[cmdlist->last++] = G2D_SOFT_RESET;
1199*4882a593Smuzhiyun cmdlist->data[cmdlist->last++] = G2D_SFRCLEAR;
1200*4882a593Smuzhiyun cmdlist->data[cmdlist->last++] = G2D_SRC_BASE_ADDR;
1201*4882a593Smuzhiyun cmdlist->data[cmdlist->last++] = 0;
1202*4882a593Smuzhiyun
1203*4882a593Smuzhiyun /*
1204*4882a593Smuzhiyun * 'LIST_HOLD' command should be set to the DMA_HOLD_CMD_REG
1205*4882a593Smuzhiyun * and GCF bit should be set to INTEN register if user wants
1206*4882a593Smuzhiyun * G2D interrupt event once current command list execution is
1207*4882a593Smuzhiyun * finished.
1208*4882a593Smuzhiyun * Otherwise only ACF bit should be set to INTEN register so
1209*4882a593Smuzhiyun * that one interrupt is occurred after all command lists
1210*4882a593Smuzhiyun * have been completed.
1211*4882a593Smuzhiyun */
1212*4882a593Smuzhiyun if (node->event) {
1213*4882a593Smuzhiyun cmdlist->data[cmdlist->last++] = G2D_INTEN;
1214*4882a593Smuzhiyun cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF | G2D_INTEN_GCF;
1215*4882a593Smuzhiyun cmdlist->data[cmdlist->last++] = G2D_DMA_HOLD_CMD;
1216*4882a593Smuzhiyun cmdlist->data[cmdlist->last++] = G2D_LIST_HOLD;
1217*4882a593Smuzhiyun } else {
1218*4882a593Smuzhiyun cmdlist->data[cmdlist->last++] = G2D_INTEN;
1219*4882a593Smuzhiyun cmdlist->data[cmdlist->last++] = G2D_INTEN_ACF;
1220*4882a593Smuzhiyun }
1221*4882a593Smuzhiyun
1222*4882a593Smuzhiyun /*
1223*4882a593Smuzhiyun * Check the size of cmdlist. The 2 that is added last comes from
1224*4882a593Smuzhiyun * the implicit G2D_BITBLT_START that is appended once we have
1225*4882a593Smuzhiyun * checked all the submitted commands.
1226*4882a593Smuzhiyun */
1227*4882a593Smuzhiyun size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
1228*4882a593Smuzhiyun if (size > G2D_CMDLIST_DATA_NUM) {
1229*4882a593Smuzhiyun dev_err(g2d->dev, "cmdlist size is too big\n");
1230*4882a593Smuzhiyun ret = -EINVAL;
1231*4882a593Smuzhiyun goto err_free_event;
1232*4882a593Smuzhiyun }
1233*4882a593Smuzhiyun
1234*4882a593Smuzhiyun cmd = (struct drm_exynos_g2d_cmd *)(unsigned long)req->cmd;
1235*4882a593Smuzhiyun
1236*4882a593Smuzhiyun if (copy_from_user(cmdlist->data + cmdlist->last,
1237*4882a593Smuzhiyun (void __user *)cmd,
1238*4882a593Smuzhiyun sizeof(*cmd) * req->cmd_nr)) {
1239*4882a593Smuzhiyun ret = -EFAULT;
1240*4882a593Smuzhiyun goto err_free_event;
1241*4882a593Smuzhiyun }
1242*4882a593Smuzhiyun cmdlist->last += req->cmd_nr * 2;
1243*4882a593Smuzhiyun
1244*4882a593Smuzhiyun ret = g2d_check_reg_offset(g2d, node, req->cmd_nr, false);
1245*4882a593Smuzhiyun if (ret < 0)
1246*4882a593Smuzhiyun goto err_free_event;
1247*4882a593Smuzhiyun
1248*4882a593Smuzhiyun node->buf_info.map_nr = req->cmd_buf_nr;
1249*4882a593Smuzhiyun if (req->cmd_buf_nr) {
1250*4882a593Smuzhiyun struct drm_exynos_g2d_cmd *cmd_buf;
1251*4882a593Smuzhiyun
1252*4882a593Smuzhiyun cmd_buf = (struct drm_exynos_g2d_cmd *)
1253*4882a593Smuzhiyun (unsigned long)req->cmd_buf;
1254*4882a593Smuzhiyun
1255*4882a593Smuzhiyun if (copy_from_user(cmdlist->data + cmdlist->last,
1256*4882a593Smuzhiyun (void __user *)cmd_buf,
1257*4882a593Smuzhiyun sizeof(*cmd_buf) * req->cmd_buf_nr)) {
1258*4882a593Smuzhiyun ret = -EFAULT;
1259*4882a593Smuzhiyun goto err_free_event;
1260*4882a593Smuzhiyun }
1261*4882a593Smuzhiyun cmdlist->last += req->cmd_buf_nr * 2;
1262*4882a593Smuzhiyun
1263*4882a593Smuzhiyun ret = g2d_check_reg_offset(g2d, node, req->cmd_buf_nr, true);
1264*4882a593Smuzhiyun if (ret < 0)
1265*4882a593Smuzhiyun goto err_free_event;
1266*4882a593Smuzhiyun
1267*4882a593Smuzhiyun ret = g2d_map_cmdlist_gem(g2d, node, drm_dev, file);
1268*4882a593Smuzhiyun if (ret < 0)
1269*4882a593Smuzhiyun goto err_unmap;
1270*4882a593Smuzhiyun }
1271*4882a593Smuzhiyun
1272*4882a593Smuzhiyun cmdlist->data[cmdlist->last++] = G2D_BITBLT_START;
1273*4882a593Smuzhiyun cmdlist->data[cmdlist->last++] = G2D_START_BITBLT;
1274*4882a593Smuzhiyun
1275*4882a593Smuzhiyun /* head */
1276*4882a593Smuzhiyun cmdlist->head = cmdlist->last / 2;
1277*4882a593Smuzhiyun
1278*4882a593Smuzhiyun /* tail */
1279*4882a593Smuzhiyun cmdlist->data[cmdlist->last] = 0;
1280*4882a593Smuzhiyun
1281*4882a593Smuzhiyun g2d_add_cmdlist_to_inuse(file_priv, node);
1282*4882a593Smuzhiyun
1283*4882a593Smuzhiyun return 0;
1284*4882a593Smuzhiyun
1285*4882a593Smuzhiyun err_unmap:
1286*4882a593Smuzhiyun g2d_unmap_cmdlist_gem(g2d, node, file);
1287*4882a593Smuzhiyun err_free_event:
1288*4882a593Smuzhiyun if (node->event)
1289*4882a593Smuzhiyun drm_event_cancel_free(drm_dev, &node->event->base);
1290*4882a593Smuzhiyun err:
1291*4882a593Smuzhiyun g2d_put_cmdlist(g2d, node);
1292*4882a593Smuzhiyun return ret;
1293*4882a593Smuzhiyun }
1294*4882a593Smuzhiyun
/*
 * exynos_g2d_exec_ioctl - submit the file's prepared command lists
 *
 * Moves the file's in-use command lists and pending events into a newly
 * allocated runqueue node, queues the node and wakes the runqueue worker.
 * For a synchronous request (req->async == 0) the call blocks until the
 * hardware has completed the node and then frees it; an asynchronous
 * node is freed later by the runqueue worker instead.
 */
int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
			  struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_private *priv = drm_dev->dev_private;
	struct g2d_data *g2d = dev_get_drvdata(priv->g2d_dev);
	struct drm_exynos_g2d_exec *req = data;
	struct g2d_runqueue_node *runqueue_node;
	struct list_head *run_cmdlist;
	struct list_head *event_list;

	runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL);
	if (!runqueue_node)
		return -ENOMEM;

	run_cmdlist = &runqueue_node->run_cmdlist;
	event_list = &runqueue_node->event_list;
	INIT_LIST_HEAD(run_cmdlist);
	INIT_LIST_HEAD(event_list);
	init_completion(&runqueue_node->complete);
	runqueue_node->async = req->async;

	/* Take over everything queued by set_cmdlist since the last exec. */
	list_splice_init(&file_priv->inuse_cmdlist, run_cmdlist);
	list_splice_init(&file_priv->event_list, event_list);

	if (list_empty(run_cmdlist)) {
		dev_err(g2d->dev, "there is no inuse cmdlist\n");
		kmem_cache_free(g2d->runqueue_slab, runqueue_node);
		return -EPERM;
	}

	mutex_lock(&g2d->runqueue_mutex);
	runqueue_node->pid = current->pid;
	runqueue_node->filp = file;
	list_add_tail(&runqueue_node->list, &g2d->runqueue);
	mutex_unlock(&g2d->runqueue_mutex);

	/* Let the runqueue know that there is work to do. */
	queue_work(g2d->g2d_workq, &g2d->runqueue_work);

	if (runqueue_node->async)
		goto out;

	/* Synchronous: wait for the hardware, then reclaim the node. */
	wait_for_completion(&runqueue_node->complete);
	g2d_free_runqueue_node(g2d, runqueue_node);

out:
	return 0;
}
1344*4882a593Smuzhiyun
g2d_open(struct drm_device * drm_dev,struct drm_file * file)1345*4882a593Smuzhiyun int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
1346*4882a593Smuzhiyun {
1347*4882a593Smuzhiyun struct drm_exynos_file_private *file_priv = file->driver_priv;
1348*4882a593Smuzhiyun
1349*4882a593Smuzhiyun INIT_LIST_HEAD(&file_priv->inuse_cmdlist);
1350*4882a593Smuzhiyun INIT_LIST_HEAD(&file_priv->event_list);
1351*4882a593Smuzhiyun INIT_LIST_HEAD(&file_priv->userptr_list);
1352*4882a593Smuzhiyun
1353*4882a593Smuzhiyun return 0;
1354*4882a593Smuzhiyun }
1355*4882a593Smuzhiyun
/*
 * g2d_close - per-file release hook
 *
 * Tears down all G2D state owned by the closing DRM file: removes its
 * queued runqueue nodes, waits for (or resets) the engine if it is
 * processing the file's work, unmaps any still-pending command lists and
 * releases the file's userptr mappings.
 */
void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
{
	struct drm_exynos_file_private *file_priv = file->driver_priv;
	struct exynos_drm_private *priv = drm_dev->dev_private;
	struct g2d_data *g2d;
	struct g2d_cmdlist_node *node, *n;

	/* The G2D component may not have been bound at all. */
	if (!priv->g2d_dev)
		return;

	g2d = dev_get_drvdata(priv->g2d_dev);

	/* Remove the runqueue nodes that belong to us. */
	mutex_lock(&g2d->runqueue_mutex);
	g2d_remove_runqueue_nodes(g2d, file);
	mutex_unlock(&g2d->runqueue_mutex);

	/*
	 * Wait for the runqueue worker to finish its current node.
	 * After this the engine should no longer be accessing any
	 * memory belonging to us.
	 */
	g2d_wait_finish(g2d, file);

	/*
	 * Even after the engine is idle, there might still be stale cmdlists
	 * (i.e. cmdlisst which we submitted but never executed) around, with
	 * their corresponding GEM/userptr buffers.
	 * Properly unmap these buffers here.
	 */
	mutex_lock(&g2d->cmdlist_mutex);
	list_for_each_entry_safe(node, n, &file_priv->inuse_cmdlist, list) {
		g2d_unmap_cmdlist_gem(g2d, node, file);
		list_move_tail(&node->list, &g2d->free_cmdlist);
	}
	mutex_unlock(&g2d->cmdlist_mutex);

	/* release all g2d_userptr in pool. */
	g2d_userptr_free_all(g2d, file);
}
1396*4882a593Smuzhiyun
g2d_bind(struct device * dev,struct device * master,void * data)1397*4882a593Smuzhiyun static int g2d_bind(struct device *dev, struct device *master, void *data)
1398*4882a593Smuzhiyun {
1399*4882a593Smuzhiyun struct g2d_data *g2d = dev_get_drvdata(dev);
1400*4882a593Smuzhiyun struct drm_device *drm_dev = data;
1401*4882a593Smuzhiyun struct exynos_drm_private *priv = drm_dev->dev_private;
1402*4882a593Smuzhiyun int ret;
1403*4882a593Smuzhiyun
1404*4882a593Smuzhiyun g2d->drm_dev = drm_dev;
1405*4882a593Smuzhiyun
1406*4882a593Smuzhiyun /* allocate dma-aware cmdlist buffer. */
1407*4882a593Smuzhiyun ret = g2d_init_cmdlist(g2d);
1408*4882a593Smuzhiyun if (ret < 0) {
1409*4882a593Smuzhiyun dev_err(dev, "cmdlist init failed\n");
1410*4882a593Smuzhiyun return ret;
1411*4882a593Smuzhiyun }
1412*4882a593Smuzhiyun
1413*4882a593Smuzhiyun ret = exynos_drm_register_dma(drm_dev, dev, &g2d->dma_priv);
1414*4882a593Smuzhiyun if (ret < 0) {
1415*4882a593Smuzhiyun dev_err(dev, "failed to enable iommu.\n");
1416*4882a593Smuzhiyun g2d_fini_cmdlist(g2d);
1417*4882a593Smuzhiyun return ret;
1418*4882a593Smuzhiyun }
1419*4882a593Smuzhiyun priv->g2d_dev = dev;
1420*4882a593Smuzhiyun
1421*4882a593Smuzhiyun dev_info(dev, "The Exynos G2D (ver %d.%d) successfully registered.\n",
1422*4882a593Smuzhiyun G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);
1423*4882a593Smuzhiyun return 0;
1424*4882a593Smuzhiyun }
1425*4882a593Smuzhiyun
/*
 * Component unbind callback.  Order matters here: the runqueue is
 * suspended and the engine drained before priv->g2d_dev is cleared,
 * and the worker is cancelled before the DMA/IOMMU registration is
 * torn down.
 */
static void g2d_unbind(struct device *dev, struct device *master, void *data)
{
	struct g2d_data *g2d = dev_get_drvdata(dev);
	struct drm_device *drm_dev = data;
	struct exynos_drm_private *priv = drm_dev->dev_private;

	/* Suspend operation and wait for engine idle. */
	set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
	g2d_wait_finish(g2d, NULL);
	/* Hide the device so g2d_close() and ioctls see the component as gone. */
	priv->g2d_dev = NULL;

	cancel_work_sync(&g2d->runqueue_work);
	exynos_drm_unregister_dma(g2d->drm_dev, dev, &g2d->dma_priv);
}
1440*4882a593Smuzhiyun
1441*4882a593Smuzhiyun static const struct component_ops g2d_component_ops = {
1442*4882a593Smuzhiyun .bind = g2d_bind,
1443*4882a593Smuzhiyun .unbind = g2d_unbind,
1444*4882a593Smuzhiyun };
1445*4882a593Smuzhiyun
g2d_probe(struct platform_device * pdev)1446*4882a593Smuzhiyun static int g2d_probe(struct platform_device *pdev)
1447*4882a593Smuzhiyun {
1448*4882a593Smuzhiyun struct device *dev = &pdev->dev;
1449*4882a593Smuzhiyun struct resource *res;
1450*4882a593Smuzhiyun struct g2d_data *g2d;
1451*4882a593Smuzhiyun int ret;
1452*4882a593Smuzhiyun
1453*4882a593Smuzhiyun g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL);
1454*4882a593Smuzhiyun if (!g2d)
1455*4882a593Smuzhiyun return -ENOMEM;
1456*4882a593Smuzhiyun
1457*4882a593Smuzhiyun g2d->runqueue_slab = kmem_cache_create("g2d_runqueue_slab",
1458*4882a593Smuzhiyun sizeof(struct g2d_runqueue_node), 0, 0, NULL);
1459*4882a593Smuzhiyun if (!g2d->runqueue_slab)
1460*4882a593Smuzhiyun return -ENOMEM;
1461*4882a593Smuzhiyun
1462*4882a593Smuzhiyun g2d->dev = dev;
1463*4882a593Smuzhiyun
1464*4882a593Smuzhiyun g2d->g2d_workq = create_singlethread_workqueue("g2d");
1465*4882a593Smuzhiyun if (!g2d->g2d_workq) {
1466*4882a593Smuzhiyun dev_err(dev, "failed to create workqueue\n");
1467*4882a593Smuzhiyun ret = -EINVAL;
1468*4882a593Smuzhiyun goto err_destroy_slab;
1469*4882a593Smuzhiyun }
1470*4882a593Smuzhiyun
1471*4882a593Smuzhiyun INIT_WORK(&g2d->runqueue_work, g2d_runqueue_worker);
1472*4882a593Smuzhiyun INIT_LIST_HEAD(&g2d->free_cmdlist);
1473*4882a593Smuzhiyun INIT_LIST_HEAD(&g2d->runqueue);
1474*4882a593Smuzhiyun
1475*4882a593Smuzhiyun mutex_init(&g2d->cmdlist_mutex);
1476*4882a593Smuzhiyun mutex_init(&g2d->runqueue_mutex);
1477*4882a593Smuzhiyun
1478*4882a593Smuzhiyun g2d->gate_clk = devm_clk_get(dev, "fimg2d");
1479*4882a593Smuzhiyun if (IS_ERR(g2d->gate_clk)) {
1480*4882a593Smuzhiyun dev_err(dev, "failed to get gate clock\n");
1481*4882a593Smuzhiyun ret = PTR_ERR(g2d->gate_clk);
1482*4882a593Smuzhiyun goto err_destroy_workqueue;
1483*4882a593Smuzhiyun }
1484*4882a593Smuzhiyun
1485*4882a593Smuzhiyun pm_runtime_use_autosuspend(dev);
1486*4882a593Smuzhiyun pm_runtime_set_autosuspend_delay(dev, 2000);
1487*4882a593Smuzhiyun pm_runtime_enable(dev);
1488*4882a593Smuzhiyun clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
1489*4882a593Smuzhiyun clear_bit(G2D_BIT_ENGINE_BUSY, &g2d->flags);
1490*4882a593Smuzhiyun
1491*4882a593Smuzhiyun res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
1492*4882a593Smuzhiyun
1493*4882a593Smuzhiyun g2d->regs = devm_ioremap_resource(dev, res);
1494*4882a593Smuzhiyun if (IS_ERR(g2d->regs)) {
1495*4882a593Smuzhiyun ret = PTR_ERR(g2d->regs);
1496*4882a593Smuzhiyun goto err_put_clk;
1497*4882a593Smuzhiyun }
1498*4882a593Smuzhiyun
1499*4882a593Smuzhiyun g2d->irq = platform_get_irq(pdev, 0);
1500*4882a593Smuzhiyun if (g2d->irq < 0) {
1501*4882a593Smuzhiyun ret = g2d->irq;
1502*4882a593Smuzhiyun goto err_put_clk;
1503*4882a593Smuzhiyun }
1504*4882a593Smuzhiyun
1505*4882a593Smuzhiyun ret = devm_request_irq(dev, g2d->irq, g2d_irq_handler, 0,
1506*4882a593Smuzhiyun "drm_g2d", g2d);
1507*4882a593Smuzhiyun if (ret < 0) {
1508*4882a593Smuzhiyun dev_err(dev, "irq request failed\n");
1509*4882a593Smuzhiyun goto err_put_clk;
1510*4882a593Smuzhiyun }
1511*4882a593Smuzhiyun
1512*4882a593Smuzhiyun g2d->max_pool = MAX_POOL;
1513*4882a593Smuzhiyun
1514*4882a593Smuzhiyun platform_set_drvdata(pdev, g2d);
1515*4882a593Smuzhiyun
1516*4882a593Smuzhiyun ret = component_add(dev, &g2d_component_ops);
1517*4882a593Smuzhiyun if (ret < 0) {
1518*4882a593Smuzhiyun dev_err(dev, "failed to register drm g2d device\n");
1519*4882a593Smuzhiyun goto err_put_clk;
1520*4882a593Smuzhiyun }
1521*4882a593Smuzhiyun
1522*4882a593Smuzhiyun return 0;
1523*4882a593Smuzhiyun
1524*4882a593Smuzhiyun err_put_clk:
1525*4882a593Smuzhiyun pm_runtime_disable(dev);
1526*4882a593Smuzhiyun err_destroy_workqueue:
1527*4882a593Smuzhiyun destroy_workqueue(g2d->g2d_workq);
1528*4882a593Smuzhiyun err_destroy_slab:
1529*4882a593Smuzhiyun kmem_cache_destroy(g2d->runqueue_slab);
1530*4882a593Smuzhiyun return ret;
1531*4882a593Smuzhiyun }
1532*4882a593Smuzhiyun
/*
 * Platform remove: mirror of g2d_probe().  component_del() runs
 * g2d_unbind() first, which suspends the runqueue and drains the
 * engine, so the remaining teardown needs no locking.
 */
static int g2d_remove(struct platform_device *pdev)
{
	struct g2d_data *g2d = platform_get_drvdata(pdev);

	component_del(&pdev->dev, &g2d_component_ops);

	/* There should be no locking needed here. */
	g2d_remove_runqueue_nodes(g2d, NULL);

	pm_runtime_dont_use_autosuspend(&pdev->dev);
	pm_runtime_disable(&pdev->dev);

	g2d_fini_cmdlist(g2d);
	destroy_workqueue(g2d->g2d_workq);
	kmem_cache_destroy(g2d->runqueue_slab);

	return 0;
}
1551*4882a593Smuzhiyun
1552*4882a593Smuzhiyun #ifdef CONFIG_PM_SLEEP
/* System sleep: park the runqueue so no new work reaches the engine. */
static int g2d_suspend(struct device *dev)
{
	struct g2d_data *g2d = dev_get_drvdata(dev);

	/*
	 * Suspend the runqueue worker operation and wait until the G2D
	 * engine is idle.
	 */
	set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
	g2d_wait_finish(g2d, NULL);
	/* Make sure the worker has fully drained before we power down. */
	flush_work(&g2d->runqueue_work);

	return 0;
}
1567*4882a593Smuzhiyun
/* System resume: re-enable the runqueue and kick the worker again. */
static int g2d_resume(struct device *dev)
{
	struct g2d_data *g2d = dev_get_drvdata(dev);

	clear_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
	/* Restart processing of any work queued while we were asleep. */
	queue_work(g2d->g2d_workq, &g2d->runqueue_work);

	return 0;
}
1577*4882a593Smuzhiyun #endif
1578*4882a593Smuzhiyun
1579*4882a593Smuzhiyun #ifdef CONFIG_PM
/* Runtime PM: gate the engine clock while the device is idle. */
static int g2d_runtime_suspend(struct device *dev)
{
	struct g2d_data *g2d = dev_get_drvdata(dev);

	clk_disable_unprepare(g2d->gate_clk);

	return 0;
}
1588*4882a593Smuzhiyun
/*
 * Runtime PM: re-enable the gate clock before the engine is used.
 * A clock failure is only warned about; the (negative) errno is
 * propagated to the runtime-PM core.
 */
static int g2d_runtime_resume(struct device *dev)
{
	struct g2d_data *g2d = dev_get_drvdata(dev);
	int ret;

	ret = clk_prepare_enable(g2d->gate_clk);
	if (ret < 0)
		dev_warn(dev, "failed to enable clock.\n");

	return ret;
}
1600*4882a593Smuzhiyun #endif
1601*4882a593Smuzhiyun
1602*4882a593Smuzhiyun static const struct dev_pm_ops g2d_pm_ops = {
1603*4882a593Smuzhiyun SET_SYSTEM_SLEEP_PM_OPS(g2d_suspend, g2d_resume)
1604*4882a593Smuzhiyun SET_RUNTIME_PM_OPS(g2d_runtime_suspend, g2d_runtime_resume, NULL)
1605*4882a593Smuzhiyun };
1606*4882a593Smuzhiyun
1607*4882a593Smuzhiyun static const struct of_device_id exynos_g2d_match[] = {
1608*4882a593Smuzhiyun { .compatible = "samsung,exynos5250-g2d" },
1609*4882a593Smuzhiyun { .compatible = "samsung,exynos4212-g2d" },
1610*4882a593Smuzhiyun {},
1611*4882a593Smuzhiyun };
1612*4882a593Smuzhiyun MODULE_DEVICE_TABLE(of, exynos_g2d_match);
1613*4882a593Smuzhiyun
1614*4882a593Smuzhiyun struct platform_driver g2d_driver = {
1615*4882a593Smuzhiyun .probe = g2d_probe,
1616*4882a593Smuzhiyun .remove = g2d_remove,
1617*4882a593Smuzhiyun .driver = {
1618*4882a593Smuzhiyun .name = "exynos-drm-g2d",
1619*4882a593Smuzhiyun .owner = THIS_MODULE,
1620*4882a593Smuzhiyun .pm = &g2d_pm_ops,
1621*4882a593Smuzhiyun .of_match_table = exynos_g2d_match,
1622*4882a593Smuzhiyun },
1623*4882a593Smuzhiyun };
1624