/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright (c) 2009-2013, NVIDIA Corporation. All rights reserved.
 */

#ifndef __LINUX_HOST1X_H
#define __LINUX_HOST1X_H

#include <linux/device.h>
#include <linux/types.h>

enum host1x_class {
	HOST1X_CLASS_HOST1X = 0x1,
	HOST1X_CLASS_GR2D = 0x51,
	HOST1X_CLASS_GR2D_SB = 0x52,
	HOST1X_CLASS_VIC = 0x5D,
	HOST1X_CLASS_GR3D = 0x60,
};

struct host1x;
struct host1x_client;
struct iommu_group;

u64 host1x_get_dma_mask(struct host1x *host1x);

/**
 * struct host1x_client_ops - host1x client operations
 * @init: host1x client initialization code
 * @exit: host1x client tear down code
 * @suspend: host1x client suspend code
 * @resume: host1x client resume code
 */
struct host1x_client_ops {
	int (*init)(struct host1x_client *client);
	int (*exit)(struct host1x_client *client);
	int (*suspend)(struct host1x_client *client);
	int (*resume)(struct host1x_client *client);
};
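
/*
 * Example (illustrative sketch, all "foo" names are hypothetical): a unit
 * driver implements these callbacks and points its client at them before
 * registering:
 *
 *	static const struct host1x_client_ops foo_client_ops = {
 *		.init = foo_client_init,
 *		.exit = foo_client_exit,
 *		.suspend = foo_client_suspend,
 *		.resume = foo_client_resume,
 *	};
 */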

/**
 * struct host1x_client - host1x client structure
 * @list: list node for the host1x client
 * @host: pointer to struct device representing the host1x controller
 * @dev: pointer to struct device backing this host1x client
 * @group: IOMMU group that this client is a member of
 * @ops: host1x client operations
 * @class: host1x class represented by this client
 * @channel: host1x channel associated with this client
 * @syncpts: array of syncpoints requested for this client
 * @num_syncpts: number of syncpoints requested for this client
 * @parent: pointer to parent structure
 * @usecount: reference count for this structure
 * @lock: mutex for mutually exclusive concurrency
 */
struct host1x_client {
	struct list_head list;
	struct device *host;
	struct device *dev;
	struct iommu_group *group;

	const struct host1x_client_ops *ops;

	enum host1x_class class;
	struct host1x_channel *channel;

	struct host1x_syncpt **syncpts;
	unsigned int num_syncpts;

	struct host1x_client *parent;
	unsigned int usecount;
	struct mutex lock;
};

/*
 * host1x buffer objects
 */

struct host1x_bo;
struct sg_table;

struct host1x_bo_ops {
	struct host1x_bo *(*get)(struct host1x_bo *bo);
	void (*put)(struct host1x_bo *bo);
	struct sg_table *(*pin)(struct device *dev, struct host1x_bo *bo,
				dma_addr_t *phys);
	void (*unpin)(struct device *dev, struct sg_table *sgt);
	void *(*mmap)(struct host1x_bo *bo);
	void (*munmap)(struct host1x_bo *bo, void *addr);
};

struct host1x_bo {
	const struct host1x_bo_ops *ops;
};

static inline void host1x_bo_init(struct host1x_bo *bo,
				  const struct host1x_bo_ops *ops)
{
	bo->ops = ops;
}

static inline struct host1x_bo *host1x_bo_get(struct host1x_bo *bo)
{
	return bo->ops->get(bo);
}

static inline void host1x_bo_put(struct host1x_bo *bo)
{
	bo->ops->put(bo);
}

static inline struct sg_table *host1x_bo_pin(struct device *dev,
					     struct host1x_bo *bo,
					     dma_addr_t *phys)
{
	return bo->ops->pin(dev, bo, phys);
}

static inline void host1x_bo_unpin(struct device *dev, struct host1x_bo *bo,
				   struct sg_table *sgt)
{
	bo->ops->unpin(dev, sgt);
}

static inline void *host1x_bo_mmap(struct host1x_bo *bo)
{
	return bo->ops->mmap(bo);
}

static inline void host1x_bo_munmap(struct host1x_bo *bo, void *addr)
{
	bo->ops->munmap(bo, addr);
}
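
/*
 * Illustrative sketch (hypothetical "foo" driver, not part of this API):
 * a driver embeds struct host1x_bo in its own buffer object and installs
 * its operations with host1x_bo_init(), after which the helpers above
 * dispatch through that table:
 *
 *	struct foo_bo {
 *		struct host1x_bo base;
 *		void *vaddr;
 *	};
 *
 *	host1x_bo_init(&bo->base, &foo_bo_ops);
 *	sgt = host1x_bo_pin(dev, &bo->base, &phys);
 *	...
 *	host1x_bo_unpin(dev, &bo->base, sgt);
 */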

/*
 * host1x syncpoints
 */

#define HOST1X_SYNCPT_CLIENT_MANAGED	(1 << 0)
#define HOST1X_SYNCPT_HAS_BASE		(1 << 1)

struct host1x_syncpt_base;
struct host1x_syncpt;
struct host1x;

struct host1x_syncpt *host1x_syncpt_get(struct host1x *host, u32 id);
u32 host1x_syncpt_id(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_min(struct host1x_syncpt *sp);
u32 host1x_syncpt_read_max(struct host1x_syncpt *sp);
u32 host1x_syncpt_read(struct host1x_syncpt *sp);
int host1x_syncpt_incr(struct host1x_syncpt *sp);
u32 host1x_syncpt_incr_max(struct host1x_syncpt *sp, u32 incrs);
int host1x_syncpt_wait(struct host1x_syncpt *sp, u32 thresh, long timeout,
		       u32 *value);
struct host1x_syncpt *host1x_syncpt_request(struct host1x_client *client,
					    unsigned long flags);
void host1x_syncpt_free(struct host1x_syncpt *sp);

struct host1x_syncpt_base *host1x_syncpt_get_base(struct host1x_syncpt *sp);
u32 host1x_syncpt_base_id(struct host1x_syncpt_base *base);
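
/*
 * Typical usage (sketch only, error handling omitted): a registered client
 * requests a syncpoint, computes the threshold for the increments it is
 * about to submit and waits for that threshold to be reached:
 *
 *	sp = host1x_syncpt_request(client, HOST1X_SYNCPT_CLIENT_MANAGED);
 *	threshold = host1x_syncpt_incr_max(sp, 1);
 *	... submit work that performs the increment ...
 *	err = host1x_syncpt_wait(sp, threshold, timeout, &value);
 *	host1x_syncpt_free(sp);
 */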

/*
 * host1x channel
 */

struct host1x_channel;
struct host1x_job;

struct host1x_channel *host1x_channel_request(struct host1x_client *client);
struct host1x_channel *host1x_channel_get(struct host1x_channel *channel);
void host1x_channel_put(struct host1x_channel *channel);
int host1x_job_submit(struct host1x_job *job);

/*
 * host1x job
 */

#define HOST1X_RELOC_READ	(1 << 0)
#define HOST1X_RELOC_WRITE	(1 << 1)

struct host1x_reloc {
	struct {
		struct host1x_bo *bo;
		unsigned long offset;
	} cmdbuf;
	struct {
		struct host1x_bo *bo;
		unsigned long offset;
	} target;
	unsigned long shift;
	unsigned long flags;
};

struct host1x_job {
	/* When refcount goes to zero, job can be freed */
	struct kref ref;

	/* List entry */
	struct list_head list;

	/* Channel where job is submitted to */
	struct host1x_channel *channel;

	/* client where the job originated */
	struct host1x_client *client;

	/* Gathers and their memory */
	struct host1x_job_gather *gathers;
	unsigned int num_gathers;

	/* Array of handles to be pinned & unpinned */
	struct host1x_reloc *relocs;
	unsigned int num_relocs;
	struct host1x_job_unpin_data *unpins;
	unsigned int num_unpins;

	dma_addr_t *addr_phys;
	dma_addr_t *gather_addr_phys;
	dma_addr_t *reloc_addr_phys;

	/* Sync point id, number of increments and end related to the submit */
	u32 syncpt_id;
	u32 syncpt_incrs;
	u32 syncpt_end;

	/* Maximum time to wait for this job */
	unsigned int timeout;

	/* Index and number of slots used in the push buffer */
	unsigned int first_get;
	unsigned int num_slots;

	/* Copy of gathers */
	size_t gather_copy_size;
	dma_addr_t gather_copy;
	u8 *gather_copy_mapped;

	/* Check if register is marked as an address reg */
	int (*is_addr_reg)(struct device *dev, u32 class, u32 reg);

	/* Check if class belongs to the unit */
	int (*is_valid_class)(u32 class);

	/* Request a SETCLASS to this class */
	u32 class;

	/* Add a channel wait for previous ops to complete */
	bool serialize;
};

struct host1x_job *host1x_job_alloc(struct host1x_channel *ch,
				    u32 num_cmdbufs, u32 num_relocs);
void host1x_job_add_gather(struct host1x_job *job, struct host1x_bo *bo,
			   unsigned int words, unsigned int offset);
struct host1x_job *host1x_job_get(struct host1x_job *job);
void host1x_job_put(struct host1x_job *job);
int host1x_job_pin(struct host1x_job *job, struct device *dev);
void host1x_job_unpin(struct host1x_job *job);
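
/*
 * Typical submission flow (sketch only, error handling omitted; "bo",
 * "num_words" and "dev" are placeholders): allocate a job on a previously
 * requested channel, add the command buffer as a gather, pin the backing
 * memory, submit and drop the local reference once done:
 *
 *	channel = host1x_channel_request(client);
 *	job = host1x_job_alloc(channel, 1, 0);
 *	host1x_job_add_gather(job, bo, num_words, 0);
 *	err = host1x_job_pin(job, dev);
 *	err = host1x_job_submit(job);
 *	...
 *	host1x_job_unpin(job);
 *	host1x_job_put(job);
 */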

/*
 * subdevice probe infrastructure
 */

struct host1x_device;

/**
 * struct host1x_driver - host1x logical device driver
 * @driver: core driver
 * @subdevs: table of OF device IDs matching subdevices for this driver
 * @list: list node for the driver
 * @probe: called when the host1x logical device is probed
 * @remove: called when the host1x logical device is removed
 * @shutdown: called when the host1x logical device is shut down
 */
struct host1x_driver {
	struct device_driver driver;

	const struct of_device_id *subdevs;
	struct list_head list;

	int (*probe)(struct host1x_device *device);
	int (*remove)(struct host1x_device *device);
	void (*shutdown)(struct host1x_device *device);
};

static inline struct host1x_driver *
to_host1x_driver(struct device_driver *driver)
{
	return container_of(driver, struct host1x_driver, driver);
}

int host1x_driver_register_full(struct host1x_driver *driver,
				struct module *owner);
void host1x_driver_unregister(struct host1x_driver *driver);

#define host1x_driver_register(driver) \
	host1x_driver_register_full(driver, THIS_MODULE)

struct host1x_device {
	struct host1x_driver *driver;
	struct list_head list;
	struct device dev;

	struct mutex subdevs_lock;
	struct list_head subdevs;
	struct list_head active;

	struct mutex clients_lock;
	struct list_head clients;

	bool registered;

	struct device_dma_parameters dma_parms;
};

static inline struct host1x_device *to_host1x_device(struct device *dev)
{
	return container_of(dev, struct host1x_device, dev);
}

int host1x_device_init(struct host1x_device *device);
int host1x_device_exit(struct host1x_device *device);

void __host1x_client_init(struct host1x_client *client, struct lock_class_key *key);
void host1x_client_exit(struct host1x_client *client);

#define host1x_client_init(client)			\
	({						\
		static struct lock_class_key __key;	\
		__host1x_client_init(client, &__key);	\
	})

int __host1x_client_register(struct host1x_client *client);

/*
 * Note that this wrapper calls __host1x_client_init() for compatibility
 * with existing callers. Callers that want to separately initialize and
 * register a host1x client must first initialize using either of the
 * __host1x_client_init() or host1x_client_init() functions and then use
 * the low-level __host1x_client_register() function to avoid the client
 * getting reinitialized.
 */
#define host1x_client_register(client)			\
	({						\
		static struct lock_class_key __key;	\
		__host1x_client_init(client, &__key);	\
		__host1x_client_register(client);	\
	})
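
/*
 * Usage sketch (hypothetical "foo" driver): most callers register in one
 * step, while callers that need to initialize early and register later use
 * the split variants described above:
 *
 *	err = host1x_client_register(&foo->client);
 *
 * or, equivalently:
 *
 *	host1x_client_init(&foo->client);
 *	...
 *	err = __host1x_client_register(&foo->client);
 */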

int host1x_client_unregister(struct host1x_client *client);

int host1x_client_suspend(struct host1x_client *client);
int host1x_client_resume(struct host1x_client *client);

struct tegra_mipi_device;

struct tegra_mipi_device *tegra_mipi_request(struct device *device,
					     struct device_node *np);
void tegra_mipi_free(struct tegra_mipi_device *device);
int tegra_mipi_enable(struct tegra_mipi_device *device);
int tegra_mipi_disable(struct tegra_mipi_device *device);
int tegra_mipi_start_calibration(struct tegra_mipi_device *device);
int tegra_mipi_finish_calibration(struct tegra_mipi_device *device);

#endif