// SPDX-License-Identifier: GPL-2.0
/*
 * Intel(R) Trace Hub Global Trace Hub
 *
 * Copyright (C) 2014-2015 Intel Corporation.
 */

#define pr_fmt(fmt)	KBUILD_MODNAME ": " fmt

#include <linux/types.h>
#include <linux/module.h>
#include <linux/device.h>
#include <linux/io.h>
#include <linux/mm.h>
#include <linux/slab.h>
#include <linux/bitmap.h>
#include <linux/pm_runtime.h>

#include "intel_th.h"
#include "gth.h"

struct gth_device;

/**
 * struct gth_output - GTH view on an output port
 * @gth:	backlink to the GTH device
 * @output:	link to output device's output descriptor
 * @index:	output port number
 * @port_type:	one of GTH_* port type values
 * @master:	bitmap of masters configured for this output
 */
struct gth_output {
	struct gth_device	*gth;
	struct intel_th_output	*output;
	unsigned int		index;
	unsigned int		port_type;
	DECLARE_BITMAP(master, TH_CONFIGURABLE_MASTERS + 1);
};

/**
 * struct gth_device - GTH device
 * @dev:	driver core's device
 * @base:	register window base address
 * @output_group:	attributes describing output ports
 * @master_group:	attributes describing master assignments
 * @output:		output ports
 * @master:		master/output port assignments
 * @gth_lock:		serializes accesses to GTH bits
 */
struct gth_device {
	struct device		*dev;
	void __iomem		*base;

	struct attribute_group	output_group;
	struct attribute_group	master_group;
	struct gth_output	output[TH_POSSIBLE_OUTPUTS];
	signed char		master[TH_CONFIGURABLE_MASTERS + 1];
	spinlock_t		gth_lock;
};

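/*
 * Output port configuration lives in REG_GTH_GTHOPT0/1: one byte per port,
 * ports 0..3 in GTHOPT0 and ports 4..7 in GTHOPT1.  The SMCR registers pack
 * two 16-bit smcfreq fields per register.  The helpers below read-modify-write
 * only the field that belongs to @port.
 */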
static void gth_output_set(struct gth_device *gth, int port,
			   unsigned int config)
{
	unsigned long reg = port & 4 ? REG_GTH_GTHOPT1 : REG_GTH_GTHOPT0;
	u32 val;
	int shift = (port & 3) * 8;

	val = ioread32(gth->base + reg);
	val &= ~(0xff << shift);
	val |= config << shift;
	iowrite32(val, gth->base + reg);
}

static unsigned int gth_output_get(struct gth_device *gth, int port)
{
	unsigned long reg = port & 4 ? REG_GTH_GTHOPT1 : REG_GTH_GTHOPT0;
	u32 val;
	int shift = (port & 3) * 8;

	val = ioread32(gth->base + reg);
	val &= 0xff << shift;
	val >>= shift;

	return val;
}

static void gth_smcfreq_set(struct gth_device *gth, int port,
			    unsigned int freq)
{
	unsigned long reg = REG_GTH_SMCR0 + ((port / 2) * 4);
	int shift = (port & 1) * 16;
	u32 val;

	val = ioread32(gth->base + reg);
	val &= ~(0xffff << shift);
	val |= freq << shift;
	iowrite32(val, gth->base + reg);
}

static unsigned int gth_smcfreq_get(struct gth_device *gth, int port)
{
	unsigned long reg = REG_GTH_SMCR0 + ((port / 2) * 4);
	int shift = (port & 1) * 16;
	u32 val;

	val = ioread32(gth->base + reg);
	val &= 0xffff << shift;
	val >>= shift;

	return val;
}

/*
 * "masters" attribute group
 */

struct master_attribute {
	struct device_attribute	attr;
	struct gth_device	*gth;
	unsigned int		master;
};

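/*
 * Master routing: each 32-bit SWDEST register holds eight 4-bit fields, one
 * per master (bit 3 enables the route, bits 0..2 select the output port), so
 * master M lives at byte offset REG_GTH_SWDEST0 + ((M >> 1) & ~3) with a
 * nibble shift of (M & 7) * 4.  Masters above the configurable range all
 * share the single GSWTDEST setting.  Passing a negative @port clears the
 * route.
 */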
static void
gth_master_set(struct gth_device *gth, unsigned int master, int port)
{
	unsigned int reg = REG_GTH_SWDEST0 + ((master >> 1) & ~3u);
	unsigned int shift = (master & 0x7) * 4;
	u32 val;

	if (master >= 256) {
		reg = REG_GTH_GSWTDEST;
		shift = 0;
	}

	val = ioread32(gth->base + reg);
	val &= ~(0xf << shift);
	if (port >= 0)
		val |= (0x8 | port) << shift;
	iowrite32(val, gth->base + reg);
}

static ssize_t master_attr_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct master_attribute *ma =
		container_of(attr, struct master_attribute, attr);
	struct gth_device *gth = ma->gth;
	size_t count;
	int port;

	spin_lock(&gth->gth_lock);
	port = gth->master[ma->master];
	spin_unlock(&gth->gth_lock);

	if (port >= 0)
		count = snprintf(buf, PAGE_SIZE, "%x\n", port);
	else
		count = snprintf(buf, PAGE_SIZE, "disabled\n");

	return count;
}

static ssize_t master_attr_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct master_attribute *ma =
		container_of(attr, struct master_attribute, attr);
	struct gth_device *gth = ma->gth;
	int old_port, port;

	if (kstrtoint(buf, 10, &port) < 0)
		return -EINVAL;

	if (port >= TH_POSSIBLE_OUTPUTS || port < -1)
		return -EINVAL;

	spin_lock(&gth->gth_lock);

	/* disconnect from the previous output port, if any */
	old_port = gth->master[ma->master];
	if (old_port >= 0) {
		gth->master[ma->master] = -1;
		clear_bit(ma->master, gth->output[old_port].master);

		/*
		 * if the port is active, program this setting,
		 * implies that runtime PM is on
		 */
		if (gth->output[old_port].output->active)
			gth_master_set(gth, ma->master, -1);
	}

	/* connect to the new output port, if any */
	if (port >= 0) {
		/* check if there's a driver for this port */
		if (!gth->output[port].output) {
			count = -ENODEV;
			goto unlock;
		}

		set_bit(ma->master, gth->output[port].master);

		/* if the port is active, program this setting, see above */
		if (gth->output[port].output->active)
			gth_master_set(gth, ma->master, port);
	}

	gth->master[ma->master] = port;

unlock:
	spin_unlock(&gth->gth_lock);

	return count;
}

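/*
 * Illustrative only: with the groups created further below, master routing
 * is driven from sysfs.  Assuming the GTH device shows up as 0-gth on the
 * intel_th bus (the exact path depends on the platform), a session might
 * look like:
 *
 *	# route master 32 to output port 0, read it back, then disable it
 *	echo 0  > /sys/bus/intel_th/devices/0-gth/masters/32
 *	cat       /sys/bus/intel_th/devices/0-gth/masters/32
 *	echo -1 > /sys/bus/intel_th/devices/0-gth/masters/32
 *
 * The last attribute in the group carries a "+" suffix and stands for all
 * masters above the configurable range, which share one GSWTDEST setting.
 */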
struct output_attribute {
	struct device_attribute attr;
	struct gth_device	*gth;
	unsigned int		port;
	unsigned int		parm;
};

#define OUTPUT_PARM(_name, _mask, _r, _w, _what)			\
	[TH_OUTPUT_PARM(_name)] = { .name = __stringify(_name),	\
				    .get = gth_ ## _what ## _get,	\
				    .set = gth_ ## _what ## _set,	\
				    .mask = (_mask),			\
				    .readable = (_r),			\
				    .writable = (_w) }

static const struct output_parm {
	const char	*name;
	unsigned int	(*get)(struct gth_device *gth, int port);
	void		(*set)(struct gth_device *gth, int port,
			       unsigned int val);
	unsigned int	mask;
	unsigned int	readable : 1,
			writable : 1;
} output_parms[] = {
	OUTPUT_PARM(port,	0x7,	1, 0, output),
	OUTPUT_PARM(null,	BIT(3),	1, 1, output),
	OUTPUT_PARM(drop,	BIT(4),	1, 1, output),
	OUTPUT_PARM(reset,	BIT(5),	1, 0, output),
	OUTPUT_PARM(flush,	BIT(7),	0, 1, output),
	OUTPUT_PARM(smcfreq,	0xffff,	1, 1, smcfreq),
};

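/*
 * The table above also documents the layout of the per-port configuration
 * byte handled by gth_output_set()/gth_output_get(): bits 0..2 hold the port
 * type (a GTH_* value, GTH_NONE meaning the port is unused), bit 3 is the
 * "null" control, bit 4 "drop", bit 5 the read-only "reset" status and bit 7
 * the write-only "flush" control.  "smcfreq" is a separate 16-bit field
 * accessed through the SMCR registers, not part of this byte.
 */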
static void
gth_output_parm_set(struct gth_device *gth, int port, unsigned int parm,
		    unsigned int val)
{
	unsigned int config = output_parms[parm].get(gth, port);
	unsigned int mask = output_parms[parm].mask;
	unsigned int shift = __ffs(mask);

	config &= ~mask;
	config |= (val << shift) & mask;
	output_parms[parm].set(gth, port, config);
}

static unsigned int
gth_output_parm_get(struct gth_device *gth, int port, unsigned int parm)
{
	unsigned int config = output_parms[parm].get(gth, port);
	unsigned int mask = output_parms[parm].mask;
	unsigned int shift = __ffs(mask);

	config &= mask;
	config >>= shift;
	return config;
}

/*
 * Reset outputs and sources
 */
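/*
 * If a debugger owns the trace hub (SCRPD_DEBUGGER_IN_USE is set in the
 * scratchpad register), the reset below backs off with -EBUSY and the probe
 * path switches the driver into host mode instead of touching the hardware.
 * Otherwise, output port configuration, master routes and source controls
 * are brought back to a known state and the CTS is armed for a single
 * trigger, which intel_th_gth_switch() later uses to generate a switch
 * window.
 */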
static int intel_th_gth_reset(struct gth_device *gth)
{
	u32 reg;
	int port, i;

	reg = ioread32(gth->base + REG_GTH_SCRPD0);
	if (reg & SCRPD_DEBUGGER_IN_USE)
		return -EBUSY;

	/* Always save/restore STH and TU registers in S0ix entry/exit */
	reg |= SCRPD_STH_IS_ENABLED | SCRPD_TRIGGER_IS_ENABLED;
	iowrite32(reg, gth->base + REG_GTH_SCRPD0);

	/* output ports */
	for (port = 0; port < 8; port++) {
		if (gth_output_parm_get(gth, port, TH_OUTPUT_PARM(port)) ==
		    GTH_NONE)
			continue;

		gth_output_set(gth, port, 0);
		gth_smcfreq_set(gth, port, 16);
	}
	/* disable overrides */
	iowrite32(0, gth->base + REG_GTH_DESTOVR);

	/* masters swdest_0~31 and gswdest */
	for (i = 0; i < 33; i++)
		iowrite32(0, gth->base + REG_GTH_SWDEST0 + i * 4);

	/* sources */
	iowrite32(0, gth->base + REG_GTH_SCR);
	iowrite32(0xfc, gth->base + REG_GTH_SCR2);

	/* setup CTS for single trigger */
	iowrite32(CTS_EVENT_ENABLE_IF_ANYTHING, gth->base + REG_CTS_C0S0_EN);
	iowrite32(CTS_ACTION_CONTROL_SET_STATE(CTS_STATE_IDLE) |
		  CTS_ACTION_CONTROL_TRIGGER, gth->base + REG_CTS_C0S0_ACT);

	return 0;
}

/*
 * "outputs" attribute group
 */

static ssize_t output_attr_show(struct device *dev,
				struct device_attribute *attr,
				char *buf)
{
	struct output_attribute *oa =
		container_of(attr, struct output_attribute, attr);
	struct gth_device *gth = oa->gth;
	size_t count;

	pm_runtime_get_sync(dev);

	spin_lock(&gth->gth_lock);
	count = snprintf(buf, PAGE_SIZE, "%x\n",
			 gth_output_parm_get(gth, oa->port, oa->parm));
	spin_unlock(&gth->gth_lock);

	pm_runtime_put(dev);

	return count;
}

static ssize_t output_attr_store(struct device *dev,
				 struct device_attribute *attr,
				 const char *buf, size_t count)
{
	struct output_attribute *oa =
		container_of(attr, struct output_attribute, attr);
	struct gth_device *gth = oa->gth;
	unsigned int config;

	if (kstrtouint(buf, 16, &config) < 0)
		return -EINVAL;

	pm_runtime_get_sync(dev);

	spin_lock(&gth->gth_lock);
	gth_output_parm_set(gth, oa->port, oa->parm, config);
	spin_unlock(&gth->gth_lock);

	pm_runtime_put(dev);

	return count;
}

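/*
 * Illustrative only: the attributes built below are named "<port>_<parm>"
 * and take hexadecimal values, so, assuming the same hypothetical
 * /sys/bus/intel_th/devices/0-gth path as in the masters example above,
 * inspecting and tweaking port 0 could look like:
 *
 *	cat /sys/bus/intel_th/devices/0-gth/outputs/0_port	# port type
 *	echo 10 > /sys/bus/intel_th/devices/0-gth/outputs/0_smcfreq
 *
 * Writes land in gth_output_parm_set(), which masks and shifts the value
 * into the right field before writing it back to the hardware.
 */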
static int intel_th_master_attributes(struct gth_device *gth)
{
	struct master_attribute *master_attrs;
	struct attribute **attrs;
	int i, nattrs = TH_CONFIGURABLE_MASTERS + 2;

	attrs = devm_kcalloc(gth->dev, nattrs, sizeof(void *), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	master_attrs = devm_kcalloc(gth->dev, nattrs,
				    sizeof(struct master_attribute),
				    GFP_KERNEL);
	if (!master_attrs)
		return -ENOMEM;

	for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++) {
		char *name;

		name = devm_kasprintf(gth->dev, GFP_KERNEL, "%d%s", i,
				      i == TH_CONFIGURABLE_MASTERS ? "+" : "");
		if (!name)
			return -ENOMEM;

		master_attrs[i].attr.attr.name = name;
		master_attrs[i].attr.attr.mode = S_IRUGO | S_IWUSR;
		master_attrs[i].attr.show = master_attr_show;
		master_attrs[i].attr.store = master_attr_store;

		sysfs_attr_init(&master_attrs[i].attr.attr);
		attrs[i] = &master_attrs[i].attr.attr;

		master_attrs[i].gth = gth;
		master_attrs[i].master = i;
	}

	gth->master_group.name	= "masters";
	gth->master_group.attrs = attrs;

	return sysfs_create_group(&gth->dev->kobj, &gth->master_group);
}

static int intel_th_output_attributes(struct gth_device *gth)
{
	struct output_attribute *out_attrs;
	struct attribute **attrs;
	int i, j, nouts = TH_POSSIBLE_OUTPUTS;
	int nparms = ARRAY_SIZE(output_parms);
	int nattrs = nouts * nparms + 1;

	attrs = devm_kcalloc(gth->dev, nattrs, sizeof(void *), GFP_KERNEL);
	if (!attrs)
		return -ENOMEM;

	out_attrs = devm_kcalloc(gth->dev, nattrs,
				 sizeof(struct output_attribute),
				 GFP_KERNEL);
	if (!out_attrs)
		return -ENOMEM;

	for (i = 0; i < nouts; i++) {
		for (j = 0; j < nparms; j++) {
			unsigned int idx = i * nparms + j;
			char *name;

			name = devm_kasprintf(gth->dev, GFP_KERNEL, "%d_%s", i,
					      output_parms[j].name);
			if (!name)
				return -ENOMEM;

			out_attrs[idx].attr.attr.name = name;

			if (output_parms[j].readable) {
				out_attrs[idx].attr.attr.mode |= S_IRUGO;
				out_attrs[idx].attr.show = output_attr_show;
			}

			if (output_parms[j].writable) {
				out_attrs[idx].attr.attr.mode |= S_IWUSR;
				out_attrs[idx].attr.store = output_attr_store;
			}

			sysfs_attr_init(&out_attrs[idx].attr.attr);
			attrs[idx] = &out_attrs[idx].attr.attr;

			out_attrs[idx].gth = gth;
			out_attrs[idx].port = i;
			out_attrs[idx].parm = j;
		}
	}

	gth->output_group.name	= "outputs";
	gth->output_group.attrs = attrs;

	return sysfs_create_group(&gth->dev->kobj, &gth->output_group);
}

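/*
 * Tracing proper is started and stopped through REG_GTH_SCR and REG_GTH_SCR2,
 * which the comments below refer to as the force storeEn (on/off) signals:
 * intel_th_gth_start() asserts storeEn, optionally in multiblock mode, and
 * intel_th_gth_stop() deasserts it and then polls REG_GTH_STAT until the
 * output port's pipeline-empty bit is set.  Bit 0 of SCR2 additionally flags
 * "capture done" when no more trace data is expected.
 */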
/**
 * intel_th_gth_stop() - stop tracing to an output device
 * @gth:		GTH device
 * @output:		output device's descriptor
 * @capture_done:	set when no more traces will be captured
 *
 * This will stop tracing using the force storeEn off signal and wait for
 * the pipelines of the corresponding output port to drain.
 */
static void intel_th_gth_stop(struct gth_device *gth,
			      struct intel_th_output *output,
			      bool capture_done)
{
	struct intel_th_device *outdev =
		container_of(output, struct intel_th_device, output);
	struct intel_th_driver *outdrv =
		to_intel_th_driver(outdev->dev.driver);
	unsigned long count;
	u32 reg;
	u32 scr2 = 0xfc | (capture_done ? 1 : 0);

	iowrite32(0, gth->base + REG_GTH_SCR);
	iowrite32(scr2, gth->base + REG_GTH_SCR2);

	/* wait on pipeline empty for the given port */
	for (reg = 0, count = GTH_PLE_WAITLOOP_DEPTH;
	     count && !(reg & BIT(output->port)); count--) {
		reg = ioread32(gth->base + REG_GTH_STAT);
		cpu_relax();
	}

	if (!count)
		dev_dbg(gth->dev, "timeout waiting for GTH[%d] PLE\n",
			output->port);

	/* wait on output pipeline empty */
	if (outdrv->wait_empty)
		outdrv->wait_empty(outdev);

	/* clear force capture done for next captures */
	iowrite32(0xfc, gth->base + REG_GTH_SCR2);
}

/**
 * intel_th_gth_start() - start tracing to an output device
 * @gth:	GTH device
 * @output:	output device's descriptor
 *
 * This will start tracing using the force storeEn signal.
 */
static void intel_th_gth_start(struct gth_device *gth,
			       struct intel_th_output *output)
{
	u32 scr = 0xfc0000;

	if (output->multiblock)
		scr |= 0xff;

	iowrite32(scr, gth->base + REG_GTH_SCR);
	iowrite32(0, gth->base + REG_GTH_SCR2);
}

/**
 * intel_th_gth_disable() - disable tracing to an output device
 * @thdev:	GTH device
 * @output:	output device's descriptor
 *
 * This will deconfigure all masters set to output to this device,
 * disable tracing using the force storeEn off signal and wait for the
 * "pipeline empty" bit of the corresponding output port.
 */
static void intel_th_gth_disable(struct intel_th_device *thdev,
				 struct intel_th_output *output)
{
	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
	int master;
	u32 reg;

	spin_lock(&gth->gth_lock);
	output->active = false;

	for_each_set_bit(master, gth->output[output->port].master,
			 TH_CONFIGURABLE_MASTERS + 1) {
		gth_master_set(gth, master, -1);
	}
	spin_unlock(&gth->gth_lock);

	intel_th_gth_stop(gth, output, true);

	reg = ioread32(gth->base + REG_GTH_SCRPD0);
	reg &= ~output->scratchpad;
	iowrite32(reg, gth->base + REG_GTH_SCRPD0);
}

static void gth_tscu_resync(struct gth_device *gth)
{
	u32 reg;

	reg = ioread32(gth->base + REG_TSCU_TSUCTRL);
	reg &= ~TSUCTRL_CTCRESYNC;
	iowrite32(reg, gth->base + REG_TSCU_TSUCTRL);
}

static void intel_th_gth_prepare(struct intel_th_device *thdev,
				 struct intel_th_output *output)
{
	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
	int count;

	/*
	 * Wait until the output port is in reset before we start
	 * programming it.
	 */
	for (count = GTH_PLE_WAITLOOP_DEPTH;
	     count && !(gth_output_get(gth, output->port) & BIT(5)); count--)
		cpu_relax();
}

/**
 * intel_th_gth_enable() - enable tracing to an output device
 * @thdev:	GTH device
 * @output:	output device's descriptor
 *
 * This will configure all masters set to output to this device and
 * enable tracing using the force storeEn signal.
 */
static void intel_th_gth_enable(struct intel_th_device *thdev,
				struct intel_th_output *output)
{
	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
	struct intel_th *th = to_intel_th(thdev);
	int master;
	u32 scrpd;

	spin_lock(&gth->gth_lock);
	for_each_set_bit(master, gth->output[output->port].master,
			 TH_CONFIGURABLE_MASTERS + 1) {
		gth_master_set(gth, master, output->port);
	}

	output->active = true;
	spin_unlock(&gth->gth_lock);

	if (INTEL_TH_CAP(th, tscu_enable))
		gth_tscu_resync(gth);

	scrpd = ioread32(gth->base + REG_GTH_SCRPD0);
	scrpd |= output->scratchpad;
	iowrite32(scrpd, gth->base + REG_GTH_SCRPD0);

	intel_th_gth_start(gth, output);
}

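/*
 * The switch sequence relies on the CTS being set up for a single trigger in
 * intel_th_gth_reset(): enabling the sequencer via REG_CTS_CTL fires that
 * trigger, bit 4 of REG_CTS_STAT reports the trigger status, and tracing is
 * then bounced with a stop (without "capture done") and restart so that an
 * MSC in multi-block mode gets its window switch.
 */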
/**
 * intel_th_gth_switch() - execute a switch sequence
 * @thdev:	GTH device
 * @output:	output device's descriptor
 *
 * This will execute a switch sequence that will trigger a switch window
 * when tracing to MSC in multi-block mode.
 */
static void intel_th_gth_switch(struct intel_th_device *thdev,
				struct intel_th_output *output)
{
	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
	unsigned long count;
	u32 reg;

	/* trigger */
	iowrite32(0, gth->base + REG_CTS_CTL);
	iowrite32(CTS_CTL_SEQUENCER_ENABLE, gth->base + REG_CTS_CTL);
	/* wait on trigger status */
	for (reg = 0, count = CTS_TRIG_WAITLOOP_DEPTH;
	     count && !(reg & BIT(4)); count--) {
		reg = ioread32(gth->base + REG_CTS_STAT);
		cpu_relax();
	}
	if (!count)
		dev_dbg(&thdev->dev, "timeout waiting for CTS Trigger\n");

	/* De-assert the trigger */
	iowrite32(0, gth->base + REG_CTS_CTL);

	intel_th_gth_stop(gth, output, false);
	intel_th_gth_start(gth, output);
}

/**
 * intel_th_gth_assign() - assign output device to a GTH output port
 * @thdev:	GTH device
 * @othdev:	output device
 *
 * This will match a given output device's parameters against the present
 * output ports on the GTH and fill out the relevant bits in the output
 * device's descriptor.
 *
 * Return:	0 on success, -errno on error.
 */
static int intel_th_gth_assign(struct intel_th_device *thdev,
			       struct intel_th_device *othdev)
{
	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
	int i, id;

	if (thdev->host_mode)
		return -EBUSY;

	if (othdev->type != INTEL_TH_OUTPUT)
		return -EINVAL;

	for (i = 0, id = 0; i < TH_POSSIBLE_OUTPUTS; i++) {
		if (gth->output[i].port_type != othdev->output.type)
			continue;

		if (othdev->id == -1 || othdev->id == id)
			goto found;

		id++;
	}

	return -ENOENT;

found:
	spin_lock(&gth->gth_lock);
	othdev->output.port = i;
	othdev->output.active = false;
	gth->output[i].output = &othdev->output;
	spin_unlock(&gth->gth_lock);

	return 0;
}

/**
 * intel_th_gth_unassign() - deassociate an output device from its output port
 * @thdev:	GTH device
 * @othdev:	output device
 */
static void intel_th_gth_unassign(struct intel_th_device *thdev,
				  struct intel_th_device *othdev)
{
	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
	int port = othdev->output.port;
	int master;

	if (thdev->host_mode)
		return;

	spin_lock(&gth->gth_lock);
	othdev->output.port = -1;
	othdev->output.active = false;
	gth->output[port].output = NULL;
	for (master = 0; master < TH_CONFIGURABLE_MASTERS + 1; master++)
		if (gth->master[master] == port)
			gth->master[master] = -1;
	spin_unlock(&gth->gth_lock);
}

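/*
 * Default routing for masters that have not been assigned explicitly through
 * the "masters" attribute group: they are pointed at output port 0 (see the
 * FIXME below), and anything above TH_CONFIGURABLE_MASTERS collapses onto the
 * shared last slot, mirroring the single GSWTDEST register.
 */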
static int
intel_th_gth_set_output(struct intel_th_device *thdev, unsigned int master)
{
	struct gth_device *gth = dev_get_drvdata(&thdev->dev);
	int port = 0; /* FIXME: make default output configurable */

	/*
	 * everything above TH_CONFIGURABLE_MASTERS is controlled by the
	 * same register
	 */
	if (master > TH_CONFIGURABLE_MASTERS)
		master = TH_CONFIGURABLE_MASTERS;

	spin_lock(&gth->gth_lock);
	if (gth->master[master] == -1) {
		set_bit(master, gth->output[port].master);
		gth->master[master] = port;
	}
	spin_unlock(&gth->gth_lock);

	return 0;
}

static int intel_th_gth_probe(struct intel_th_device *thdev)
{
	struct device *dev = &thdev->dev;
	struct intel_th *th = dev_get_drvdata(dev->parent);
	struct gth_device *gth;
	struct resource *res;
	void __iomem *base;
	int i, ret;

	res = intel_th_device_get_resource(thdev, IORESOURCE_MEM, 0);
	if (!res)
		return -ENODEV;

	base = devm_ioremap(dev, res->start, resource_size(res));
	if (!base)
		return -ENOMEM;

	gth = devm_kzalloc(dev, sizeof(*gth), GFP_KERNEL);
	if (!gth)
		return -ENOMEM;

	gth->dev = dev;
	gth->base = base;
	spin_lock_init(&gth->gth_lock);

	dev_set_drvdata(dev, gth);

	/*
	 * Host mode can be signalled via SW means or via SCRPD_DEBUGGER_IN_USE
	 * bit. Either way, don't reset HW in this case, and don't export any
	 * capture configuration attributes. Also, refuse to assign output
	 * drivers to ports, see intel_th_gth_assign().
	 */
	if (thdev->host_mode)
		return 0;

	ret = intel_th_gth_reset(gth);
	if (ret) {
		if (ret != -EBUSY)
			return ret;

		thdev->host_mode = true;

		return 0;
	}

	for (i = 0; i < TH_CONFIGURABLE_MASTERS + 1; i++)
		gth->master[i] = -1;

	for (i = 0; i < TH_POSSIBLE_OUTPUTS; i++) {
		gth->output[i].gth = gth;
		gth->output[i].index = i;
		gth->output[i].port_type =
			gth_output_parm_get(gth, i, TH_OUTPUT_PARM(port));
		if (gth->output[i].port_type == GTH_NONE)
			continue;

		ret = intel_th_output_enable(th, gth->output[i].port_type);
		/* -ENODEV is ok, we just won't have that device enumerated */
		if (ret && ret != -ENODEV)
			return ret;
	}

	if (intel_th_output_attributes(gth) ||
	    intel_th_master_attributes(gth)) {
		pr_warn("Can't initialize sysfs attributes\n");

		if (gth->output_group.attrs)
			sysfs_remove_group(&gth->dev->kobj, &gth->output_group);
		return -ENOMEM;
	}

	return 0;
}

static void intel_th_gth_remove(struct intel_th_device *thdev)
{
	struct gth_device *gth = dev_get_drvdata(&thdev->dev);

	sysfs_remove_group(&gth->dev->kobj, &gth->output_group);
	sysfs_remove_group(&gth->dev->kobj, &gth->master_group);
}

static struct intel_th_driver intel_th_gth_driver = {
	.probe		= intel_th_gth_probe,
	.remove		= intel_th_gth_remove,
	.assign		= intel_th_gth_assign,
	.unassign	= intel_th_gth_unassign,
	.set_output	= intel_th_gth_set_output,
	.prepare	= intel_th_gth_prepare,
	.enable		= intel_th_gth_enable,
	.trig_switch	= intel_th_gth_switch,
	.disable	= intel_th_gth_disable,
	.driver	= {
		.name	= "gth",
		.owner	= THIS_MODULE,
	},
};

module_driver(intel_th_gth_driver,
	      intel_th_driver_register,
	      intel_th_driver_unregister);

MODULE_ALIAS("intel_th_switch");
MODULE_LICENSE("GPL v2");
MODULE_DESCRIPTION("Intel(R) Trace Hub Global Trace Hub driver");
MODULE_AUTHOR("Alexander Shishkin <alexander.shishkin@linux.intel.com>");