// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2016-2018, NVIDIA CORPORATION.  All rights reserved.
 */

#include <linux/delay.h>
#include <linux/interrupt.h>
#include <linux/io.h>
#include <linux/mailbox_controller.h>
#include <linux/of.h>
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <linux/pm.h>
#include <linux/slab.h>

#include <soc/tegra/fuse.h>

#include <dt-bindings/mailbox/tegra186-hsp.h>

#include "mailbox.h"

#define HSP_INT_IE(x)		(0x100 + ((x) * 4))
#define HSP_INT_IV		0x300
#define HSP_INT_IR		0x304

#define HSP_INT_EMPTY_SHIFT	0
#define HSP_INT_EMPTY_MASK	0xff
#define HSP_INT_FULL_SHIFT	8
#define HSP_INT_FULL_MASK	0xff

#define HSP_INT_DIMENSIONING	0x380
#define HSP_nSM_SHIFT		0
#define HSP_nSS_SHIFT		4
#define HSP_nAS_SHIFT		8
#define HSP_nDB_SHIFT		12
#define HSP_nSI_SHIFT		16
#define HSP_nINT_MASK		0xf

#define HSP_DB_TRIGGER	0x0
#define HSP_DB_ENABLE	0x4
#define HSP_DB_RAW	0x8
#define HSP_DB_PENDING	0xc

#define HSP_SM_SHRD_MBOX	0x0
#define HSP_SM_SHRD_MBOX_FULL	BIT(31)
#define HSP_SM_SHRD_MBOX_FULL_INT_IE	0x04
#define HSP_SM_SHRD_MBOX_EMPTY_INT_IE	0x08

#define HSP_DB_CCPLEX		1
#define HSP_DB_BPMP		3
#define HSP_DB_MAX		7
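
/*
 * Illustrative note (editor's summary of the code below, not additional
 * hardware documentation): the per-interrupt enable register HSP_INT_IE(si)
 * and the cached hsp->mask use one bit per shared mailbox and per direction.
 * Bit (HSP_INT_EMPTY_SHIFT + i) enables the EMPTY interrupt for shared
 * mailbox i and bit (HSP_INT_FULL_SHIFT + i) enables its FULL interrupt.
 * For example, enabling the FULL interrupt for shared mailbox 3 sets
 * BIT(8 + 3) = BIT(11), which is what tegra_hsp_mailbox_startup() does for
 * consumer channels.
 */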

struct tegra_hsp_channel;
struct tegra_hsp;

struct tegra_hsp_channel {
	struct tegra_hsp *hsp;
	struct mbox_chan *chan;
	void __iomem *regs;
};

struct tegra_hsp_doorbell {
	struct tegra_hsp_channel channel;
	struct list_head list;
	const char *name;
	unsigned int master;
	unsigned int index;
};

struct tegra_hsp_mailbox {
	struct tegra_hsp_channel channel;
	unsigned int index;
	bool producer;
};

struct tegra_hsp_db_map {
	const char *name;
	unsigned int master;
	unsigned int index;
};

struct tegra_hsp_soc {
	const struct tegra_hsp_db_map *map;
	bool has_per_mb_ie;
};

struct tegra_hsp {
	struct device *dev;
	const struct tegra_hsp_soc *soc;
	struct mbox_controller mbox_db;
	struct mbox_controller mbox_sm;
	void __iomem *regs;
	unsigned int doorbell_irq;
	unsigned int *shared_irqs;
	unsigned int shared_irq;
	unsigned int num_sm;
	unsigned int num_as;
	unsigned int num_ss;
	unsigned int num_db;
	unsigned int num_si;
	spinlock_t lock;

	struct list_head doorbells;
	struct tegra_hsp_mailbox *mailboxes;

	unsigned long mask;
};

static inline u32 tegra_hsp_readl(struct tegra_hsp *hsp, unsigned int offset)
{
	return readl(hsp->regs + offset);
}

static inline void tegra_hsp_writel(struct tegra_hsp *hsp, u32 value,
				    unsigned int offset)
{
	writel(value, hsp->regs + offset);
}

static inline u32 tegra_hsp_channel_readl(struct tegra_hsp_channel *channel,
					  unsigned int offset)
{
	return readl(channel->regs + offset);
}

static inline void tegra_hsp_channel_writel(struct tegra_hsp_channel *channel,
					    u32 value, unsigned int offset)
{
	writel(value, channel->regs + offset);
}

static bool tegra_hsp_doorbell_can_ring(struct tegra_hsp_doorbell *db)
{
	u32 value;

	value = tegra_hsp_channel_readl(&db->channel, HSP_DB_ENABLE);

	return (value & BIT(TEGRA_HSP_DB_MASTER_CCPLEX)) != 0;
}

static struct tegra_hsp_doorbell *
__tegra_hsp_doorbell_get(struct tegra_hsp *hsp, unsigned int master)
{
	struct tegra_hsp_doorbell *entry;

	list_for_each_entry(entry, &hsp->doorbells, list)
		if (entry->master == master)
			return entry;

	return NULL;
}

static struct tegra_hsp_doorbell *
tegra_hsp_doorbell_get(struct tegra_hsp *hsp, unsigned int master)
{
	struct tegra_hsp_doorbell *db;
	unsigned long flags;

	spin_lock_irqsave(&hsp->lock, flags);
	db = __tegra_hsp_doorbell_get(hsp, master);
	spin_unlock_irqrestore(&hsp->lock, flags);

	return db;
}

static irqreturn_t tegra_hsp_doorbell_irq(int irq, void *data)
{
	struct tegra_hsp *hsp = data;
	struct tegra_hsp_doorbell *db;
	unsigned long master, value;

	db = tegra_hsp_doorbell_get(hsp, TEGRA_HSP_DB_MASTER_CCPLEX);
	if (!db)
		return IRQ_NONE;

	value = tegra_hsp_channel_readl(&db->channel, HSP_DB_PENDING);
	tegra_hsp_channel_writel(&db->channel, value, HSP_DB_PENDING);

	spin_lock(&hsp->lock);

	for_each_set_bit(master, &value, hsp->mbox_db.num_chans) {
		struct tegra_hsp_doorbell *db;

		db = __tegra_hsp_doorbell_get(hsp, master);
		/*
		 * Depending on the bootloader chain, the CCPLEX doorbell will
		 * have some doorbells enabled, which means that requesting an
		 * interrupt will immediately fire.
		 *
		 * In that case, db->channel.chan will still be NULL here and
		 * cause a crash if not properly guarded.
		 *
		 * It remains to be seen if ignoring the doorbell in that case
		 * is the correct solution.
		 */
		if (db && db->channel.chan)
			mbox_chan_received_data(db->channel.chan, NULL);
	}

	spin_unlock(&hsp->lock);

	return IRQ_HANDLED;
}

static irqreturn_t tegra_hsp_shared_irq(int irq, void *data)
{
	struct tegra_hsp *hsp = data;
	unsigned long bit, mask;
	u32 status, value;
	void *msg;

	status = tegra_hsp_readl(hsp, HSP_INT_IR) & hsp->mask;

	/* process EMPTY interrupts first */
	mask = (status >> HSP_INT_EMPTY_SHIFT) & HSP_INT_EMPTY_MASK;

	for_each_set_bit(bit, &mask, hsp->num_sm) {
		struct tegra_hsp_mailbox *mb = &hsp->mailboxes[bit];

		if (mb->producer) {
			/*
			 * Disable EMPTY interrupts until data is sent with
			 * the next message. These interrupts are level-
			 * triggered, so if we kept them enabled they would
			 * constantly trigger until we next write data into
			 * the message.
			 */
			spin_lock(&hsp->lock);

			hsp->mask &= ~BIT(HSP_INT_EMPTY_SHIFT + mb->index);
			tegra_hsp_writel(hsp, hsp->mask,
					 HSP_INT_IE(hsp->shared_irq));

			spin_unlock(&hsp->lock);

			mbox_chan_txdone(mb->channel.chan, 0);
		}
	}

	/* process FULL interrupts */
	mask = (status >> HSP_INT_FULL_SHIFT) & HSP_INT_FULL_MASK;

	for_each_set_bit(bit, &mask, hsp->num_sm) {
		struct tegra_hsp_mailbox *mb = &hsp->mailboxes[bit];

		if (!mb->producer) {
			value = tegra_hsp_channel_readl(&mb->channel,
							HSP_SM_SHRD_MBOX);
			value &= ~HSP_SM_SHRD_MBOX_FULL;
			msg = (void *)(unsigned long)value;
			mbox_chan_received_data(mb->channel.chan, msg);

			/*
			 * Need to clear all bits here since some producers,
			 * such as TCU, depend on fields in the register
			 * getting cleared by the consumer.
			 *
			 * The mailbox API doesn't give the consumers a way
			 * of doing that explicitly, so we have to make sure
			 * we cover all possible cases.
			 */
			tegra_hsp_channel_writel(&mb->channel, 0x0,
						 HSP_SM_SHRD_MBOX);
		}
	}

	return IRQ_HANDLED;
}

static struct tegra_hsp_channel *
tegra_hsp_doorbell_create(struct tegra_hsp *hsp, const char *name,
			  unsigned int master, unsigned int index)
{
	struct tegra_hsp_doorbell *db;
	unsigned int offset;
	unsigned long flags;

	db = devm_kzalloc(hsp->dev, sizeof(*db), GFP_KERNEL);
	if (!db)
		return ERR_PTR(-ENOMEM);

	offset = (1 + (hsp->num_sm / 2) + hsp->num_ss + hsp->num_as) * SZ_64K;
	offset += index * 0x100;

	db->channel.regs = hsp->regs + offset;
	db->channel.hsp = hsp;

	db->name = devm_kstrdup_const(hsp->dev, name, GFP_KERNEL);
	db->master = master;
	db->index = index;

	spin_lock_irqsave(&hsp->lock, flags);
	list_add_tail(&db->list, &hsp->doorbells);
	spin_unlock_irqrestore(&hsp->lock, flags);

	return &db->channel;
}
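
/*
 * Editor's worked example (the dimensioning values are assumptions, not
 * taken from this file): with num_sm = 8, num_ss = 2 and num_as = 2, the
 * doorbell page computed above starts at
 *
 *	(1 + 8/2 + 2 + 2) * SZ_64K = 9 * 0x10000 = 0x90000
 *
 * from the start of the HSP block, and doorbell index 3 (HSP_DB_BPMP) would
 * then live at 0x90000 + 3 * 0x100 = 0x90300. The real values are decoded
 * from the HSP_INT_DIMENSIONING register in tegra_hsp_probe().
 */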

static int tegra_hsp_doorbell_send_data(struct mbox_chan *chan, void *data)
{
	struct tegra_hsp_doorbell *db = chan->con_priv;

	tegra_hsp_channel_writel(&db->channel, 1, HSP_DB_TRIGGER);

	return 0;
}

static int tegra_hsp_doorbell_startup(struct mbox_chan *chan)
{
	struct tegra_hsp_doorbell *db = chan->con_priv;
	struct tegra_hsp *hsp = db->channel.hsp;
	struct tegra_hsp_doorbell *ccplex;
	unsigned long flags;
	u32 value;

	if (db->master >= chan->mbox->num_chans) {
		dev_err(chan->mbox->dev,
			"invalid master ID %u for HSP channel\n",
			db->master);
		return -EINVAL;
	}

	ccplex = tegra_hsp_doorbell_get(hsp, TEGRA_HSP_DB_MASTER_CCPLEX);
	if (!ccplex)
		return -ENODEV;

	/*
	 * On simulation platforms the BPMP hasn't had a chance yet to mark
	 * the doorbell as ringable by the CCPLEX, so we want to skip extra
	 * checks here.
	 */
	if (tegra_is_silicon() && !tegra_hsp_doorbell_can_ring(db))
		return -ENODEV;

	spin_lock_irqsave(&hsp->lock, flags);

	value = tegra_hsp_channel_readl(&ccplex->channel, HSP_DB_ENABLE);
	value |= BIT(db->master);
	tegra_hsp_channel_writel(&ccplex->channel, value, HSP_DB_ENABLE);

	spin_unlock_irqrestore(&hsp->lock, flags);

	return 0;
}

static void tegra_hsp_doorbell_shutdown(struct mbox_chan *chan)
{
	struct tegra_hsp_doorbell *db = chan->con_priv;
	struct tegra_hsp *hsp = db->channel.hsp;
	struct tegra_hsp_doorbell *ccplex;
	unsigned long flags;
	u32 value;

	ccplex = tegra_hsp_doorbell_get(hsp, TEGRA_HSP_DB_MASTER_CCPLEX);
	if (!ccplex)
		return;

	spin_lock_irqsave(&hsp->lock, flags);

	value = tegra_hsp_channel_readl(&ccplex->channel, HSP_DB_ENABLE);
	value &= ~BIT(db->master);
	tegra_hsp_channel_writel(&ccplex->channel, value, HSP_DB_ENABLE);

	spin_unlock_irqrestore(&hsp->lock, flags);
}

static const struct mbox_chan_ops tegra_hsp_db_ops = {
	.send_data = tegra_hsp_doorbell_send_data,
	.startup = tegra_hsp_doorbell_startup,
	.shutdown = tegra_hsp_doorbell_shutdown,
};

static int tegra_hsp_mailbox_send_data(struct mbox_chan *chan, void *data)
{
	struct tegra_hsp_mailbox *mb = chan->con_priv;
	struct tegra_hsp *hsp = mb->channel.hsp;
	unsigned long flags;
	u32 value;

	if (WARN_ON(!mb->producer))
		return -EPERM;

	/* copy data and mark mailbox full */
	value = (u32)(unsigned long)data;
	value |= HSP_SM_SHRD_MBOX_FULL;

	tegra_hsp_channel_writel(&mb->channel, value, HSP_SM_SHRD_MBOX);

	/* enable EMPTY interrupt for the shared mailbox */
	spin_lock_irqsave(&hsp->lock, flags);

	hsp->mask |= BIT(HSP_INT_EMPTY_SHIFT + mb->index);
	tegra_hsp_writel(hsp, hsp->mask, HSP_INT_IE(hsp->shared_irq));

	spin_unlock_irqrestore(&hsp->lock, flags);

	return 0;
}
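
/*
 * Editor's note on the message format (a summary of the code above and of
 * tegra_hsp_shared_irq(), not additional documentation): the "message"
 * exchanged over a shared mailbox is a single 32-bit value smuggled through
 * the void *data pointer. The producer ORs in HSP_SM_SHRD_MBOX_FULL (bit 31)
 * before writing the register, and the consumer path masks that bit off
 * again before calling mbox_chan_received_data(), so only the lower 31 bits
 * are usable payload. For example, sending 0x1234 results in 0x80001234 in
 * the HSP_SM_SHRD_MBOX register and 0x1234 being delivered to the receiving
 * client.
 */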

static int tegra_hsp_mailbox_flush(struct mbox_chan *chan,
				   unsigned long timeout)
{
	struct tegra_hsp_mailbox *mb = chan->con_priv;
	struct tegra_hsp_channel *ch = &mb->channel;
	u32 value;

	timeout = jiffies + msecs_to_jiffies(timeout);

	while (time_before(jiffies, timeout)) {
		value = tegra_hsp_channel_readl(ch, HSP_SM_SHRD_MBOX);
		if ((value & HSP_SM_SHRD_MBOX_FULL) == 0) {
			mbox_chan_txdone(chan, 0);

			/* Wait until channel is empty */
			if (chan->active_req != NULL)
				continue;

			return 0;
		}

		udelay(1);
	}

	return -ETIME;
}

static int tegra_hsp_mailbox_startup(struct mbox_chan *chan)
{
	struct tegra_hsp_mailbox *mb = chan->con_priv;
	struct tegra_hsp_channel *ch = &mb->channel;
	struct tegra_hsp *hsp = mb->channel.hsp;
	unsigned long flags;

	chan->txdone_method = TXDONE_BY_IRQ;

	/*
	 * Shared mailboxes start out as consumers by default. FULL and EMPTY
	 * interrupts are coalesced at the same shared interrupt.
	 *
	 * Keep EMPTY interrupts disabled at startup and only enable them when
	 * the mailbox is actually full. This is required because the FULL and
	 * EMPTY interrupts are level-triggered, so keeping EMPTY interrupts
	 * enabled all the time would cause an interrupt storm while mailboxes
	 * are idle.
	 */

	spin_lock_irqsave(&hsp->lock, flags);

	if (mb->producer)
		hsp->mask &= ~BIT(HSP_INT_EMPTY_SHIFT + mb->index);
	else
		hsp->mask |= BIT(HSP_INT_FULL_SHIFT + mb->index);

	tegra_hsp_writel(hsp, hsp->mask, HSP_INT_IE(hsp->shared_irq));

	spin_unlock_irqrestore(&hsp->lock, flags);

	if (hsp->soc->has_per_mb_ie) {
		if (mb->producer)
			tegra_hsp_channel_writel(ch, 0x0,
						 HSP_SM_SHRD_MBOX_EMPTY_INT_IE);
		else
			tegra_hsp_channel_writel(ch, 0x1,
						 HSP_SM_SHRD_MBOX_FULL_INT_IE);
	}

	return 0;
}

static void tegra_hsp_mailbox_shutdown(struct mbox_chan *chan)
{
	struct tegra_hsp_mailbox *mb = chan->con_priv;
	struct tegra_hsp_channel *ch = &mb->channel;
	struct tegra_hsp *hsp = mb->channel.hsp;
	unsigned long flags;

	if (hsp->soc->has_per_mb_ie) {
		if (mb->producer)
			tegra_hsp_channel_writel(ch, 0x0,
						 HSP_SM_SHRD_MBOX_EMPTY_INT_IE);
		else
			tegra_hsp_channel_writel(ch, 0x0,
						 HSP_SM_SHRD_MBOX_FULL_INT_IE);
	}

	spin_lock_irqsave(&hsp->lock, flags);

	if (mb->producer)
		hsp->mask &= ~BIT(HSP_INT_EMPTY_SHIFT + mb->index);
	else
		hsp->mask &= ~BIT(HSP_INT_FULL_SHIFT + mb->index);

	tegra_hsp_writel(hsp, hsp->mask, HSP_INT_IE(hsp->shared_irq));

	spin_unlock_irqrestore(&hsp->lock, flags);
}

static const struct mbox_chan_ops tegra_hsp_sm_ops = {
	.send_data = tegra_hsp_mailbox_send_data,
	.flush = tegra_hsp_mailbox_flush,
	.startup = tegra_hsp_mailbox_startup,
	.shutdown = tegra_hsp_mailbox_shutdown,
};

static struct mbox_chan *tegra_hsp_db_xlate(struct mbox_controller *mbox,
					    const struct of_phandle_args *args)
{
	struct tegra_hsp *hsp = container_of(mbox, struct tegra_hsp, mbox_db);
	unsigned int type = args->args[0], master = args->args[1];
	struct tegra_hsp_channel *channel = ERR_PTR(-ENODEV);
	struct tegra_hsp_doorbell *db;
	struct mbox_chan *chan;
	unsigned long flags;
	unsigned int i;

	if (type != TEGRA_HSP_MBOX_TYPE_DB || !hsp->doorbell_irq)
		return ERR_PTR(-ENODEV);

	db = tegra_hsp_doorbell_get(hsp, master);
	if (db)
		channel = &db->channel;

	if (IS_ERR(channel))
		return ERR_CAST(channel);

	spin_lock_irqsave(&hsp->lock, flags);

	for (i = 0; i < mbox->num_chans; i++) {
		chan = &mbox->chans[i];
		if (!chan->con_priv) {
			channel->chan = chan;
			chan->con_priv = db;
			break;
		}

		chan = NULL;
	}

	spin_unlock_irqrestore(&hsp->lock, flags);

	return chan ?: ERR_PTR(-EBUSY);
}
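
/*
 * Editor's example (the node label is made up for illustration): a client
 * that wants the doorbell towards the BPMP would use a two-cell specifier
 * matching the decoding above, args[0] being the channel type and args[1]
 * the master ID, e.g. in the device tree:
 *
 *	mboxes = <&hsp_top TEGRA_HSP_MBOX_TYPE_DB TEGRA_HSP_DB_MASTER_BPMP>;
 *
 * using the constants from dt-bindings/mailbox/tegra186-hsp.h included above.
 */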

static struct mbox_chan *tegra_hsp_sm_xlate(struct mbox_controller *mbox,
					    const struct of_phandle_args *args)
{
	struct tegra_hsp *hsp = container_of(mbox, struct tegra_hsp, mbox_sm);
	unsigned int type = args->args[0], index;
	struct tegra_hsp_mailbox *mb;

	index = args->args[1] & TEGRA_HSP_SM_MASK;

	if (type != TEGRA_HSP_MBOX_TYPE_SM || !hsp->shared_irqs ||
	    index >= hsp->num_sm)
		return ERR_PTR(-ENODEV);

	mb = &hsp->mailboxes[index];

	if ((args->args[1] & TEGRA_HSP_SM_FLAG_TX) == 0)
		mb->producer = false;
	else
		mb->producer = true;

	return mb->channel.chan;
}
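
/*
 * Editor's example (node label made up for illustration): for shared
 * mailboxes the second specifier cell encodes both the mailbox index
 * (masked with TEGRA_HSP_SM_MASK) and the direction (TEGRA_HSP_SM_FLAG_TX
 * set for a producer, clear for a consumer). A client transmitting on
 * shared mailbox 0 and receiving on shared mailbox 1 might therefore use:
 *
 *	mboxes = <&hsp_top TEGRA_HSP_MBOX_TYPE_SM (TEGRA_HSP_SM_FLAG_TX | 0)>,
 *		 <&hsp_top TEGRA_HSP_MBOX_TYPE_SM 1>;
 */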

static int tegra_hsp_add_doorbells(struct tegra_hsp *hsp)
{
	const struct tegra_hsp_db_map *map = hsp->soc->map;
	struct tegra_hsp_channel *channel;

	while (map->name) {
		channel = tegra_hsp_doorbell_create(hsp, map->name,
						    map->master, map->index);
		if (IS_ERR(channel))
			return PTR_ERR(channel);

		map++;
	}

	return 0;
}

static int tegra_hsp_add_mailboxes(struct tegra_hsp *hsp, struct device *dev)
{
	int i;

	hsp->mailboxes = devm_kcalloc(dev, hsp->num_sm, sizeof(*hsp->mailboxes),
				      GFP_KERNEL);
	if (!hsp->mailboxes)
		return -ENOMEM;

	for (i = 0; i < hsp->num_sm; i++) {
		struct tegra_hsp_mailbox *mb = &hsp->mailboxes[i];

		mb->index = i;

		mb->channel.hsp = hsp;
		mb->channel.regs = hsp->regs + SZ_64K + i * SZ_32K;
		mb->channel.chan = &hsp->mbox_sm.chans[i];
		mb->channel.chan->con_priv = mb;
	}

	return 0;
}
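
/*
 * Editor's worked example of the layout implied above: shared mailbox
 * registers start one 64 KiB page into the HSP block and each mailbox
 * occupies 32 KiB, so mailbox 0 sits at offset 0x10000, mailbox 1 at
 * 0x18000, and mailbox 7 (if present) at 0x48000 from hsp->regs.
 */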

static int tegra_hsp_request_shared_irq(struct tegra_hsp *hsp)
{
	unsigned int i, irq = 0;
	int err;

	for (i = 0; i < hsp->num_si; i++) {
		irq = hsp->shared_irqs[i];
		if (irq <= 0)
			continue;

		err = devm_request_irq(hsp->dev, irq, tegra_hsp_shared_irq, 0,
				       dev_name(hsp->dev), hsp);
		if (err < 0) {
			dev_err(hsp->dev, "failed to request interrupt: %d\n",
				err);
			continue;
		}

		hsp->shared_irq = i;

		/* disable all interrupts */
		tegra_hsp_writel(hsp, 0, HSP_INT_IE(hsp->shared_irq));

		dev_dbg(hsp->dev, "interrupt requested: %u\n", irq);

		break;
	}

	if (i == hsp->num_si) {
		dev_err(hsp->dev, "failed to find available interrupt\n");
		return -ENOENT;
	}

	return 0;
}

static int tegra_hsp_probe(struct platform_device *pdev)
{
	struct tegra_hsp *hsp;
	struct resource *res;
	unsigned int i;
	u32 value;
	int err;

	hsp = devm_kzalloc(&pdev->dev, sizeof(*hsp), GFP_KERNEL);
	if (!hsp)
		return -ENOMEM;

	hsp->dev = &pdev->dev;
	hsp->soc = of_device_get_match_data(&pdev->dev);
	INIT_LIST_HEAD(&hsp->doorbells);
	spin_lock_init(&hsp->lock);

	res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
	hsp->regs = devm_ioremap_resource(&pdev->dev, res);
	if (IS_ERR(hsp->regs))
		return PTR_ERR(hsp->regs);

	value = tegra_hsp_readl(hsp, HSP_INT_DIMENSIONING);
	hsp->num_sm = (value >> HSP_nSM_SHIFT) & HSP_nINT_MASK;
	hsp->num_ss = (value >> HSP_nSS_SHIFT) & HSP_nINT_MASK;
	hsp->num_as = (value >> HSP_nAS_SHIFT) & HSP_nINT_MASK;
	hsp->num_db = (value >> HSP_nDB_SHIFT) & HSP_nINT_MASK;
	hsp->num_si = (value >> HSP_nSI_SHIFT) & HSP_nINT_MASK;

	err = platform_get_irq_byname_optional(pdev, "doorbell");
	if (err >= 0)
		hsp->doorbell_irq = err;

	if (hsp->num_si > 0) {
		unsigned int count = 0;

		hsp->shared_irqs = devm_kcalloc(&pdev->dev, hsp->num_si,
						sizeof(*hsp->shared_irqs),
						GFP_KERNEL);
		if (!hsp->shared_irqs)
			return -ENOMEM;

		for (i = 0; i < hsp->num_si; i++) {
			char *name;

			name = kasprintf(GFP_KERNEL, "shared%u", i);
			if (!name)
				return -ENOMEM;

			err = platform_get_irq_byname_optional(pdev, name);
			if (err >= 0) {
				hsp->shared_irqs[i] = err;
				count++;
			}

			kfree(name);
		}

		if (count == 0) {
			devm_kfree(&pdev->dev, hsp->shared_irqs);
			hsp->shared_irqs = NULL;
		}
	}

	/* setup the doorbell controller */
	hsp->mbox_db.of_xlate = tegra_hsp_db_xlate;
	hsp->mbox_db.num_chans = 32;
	hsp->mbox_db.dev = &pdev->dev;
	hsp->mbox_db.ops = &tegra_hsp_db_ops;

	hsp->mbox_db.chans = devm_kcalloc(&pdev->dev, hsp->mbox_db.num_chans,
					  sizeof(*hsp->mbox_db.chans),
					  GFP_KERNEL);
	if (!hsp->mbox_db.chans)
		return -ENOMEM;

	if (hsp->doorbell_irq) {
		err = tegra_hsp_add_doorbells(hsp);
		if (err < 0) {
			dev_err(&pdev->dev, "failed to add doorbells: %d\n",
				err);
			return err;
		}
	}

	err = devm_mbox_controller_register(&pdev->dev, &hsp->mbox_db);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register doorbell mailbox: %d\n",
			err);
		return err;
	}

	/* setup the shared mailbox controller */
	hsp->mbox_sm.of_xlate = tegra_hsp_sm_xlate;
	hsp->mbox_sm.num_chans = hsp->num_sm;
	hsp->mbox_sm.dev = &pdev->dev;
	hsp->mbox_sm.ops = &tegra_hsp_sm_ops;

	hsp->mbox_sm.chans = devm_kcalloc(&pdev->dev, hsp->mbox_sm.num_chans,
					  sizeof(*hsp->mbox_sm.chans),
					  GFP_KERNEL);
	if (!hsp->mbox_sm.chans)
		return -ENOMEM;

	if (hsp->shared_irqs) {
		err = tegra_hsp_add_mailboxes(hsp, &pdev->dev);
		if (err < 0) {
			dev_err(&pdev->dev, "failed to add mailboxes: %d\n",
				err);
			return err;
		}
	}

	err = devm_mbox_controller_register(&pdev->dev, &hsp->mbox_sm);
	if (err < 0) {
		dev_err(&pdev->dev, "failed to register shared mailbox: %d\n",
			err);
		return err;
	}

	platform_set_drvdata(pdev, hsp);

	if (hsp->doorbell_irq) {
		err = devm_request_irq(&pdev->dev, hsp->doorbell_irq,
				       tegra_hsp_doorbell_irq, IRQF_NO_SUSPEND,
				       dev_name(&pdev->dev), hsp);
		if (err < 0) {
			dev_err(&pdev->dev,
				"failed to request doorbell IRQ#%u: %d\n",
				hsp->doorbell_irq, err);
			return err;
		}
	}

	if (hsp->shared_irqs) {
		err = tegra_hsp_request_shared_irq(hsp);
		if (err < 0)
			return err;
	}

	return 0;
}
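
/*
 * Editor's sketch of how a consumer elsewhere in the kernel might talk to a
 * shared-mailbox channel exposed by this controller, using the generic
 * mailbox client API from <linux/mailbox_client.h>. The client device,
 * callback names and error handling are hypothetical and kept minimal; this
 * is not code from the original driver.
 *
 *	static void demo_rx(struct mbox_client *cl, void *msg)
 *	{
 *		// msg carries the 31-bit value read from HSP_SM_SHRD_MBOX
 *		dev_info(cl->dev, "received %#lx\n", (unsigned long)msg);
 *	}
 *
 *	static int demo_open(struct device *dev)
 *	{
 *		static struct mbox_client cl;
 *		struct mbox_chan *chan;
 *		int err;
 *
 *		cl.dev = dev;
 *		cl.rx_callback = demo_rx;
 *		cl.tx_block = true;
 *
 *		chan = mbox_request_channel(&cl, 0);
 *		if (IS_ERR(chan))
 *			return PTR_ERR(chan);
 *
 *		// on a TX (producer) channel, send a 31-bit value
 *		err = mbox_send_message(chan, (void *)0x1234);
 *		return err < 0 ? err : 0;
 *	}
 */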

static int __maybe_unused tegra_hsp_resume(struct device *dev)
{
	struct tegra_hsp *hsp = dev_get_drvdata(dev);
	unsigned int i;
	struct tegra_hsp_doorbell *db;

	list_for_each_entry(db, &hsp->doorbells, list) {
		if (db && db->channel.chan)
			tegra_hsp_doorbell_startup(db->channel.chan);
	}

	if (hsp->mailboxes) {
		for (i = 0; i < hsp->num_sm; i++) {
			struct tegra_hsp_mailbox *mb = &hsp->mailboxes[i];

			if (mb->channel.chan->cl)
				tegra_hsp_mailbox_startup(mb->channel.chan);
		}
	}

	return 0;
}

static const struct dev_pm_ops tegra_hsp_pm_ops = {
	.resume_noirq = tegra_hsp_resume,
};

static const struct tegra_hsp_db_map tegra186_hsp_db_map[] = {
	{ "ccplex", TEGRA_HSP_DB_MASTER_CCPLEX, HSP_DB_CCPLEX, },
	{ "bpmp",   TEGRA_HSP_DB_MASTER_BPMP,   HSP_DB_BPMP,   },
	{ /* sentinel */ }
};

static const struct tegra_hsp_soc tegra186_hsp_soc = {
	.map = tegra186_hsp_db_map,
	.has_per_mb_ie = false,
};

static const struct tegra_hsp_soc tegra194_hsp_soc = {
	.map = tegra186_hsp_db_map,
	.has_per_mb_ie = true,
};

static const struct of_device_id tegra_hsp_match[] = {
	{ .compatible = "nvidia,tegra186-hsp", .data = &tegra186_hsp_soc },
	{ .compatible = "nvidia,tegra194-hsp", .data = &tegra194_hsp_soc },
	{ }
};

static struct platform_driver tegra_hsp_driver = {
	.driver = {
		.name = "tegra-hsp",
		.of_match_table = tegra_hsp_match,
		.pm = &tegra_hsp_pm_ops,
	},
	.probe = tegra_hsp_probe,
};

static int __init tegra_hsp_init(void)
{
	return platform_driver_register(&tegra_hsp_driver);
}
core_initcall(tegra_hsp_init);