// SPDX-License-Identifier: GPL-2.0-only
/*
 * Copyright (c) 2015, Sony Mobile Communications Inc.
 * Copyright (c) 2012-2013, The Linux Foundation. All rights reserved.
 */

#include <linux/interrupt.h>
#include <linux/mfd/syscon.h>
#include <linux/module.h>
#include <linux/of_irq.h>
#include <linux/platform_device.h>
#include <linux/spinlock.h>
#include <linux/regmap.h>
#include <linux/soc/qcom/smem.h>
#include <linux/soc/qcom/smem_state.h>

/*
 * This driver implements the Qualcomm Shared Memory State Machine, a mechanism
 * for communicating single bit state information to remote processors.
 *
 * The implementation is based on two sections of shared memory; the first
 * holding the state bits and the second holding a matrix of subscription bits.
 *
 * The state bits are structured in entries of 32 bits, each belonging to one
 * system in the SoC. The entry belonging to the local system is considered
 * read-write, while the rest should be considered read-only.
 *
 * The subscription matrix consists of N bitmaps per entry, denoting interest
 * in updates of the entry for each of the N hosts. Upon updating a state bit
 * each host's subscription bitmap should be queried and the remote system
 * should be interrupted if it has requested so.
 *
 * The subscription matrix is laid out in entry-major order:
 *	entry0: [host0 ... hostN]
 *	.
 *	.
 *	entryM: [host0 ... hostN]
 *
 * A third, optional, shared memory region might contain information regarding
 * the number of entries in the state bitmap as well as the number of columns
 * in the subscription matrix.
 */
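
/*
 * As an illustration of the layout above, the 32-bit word holding host h's
 * subscription mask for entry e sits at word offset e * num_hosts + h from
 * the start of the subscription region, matching the pointer arithmetic used
 * further down in this file (variable names here are illustrative only):
 *
 *	u32 *sub = intr_mask + e * num_hosts + h;
 *	bool wants_kick = readl(sub) & BIT(state_bit);
 */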

/*
 * Shared memory identifiers, used to acquire handles to the respective memory
 * regions.
 */
#define SMEM_SMSM_SHARED_STATE		85
#define SMEM_SMSM_CPU_INTR_MASK		333
#define SMEM_SMSM_SIZE_INFO		419

/*
 * Default sizes, in case SMEM_SMSM_SIZE_INFO is not found.
 */
#define SMSM_DEFAULT_NUM_ENTRIES	8
#define SMSM_DEFAULT_NUM_HOSTS		3

struct smsm_entry;
struct smsm_host;

/**
 * struct qcom_smsm - smsm driver context
 * @dev: smsm device pointer
 * @local_host: column in the subscription matrix representing this system
 * @num_hosts: number of columns in the subscription matrix
 * @num_entries: number of entries in the state map and rows in the subscription
 *		matrix
 * @local_state: pointer to the local processor's state bits
 * @subscription: pointer to local processor's row in subscription matrix
 * @state: smem state handle
 * @lock: spinlock for read-modify-write of the outgoing state
 * @entries: context for each of the entries
 * @hosts: context for each of the hosts
 */
struct qcom_smsm {
	struct device *dev;

	u32 local_host;

	u32 num_hosts;
	u32 num_entries;

	u32 *local_state;
	u32 *subscription;
	struct qcom_smem_state *state;

	spinlock_t lock;

	struct smsm_entry *entries;
	struct smsm_host *hosts;
};

/**
 * struct smsm_entry - per remote processor entry context
 * @smsm: back-reference to driver context
 * @domain: IRQ domain for this entry, if representing a remote system
 * @irq_enabled: bitmap of state bits for which IRQs are enabled
 * @irq_rising: bitmap tracking if rising bits should be propagated
 * @irq_falling: bitmap tracking if falling bits should be propagated
 * @last_value: snapshot of state bits last time the interrupts were propagated
 * @remote_state: pointer to this entry's state bits
 * @subscription: pointer to a row in the subscription matrix representing this
 *		entry
 */
struct smsm_entry {
	struct qcom_smsm *smsm;

	struct irq_domain *domain;
	DECLARE_BITMAP(irq_enabled, 32);
	DECLARE_BITMAP(irq_rising, 32);
	DECLARE_BITMAP(irq_falling, 32);
	unsigned long last_value;

	u32 *remote_state;
	u32 *subscription;
};

/**
 * struct smsm_host - representation of a remote host
 * @ipc_regmap: regmap for outgoing interrupt
 * @ipc_offset: offset in @ipc_regmap for outgoing interrupt
 * @ipc_bit: bit in @ipc_regmap + @ipc_offset for outgoing interrupt
 */
struct smsm_host {
	struct regmap *ipc_regmap;
	int ipc_offset;
	int ipc_bit;
};

/**
 * smsm_update_bits() - change bits in the outgoing entry and inform subscribers
 * @data: smsm context pointer
 * @mask: mask of the bits to be updated
 * @value: new value of the masked bits
 *
 * Used to set and clear the bits in the outgoing/local entry and inform
 * subscribers about the change.
 */
static int smsm_update_bits(void *data, u32 mask, u32 value)
{
	struct qcom_smsm *smsm = data;
	struct smsm_host *hostp;
	unsigned long flags;
	u32 changes;
	u32 host;
	u32 orig;
	u32 val;

	spin_lock_irqsave(&smsm->lock, flags);

	/* Update the entry */
	val = orig = readl(smsm->local_state);
	val &= ~mask;
	val |= value;

	/* Don't signal if we didn't change the value */
	changes = val ^ orig;
	if (!changes) {
		spin_unlock_irqrestore(&smsm->lock, flags);
		goto done;
	}

	/* Write out the new value */
	writel(val, smsm->local_state);
	spin_unlock_irqrestore(&smsm->lock, flags);

	/* Make sure the value update is ordered before any kicks */
	wmb();

	/* Iterate over all hosts to check who wants a kick */
	for (host = 0; host < smsm->num_hosts; host++) {
		hostp = &smsm->hosts[host];

		val = readl(smsm->subscription + host);
		if (val & changes && hostp->ipc_regmap) {
			regmap_write(hostp->ipc_regmap,
				     hostp->ipc_offset,
				     BIT(hostp->ipc_bit));
		}
	}

done:
	return 0;
}

static const struct qcom_smem_state_ops smsm_state_ops = {
	.update_bits = smsm_update_bits,
};
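
/*
 * For reference (not part of this driver's own flow): consumers of the
 * outgoing state do not call smsm_update_bits() directly, they go through
 * the smem_state API, which ends up in the update_bits op above. A minimal
 * consumer sketch, with the "stop" name and the lack of cleanup being
 * illustrative only:
 *
 *	unsigned int bit;
 *	struct qcom_smem_state *state;
 *
 *	state = qcom_smem_state_get(dev, "stop", &bit);
 *	if (!IS_ERR(state))
 *		qcom_smem_state_update_bits(state, BIT(bit), BIT(bit));
 */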

/**
 * smsm_intr() - cascading IRQ handler for SMSM
 * @irq: unused
 * @data: entry related to this IRQ
 *
 * This function cascades an incoming interrupt from a remote system, based on
 * the state bits and configuration.
 */
static irqreturn_t smsm_intr(int irq, void *data)
{
	struct smsm_entry *entry = data;
	unsigned i;
	int irq_pin;
	u32 changed;
	u32 val;

	val = readl(entry->remote_state);
	changed = val ^ xchg(&entry->last_value, val);

	for_each_set_bit(i, entry->irq_enabled, 32) {
		if (!(changed & BIT(i)))
			continue;

		if (val & BIT(i)) {
			if (test_bit(i, entry->irq_rising)) {
				irq_pin = irq_find_mapping(entry->domain, i);
				handle_nested_irq(irq_pin);
			}
		} else {
			if (test_bit(i, entry->irq_falling)) {
				irq_pin = irq_find_mapping(entry->domain, i);
				handle_nested_irq(irq_pin);
			}
		}
	}

	return IRQ_HANDLED;
}

/**
 * smsm_mask_irq() - un-subscribe from cascades of IRQs of a certain status bit
 * @irqd: IRQ handle to be masked
 *
 * This un-subscribes the local CPU from interrupts upon changes to the defined
 * status bit. The bit is also cleared from cascading.
 */
static void smsm_mask_irq(struct irq_data *irqd)
{
	struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);
	struct qcom_smsm *smsm = entry->smsm;
	u32 val;

	if (entry->subscription) {
		val = readl(entry->subscription + smsm->local_host);
		val &= ~BIT(irq);
		writel(val, entry->subscription + smsm->local_host);
	}

	clear_bit(irq, entry->irq_enabled);
}

/**
 * smsm_unmask_irq() - subscribe to cascades of IRQs of a certain status bit
 * @irqd: IRQ handle to be unmasked
 *
 * This subscribes the local CPU to interrupts upon changes to the defined
 * status bit. The bit is also marked for cascading.
 */
static void smsm_unmask_irq(struct irq_data *irqd)
{
	struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);
	struct qcom_smsm *smsm = entry->smsm;
	u32 val;

	/* Make sure our last cached state is up-to-date */
	if (readl(entry->remote_state) & BIT(irq))
		set_bit(irq, &entry->last_value);
	else
		clear_bit(irq, &entry->last_value);

	set_bit(irq, entry->irq_enabled);

	if (entry->subscription) {
		val = readl(entry->subscription + smsm->local_host);
		val |= BIT(irq);
		writel(val, entry->subscription + smsm->local_host);
	}
}

/**
 * smsm_set_irq_type() - updates the requested IRQ type for the cascading
 * @irqd: consumer interrupt handle
 * @type: requested flags
 */
static int smsm_set_irq_type(struct irq_data *irqd, unsigned int type)
{
	struct smsm_entry *entry = irq_data_get_irq_chip_data(irqd);
	irq_hw_number_t irq = irqd_to_hwirq(irqd);

	if (!(type & IRQ_TYPE_EDGE_BOTH))
		return -EINVAL;

	if (type & IRQ_TYPE_EDGE_RISING)
		set_bit(irq, entry->irq_rising);
	else
		clear_bit(irq, entry->irq_rising);

	if (type & IRQ_TYPE_EDGE_FALLING)
		set_bit(irq, entry->irq_falling);
	else
		clear_bit(irq, entry->irq_falling);

	return 0;
}

static struct irq_chip smsm_irq_chip = {
	.name = "smsm",
	.irq_mask = smsm_mask_irq,
	.irq_unmask = smsm_unmask_irq,
	.irq_set_type = smsm_set_irq_type,
};

/**
 * smsm_irq_map() - sets up a mapping for a cascaded IRQ
 * @d: IRQ domain representing an entry
 * @irq: IRQ to set up
 * @hw: unused
 */
static int smsm_irq_map(struct irq_domain *d,
			unsigned int irq,
			irq_hw_number_t hw)
{
	struct smsm_entry *entry = d->host_data;

	irq_set_chip_and_handler(irq, &smsm_irq_chip, handle_level_irq);
	irq_set_chip_data(irq, entry);
	irq_set_nested_thread(irq, 1);

	return 0;
}

static const struct irq_domain_ops smsm_irq_ops = {
	.map = smsm_irq_map,
	.xlate = irq_domain_xlate_twocell,
};
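
/*
 * Entries marked "interrupt-controller" in DT are exposed through the domain
 * above as two-cell interrupt controllers (irq_domain_xlate_twocell): the
 * first cell is the state bit, the second the trigger type, of which only
 * the edge types are accepted by smsm_set_irq_type(). A hypothetical consumer
 * reference could look like the following (node label is illustrative only):
 *
 *	interrupts-extended = <&smsm_modem 1 IRQ_TYPE_EDGE_RISING>;
 */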

/**
 * smsm_parse_ipc() - parses a qcom,ipc-%d device tree property
 * @smsm: smsm driver context
 * @host_id: index of the remote host to be resolved
 *
 * Parses device tree to acquire the information needed for sending the
 * outgoing interrupts to a remote host - identified by @host_id.
 */
static int smsm_parse_ipc(struct qcom_smsm *smsm, unsigned host_id)
{
	struct device_node *syscon;
	struct device_node *node = smsm->dev->of_node;
	struct smsm_host *host = &smsm->hosts[host_id];
	char key[16];
	int ret;

	snprintf(key, sizeof(key), "qcom,ipc-%d", host_id);
	syscon = of_parse_phandle(node, key, 0);
	if (!syscon)
		return 0;

	host->ipc_regmap = syscon_node_to_regmap(syscon);
	of_node_put(syscon);
	if (IS_ERR(host->ipc_regmap))
		return PTR_ERR(host->ipc_regmap);

	ret = of_property_read_u32_index(node, key, 1, &host->ipc_offset);
	if (ret < 0) {
		dev_err(smsm->dev, "no offset in %s\n", key);
		return -EINVAL;
	}

	ret = of_property_read_u32_index(node, key, 2, &host->ipc_bit);
	if (ret < 0) {
		dev_err(smsm->dev, "no bit in %s\n", key);
		return -EINVAL;
	}

	return 0;
}
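
/*
 * As parsed above, each qcom,ipc-N property is expected to hold a syscon
 * phandle followed by a register offset and a bit number, for example
 * (the phandle and numbers shown are illustrative only):
 *
 *	qcom,ipc-3 = <&apcs 8 19>;
 */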

/**
 * smsm_inbound_entry() - parse DT and set up an entry representing a remote system
 * @smsm: smsm driver context
 * @entry: entry context to be set up
 * @node: dt node containing the entry's properties
 */
static int smsm_inbound_entry(struct qcom_smsm *smsm,
			      struct smsm_entry *entry,
			      struct device_node *node)
{
	int ret;
	int irq;

	irq = irq_of_parse_and_map(node, 0);
	if (!irq) {
		dev_err(smsm->dev, "failed to parse smsm interrupt\n");
		return -EINVAL;
	}

	ret = devm_request_threaded_irq(smsm->dev, irq,
					NULL, smsm_intr,
					IRQF_ONESHOT,
					"smsm", (void *)entry);
	if (ret) {
		dev_err(smsm->dev, "failed to request interrupt\n");
		return ret;
	}

	entry->domain = irq_domain_add_linear(node, 32, &smsm_irq_ops, entry);
	if (!entry->domain) {
		dev_err(smsm->dev, "failed to add irq_domain\n");
		return -ENOMEM;
	}

	return 0;
}

/**
 * smsm_get_size_info() - parse the optional memory segment for sizes
 * @smsm: smsm driver context
 *
 * Attempt to acquire the number of hosts and entries from the optional shared
 * memory location. Not being able to find this segment should indicate that
 * we're on an older system where these values were hard coded to
 * SMSM_DEFAULT_NUM_ENTRIES and SMSM_DEFAULT_NUM_HOSTS.
 *
 * Returns 0 on success, negative errno on failure.
 */
static int smsm_get_size_info(struct qcom_smsm *smsm)
{
	size_t size;
	struct {
		u32 num_hosts;
		u32 num_entries;
		u32 reserved0;
		u32 reserved1;
	} *info;

	info = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SIZE_INFO, &size);
	if (IS_ERR(info) && PTR_ERR(info) != -ENOENT) {
		if (PTR_ERR(info) != -EPROBE_DEFER)
			dev_err(smsm->dev, "unable to retrieve smsm size info\n");
		return PTR_ERR(info);
	} else if (IS_ERR(info) || size != sizeof(*info)) {
		dev_warn(smsm->dev, "no smsm size info, using defaults\n");
		smsm->num_entries = SMSM_DEFAULT_NUM_ENTRIES;
		smsm->num_hosts = SMSM_DEFAULT_NUM_HOSTS;
		return 0;
	}

	smsm->num_entries = info->num_entries;
	smsm->num_hosts = info->num_hosts;

	dev_dbg(smsm->dev,
		"found custom size of smsm: %d entries %d hosts\n",
		smsm->num_entries, smsm->num_hosts);

	return 0;
}

static int qcom_smsm_probe(struct platform_device *pdev)
{
	struct device_node *local_node;
	struct device_node *node;
	struct smsm_entry *entry;
	struct qcom_smsm *smsm;
	u32 *intr_mask;
	size_t size;
	u32 *states;
	u32 id;
	int ret;

	smsm = devm_kzalloc(&pdev->dev, sizeof(*smsm), GFP_KERNEL);
	if (!smsm)
		return -ENOMEM;
	smsm->dev = &pdev->dev;
	spin_lock_init(&smsm->lock);

	ret = smsm_get_size_info(smsm);
	if (ret)
		return ret;

	smsm->entries = devm_kcalloc(&pdev->dev,
				     smsm->num_entries,
				     sizeof(struct smsm_entry),
				     GFP_KERNEL);
	if (!smsm->entries)
		return -ENOMEM;

	smsm->hosts = devm_kcalloc(&pdev->dev,
				   smsm->num_hosts,
				   sizeof(struct smsm_host),
				   GFP_KERNEL);
	if (!smsm->hosts)
		return -ENOMEM;

	for_each_child_of_node(pdev->dev.of_node, local_node) {
		if (of_find_property(local_node, "#qcom,smem-state-cells", NULL))
			break;
	}
	if (!local_node) {
		dev_err(&pdev->dev, "no state entry\n");
		return -EINVAL;
	}

	of_property_read_u32(pdev->dev.of_node,
			     "qcom,local-host",
			     &smsm->local_host);

	/* Parse the host properties */
	for (id = 0; id < smsm->num_hosts; id++) {
		ret = smsm_parse_ipc(smsm, id);
		if (ret < 0)
			goto out_put;
	}

	/* Acquire the main SMSM state vector */
	ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE,
			      smsm->num_entries * sizeof(u32));
	if (ret < 0 && ret != -EEXIST) {
		dev_err(&pdev->dev, "unable to allocate shared state entry\n");
		goto out_put;
	}

	states = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_SHARED_STATE, NULL);
	if (IS_ERR(states)) {
		dev_err(&pdev->dev, "Unable to acquire shared state entry\n");
		ret = PTR_ERR(states);
		goto out_put;
	}

	/* Acquire the list of interrupt mask vectors */
	size = smsm->num_entries * smsm->num_hosts * sizeof(u32);
	ret = qcom_smem_alloc(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, size);
	if (ret < 0 && ret != -EEXIST) {
		dev_err(&pdev->dev, "unable to allocate smsm interrupt mask\n");
		goto out_put;
	}

	intr_mask = qcom_smem_get(QCOM_SMEM_HOST_ANY, SMEM_SMSM_CPU_INTR_MASK, NULL);
	if (IS_ERR(intr_mask)) {
		dev_err(&pdev->dev, "unable to acquire shared memory interrupt mask\n");
		ret = PTR_ERR(intr_mask);
		goto out_put;
	}

	/* Setup the reference to the local state bits */
	smsm->local_state = states + smsm->local_host;
	smsm->subscription = intr_mask + smsm->local_host * smsm->num_hosts;

	/* Register the outgoing state */
	smsm->state = qcom_smem_state_register(local_node, &smsm_state_ops, smsm);
	if (IS_ERR(smsm->state)) {
		dev_err(smsm->dev, "failed to register qcom_smem_state\n");
		ret = PTR_ERR(smsm->state);
		goto out_put;
	}

	/* Register handlers for remote processor entries of interest */
	for_each_available_child_of_node(pdev->dev.of_node, node) {
		if (!of_property_read_bool(node, "interrupt-controller"))
			continue;

		ret = of_property_read_u32(node, "reg", &id);
		if (ret || id >= smsm->num_entries) {
			dev_err(&pdev->dev, "invalid reg of entry\n");
			if (!ret)
				ret = -EINVAL;
			goto unwind_interfaces;
		}
		entry = &smsm->entries[id];

		entry->smsm = smsm;
		entry->remote_state = states + id;

		/* Setup subscription pointers and unsubscribe from any kicks */
		entry->subscription = intr_mask + id * smsm->num_hosts;
		writel(0, entry->subscription + smsm->local_host);

		ret = smsm_inbound_entry(smsm, entry, node);
		if (ret < 0)
			goto unwind_interfaces;
	}

	platform_set_drvdata(pdev, smsm);
	of_node_put(local_node);

	return 0;

unwind_interfaces:
	of_node_put(node);
	for (id = 0; id < smsm->num_entries; id++)
		if (smsm->entries[id].domain)
			irq_domain_remove(smsm->entries[id].domain);

	qcom_smem_state_unregister(smsm->state);
out_put:
	of_node_put(local_node);
	return ret;
}

static int qcom_smsm_remove(struct platform_device *pdev)
{
	struct qcom_smsm *smsm = platform_get_drvdata(pdev);
	unsigned id;

	for (id = 0; id < smsm->num_entries; id++)
		if (smsm->entries[id].domain)
			irq_domain_remove(smsm->entries[id].domain);

	qcom_smem_state_unregister(smsm->state);

	return 0;
}

static const struct of_device_id qcom_smsm_of_match[] = {
	{ .compatible = "qcom,smsm" },
	{}
};
MODULE_DEVICE_TABLE(of, qcom_smsm_of_match);

static struct platform_driver qcom_smsm_driver = {
	.probe = qcom_smsm_probe,
	.remove = qcom_smsm_remove,
	.driver  = {
		.name = "qcom-smsm",
		.of_match_table = qcom_smsm_of_match,
	},
};
module_platform_driver(qcom_smsm_driver);

MODULE_DESCRIPTION("Qualcomm Shared Memory State Machine driver");
MODULE_LICENSE("GPL v2");