xref: /OK3568_Linux_fs/kernel/drivers/base/platform-msi.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * MSI framework for platform devices
 *
 * Copyright (C) 2015 ARM Limited, All Rights Reserved.
 * Author: Marc Zyngier <marc.zyngier@arm.com>
 */

#include <linux/device.h>
#include <linux/idr.h>
#include <linux/irq.h>
#include <linux/irqdomain.h>
#include <linux/msi.h>
#include <linux/slab.h>

#define DEV_ID_SHIFT	21
#define MAX_DEV_MSIS	(1 << (32 - DEV_ID_SHIFT))

/*
 * Internal data structure containing a (made up, but unique) devid
 * and the callback to write the MSI message.
 */
struct platform_msi_priv_data {
	struct device		*dev;
	void			*host_data;
	msi_alloc_info_t	arg;
	irq_write_msi_msg_t	write_msg;
	int			devid;
};

/* The devid allocator */
static DEFINE_IDA(platform_msi_devid_ida);
#ifdef GENERIC_MSI_DOMAIN_OPS
/*
 * Convert an msi_desc to a globally unique identifier (per-device
 * devid + msi_desc position in the msi_list).
 */
static irq_hw_number_t platform_msi_calc_hwirq(struct msi_desc *desc)
{
	u32 devid;

	devid = desc->platform.msi_priv_data->devid;

	return (devid << (32 - DEV_ID_SHIFT)) | desc->platform.msi_index;
}
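
/*
 * Worked example (illustrative only, not part of the upstream file): with
 * DEV_ID_SHIFT == 21, the hwirq is split into a 21-bit devid in bits
 * [31:11] and an 11-bit msi_index in bits [10:0] (MAX_DEV_MSIS == 2048).
 * A device that was handed devid 5 therefore gets hwirq
 * (5 << 11) | 3 == 10243 for its fourth (index 3) MSI.
 */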

static void platform_msi_set_desc(msi_alloc_info_t *arg, struct msi_desc *desc)
{
	arg->desc = desc;
	arg->hwirq = platform_msi_calc_hwirq(desc);
}

static int platform_msi_init(struct irq_domain *domain,
			     struct msi_domain_info *info,
			     unsigned int virq, irq_hw_number_t hwirq,
			     msi_alloc_info_t *arg)
{
	return irq_domain_set_hwirq_and_chip(domain, virq, hwirq,
					     info->chip, info->chip_data);
}
#else
#define platform_msi_set_desc		NULL
#define platform_msi_init		NULL
#endif

static void platform_msi_update_dom_ops(struct msi_domain_info *info)
{
	struct msi_domain_ops *ops = info->ops;

	BUG_ON(!ops);

	if (ops->msi_init == NULL)
		ops->msi_init = platform_msi_init;
	if (ops->set_desc == NULL)
		ops->set_desc = platform_msi_set_desc;
}

static void platform_msi_write_msg(struct irq_data *data, struct msi_msg *msg)
{
	struct msi_desc *desc = irq_data_get_msi_desc(data);
	struct platform_msi_priv_data *priv_data;

	priv_data = desc->platform.msi_priv_data;

	priv_data->write_msg(desc, msg);
}

static void platform_msi_update_chip_ops(struct msi_domain_info *info)
{
	struct irq_chip *chip = info->chip;

	BUG_ON(!chip);
	if (!chip->irq_mask)
		chip->irq_mask = irq_chip_mask_parent;
	if (!chip->irq_unmask)
		chip->irq_unmask = irq_chip_unmask_parent;
	if (!chip->irq_eoi)
		chip->irq_eoi = irq_chip_eoi_parent;
	if (!chip->irq_set_affinity)
		chip->irq_set_affinity = msi_domain_set_affinity;
	if (!chip->irq_write_msi_msg)
		chip->irq_write_msi_msg = platform_msi_write_msg;
	if (WARN_ON((info->flags & MSI_FLAG_LEVEL_CAPABLE) &&
		    !(chip->flags & IRQCHIP_SUPPORTS_LEVEL_MSI)))
		info->flags &= ~MSI_FLAG_LEVEL_CAPABLE;
}

static void platform_msi_free_descs(struct device *dev, int base, int nvec)
{
	struct msi_desc *desc, *tmp;

	list_for_each_entry_safe(desc, tmp, dev_to_msi_list(dev), list) {
		if (desc->platform.msi_index >= base &&
		    desc->platform.msi_index < (base + nvec)) {
			list_del(&desc->list);
			free_msi_entry(desc);
		}
	}
}

static int platform_msi_alloc_descs_with_irq(struct device *dev, int virq,
					     int nvec,
					     struct platform_msi_priv_data *data)
{
	struct msi_desc *desc;
	int i, base = 0;

	if (!list_empty(dev_to_msi_list(dev))) {
		desc = list_last_entry(dev_to_msi_list(dev),
				       struct msi_desc, list);
		base = desc->platform.msi_index + 1;
	}

	for (i = 0; i < nvec; i++) {
		desc = alloc_msi_entry(dev, 1, NULL);
		if (!desc)
			break;

		desc->platform.msi_priv_data = data;
		desc->platform.msi_index = base + i;
		desc->irq = virq ? virq + i : 0;

		list_add_tail(&desc->list, dev_to_msi_list(dev));
	}

	if (i != nvec) {
		/* Clean up the mess */
		platform_msi_free_descs(dev, base, nvec);

		return -ENOMEM;
	}

	return 0;
}

static int platform_msi_alloc_descs(struct device *dev, int nvec,
				    struct platform_msi_priv_data *data)
{
	return platform_msi_alloc_descs_with_irq(dev, 0, nvec, data);
}

/**
 * platform_msi_create_irq_domain - Create a platform MSI interrupt domain
 * @fwnode:	Optional fwnode of the interrupt controller
 * @info:	MSI domain info
 * @parent:	Parent irq domain
 *
 * Updates the domain and chip ops and creates a platform MSI
 * interrupt domain.
 *
 * Returns:
 * A domain pointer or NULL in case of failure.
 */
struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
						  struct msi_domain_info *info,
						  struct irq_domain *parent)
{
	struct irq_domain *domain;

	if (info->flags & MSI_FLAG_USE_DEF_DOM_OPS)
		platform_msi_update_dom_ops(info);
	if (info->flags & MSI_FLAG_USE_DEF_CHIP_OPS)
		platform_msi_update_chip_ops(info);

	domain = msi_create_irq_domain(fwnode, info, parent);
	if (domain)
		irq_domain_update_bus_token(domain, DOMAIN_BUS_PLATFORM_MSI);

	return domain;
}
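
/*
 * Usage sketch (hypothetical, not part of this file): an irqchip driver
 * stacked on a parent domain usually supplies a minimal irq_chip and
 * msi_domain_info and lets the MSI_FLAG_USE_DEF_*_OPS flags fill in the
 * defaults patched above. The "foo" names, fwnode and parent_domain are
 * placeholders.
 *
 *	static struct irq_chip foo_msi_irq_chip = {
 *		.name		= "foo-MSI",
 *		.irq_mask	= irq_chip_mask_parent,
 *		.irq_unmask	= irq_chip_unmask_parent,
 *	};
 *
 *	static struct msi_domain_info foo_msi_domain_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS | MSI_FLAG_USE_DEF_CHIP_OPS,
 *		.chip	= &foo_msi_irq_chip,
 *	};
 *
 *	domain = platform_msi_create_irq_domain(fwnode, &foo_msi_domain_info,
 *						parent_domain);
 *	if (!domain)
 *		return -ENOMEM;
 */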

static struct platform_msi_priv_data *
platform_msi_alloc_priv_data(struct device *dev, unsigned int nvec,
			     irq_write_msi_msg_t write_msi_msg)
{
	struct platform_msi_priv_data *datap;
	/*
	 * Limit the number of interrupts to 2048 per device. Should we
	 * need to bump this up, DEV_ID_SHIFT should be adjusted
	 * accordingly (which would impact the max number of MSI
	 * capable devices).
	 */
	if (!dev->msi_domain || !write_msi_msg || !nvec || nvec > MAX_DEV_MSIS)
		return ERR_PTR(-EINVAL);

	if (dev->msi_domain->bus_token != DOMAIN_BUS_PLATFORM_MSI) {
		dev_err(dev, "Incompatible msi_domain, giving up\n");
		return ERR_PTR(-EINVAL);
	}

	/* Already had a helping of MSI? Greed... */
	if (!list_empty(dev_to_msi_list(dev)))
		return ERR_PTR(-EBUSY);

	datap = kzalloc(sizeof(*datap), GFP_KERNEL);
	if (!datap)
		return ERR_PTR(-ENOMEM);

	datap->devid = ida_simple_get(&platform_msi_devid_ida,
				      0, 1 << DEV_ID_SHIFT, GFP_KERNEL);
	if (datap->devid < 0) {
		int err = datap->devid;

		kfree(datap);
		return ERR_PTR(err);
	}

	datap->write_msg = write_msi_msg;
	datap->dev = dev;

	return datap;
}

static void platform_msi_free_priv_data(struct platform_msi_priv_data *data)
{
	ida_simple_remove(&platform_msi_devid_ida, data->devid);
	kfree(data);
}

/**
 * platform_msi_domain_alloc_irqs - Allocate MSI interrupts for @dev
 * @dev:		The device for which to allocate interrupts
 * @nvec:		The number of interrupts to allocate
 * @write_msi_msg:	Callback to write an interrupt message for @dev
 *
 * Returns:
 * Zero for success, or an error code in case of failure
 */
int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
				   irq_write_msi_msg_t write_msi_msg)
{
	struct platform_msi_priv_data *priv_data;
	int err;

	priv_data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
	if (IS_ERR(priv_data))
		return PTR_ERR(priv_data);

	err = platform_msi_alloc_descs(dev, nvec, priv_data);
	if (err)
		goto out_free_priv_data;

	err = msi_domain_alloc_irqs(dev->msi_domain, dev, nvec);
	if (err)
		goto out_free_desc;

	return 0;

out_free_desc:
	platform_msi_free_descs(dev, 0, nvec);
out_free_priv_data:
	platform_msi_free_priv_data(priv_data);

	return err;
}
EXPORT_SYMBOL_GPL(platform_msi_domain_alloc_irqs);
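
/*
 * Usage sketch (hypothetical "foo" client driver, not part of this file):
 * the driver provides a callback that programs the doorbell address and
 * data into its own registers, then allocates and requests the interrupts.
 * struct foo_device, foo->base, the FOO_MSI_* offsets and foo_isr() are
 * placeholders.
 *
 *	static void foo_write_msi_msg(struct msi_desc *desc, struct msi_msg *msg)
 *	{
 *		struct foo_device *foo = dev_get_drvdata(msi_desc_to_dev(desc));
 *
 *		writel_relaxed(msg->address_lo, foo->base + FOO_MSI_ADDR_LO);
 *		writel_relaxed(msg->address_hi, foo->base + FOO_MSI_ADDR_HI);
 *		writel_relaxed(msg->data, foo->base + FOO_MSI_DATA);
 *	}
 *
 *	static int foo_setup_msis(struct device *dev, struct foo_device *foo,
 *				  unsigned int nvec)
 *	{
 *		struct msi_desc *desc;
 *		int err;
 *
 *		err = platform_msi_domain_alloc_irqs(dev, nvec, foo_write_msi_msg);
 *		if (err)
 *			return err;
 *
 *		for_each_msi_entry(desc, dev) {
 *			err = devm_request_irq(dev, desc->irq, foo_isr, 0,
 *					       "foo", foo);
 *			if (err)
 *				return err;
 *		}
 *
 *		return 0;
 *	}
 *
 * The remove path (or probe error path) releases the handlers and then
 * calls platform_msi_domain_free_irqs(dev).
 */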

/**
 * platform_msi_domain_free_irqs - Free MSI interrupts for @dev
 * @dev:	The device for which to free interrupts
 */
void platform_msi_domain_free_irqs(struct device *dev)
{
	if (!list_empty(dev_to_msi_list(dev))) {
		struct msi_desc *desc;

		desc = first_msi_entry(dev);
		platform_msi_free_priv_data(desc->platform.msi_priv_data);
	}

	msi_domain_free_irqs(dev->msi_domain, dev);
	platform_msi_free_descs(dev, 0, MAX_DEV_MSIS);
}
EXPORT_SYMBOL_GPL(platform_msi_domain_free_irqs);

/**
 * platform_msi_get_host_data - Query the private data associated with
 *                              a platform-msi domain
 * @domain:	The platform-msi domain
 *
 * Returns the private data provided when calling
 * platform_msi_create_device_domain().
 */
void *platform_msi_get_host_data(struct irq_domain *domain)
{
	struct platform_msi_priv_data *data = domain->host_data;

	return data->host_data;
}
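
/*
 * Usage sketch (hypothetical): inside the irq_domain_ops of a domain built
 * by platform_msi_create_device_domain(), the caller's private pointer is
 * recovered from the domain like this ("foo" names are placeholders):
 *
 *	static int foo_domain_alloc(struct irq_domain *domain, unsigned int virq,
 *				    unsigned int nr_irqs, void *args)
 *	{
 *		struct foo_chip *chip = platform_msi_get_host_data(domain);
 *		...
 *	}
 */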

/**
 * __platform_msi_create_device_domain - Create a platform-msi device domain
 *
 * @dev:		The device generating the MSIs
 * @nvec:		The number of MSIs that need to be allocated
 * @is_tree:		Whether to use a tree-based (rather than linear) domain
 * @write_msi_msg:	Callback to write an interrupt message for @dev
 * @ops:		The hierarchy domain operations to use
 * @host_data:		Private data associated to this domain
 *
 * Returns an irqdomain for @nvec interrupts
 */
struct irq_domain *
__platform_msi_create_device_domain(struct device *dev,
				     unsigned int nvec,
				     bool is_tree,
				     irq_write_msi_msg_t write_msi_msg,
				     const struct irq_domain_ops *ops,
				     void *host_data)
{
	struct platform_msi_priv_data *data;
	struct irq_domain *domain;
	int err;

	data = platform_msi_alloc_priv_data(dev, nvec, write_msi_msg);
	if (IS_ERR(data))
		return NULL;

	data->host_data = host_data;
	domain = irq_domain_create_hierarchy(dev->msi_domain, 0,
					     is_tree ? 0 : nvec,
					     dev->fwnode, ops, data);
	if (!domain)
		goto free_priv;

	err = msi_domain_prepare_irqs(domain->parent, dev, nvec, &data->arg);
	if (err)
		goto free_domain;

	return domain;

free_domain:
	irq_domain_remove(domain);
free_priv:
	platform_msi_free_priv_data(data);
	return NULL;
}
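
/*
 * Usage sketch (hypothetical "foo" wired-IRQ-to-MSI bridge, not part of
 * this file): callers normally go through the platform_msi_create_device_domain()
 * wrapper in <linux/msi.h>, which invokes this function with is_tree == false
 * (the tree variant passes true). FOO_NR_SPIS, foo_write_msg, foo_domain_ops
 * and foo_chip are placeholders.
 *
 *	domain = platform_msi_create_device_domain(&pdev->dev, FOO_NR_SPIS,
 *						   foo_write_msg,
 *						   &foo_domain_ops,
 *						   foo_chip);
 *	if (!domain)
 *		return -ENOMEM;
 */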

/**
 * platform_msi_domain_free - Free interrupts associated with a platform-msi
 *                            domain
 *
 * @domain:	The platform-msi domain
 * @virq:	The base irq from which to perform the free operation
 * @nvec:	How many interrupts to free from @virq
 */
void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
			      unsigned int nvec)
{
	struct platform_msi_priv_data *data = domain->host_data;
	struct msi_desc *desc, *tmp;

	for_each_msi_entry_safe(desc, tmp, data->dev) {
		if (WARN_ON(!desc->irq || desc->nvec_used != 1))
			return;
		if (!(desc->irq >= virq && desc->irq < (virq + nvec)))
			continue;

		irq_domain_free_irqs_common(domain, desc->irq, 1);
		list_del(&desc->list);
		free_msi_entry(desc);
	}
}

/**
 * platform_msi_domain_alloc - Allocate interrupts associated with
 *			       a platform-msi domain
 *
 * @domain:	The platform-msi domain
 * @virq:	The base irq from which to perform the allocate operation
 * @nr_irqs:	How many interrupts to allocate from @virq
 *
 * Return 0 on success, or an error code on failure. Must be called
 * with irq_domain_mutex held (which can only be done as part of a
 * top-level interrupt allocation).
 */
int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs)
{
	struct platform_msi_priv_data *data = domain->host_data;
	int err;

	err = platform_msi_alloc_descs_with_irq(data->dev, virq, nr_irqs, data);
	if (err)
		return err;

	err = msi_domain_populate_irqs(domain->parent, data->dev,
				       virq, nr_irqs, &data->arg);
	if (err)
		platform_msi_domain_free(domain, virq, nr_irqs);

	return err;
}
413