/* SPDX-License-Identifier: GPL-2.0 */
#ifndef LINUX_MSI_H
#define LINUX_MSI_H

#include <linux/kobject.h>
#include <linux/list.h>

struct msi_msg {
	u32	address_lo;	/* low 32 bits of msi message address */
	u32	address_hi;	/* high 32 bits of msi message address */
	u32	data;		/* 16 bits of msi message data */
};
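/*
 * Illustrative sketch (not part of this header's API): composing a
 * message from a 64-bit doorbell address. The address and data values
 * below are hypothetical; real values are dictated by the interrupt
 * controller:
 *
 *	struct msi_msg msg;
 *	u64 doorbell = 0xfee00000ULL;		// hypothetical address
 *
 *	msg.address_lo = lower_32_bits(doorbell);
 *	msg.address_hi = upper_32_bits(doorbell);
 *	msg.data = 0;				// hypothetical payload
 */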

extern int pci_msi_ignore_mask;
/* Helper functions */
struct irq_data;
struct msi_desc;
struct pci_dev;
struct platform_msi_priv_data;
void __get_cached_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
#ifdef CONFIG_GENERIC_MSI_IRQ
void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg);
#else
static inline void get_cached_msi_msg(unsigned int irq, struct msi_msg *msg)
{
}
#endif
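/*
 * Usage sketch (illustrative): re-reading the message that was last
 * written for an irq, e.g. to re-program hardware after a reset:
 *
 *	struct msi_msg msg;
 *
 *	get_cached_msi_msg(irq, &msg);
 */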

typedef void (*irq_write_msi_msg_t)(struct msi_desc *desc,
				    struct msi_msg *msg);

/**
 * platform_msi_desc - Platform device specific msi descriptor data
 * @msi_priv_data:	Pointer to platform private data
 * @msi_index:		The index of the MSI descriptor for multi MSI
 */
struct platform_msi_desc {
	struct platform_msi_priv_data	*msi_priv_data;
	u16				msi_index;
};

/**
 * fsl_mc_msi_desc - FSL-MC device specific msi descriptor data
 * @msi_index:		The index of the MSI descriptor
 */
struct fsl_mc_msi_desc {
	u16				msi_index;
};

/**
 * ti_sci_inta_msi_desc - TISCI based INTA specific msi descriptor data
 * @dev_index:	TISCI device index
 */
struct ti_sci_inta_msi_desc {
	u16	dev_index;
};

/**
 * struct msi_desc - Descriptor structure for MSI based interrupts
 * @list:	List head for management
 * @irq:	The base interrupt number
 * @nvec_used:	The number of vectors used
 * @dev:	Pointer to the device which uses this descriptor
 * @msg:	The last set MSI message cached for reuse
 * @affinity:	Optional pointer to a cpu affinity mask for this descriptor
 *
 * @write_msi_msg:	Callback that may be called when the MSI message
 *			address or data changes
 * @write_msi_msg_data:	Data parameter for the callback.
 *
 * @masked:	[PCI MSI/X] Mask bits
 * @is_msix:	[PCI MSI/X] True if MSI-X
 * @multiple:	[PCI MSI/X] log2 num of messages allocated
 * @multi_cap:	[PCI MSI/X] log2 num of messages supported
 * @maskbit:	[PCI MSI/X] Mask-Pending bit supported?
 * @is_64:	[PCI MSI/X] Address size: 0=32bit 1=64bit
 * @is_virtual:	[PCI MSI/X] True if the entry is a virtual vector without
 *		hardware backing
 * @entry_nr:	[PCI MSI/X] Entry which is described by this descriptor
 * @default_irq: [PCI MSI/X] The default pre-assigned non-MSI irq
 * @mask_pos:	[PCI MSI] Mask register position
 * @mask_base:	[PCI MSI-X] Mask register base address
 * @platform:	[platform] Platform device specific msi descriptor data
 * @fsl_mc:	[fsl-mc] FSL MC device specific msi descriptor data
 * @inta:	[INTA] TISCI based INTA specific msi descriptor data
 */
struct msi_desc {
	/* Shared device/bus type independent data */
	struct list_head		list;
	unsigned int			irq;
	unsigned int			nvec_used;
	struct device			*dev;
	struct msi_msg			msg;
	struct irq_affinity_desc	*affinity;
#ifdef CONFIG_IRQ_MSI_IOMMU
	const void			*iommu_cookie;
#endif

	void (*write_msi_msg)(struct msi_desc *entry, void *data);
	void *write_msi_msg_data;

	union {
		/* PCI MSI/X specific data */
		struct {
			u32 masked;
			struct {
				u8	is_msix		: 1;
				u8	multiple	: 3;
				u8	multi_cap	: 3;
				u8	maskbit		: 1;
				u8	is_64		: 1;
				u8	is_virtual	: 1;
				u16	entry_nr;
				unsigned default_irq;
			} msi_attrib;
			union {
				u8	mask_pos;
				void __iomem *mask_base;
			};
		};

		/*
		 * Non PCI variants add their data structure here. New
		 * entries need to use a named structure. We want
		 * proper name spaces for this. The PCI part is
		 * anonymous for now as it would require an immediate
		 * tree wide cleanup.
		 */
		struct platform_msi_desc platform;
		struct fsl_mc_msi_desc fsl_mc;
		struct ti_sci_inta_msi_desc inta;
	};
};

/* Helpers to hide struct msi_desc implementation details */
#define msi_desc_to_dev(desc)		((desc)->dev)
#define dev_to_msi_list(dev)		(&(dev)->msi_list)
#define first_msi_entry(dev)		\
	list_first_entry(dev_to_msi_list((dev)), struct msi_desc, list)
#define for_each_msi_entry(desc, dev)	\
	list_for_each_entry((desc), dev_to_msi_list((dev)), list)
#define for_each_msi_entry_safe(desc, tmp, dev)	\
	list_for_each_entry_safe((desc), (tmp), dev_to_msi_list((dev)), list)
#define for_each_msi_vector(desc, __irq, dev)				\
	for_each_msi_entry((desc), (dev))				\
		if ((desc)->irq)					\
			for (__irq = (desc)->irq;			\
			     __irq < ((desc)->irq + (desc)->nvec_used);	\
			     __irq++)
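/*
 * Usage sketch (illustrative): walking all descriptors of a device and
 * all Linux irq numbers behind them:
 *
 *	struct msi_desc *desc;
 *	unsigned int irq;
 *
 *	for_each_msi_entry(desc, dev)
 *		pr_debug("base irq %u, %u vectors\n",
 *			 desc->irq, desc->nvec_used);
 *
 *	for_each_msi_vector(desc, irq, dev)
 *		pr_debug("vector irq %u\n", irq);
 */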

#ifdef CONFIG_IRQ_MSI_IOMMU
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
{
	return desc->iommu_cookie;
}

static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
					     const void *iommu_cookie)
{
	desc->iommu_cookie = iommu_cookie;
}
#else
static inline const void *msi_desc_get_iommu_cookie(struct msi_desc *desc)
{
	return NULL;
}

static inline void msi_desc_set_iommu_cookie(struct msi_desc *desc,
					     const void *iommu_cookie)
{
}
#endif

#ifdef CONFIG_PCI_MSI
#define first_pci_msi_entry(pdev)	first_msi_entry(&(pdev)->dev)
#define for_each_pci_msi_entry(desc, pdev)	\
	for_each_msi_entry((desc), &(pdev)->dev)

struct pci_dev *msi_desc_to_pci_dev(struct msi_desc *desc);
void *msi_desc_to_pci_sysdata(struct msi_desc *desc);
void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg);
#else /* CONFIG_PCI_MSI */
static inline void *msi_desc_to_pci_sysdata(struct msi_desc *desc)
{
	return NULL;
}
static inline void pci_write_msi_msg(unsigned int irq, struct msi_msg *msg)
{
}
#endif /* CONFIG_PCI_MSI */

struct msi_desc *alloc_msi_entry(struct device *dev, int nvec,
				 const struct irq_affinity_desc *affinity);
void free_msi_entry(struct msi_desc *entry);
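/*
 * Sketch (illustrative, mirrors how non-PCI MSI infrastructure uses
 * these helpers): allocate a descriptor covering @nvec vectors and
 * link it into the device's MSI list:
 *
 *	struct msi_desc *desc;
 *
 *	desc = alloc_msi_entry(dev, nvec, NULL);
 *	if (!desc)
 *		return -ENOMEM;
 *	list_add_tail(&desc->list, dev_to_msi_list(dev));
 */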
void __pci_read_msi_msg(struct msi_desc *entry, struct msi_msg *msg);
void __pci_write_msi_msg(struct msi_desc *entry, struct msi_msg *msg);

u32 __pci_msix_desc_mask_irq(struct msi_desc *desc, u32 flag);
u32 __pci_msi_desc_mask_irq(struct msi_desc *desc, u32 mask, u32 flag);
void pci_msi_mask_irq(struct irq_data *data);
void pci_msi_unmask_irq(struct irq_data *data);
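/*
 * Sketch (hypothetical irq_chip, illustrative name): pci_msi_mask_irq()
 * and pci_msi_unmask_irq() are meant to be wired up as the mask/unmask
 * callbacks of an MSI interrupt chip:
 *
 *	static struct irq_chip example_msi_chip = {
 *		.name		= "EXAMPLE-MSI",	// hypothetical
 *		.irq_mask	= pci_msi_mask_irq,
 *		.irq_unmask	= pci_msi_unmask_irq,
 *	};
 */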

/*
 * The arch hooks to set up msi irqs. Default functions are implemented
 * as weak symbols so that they /can/ be overridden by architecture specific
 * code if needed. These hooks must be enabled by the architecture or by
 * drivers which depend on them via msi_controller based MSI handling.
 *
 * If CONFIG_PCI_MSI_ARCH_FALLBACKS is not selected they are replaced by
 * stubs with warnings.
 */
#ifdef CONFIG_PCI_MSI_ARCH_FALLBACKS
int arch_setup_msi_irq(struct pci_dev *dev, struct msi_desc *desc);
void arch_teardown_msi_irq(unsigned int irq);
int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type);
void arch_teardown_msi_irqs(struct pci_dev *dev);
void default_teardown_msi_irqs(struct pci_dev *dev);
#else
static inline int arch_setup_msi_irqs(struct pci_dev *dev, int nvec, int type)
{
	WARN_ON_ONCE(1);
	return -ENODEV;
}

static inline void arch_teardown_msi_irqs(struct pci_dev *dev)
{
	WARN_ON_ONCE(1);
}
#endif

/*
 * The restore hooks are still available as they are useful even
 * for fully irq domain based setups. Courtesy to XEN/X86.
 */
void arch_restore_msi_irqs(struct pci_dev *dev);
void default_restore_msi_irqs(struct pci_dev *dev);

struct msi_controller {
	struct module *owner;
	struct device *dev;
	struct device_node *of_node;
	struct list_head list;

	int (*setup_irq)(struct msi_controller *chip, struct pci_dev *dev,
			 struct msi_desc *desc);
	int (*setup_irqs)(struct msi_controller *chip, struct pci_dev *dev,
			  int nvec, int type);
	void (*teardown_irq)(struct msi_controller *chip, unsigned int irq);
};
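/*
 * Sketch (hypothetical legacy host bridge driver): an msi_controller
 * instance only needs the callbacks the driver actually supports:
 *
 *	static struct msi_controller example_chip = {
 *		.setup_irq	= example_setup_irq,	// hypothetical
 *		.teardown_irq	= example_teardown_irq,	// hypothetical
 *	};
 */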

#ifdef CONFIG_GENERIC_MSI_IRQ_DOMAIN

#include <linux/irqhandler.h>
#include <asm/msi.h>

struct irq_domain;
struct irq_domain_ops;
struct irq_chip;
struct device_node;
struct fwnode_handle;
struct msi_domain_info;

/**
 * struct msi_domain_ops - MSI interrupt domain callbacks
 * @get_hwirq:		Retrieve the resulting hw irq number
 * @msi_init:		Domain specific init function for MSI interrupts
 * @msi_free:		Domain specific function to free MSI interrupts
 * @msi_check:		Callback for verification of the domain/info/dev data
 * @msi_prepare:	Prepare the allocation of the interrupts in the domain
 * @msi_finish:		Optional callback to finalize the allocation
 * @set_desc:		Set the msi descriptor for an interrupt
 * @handle_error:	Optional error handler if the allocation fails
 * @domain_alloc_irqs:	Optional function to override the default allocation
 *			function.
 * @domain_free_irqs:	Optional function to override the default free
 *			function.
 *
 * @get_hwirq, @msi_init and @msi_free are callbacks used by
 * msi_create_irq_domain() and related interfaces
 *
 * @msi_check, @msi_prepare, @msi_finish, @set_desc and @handle_error
 * are callbacks used by msi_domain_alloc_irqs() and related
 * interfaces which are based on msi_desc.
 *
 * @domain_alloc_irqs, @domain_free_irqs can be used to override the
 * default allocation/free functions (__msi_domain_alloc/free_irqs). This
 * is initially for a wrapper around XEN's separate MSI universe which can't
 * be wrapped into the regular irq domain concepts by mere mortals. This
 * makes it possible to use msi_domain_alloc/free_irqs universally without
 * having to special case XEN all over the place.
 *
 * Contrary to other operations, @domain_alloc_irqs and @domain_free_irqs
 * are set to the default implementation if NULL and even when
 * MSI_FLAG_USE_DEF_DOM_OPS is not set to avoid breaking existing users and
 * because these callbacks are obviously mandatory.
 *
 * This is NOT meant to be abused, but it can be useful to build wrappers
 * for specialized MSI irq domains which need extra work before and after
 * calling __msi_domain_alloc_irqs()/__msi_domain_free_irqs().
 */
struct msi_domain_ops {
	irq_hw_number_t	(*get_hwirq)(struct msi_domain_info *info,
				     msi_alloc_info_t *arg);
	int		(*msi_init)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq, irq_hw_number_t hwirq,
				    msi_alloc_info_t *arg);
	void		(*msi_free)(struct irq_domain *domain,
				    struct msi_domain_info *info,
				    unsigned int virq);
	int		(*msi_check)(struct irq_domain *domain,
				     struct msi_domain_info *info,
				     struct device *dev);
	int		(*msi_prepare)(struct irq_domain *domain,
				       struct device *dev, int nvec,
				       msi_alloc_info_t *arg);
	void		(*msi_finish)(msi_alloc_info_t *arg, int retval);
	void		(*set_desc)(msi_alloc_info_t *arg,
				    struct msi_desc *desc);
	int		(*handle_error)(struct irq_domain *domain,
					struct msi_desc *desc, int error);
	int		(*domain_alloc_irqs)(struct irq_domain *domain,
					     struct device *dev, int nvec);
	void		(*domain_free_irqs)(struct irq_domain *domain,
					    struct device *dev);
};

/**
 * struct msi_domain_info - MSI interrupt domain data
 * @flags:		Flags to describe features and capabilities
 * @ops:		The callback data structure
 * @chip:		Optional: associated interrupt chip
 * @chip_data:		Optional: associated interrupt chip data
 * @handler:		Optional: associated interrupt flow handler
 * @handler_data:	Optional: associated interrupt flow handler data
 * @handler_name:	Optional: associated interrupt flow handler name
 * @data:		Optional: domain specific data
 */
struct msi_domain_info {
	u32			flags;
	struct msi_domain_ops	*ops;
	struct irq_chip		*chip;
	void			*chip_data;
	irq_flow_handler_t	handler;
	void			*handler_data;
	const char		*handler_name;
	void			*data;
};

/* Flags for msi_domain_info */
enum {
	/*
	 * Init non implemented ops callbacks with default MSI domain
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_DOM_OPS	= (1 << 0),
	/*
	 * Init non implemented chip callbacks with default MSI chip
	 * callbacks.
	 */
	MSI_FLAG_USE_DEF_CHIP_OPS	= (1 << 1),
	/* Support multiple PCI MSI interrupts */
	MSI_FLAG_MULTI_PCI_MSI		= (1 << 2),
	/* Support PCI MSIX interrupts */
	MSI_FLAG_PCI_MSIX		= (1 << 3),
	/* Needs early activate, required for PCI */
	MSI_FLAG_ACTIVATE_EARLY		= (1 << 4),
	/*
	 * Must reactivate when irq is started even when
	 * MSI_FLAG_ACTIVATE_EARLY has been set.
	 */
	MSI_FLAG_MUST_REACTIVATE	= (1 << 5),
	/* Is level-triggered capable, using two messages */
	MSI_FLAG_LEVEL_CAPABLE		= (1 << 6),
};

int msi_domain_set_affinity(struct irq_data *data, const struct cpumask *mask,
			    bool force);

struct irq_domain *msi_create_irq_domain(struct fwnode_handle *fwnode,
					 struct msi_domain_info *info,
					 struct irq_domain *parent);
int __msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec);
int msi_domain_alloc_irqs(struct irq_domain *domain, struct device *dev,
			  int nvec);
void __msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
void msi_domain_free_irqs(struct irq_domain *domain, struct device *dev);
struct msi_domain_info *msi_get_domain_info(struct irq_domain *domain);
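/*
 * Sketch (hypothetical irqchip driver, illustrative names): building an
 * MSI domain on top of a parent domain. The ops and chip instances are
 * assumptions, not objects provided by this header:
 *
 *	static struct msi_domain_ops example_msi_ops;	// hypothetical
 *	static struct irq_chip example_msi_irq_chip;	// hypothetical
 *
 *	static struct msi_domain_info example_msi_info = {
 *		.flags	= MSI_FLAG_USE_DEF_DOM_OPS |
 *			  MSI_FLAG_USE_DEF_CHIP_OPS,
 *		.ops	= &example_msi_ops,
 *		.chip	= &example_msi_irq_chip,
 *	};
 *
 *	domain = msi_create_irq_domain(fwnode, &example_msi_info, parent);
 */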

struct irq_domain *platform_msi_create_irq_domain(struct fwnode_handle *fwnode,
						  struct msi_domain_info *info,
						  struct irq_domain *parent);
int platform_msi_domain_alloc_irqs(struct device *dev, unsigned int nvec,
				   irq_write_msi_msg_t write_msi_msg);
void platform_msi_domain_free_irqs(struct device *dev);
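/*
 * Usage sketch (hypothetical platform device driver): allocating
 * platform MSIs with a driver-provided message writer:
 *
 *	static void example_write_msg(struct msi_desc *desc,
 *				      struct msi_msg *msg)
 *	{
 *		// program msg->address_hi/lo and msg->data into hardware
 *	}
 *
 *	err = platform_msi_domain_alloc_irqs(dev, 4, example_write_msg);
 *	...
 *	platform_msi_domain_free_irqs(dev);
 */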

/* When an MSI domain is used as an intermediate domain */
int msi_domain_prepare_irqs(struct irq_domain *domain, struct device *dev,
			    int nvec, msi_alloc_info_t *args);
int msi_domain_populate_irqs(struct irq_domain *domain, struct device *dev,
			     int virq, int nvec, msi_alloc_info_t *args);
struct irq_domain *
__platform_msi_create_device_domain(struct device *dev,
				    unsigned int nvec,
				    bool is_tree,
				    irq_write_msi_msg_t write_msi_msg,
				    const struct irq_domain_ops *ops,
				    void *host_data);

#define platform_msi_create_device_domain(dev, nvec, write, ops, data)	\
	__platform_msi_create_device_domain(dev, nvec, false, write, ops, data)
#define platform_msi_create_device_tree_domain(dev, nvec, write, ops, data) \
	__platform_msi_create_device_domain(dev, nvec, true, write, ops, data)

int platform_msi_domain_alloc(struct irq_domain *domain, unsigned int virq,
			      unsigned int nr_irqs);
void platform_msi_domain_free(struct irq_domain *domain, unsigned int virq,
			      unsigned int nvec);
void *platform_msi_get_host_data(struct irq_domain *domain);
#endif /* CONFIG_GENERIC_MSI_IRQ_DOMAIN */

#ifdef CONFIG_PCI_MSI_IRQ_DOMAIN
void pci_msi_domain_write_msg(struct irq_data *irq_data, struct msi_msg *msg);
struct irq_domain *pci_msi_create_irq_domain(struct fwnode_handle *fwnode,
					     struct msi_domain_info *info,
					     struct irq_domain *parent);
int pci_msi_domain_check_cap(struct irq_domain *domain,
			     struct msi_domain_info *info, struct device *dev);
u32 pci_msi_domain_get_msi_rid(struct irq_domain *domain, struct pci_dev *pdev);
struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev);
bool pci_dev_has_special_msi_domain(struct pci_dev *pdev);
#else
static inline struct irq_domain *pci_msi_get_device_domain(struct pci_dev *pdev)
{
	return NULL;
}
#endif /* CONFIG_PCI_MSI_IRQ_DOMAIN */

#endif /* LINUX_MSI_H */