/* SPDX-License-Identifier: GPL-2.0-or-later */
/*
 * Copyright 2016,2017 IBM Corporation.
 */
#ifndef __XIVE_INTERNAL_H
#define __XIVE_INTERNAL_H

/*
 * A "disabled" interrupt should never fire; to catch problems,
 * we set its logical number to this value.
 */
#define XIVE_BAD_IRQ		0x7fffffff
#define XIVE_MAX_IRQ		(XIVE_BAD_IRQ - 1)
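
/*
 * A minimal sketch (caller-side code, not part of this header) of how
 * XIVE_BAD_IRQ lets the event scan catch a "disabled" source that
 * fired anyway; the read helper is hypothetical:
 *
 *	u32 irq = xive_foo_read_event(xc);
 *	if (unlikely(irq == XIVE_BAD_IRQ)) {
 *		pr_err("XIVE: disabled interrupt fired!\n");
 *		return;
 *	}
 */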

/* Each CPU carries one of these with various per-CPU state */
struct xive_cpu {
#ifdef CONFIG_SMP
	/* HW irq number and data of IPI */
	u32 hw_ipi;
	struct xive_irq_data ipi_data;
#endif /* CONFIG_SMP */

	int chip_id;

	/* Queue data. Only one is populated */
#define XIVE_MAX_QUEUES	8
	struct xive_q queue[XIVE_MAX_QUEUES];

	/*
	 * Pending mask. Each bit corresponds to a priority that
	 * potentially has pending interrupts.
	 */
	u8 pending_prio;

	/* Cache of HW CPPR */
	u8 cppr;
};
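
/*
 * A hedged sketch of how pending_prio is meant to be consumed: mark a
 * priority's bit when its queue has work, then service the lowest set
 * bit first (ffs() is 1-based). The scan helper is hypothetical and
 * is assumed to clear the bit once the queue is drained:
 *
 *	xc->pending_prio |= 1 << prio;
 *	while ((prio = ffs(xc->pending_prio)) != 0)
 *		xive_foo_scan_queue(xc, prio - 1);
 */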

/* Backend ops */
struct xive_ops {
	int	(*populate_irq_data)(u32 hw_irq, struct xive_irq_data *data);
	int	(*configure_irq)(u32 hw_irq, u32 target, u8 prio, u32 sw_irq);
	int	(*get_irq_config)(u32 hw_irq, u32 *target, u8 *prio,
				  u32 *sw_irq);
	int	(*setup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
	void	(*cleanup_queue)(unsigned int cpu, struct xive_cpu *xc, u8 prio);
	void	(*setup_cpu)(unsigned int cpu, struct xive_cpu *xc);
	void	(*teardown_cpu)(unsigned int cpu, struct xive_cpu *xc);
	bool	(*match)(struct device_node *np);
	void	(*shutdown)(void);

	void	(*update_pending)(struct xive_cpu *xc);
	void	(*eoi)(u32 hw_irq);
	void	(*sync_source)(u32 hw_irq);
	u64	(*esb_rw)(u32 hw_irq, u32 offset, u64 data, bool write);
#ifdef CONFIG_SMP
	int	(*get_ipi)(unsigned int cpu, struct xive_cpu *xc);
	void	(*put_ipi)(unsigned int cpu, struct xive_cpu *xc);
#endif
	int	(*debug_show)(struct seq_file *m, void *private);
	const char *name;
};
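
/*
 * A minimal sketch, with hypothetical "foo" backend names, of how a
 * platform backend (native or sPAPR) is expected to fill this table
 * and hand it to xive_core_init() declared below:
 *
 *	static const struct xive_ops xive_foo_ops = {
 *		.populate_irq_data	= xive_foo_populate_irq_data,
 *		.configure_irq		= xive_foo_configure_irq,
 *		.setup_queue		= xive_foo_setup_queue,
 *		.cleanup_queue		= xive_foo_cleanup_queue,
 *		.match			= xive_foo_match,
 *		.update_pending		= xive_foo_update_pending,
 *		.eoi			= xive_foo_eoi,
 *		.sync_source		= xive_foo_sync_source,
 *		.name			= "foo",
 *	};
 */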

bool xive_core_init(const struct xive_ops *ops, void __iomem *area, u32 offset,
		    u8 max_prio);
__be32 *xive_queue_page_alloc(unsigned int cpu, u32 queue_shift);
int xive_core_debug_init(void);
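
/*
 * A hedged sketch of a backend's setup_queue hook using
 * xive_queue_page_alloc(); the function and queue-shift constant are
 * hypothetical, and the xive_q member holding the ring page is
 * assumed to be qpage as declared in asm/xive.h. A real backend would
 * also program the queue into the hardware before returning:
 *
 *	static int xive_foo_setup_queue(unsigned int cpu,
 *					struct xive_cpu *xc, u8 prio)
 *	{
 *		struct xive_q *q = &xc->queue[prio];
 *
 *		q->qpage = xive_queue_page_alloc(cpu, XIVE_FOO_QSHIFT);
 *		if (!q->qpage)
 *			return -ENOMEM;
 *		return 0;
 *	}
 */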

static inline u32 xive_alloc_order(u32 queue_shift)
{
	return (queue_shift > PAGE_SHIFT) ? (queue_shift - PAGE_SHIFT) : 0;
}
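
/*
 * Worked example: a queue of 2^queue_shift bytes needs an allocation
 * of order (queue_shift - PAGE_SHIFT). With 4K pages
 * (PAGE_SHIFT == 12):
 *
 *	xive_alloc_order(16) == 4	(2^4 = 16 contiguous pages)
 *	xive_alloc_order(12) == 0	(fits in a single page)
 */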

extern bool xive_cmdline_disabled;

#endif /* __XIVE_INTERNAL_H */