xref: /OK3568_Linux_fs/u-boot/arch/x86/include/asm/mp.h (revision 4882a59341e53eb6f0b4789bf948001014eff981)
/*
 * Copyright (c) 2015 Google, Inc
 *
 * SPDX-License-Identifier:	GPL-2.0
 *
 * Taken from coreboot file of the same name
 */
8*4882a593Smuzhiyun 
#ifndef _X86_MP_H_
#define _X86_MP_H_

#include <asm/atomic.h>

/*
 * Per-CPU callback used in flight records: invoked with the CPU's udevice
 * and a caller-supplied opaque argument.
 *
 * NOTE(review): return-value convention is not defined in this header —
 * presumably 0 on success / negative on error, matching mp_init() below;
 * confirm against the implementation in mp_init.c.
 */
typedef int (*mp_callback_t)(struct udevice *cpu, void *arg);
15*4882a593Smuzhiyun 
/*
 * A mp_flight_record details a sequence of calls for the APs to perform
 * along with the BSP to coordinate sequencing. Each flight record either
 * provides a barrier for each AP before calling the callback or the APs
 * are allowed to perform the callback without waiting. Regardless, each
 * record has the cpus_entered field incremented for each record. When
 * the BSP observes that the cpus_entered matches the number of APs
 * the bsp_call is called with bsp_arg and upon returning releases the
 * barrier allowing the APs to make further progress.
 *
 * Note that ap_call() and bsp_call() can be NULL. In the NULL case the
 * callback will just not be called.
 */
struct mp_flight_record {
	atomic_t barrier;	/* 0 = APs wait here until the BSP releases
				 * them; non-zero = APs may proceed at once
				 * (see MP_FR_BLOCK_APS / MP_FR_NOBLOCK_APS)
				 */
	atomic_t cpus_entered;	/* number of APs that have reached this
				 * record; the BSP polls this against the AP
				 * count before running bsp_call
				 */
	mp_callback_t ap_call;	/* run on each AP; NULL to skip */
	void *ap_arg;		/* argument passed to ap_call() */
	mp_callback_t bsp_call;	/* run once on the BSP; NULL to skip */
	void *bsp_arg;		/* argument passed to bsp_call() */
	/* Aligned so records sit in distinct cache/DMA lines, since BSP and
	 * APs touch neighbouring records concurrently */
} __attribute__((aligned(ARCH_DMA_MINALIGN)));
37*4882a593Smuzhiyun 
/*
 * Static initializer for a struct mp_flight_record.
 *
 * @barrier_:	initial barrier value: 0 holds the APs at this record until
 *		the BSP releases them; 1 lets them run ap_call immediately
 * @ap_func_:	callback run on each AP (may be NULL)
 * @ap_arg_:	argument for @ap_func_
 * @bsp_func_:	callback run once on the BSP (may be NULL)
 * @bsp_arg_:	argument for @bsp_func_
 */
#define MP_FLIGHT_RECORD(barrier_, ap_func_, ap_arg_, bsp_func_, bsp_arg_) \
	{							\
		.barrier = ATOMIC_INIT(barrier_),		\
		.cpus_entered = ATOMIC_INIT(0),			\
		.ap_call = ap_func_,				\
		.ap_arg = ap_arg_,				\
		.bsp_call = bsp_func_,				\
		.bsp_arg = bsp_arg_,				\
	}
47*4882a593Smuzhiyun 
/* Flight record whose APs wait at the barrier until the BSP releases them */
#define MP_FR_BLOCK_APS(ap_func, ap_arg, bsp_func, bsp_arg) \
	MP_FLIGHT_RECORD(0, ap_func, ap_arg, bsp_func, bsp_arg)

/* Flight record whose APs run their callback without waiting for the BSP */
#define MP_FR_NOBLOCK_APS(ap_func, ap_arg, bsp_func, bsp_arg) \
	MP_FLIGHT_RECORD(1, ap_func, ap_arg, bsp_func, bsp_arg)
53*4882a593Smuzhiyun 
/*
 * The mp_params structure provides the arguments to the mp subsystem
 * for bringing up APs.
 *
 * At present this is overkill for U-Boot, but it may make it easier to add
 * SMM support.
 */
struct mp_params {
	int parallel_microcode_load;	/* non-zero: APs load microcode in
					 * parallel rather than serially
					 * (NOTE(review): inferred from the
					 * field name — confirm in mp_init.c)
					 */
	const void *microcode_pointer;	/* microcode update blob for the APs */
	/* Flight plan  for APs and BSP */
	struct mp_flight_record *flight_plan;
	int num_records;		/* number of entries in flight_plan */
};
68*4882a593Smuzhiyun 
/*
 * mp_init() will set up the SIPI vector and bring up the APs according to
 * mp_params. Each flight record will be executed according to the plan. Note
 * that the MP infrastructure uses SMM default area without saving it. It's
 * up to the chipset or mainboard to either e820 reserve this area or save this
 * region prior to calling mp_init() and restoring it after mp_init returns.
 *
 * At the time mp_init() is called the MTRR MSRs are mirrored into APs then
 * caching is enabled before running the flight plan.
 *
 * The MP init has the following properties:
 * 1. APs are brought up in parallel.
 * 2. The ordering of cpu number and APIC ids is not deterministic.
 *    Therefore, one cannot rely on this property or the order of devices in
 *    the device tree unless the chipset or mainboard know the APIC ids
 *    a priori.
 *
 * mp_init() returns < 0 on error, 0 on success.
 */
int mp_init(struct mp_params *params);

/*
 * Probes the CPU device. Matches the mp_callback_t signature so it can be
 * used as a flight-record callback; @unused is ignored.
 */
int mp_init_cpu(struct udevice *cpu, void *unused);

/*
 * Set up additional CPUs.
 *
 * NOTE(review): the return convention is not stated here — presumably 0 on
 * success / negative on error like mp_init(); confirm at the definition.
 */
int x86_mp_init(void);

#endif /* _X86_MP_H_ */
97