xref: /OK3568_Linux_fs/kernel/drivers/acpi/numa/hmat.c (revision 4882a59341e53eb6f0b4789bf948001014eff981)
// SPDX-License-Identifier: GPL-2.0
/*
 * Copyright (c) 2019, Intel Corporation.
 *
 * Heterogeneous Memory Attributes Table (HMAT) representation
 *
 * This program parses and reports the platform's HMAT tables, and registers
 * the applicable attributes with the node's interfaces.
 */

#define pr_fmt(fmt) "acpi/hmat: " fmt
#define dev_fmt(fmt) "acpi/hmat: " fmt

#include <linux/acpi.h>
#include <linux/bitops.h>
#include <linux/device.h>
#include <linux/init.h>
#include <linux/list.h>
#include <linux/mm.h>
#include <linux/platform_device.h>
#include <linux/list_sort.h>
#include <linux/memregion.h>
#include <linux/memory.h>
#include <linux/mutex.h>
#include <linux/node.h>
#include <linux/sysfs.h>
#include <linux/dax.h>

static u8 hmat_revision;
static int hmat_disable __initdata;

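/*
 * Allow early init code to opt out of HMAT parsing; the flag is checked
 * before any tables are consumed in hmat_init().
 */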
void __init disable_hmat(void)
{
	hmat_disable = 1;
}

static LIST_HEAD(targets);
static LIST_HEAD(initiators);
static LIST_HEAD(localities);

static DEFINE_MUTEX(target_lock);

/*
 * The defined enum order is used to prioritize attributes to break ties when
 * selecting the best performing node.
 */
enum locality_types {
	WRITE_LATENCY,
	READ_LATENCY,
	WRITE_BANDWIDTH,
	READ_BANDWIDTH,
};

static struct memory_locality *localities_types[4];

struct target_cache {
	struct list_head node;
	struct node_cache_attrs cache_attrs;
};

struct memory_target {
	struct list_head node;
	unsigned int memory_pxm;
	unsigned int processor_pxm;
	struct resource memregions;
	struct node_hmem_attrs hmem_attrs[2];
	struct list_head caches;
	struct node_cache_attrs cache_attrs;
	bool registered;
};

struct memory_initiator {
	struct list_head node;
	unsigned int processor_pxm;
	bool has_cpu;
};

struct memory_locality {
	struct list_head node;
	struct acpi_hmat_locality *hmat_loc;
};

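/*
 * Lookup helpers keyed by ACPI proximity domain (PXM); entries are created
 * while parsing the SRAT and HMAT tables below.
 */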
static struct memory_initiator *find_mem_initiator(unsigned int cpu_pxm)
{
	struct memory_initiator *initiator;

	list_for_each_entry(initiator, &initiators, node)
		if (initiator->processor_pxm == cpu_pxm)
			return initiator;
	return NULL;
}

static struct memory_target *find_mem_target(unsigned int mem_pxm)
{
	struct memory_target *target;

	list_for_each_entry(target, &targets, node)
		if (target->memory_pxm == mem_pxm)
			return target;
	return NULL;
}

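/*
 * Allocate one initiator per processor proximity domain referenced by the
 * HMAT; domains that SRAT did not map to a node are ignored.
 */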
static __init void alloc_memory_initiator(unsigned int cpu_pxm)
{
	struct memory_initiator *initiator;

	if (pxm_to_node(cpu_pxm) == NUMA_NO_NODE)
		return;

	initiator = find_mem_initiator(cpu_pxm);
	if (initiator)
		return;

	initiator = kzalloc(sizeof(*initiator), GFP_KERNEL);
	if (!initiator)
		return;

	initiator->processor_pxm = cpu_pxm;
	initiator->has_cpu = node_state(pxm_to_node(cpu_pxm), N_CPU);
	list_add_tail(&initiator->node, &initiators);
}

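/*
 * Targets are created from SRAT memory affinity entries; a proximity domain
 * may describe several address ranges, all tracked under one target.
 */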
static __init void alloc_memory_target(unsigned int mem_pxm,
		resource_size_t start, resource_size_t len)
{
	struct memory_target *target;

	target = find_mem_target(mem_pxm);
	if (!target) {
		target = kzalloc(sizeof(*target), GFP_KERNEL);
		if (!target)
			return;
		target->memory_pxm = mem_pxm;
		target->processor_pxm = PXM_INVAL;
		target->memregions = (struct resource) {
			.name	= "ACPI mem",
			.start	= 0,
			.end	= -1,
			.flags	= IORESOURCE_MEM,
		};
		list_add_tail(&target->node, &targets);
		INIT_LIST_HEAD(&target->caches);
	}

	/*
	 * There are potentially multiple ranges per PXM, so record each
	 * in the per-target memregions resource tree.
	 */
	if (!__request_region(&target->memregions, start, len, "memory target",
				IORESOURCE_MEM))
		pr_warn("failed to reserve %#llx - %#llx in pxm: %d\n",
				start, start + len, mem_pxm);
}

static __init const char *hmat_data_type(u8 type)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		return "Access Latency";
	case ACPI_HMAT_READ_LATENCY:
		return "Read Latency";
	case ACPI_HMAT_WRITE_LATENCY:
		return "Write Latency";
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		return "Access Bandwidth";
	case ACPI_HMAT_READ_BANDWIDTH:
		return "Read Bandwidth";
	case ACPI_HMAT_WRITE_BANDWIDTH:
		return "Write Bandwidth";
	default:
		return "Reserved";
	}
}

static __init const char *hmat_data_type_suffix(u8 type)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
	case ACPI_HMAT_READ_LATENCY:
	case ACPI_HMAT_WRITE_LATENCY:
		return " nsec";
	case ACPI_HMAT_ACCESS_BANDWIDTH:
	case ACPI_HMAT_READ_BANDWIDTH:
	case ACPI_HMAT_WRITE_BANDWIDTH:
		return " MB/s";
	default:
		return "";
	}
}

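/*
 * Convert a raw matrix entry into nanoseconds or MB/s using the table's
 * entry base unit, returning 0 for invalid or overflowing values.
 */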
static u32 hmat_normalize(u16 entry, u64 base, u8 type)
{
	u32 value;

	/*
	 * Check for invalid and overflow values
	 */
	if (entry == 0xffff || !entry)
		return 0;
	else if (base > (UINT_MAX / (entry)))
		return 0;

	/*
	 * Divide by the base unit for version 1, convert latency from
	 * picoseconds to nanoseconds for revision 2.
	 */
	value = entry * base;
	if (hmat_revision == 1) {
		if (value < 10)
			return 0;
		value = DIV_ROUND_UP(value, 10);
	} else if (hmat_revision == 2) {
		switch (type) {
		case ACPI_HMAT_ACCESS_LATENCY:
		case ACPI_HMAT_READ_LATENCY:
		case ACPI_HMAT_WRITE_LATENCY:
			value = DIV_ROUND_UP(value, 1000);
			break;
		default:
			break;
		}
	}
	return value;
}

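/*
 * Fold a normalized value into the target's hmem attributes for the given
 * access class: 0 covers any initiator, 1 is restricted to CPU initiators.
 */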
static void hmat_update_target_access(struct memory_target *target,
				      u8 type, u32 value, int access)
{
	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		target->hmem_attrs[access].read_latency = value;
		target->hmem_attrs[access].write_latency = value;
		break;
	case ACPI_HMAT_READ_LATENCY:
		target->hmem_attrs[access].read_latency = value;
		break;
	case ACPI_HMAT_WRITE_LATENCY:
		target->hmem_attrs[access].write_latency = value;
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		target->hmem_attrs[access].read_bandwidth = value;
		target->hmem_attrs[access].write_bandwidth = value;
		break;
	case ACPI_HMAT_READ_BANDWIDTH:
		target->hmem_attrs[access].read_bandwidth = value;
		break;
	case ACPI_HMAT_WRITE_BANDWIDTH:
		target->hmem_attrs[access].write_bandwidth = value;
		break;
	default:
		break;
	}
}

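/*
 * Remember which locality structure supplies each attribute type so the
 * best-performing initiators can be selected at registration time.
 */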
static __init void hmat_add_locality(struct acpi_hmat_locality *hmat_loc)
{
	struct memory_locality *loc;

	loc = kzalloc(sizeof(*loc), GFP_KERNEL);
	if (!loc) {
		pr_notice_once("Failed to allocate HMAT locality\n");
		return;
	}

	loc->hmat_loc = hmat_loc;
	list_add_tail(&loc->node, &localities);

	switch (hmat_loc->data_type) {
	case ACPI_HMAT_ACCESS_LATENCY:
		localities_types[READ_LATENCY] = loc;
		localities_types[WRITE_LATENCY] = loc;
		break;
	case ACPI_HMAT_READ_LATENCY:
		localities_types[READ_LATENCY] = loc;
		break;
	case ACPI_HMAT_WRITE_LATENCY:
		localities_types[WRITE_LATENCY] = loc;
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
		localities_types[READ_BANDWIDTH] = loc;
		localities_types[WRITE_BANDWIDTH] = loc;
		break;
	case ACPI_HMAT_READ_BANDWIDTH:
		localities_types[READ_BANDWIDTH] = loc;
		break;
	case ACPI_HMAT_WRITE_BANDWIDTH:
		localities_types[WRITE_BANDWIDTH] = loc;
		break;
	default:
		break;
	}
}

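/*
 * A System Locality Latency and Bandwidth Information Structure is followed
 * by an array of initiator PXMs, an array of target PXMs, and an
 * initiator x target matrix of u16 entries, in that order.
 */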
static __init int hmat_parse_locality(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	struct acpi_hmat_locality *hmat_loc = (void *)header;
	struct memory_target *target;
	unsigned int init, targ, total_size, ipds, tpds;
	u32 *inits, *targs, value;
	u16 *entries;
	u8 type, mem_hier;

	if (hmat_loc->header.length < sizeof(*hmat_loc)) {
		pr_notice("HMAT: Unexpected locality header length: %u\n",
			 hmat_loc->header.length);
		return -EINVAL;
	}

	type = hmat_loc->data_type;
	mem_hier = hmat_loc->flags & ACPI_HMAT_MEMORY_HIERARCHY;
	ipds = hmat_loc->number_of_initiator_Pds;
	tpds = hmat_loc->number_of_target_Pds;
	total_size = sizeof(*hmat_loc) + sizeof(*entries) * ipds * tpds +
		     sizeof(*inits) * ipds + sizeof(*targs) * tpds;
	if (hmat_loc->header.length < total_size) {
		pr_notice("HMAT: Unexpected locality header length:%u, minimum required:%u\n",
			 hmat_loc->header.length, total_size);
		return -EINVAL;
	}

	pr_info("HMAT: Locality: Flags:%02x Type:%s Initiator Domains:%u Target Domains:%u Base:%lld\n",
		hmat_loc->flags, hmat_data_type(type), ipds, tpds,
		hmat_loc->entry_base_unit);

	inits = (u32 *)(hmat_loc + 1);
	targs = inits + ipds;
	entries = (u16 *)(targs + tpds);
	for (init = 0; init < ipds; init++) {
		alloc_memory_initiator(inits[init]);
		for (targ = 0; targ < tpds; targ++) {
			value = hmat_normalize(entries[init * tpds + targ],
					       hmat_loc->entry_base_unit,
					       type);
			pr_info("  Initiator-Target[%u-%u]:%u%s\n",
				inits[init], targs[targ], value,
				hmat_data_type_suffix(type));

			if (mem_hier == ACPI_HMAT_MEMORY) {
				target = find_mem_target(targs[targ]);
				if (target && target->processor_pxm == inits[init]) {
					hmat_update_target_access(target, type, value, 0);
					/* If the node has a CPU, update access 1 */
					if (node_state(pxm_to_node(inits[init]), N_CPU))
						hmat_update_target_access(target, type, value, 1);
				}
			}
		}
	}

	if (mem_hier == ACPI_HMAT_MEMORY)
		hmat_add_locality(hmat_loc);

	return 0;
}

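/*
 * Decode a Memory Side Cache Information Structure and attach the cache
 * attributes to the memory target it describes.
 */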
static __init int hmat_parse_cache(union acpi_subtable_headers *header,
				   const unsigned long end)
{
	struct acpi_hmat_cache *cache = (void *)header;
	struct memory_target *target;
	struct target_cache *tcache;
	u32 attrs;

	if (cache->header.length < sizeof(*cache)) {
		pr_notice("HMAT: Unexpected cache header length: %u\n",
			 cache->header.length);
		return -EINVAL;
	}

	attrs = cache->cache_attributes;
	pr_info("HMAT: Cache: Domain:%u Size:%llu Attrs:%08x SMBIOS Handles:%d\n",
		cache->memory_PD, cache->cache_size, attrs,
		cache->number_of_SMBIOShandles);

	target = find_mem_target(cache->memory_PD);
	if (!target)
		return 0;

	tcache = kzalloc(sizeof(*tcache), GFP_KERNEL);
	if (!tcache) {
		pr_notice_once("Failed to allocate HMAT cache info\n");
		return 0;
	}

	tcache->cache_attrs.size = cache->cache_size;
	tcache->cache_attrs.level = (attrs & ACPI_HMAT_CACHE_LEVEL) >> 4;
	tcache->cache_attrs.line_size = (attrs & ACPI_HMAT_CACHE_LINE_SIZE) >> 16;

	switch ((attrs & ACPI_HMAT_CACHE_ASSOCIATIVITY) >> 8) {
	case ACPI_HMAT_CA_DIRECT_MAPPED:
		tcache->cache_attrs.indexing = NODE_CACHE_DIRECT_MAP;
		break;
	case ACPI_HMAT_CA_COMPLEX_CACHE_INDEXING:
		tcache->cache_attrs.indexing = NODE_CACHE_INDEXED;
		break;
	case ACPI_HMAT_CA_NONE:
	default:
		tcache->cache_attrs.indexing = NODE_CACHE_OTHER;
		break;
	}

	switch ((attrs & ACPI_HMAT_WRITE_POLICY) >> 12) {
	case ACPI_HMAT_CP_WB:
		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_BACK;
		break;
	case ACPI_HMAT_CP_WT:
		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_THROUGH;
		break;
	case ACPI_HMAT_CP_NONE:
	default:
		tcache->cache_attrs.write_policy = NODE_CACHE_WRITE_OTHER;
		break;
	}
	list_add_tail(&tcache->node, &target->caches);

	return 0;
}

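/*
 * Memory Proximity Domain Attributes Structures associate a memory domain
 * with the processor domain that is directly attached to it, when one exists.
 */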
static int __init hmat_parse_proximity_domain(union acpi_subtable_headers *header,
					      const unsigned long end)
{
	struct acpi_hmat_proximity_domain *p = (void *)header;
	struct memory_target *target = NULL;

	if (p->header.length != sizeof(*p)) {
		pr_notice("HMAT: Unexpected address range header length: %u\n",
			 p->header.length);
		return -EINVAL;
	}

	if (hmat_revision == 1)
		pr_info("HMAT: Memory (%#llx length %#llx) Flags:%04x Processor Domain:%u Memory Domain:%u\n",
			p->reserved3, p->reserved4, p->flags, p->processor_PD,
			p->memory_PD);
	else
		pr_info("HMAT: Memory Flags:%04x Processor Domain:%u Memory Domain:%u\n",
			p->flags, p->processor_PD, p->memory_PD);

	if ((hmat_revision == 1 && p->flags & ACPI_HMAT_MEMORY_PD_VALID) ||
	    hmat_revision > 1) {
		target = find_mem_target(p->memory_PD);
		if (!target) {
			pr_debug("HMAT: Memory Domain missing from SRAT\n");
			return -EINVAL;
		}
	}
	if (target && p->flags & ACPI_HMAT_PROCESSOR_PD_VALID) {
		int p_node = pxm_to_node(p->processor_PD);

		if (p_node == NUMA_NO_NODE) {
			pr_debug("HMAT: Invalid Processor Domain\n");
			return -EINVAL;
		}
		target->processor_pxm = p->processor_PD;
	}

	return 0;
}

static int __init hmat_parse_subtable(union acpi_subtable_headers *header,
				      const unsigned long end)
{
	struct acpi_hmat_structure *hdr = (void *)header;

	if (!hdr)
		return -EINVAL;

	switch (hdr->type) {
	case ACPI_HMAT_TYPE_PROXIMITY:
		return hmat_parse_proximity_domain(header, end);
	case ACPI_HMAT_TYPE_LOCALITY:
		return hmat_parse_locality(header, end);
	case ACPI_HMAT_TYPE_CACHE:
		return hmat_parse_cache(header, end);
	default:
		return -EINVAL;
	}
}

static __init int srat_parse_mem_affinity(union acpi_subtable_headers *header,
					  const unsigned long end)
{
	struct acpi_srat_mem_affinity *ma = (void *)header;

	if (!ma)
		return -EINVAL;
	if (!(ma->flags & ACPI_SRAT_MEM_ENABLED))
		return 0;
	alloc_memory_target(ma->proximity_domain, ma->base_address, ma->length);
	return 0;
}

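/*
 * Look up the normalized matrix entry for an (initiator, target) pair, or
 * return 0 if either domain is not listed in the locality structure.
 */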
static u32 hmat_initiator_perf(struct memory_target *target,
			       struct memory_initiator *initiator,
			       struct acpi_hmat_locality *hmat_loc)
{
	unsigned int ipds, tpds, i, idx = 0, tdx = 0;
	u32 *inits, *targs;
	u16 *entries;

	ipds = hmat_loc->number_of_initiator_Pds;
	tpds = hmat_loc->number_of_target_Pds;
	inits = (u32 *)(hmat_loc + 1);
	targs = inits + ipds;
	entries = (u16 *)(targs + tpds);

	for (i = 0; i < ipds; i++) {
		if (inits[i] == initiator->processor_pxm) {
			idx = i;
			break;
		}
	}

	if (i == ipds)
		return 0;

	for (i = 0; i < tpds; i++) {
		if (targs[i] == target->memory_pxm) {
			tdx = i;
			break;
		}
	}
	if (i == tpds)
		return 0;

	return hmat_normalize(entries[idx * tpds + tdx],
			      hmat_loc->entry_base_unit,
			      hmat_loc->data_type);
}

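/*
 * Track the best value seen so far: lower wins for latencies, higher wins
 * for bandwidths. Returns true when *best was updated.
 */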
static bool hmat_update_best(u8 type, u32 value, u32 *best)
{
	bool updated = false;

	if (!value)
		return false;

	switch (type) {
	case ACPI_HMAT_ACCESS_LATENCY:
	case ACPI_HMAT_READ_LATENCY:
	case ACPI_HMAT_WRITE_LATENCY:
		if (!*best || *best > value) {
			*best = value;
			updated = true;
		}
		break;
	case ACPI_HMAT_ACCESS_BANDWIDTH:
	case ACPI_HMAT_READ_BANDWIDTH:
	case ACPI_HMAT_WRITE_BANDWIDTH:
		if (!*best || *best < value) {
			*best = value;
			updated = true;
		}
		break;
	}

	return updated;
}

static int initiator_cmp(void *priv, struct list_head *a, struct list_head *b)
{
	struct memory_initiator *ia;
	struct memory_initiator *ib;

	ia = list_entry(a, struct memory_initiator, node);
	ib = list_entry(b, struct memory_initiator, node);

	return ia->processor_pxm - ib->processor_pxm;
}

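/*
 * Seed the candidate nodemask with every known initiator proximity domain.
 */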
static int initiators_to_nodemask(unsigned long *p_nodes)
{
	struct memory_initiator *initiator;

	if (list_empty(&initiators))
		return -ENXIO;

	list_for_each_entry(initiator, &initiators, node)
		set_bit(initiator->processor_pxm, p_nodes);

	return 0;
}

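/*
 * Link this memory target to the initiator node(s) that reach it with the
 * best performance, once for access class 0 (any initiator) and once for
 * access class 1 (CPU initiators only).
 */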
static void hmat_register_target_initiators(struct memory_target *target)
{
	static DECLARE_BITMAP(p_nodes, MAX_NUMNODES);
	struct memory_initiator *initiator;
	unsigned int mem_nid, cpu_nid;
	struct memory_locality *loc = NULL;
	u32 best = 0;
	bool access0done = false;
	int i;

	mem_nid = pxm_to_node(target->memory_pxm);
	/*
	 * If the Address Range Structure provides a local processor pxm, link
	 * only that one. Otherwise, find the best performance attributes and
	 * register all initiators that match.
	 */
	if (target->processor_pxm != PXM_INVAL) {
		cpu_nid = pxm_to_node(target->processor_pxm);
		register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
		access0done = true;
		if (node_state(cpu_nid, N_CPU)) {
			register_memory_node_under_compute_node(mem_nid, cpu_nid, 1);
			return;
		}
	}

	if (list_empty(&localities))
		return;

	/*
	 * We need the initiator list sorted so we can use bitmap_clear for
	 * previously set initiators when we find a better memory accessor.
	 * We'll also use the sorting to prime the candidate nodes with known
	 * initiators.
	 */
	bitmap_zero(p_nodes, MAX_NUMNODES);
	list_sort(NULL, &initiators, initiator_cmp);
	if (initiators_to_nodemask(p_nodes) < 0)
		return;

	if (!access0done) {
		for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
			loc = localities_types[i];
			if (!loc)
				continue;

			best = 0;
			list_for_each_entry(initiator, &initiators, node) {
				u32 value;

				if (!test_bit(initiator->processor_pxm, p_nodes))
					continue;

				value = hmat_initiator_perf(target, initiator,
							    loc->hmat_loc);
				if (hmat_update_best(loc->hmat_loc->data_type, value, &best))
					bitmap_clear(p_nodes, 0, initiator->processor_pxm);
				if (value != best)
					clear_bit(initiator->processor_pxm, p_nodes);
			}
			if (best)
				hmat_update_target_access(target, loc->hmat_loc->data_type,
							  best, 0);
		}

		for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
			cpu_nid = pxm_to_node(i);
			register_memory_node_under_compute_node(mem_nid, cpu_nid, 0);
		}
	}

	/* Access 1 ignores Generic Initiators */
	bitmap_zero(p_nodes, MAX_NUMNODES);
	if (initiators_to_nodemask(p_nodes) < 0)
		return;

	for (i = WRITE_LATENCY; i <= READ_BANDWIDTH; i++) {
		loc = localities_types[i];
		if (!loc)
			continue;

		best = 0;
		list_for_each_entry(initiator, &initiators, node) {
			u32 value;

			if (!initiator->has_cpu) {
				clear_bit(initiator->processor_pxm, p_nodes);
				continue;
			}
			if (!test_bit(initiator->processor_pxm, p_nodes))
				continue;

			value = hmat_initiator_perf(target, initiator, loc->hmat_loc);
			if (hmat_update_best(loc->hmat_loc->data_type, value, &best))
				bitmap_clear(p_nodes, 0, initiator->processor_pxm);
			if (value != best)
				clear_bit(initiator->processor_pxm, p_nodes);
		}
		if (best)
			hmat_update_target_access(target, loc->hmat_loc->data_type, best, 1);
	}
	for_each_set_bit(i, p_nodes, MAX_NUMNODES) {
		cpu_nid = pxm_to_node(i);
		register_memory_node_under_compute_node(mem_nid, cpu_nid, 1);
	}
}

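/*
 * Register any memory-side caches described for this target with the node
 * sysfs interface.
 */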
static void hmat_register_target_cache(struct memory_target *target)
{
	unsigned mem_nid = pxm_to_node(target->memory_pxm);
	struct target_cache *tcache;

	list_for_each_entry(tcache, &target->caches, node)
		node_add_cache(mem_nid, &tcache->cache_attrs);
}

static void hmat_register_target_perf(struct memory_target *target, int access)
{
	unsigned mem_nid = pxm_to_node(target->memory_pxm);
	node_set_perf_attrs(mem_nid, &target->hmem_attrs[access], access);
}

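/*
 * Create an hmem platform device for each reserved address range so that
 * the dax_hmem driver can claim it.
 */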
static void hmat_register_target_devices(struct memory_target *target)
{
	struct resource *res;

	/*
	 * Do not bother creating devices if no driver is available to
	 * consume them.
	 */
	if (!IS_ENABLED(CONFIG_DEV_DAX_HMEM))
		return;

	for (res = target->memregions.child; res; res = res->sibling) {
		int target_nid = pxm_to_node(target->memory_pxm);

		hmem_register_device(target_nid, res);
	}
}

static void hmat_register_target(struct memory_target *target)
{
	int nid = pxm_to_node(target->memory_pxm);

	/*
	 * Devices may belong to either an offline or online
	 * node, so unconditionally add them.
	 */
	hmat_register_target_devices(target);

	/*
	 * Skip offline nodes. This can happen when memory
	 * marked EFI_MEMORY_SP, "specific purpose", is applied
	 * to all the memory in a proximity domain leading to
	 * the node being marked offline / unplugged, or if a
	 * memory-only "hotplug" node is offline.
	 */
	if (nid == NUMA_NO_NODE || !node_online(nid))
		return;

	mutex_lock(&target_lock);
	if (!target->registered) {
		hmat_register_target_initiators(target);
		hmat_register_target_cache(target);
		hmat_register_target_perf(target, 0);
		hmat_register_target_perf(target, 1);
		target->registered = true;
	}
	mutex_unlock(&target_lock);
}

static void hmat_register_targets(void)
{
	struct memory_target *target;

	list_for_each_entry(target, &targets, node)
		hmat_register_target(target);
}

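/*
 * Memory hotplug notifier: register a target's attributes once its node
 * comes online.
 */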
static int hmat_callback(struct notifier_block *self,
			 unsigned long action, void *arg)
{
	struct memory_target *target;
	struct memory_notify *mnb = arg;
	int pxm, nid = mnb->status_change_nid;

	if (nid == NUMA_NO_NODE || action != MEM_ONLINE)
		return NOTIFY_OK;

	pxm = node_to_pxm(nid);
	target = find_mem_target(pxm);
	if (!target)
		return NOTIFY_OK;

	hmat_register_target(target);
	return NOTIFY_OK;
}

static struct notifier_block hmat_callback_nb = {
	.notifier_call = hmat_callback,
	.priority = 2,
};

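/*
 * Release all parsed SRAT/HMAT state, including the per-target memregion
 * reservations; used when the tables are unusable or no longer needed.
 */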
static __init void hmat_free_structures(void)
{
	struct memory_target *target, *tnext;
	struct memory_locality *loc, *lnext;
	struct memory_initiator *initiator, *inext;
	struct target_cache *tcache, *cnext;

	list_for_each_entry_safe(target, tnext, &targets, node) {
		struct resource *res, *res_next;

		list_for_each_entry_safe(tcache, cnext, &target->caches, node) {
			list_del(&tcache->node);
			kfree(tcache);
		}

		list_del(&target->node);
		res = target->memregions.child;
		while (res) {
			res_next = res->sibling;
			__release_region(&target->memregions, res->start,
					resource_size(res));
			res = res_next;
		}
		kfree(target);
	}

	list_for_each_entry_safe(initiator, inext, &initiators, node) {
		list_del(&initiator->node);
		kfree(initiator);
	}

	list_for_each_entry_safe(loc, lnext, &localities, node) {
		list_del(&loc->node);
		kfree(loc);
	}
}

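/*
 * Parse SRAT memory affinity first to enumerate memory targets, then walk
 * the HMAT subtables and register the resulting node attributes.
 */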
static __init int hmat_init(void)
{
	struct acpi_table_header *tbl;
	enum acpi_hmat_type i;
	acpi_status status;

	if (srat_disabled() || hmat_disable)
		return 0;

	status = acpi_get_table(ACPI_SIG_SRAT, 0, &tbl);
	if (ACPI_FAILURE(status))
		return 0;

	if (acpi_table_parse_entries(ACPI_SIG_SRAT,
				sizeof(struct acpi_table_srat),
				ACPI_SRAT_TYPE_MEMORY_AFFINITY,
				srat_parse_mem_affinity, 0) < 0)
		goto out_put;
	acpi_put_table(tbl);

	status = acpi_get_table(ACPI_SIG_HMAT, 0, &tbl);
	if (ACPI_FAILURE(status))
		goto out_put;

	hmat_revision = tbl->revision;
	switch (hmat_revision) {
	case 1:
	case 2:
		break;
	default:
		pr_notice("Ignoring HMAT: Unknown revision:%d\n", hmat_revision);
		goto out_put;
	}

	for (i = ACPI_HMAT_TYPE_PROXIMITY; i < ACPI_HMAT_TYPE_RESERVED; i++) {
		if (acpi_table_parse_entries(ACPI_SIG_HMAT,
					     sizeof(struct acpi_table_hmat), i,
					     hmat_parse_subtable, 0) < 0) {
			pr_notice("Ignoring HMAT: Invalid table\n");
			goto out_put;
		}
	}
	hmat_register_targets();

	/* Keep the table and structures if the notifier may use them */
	if (!register_hotmemory_notifier(&hmat_callback_nb))
		return 0;
out_put:
	hmat_free_structures();
	acpi_put_table(tbl);
	return 0;
}
device_initcall(hmat_init);