/*
 * Copyright 2015-2017 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 */

#include <linux/pci.h>
#include <linux/acpi.h>
#include "kfd_crat.h"
#include "kfd_priv.h"
#include "kfd_topology.h"
#include "kfd_iommu.h"
#include "amdgpu_amdkfd.h"

/* GPU Processor ID base for dGPUs for which VCRAT needs to be created.
 * GPU processor IDs are expressed with Bit[31]=1.
 * The base is set to 0x8000_0000 + 0x1000 to avoid collision with GPU IDs
 * used in the CRAT.
 */
static uint32_t gpu_processor_id_low = 0x80001000;

/* Return the next available gpu_processor_id and increment it for the
 * next GPU
 * @total_cu_count - Total CUs present in the GPU including the ones
 *		     masked off
 */
static inline unsigned int get_and_inc_gpu_processor_id(
				unsigned int total_cu_count)
{
	int current_id = gpu_processor_id_low;

	gpu_processor_id_low += total_cu_count;
	return current_id;
}
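
/*
 * Illustrative example (values assumed, not part of the driver logic):
 * with the base at 0x80001000, a first GPU reporting 64 total CUs is
 * assigned processor ID 0x80001000 and the counter advances to
 * 0x80001040 for the next GPU, so every CU (active or masked off) keeps
 * a unique processor ID.
 */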

/* Static table to describe GPU Cache information */
struct kfd_gpu_cache_info {
	uint32_t cache_size;
	uint32_t cache_level;
	uint32_t flags;
	/* Indicates how many Compute Units share this cache
	 * Value = 1 indicates the cache is not shared
	 */
	uint32_t num_cu_shared;
};

static struct kfd_gpu_cache_info kaveri_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 1,
	},
	{
		/* Scalar L1 Instruction Cache (in SQC module) per bank */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},
	{
		/* Scalar L1 Data Cache (in SQC module) per bank */
		.cache_size = 8,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 2,
	},

	/* TODO: Add L2 Cache information */
};


static struct kfd_gpu_cache_info carrizo_cache_info[] = {
	{
		/* TCP L1 Cache per CU */
		.cache_size = 16,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 1,
	},
	{
		/* Scalar L1 Instruction Cache (in SQC module) per bank */
		.cache_size = 8,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_INST_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 4,
	},
	{
		/* Scalar L1 Data Cache (in SQC module) per bank. */
		.cache_size = 4,
		.cache_level = 1,
		.flags = (CRAT_CACHE_FLAGS_ENABLED |
				CRAT_CACHE_FLAGS_DATA_CACHE |
				CRAT_CACHE_FLAGS_SIMD_CACHE),
		.num_cu_shared = 4,
	},

	/* TODO: Add L2 Cache information */
};

/* NOTE: In the future, if more information is added to
 * struct kfd_gpu_cache_info, the following ASICs may need a separate table.
 */
#define hawaii_cache_info kaveri_cache_info
#define tonga_cache_info carrizo_cache_info
#define fiji_cache_info carrizo_cache_info
#define polaris10_cache_info carrizo_cache_info
#define polaris11_cache_info carrizo_cache_info
#define polaris12_cache_info carrizo_cache_info
#define vegam_cache_info carrizo_cache_info
/* TODO - check & update Vega10 cache details */
#define vega10_cache_info carrizo_cache_info
#define raven_cache_info carrizo_cache_info
#define renoir_cache_info carrizo_cache_info
/* TODO - check & update Navi10 cache details */
#define navi10_cache_info carrizo_cache_info

static void kfd_populated_cu_info_cpu(struct kfd_topology_device *dev,
		struct crat_subtype_computeunit *cu)
{
	dev->node_props.cpu_cores_count = cu->num_cpu_cores;
	dev->node_props.cpu_core_id_base = cu->processor_id_low;
	if (cu->hsa_capability & CRAT_CU_FLAGS_IOMMU_PRESENT)
		dev->node_props.capability |= HSA_CAP_ATS_PRESENT;

	pr_debug("CU CPU: cores=%d id_base=%d\n", cu->num_cpu_cores,
			cu->processor_id_low);
}

static void kfd_populated_cu_info_gpu(struct kfd_topology_device *dev,
		struct crat_subtype_computeunit *cu)
{
	dev->node_props.simd_id_base = cu->processor_id_low;
	dev->node_props.simd_count = cu->num_simd_cores;
	dev->node_props.lds_size_in_kb = cu->lds_size_in_kb;
	dev->node_props.max_waves_per_simd = cu->max_waves_simd;
	dev->node_props.wave_front_size = cu->wave_front_size;
	dev->node_props.array_count = cu->array_count;
	dev->node_props.cu_per_simd_array = cu->num_cu_per_array;
	dev->node_props.simd_per_cu = cu->num_simd_per_cu;
	dev->node_props.max_slots_scratch_cu = cu->max_slots_scatch_cu;
	if (cu->hsa_capability & CRAT_CU_FLAGS_HOT_PLUGGABLE)
		dev->node_props.capability |= HSA_CAP_HOT_PLUGGABLE;
	pr_debug("CU GPU: id_base=%d\n", cu->processor_id_low);
}

/* kfd_parse_subtype_cu - parse compute unit subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu,
		struct list_head *device_list)
{
	struct kfd_topology_device *dev;

	pr_debug("Found CU entry in CRAT table with proximity_domain=%d caps=%x\n",
			cu->proximity_domain, cu->hsa_capability);
	list_for_each_entry(dev, device_list, list) {
		if (cu->proximity_domain == dev->proximity_domain) {
			if (cu->flags & CRAT_CU_FLAGS_CPU_PRESENT)
				kfd_populated_cu_info_cpu(dev, cu);

			if (cu->flags & CRAT_CU_FLAGS_GPU_PRESENT)
				kfd_populated_cu_info_gpu(dev, cu);
			break;
		}
	}

	return 0;
}

static struct kfd_mem_properties *
find_subtype_mem(uint32_t heap_type, uint32_t flags, uint32_t width,
		struct kfd_topology_device *dev)
{
	struct kfd_mem_properties *props;

	list_for_each_entry(props, &dev->mem_props, list) {
		if (props->heap_type == heap_type
				&& props->flags == flags
				&& props->width == width)
			return props;
	}

	return NULL;
}

/* kfd_parse_subtype_mem - parse memory subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem,
		struct list_head *device_list)
{
	struct kfd_mem_properties *props;
	struct kfd_topology_device *dev;
	uint32_t heap_type;
	uint64_t size_in_bytes;
	uint32_t flags = 0;
	uint32_t width;

	pr_debug("Found memory entry in CRAT table with proximity_domain=%d\n",
			mem->proximity_domain);
	list_for_each_entry(dev, device_list, list) {
		if (mem->proximity_domain == dev->proximity_domain) {
			/* We're on a GPU node */
			if (dev->node_props.cpu_cores_count == 0) {
				/* APU */
				if (mem->visibility_type == 0)
					heap_type =
						HSA_MEM_HEAP_TYPE_FB_PRIVATE;
				/* dGPU */
				else
					heap_type = mem->visibility_type;
			} else
				heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;

			if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
				flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
			if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
				flags |= HSA_MEM_FLAGS_NON_VOLATILE;

			size_in_bytes =
				((uint64_t)mem->length_high << 32) +
							mem->length_low;
			width = mem->width;

			/* Multiple banks of the same type are aggregated into
			 * one. User mode doesn't care about multiple physical
			 * memory segments. It's managed as a single virtual
			 * heap for user mode.
			 */
			props = find_subtype_mem(heap_type, flags, width, dev);
			if (props) {
				props->size_in_bytes += size_in_bytes;
				break;
			}

			props = kfd_alloc_struct(props);
			if (!props)
				return -ENOMEM;

			props->heap_type = heap_type;
			props->flags = flags;
			props->size_in_bytes = size_in_bytes;
			props->width = width;

			dev->node_props.mem_banks_count++;
			list_add_tail(&props->list, &dev->mem_props);

			break;
		}
	}

	return 0;
}
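
/*
 * Example of the aggregation above (illustrative numbers): two banks of
 * 4 GB each, reported with identical heap_type, flags and width, are
 * merged into a single 8 GB kfd_mem_properties entry, so user mode sees
 * one heap rather than two physical segments.
 */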

/* kfd_parse_subtype_cache - parse cache subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_cache(struct crat_subtype_cache *cache,
		struct list_head *device_list)
{
	struct kfd_cache_properties *props;
	struct kfd_topology_device *dev;
	uint32_t id;
	uint32_t total_num_of_cu;

	id = cache->processor_id_low;

	pr_debug("Found cache entry in CRAT table with processor_id=%d\n", id);
	list_for_each_entry(dev, device_list, list) {
		total_num_of_cu = (dev->node_props.array_count *
					dev->node_props.cu_per_simd_array);

		/* Cache information in CRAT doesn't have proximity_domain
		 * information as it is associated with a CPU core or GPU
		 * Compute Unit. So map the cache using the CPU core ID or
		 * SIMD (GPU) ID.
		 * TODO: This works because currently we can safely assume
		 * that Compute Units are parsed before caches are parsed. In
		 * the future, remove this dependency.
		 */
		if ((id >= dev->node_props.cpu_core_id_base &&
			id <= dev->node_props.cpu_core_id_base +
				dev->node_props.cpu_cores_count) ||
			(id >= dev->node_props.simd_id_base &&
			id < dev->node_props.simd_id_base +
				total_num_of_cu)) {
			props = kfd_alloc_struct(props);
			if (!props)
				return -ENOMEM;

			props->processor_id_low = id;
			props->cache_level = cache->cache_level;
			props->cache_size = cache->cache_size;
			props->cacheline_size = cache->cache_line_size;
			props->cachelines_per_tag = cache->lines_per_tag;
			props->cache_assoc = cache->associativity;
			props->cache_latency = cache->cache_latency;
			memcpy(props->sibling_map, cache->sibling_map,
					sizeof(props->sibling_map));

			if (cache->flags & CRAT_CACHE_FLAGS_DATA_CACHE)
				props->cache_type |= HSA_CACHE_TYPE_DATA;
			if (cache->flags & CRAT_CACHE_FLAGS_INST_CACHE)
				props->cache_type |= HSA_CACHE_TYPE_INSTRUCTION;
			if (cache->flags & CRAT_CACHE_FLAGS_CPU_CACHE)
				props->cache_type |= HSA_CACHE_TYPE_CPU;
			if (cache->flags & CRAT_CACHE_FLAGS_SIMD_CACHE)
				props->cache_type |= HSA_CACHE_TYPE_HSACU;

			dev->cache_count++;
			dev->node_props.caches_count++;
			list_add_tail(&props->list, &dev->cache_props);

			break;
		}
	}

	return 0;
}

/* kfd_parse_subtype_iolink - parse iolink subtypes and attach them to the
 * correct topology device present in the device_list
 */
static int kfd_parse_subtype_iolink(struct crat_subtype_iolink *iolink,
		struct list_head *device_list)
{
	struct kfd_iolink_properties *props = NULL, *props2;
	struct kfd_topology_device *dev, *to_dev;
	uint32_t id_from;
	uint32_t id_to;

	id_from = iolink->proximity_domain_from;
	id_to = iolink->proximity_domain_to;

	pr_debug("Found IO link entry in CRAT table with id_from=%d, id_to %d\n",
			id_from, id_to);
	list_for_each_entry(dev, device_list, list) {
		if (id_from == dev->proximity_domain) {
			props = kfd_alloc_struct(props);
			if (!props)
				return -ENOMEM;

			props->node_from = id_from;
			props->node_to = id_to;
			props->ver_maj = iolink->version_major;
			props->ver_min = iolink->version_minor;
			props->iolink_type = iolink->io_interface_type;

			if (props->iolink_type == CRAT_IOLINK_TYPE_PCIEXPRESS)
				props->weight = 20;
			else if (props->iolink_type == CRAT_IOLINK_TYPE_XGMI)
				props->weight = 15 * iolink->num_hops_xgmi;
			else
				props->weight = node_distance(id_from, id_to);

			props->min_latency = iolink->minimum_latency;
			props->max_latency = iolink->maximum_latency;
			props->min_bandwidth = iolink->minimum_bandwidth_mbs;
			props->max_bandwidth = iolink->maximum_bandwidth_mbs;
			props->rec_transfer_size =
					iolink->recommended_transfer_size;

			dev->io_link_count++;
			dev->node_props.io_links_count++;
			list_add_tail(&props->list, &dev->io_link_props);
			break;
		}
	}

	/* CPU topology is created before GPUs are detected, so CPU->GPU
	 * links are not built at that time. If a PCIe type is discovered, it
	 * means a GPU is detected and we are adding the GPU->CPU link to the
	 * topology. At this time, also add the corresponding CPU->GPU link
	 * if the GPU is large-BAR.
	 * For xGMI, the CRAT table only lists one direction of the link, so
	 * add the corresponding reverse-direction link now.
	 */
	if (props && (iolink->flags & CRAT_IOLINK_FLAGS_BI_DIRECTIONAL)) {
		to_dev = kfd_topology_device_by_proximity_domain(id_to);
		if (!to_dev)
			return -ENODEV;
		/* same everything but the other direction */
		props2 = kmemdup(props, sizeof(*props2), GFP_KERNEL);
		if (!props2)
			return -ENOMEM;

		props2->node_from = id_to;
		props2->node_to = id_from;
		props2->kobj = NULL;
		to_dev->io_link_count++;
		to_dev->node_props.io_links_count++;
		list_add_tail(&props2->list, &to_dev->io_link_props);
	}

	return 0;
}

/* kfd_parse_subtype - parse subtypes and attach them to the correct topology
 * device present in the device_list
 * @sub_type_hdr - subtype section of crat_image
 * @device_list - list of topology devices present in this crat_image
 */
static int kfd_parse_subtype(struct crat_subtype_generic *sub_type_hdr,
		struct list_head *device_list)
{
	struct crat_subtype_computeunit *cu;
	struct crat_subtype_memory *mem;
	struct crat_subtype_cache *cache;
	struct crat_subtype_iolink *iolink;
	int ret = 0;

	switch (sub_type_hdr->type) {
	case CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY:
		cu = (struct crat_subtype_computeunit *)sub_type_hdr;
		ret = kfd_parse_subtype_cu(cu, device_list);
		break;
	case CRAT_SUBTYPE_MEMORY_AFFINITY:
		mem = (struct crat_subtype_memory *)sub_type_hdr;
		ret = kfd_parse_subtype_mem(mem, device_list);
		break;
	case CRAT_SUBTYPE_CACHE_AFFINITY:
		cache = (struct crat_subtype_cache *)sub_type_hdr;
		ret = kfd_parse_subtype_cache(cache, device_list);
		break;
	case CRAT_SUBTYPE_TLB_AFFINITY:
		/*
		 * For now, nothing to do here
		 */
		pr_debug("Found TLB entry in CRAT table (not processing)\n");
		break;
	case CRAT_SUBTYPE_CCOMPUTE_AFFINITY:
		/*
		 * For now, nothing to do here
		 */
		pr_debug("Found CCOMPUTE entry in CRAT table (not processing)\n");
		break;
	case CRAT_SUBTYPE_IOLINK_AFFINITY:
		iolink = (struct crat_subtype_iolink *)sub_type_hdr;
		ret = kfd_parse_subtype_iolink(iolink, device_list);
		break;
	default:
		pr_warn("Unknown subtype %d in CRAT\n",
				sub_type_hdr->type);
	}

	return ret;
}

/* kfd_parse_crat_table - parse the CRAT table. For each node present in CRAT,
 * create a kfd_topology_device and add it to device_list. Also parse the
 * CRAT subtypes and attach them to the appropriate kfd_topology_device.
 * @crat_image - input image containing CRAT
 * @device_list - [OUT] list of kfd_topology_device generated after
 *		  parsing crat_image
 * @proximity_domain - Proximity domain of the first device in the table
 *
 * Return - 0 if successful else -ve value
 */
int kfd_parse_crat_table(void *crat_image, struct list_head *device_list,
		uint32_t proximity_domain)
{
	struct kfd_topology_device *top_dev = NULL;
	struct crat_subtype_generic *sub_type_hdr;
	uint16_t node_id;
	int ret = 0;
	struct crat_header *crat_table = (struct crat_header *)crat_image;
	uint16_t num_nodes;
	uint32_t image_len;

	if (!crat_image)
		return -EINVAL;

	if (!list_empty(device_list)) {
		pr_warn("Error: device list should be empty\n");
		return -EINVAL;
	}

	num_nodes = crat_table->num_domains;
	image_len = crat_table->length;

	pr_debug("Parsing CRAT table with %d nodes\n", num_nodes);

	for (node_id = 0; node_id < num_nodes; node_id++) {
		top_dev = kfd_create_topology_device(device_list);
		if (!top_dev)
			break;
		top_dev->proximity_domain = proximity_domain++;
	}

	if (!top_dev) {
		ret = -ENOMEM;
		goto err;
	}

	memcpy(top_dev->oem_id, crat_table->oem_id, CRAT_OEMID_LENGTH);
	memcpy(top_dev->oem_table_id, crat_table->oem_table_id,
			CRAT_OEMTABLEID_LENGTH);
	top_dev->oem_revision = crat_table->oem_revision;

	sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);
	while ((char *)sub_type_hdr + sizeof(struct crat_subtype_generic) <
			((char *)crat_image) + image_len) {
		if (sub_type_hdr->flags & CRAT_SUBTYPE_FLAGS_ENABLED) {
			ret = kfd_parse_subtype(sub_type_hdr, device_list);
			if (ret)
				break;
		}

		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
				sub_type_hdr->length);
	}

err:
	if (ret)
		kfd_release_topology_device_list(device_list);

	return ret;
}

/* Helper function. See kfd_fill_gpu_cache_info for parameter description */
static int fill_in_pcache(struct crat_subtype_cache *pcache,
		struct kfd_gpu_cache_info *pcache_info,
		struct kfd_cu_info *cu_info,
		int mem_available,
		int cu_bitmask,
		int cache_type, unsigned int cu_processor_id,
		int cu_block)
{
	unsigned int cu_sibling_map_mask;
	int first_active_cu;

	/* First check if enough memory is available */
	if (sizeof(struct crat_subtype_cache) > mem_available)
		return -ENOMEM;

	cu_sibling_map_mask = cu_bitmask;
	cu_sibling_map_mask >>= cu_block;
	cu_sibling_map_mask &=
		((1 << pcache_info[cache_type].num_cu_shared) - 1);
	first_active_cu = ffs(cu_sibling_map_mask);

	/* A CU can be inactive. For a shared cache, find the first active
	 * CU; for a non-shared cache, check whether the CU is inactive and,
	 * if so, skip it.
	 */
	if (first_active_cu) {
		memset(pcache, 0, sizeof(struct crat_subtype_cache));
		pcache->type = CRAT_SUBTYPE_CACHE_AFFINITY;
		pcache->length = sizeof(struct crat_subtype_cache);
		pcache->flags = pcache_info[cache_type].flags;
		pcache->processor_id_low = cu_processor_id
					 + (first_active_cu - 1);
		pcache->cache_level = pcache_info[cache_type].cache_level;
		pcache->cache_size = pcache_info[cache_type].cache_size;

		/* Sibling map is w.r.t processor_id_low, so shift out
		 * inactive CUs
		 */
		cu_sibling_map_mask =
			cu_sibling_map_mask >> (first_active_cu - 1);

		pcache->sibling_map[0] = (uint8_t)(cu_sibling_map_mask & 0xFF);
		pcache->sibling_map[1] =
				(uint8_t)((cu_sibling_map_mask >> 8) & 0xFF);
		pcache->sibling_map[2] =
				(uint8_t)((cu_sibling_map_mask >> 16) & 0xFF);
		pcache->sibling_map[3] =
				(uint8_t)((cu_sibling_map_mask >> 24) & 0xFF);
		return 0;
	}
	return 1;
}
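
/*
 * Worked example for fill_in_pcache (illustrative values): with
 * cu_bitmask = 0xF0, cu_block = 4 and num_cu_shared = 4, the mask is
 * shifted and truncated to 0xF, so first_active_cu = 1. The resulting
 * entry gets processor_id_low = cu_processor_id and sibling_map[0] =
 * 0x0F, i.e. the four CUs of this block share the cache instance.
 */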

/* kfd_fill_gpu_cache_info - Fill GPU cache info using the kfd_gpu_cache_info
 * tables
 *
 * @kdev - [IN] GPU device
 * @gpu_processor_id - [IN] GPU processor ID to which these caches
 *		       associate
 * @available_size - [IN] Amount of memory available in pcache
 * @cu_info - [IN] Compute Unit info obtained from KGD
 * @pcache - [OUT] memory into which cache data is to be filled in.
 * @size_filled - [OUT] amount of data used up in pcache.
 * @num_of_entries - [OUT] number of caches added
 */
static int kfd_fill_gpu_cache_info(struct kfd_dev *kdev,
		int gpu_processor_id,
		int available_size,
		struct kfd_cu_info *cu_info,
		struct crat_subtype_cache *pcache,
		int *size_filled,
		int *num_of_entries)
{
	struct kfd_gpu_cache_info *pcache_info;
	int num_of_cache_types = 0;
	int i, j, k;
	int ct = 0;
	int mem_available = available_size;
	unsigned int cu_processor_id;
	int ret;

	switch (kdev->device_info->asic_family) {
	case CHIP_KAVERI:
		pcache_info = kaveri_cache_info;
		num_of_cache_types = ARRAY_SIZE(kaveri_cache_info);
		break;
	case CHIP_HAWAII:
		pcache_info = hawaii_cache_info;
		num_of_cache_types = ARRAY_SIZE(hawaii_cache_info);
		break;
	case CHIP_CARRIZO:
		pcache_info = carrizo_cache_info;
		num_of_cache_types = ARRAY_SIZE(carrizo_cache_info);
		break;
	case CHIP_TONGA:
		pcache_info = tonga_cache_info;
		num_of_cache_types = ARRAY_SIZE(tonga_cache_info);
		break;
	case CHIP_FIJI:
		pcache_info = fiji_cache_info;
		num_of_cache_types = ARRAY_SIZE(fiji_cache_info);
		break;
	case CHIP_POLARIS10:
		pcache_info = polaris10_cache_info;
		num_of_cache_types = ARRAY_SIZE(polaris10_cache_info);
		break;
	case CHIP_POLARIS11:
		pcache_info = polaris11_cache_info;
		num_of_cache_types = ARRAY_SIZE(polaris11_cache_info);
		break;
	case CHIP_POLARIS12:
		pcache_info = polaris12_cache_info;
		num_of_cache_types = ARRAY_SIZE(polaris12_cache_info);
		break;
	case CHIP_VEGAM:
		pcache_info = vegam_cache_info;
		num_of_cache_types = ARRAY_SIZE(vegam_cache_info);
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA12:
	case CHIP_VEGA20:
	case CHIP_ARCTURUS:
		pcache_info = vega10_cache_info;
		num_of_cache_types = ARRAY_SIZE(vega10_cache_info);
		break;
	case CHIP_RAVEN:
		pcache_info = raven_cache_info;
		num_of_cache_types = ARRAY_SIZE(raven_cache_info);
		break;
	case CHIP_RENOIR:
		pcache_info = renoir_cache_info;
		num_of_cache_types = ARRAY_SIZE(renoir_cache_info);
		break;
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_NAVI14:
	case CHIP_SIENNA_CICHLID:
	case CHIP_NAVY_FLOUNDER:
		pcache_info = navi10_cache_info;
		num_of_cache_types = ARRAY_SIZE(navi10_cache_info);
		break;
	default:
		return -EINVAL;
	}

	*size_filled = 0;
	*num_of_entries = 0;

	/* For each cache type listed in the kfd_gpu_cache_info table, walk
	 * all available Compute Units.
	 * If kfd_gpu_cache_info.num_cu_shared == 1, the [i,j,k] loop visits
	 * every CU; otherwise it visits only one CU per group of CUs that
	 * share the cache.
	 */

	for (ct = 0; ct < num_of_cache_types; ct++) {
		cu_processor_id = gpu_processor_id;
		for (i = 0; i < cu_info->num_shader_engines; i++) {
			for (j = 0; j < cu_info->num_shader_arrays_per_engine;
				j++) {
				for (k = 0; k < cu_info->num_cu_per_sh;
					k += pcache_info[ct].num_cu_shared) {

					ret = fill_in_pcache(pcache,
						pcache_info,
						cu_info,
						mem_available,
						cu_info->cu_bitmap[i % 4][j + i / 4],
						ct,
						cu_processor_id,
						k);

					if (ret < 0)
						break;

					if (!ret) {
						pcache++;
						(*num_of_entries)++;
						mem_available -=
							sizeof(*pcache);
						(*size_filled) +=
							sizeof(*pcache);
					}

					/* Move to next CU block */
					cu_processor_id +=
						pcache_info[ct].num_cu_shared;
				}
			}
		}
	}

	pr_debug("Added [%d] GPU cache entries\n", *num_of_entries);

	return 0;
}
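
/*
 * Sizing sketch (illustrative numbers based on the tables above): with
 * carrizo_cache_info and a GPU exposing one shader engine, one shader
 * array and 8 CUs per array, the loops emit up to 8 TCP L1 entries
 * (num_cu_shared = 1) plus up to 2 entries each for the SQC instruction
 * and data caches (num_cu_shared = 4), assuming all CUs are active.
 */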

static bool kfd_ignore_crat(void)
{
	bool ret;

	if (ignore_crat)
		return true;

#ifndef KFD_SUPPORT_IOMMU_V2
	ret = true;
#else
	ret = false;
#endif

	return ret;
}

/*
 * kfd_create_crat_image_acpi - Allocates memory for the CRAT image and
 * copies the CRAT from ACPI (if available).
 * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
 *
 * @crat_image: CRAT read from ACPI. If no CRAT is found in ACPI,
 *	       crat_image will be NULL
 * @size: [OUT] size of crat_image
 *
 * Return 0 if successful else return error code
 */
int kfd_create_crat_image_acpi(void **crat_image, size_t *size)
{
	struct acpi_table_header *crat_table;
	acpi_status status;
	void *pcrat_image;
	int rc = 0;

	if (!crat_image)
		return -EINVAL;

	*crat_image = NULL;

	/* Fetch the CRAT table from ACPI */
	status = acpi_get_table(CRAT_SIGNATURE, 0, &crat_table);
	if (status == AE_NOT_FOUND) {
		pr_info("CRAT table not found\n");
		return -ENODATA;
	} else if (ACPI_FAILURE(status)) {
		const char *err = acpi_format_exception(status);

		pr_err("CRAT table error: %s\n", err);
		return -EINVAL;
	}
	if (kfd_ignore_crat()) {
		pr_info("CRAT table disabled by module option\n");
		rc = -ENODATA;
		goto out;
	}

	pcrat_image = kvmalloc(crat_table->length, GFP_KERNEL);
	if (!pcrat_image) {
		rc = -ENOMEM;
		goto out;
	}

	memcpy(pcrat_image, crat_table, crat_table->length);
	*crat_image = pcrat_image;
	*size = crat_table->length;
out:
	acpi_put_table(crat_table);
	return rc;
}
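
/*
 * Typical call sequence (illustrative sketch, error handling elided):
 *
 *	void *crat_image = NULL;
 *	size_t image_size = 0;
 *
 *	if (!kfd_create_crat_image_acpi(&crat_image, &image_size)) {
 *		kfd_parse_crat_table(crat_image, device_list,
 *				     proximity_domain);
 *		kfd_destroy_crat_image(crat_image);
 *	}
 */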

/* Memory required to create the Virtual CRAT.
 * Since there is no easy way to predict the amount of memory required, the
 * following amount is allocated for the GPU Virtual CRAT. This is
 * expected to cover all known conditions. But to be safe, an additional
 * check is put in the code to ensure we don't overwrite.
 */
#define VCRAT_SIZE_FOR_GPU	(4 * PAGE_SIZE)

/* kfd_fill_cu_for_cpu - Fill in Compute info for the given CPU NUMA node
 *
 * @numa_node_id: CPU NUMA node id
 * @avail_size: Available size in the memory
 * @sub_type_hdr: Memory into which compute info will be filled in
 *
 * Return 0 if successful else return -ve value
 */
static int kfd_fill_cu_for_cpu(int numa_node_id, int *avail_size,
		int proximity_domain,
		struct crat_subtype_computeunit *sub_type_hdr)
{
	const struct cpumask *cpumask;

	*avail_size -= sizeof(struct crat_subtype_computeunit);
	if (*avail_size < 0)
		return -ENOMEM;

	memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));

	/* Fill in subtype header data */
	sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
	sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

	cpumask = cpumask_of_node(numa_node_id);

	/* Fill in CU data */
	sub_type_hdr->flags |= CRAT_CU_FLAGS_CPU_PRESENT;
	sub_type_hdr->proximity_domain = proximity_domain;
	sub_type_hdr->processor_id_low = kfd_numa_node_to_apic_id(numa_node_id);
	if (sub_type_hdr->processor_id_low == -1)
		return -EINVAL;

	sub_type_hdr->num_cpu_cores = cpumask_weight(cpumask);

	return 0;
}

/* kfd_fill_mem_info_for_cpu - Fill in Memory info for the given CPU NUMA node
 *
 * @numa_node_id: CPU NUMA node id
 * @avail_size: Available size in the memory
 * @sub_type_hdr: Memory into which memory info will be filled in
 *
 * Return 0 if successful else return -ve value
 */
static int kfd_fill_mem_info_for_cpu(int numa_node_id, int *avail_size,
		int proximity_domain,
		struct crat_subtype_memory *sub_type_hdr)
{
	uint64_t mem_in_bytes = 0;
	pg_data_t *pgdat;
	int zone_type;

	*avail_size -= sizeof(struct crat_subtype_memory);
	if (*avail_size < 0)
		return -ENOMEM;

	memset(sub_type_hdr, 0, sizeof(struct crat_subtype_memory));

	/* Fill in subtype header data */
	sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_memory);
	sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

	/* Fill in Memory Subunit data */

	/* Unlike si_meminfo, si_meminfo_node is not exported. So
	 * the following lines are duplicated from the si_meminfo_node
	 * function.
	 */
	pgdat = NODE_DATA(numa_node_id);
	for (zone_type = 0; zone_type < MAX_NR_ZONES; zone_type++)
		mem_in_bytes += zone_managed_pages(&pgdat->node_zones[zone_type]);
	mem_in_bytes <<= PAGE_SHIFT;

	sub_type_hdr->length_low = lower_32_bits(mem_in_bytes);
	sub_type_hdr->length_high = upper_32_bits(mem_in_bytes);
	sub_type_hdr->proximity_domain = proximity_domain;

	return 0;
}
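
/*
 * Illustrative example: a node with 0x100000 managed pages and a 4 KiB
 * PAGE_SIZE reports 4 GiB of memory, split into length_low = 0x0 and
 * length_high = 0x1 in the memory subtype.
 */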

#ifdef CONFIG_X86_64
static int kfd_fill_iolink_info_for_cpu(int numa_node_id, int *avail_size,
		uint32_t *num_entries,
		struct crat_subtype_iolink *sub_type_hdr)
{
	int nid;
	struct cpuinfo_x86 *c = &cpu_data(0);
	uint8_t link_type;

	if (c->x86_vendor == X86_VENDOR_AMD)
		link_type = CRAT_IOLINK_TYPE_HYPERTRANSPORT;
	else
		link_type = CRAT_IOLINK_TYPE_QPI_1_1;

	*num_entries = 0;

	/* Create IO links from this node to other CPU nodes */
	for_each_online_node(nid) {
		if (nid == numa_node_id) /* node itself */
			continue;

		*avail_size -= sizeof(struct crat_subtype_iolink);
		if (*avail_size < 0)
			return -ENOMEM;

		memset(sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));

		/* Fill in subtype header data */
		sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
		sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
		sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;

		/* Fill in IO link data */
		sub_type_hdr->proximity_domain_from = numa_node_id;
		sub_type_hdr->proximity_domain_to = nid;
		sub_type_hdr->io_interface_type = link_type;

		(*num_entries)++;
		sub_type_hdr++;
	}

	return 0;
}
#endif

/* kfd_create_vcrat_image_cpu - Create Virtual CRAT for CPU
 *
 * @pcrat_image: Fill in VCRAT for CPU
 * @size: [IN] allocated size of crat_image.
 *	  [OUT] actual size of data filled in crat_image
 */
static int kfd_create_vcrat_image_cpu(void *pcrat_image, size_t *size)
{
	struct crat_header *crat_table = (struct crat_header *)pcrat_image;
	struct acpi_table_header *acpi_table;
	acpi_status status;
	struct crat_subtype_generic *sub_type_hdr;
	int avail_size = *size;
	int numa_node_id;
#ifdef CONFIG_X86_64
	uint32_t entries = 0;
#endif
	int ret = 0;

	if (!pcrat_image)
		return -EINVAL;

	/* Fill in the CRAT header.
	 * Modify length and total_entries as subunits are added.
	 */
	avail_size -= sizeof(struct crat_header);
	if (avail_size < 0)
		return -ENOMEM;

	memset(crat_table, 0, sizeof(struct crat_header));
	memcpy(&crat_table->signature, CRAT_SIGNATURE,
			sizeof(crat_table->signature));
	crat_table->length = sizeof(struct crat_header);

	status = acpi_get_table("DSDT", 0, &acpi_table);
	if (status != AE_OK)
		pr_warn("DSDT table not found for OEM information\n");
	else {
		crat_table->oem_revision = acpi_table->revision;
		memcpy(crat_table->oem_id, acpi_table->oem_id,
				CRAT_OEMID_LENGTH);
		memcpy(crat_table->oem_table_id, acpi_table->oem_table_id,
				CRAT_OEMTABLEID_LENGTH);
		acpi_put_table(acpi_table);
	}
	crat_table->total_entries = 0;
	crat_table->num_domains = 0;

	sub_type_hdr = (struct crat_subtype_generic *)(crat_table+1);

	for_each_online_node(numa_node_id) {
		if (kfd_numa_node_to_apic_id(numa_node_id) == -1)
			continue;

		/* Fill in Subtype: Compute Unit */
		ret = kfd_fill_cu_for_cpu(numa_node_id, &avail_size,
			crat_table->num_domains,
			(struct crat_subtype_computeunit *)sub_type_hdr);
		if (ret < 0)
			return ret;
		crat_table->length += sub_type_hdr->length;
		crat_table->total_entries++;

		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
				sub_type_hdr->length);

		/* Fill in Subtype: Memory */
		ret = kfd_fill_mem_info_for_cpu(numa_node_id, &avail_size,
			crat_table->num_domains,
			(struct crat_subtype_memory *)sub_type_hdr);
		if (ret < 0)
			return ret;
		crat_table->length += sub_type_hdr->length;
		crat_table->total_entries++;

		sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
				sub_type_hdr->length);

		/* Fill in Subtype: IO Link */
#ifdef CONFIG_X86_64
		ret = kfd_fill_iolink_info_for_cpu(numa_node_id, &avail_size,
				&entries,
				(struct crat_subtype_iolink *)sub_type_hdr);
		if (ret < 0)
			return ret;

		if (entries) {
			crat_table->length += (sub_type_hdr->length * entries);
			crat_table->total_entries += entries;

			sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
					sub_type_hdr->length * entries);
		}
#else
		pr_info("IO link not available for non x86 platforms\n");
#endif

		crat_table->num_domains++;
	}

	/* TODO: Add cache Subtype for CPU.
	 * Currently, CPU cache information is available in function
	 * detect_cache_attributes(cpu) defined in the file
	 * ./arch/x86/kernel/cpu/intel_cacheinfo.c. This function is not
	 * exported and to get the same information the code needs to be
	 * duplicated.
	 */

	*size = crat_table->length;
	pr_info("Virtual CRAT table created for CPU\n");

	return 0;
}

static int kfd_fill_gpu_memory_affinity(int *avail_size,
		struct kfd_dev *kdev, uint8_t type, uint64_t size,
		struct crat_subtype_memory *sub_type_hdr,
		uint32_t proximity_domain,
		const struct kfd_local_mem_info *local_mem_info)
{
	*avail_size -= sizeof(struct crat_subtype_memory);
	if (*avail_size < 0)
		return -ENOMEM;

	memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_memory));
	sub_type_hdr->type = CRAT_SUBTYPE_MEMORY_AFFINITY;
	sub_type_hdr->length = sizeof(struct crat_subtype_memory);
	sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;

	sub_type_hdr->proximity_domain = proximity_domain;

	pr_debug("Fill gpu memory affinity - type 0x%x size 0x%llx\n",
			type, size);

	sub_type_hdr->length_low = lower_32_bits(size);
	sub_type_hdr->length_high = upper_32_bits(size);

	sub_type_hdr->width = local_mem_info->vram_width;
	sub_type_hdr->visibility_type = type;

	return 0;
}
1094*4882a593Smuzhiyun
1095*4882a593Smuzhiyun /* kfd_fill_gpu_direct_io_link_to_cpu - Fill in the direct io link from
1096*4882a593Smuzhiyun  * a GPU to its NUMA node
1097*4882a593Smuzhiyun  * @avail_size: Available size remaining in the CRAT image buffer
1098*4882a593Smuzhiyun  * @kdev: [IN] GPU device
1099*4882a593Smuzhiyun  * @sub_type_hdr: Memory into which io link info will be filled in
1100*4882a593Smuzhiyun  * @proximity_domain: proximity domain of the GPU node
1101*4882a593Smuzhiyun  *
1102*4882a593Smuzhiyun  * Return 0 if successful else return negative errno value
1103*4882a593Smuzhiyun  */
1104*4882a593Smuzhiyun static int kfd_fill_gpu_direct_io_link_to_cpu(int *avail_size,
1105*4882a593Smuzhiyun struct kfd_dev *kdev,
1106*4882a593Smuzhiyun struct crat_subtype_iolink *sub_type_hdr,
1107*4882a593Smuzhiyun uint32_t proximity_domain)
1108*4882a593Smuzhiyun {
1109*4882a593Smuzhiyun *avail_size -= sizeof(struct crat_subtype_iolink);
1110*4882a593Smuzhiyun if (*avail_size < 0)
1111*4882a593Smuzhiyun return -ENOMEM;
1112*4882a593Smuzhiyun
1113*4882a593Smuzhiyun memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
1114*4882a593Smuzhiyun
1115*4882a593Smuzhiyun /* Fill in subtype header data */
1116*4882a593Smuzhiyun sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
1117*4882a593Smuzhiyun sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
1118*4882a593Smuzhiyun sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED;
1119*4882a593Smuzhiyun if (kfd_dev_is_large_bar(kdev))
1120*4882a593Smuzhiyun sub_type_hdr->flags |= CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
1121*4882a593Smuzhiyun
1122*4882a593Smuzhiyun /* Fill in IOLINK subtype.
1123*4882a593Smuzhiyun * TODO: Fill-in other fields of iolink subtype
1124*4882a593Smuzhiyun */
1125*4882a593Smuzhiyun sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_PCIEXPRESS;
1126*4882a593Smuzhiyun sub_type_hdr->proximity_domain_from = proximity_domain;
1127*4882a593Smuzhiyun #ifdef CONFIG_NUMA
1128*4882a593Smuzhiyun if (kdev->pdev->dev.numa_node == NUMA_NO_NODE)
1129*4882a593Smuzhiyun sub_type_hdr->proximity_domain_to = 0;
1130*4882a593Smuzhiyun else
1131*4882a593Smuzhiyun sub_type_hdr->proximity_domain_to = kdev->pdev->dev.numa_node;
1132*4882a593Smuzhiyun #else
1133*4882a593Smuzhiyun sub_type_hdr->proximity_domain_to = 0;
1134*4882a593Smuzhiyun #endif
1135*4882a593Smuzhiyun return 0;
1136*4882a593Smuzhiyun }
1137*4882a593Smuzhiyun
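/* kfd_fill_gpu_xgmi_link_to_gpu - Fill in the direct xGMI io link from
 * one GPU to a peer GPU in the same hive
 * @avail_size: Available size remaining in the CRAT image buffer
 * @kdev: [IN] GPU device on this end of the link
 * @peer_kdev: [IN] peer GPU device on the other end of the link
 * @sub_type_hdr: Memory into which io link info will be filled in
 * @proximity_domain_from: proximity domain of this GPU node
 * @proximity_domain_to: proximity domain of the peer GPU node
 *
 * Return 0 if successful else return negative errno value
 */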
1138*4882a593Smuzhiyun static int kfd_fill_gpu_xgmi_link_to_gpu(int *avail_size,
1139*4882a593Smuzhiyun struct kfd_dev *kdev,
1140*4882a593Smuzhiyun struct kfd_dev *peer_kdev,
1141*4882a593Smuzhiyun struct crat_subtype_iolink *sub_type_hdr,
1142*4882a593Smuzhiyun uint32_t proximity_domain_from,
1143*4882a593Smuzhiyun uint32_t proximity_domain_to)
1144*4882a593Smuzhiyun {
1145*4882a593Smuzhiyun *avail_size -= sizeof(struct crat_subtype_iolink);
1146*4882a593Smuzhiyun if (*avail_size < 0)
1147*4882a593Smuzhiyun return -ENOMEM;
1148*4882a593Smuzhiyun
1149*4882a593Smuzhiyun memset((void *)sub_type_hdr, 0, sizeof(struct crat_subtype_iolink));
1150*4882a593Smuzhiyun
1151*4882a593Smuzhiyun sub_type_hdr->type = CRAT_SUBTYPE_IOLINK_AFFINITY;
1152*4882a593Smuzhiyun sub_type_hdr->length = sizeof(struct crat_subtype_iolink);
1153*4882a593Smuzhiyun sub_type_hdr->flags |= CRAT_SUBTYPE_FLAGS_ENABLED |
1154*4882a593Smuzhiyun CRAT_IOLINK_FLAGS_BI_DIRECTIONAL;
1155*4882a593Smuzhiyun
1156*4882a593Smuzhiyun sub_type_hdr->io_interface_type = CRAT_IOLINK_TYPE_XGMI;
1157*4882a593Smuzhiyun sub_type_hdr->proximity_domain_from = proximity_domain_from;
1158*4882a593Smuzhiyun sub_type_hdr->proximity_domain_to = proximity_domain_to;
1159*4882a593Smuzhiyun sub_type_hdr->num_hops_xgmi =
1160*4882a593Smuzhiyun amdgpu_amdkfd_get_xgmi_hops_count(kdev->kgd, peer_kdev->kgd);
1161*4882a593Smuzhiyun return 0;
1162*4882a593Smuzhiyun }
1163*4882a593Smuzhiyun
1164*4882a593Smuzhiyun /* kfd_create_vcrat_image_gpu - Create Virtual CRAT for GPU
1165*4882a593Smuzhiyun  *
1166*4882a593Smuzhiyun  * @pcrat_image: [OUT] memory into which the GPU VCRAT is filled in
1167*4882a593Smuzhiyun  * @size: [IN] allocated size of crat_image.
1168*4882a593Smuzhiyun  *        [OUT] actual size of data filled in crat_image
 * @kdev: [IN] GPU device
 * @proximity_domain: proximity domain of the GPU node
1169*4882a593Smuzhiyun  */
1170*4882a593Smuzhiyun static int kfd_create_vcrat_image_gpu(void *pcrat_image,
1171*4882a593Smuzhiyun size_t *size, struct kfd_dev *kdev,
1172*4882a593Smuzhiyun uint32_t proximity_domain)
1173*4882a593Smuzhiyun {
1174*4882a593Smuzhiyun struct crat_header *crat_table = (struct crat_header *)pcrat_image;
1175*4882a593Smuzhiyun struct crat_subtype_generic *sub_type_hdr;
1176*4882a593Smuzhiyun struct kfd_local_mem_info local_mem_info;
1177*4882a593Smuzhiyun struct kfd_topology_device *peer_dev;
1178*4882a593Smuzhiyun struct crat_subtype_computeunit *cu;
1179*4882a593Smuzhiyun struct kfd_cu_info cu_info;
1180*4882a593Smuzhiyun int avail_size = *size;
1181*4882a593Smuzhiyun uint32_t total_num_of_cu;
1182*4882a593Smuzhiyun int num_of_cache_entries = 0;
1183*4882a593Smuzhiyun int cache_mem_filled = 0;
1184*4882a593Smuzhiyun uint32_t nid = 0;
1185*4882a593Smuzhiyun int ret = 0;
1186*4882a593Smuzhiyun
1187*4882a593Smuzhiyun if (!pcrat_image || avail_size < VCRAT_SIZE_FOR_GPU)
1188*4882a593Smuzhiyun return -EINVAL;
1189*4882a593Smuzhiyun
1190*4882a593Smuzhiyun /* Fill the CRAT Header.
1191*4882a593Smuzhiyun * Modify length and total_entries as subunits are added.
1192*4882a593Smuzhiyun */
1193*4882a593Smuzhiyun avail_size -= sizeof(struct crat_header);
1194*4882a593Smuzhiyun if (avail_size < 0)
1195*4882a593Smuzhiyun return -ENOMEM;
1196*4882a593Smuzhiyun
1197*4882a593Smuzhiyun memset(crat_table, 0, sizeof(struct crat_header));
1198*4882a593Smuzhiyun
1199*4882a593Smuzhiyun memcpy(&crat_table->signature, CRAT_SIGNATURE,
1200*4882a593Smuzhiyun sizeof(crat_table->signature));
1201*4882a593Smuzhiyun 	/* Change length as we add more subtypes */
1202*4882a593Smuzhiyun crat_table->length = sizeof(struct crat_header);
1203*4882a593Smuzhiyun crat_table->num_domains = 1;
1204*4882a593Smuzhiyun crat_table->total_entries = 0;
1205*4882a593Smuzhiyun
1206*4882a593Smuzhiyun /* Fill in Subtype: Compute Unit
1207*4882a593Smuzhiyun * First fill in the sub type header and then sub type data
1208*4882a593Smuzhiyun */
1209*4882a593Smuzhiyun avail_size -= sizeof(struct crat_subtype_computeunit);
1210*4882a593Smuzhiyun if (avail_size < 0)
1211*4882a593Smuzhiyun return -ENOMEM;
1212*4882a593Smuzhiyun
1213*4882a593Smuzhiyun sub_type_hdr = (struct crat_subtype_generic *)(crat_table + 1);
1214*4882a593Smuzhiyun memset(sub_type_hdr, 0, sizeof(struct crat_subtype_computeunit));
1215*4882a593Smuzhiyun
1216*4882a593Smuzhiyun sub_type_hdr->type = CRAT_SUBTYPE_COMPUTEUNIT_AFFINITY;
1217*4882a593Smuzhiyun sub_type_hdr->length = sizeof(struct crat_subtype_computeunit);
1218*4882a593Smuzhiyun sub_type_hdr->flags = CRAT_SUBTYPE_FLAGS_ENABLED;
1219*4882a593Smuzhiyun
1220*4882a593Smuzhiyun /* Fill CU subtype data */
1221*4882a593Smuzhiyun cu = (struct crat_subtype_computeunit *)sub_type_hdr;
1222*4882a593Smuzhiyun cu->flags |= CRAT_CU_FLAGS_GPU_PRESENT;
1223*4882a593Smuzhiyun cu->proximity_domain = proximity_domain;
1224*4882a593Smuzhiyun
1225*4882a593Smuzhiyun amdgpu_amdkfd_get_cu_info(kdev->kgd, &cu_info);
1226*4882a593Smuzhiyun cu->num_simd_per_cu = cu_info.simd_per_cu;
1227*4882a593Smuzhiyun cu->num_simd_cores = cu_info.simd_per_cu * cu_info.cu_active_number;
1228*4882a593Smuzhiyun cu->max_waves_simd = cu_info.max_waves_per_simd;
1229*4882a593Smuzhiyun
1230*4882a593Smuzhiyun cu->wave_front_size = cu_info.wave_front_size;
1231*4882a593Smuzhiyun cu->array_count = cu_info.num_shader_arrays_per_engine *
1232*4882a593Smuzhiyun cu_info.num_shader_engines;
1233*4882a593Smuzhiyun total_num_of_cu = (cu->array_count * cu_info.num_cu_per_sh);
1234*4882a593Smuzhiyun cu->processor_id_low = get_and_inc_gpu_processor_id(total_num_of_cu);
1235*4882a593Smuzhiyun cu->num_cu_per_array = cu_info.num_cu_per_sh;
1236*4882a593Smuzhiyun cu->max_slots_scatch_cu = cu_info.max_scratch_slots_per_cu;
1237*4882a593Smuzhiyun cu->num_banks = cu_info.num_shader_engines;
1238*4882a593Smuzhiyun cu->lds_size_in_kb = cu_info.lds_size;
1239*4882a593Smuzhiyun
1240*4882a593Smuzhiyun cu->hsa_capability = 0;
1241*4882a593Smuzhiyun
1242*4882a593Smuzhiyun /* Check if this node supports IOMMU. During parsing this flag will
1243*4882a593Smuzhiyun * translate to HSA_CAP_ATS_PRESENT
1244*4882a593Smuzhiyun */
1245*4882a593Smuzhiyun if (!kfd_iommu_check_device(kdev))
1246*4882a593Smuzhiyun cu->hsa_capability |= CRAT_CU_FLAGS_IOMMU_PRESENT;
1247*4882a593Smuzhiyun
1248*4882a593Smuzhiyun crat_table->length += sub_type_hdr->length;
1249*4882a593Smuzhiyun crat_table->total_entries++;
1250*4882a593Smuzhiyun
1251*4882a593Smuzhiyun /* Fill in Subtype: Memory. Only on systems with large BAR (no
1252*4882a593Smuzhiyun * private FB), report memory as public. On other systems
1253*4882a593Smuzhiyun * report the total FB size (public+private) as a single
1254*4882a593Smuzhiyun * private heap.
1255*4882a593Smuzhiyun */
1256*4882a593Smuzhiyun amdgpu_amdkfd_get_local_mem_info(kdev->kgd, &local_mem_info);
1257*4882a593Smuzhiyun sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
1258*4882a593Smuzhiyun sub_type_hdr->length);
1259*4882a593Smuzhiyun
1260*4882a593Smuzhiyun if (debug_largebar)
1261*4882a593Smuzhiyun local_mem_info.local_mem_size_private = 0;
1262*4882a593Smuzhiyun
1263*4882a593Smuzhiyun if (local_mem_info.local_mem_size_private == 0)
1264*4882a593Smuzhiyun ret = kfd_fill_gpu_memory_affinity(&avail_size,
1265*4882a593Smuzhiyun kdev, HSA_MEM_HEAP_TYPE_FB_PUBLIC,
1266*4882a593Smuzhiyun local_mem_info.local_mem_size_public,
1267*4882a593Smuzhiyun (struct crat_subtype_memory *)sub_type_hdr,
1268*4882a593Smuzhiyun proximity_domain,
1269*4882a593Smuzhiyun &local_mem_info);
1270*4882a593Smuzhiyun else
1271*4882a593Smuzhiyun ret = kfd_fill_gpu_memory_affinity(&avail_size,
1272*4882a593Smuzhiyun kdev, HSA_MEM_HEAP_TYPE_FB_PRIVATE,
1273*4882a593Smuzhiyun local_mem_info.local_mem_size_public +
1274*4882a593Smuzhiyun local_mem_info.local_mem_size_private,
1275*4882a593Smuzhiyun (struct crat_subtype_memory *)sub_type_hdr,
1276*4882a593Smuzhiyun proximity_domain,
1277*4882a593Smuzhiyun &local_mem_info);
1278*4882a593Smuzhiyun if (ret < 0)
1279*4882a593Smuzhiyun return ret;
1280*4882a593Smuzhiyun
1281*4882a593Smuzhiyun crat_table->length += sizeof(struct crat_subtype_memory);
1282*4882a593Smuzhiyun crat_table->total_entries++;
1283*4882a593Smuzhiyun
1284*4882a593Smuzhiyun 	/* Fill in cache information from the driver's static tables.
1285*4882a593Smuzhiyun 	 * TODO: obtain this information from KGD, where it is NOT
1286*4882a593Smuzhiyun 	 * readily available today.
	 */
1287*4882a593Smuzhiyun sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
1288*4882a593Smuzhiyun sub_type_hdr->length);
1289*4882a593Smuzhiyun ret = kfd_fill_gpu_cache_info(kdev, cu->processor_id_low,
1290*4882a593Smuzhiyun avail_size,
1291*4882a593Smuzhiyun &cu_info,
1292*4882a593Smuzhiyun (struct crat_subtype_cache *)sub_type_hdr,
1293*4882a593Smuzhiyun &cache_mem_filled,
1294*4882a593Smuzhiyun &num_of_cache_entries);
1295*4882a593Smuzhiyun
1296*4882a593Smuzhiyun if (ret < 0)
1297*4882a593Smuzhiyun return ret;
1298*4882a593Smuzhiyun
1299*4882a593Smuzhiyun crat_table->length += cache_mem_filled;
1300*4882a593Smuzhiyun crat_table->total_entries += num_of_cache_entries;
1301*4882a593Smuzhiyun avail_size -= cache_mem_filled;
1302*4882a593Smuzhiyun
1303*4882a593Smuzhiyun 	/* Fill in Subtype: IO_LINKS
1304*4882a593Smuzhiyun 	 * Only direct links are added here, i.e. the link from the GPU
1305*4882a593Smuzhiyun 	 * to its NUMA node. Indirect links are added by userspace.
1306*4882a593Smuzhiyun 	 */
1307*4882a593Smuzhiyun sub_type_hdr = (typeof(sub_type_hdr))((char *)sub_type_hdr +
1308*4882a593Smuzhiyun cache_mem_filled);
1309*4882a593Smuzhiyun ret = kfd_fill_gpu_direct_io_link_to_cpu(&avail_size, kdev,
1310*4882a593Smuzhiyun (struct crat_subtype_iolink *)sub_type_hdr, proximity_domain);
1311*4882a593Smuzhiyun
1312*4882a593Smuzhiyun if (ret < 0)
1313*4882a593Smuzhiyun return ret;
1314*4882a593Smuzhiyun
1315*4882a593Smuzhiyun crat_table->length += sub_type_hdr->length;
1316*4882a593Smuzhiyun crat_table->total_entries++;
1317*4882a593Smuzhiyun
1318*4882a593Smuzhiyun
1319*4882a593Smuzhiyun 	/* Fill in Subtype: IO_LINKS
1320*4882a593Smuzhiyun 	 * Direct links from this GPU to other GPUs through xGMI.
1321*4882a593Smuzhiyun 	 * We loop over the GPUs that have already been processed (those
1322*4882a593Smuzhiyun 	 * with a lower proximity_domain value) and add a link for each
1323*4882a593Smuzhiyun 	 * GPU that shares this GPU's hive id (from this GPU to the other
1324*4882a593Smuzhiyun 	 * GPU). The reverse iolink (from the other GPU to this GPU) is
1325*4882a593Smuzhiyun 	 * added in kfd_parse_subtype_iolink.
1326*4882a593Smuzhiyun 	 */
1327*4882a593Smuzhiyun if (kdev->hive_id) {
1328*4882a593Smuzhiyun for (nid = 0; nid < proximity_domain; ++nid) {
1329*4882a593Smuzhiyun peer_dev = kfd_topology_device_by_proximity_domain(nid);
1330*4882a593Smuzhiyun 			if (!peer_dev || !peer_dev->gpu)
1331*4882a593Smuzhiyun continue;
1332*4882a593Smuzhiyun if (peer_dev->gpu->hive_id != kdev->hive_id)
1333*4882a593Smuzhiyun continue;
1334*4882a593Smuzhiyun sub_type_hdr = (typeof(sub_type_hdr))(
1335*4882a593Smuzhiyun (char *)sub_type_hdr +
1336*4882a593Smuzhiyun sizeof(struct crat_subtype_iolink));
1337*4882a593Smuzhiyun ret = kfd_fill_gpu_xgmi_link_to_gpu(
1338*4882a593Smuzhiyun &avail_size, kdev, peer_dev->gpu,
1339*4882a593Smuzhiyun (struct crat_subtype_iolink *)sub_type_hdr,
1340*4882a593Smuzhiyun proximity_domain, nid);
1341*4882a593Smuzhiyun if (ret < 0)
1342*4882a593Smuzhiyun return ret;
1343*4882a593Smuzhiyun crat_table->length += sub_type_hdr->length;
1344*4882a593Smuzhiyun crat_table->total_entries++;
1345*4882a593Smuzhiyun }
1346*4882a593Smuzhiyun }
1347*4882a593Smuzhiyun *size = crat_table->length;
1348*4882a593Smuzhiyun pr_info("Virtual CRAT table created for GPU\n");
1349*4882a593Smuzhiyun
1350*4882a593Smuzhiyun return ret;
1351*4882a593Smuzhiyun }
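
/* Illustrative sketch only (layout implied by the code above, not a
 * normative specification): the GPU VCRAT image is laid out contiguously as
 *
 *   crat_header
 *   crat_subtype_computeunit     - one entry for this GPU
 *   crat_subtype_memory          - one FB heap, public or private
 *   crat_subtype_cache[]         - num_of_cache_entries entries
 *   crat_subtype_iolink          - link from the GPU to its NUMA node
 *   crat_subtype_iolink[]        - links to peer GPUs over xGMI, if any
 *
 * crat_table->length and crat_table->total_entries are updated as each
 * subtype is appended.
 */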
1352*4882a593Smuzhiyun
1353*4882a593Smuzhiyun /* kfd_create_crat_image_virtual - Allocates memory for CRAT image and
1354*4882a593Smuzhiyun * creates a Virtual CRAT (VCRAT) image
1355*4882a593Smuzhiyun *
1356*4882a593Smuzhiyun * NOTE: Call kfd_destroy_crat_image to free CRAT image memory
1357*4882a593Smuzhiyun *
1358*4882a593Smuzhiyun * @crat_image: VCRAT image created because ACPI does not have a
1359*4882a593Smuzhiyun * CRAT for this device
1360*4882a593Smuzhiyun * @size: [OUT] size of virtual crat_image
1361*4882a593Smuzhiyun * @flags: COMPUTE_UNIT_CPU - Create VCRAT for CPU device
1362*4882a593Smuzhiyun * COMPUTE_UNIT_GPU - Create VCRAT for GPU
1363*4882a593Smuzhiyun * (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU) - Create VCRAT for APU
1364*4882a593Smuzhiyun * -- this option is not currently implemented.
1365*4882a593Smuzhiyun * The assumption is that all AMD APUs will have CRAT
1366*4882a593Smuzhiyun  * @kdev: Valid kfd_dev pointer, required if flags contain COMPUTE_UNIT_GPU
 * @proximity_domain: proximity domain of the CPU/GPU node
1367*4882a593Smuzhiyun  *
1368*4882a593Smuzhiyun  * Return 0 if successful else return negative errno value
1369*4882a593Smuzhiyun */
1370*4882a593Smuzhiyun int kfd_create_crat_image_virtual(void **crat_image, size_t *size,
1371*4882a593Smuzhiyun int flags, struct kfd_dev *kdev,
1372*4882a593Smuzhiyun uint32_t proximity_domain)
1373*4882a593Smuzhiyun {
1374*4882a593Smuzhiyun void *pcrat_image = NULL;
1375*4882a593Smuzhiyun int ret = 0, num_nodes;
1376*4882a593Smuzhiyun size_t dyn_size;
1377*4882a593Smuzhiyun
1378*4882a593Smuzhiyun if (!crat_image)
1379*4882a593Smuzhiyun return -EINVAL;
1380*4882a593Smuzhiyun
1381*4882a593Smuzhiyun *crat_image = NULL;
1382*4882a593Smuzhiyun
1383*4882a593Smuzhiyun 	/* Size the CPU virtual CRAT based on the number of online NUMA
1384*4882a593Smuzhiyun 	 * nodes. Allocate VCRAT_SIZE_FOR_GPU for the GPU virtual CRAT
1385*4882a593Smuzhiyun 	 * image. This should cover all current conditions; checks are in
1386*4882a593Smuzhiyun 	 * place so the allocated size is not overrun for GPUs.
1387*4882a593Smuzhiyun 	 */
1388*4882a593Smuzhiyun switch (flags) {
1389*4882a593Smuzhiyun case COMPUTE_UNIT_CPU:
1390*4882a593Smuzhiyun num_nodes = num_online_nodes();
1391*4882a593Smuzhiyun dyn_size = sizeof(struct crat_header) +
1392*4882a593Smuzhiyun num_nodes * (sizeof(struct crat_subtype_computeunit) +
1393*4882a593Smuzhiyun sizeof(struct crat_subtype_memory) +
1394*4882a593Smuzhiyun (num_nodes - 1) * sizeof(struct crat_subtype_iolink));
1395*4882a593Smuzhiyun pcrat_image = kvmalloc(dyn_size, GFP_KERNEL);
1396*4882a593Smuzhiyun if (!pcrat_image)
1397*4882a593Smuzhiyun return -ENOMEM;
1398*4882a593Smuzhiyun *size = dyn_size;
1399*4882a593Smuzhiyun 		pr_debug("CRAT size is %zu\n", dyn_size);
1400*4882a593Smuzhiyun ret = kfd_create_vcrat_image_cpu(pcrat_image, size);
1401*4882a593Smuzhiyun break;
1402*4882a593Smuzhiyun case COMPUTE_UNIT_GPU:
1403*4882a593Smuzhiyun if (!kdev)
1404*4882a593Smuzhiyun return -EINVAL;
1405*4882a593Smuzhiyun pcrat_image = kvmalloc(VCRAT_SIZE_FOR_GPU, GFP_KERNEL);
1406*4882a593Smuzhiyun if (!pcrat_image)
1407*4882a593Smuzhiyun return -ENOMEM;
1408*4882a593Smuzhiyun *size = VCRAT_SIZE_FOR_GPU;
1409*4882a593Smuzhiyun ret = kfd_create_vcrat_image_gpu(pcrat_image, size, kdev,
1410*4882a593Smuzhiyun proximity_domain);
1411*4882a593Smuzhiyun break;
1412*4882a593Smuzhiyun case (COMPUTE_UNIT_CPU | COMPUTE_UNIT_GPU):
1413*4882a593Smuzhiyun /* TODO: */
1414*4882a593Smuzhiyun ret = -EINVAL;
1415*4882a593Smuzhiyun pr_err("VCRAT not implemented for APU\n");
1416*4882a593Smuzhiyun break;
1417*4882a593Smuzhiyun default:
1418*4882a593Smuzhiyun ret = -EINVAL;
1419*4882a593Smuzhiyun }
1420*4882a593Smuzhiyun
1421*4882a593Smuzhiyun if (!ret)
1422*4882a593Smuzhiyun *crat_image = pcrat_image;
1423*4882a593Smuzhiyun else
1424*4882a593Smuzhiyun kvfree(pcrat_image);
1425*4882a593Smuzhiyun
1426*4882a593Smuzhiyun return ret;
1427*4882a593Smuzhiyun }
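
/* Worked example (illustrative only): with two online NUMA nodes the
 * COMPUTE_UNIT_CPU case above allocates
 *
 *   dyn_size = sizeof(struct crat_header)
 *            + 2 * (sizeof(struct crat_subtype_computeunit)
 *                 + sizeof(struct crat_subtype_memory)
 *                 + 1 * sizeof(struct crat_subtype_iolink))
 *
 * i.e. one compute-unit and one memory subtype per node, plus one io link
 * from each node to every other node.
 */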
1428*4882a593Smuzhiyun
1429*4882a593Smuzhiyun
1430*4882a593Smuzhiyun /* kfd_destroy_crat_image - Free a CRAT image
1431*4882a593Smuzhiyun  *
1432*4882a593Smuzhiyun  * @crat_image: [IN] crat_image from kfd_create_crat_image_xxx(..)
1433*4882a593Smuzhiyun  *
1434*4882a593Smuzhiyun  */
1435*4882a593Smuzhiyun void kfd_destroy_crat_image(void *crat_image)
1436*4882a593Smuzhiyun {
1437*4882a593Smuzhiyun kvfree(crat_image);
1438*4882a593Smuzhiyun }
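
/* Usage sketch (hypothetical caller, for illustration only; kdev and
 * proximity_domain are assumed to come from the caller's context):
 *
 *   void *crat_image = NULL;
 *   size_t image_size = 0;
 *
 *   if (!kfd_create_crat_image_virtual(&crat_image, &image_size,
 *                                      COMPUTE_UNIT_GPU, kdev,
 *                                      proximity_domain)) {
 *           ... parse crat_image of image_size bytes ...
 *           kfd_destroy_crat_image(crat_image);
 *   }
 */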
1439