/*
 * Copyright 2016 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include <linux/module.h>

#include <drm/drm_drv.h>

#include "amdgpu.h"
#include "amdgpu_ras.h"
#include "vi.h"
#include "soc15.h"
#include "nv.h"

#define POPULATE_UCODE_INFO(vf2pf_info, ucode, ver) \
	do { \
		vf2pf_info->ucode_info[ucode].id = ucode; \
		vf2pf_info->ucode_info[ucode].version = ver; \
	} while (0)

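/**
 * amdgpu_virt_mmio_blocked - check whether MMIO access is blocked by the hypervisor
 * @adev: amdgpu device pointer
 *
 * Return: true if a read of SCRATCH_REG0 comes back as all ones, i.e. MMIO
 * access outside the mailbox range is currently blocked.
 */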
bool amdgpu_virt_mmio_blocked(struct amdgpu_device *adev)
{
	/*
	 * By now all MMIO pages except the mailbox are blocked if blocking
	 * is enabled in the hypervisor. Use SCRATCH_REG0 to test.
	 */
	return RREG32_NO_KIQ(0xc040) == 0xffffffff;
}

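/**
 * amdgpu_virt_init_setting - apply VF-specific driver settings
 * @adev: amdgpu device pointer
 *
 * Force virtual display, disable atomic modesetting and clear all
 * clock/power gating flags for the VF.
 */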
void amdgpu_virt_init_setting(struct amdgpu_device *adev)
{
	/* enable virtual display */
	if (adev->mode_info.num_crtc == 0)
		adev->mode_info.num_crtc = 1;
	adev->enable_virtual_display = true;
	adev_to_drm(adev)->driver->driver_features &= ~DRIVER_ATOMIC;
	adev->cg_flags = 0;
	adev->pg_flags = 0;
}

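/**
 * amdgpu_virt_kiq_reg_write_reg_wait - write one register and poll another via KIQ
 * @adev: amdgpu device pointer
 * @reg0: register to write
 * @reg1: register to poll
 * @ref: reference value to wait for
 * @mask: mask applied to @reg1 before the comparison
 *
 * Emit a reg_write_reg_wait packet on the KIQ ring and poll its fence.
 * Retries a few times from process context; in IRQ context it gives up
 * after the first timeout.
 */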
void amdgpu_virt_kiq_reg_write_reg_wait(struct amdgpu_device *adev,
					uint32_t reg0, uint32_t reg1,
					uint32_t ref, uint32_t mask)
{
	struct amdgpu_kiq *kiq = &adev->gfx.kiq;
	struct amdgpu_ring *ring = &kiq->ring;
	signed long r, cnt = 0;
	unsigned long flags;
	uint32_t seq;

	spin_lock_irqsave(&kiq->ring_lock, flags);
	amdgpu_ring_alloc(ring, 32);
	amdgpu_ring_emit_reg_write_reg_wait(ring, reg0, reg1,
					    ref, mask);
	r = amdgpu_fence_emit_polling(ring, &seq, MAX_KIQ_REG_WAIT);
	if (r)
		goto failed_undo;

	amdgpu_ring_commit(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);

	r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);

	/* don't wait anymore for IRQ context */
	if (r < 1 && in_interrupt())
		goto failed_kiq;

	might_sleep();
	while (r < 1 && cnt++ < MAX_KIQ_REG_TRY) {

		msleep(MAX_KIQ_REG_BAILOUT_INTERVAL);
		r = amdgpu_fence_wait_polling(ring, seq, MAX_KIQ_REG_WAIT);
	}

	if (cnt > MAX_KIQ_REG_TRY)
		goto failed_kiq;

	return;

failed_undo:
	amdgpu_ring_undo(ring);
	spin_unlock_irqrestore(&kiq->ring_lock, flags);
failed_kiq:
	dev_err(adev->dev, "failed to write reg %x wait reg %x\n", reg0, reg1);
}

/**
 * amdgpu_virt_request_full_gpu() - request full gpu access
 * @adev: amdgpu device.
 * @init: is driver init time.
 * When starting driver init/fini, full GPU access must be requested from
 * the hypervisor first.
 * Return: Zero if the request succeeds, otherwise an error code.
 */
int amdgpu_virt_request_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->req_full_gpu) {
		r = virt->ops->req_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

/**
 * amdgpu_virt_release_full_gpu() - release full gpu access
 * @adev: amdgpu device.
 * @init: is driver init time.
 * When driver init/fini finishes, full GPU access must be released back
 * to the hypervisor.
 * Return: Zero if the release succeeds, otherwise an error code.
 */
int amdgpu_virt_release_full_gpu(struct amdgpu_device *adev, bool init)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->rel_full_gpu) {
		r = virt->ops->rel_full_gpu(adev, init);
		if (r)
			return r;

		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
	}
	return 0;
}

/**
 * amdgpu_virt_reset_gpu() - reset gpu
 * @adev: amdgpu device.
 * Send a reset command to the GPU hypervisor to reset the GPU that the VM
 * is using.
 * Return: Zero if the reset succeeds, otherwise an error code.
 */
int amdgpu_virt_reset_gpu(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	int r;

	if (virt->ops && virt->ops->reset_gpu) {
		r = virt->ops->reset_gpu(adev);
		if (r)
			return r;

		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	}

	return 0;
}

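/**
 * amdgpu_virt_request_init_data() - request GPU init data from the host
 * @adev: amdgpu device.
 * Ask the hypervisor for the REQ_INIT_DATA handshake and report whether the
 * host supports it, based on the version reported back in req_init_data_ver.
 */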
void amdgpu_virt_request_init_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (virt->ops && virt->ops->req_init_data)
		virt->ops->req_init_data(adev);

	if (adev->virt.req_init_data_ver > 0)
		DRM_INFO("host supports REQ_INIT_DATA handshake\n");
	else
		DRM_WARN("host doesn't support REQ_INIT_DATA handshake\n");
}

/**
 * amdgpu_virt_wait_reset() - wait for gpu reset to complete
 * @adev: amdgpu device.
 * Wait until the GPU reset issued through the hypervisor has completed.
 * Return: Zero if the reset succeeds, otherwise an error code.
 */
int amdgpu_virt_wait_reset(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;

	if (!virt->ops || !virt->ops->wait_reset)
		return -EINVAL;

	return virt->ops->wait_reset(adev);
}

/**
 * amdgpu_virt_alloc_mm_table() - alloc memory for mm table
 * @adev: amdgpu device.
 * The MM table is used by UVD and VCE for their initialization.
 * Return: Zero if the allocation succeeds, otherwise an error code.
 */
int amdgpu_virt_alloc_mm_table(struct amdgpu_device *adev)
{
	int r;

	if (!amdgpu_sriov_vf(adev) || adev->virt.mm_table.gpu_addr)
		return 0;

	r = amdgpu_bo_create_kernel(adev, PAGE_SIZE, PAGE_SIZE,
				    AMDGPU_GEM_DOMAIN_VRAM,
				    &adev->virt.mm_table.bo,
				    &adev->virt.mm_table.gpu_addr,
				    (void *)&adev->virt.mm_table.cpu_addr);
	if (r) {
		DRM_ERROR("failed to alloc mm table and error = %d.\n", r);
		return r;
	}

	memset((void *)adev->virt.mm_table.cpu_addr, 0, PAGE_SIZE);
	DRM_INFO("MM table gpu addr = 0x%llx, cpu addr = %p.\n",
		 adev->virt.mm_table.gpu_addr,
		 adev->virt.mm_table.cpu_addr);
	return 0;
}

/**
 * amdgpu_virt_free_mm_table() - free mm table memory
 * @adev: amdgpu device.
 * Free the MM table memory.
 */
void amdgpu_virt_free_mm_table(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) || !adev->virt.mm_table.gpu_addr)
		return;

	amdgpu_bo_free_kernel(&adev->virt.mm_table.bo,
			      &adev->virt.mm_table.gpu_addr,
			      (void *)&adev->virt.mm_table.cpu_addr);
	adev->virt.mm_table.gpu_addr = 0;
}

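/*
 * Simple additive checksum used for the PF<->VF message blocks: sum all
 * bytes of @obj seeded with @key, then subtract the bytes of the checksum
 * field itself so the value can be computed over a buffer that already
 * contains (or will contain) the checksum. See amdgpu_virt_write_vf2pf_data()
 * for the producer side, which passes key = 0 and checksum = 0.
 */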
unsigned int amd_sriov_msg_checksum(void *obj,
				    unsigned long obj_size,
				    unsigned int key,
				    unsigned int checksum)
{
	unsigned int ret = key;
	unsigned long i = 0;
	unsigned char *pos;

	pos = (unsigned char *)obj;
	/* calculate checksum */
	for (i = 0; i < obj_size; ++i)
		ret += *(pos + i);
	/* minus the checksum itself */
	pos = (unsigned char *)&checksum;
	for (i = 0; i < sizeof(checksum); ++i)
		ret -= *(pos + i);
	return ret;
}

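/* Allocate the bookkeeping used to track and reserve RAS bad pages on a VF. */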
static int amdgpu_virt_init_ras_err_handler_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data **data = &virt->virt_eh_data;
	/* GPU will be marked bad on the host if the bad page count exceeds 10,
	 * so allocating 512 entries is enough.
	 */
	unsigned int align_space = 512;
	void *bps = NULL;
	struct amdgpu_bo **bps_bo = NULL;

	*data = kmalloc(sizeof(struct amdgpu_virt_ras_err_handler_data), GFP_KERNEL);
	if (!*data)
		return -ENOMEM;

	bps = kmalloc(align_space * sizeof((*data)->bps), GFP_KERNEL);
	bps_bo = kmalloc(align_space * sizeof((*data)->bps_bo), GFP_KERNEL);

	if (!bps || !bps_bo) {
		kfree(bps);
		kfree(bps_bo);
		kfree(*data);
		return -ENOMEM;
	}

	(*data)->bps = bps;
	(*data)->bps_bo = bps_bo;
	(*data)->count = 0;
	(*data)->last_reserved = 0;

	virt->ras_init_done = true;

	return 0;
}

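/* Release the VRAM reservations that were pinned over retired pages. */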
static void amdgpu_virt_ras_release_bp(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	struct amdgpu_bo *bo;
	int i;

	if (!data)
		return;

	for (i = data->last_reserved - 1; i >= 0; i--) {
		bo = data->bps_bo[i];
		amdgpu_bo_free_kernel(&bo, NULL, NULL);
		data->bps_bo[i] = bo;
		data->last_reserved = i;
	}
}

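/* Tear down the RAS bad-page bookkeeping allocated in
 * amdgpu_virt_init_ras_err_handler_data().
 */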
void amdgpu_virt_release_ras_err_handler_data(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;

	virt->ras_init_done = false;

	if (!data)
		return;

	amdgpu_virt_ras_release_bp(adev);

	kfree(data->bps);
	kfree(data->bps_bo);
	kfree(data);
	virt->virt_eh_data = NULL;
}

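/* Append @pages bad-page records to the tracked list. */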
static void amdgpu_virt_ras_add_bps(struct amdgpu_device *adev,
				    struct eeprom_table_record *bps, int pages)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;

	if (!data)
		return;

	memcpy(&data->bps[data->count], bps, pages * sizeof(*data->bps));
	data->count += pages;
}

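/* Reserve VRAM over every tracked retired page that is not yet reserved,
 * so the bad pages cannot be handed out to anyone else.
 */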
static void amdgpu_virt_ras_reserve_bps(struct amdgpu_device *adev)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	struct amdgpu_bo *bo = NULL;
	uint64_t bp;
	int i;

	if (!data)
		return;

	for (i = data->last_reserved; i < data->count; i++) {
		bp = data->bps[i].retired_page;

		/* There are two cases of reserve error that should be ignored:
		 * 1) a ras bad page has been allocated (used by someone);
		 * 2) a ras bad page has been reserved (duplicate error injection
		 *    for one page);
		 */
		if (amdgpu_bo_create_kernel_at(adev, bp << AMDGPU_GPU_PAGE_SHIFT,
					       AMDGPU_GPU_PAGE_SIZE,
					       AMDGPU_GEM_DOMAIN_VRAM,
					       &bo, NULL))
			DRM_DEBUG("RAS WARN: reserve vram for retired page %llx fail\n", bp);

		data->bps_bo[i] = bo;
		data->last_reserved = i + 1;
		bo = NULL;
	}
}

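/* Return true if @retired_page is already tracked (or tracking is not set up). */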
static bool amdgpu_virt_ras_check_bad_page(struct amdgpu_device *adev,
					   uint64_t retired_page)
{
	struct amdgpu_virt *virt = &adev->virt;
	struct amdgpu_virt_ras_err_handler_data *data = virt->virt_eh_data;
	int i;

	if (!data)
		return true;

	for (i = 0; i < data->count; i++)
		if (retired_page == data->bps[i].retired_page)
			return true;

	return false;
}

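/* Walk the bad-page block published by the host in reserved VRAM, then track
 * and reserve every retired page that is not already known.
 */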
static void amdgpu_virt_add_bad_page(struct amdgpu_device *adev,
				     uint64_t bp_block_offset, uint32_t bp_block_size)
{
	struct eeprom_table_record bp;
	uint64_t retired_page;
	uint32_t bp_idx, bp_cnt;

	if (bp_block_size) {
		bp_cnt = bp_block_size / sizeof(uint64_t);
		for (bp_idx = 0; bp_idx < bp_cnt; bp_idx++) {
			retired_page = *(uint64_t *)(adev->mman.fw_vram_usage_va +
					bp_block_offset + bp_idx * sizeof(uint64_t));
			bp.retired_page = retired_page;

			if (amdgpu_virt_ras_check_bad_page(adev, retired_page))
				continue;

			amdgpu_virt_ras_add_bps(adev, &bp, 1);

			amdgpu_virt_ras_reserve_bps(adev);
		}
	}
}

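/* Validate and consume the PF->VF message the host placed in reserved VRAM:
 * check size and checksum, then pick up the GIM feature flags and (for v2)
 * the requested VF->PF update interval.
 */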
static int amdgpu_virt_read_pf2vf_data(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_pf2vf_info_header *pf2vf_info = adev->virt.fw_reserve.p_pf2vf;
	uint32_t checksum;
	uint32_t checkval;

	if (adev->virt.fw_reserve.p_pf2vf == NULL)
		return -EINVAL;

	if (pf2vf_info->size > 1024) {
		DRM_ERROR("invalid pf2vf message size\n");
		return -EINVAL;
	}

	switch (pf2vf_info->version) {
	case 1:
		checksum = ((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->checksum;
		checkval = amd_sriov_msg_checksum(
			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
			adev->virt.fw_reserve.checksum_key, checksum);
		if (checksum != checkval) {
			DRM_ERROR("invalid pf2vf message\n");
			return -EINVAL;
		}

		adev->virt.gim_feature =
			((struct amdgim_pf2vf_info_v1 *)pf2vf_info)->feature_flags;
		break;
	case 2:
		/* TODO: missing key, need to add it later */
		checksum = ((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->checksum;
		checkval = amd_sriov_msg_checksum(
			adev->virt.fw_reserve.p_pf2vf, pf2vf_info->size,
			0, checksum);
		if (checksum != checkval) {
			DRM_ERROR("invalid pf2vf message\n");
			return -EINVAL;
		}

		adev->virt.vf2pf_update_interval_ms =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->vf2pf_update_interval_ms;
		adev->virt.gim_feature =
			((struct amd_sriov_msg_pf2vf_info *)pf2vf_info)->feature_flags.all;

		break;
	default:
		DRM_ERROR("invalid pf2vf version\n");
		return -EINVAL;
	}

	/* clamp the update interval to a sane range */
	if (adev->virt.vf2pf_update_interval_ms < 200 || adev->virt.vf2pf_update_interval_ms > 10000)
		adev->virt.vf2pf_update_interval_ms = 2000;

	return 0;
}

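/* Fill in the firmware version table of the VF->PF message from the versions
 * the driver loaded for each IP block.
 */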
static void amdgpu_virt_populate_vf2pf_ucode_info(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_vf2pf_info *vf2pf_info;
	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;

	if (adev->virt.fw_reserve.p_vf2pf == NULL)
		return;

	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCE, adev->vce.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_UVD, adev->uvd.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MC, adev->gmc.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ME, adev->gfx.me_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_PFP, adev->gfx.pfp_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_CE, adev->gfx.ce_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC, adev->gfx.rlc_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLC, adev->gfx.rlc_srlc_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLG, adev->gfx.rlc_srlg_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_RLC_SRLS, adev->gfx.rlc_srls_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC, adev->gfx.mec_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_MEC2, adev->gfx.mec2_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SOS, adev->psp.sos_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_ASD, adev->psp.asd_fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_RAS, adev->psp.ta_ras_ucode_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_TA_XGMI, adev->psp.ta_xgmi_ucode_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SMC, adev->pm.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA, adev->sdma.instance[0].fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_SDMA2, adev->sdma.instance[1].fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_VCN, adev->vcn.fw_version);
	POPULATE_UCODE_INFO(vf2pf_info, AMD_SRIOV_UCODE_ID_DMCU, adev->dm.dmcu_fw_version);
}

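/* Build a fresh VF->PF message in reserved VRAM: header, driver version,
 * framebuffer usage and firmware versions, then the checksum over the whole
 * structure.
 */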
static int amdgpu_virt_write_vf2pf_data(struct amdgpu_device *adev)
{
	struct amd_sriov_msg_vf2pf_info *vf2pf_info;
	struct ttm_resource_manager *vram_man = ttm_manager_type(&adev->mman.bdev, TTM_PL_VRAM);

	vf2pf_info = (struct amd_sriov_msg_vf2pf_info *) adev->virt.fw_reserve.p_vf2pf;

	if (adev->virt.fw_reserve.p_vf2pf == NULL)
		return -EINVAL;

	memset(vf2pf_info, 0, sizeof(struct amd_sriov_msg_vf2pf_info));

	vf2pf_info->header.size = sizeof(struct amd_sriov_msg_vf2pf_info);
	vf2pf_info->header.version = AMD_SRIOV_MSG_FW_VRAM_VF2PF_VER;

#ifdef MODULE
	if (THIS_MODULE->version != NULL)
		strcpy(vf2pf_info->driver_version, THIS_MODULE->version);
	else
#endif
		strcpy(vf2pf_info->driver_version, "N/A");

	vf2pf_info->pf2vf_version_required = 0; /* no requirement, guest understands all */
	vf2pf_info->driver_cert = 0;
	vf2pf_info->os_info.all = 0;

	vf2pf_info->fb_usage = amdgpu_vram_mgr_usage(vram_man) >> 20;
	vf2pf_info->fb_vis_usage = amdgpu_vram_mgr_vis_usage(vram_man) >> 20;
	vf2pf_info->fb_size = adev->gmc.real_vram_size >> 20;
	vf2pf_info->fb_vis_size = adev->gmc.visible_vram_size >> 20;

	amdgpu_virt_populate_vf2pf_ucode_info(adev);

	/* TODO: read dynamic info */
	vf2pf_info->gfx_usage = 0;
	vf2pf_info->compute_usage = 0;
	vf2pf_info->encode_usage = 0;
	vf2pf_info->decode_usage = 0;

	vf2pf_info->checksum =
		amd_sriov_msg_checksum(
		vf2pf_info, vf2pf_info->header.size, 0, 0);

	return 0;
}

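/* Periodic worker: refresh the PF->VF data, republish the VF->PF data and
 * re-arm itself at the negotiated interval.
 */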
void amdgpu_virt_update_vf2pf_work_item(struct work_struct *work)
{
	struct amdgpu_device *adev = container_of(work, struct amdgpu_device, virt.vf2pf_work.work);
	int ret;

	ret = amdgpu_virt_read_pf2vf_data(adev);
	if (ret)
		goto out;
	amdgpu_virt_write_vf2pf_data(adev);

out:
	schedule_delayed_work(&(adev->virt.vf2pf_work),
			      msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
}

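/* Stop the periodic VF->PF exchange if it was ever started. */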
void amdgpu_virt_fini_data_exchange(struct amdgpu_device *adev)
{
	if (adev->virt.vf2pf_update_interval_ms != 0) {
		DRM_INFO("clean up the vf2pf work item\n");
		flush_delayed_work(&adev->virt.vf2pf_work);
		cancel_delayed_work_sync(&adev->virt.vf2pf_work);
	}
}

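/* Set up the PF<->VF data exchange. With the reserved-VRAM region mapped the
 * full exchange runs and the periodic worker is armed; otherwise only the
 * PF->VF header embedded in the VBIOS image is parsed.
 */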
void amdgpu_virt_init_data_exchange(struct amdgpu_device *adev)
{
	adev->virt.fw_reserve.p_pf2vf = NULL;
	adev->virt.fw_reserve.p_vf2pf = NULL;
	adev->virt.vf2pf_update_interval_ms = 0;

	if (adev->mman.fw_vram_usage_va != NULL) {
		/* go through this logic in ip_init and reset to init the workqueue */
		amdgpu_virt_exchange_data(adev);

		INIT_DELAYED_WORK(&adev->virt.vf2pf_work, amdgpu_virt_update_vf2pf_work_item);
		schedule_delayed_work(&(adev->virt.vf2pf_work),
				      msecs_to_jiffies(adev->virt.vf2pf_update_interval_ms));
	} else if (adev->bios != NULL) {
		/* go through this logic in the early init stage to get necessary
		 * flags, e.g. rlcg_acc related
		 */
		adev->virt.fw_reserve.p_pf2vf =
			(struct amd_sriov_msg_pf2vf_info_header *)
			(adev->bios + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));

		amdgpu_virt_read_pf2vf_data(adev);
	}
}

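/* Map the PF->VF and VF->PF message areas inside the reserved-VRAM region,
 * run one exchange in each direction and, for v2 messages, pick up the
 * host-provided bad-page block.
 */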
void amdgpu_virt_exchange_data(struct amdgpu_device *adev)
{
	uint64_t bp_block_offset = 0;
	uint32_t bp_block_size = 0;
	struct amd_sriov_msg_pf2vf_info *pf2vf_v2 = NULL;

	if (adev->mman.fw_vram_usage_va != NULL) {

		adev->virt.fw_reserve.p_pf2vf =
			(struct amd_sriov_msg_pf2vf_info_header *)
			(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_PF2VF_OFFSET_KB << 10));
		adev->virt.fw_reserve.p_vf2pf =
			(struct amd_sriov_msg_vf2pf_info_header *)
			(adev->mman.fw_vram_usage_va + (AMD_SRIOV_MSG_VF2PF_OFFSET_KB << 10));

		amdgpu_virt_read_pf2vf_data(adev);
		amdgpu_virt_write_vf2pf_data(adev);

		/* bad page handling for version 2 */
		if (adev->virt.fw_reserve.p_pf2vf->version == 2) {
			pf2vf_v2 = (struct amd_sriov_msg_pf2vf_info *)adev->virt.fw_reserve.p_pf2vf;

			bp_block_offset = ((uint64_t)pf2vf_v2->bp_block_offset_low & 0xFFFFFFFF) |
				((((uint64_t)pf2vf_v2->bp_block_offset_high) << 32) & 0xFFFFFFFF00000000);
			bp_block_size = pf2vf_v2->bp_block_size;

			if (bp_block_size && !adev->virt.ras_init_done)
				amdgpu_virt_init_ras_err_handler_data(adev);

			if (adev->virt.ras_init_done)
				amdgpu_virt_add_bad_page(adev, bp_block_offset, bp_block_size);
		}
	}
}

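/* Read the IOV identification register for the ASIC to detect whether the
 * driver runs as an SR-IOV VF, with SR-IOV enabled, or in passthrough, and
 * install the matching virtualization ops.
 */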
void amdgpu_detect_virtualization(struct amdgpu_device *adev)
{
	uint32_t reg;

	switch (adev->asic_type) {
	case CHIP_TONGA:
	case CHIP_FIJI:
		reg = RREG32(mmBIF_IOV_FUNC_IDENTIFIER);
		break;
	case CHIP_VEGA10:
	case CHIP_VEGA20:
	case CHIP_NAVI10:
	case CHIP_NAVI12:
	case CHIP_SIENNA_CICHLID:
	case CHIP_ARCTURUS:
		reg = RREG32(mmRCC_IOV_FUNC_IDENTIFIER);
		break;
	default: /* other chips don't support SRIOV */
		reg = 0;
		break;
	}

	if (reg & 1)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_IS_VF;

	if (reg & 0x80000000)
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_ENABLE_IOV;

	if (!reg) {
		if (is_virtual_machine()) /* passthrough mode is exclusive with sriov mode */
			adev->virt.caps |= AMDGPU_PASSTHROUGH_MODE;
	}

	if (amdgpu_sriov_vf(adev) && adev->asic_type == CHIP_SIENNA_CICHLID)
		/* VF MMIO access (except mailbox range) from CPU
		 * will be blocked during sriov runtime
		 */
		adev->virt.caps |= AMDGPU_VF_MMIO_ACCESS_PROTECT;

	/* we have the ability to check now */
	if (amdgpu_sriov_vf(adev)) {
		switch (adev->asic_type) {
		case CHIP_TONGA:
		case CHIP_FIJI:
			vi_set_virt_ops(adev);
			break;
		case CHIP_VEGA10:
		case CHIP_VEGA20:
		case CHIP_ARCTURUS:
			soc15_set_virt_ops(adev);
			break;
		case CHIP_NAVI10:
		case CHIP_NAVI12:
		case CHIP_SIENNA_CICHLID:
			nv_set_virt_ops(adev);
			/* try to send a GPU_INIT_DATA request to the host */
			amdgpu_virt_request_init_data(adev);
			break;
		default: /* other chips don't support SRIOV */
			DRM_ERROR("Unknown asic type: %d!\n", adev->asic_type);
			break;
		}
	}
}

static bool amdgpu_virt_access_debugfs_is_mmio(struct amdgpu_device *adev)
{
	return amdgpu_sriov_is_debug(adev) ? true : false;
}

static bool amdgpu_virt_access_debugfs_is_kiq(struct amdgpu_device *adev)
{
	return amdgpu_sriov_is_normal(adev) ? true : false;
}

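/* Allow debugfs register access on a VF: in debug mode drop the RUNTIME cap
 * so MMIO is used directly, in normal mode go through KIQ, otherwise refuse.
 */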
int amdgpu_virt_enable_access_debugfs(struct amdgpu_device *adev)
{
	if (!amdgpu_sriov_vf(adev) ||
	    amdgpu_virt_access_debugfs_is_kiq(adev))
		return 0;

	if (amdgpu_virt_access_debugfs_is_mmio(adev))
		adev->virt.caps &= ~AMDGPU_SRIOV_CAPS_RUNTIME;
	else
		return -EPERM;

	return 0;
}

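/* Restore the RUNTIME cap that amdgpu_virt_enable_access_debugfs() may have
 * cleared.
 */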
void amdgpu_virt_disable_access_debugfs(struct amdgpu_device *adev)
{
	if (amdgpu_sriov_vf(adev))
		adev->virt.caps |= AMDGPU_SRIOV_CAPS_RUNTIME;
}

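/* Classify how the device is virtualized: bare metal, one-VF or multi-VF,
 * based on amdgpu_sriov_is_pp_one_vf().
 */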
enum amdgpu_sriov_vf_mode amdgpu_virt_get_sriov_vf_mode(struct amdgpu_device *adev)
{
	enum amdgpu_sriov_vf_mode mode;

	if (amdgpu_sriov_vf(adev)) {
		if (amdgpu_sriov_is_pp_one_vf(adev))
			mode = SRIOV_VF_MODE_ONE_VF;
		else
			mode = SRIOV_VF_MODE_MULTI_VF;
	} else {
		mode = SRIOV_VF_MODE_BARE_METAL;
	}

	return mode;
}