/*
 * Copyright 2014 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#include "amdgpu.h"
#include "nbio/nbio_2_3_offset.h"
#include "nbio/nbio_2_3_sh_mask.h"
#include "gc/gc_10_1_0_offset.h"
#include "gc/gc_10_1_0_sh_mask.h"
#include "soc15.h"
#include "navi10_ih.h"
#include "soc15_common.h"
#include "mxgpu_nv.h"

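/*
 * VF <-> PF mailbox: the VF posts a request in MAILBOX_MSGBUF_TRN_DW0..3 and
 * raises TRN_MSG_VALID; the host replies through MAILBOX_MSGBUF_RCV_DW0..3
 * and the RCV_MSG_VALID/RCV_MSG_ACK bits of the mailbox control bytes.
 */

/* Ack the host's pending message by setting RCV_MSG_ACK (bit 1) of the
 * RCV control byte.
 */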
static void xgpu_nv_mailbox_send_ack(struct amdgpu_device *adev)
{
	WREG8(NV_MAIBOX_CONTROL_RCV_OFFSET_BYTE, 2);
}

static void xgpu_nv_mailbox_set_valid(struct amdgpu_device *adev, bool val)
{
	WREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE, val ? 1 : 0);
}

/*
 * This peek_msg may *only* be called from the IRQ routine, because in the
 * IRQ routine the RCV_MSG_VALID field of BIF_BX_PF_MAILBOX_CONTROL has
 * already been set to 1 by the host.
 *
 * If called outside the IRQ routine, peek_msg is not guaranteed to return
 * the correct value, since RCV_DW0 is only meaningful while RCV_MSG_VALID
 * is set by the host.
 */
static enum idh_event xgpu_nv_mailbox_peek_msg(struct amdgpu_device *adev)
{
	return RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
}

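/* Consume the pending mailbox message if it matches @event and ack the
 * host; returns -ENOENT when a different (or no) message is pending.
 */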
static int xgpu_nv_mailbox_rcv_msg(struct amdgpu_device *adev,
				   enum idh_event event)
{
	u32 reg;

	reg = RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW0);
	if (reg != event)
		return -ENOENT;

	xgpu_nv_mailbox_send_ack(adev);

	return 0;
}

static uint8_t xgpu_nv_peek_ack(struct amdgpu_device *adev)
{
	return RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE) & 2;
}

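/* Busy-wait (in 5 ms steps) for the host to assert TRN_MSG_ACK, up to
 * NV_MAILBOX_POLL_ACK_TIMEDOUT ms.
 */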
static int xgpu_nv_poll_ack(struct amdgpu_device *adev)
{
	int timeout = NV_MAILBOX_POLL_ACK_TIMEDOUT;
	u8 reg;

	do {
		reg = RREG8(NV_MAIBOX_CONTROL_TRN_OFFSET_BYTE);
		if (reg & 2)
			return 0;

		mdelay(5);
		timeout -= 5;
	} while (timeout > 1);

	pr_err("Did not get TRN_MSG_ACK from pf in %d msec\n", NV_MAILBOX_POLL_ACK_TIMEDOUT);

	return -ETIME;
}

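/* Sleep-wait (in 10 ms steps) for a specific message from the host, up to
 * NV_MAILBOX_POLL_MSG_TIMEDOUT ms.
 */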
static int xgpu_nv_poll_msg(struct amdgpu_device *adev, enum idh_event event)
{
	int r, timeout = NV_MAILBOX_POLL_MSG_TIMEDOUT;

	do {
		r = xgpu_nv_mailbox_rcv_msg(adev, event);
		if (!r)
			return 0;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

	return -ETIME;
}

static void xgpu_nv_mailbox_trans_msg(struct amdgpu_device *adev,
				      enum idh_request req, u32 data1, u32 data2, u32 data3)
{
	int r;
	uint8_t trn;

	/* IMPORTANT:
	 * Clear TRN_MSG_VALID first to clear the host's RCV_MSG_VALID; with
	 * that cleared, hw automatically clears the host's RCV_MSG_ACK, which
	 * in turn clears the VF's TRN_MSG_ACK. Otherwise xgpu_nv_poll_ack()
	 * below would return immediately on a stale ack.
	 */
	do {
		xgpu_nv_mailbox_set_valid(adev, false);
		trn = xgpu_nv_peek_ack(adev);
		if (trn) {
			pr_err("trn=%x ACK should not be asserted! wait again!\n", trn);
			msleep(1);
		}
	} while (trn);

	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW0, req);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW1, data1);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW2, data2);
	WREG32_NO_KIQ(mmMAILBOX_MSGBUF_TRN_DW3, data3);
	xgpu_nv_mailbox_set_valid(adev, true);

	/* start to poll ack */
	r = xgpu_nv_poll_ack(adev);
	if (r)
		pr_err("Did not get ack from pf, continuing\n");

	xgpu_nv_mailbox_set_valid(adev, false);
}

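/* Send a request to the host and, for handshake-type requests, poll for the
 * corresponding reply message; side data (init-data version, checksum key)
 * is then picked up from the RCV_DW1/RCV_DW2 scratch registers.
 */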
static int xgpu_nv_send_access_requests(struct amdgpu_device *adev,
					enum idh_request req)
{
	int r;
	enum idh_event event = -1;

	xgpu_nv_mailbox_trans_msg(adev, req, 0, 0, 0);

	switch (req) {
	case IDH_REQ_GPU_INIT_ACCESS:
	case IDH_REQ_GPU_FINI_ACCESS:
	case IDH_REQ_GPU_RESET_ACCESS:
		event = IDH_READY_TO_ACCESS_GPU;
		break;
	case IDH_REQ_GPU_INIT_DATA:
		event = IDH_REQ_GPU_INIT_DATA_READY;
		break;
	default:
		break;
	}

	if (event != -1) {
		r = xgpu_nv_poll_msg(adev, event);
		if (r) {
			if (req != IDH_REQ_GPU_INIT_DATA) {
				pr_err("Did not get msg:%d from pf, error=%d\n", event, r);
				return r;
			}

			/* host doesn't support REQ_GPU_INIT_DATA handshake */
			adev->virt.req_init_data_ver = 0;
		} else {
			if (req == IDH_REQ_GPU_INIT_DATA) {
				adev->virt.req_init_data_ver =
					RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW1);

				/* assume V1 in case host doesn't set version number */
				if (adev->virt.req_init_data_ver < 1)
					adev->virt.req_init_data_ver = 1;
			}
		}

		/* Retrieve checksum from mailbox2 */
		if (req == IDH_REQ_GPU_INIT_ACCESS || req == IDH_REQ_GPU_RESET_ACCESS) {
			adev->virt.fw_reserve.checksum_key =
				RREG32_NO_KIQ(mmMAILBOX_MSGBUF_RCV_DW2);
		}
	}

	return 0;
}

static int xgpu_nv_request_reset(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_RESET_ACCESS);
}

static int xgpu_nv_request_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;

	req = init ? IDH_REQ_GPU_INIT_ACCESS : IDH_REQ_GPU_FINI_ACCESS;
	return xgpu_nv_send_access_requests(adev, req);
}

static int xgpu_nv_release_full_gpu_access(struct amdgpu_device *adev,
					   bool init)
{
	enum idh_request req;
	int r = 0;

	req = init ? IDH_REL_GPU_INIT_ACCESS : IDH_REL_GPU_FINI_ACCESS;
	r = xgpu_nv_send_access_requests(adev, req);

	return r;
}

static int xgpu_nv_request_init_data(struct amdgpu_device *adev)
{
	return xgpu_nv_send_access_requests(adev, IDH_REQ_GPU_INIT_DATA);
}

static int xgpu_nv_mailbox_ack_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	DRM_DEBUG("get ack intr and do nothing.\n");
	return 0;
}

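/* Enable/disable delivery of the mailbox ACK interrupt (bit 1 of
 * MAILBOX_INT_CNTL).
 */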
static int xgpu_nv_set_mailbox_ack_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *source,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 2;
	else
		tmp &= ~2;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

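/* Deferred work triggered by an FLR (function level reset) notification from
 * the host: wait for the host to signal FLR completion, then kick GPU
 * recovery if it is needed and safe to do so.
 */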
static void xgpu_nv_mailbox_flr_work(struct work_struct *work)
{
	struct amdgpu_virt *virt = container_of(work, struct amdgpu_virt, flr_work);
	struct amdgpu_device *adev = container_of(virt, struct amdgpu_device, virt);
	int timeout = NV_MAILBOX_POLL_FLR_TIMEDOUT;

	/* block amdgpu_gpu_recover till msg FLR COMPLETE received,
	 * otherwise the mailbox msg will be ruined/reset by
	 * the VF FLR.
	 */
	if (!down_read_trylock(&adev->reset_sem))
		return;

	atomic_set(&adev->in_gpu_reset, 1);

	do {
		if (xgpu_nv_mailbox_peek_msg(adev) == IDH_FLR_NOTIFICATION_CMPL)
			goto flr_done;

		msleep(10);
		timeout -= 10;
	} while (timeout > 1);

flr_done:
	atomic_set(&adev->in_gpu_reset, 0);
	up_read(&adev->reset_sem);

	/* Trigger recovery for world switch failure if no TDR */
	if (amdgpu_device_should_recover_gpu(adev)
		&& (!amdgpu_device_has_job_running(adev) ||
		adev->sdma_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->gfx_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->compute_timeout == MAX_SCHEDULE_TIMEOUT ||
		adev->video_timeout == MAX_SCHEDULE_TIMEOUT))
		amdgpu_device_gpu_recover(adev, NULL);
}

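/* Enable/disable delivery of the mailbox message-valid interrupt (bit 0 of
 * MAILBOX_INT_CNTL).
 */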
static int xgpu_nv_set_mailbox_rcv_irq(struct amdgpu_device *adev,
				       struct amdgpu_irq_src *src,
				       unsigned type,
				       enum amdgpu_interrupt_state state)
{
	u32 tmp = RREG32_NO_KIQ(mmMAILBOX_INT_CNTL);

	if (state == AMDGPU_IRQ_STATE_ENABLE)
		tmp |= 1;
	else
		tmp &= ~1;

	WREG32_NO_KIQ(mmMAILBOX_INT_CNTL, tmp);

	return 0;
}

static int xgpu_nv_mailbox_rcv_irq(struct amdgpu_device *adev,
				   struct amdgpu_irq_src *source,
				   struct amdgpu_iv_entry *entry)
{
	enum idh_event event = xgpu_nv_mailbox_peek_msg(adev);

	switch (event) {
	case IDH_FLR_NOTIFICATION:
		if (amdgpu_sriov_runtime(adev))
			schedule_work(&adev->virt.flr_work);
		break;
	/* READY_TO_ACCESS_GPU is fetched by kernel polling, so the IRQ can
	 * ignore it here since the polling thread will handle it; other
	 * messages such as FLR complete are not handled here either.
	 */
	case IDH_CLR_MSG_BUF:
	case IDH_FLR_NOTIFICATION_CMPL:
	case IDH_READY_TO_ACCESS_GPU:
	default:
		break;
	}

	return 0;
}

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_ack_irq_funcs = {
	.set = xgpu_nv_set_mailbox_ack_irq,
	.process = xgpu_nv_mailbox_ack_irq,
};

static const struct amdgpu_irq_src_funcs xgpu_nv_mailbox_rcv_irq_funcs = {
	.set = xgpu_nv_set_mailbox_rcv_irq,
	.process = xgpu_nv_mailbox_rcv_irq,
};

void xgpu_nv_mailbox_set_irq_funcs(struct amdgpu_device *adev)
{
	adev->virt.ack_irq.num_types = 1;
	adev->virt.ack_irq.funcs = &xgpu_nv_mailbox_ack_irq_funcs;
	adev->virt.rcv_irq.num_types = 1;
	adev->virt.rcv_irq.funcs = &xgpu_nv_mailbox_rcv_irq_funcs;
}

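/* Register the BIF mailbox interrupt sources: source id 135 feeds rcv_irq
 * (message valid) and source id 138 feeds ack_irq.
 */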
int xgpu_nv_mailbox_add_irq_id(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 135, &adev->virt.rcv_irq);
	if (r)
		return r;

	r = amdgpu_irq_add_id(adev, SOC15_IH_CLIENTID_BIF, 138, &adev->virt.ack_irq);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	return 0;
}

int xgpu_nv_mailbox_get_irq(struct amdgpu_device *adev)
{
	int r;

	r = amdgpu_irq_get(adev, &adev->virt.rcv_irq, 0);
	if (r)
		return r;
	r = amdgpu_irq_get(adev, &adev->virt.ack_irq, 0);
	if (r) {
		amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
		return r;
	}

	INIT_WORK(&adev->virt.flr_work, xgpu_nv_mailbox_flr_work);

	return 0;
}

void xgpu_nv_mailbox_put_irq(struct amdgpu_device *adev)
{
	amdgpu_irq_put(adev, &adev->virt.ack_irq, 0);
	amdgpu_irq_put(adev, &adev->virt.rcv_irq, 0);
}

const struct amdgpu_virt_ops xgpu_nv_virt_ops = {
	.req_full_gpu	= xgpu_nv_request_full_gpu_access,
	.rel_full_gpu	= xgpu_nv_release_full_gpu_access,
	.req_init_data	= xgpu_nv_request_init_data,
	.reset_gpu	= xgpu_nv_request_reset,
	.wait_reset	= NULL,
	.trans_msg	= xgpu_nv_mailbox_trans_msg,
};