/*
 * Copyright 2019 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */
#ifndef __AMDGPU_NBIO_H__
#define __AMDGPU_NBIO_H__

/*
 * amdgpu nbio functions
 */
struct nbio_hdp_flush_reg {
	u32 ref_and_mask_cp0;
	u32 ref_and_mask_cp1;
	u32 ref_and_mask_cp2;
	u32 ref_and_mask_cp3;
	u32 ref_and_mask_cp4;
	u32 ref_and_mask_cp5;
	u32 ref_and_mask_cp6;
	u32 ref_and_mask_cp7;
	u32 ref_and_mask_cp8;
	u32 ref_and_mask_cp9;
	u32 ref_and_mask_sdma0;
	u32 ref_and_mask_sdma1;
	u32 ref_and_mask_sdma2;
	u32 ref_and_mask_sdma3;
	u32 ref_and_mask_sdma4;
	u32 ref_and_mask_sdma5;
	u32 ref_and_mask_sdma6;
	u32 ref_and_mask_sdma7;
};

struct amdgpu_nbio_funcs {
	const struct nbio_hdp_flush_reg *hdp_flush_reg;
	u32 (*get_hdp_flush_req_offset)(struct amdgpu_device *adev);
	u32 (*get_hdp_flush_done_offset)(struct amdgpu_device *adev);
	u32 (*get_pcie_index_offset)(struct amdgpu_device *adev);
	u32 (*get_pcie_data_offset)(struct amdgpu_device *adev);
	u32 (*get_rev_id)(struct amdgpu_device *adev);
	void (*mc_access_enable)(struct amdgpu_device *adev, bool enable);
	void (*hdp_flush)(struct amdgpu_device *adev, struct amdgpu_ring *ring);
	u32 (*get_memsize)(struct amdgpu_device *adev);
	void (*sdma_doorbell_range)(struct amdgpu_device *adev, int instance,
				    bool use_doorbell, int doorbell_index,
				    int doorbell_size);
	void (*vcn_doorbell_range)(struct amdgpu_device *adev, bool use_doorbell,
				   int doorbell_index, int instance);
	void (*enable_doorbell_aperture)(struct amdgpu_device *adev,
					 bool enable);
	void (*enable_doorbell_selfring_aperture)(struct amdgpu_device *adev,
						  bool enable);
	void (*ih_doorbell_range)(struct amdgpu_device *adev,
				  bool use_doorbell, int doorbell_index);
	void (*enable_doorbell_interrupt)(struct amdgpu_device *adev,
					  bool enable);
	void (*update_medium_grain_clock_gating)(struct amdgpu_device *adev,
						 bool enable);
	void (*update_medium_grain_light_sleep)(struct amdgpu_device *adev,
						bool enable);
	void (*get_clockgating_state)(struct amdgpu_device *adev,
				      u32 *flags);
	void (*ih_control)(struct amdgpu_device *adev);
	void (*init_registers)(struct amdgpu_device *adev);
	void (*remap_hdp_registers)(struct amdgpu_device *adev);
	void (*handle_ras_controller_intr_no_bifring)(struct amdgpu_device *adev);
	void (*handle_ras_err_event_athub_intr_no_bifring)(struct amdgpu_device *adev);
	int (*init_ras_controller_interrupt)(struct amdgpu_device *adev);
	int (*init_ras_err_event_athub_interrupt)(struct amdgpu_device *adev);
	void (*query_ras_error_count)(struct amdgpu_device *adev,
				      void *ras_error_status);
	int (*ras_late_init)(struct amdgpu_device *adev);
};

struct amdgpu_nbio {
	const struct nbio_hdp_flush_reg *hdp_flush_reg;
	struct amdgpu_irq_src ras_controller_irq;
	struct amdgpu_irq_src ras_err_event_athub_irq;
	struct ras_common_if *ras_if;
	const struct amdgpu_nbio_funcs *funcs;
};

int amdgpu_nbio_ras_late_init(struct amdgpu_device *adev);
void amdgpu_nbio_ras_fini(struct amdgpu_device *adev);
#endif
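
/*
 * Usage sketch (editorial illustration, not part of the header above):
 * each NBIO hardware generation provides its own amdgpu_nbio_funcs table
 * (for example nbio_v7_4_funcs), and common driver code dispatches through
 * adev->nbio.funcs rather than touching NBIO registers directly.  The helper
 * below is a hypothetical caller, assuming the usual amdgpu layout in which
 * struct amdgpu_device embeds struct amdgpu_nbio as its "nbio" member:
 *
 *	static void example_flush_hdp(struct amdgpu_device *adev,
 *				      struct amdgpu_ring *ring)
 *	{
 *		// Delegate to the ASIC-specific NBIO implementation, if any.
 *		if (adev->nbio.funcs && adev->nbio.funcs->hdp_flush)
 *			adev->nbio.funcs->hdp_flush(adev, ring);
 *	}
 */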