/*
 * Copyright 2018 Advanced Micro Devices, Inc.
 *
 * Permission is hereby granted, free of charge, to any person obtaining a
 * copy of this software and associated documentation files (the "Software"),
 * to deal in the Software without restriction, including without limitation
 * the rights to use, copy, modify, merge, publish, distribute, sublicense,
 * and/or sell copies of the Software, and to permit persons to whom the
 * Software is furnished to do so, subject to the following conditions:
 *
 * The above copyright notice and this permission notice shall be included in
 * all copies or substantial portions of the Software.
 *
 * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
 * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
 * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
 * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
 * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
 * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
 * OTHER DEALINGS IN THE SOFTWARE.
 *
 */

#ifndef __AMDGPU_SDMA_H__
#define __AMDGPU_SDMA_H__

/* max number of IP instances */
#define AMDGPU_MAX_SDMA_INSTANCES	8

/*
 * One trap IRQ id per possible SDMA instance.  AMDGPU_SDMA_IRQ_LAST doubles
 * as the number of valid entries, so the enum must stay contiguous from 0.
 */
enum amdgpu_sdma_irq {
	AMDGPU_SDMA_IRQ_INSTANCE0  = 0,
	AMDGPU_SDMA_IRQ_INSTANCE1,
	AMDGPU_SDMA_IRQ_INSTANCE2,
	AMDGPU_SDMA_IRQ_INSTANCE3,
	AMDGPU_SDMA_IRQ_INSTANCE4,
	AMDGPU_SDMA_IRQ_INSTANCE5,
	AMDGPU_SDMA_IRQ_INSTANCE6,
	AMDGPU_SDMA_IRQ_INSTANCE7,
	AMDGPU_SDMA_IRQ_LAST
};

/* Per-engine state for a single SDMA instance */
struct amdgpu_sdma_instance {
	/* SDMA firmware */
	const struct firmware	*fw;
	uint32_t		fw_version;
	uint32_t		feature_version;

	struct amdgpu_ring	ring;
	/* secondary page-queue ring; presumably only usable when
	 * amdgpu_sdma.has_page_queue is set — confirm in the IP-specific code */
	struct amdgpu_ring	page;
	bool			burst_nop;
};

/* ASIC-specific RAS (error reporting/handling) callbacks for the SDMA block */
struct amdgpu_sdma_ras_funcs {
	int (*ras_late_init)(struct amdgpu_device *adev,
			void *ras_ih_info);
	void (*ras_fini)(struct amdgpu_device *adev);
	/* query error counters of one SDMA instance into ras_error_status */
	int (*query_ras_error_count)(struct amdgpu_device *adev,
			uint32_t instance, void *ras_error_status);
	void (*reset_ras_error_count)(struct amdgpu_device *adev);
};

/* Top-level SDMA IP state shared across all instances on a device */
struct amdgpu_sdma {
	struct amdgpu_sdma_instance instance[AMDGPU_MAX_SDMA_INSTANCES];
	struct amdgpu_irq_src	trap_irq;
	struct amdgpu_irq_src	illegal_inst_irq;
	struct amdgpu_irq_src	ecc_irq;
	/* number of populated entries in instance[]; at most
	 * AMDGPU_MAX_SDMA_INSTANCES */
	int			num_instances;
	uint32_t		srbm_soft_reset;
	bool			has_page_queue;
	struct ras_common_if	*ras_if;
	const struct amdgpu_sdma_ras_funcs	*funcs;
};

/*
 * Provided by hw blocks that can move/clear data. e.g., gfx or sdma
 * But currently, we use sdma to move data.
 */
struct amdgpu_buffer_funcs {
	/* maximum bytes in a single operation */
	uint32_t	copy_max_bytes;

	/* number of dw to reserve per operation */
	unsigned	copy_num_dw;

	/* used for buffer migration */
	void (*emit_copy_buffer)(struct amdgpu_ib *ib,
				 /* src addr in bytes */
				 uint64_t src_offset,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of byte to transfer */
				 uint32_t byte_count,
				 /* NOTE(review): presumably selects a TMZ
				  * (secure) copy path — verify against the
				  * per-ASIC implementations */
				 bool tmz);

	/* maximum bytes in a single operation */
	uint32_t	fill_max_bytes;

	/* number of dw to reserve per operation */
	unsigned	fill_num_dw;

	/* used for buffer clearing */
	void (*emit_fill_buffer)(struct amdgpu_ib *ib,
				 /* value to write to memory */
				 uint32_t src_data,
				 /* dst addr in bytes */
				 uint64_t dst_offset,
				 /* number of byte to fill */
				 uint32_t byte_count);
};

/*
 * Convenience wrappers dispatching through the buffer_funcs table installed
 * on the device's memory manager (adev->mman.buffer_funcs).
 */
#define amdgpu_emit_copy_buffer(adev, ib, s, d, b, t) (adev)->mman.buffer_funcs->emit_copy_buffer((ib), (s), (d), (b), (t))
#define amdgpu_emit_fill_buffer(adev, ib, s, d, b) (adev)->mman.buffer_funcs->emit_fill_buffer((ib), (s), (d), (b))

/* map a ring back to the SDMA instance that owns it */
struct amdgpu_sdma_instance *
amdgpu_sdma_get_instance_from_ring(struct amdgpu_ring *ring);
/* map a ring to its SDMA instance index, returned through *index */
int amdgpu_sdma_get_index_from_ring(struct amdgpu_ring *ring, uint32_t *index);
/* MC address of the CSA (context save area) used for the given vmid */
uint64_t amdgpu_sdma_get_csa_mc_addr(struct amdgpu_ring *ring, unsigned vmid);
/* common RAS setup/teardown shared by the per-ASIC ras_late_init/ras_fini */
int amdgpu_sdma_ras_late_init(struct amdgpu_device *adev,
		void *ras_ih_info);
void amdgpu_sdma_ras_fini(struct amdgpu_device *adev);
/* RAS error-data callback invoked from interrupt handling */
int amdgpu_sdma_process_ras_data_cb(struct amdgpu_device *adev,
		void *err_data,
		struct amdgpu_iv_entry *entry);
/* ECC interrupt handler for the ecc_irq source in struct amdgpu_sdma */
int amdgpu_sdma_process_ecc_irq(struct amdgpu_device *adev,
		struct amdgpu_irq_src *source,
		struct amdgpu_iv_entry *entry);
#endif