/* SPDX-License-Identifier: GPL-2.0 WITH Linux-syscall-note
 *
 * Copyright 2016-2020 HabanaLabs, Ltd.
 * All Rights Reserved.
 *
 */

#ifndef HABANALABS_H_
#define HABANALABS_H_

#include <linux/types.h>
#include <linux/ioctl.h>

/*
 * Defines that are asic-specific but constitute the ABI between the kernel
 * driver and userspace
 */
#define GOYA_KMD_SRAM_RESERVED_SIZE_FROM_START		0x8000	/* 32KB */
#define GAUDI_DRIVER_SRAM_RESERVED_SIZE_FROM_START	0x80	/* 128 bytes */

#define GAUDI_FIRST_AVAILABLE_W_S_SYNC_OBJECT		48
#define GAUDI_FIRST_AVAILABLE_W_S_MONITOR		24

/*
 * Goya queue Numbering
 *
 * The external queues (PCI DMA channels) MUST be placed before the internal
 * queues, and each group (PCI DMA channels and internal) must be contiguous
 * within itself, but there can be a gap between the two groups (although this
 * is not recommended).
 */

enum goya_queue_id {
	GOYA_QUEUE_ID_DMA_0 = 0,
	GOYA_QUEUE_ID_DMA_1 = 1,
	GOYA_QUEUE_ID_DMA_2 = 2,
	GOYA_QUEUE_ID_DMA_3 = 3,
	GOYA_QUEUE_ID_DMA_4 = 4,
	GOYA_QUEUE_ID_CPU_PQ = 5,
	GOYA_QUEUE_ID_MME = 6,	/* Internal queues start here */
	GOYA_QUEUE_ID_TPC0 = 7,
	GOYA_QUEUE_ID_TPC1 = 8,
	GOYA_QUEUE_ID_TPC2 = 9,
	GOYA_QUEUE_ID_TPC3 = 10,
	GOYA_QUEUE_ID_TPC4 = 11,
	GOYA_QUEUE_ID_TPC5 = 12,
	GOYA_QUEUE_ID_TPC6 = 13,
	GOYA_QUEUE_ID_TPC7 = 14,
	GOYA_QUEUE_ID_SIZE
};

/*
 * Gaudi queue Numbering
 * External queues (PCI DMA channels) are DMA_0_*, DMA_1_* and DMA_5_*.
 * Except for one CPU queue, all the rest are internal queues.
 */

enum gaudi_queue_id {
	GAUDI_QUEUE_ID_DMA_0_0 = 0,	/* external */
	GAUDI_QUEUE_ID_DMA_0_1 = 1,	/* external */
	GAUDI_QUEUE_ID_DMA_0_2 = 2,	/* external */
	GAUDI_QUEUE_ID_DMA_0_3 = 3,	/* external */
	GAUDI_QUEUE_ID_DMA_1_0 = 4,	/* external */
	GAUDI_QUEUE_ID_DMA_1_1 = 5,	/* external */
	GAUDI_QUEUE_ID_DMA_1_2 = 6,	/* external */
	GAUDI_QUEUE_ID_DMA_1_3 = 7,	/* external */
	GAUDI_QUEUE_ID_CPU_PQ = 8,	/* CPU */
	GAUDI_QUEUE_ID_DMA_2_0 = 9,	/* internal */
	GAUDI_QUEUE_ID_DMA_2_1 = 10,	/* internal */
	GAUDI_QUEUE_ID_DMA_2_2 = 11,	/* internal */
	GAUDI_QUEUE_ID_DMA_2_3 = 12,	/* internal */
	GAUDI_QUEUE_ID_DMA_3_0 = 13,	/* internal */
	GAUDI_QUEUE_ID_DMA_3_1 = 14,	/* internal */
	GAUDI_QUEUE_ID_DMA_3_2 = 15,	/* internal */
	GAUDI_QUEUE_ID_DMA_3_3 = 16,	/* internal */
	GAUDI_QUEUE_ID_DMA_4_0 = 17,	/* internal */
	GAUDI_QUEUE_ID_DMA_4_1 = 18,	/* internal */
	GAUDI_QUEUE_ID_DMA_4_2 = 19,	/* internal */
	GAUDI_QUEUE_ID_DMA_4_3 = 20,	/* internal */
	GAUDI_QUEUE_ID_DMA_5_0 = 21,	/* external */
	GAUDI_QUEUE_ID_DMA_5_1 = 22,	/* external */
	GAUDI_QUEUE_ID_DMA_5_2 = 23,	/* external */
	GAUDI_QUEUE_ID_DMA_5_3 = 24,	/* external */
	GAUDI_QUEUE_ID_DMA_6_0 = 25,	/* internal */
	GAUDI_QUEUE_ID_DMA_6_1 = 26,	/* internal */
	GAUDI_QUEUE_ID_DMA_6_2 = 27,	/* internal */
	GAUDI_QUEUE_ID_DMA_6_3 = 28,	/* internal */
	GAUDI_QUEUE_ID_DMA_7_0 = 29,	/* internal */
	GAUDI_QUEUE_ID_DMA_7_1 = 30,	/* internal */
	GAUDI_QUEUE_ID_DMA_7_2 = 31,	/* internal */
	GAUDI_QUEUE_ID_DMA_7_3 = 32,	/* internal */
	GAUDI_QUEUE_ID_MME_0_0 = 33,	/* internal */
	GAUDI_QUEUE_ID_MME_0_1 = 34,	/* internal */
	GAUDI_QUEUE_ID_MME_0_2 = 35,	/* internal */
	GAUDI_QUEUE_ID_MME_0_3 = 36,	/* internal */
	GAUDI_QUEUE_ID_MME_1_0 = 37,	/* internal */
	GAUDI_QUEUE_ID_MME_1_1 = 38,	/* internal */
	GAUDI_QUEUE_ID_MME_1_2 = 39,	/* internal */
	GAUDI_QUEUE_ID_MME_1_3 = 40,	/* internal */
	GAUDI_QUEUE_ID_TPC_0_0 = 41,	/* internal */
	GAUDI_QUEUE_ID_TPC_0_1 = 42,	/* internal */
	GAUDI_QUEUE_ID_TPC_0_2 = 43,	/* internal */
	GAUDI_QUEUE_ID_TPC_0_3 = 44,	/* internal */
	GAUDI_QUEUE_ID_TPC_1_0 = 45,	/* internal */
	GAUDI_QUEUE_ID_TPC_1_1 = 46,	/* internal */
	GAUDI_QUEUE_ID_TPC_1_2 = 47,	/* internal */
	GAUDI_QUEUE_ID_TPC_1_3 = 48,	/* internal */
	GAUDI_QUEUE_ID_TPC_2_0 = 49,	/* internal */
	GAUDI_QUEUE_ID_TPC_2_1 = 50,	/* internal */
	GAUDI_QUEUE_ID_TPC_2_2 = 51,	/* internal */
	GAUDI_QUEUE_ID_TPC_2_3 = 52,	/* internal */
	GAUDI_QUEUE_ID_TPC_3_0 = 53,	/* internal */
	GAUDI_QUEUE_ID_TPC_3_1 = 54,	/* internal */
	GAUDI_QUEUE_ID_TPC_3_2 = 55,	/* internal */
	GAUDI_QUEUE_ID_TPC_3_3 = 56,	/* internal */
	GAUDI_QUEUE_ID_TPC_4_0 = 57,	/* internal */
	GAUDI_QUEUE_ID_TPC_4_1 = 58,	/* internal */
	GAUDI_QUEUE_ID_TPC_4_2 = 59,	/* internal */
	GAUDI_QUEUE_ID_TPC_4_3 = 60,	/* internal */
	GAUDI_QUEUE_ID_TPC_5_0 = 61,	/* internal */
	GAUDI_QUEUE_ID_TPC_5_1 = 62,	/* internal */
	GAUDI_QUEUE_ID_TPC_5_2 = 63,	/* internal */
	GAUDI_QUEUE_ID_TPC_5_3 = 64,	/* internal */
	GAUDI_QUEUE_ID_TPC_6_0 = 65,	/* internal */
	GAUDI_QUEUE_ID_TPC_6_1 = 66,	/* internal */
	GAUDI_QUEUE_ID_TPC_6_2 = 67,	/* internal */
	GAUDI_QUEUE_ID_TPC_6_3 = 68,	/* internal */
	GAUDI_QUEUE_ID_TPC_7_0 = 69,	/* internal */
	GAUDI_QUEUE_ID_TPC_7_1 = 70,	/* internal */
	GAUDI_QUEUE_ID_TPC_7_2 = 71,	/* internal */
	GAUDI_QUEUE_ID_TPC_7_3 = 72,	/* internal */
	GAUDI_QUEUE_ID_NIC_0_0 = 73,	/* internal */
	GAUDI_QUEUE_ID_NIC_0_1 = 74,	/* internal */
	GAUDI_QUEUE_ID_NIC_0_2 = 75,	/* internal */
	GAUDI_QUEUE_ID_NIC_0_3 = 76,	/* internal */
	GAUDI_QUEUE_ID_NIC_1_0 = 77,	/* internal */
	GAUDI_QUEUE_ID_NIC_1_1 = 78,	/* internal */
	GAUDI_QUEUE_ID_NIC_1_2 = 79,	/* internal */
	GAUDI_QUEUE_ID_NIC_1_3 = 80,	/* internal */
	GAUDI_QUEUE_ID_NIC_2_0 = 81,	/* internal */
	GAUDI_QUEUE_ID_NIC_2_1 = 82,	/* internal */
	GAUDI_QUEUE_ID_NIC_2_2 = 83,	/* internal */
	GAUDI_QUEUE_ID_NIC_2_3 = 84,	/* internal */
	GAUDI_QUEUE_ID_NIC_3_0 = 85,	/* internal */
	GAUDI_QUEUE_ID_NIC_3_1 = 86,	/* internal */
	GAUDI_QUEUE_ID_NIC_3_2 = 87,	/* internal */
	GAUDI_QUEUE_ID_NIC_3_3 = 88,	/* internal */
	GAUDI_QUEUE_ID_NIC_4_0 = 89,	/* internal */
	GAUDI_QUEUE_ID_NIC_4_1 = 90,	/* internal */
	GAUDI_QUEUE_ID_NIC_4_2 = 91,	/* internal */
	GAUDI_QUEUE_ID_NIC_4_3 = 92,	/* internal */
	GAUDI_QUEUE_ID_NIC_5_0 = 93,	/* internal */
	GAUDI_QUEUE_ID_NIC_5_1 = 94,	/* internal */
	GAUDI_QUEUE_ID_NIC_5_2 = 95,	/* internal */
	GAUDI_QUEUE_ID_NIC_5_3 = 96,	/* internal */
	GAUDI_QUEUE_ID_NIC_6_0 = 97,	/* internal */
	GAUDI_QUEUE_ID_NIC_6_1 = 98,	/* internal */
	GAUDI_QUEUE_ID_NIC_6_2 = 99,	/* internal */
	GAUDI_QUEUE_ID_NIC_6_3 = 100,	/* internal */
	GAUDI_QUEUE_ID_NIC_7_0 = 101,	/* internal */
	GAUDI_QUEUE_ID_NIC_7_1 = 102,	/* internal */
	GAUDI_QUEUE_ID_NIC_7_2 = 103,	/* internal */
	GAUDI_QUEUE_ID_NIC_7_3 = 104,	/* internal */
	GAUDI_QUEUE_ID_NIC_8_0 = 105,	/* internal */
	GAUDI_QUEUE_ID_NIC_8_1 = 106,	/* internal */
	GAUDI_QUEUE_ID_NIC_8_2 = 107,	/* internal */
	GAUDI_QUEUE_ID_NIC_8_3 = 108,	/* internal */
	GAUDI_QUEUE_ID_NIC_9_0 = 109,	/* internal */
	GAUDI_QUEUE_ID_NIC_9_1 = 110,	/* internal */
	GAUDI_QUEUE_ID_NIC_9_2 = 111,	/* internal */
	GAUDI_QUEUE_ID_NIC_9_3 = 112,	/* internal */
	GAUDI_QUEUE_ID_SIZE
};

/*
 * Engine Numbering
 *
 * Used in the "busy_engines_mask" field in `struct hl_info_hw_idle'
 */

enum goya_engine_id {
	GOYA_ENGINE_ID_DMA_0 = 0,
	GOYA_ENGINE_ID_DMA_1,
	GOYA_ENGINE_ID_DMA_2,
	GOYA_ENGINE_ID_DMA_3,
	GOYA_ENGINE_ID_DMA_4,
	GOYA_ENGINE_ID_MME_0,
	GOYA_ENGINE_ID_TPC_0,
	GOYA_ENGINE_ID_TPC_1,
	GOYA_ENGINE_ID_TPC_2,
	GOYA_ENGINE_ID_TPC_3,
	GOYA_ENGINE_ID_TPC_4,
	GOYA_ENGINE_ID_TPC_5,
	GOYA_ENGINE_ID_TPC_6,
	GOYA_ENGINE_ID_TPC_7,
	GOYA_ENGINE_ID_SIZE
};

enum gaudi_engine_id {
	GAUDI_ENGINE_ID_DMA_0 = 0,
	GAUDI_ENGINE_ID_DMA_1,
	GAUDI_ENGINE_ID_DMA_2,
	GAUDI_ENGINE_ID_DMA_3,
	GAUDI_ENGINE_ID_DMA_4,
	GAUDI_ENGINE_ID_DMA_5,
	GAUDI_ENGINE_ID_DMA_6,
	GAUDI_ENGINE_ID_DMA_7,
	GAUDI_ENGINE_ID_MME_0,
	GAUDI_ENGINE_ID_MME_1,
	GAUDI_ENGINE_ID_MME_2,
	GAUDI_ENGINE_ID_MME_3,
	GAUDI_ENGINE_ID_TPC_0,
	GAUDI_ENGINE_ID_TPC_1,
	GAUDI_ENGINE_ID_TPC_2,
	GAUDI_ENGINE_ID_TPC_3,
	GAUDI_ENGINE_ID_TPC_4,
	GAUDI_ENGINE_ID_TPC_5,
	GAUDI_ENGINE_ID_TPC_6,
	GAUDI_ENGINE_ID_TPC_7,
	GAUDI_ENGINE_ID_NIC_0,
	GAUDI_ENGINE_ID_NIC_1,
	GAUDI_ENGINE_ID_NIC_2,
	GAUDI_ENGINE_ID_NIC_3,
	GAUDI_ENGINE_ID_NIC_4,
	GAUDI_ENGINE_ID_NIC_5,
	GAUDI_ENGINE_ID_NIC_6,
	GAUDI_ENGINE_ID_NIC_7,
	GAUDI_ENGINE_ID_NIC_8,
	GAUDI_ENGINE_ID_NIC_9,
	GAUDI_ENGINE_ID_SIZE
};

enum hl_device_status {
	HL_DEVICE_STATUS_OPERATIONAL,
	HL_DEVICE_STATUS_IN_RESET,
	HL_DEVICE_STATUS_MALFUNCTION
};

/* Opcode for management ioctl
 *
 * HL_INFO_HW_IP_INFO - Receive information about different IP blocks in the
 *                      device.
 * HL_INFO_HW_EVENTS - Receive an array describing how many times each event
 *                     occurred since the last hard reset.
 * HL_INFO_DRAM_USAGE - Retrieve the dram usage inside the device and of the
 *                      specific context. This is relevant only for devices
 *                      where the dram is managed by the kernel driver.
 * HL_INFO_HW_IDLE - Retrieve information about the idle status of each
 *                   internal engine.
 * HL_INFO_DEVICE_STATUS - Retrieve the device's status. This opcode doesn't
 *                         require an open context.
 * HL_INFO_DEVICE_UTILIZATION - Retrieve the total utilization of the device
 *                              over the last period specified by the user.
 *                              The period can be between 100ms and 1s, in
 *                              resolution of 100ms. The return value is a
 *                              percentage of the utilization rate.
 * HL_INFO_HW_EVENTS_AGGREGATE - Receive an array describing how many times
 *                               each event occurred since the driver was
 *                               loaded.
 * HL_INFO_CLK_RATE - Retrieve the current and maximum clock rate
 *                    of the device in MHz. The maximum clock rate is
 *                    configurable via a sysfs parameter.
 * HL_INFO_RESET_COUNT - Retrieve the counts of the soft and hard reset
 *                       operations performed on the device since the last
 *                       time the driver was loaded.
 * HL_INFO_TIME_SYNC - Retrieve the device's time alongside the host's time
 *                     for synchronization.
 * HL_INFO_CS_COUNTERS - Retrieve command submission counters
 * HL_INFO_PCI_COUNTERS - Retrieve PCI counters
 * HL_INFO_CLK_THROTTLE_REASON - Retrieve clock throttling reason
 * HL_INFO_SYNC_MANAGER - Retrieve sync manager info per dcore
 * HL_INFO_TOTAL_ENERGY - Retrieve total energy consumption
 */
#define HL_INFO_HW_IP_INFO		0
#define HL_INFO_HW_EVENTS		1
#define HL_INFO_DRAM_USAGE		2
#define HL_INFO_HW_IDLE			3
#define HL_INFO_DEVICE_STATUS		4
#define HL_INFO_DEVICE_UTILIZATION	6
#define HL_INFO_HW_EVENTS_AGGREGATE	7
#define HL_INFO_CLK_RATE		8
#define HL_INFO_RESET_COUNT		9
#define HL_INFO_TIME_SYNC		10
#define HL_INFO_CS_COUNTERS		11
#define HL_INFO_PCI_COUNTERS		12
#define HL_INFO_CLK_THROTTLE_REASON	13
#define HL_INFO_SYNC_MANAGER		14
#define HL_INFO_TOTAL_ENERGY		15

#define HL_INFO_VERSION_MAX_LEN		128
#define HL_INFO_CARD_NAME_MAX_LEN	16

struct hl_info_hw_ip_info {
	__u64 sram_base_address;
	__u64 dram_base_address;
	__u64 dram_size;
	__u32 sram_size;
	__u32 num_of_events;
	__u32 device_id; /* PCI Device ID */
	__u32 module_id; /* For mezzanine cards in servers (From OCP spec.) */
	__u32 reserved[2];
	__u32 cpld_version;
	__u32 psoc_pci_pll_nr;
	__u32 psoc_pci_pll_nf;
	__u32 psoc_pci_pll_od;
	__u32 psoc_pci_pll_div_factor;
	__u8 tpc_enabled_mask;
	__u8 dram_enabled;
	__u8 pad[2];
	__u8 cpucp_version[HL_INFO_VERSION_MAX_LEN];
	__u8 card_name[HL_INFO_CARD_NAME_MAX_LEN];
};

struct hl_info_dram_usage {
	__u64 dram_free_mem;
	__u64 ctx_dram_mem;
};

struct hl_info_hw_idle {
	__u32 is_idle;
	/*
	 * Bitmask of busy engines.
	 * Bits definition is according to `enum <chip>_engine_id'.
	 */
	__u32 busy_engines_mask;

	/*
	 * Extended Bitmask of busy engines.
	 * Bits definition is according to `enum <chip>_engine_id'.
	 */
	__u64 busy_engines_mask_ext;
};

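/*
 * Illustrative sketch (not part of the ABI definitions in this file):
 * checking whether a specific engine is busy, by testing the bit whose
 * position is the engine's value in `enum <chip>_engine_id'. Assumes "idle"
 * was already filled by a successful HL_INFO_HW_IDLE query.
 *
 *	struct hl_info_hw_idle idle;
 *
 *	// ... HL_INFO_HW_IDLE query into &idle goes here ...
 *
 *	if (!idle.is_idle &&
 *	    (idle.busy_engines_mask & (1U << GAUDI_ENGINE_ID_MME_0)))
 *		printf("MME_0 is busy\n");
 */
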
struct hl_info_device_status {
	__u32 status;
	__u32 pad;
};

struct hl_info_device_utilization {
	__u32 utilization;
	__u32 pad;
};

struct hl_info_clk_rate {
	__u32 cur_clk_rate_mhz;
	__u32 max_clk_rate_mhz;
};

struct hl_info_reset_count {
	__u32 hard_reset_cnt;
	__u32 soft_reset_cnt;
};

struct hl_info_time_sync {
	__u64 device_time;
	__u64 host_time;
};

/**
 * struct hl_info_pci_counters - pci counters
 * @rx_throughput: PCI rx throughput in KBps
 * @tx_throughput: PCI tx throughput in KBps
 * @replay_cnt: PCI replay counter
 */
struct hl_info_pci_counters {
	__u64 rx_throughput;
	__u64 tx_throughput;
	__u64 replay_cnt;
};

#define HL_CLK_THROTTLE_POWER	0x1
#define HL_CLK_THROTTLE_THERMAL	0x2

/**
 * struct hl_info_clk_throttle - clock throttling reason
 * @clk_throttling_reason: each bit represents a clk throttling reason
 */
struct hl_info_clk_throttle {
	__u32 clk_throttling_reason;
};

/**
 * struct hl_info_energy - device energy information
 * @total_energy_consumption: total device energy consumption
 */
struct hl_info_energy {
	__u64 total_energy_consumption;
};

/**
 * struct hl_info_sync_manager - sync manager information
 * @first_available_sync_object: first available sob
 * @first_available_monitor: first available monitor
 */
struct hl_info_sync_manager {
	__u32 first_available_sync_object;
	__u32 first_available_monitor;
};

/**
 * struct hl_cs_counters - command submission counters
 * @out_of_mem_drop_cnt: dropped due to memory allocation issue
 * @parsing_drop_cnt: dropped due to error in packet parsing
 * @queue_full_drop_cnt: dropped due to queue full
 * @device_in_reset_drop_cnt: dropped due to device in reset
 * @max_cs_in_flight_drop_cnt: dropped due to maximum CS in-flight
 */
struct hl_cs_counters {
	__u64 out_of_mem_drop_cnt;
	__u64 parsing_drop_cnt;
	__u64 queue_full_drop_cnt;
	__u64 device_in_reset_drop_cnt;
	__u64 max_cs_in_flight_drop_cnt;
};

struct hl_info_cs_counters {
	struct hl_cs_counters cs_counters;
	struct hl_cs_counters ctx_cs_counters;
};

enum gaudi_dcores {
	HL_GAUDI_WS_DCORE,
	HL_GAUDI_WN_DCORE,
	HL_GAUDI_EN_DCORE,
	HL_GAUDI_ES_DCORE
};

struct hl_info_args {
	/* Location of relevant struct in userspace */
	__u64 return_pointer;
	/*
	 * The size of the return value. Just like "size" in "snprintf",
	 * it limits how many bytes the kernel can write
	 *
	 * For hw_events array, the size should be
	 * hl_info_hw_ip_info.num_of_events * sizeof(__u32)
	 */
	__u32 return_size;

	/* HL_INFO_* */
	__u32 op;

	union {
		/* Dcore id for which the information is relevant.
		 * For Gaudi refer to `enum gaudi_dcores'
		 */
		__u32 dcore_id;
		/* Context ID - Currently not in use */
		__u32 ctx_id;
		/* Period value for utilization rate (100ms - 1000ms), in
		 * 100ms resolution.
		 */
		__u32 period_ms;
	};

	__u32 pad;
};

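/*
 * Usage sketch (illustrative only, error handling kept minimal): querying
 * the H/W IP information through the INFO IOCTL (HL_IOCTL_INFO, defined
 * further below). "fd" is assumed to be an open file descriptor of the
 * device.
 *
 *	struct hl_info_hw_ip_info hw_ip = {0};
 *	struct hl_info_args args = {0};
 *
 *	args.op = HL_INFO_HW_IP_INFO;
 *	args.return_pointer = (__u64) (uintptr_t) &hw_ip;
 *	args.return_size = sizeof(hw_ip);
 *
 *	if (ioctl(fd, HL_IOCTL_INFO, &args))
 *		return errno;
 *
 *	printf("SRAM base address: 0x%llx\n",
 *		(unsigned long long) hw_ip.sram_base_address);
 */
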
/* Opcode to create a new command buffer */
#define HL_CB_OP_CREATE		0
/* Opcode to destroy previously created command buffer */
#define HL_CB_OP_DESTROY	1

/* 2MB minus 32 bytes for 2xMSG_PROT */
#define HL_MAX_CB_SIZE		(0x200000 - 32)

/* Indicates whether the command buffer should be mapped to the device's MMU */
#define HL_CB_FLAGS_MAP		0x1

struct hl_cb_in {
	/* Handle of CB or 0 if we want to create one */
	__u64 cb_handle;
	/* HL_CB_OP_* */
	__u32 op;
	/* Size of CB. Maximum size is HL_MAX_CB_SIZE. The minimum size that
	 * will be allocated, regardless of this parameter's value, is
	 * PAGE_SIZE
	 */
	__u32 cb_size;
	/* Context ID - Currently not in use */
	__u32 ctx_id;
	/* HL_CB_FLAGS_* */
	__u32 flags;
};

struct hl_cb_out {
	/* Handle of CB */
	__u64 cb_handle;
};

union hl_cb_args {
	struct hl_cb_in in;
	struct hl_cb_out out;
};

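/*
 * Usage sketch (illustrative only): creating a command buffer through the CB
 * IOCTL (HL_IOCTL_CB, defined further below) and destroying it when done.
 * As described in the Command Buffer IOCTL comment below, the returned
 * handle is what user-space passes to mmap() in order to access the buffer.
 *
 *	union hl_cb_args cb = {0};
 *	__u64 handle;
 *
 *	cb.in.op = HL_CB_OP_CREATE;
 *	cb.in.cb_size = 0x1000;
 *	if (ioctl(fd, HL_IOCTL_CB, &cb))
 *		return errno;
 *	handle = cb.out.cb_handle;
 *
 *	// ... mmap the CB using the handle, fill it with packets, submit ...
 *
 *	memset(&cb, 0, sizeof(cb));
 *	cb.in.op = HL_CB_OP_DESTROY;
 *	cb.in.cb_handle = handle;
 *	ioctl(fd, HL_IOCTL_CB, &cb);
 */
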
/*
 * This structure size must always be fixed to 64-bytes for backward
 * compatibility
 */
struct hl_cs_chunk {
	union {
		/* For external queue, this represents a Handle of CB on the
		 * Host.
		 * For internal queue in Goya, this represents an SRAM or
		 * a DRAM address of the internal CB. In Gaudi, this might also
		 * represent a mapped host address of the CB.
		 *
		 * A mapped host address is in the device address space, after
		 * a host address was mapped by the device MMU.
		 */
		__u64 cb_handle;

		/* Relevant only when HL_CS_FLAGS_WAIT is set.
		 * This holds the address of an array of u64 values that
		 * contain signal CS sequence numbers. The wait described by
		 * this job will listen on all those signals (wait event per
		 * signal)
		 */
		__u64 signal_seq_arr;
	};

	/* Index of queue to put the CB on */
	__u32 queue_index;

	union {
		/*
		 * Size of command buffer with valid packets
		 * Can be smaller than the actual CB size
		 */
		__u32 cb_size;

		/* Relevant only when HL_CS_FLAGS_WAIT is set.
		 * Number of entries in signal_seq_arr
		 */
		__u32 num_signal_seq_arr;
	};

	/* HL_CS_CHUNK_FLAGS_* */
	__u32 cs_chunk_flags;

	/* Align structure to 64 bytes */
	__u32 pad[11];
};

/* SIGNAL and WAIT flags are mutually exclusive */
#define HL_CS_FLAGS_FORCE_RESTORE	0x1
#define HL_CS_FLAGS_SIGNAL		0x2
#define HL_CS_FLAGS_WAIT		0x4

#define HL_CS_STATUS_SUCCESS		0

#define HL_MAX_JOBS_PER_CS		512

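/*
 * Illustrative sketch of the signal/wait pairing described above (hedged;
 * the exact pairing of queues is application-specific). A "signal" CS is
 * first submitted with HL_CS_FLAGS_SIGNAL, and the sequence number it
 * returns is then referenced by a "wait" CS chunk through signal_seq_arr:
 *
 *	__u64 signal_seq;	// seq returned by the HL_CS_FLAGS_SIGNAL CS
 *	struct hl_cs_chunk wait_chunk = {0};
 *
 *	wait_chunk.queue_index = GAUDI_QUEUE_ID_DMA_0_0;
 *	wait_chunk.signal_seq_arr = (__u64) (uintptr_t) &signal_seq;
 *	wait_chunk.num_signal_seq_arr = 1;
 *
 *	// submit with cs_flags = HL_CS_FLAGS_WAIT (see union hl_cs_args below)
 */
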
struct hl_cs_in {

	/* Holds the address of an array of hl_cs_chunk for the restore
	 * phase
	 */
	__u64 chunks_restore;

	/* Holds the address of an array of hl_cs_chunk for the execution
	 * phase
	 */
	__u64 chunks_execute;

	/* Holds the address of an array of hl_cs_chunk for the store phase -
	 * Currently not in use
	 */
	__u64 chunks_store;

	/* Number of chunks in restore phase array. Maximum number is
	 * HL_MAX_JOBS_PER_CS
	 */
	__u32 num_chunks_restore;

	/* Number of chunks in execution array. Maximum number is
	 * HL_MAX_JOBS_PER_CS
	 */
	__u32 num_chunks_execute;

	/* Number of chunks in store phase array - Currently not in use */
	__u32 num_chunks_store;

	/* HL_CS_FLAGS_* */
	__u32 cs_flags;

	/* Context ID - Currently not in use */
	__u32 ctx_id;
};

struct hl_cs_out {
	/*
	 * seq holds the sequence number of the CS to pass to wait ioctl. All
	 * values are valid except for 0 and ULLONG_MAX
	 */
	__u64 seq;
	/* HL_CS_STATUS_* */
	__u32 status;
	__u32 pad;
};

union hl_cs_args {
	struct hl_cs_in in;
	struct hl_cs_out out;
};

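/*
 * Usage sketch (illustrative only): submitting a single JOB on external
 * queue DMA_0_0 through the CS IOCTL (HL_IOCTL_CS, defined further below).
 * "cb_handle" is assumed to be a handle returned by the CB IOCTL and
 * "cb_size" the number of valid bytes in that CB.
 *
 *	struct hl_cs_chunk chunk = {0};
 *	union hl_cs_args cs = {0};
 *
 *	chunk.cb_handle = cb_handle;
 *	chunk.queue_index = GAUDI_QUEUE_ID_DMA_0_0;
 *	chunk.cb_size = cb_size;
 *
 *	cs.in.chunks_execute = (__u64) (uintptr_t) &chunk;
 *	cs.in.num_chunks_execute = 1;
 *
 *	if (ioctl(fd, HL_IOCTL_CS, &cs) ||
 *	    cs.out.status != HL_CS_STATUS_SUCCESS)
 *		return -1;
 *	// cs.out.seq can now be passed to the Wait-for-CS IOCTL
 */
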
struct hl_wait_cs_in {
	/* Command submission sequence number */
	__u64 seq;
	/* Absolute timeout to wait in microseconds */
	__u64 timeout_us;
	/* Context ID - Currently not in use */
	__u32 ctx_id;
	__u32 pad;
};

#define HL_WAIT_CS_STATUS_COMPLETED	0
#define HL_WAIT_CS_STATUS_BUSY		1
#define HL_WAIT_CS_STATUS_TIMEDOUT	2
#define HL_WAIT_CS_STATUS_ABORTED	3
#define HL_WAIT_CS_STATUS_INTERRUPTED	4

struct hl_wait_cs_out {
	/* HL_WAIT_CS_STATUS_* */
	__u32 status;
	__u32 pad;
};

union hl_wait_cs_args {
	struct hl_wait_cs_in in;
	struct hl_wait_cs_out out;
};

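/*
 * Usage sketch (illustrative only): waiting for the CS submitted above to
 * finish, with a one-second timeout. "seq" is assumed to be the value that
 * was returned in hl_cs_out.seq.
 *
 *	union hl_wait_cs_args wait = {0};
 *
 *	wait.in.seq = seq;
 *	wait.in.timeout_us = 1000000;
 *
 *	if (ioctl(fd, HL_IOCTL_WAIT_CS, &wait))
 *		return errno;
 *	if (wait.out.status != HL_WAIT_CS_STATUS_COMPLETED)
 *		return -1;	// still busy, timed out or aborted
 */
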
/* Opcode to allocate device memory */
#define HL_MEM_OP_ALLOC		0
/* Opcode to free previously allocated device memory */
#define HL_MEM_OP_FREE		1
/* Opcode to map host and device memory */
#define HL_MEM_OP_MAP		2
/* Opcode to unmap previously mapped host and device memory */
#define HL_MEM_OP_UNMAP		3

/* Memory flags */
#define HL_MEM_CONTIGUOUS	0x1
#define HL_MEM_SHARED		0x2
#define HL_MEM_USERPTR		0x4

struct hl_mem_in {
	union {
		/* HL_MEM_OP_ALLOC - allocate device memory */
		struct {
			/* Size to alloc */
			__u64 mem_size;
		} alloc;

		/* HL_MEM_OP_FREE - free device memory */
		struct {
			/* Handle returned from HL_MEM_OP_ALLOC */
			__u64 handle;
		} free;

		/* HL_MEM_OP_MAP - map device memory */
		struct {
			/*
			 * Requested virtual address of mapped memory.
			 * The driver will try to map the requested region to
			 * this hint address, as long as the address is valid
			 * and not already mapped. The user should check the
			 * returned address of the IOCTL to make sure he got
			 * the hint address. Passing 0 here means that the
			 * driver will choose the address itself.
			 */
			__u64 hint_addr;
			/* Handle returned from HL_MEM_OP_ALLOC */
			__u64 handle;
		} map_device;

		/* HL_MEM_OP_MAP - map host memory */
		struct {
			/* Address of allocated host memory */
			__u64 host_virt_addr;
			/*
			 * Requested virtual address of mapped memory.
			 * The driver will try to map the requested region to
			 * this hint address, as long as the address is valid
			 * and not already mapped. The user should check the
			 * returned address of the IOCTL to make sure he got
			 * the hint address. Passing 0 here means that the
			 * driver will choose the address itself.
			 */
			__u64 hint_addr;
			/* Size of allocated host memory */
			__u64 mem_size;
		} map_host;

		/* HL_MEM_OP_UNMAP - unmap host memory */
		struct {
			/* Virtual address returned from HL_MEM_OP_MAP */
			__u64 device_virt_addr;
		} unmap;
	};

	/* HL_MEM_OP_* */
	__u32 op;
	/* HL_MEM_* flags */
	__u32 flags;
	/* Context ID - Currently not in use */
	__u32 ctx_id;
	__u32 pad;
};

struct hl_mem_out {
	union {
		/*
		 * Used for HL_MEM_OP_MAP as the virtual address that was
		 * assigned in the device VA space.
		 * A value of 0 means the requested operation failed.
		 */
		__u64 device_virt_addr;

		/*
		 * Used for HL_MEM_OP_ALLOC. This is the assigned
		 * handle for the allocated memory
		 */
		__u64 handle;
	};
};

union hl_mem_args {
	struct hl_mem_in in;
	struct hl_mem_out out;
};

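/*
 * Usage sketch (illustrative only): mapping user-allocated host memory to
 * the device MMU with the MEMORY IOCTL (HL_IOCTL_MEMORY, defined further
 * below). "host_buf" and "host_buf_size" are assumptions standing in for a
 * buffer the application allocated with malloc().
 *
 *	union hl_mem_args mem = {0};
 *
 *	mem.in.op = HL_MEM_OP_MAP;
 *	mem.in.flags = HL_MEM_USERPTR;
 *	mem.in.map_host.host_virt_addr = (__u64) (uintptr_t) host_buf;
 *	mem.in.map_host.mem_size = host_buf_size;
 *	mem.in.map_host.hint_addr = 0;	// let the driver choose the device VA
 *
 *	if (ioctl(fd, HL_IOCTL_MEMORY, &mem))
 *		return errno;
 *	// mem.out.device_virt_addr now holds the device VA of the mapping
 */
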
#define HL_DEBUG_MAX_AUX_VALUES		10

struct hl_debug_params_etr {
	/* Address in memory to allocate buffer */
	__u64 buffer_address;

	/* Size of buffer to allocate */
	__u64 buffer_size;

	/* Sink operation mode: SW fifo, HW fifo, Circular buffer */
	__u32 sink_mode;
	__u32 pad;
};

struct hl_debug_params_etf {
	/* Address in memory to allocate buffer */
	__u64 buffer_address;

	/* Size of buffer to allocate */
	__u64 buffer_size;

	/* Sink operation mode: SW fifo, HW fifo, Circular buffer */
	__u32 sink_mode;
	__u32 pad;
};

struct hl_debug_params_stm {
	/* Two bit masks for HW event and Stimulus Port */
	__u64 he_mask;
	__u64 sp_mask;

	/* Trace source ID */
	__u32 id;

	/* Frequency for the timestamp register */
	__u32 frequency;
};

struct hl_debug_params_bmon {
	/* Two address ranges that the user can request to filter */
	__u64 start_addr0;
	__u64 addr_mask0;

	__u64 start_addr1;
	__u64 addr_mask1;

	/* Capture window configuration */
	__u32 bw_win;
	__u32 win_capture;

	/* Trace source ID */
	__u32 id;
	__u32 pad;
};

struct hl_debug_params_spmu {
	/* Event types selection */
	__u64 event_types[HL_DEBUG_MAX_AUX_VALUES];

	/* Number of event types selection */
	__u32 event_types_num;
	__u32 pad;
};

/* Opcode for ETR component */
#define HL_DEBUG_OP_ETR		0
/* Opcode for ETF component */
#define HL_DEBUG_OP_ETF		1
/* Opcode for STM component */
#define HL_DEBUG_OP_STM		2
/* Opcode for FUNNEL component */
#define HL_DEBUG_OP_FUNNEL	3
/* Opcode for BMON component */
#define HL_DEBUG_OP_BMON	4
/* Opcode for SPMU component */
#define HL_DEBUG_OP_SPMU	5
/* Opcode for timestamp (deprecated) */
#define HL_DEBUG_OP_TIMESTAMP	6
/* Opcode for setting the device into or out of debug mode. The enable
 * variable should be 1 for enabling debug mode and 0 for disabling it
 */
#define HL_DEBUG_OP_SET_MODE	7

struct hl_debug_args {
	/*
	 * Pointer to user input structure.
	 * This field is relevant to specific opcodes.
	 */
	__u64 input_ptr;
	/* Pointer to user output structure */
	__u64 output_ptr;
	/* Size of user input structure */
	__u32 input_size;
	/* Size of user output structure */
	__u32 output_size;
	/* HL_DEBUG_OP_* */
	__u32 op;
	/*
	 * Register index in the component, taken from the debug_regs_index
	 * enum in the various ASIC header files
	 */
	__u32 reg_idx;
	/* Enable/disable */
	__u32 enable;
	/* Context ID - Currently not in use */
	__u32 ctx_id;
};

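/*
 * Usage sketch (illustrative only): putting the device into debug mode
 * before configuring any debug/profile engine, through the DEBUG IOCTL
 * (HL_IOCTL_DEBUG, defined further below), and leaving it when done.
 *
 *	struct hl_debug_args debug = {0};
 *
 *	debug.op = HL_DEBUG_OP_SET_MODE;
 *	debug.enable = 1;
 *	if (ioctl(fd, HL_IOCTL_DEBUG, &debug))
 *		return errno;
 *
 *	// ... configure ETR/ETF/STM/BMON/SPMU via their opcodes ...
 *
 *	debug.enable = 0;	// disable debug mode when done
 *	ioctl(fd, HL_IOCTL_DEBUG, &debug);
 */
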
/*
 * Various information operations such as:
 * - H/W IP information
 * - Current dram usage
 *
 * The user calls this IOCTL with an opcode that describes the required
 * information. The user should supply a pointer to a user-allocated memory
 * chunk, which will be filled by the driver with the requested information.
 *
 * The user supplies the maximum amount of size to copy into the user's memory,
 * in order to prevent data corruption in case of differences between the
 * definitions of structures in kernel and userspace, e.g. in case of old
 * userspace and new kernel driver
 */
#define HL_IOCTL_INFO	\
		_IOWR('H', 0x01, struct hl_info_args)

/*
 * Command Buffer
 * - Request a Command Buffer
 * - Destroy a Command Buffer
 *
 * The command buffers are memory blocks that reside in DMA-able address
 * space and are physically contiguous so they can be accessed by the device
 * directly. They are allocated using the coherent DMA API.
 *
 * When creating a new CB, the IOCTL returns a handle of it, and the user-space
 * process needs to use that handle to mmap the buffer so it can access it.
 *
 * In some instances, the device must access the command buffer through the
 * device's MMU, and thus its memory should be mapped. In these cases, the user
 * can indicate to the driver that such a mapping is required.
 * The resulting device virtual address will be used internally by the driver,
 * and won't be returned to the user.
 *
 */
#define HL_IOCTL_CB \
		_IOWR('H', 0x02, union hl_cb_args)

/*
 * Command Submission
 *
 * To submit work to the device, the user needs to call this IOCTL with a set
 * of JOBS. That set of JOBS constitutes a CS object.
 * Each JOB will be enqueued on a specific queue, according to the user's input.
 * There can be more than one JOB per queue.
 *
 * The CS IOCTL will receive three sets of JOBS. One set is for "restore" phase,
 * a second set is for "execution" phase and a third set is for "store" phase.
 * The JOBS on the "restore" phase are enqueued only after context-switch
 * (or if it's the first CS for this context). The user can also order the
 * driver to run the "restore" phase explicitly
 *
 * There are two types of queues - external and internal. External queues
 * are DMA queues which transfer data from/to the Host. All other queues are
 * internal. The driver will get completion notifications from the device only
 * on JOBS which are enqueued in the external queues.
 *
 * For jobs on external queues, the user needs to create command buffers
 * through the CB ioctl and give the CB's handle to the CS ioctl. For jobs on
 * internal queues, the user needs to prepare a "command buffer" with packets
 * on either the device SRAM/DRAM or the host, and give the device address of
 * that buffer to the CS ioctl.
 *
 * This IOCTL is asynchronous in regard to the actual execution of the CS. This
 * means it returns immediately after ALL the JOBS were enqueued on their
 * relevant queues. Therefore, the user mustn't assume the CS has been completed
 * or has even started to execute.
 *
 * Upon successful enqueue, the IOCTL returns a sequence number which the user
 * can use with the "Wait for CS" IOCTL to check whether the handle's CS
 * external JOBS have been completed. Note that if the CS has internal JOBS
 * which can execute AFTER the external JOBS have finished, the driver might
 * report that the CS has finished executing BEFORE the internal JOBS have
 * actually finished executing.
 *
 * Even though the sequence number increments per CS, the user can NOT
 * automatically assume that if CS with sequence number N finished, then CS
 * with sequence number N-1 also finished. The user can make this assumption if
 * and only if CS N and CS N-1 are exactly the same (same CBs for the same
 * queues).
 */
#define HL_IOCTL_CS \
		_IOWR('H', 0x03, union hl_cs_args)

/*
 * Wait for Command Submission
 *
 * The user can call this IOCTL with a handle it received from the CS IOCTL
 * to wait until the handle's CS has finished executing. The user will wait
 * inside the kernel until the CS has finished or until the user-requested
 * timeout has expired.
 *
 * If the timeout value is 0, the driver won't sleep at all. It will check
 * the status of the CS and return immediately.
 *
 * The return value of the IOCTL is a standard Linux error code. The possible
 * values are:
 *
 * EINTR     - Kernel waiting has been interrupted, e.g. due to OS signal
 *             that the user process received
 * ETIMEDOUT - The CS has caused a timeout on the device
 * EIO       - The CS was aborted (usually because the device was reset)
 * ENODEV    - The device wants to do hard-reset (so the user needs to close
 *             the FD)
 *
 * The driver also returns a custom define inside the IOCTL which can be:
 *
 * HL_WAIT_CS_STATUS_COMPLETED   - The CS has been completed successfully (0)
 * HL_WAIT_CS_STATUS_BUSY        - The CS is still executing (0)
 * HL_WAIT_CS_STATUS_TIMEDOUT    - The CS has caused a timeout on the device
 *                                 (ETIMEDOUT)
 * HL_WAIT_CS_STATUS_ABORTED     - The CS was aborted, usually because the
 *                                 device was reset (EIO)
 * HL_WAIT_CS_STATUS_INTERRUPTED - Waiting for the CS was interrupted (EINTR)
 *
 */

#define HL_IOCTL_WAIT_CS \
		_IOWR('H', 0x04, union hl_wait_cs_args)

/*
 * Memory
 * - Map host memory to device MMU
 * - Unmap host memory from device MMU
 *
 * This IOCTL allows the user to map host memory to the device MMU
 *
 * For host memory, the IOCTL doesn't allocate memory. The user is supposed
 * to allocate the memory in user-space (malloc/new). The driver pins the
 * physical pages (up to the allowed limit by the OS), assigns a virtual
 * address in the device VA space and initializes the device MMU.
 *
 * There is an option for the user to specify the requested virtual address.
 *
 */
#define HL_IOCTL_MEMORY \
		_IOWR('H', 0x05, union hl_mem_args)

/*
 * Debug
 * - Enable/disable the ETR/ETF/FUNNEL/STM/BMON/SPMU debug traces
 *
 * This IOCTL allows the user to get debug traces from the chip.
 *
 * Before the user can send configuration requests of the various
 * debug/profile engines, it needs to set the device into debug mode.
 * This is because the debug/profile infrastructure is a shared component in
 * the device and we can't allow multiple users to access it at the same time.
 *
 * Once a user sets the device into debug mode, the driver won't allow other
 * users to "work" with the device, i.e. open a FD. If there are already
 * multiple users working with the device, the driver won't allow any user to
 * debug the device.
 *
 * For each configuration request, the user needs to provide the register index
 * and essential data such as buffer address and size.
 *
 * Once the user has finished using the debug/profile engines, he should
 * set the device into non-debug mode, i.e. disable debug mode.
 *
 * The driver can decide to "kick out" the user if he abuses this interface.
 *
 */
#define HL_IOCTL_DEBUG \
		_IOWR('H', 0x06, struct hl_debug_args)

#define HL_COMMAND_START	0x01
#define HL_COMMAND_END		0x07

#endif /* HABANALABS_H_ */