// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2020-2022 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include "gaudi2P.h"
#include "gaudi2_masks.h"
#include "../include/gaudi2/gaudi2_special_blocks.h"
#include "../include/hw_ip/mmu/mmu_general.h"
#include "../include/hw_ip/mmu/mmu_v2_0.h"
#include "../include/gaudi2/gaudi2_packets.h"
#include "../include/gaudi2/gaudi2_reg_map.h"
#include "../include/gaudi2/gaudi2_async_ids_map_extended.h"
#include "../include/gaudi2/arc/gaudi2_arc_common_packets.h"

#include <linux/module.h>
#include <linux/pci.h>
#include <linux/hwmon.h>
#include <linux/iommu.h>

#define GAUDI2_DMA_POOL_BLK_SIZE		SZ_256		/* 256 bytes */

#define GAUDI2_RESET_TIMEOUT_MSEC		2000		/* 2000ms */

#define GAUDI2_RESET_POLL_TIMEOUT_USEC		500000		/* 500ms */
#define GAUDI2_PLDM_HRESET_TIMEOUT_MSEC		25000		/* 25s */
#define GAUDI2_PLDM_SRESET_TIMEOUT_MSEC		25000		/* 25s */
#define GAUDI2_PLDM_RESET_POLL_TIMEOUT_USEC	3000000		/* 3s */
#define GAUDI2_RESET_POLL_CNT			3
#define GAUDI2_RESET_WAIT_MSEC			1		/* 1ms */
#define GAUDI2_CPU_RESET_WAIT_MSEC		100		/* 100ms */
#define GAUDI2_PLDM_RESET_WAIT_MSEC		1000		/* 1s */
#define GAUDI2_CB_POOL_CB_CNT			512
#define GAUDI2_CB_POOL_CB_SIZE			SZ_128K		/* 128KB */
#define GAUDI2_MSG_TO_CPU_TIMEOUT_USEC		4000000		/* 4s */
#define GAUDI2_WAIT_FOR_BL_TIMEOUT_USEC		25000000	/* 25s */
#define GAUDI2_TEST_QUEUE_WAIT_USEC		100000		/* 100ms */
#define GAUDI2_PLDM_TEST_QUEUE_WAIT_USEC	1000000		/* 1s */

#define GAUDI2_ALLOC_CPU_MEM_RETRY_CNT		3

/*
 * The code already has built-in support for binning of up to MAX_FAULTY_TPCS
 * TPCs and relies on that value (for array sizes etc.), so we define another
 * value for the maximum number of faulty TPCs which reflects the cluster
 * binning requirements.
 */
#define MAX_CLUSTER_BINNING_FAULTY_TPCS		1
#define MAX_FAULTY_XBARS			1
#define MAX_FAULTY_EDMAS			1
#define MAX_FAULTY_DECODERS			1

#define GAUDI2_TPC_FULL_MASK			0x1FFFFFF
#define GAUDI2_HIF_HMMU_FULL_MASK		0xFFFF
#define GAUDI2_DECODER_FULL_MASK		0x3FF

#define GAUDI2_NA_EVENT_CAUSE			0xFF
#define GAUDI2_NUM_OF_QM_ERR_CAUSE		18
#define GAUDI2_NUM_OF_LOWER_QM_ERR_CAUSE	25
#define GAUDI2_NUM_OF_QM_ARB_ERR_CAUSE		3
#define GAUDI2_NUM_OF_ARC_SEI_ERR_CAUSE		14
#define GAUDI2_NUM_OF_CPU_SEI_ERR_CAUSE		3
#define GAUDI2_NUM_OF_QM_SEI_ERR_CAUSE		2
#define GAUDI2_NUM_OF_ROT_ERR_CAUSE		22
#define GAUDI2_NUM_OF_TPC_INTR_CAUSE		31
#define GAUDI2_NUM_OF_DEC_ERR_CAUSE		25
#define GAUDI2_NUM_OF_MME_ERR_CAUSE		16
#define GAUDI2_NUM_OF_MME_WAP_ERR_CAUSE		7
#define GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE	8
#define GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE		19
#define GAUDI2_NUM_OF_HBM_SEI_CAUSE		9
#define GAUDI2_NUM_OF_SM_SEI_ERR_CAUSE		3
#define GAUDI2_NUM_OF_PCIE_ADDR_DEC_ERR_CAUSE	3
#define GAUDI2_NUM_OF_PMMU_FATAL_ERR_CAUSE	2
#define GAUDI2_NUM_OF_HIF_FATAL_ERR_CAUSE	2
#define GAUDI2_NUM_OF_AXI_DRAIN_ERR_CAUSE	2
#define GAUDI2_NUM_OF_HBM_MC_SPI_CAUSE		5

#define GAUDI2_MMU_CACHE_INV_TIMEOUT_USEC	(MMU_CONFIG_TIMEOUT_USEC * 10)
#define GAUDI2_PLDM_MMU_TIMEOUT_USEC		(MMU_CONFIG_TIMEOUT_USEC * 200)
#define GAUDI2_ARB_WDT_TIMEOUT			(0x1000000)

#define GAUDI2_VDEC_TIMEOUT_USEC		10000		/* 10ms */
#define GAUDI2_PLDM_VDEC_TIMEOUT_USEC		(GAUDI2_VDEC_TIMEOUT_USEC * 100)

#define KDMA_TIMEOUT_USEC			USEC_PER_SEC

#define IS_DMA_IDLE(dma_core_sts0)	\
	(!((dma_core_sts0) & (DCORE0_EDMA0_CORE_STS0_BUSY_MASK)))

#define IS_DMA_HALTED(dma_core_sts1)	\
	((dma_core_sts1) & (DCORE0_EDMA0_CORE_STS1_IS_HALT_MASK))

#define IS_MME_IDLE(mme_arch_sts) (((mme_arch_sts) & MME_ARCH_IDLE_MASK) == MME_ARCH_IDLE_MASK)

#define IS_TPC_IDLE(tpc_cfg_sts) (((tpc_cfg_sts) & (TPC_IDLE_MASK)) == (TPC_IDLE_MASK))

#define IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts)	\
	((((qm_glbl_sts0) & (QM_IDLE_MASK)) == (QM_IDLE_MASK)) && \
	(((qm_glbl_sts1) & (QM_ARC_IDLE_MASK)) == (QM_ARC_IDLE_MASK)) && \
	(((qm_cgm_sts) & (CGM_IDLE_MASK)) == (CGM_IDLE_MASK)))

#define PCIE_DEC_EN_MASK			0x300
#define DEC_WORK_STATE_IDLE			0
#define DEC_WORK_STATE_PEND			3
#define IS_DEC_IDLE(dec_swreg15) \
	(((dec_swreg15) & DCORE0_DEC0_CMD_SWREG15_SW_WORK_STATE_MASK) == DEC_WORK_STATE_IDLE || \
	((dec_swreg15) & DCORE0_DEC0_CMD_SWREG15_SW_WORK_STATE_MASK) == DEC_WORK_STATE_PEND)

/* HBM MMU address scrambling parameters */
#define GAUDI2_HBM_MMU_SCRM_MEM_SIZE		SZ_8M
#define GAUDI2_HBM_MMU_SCRM_DIV_SHIFT		26
#define GAUDI2_HBM_MMU_SCRM_MOD_SHIFT		0
#define GAUDI2_HBM_MMU_SCRM_ADDRESS_MASK	DRAM_VA_HINT_MASK
#define GAUDI2_COMPENSATE_TLB_PAGE_SIZE_FACTOR	16
#define MMU_RANGE_INV_VA_LSB_SHIFT		12
#define MMU_RANGE_INV_VA_MSB_SHIFT		44
#define MMU_RANGE_INV_EN_SHIFT			0
#define MMU_RANGE_INV_ASID_EN_SHIFT		1
#define MMU_RANGE_INV_ASID_SHIFT		2

/* The last SPI_SEI cause bit, "burst_fifo_full", is expected to be triggered in the PMMU
 * because it has only a 2-entry FIFO, and hence it is not enabled for it.
 */
#define GAUDI2_PMMU_SPI_SEI_ENABLE_MASK		GENMASK(GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE - 2, 0)
#define GAUDI2_HMMU_SPI_SEI_ENABLE_MASK		GENMASK(GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE - 1, 0)
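/*
 * Worked example: with GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE == 19, the PMMU mask
 * is GENMASK(17, 0) == 0x3FFFF (all causes except bit 18, "burst_fifo_full"),
 * while the HMMU mask is GENMASK(18, 0) == 0x7FFFF (all 19 causes enabled).
 */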

#define GAUDI2_MAX_STRING_LEN			64

#define GAUDI2_VDEC_MSIX_ENTRIES		(GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM - \
							GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM + 1)

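/* Stride between the engine-ID ranges of two consecutive DCOREs */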
#define ENGINE_ID_DCORE_OFFSET (GAUDI2_DCORE1_ENGINE_ID_EDMA_0 - GAUDI2_DCORE0_ENGINE_ID_EDMA_0)

/* RAZWI initiator coordinates */
#define RAZWI_GET_AXUSER_XY(x) \
	((x & 0xF8001FF0) >> 4)

#define RAZWI_GET_AXUSER_LOW_XY(x) \
	((x & 0x00001FF0) >> 4)

#define RAZWI_INITIATOR_AXUER_L_X_SHIFT		0
#define RAZWI_INITIATOR_AXUER_L_X_MASK		0x1F
#define RAZWI_INITIATOR_AXUER_L_Y_SHIFT		5
#define RAZWI_INITIATOR_AXUER_L_Y_MASK		0xF

#define RAZWI_INITIATOR_AXUER_H_X_SHIFT		23
#define RAZWI_INITIATOR_AXUER_H_X_MASK		0x1F

#define RAZWI_INITIATOR_ID_X_Y_LOW(x, y) \
	((((y) & RAZWI_INITIATOR_AXUER_L_Y_MASK) << RAZWI_INITIATOR_AXUER_L_Y_SHIFT) | \
		(((x) & RAZWI_INITIATOR_AXUER_L_X_MASK) << RAZWI_INITIATOR_AXUER_L_X_SHIFT))

#define RAZWI_INITIATOR_ID_X_HIGH(x) \
	(((x) & RAZWI_INITIATOR_AXUER_H_X_MASK) << RAZWI_INITIATOR_AXUER_H_X_SHIFT)

#define RAZWI_INITIATOR_ID_X_Y(xl, yl, xh) \
	(RAZWI_INITIATOR_ID_X_Y_LOW(xl, yl) | RAZWI_INITIATOR_ID_X_HIGH(xh))
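/*
 * Worked example: RAZWI_INITIATOR_ID_X_Y(2, 4, 0) packs X-low = 2 into
 * bits [4:0] and Y-low = 4 into bits [8:5], giving (4 << 5) | 2 == 0x82;
 * a non-zero X-high would be packed into bits [27:23].
 */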

#define PSOC_RAZWI_ENG_STR_SIZE			128
#define PSOC_RAZWI_MAX_ENG_PER_RTR		5

/* HW scrambles only bits 0-25 */
#define HW_UNSCRAMBLED_BITS_MASK		GENMASK_ULL(63, 26)

#define GAUDI2_GLBL_ERR_MAX_CAUSE_NUM		17

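/**
 * struct gaudi2_razwi_info - maps a RAZWI initiator to its engine
 * @axuser_xy: initiator X/Y coordinates, packed by RAZWI_INITIATOR_ID_X_Y()
 * @rtr_ctrl: base address of the RTR control block that reports the initiator
 * @eng_id: engine ID of the initiator, or GAUDI2_ENGINE_ID_SIZE when the
 *          initiator does not map to a user engine
 * @eng_name: human-readable initiator name
 */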
struct gaudi2_razwi_info {
	u32 axuser_xy;
	u32 rtr_ctrl;
	u16 eng_id;
	char *eng_name;
};

static struct gaudi2_razwi_info common_razwi_info[] = {
		{RAZWI_INITIATOR_ID_X_Y(2, 4, 0), mmDCORE0_RTR0_CTRL_BASE,
				GAUDI2_DCORE0_ENGINE_ID_DEC_0, "DEC0"},
		{RAZWI_INITIATOR_ID_X_Y(2, 4, 4), mmDCORE0_RTR0_CTRL_BASE,
				GAUDI2_DCORE0_ENGINE_ID_DEC_1, "DEC1"},
		{RAZWI_INITIATOR_ID_X_Y(17, 4, 18), mmDCORE1_RTR7_CTRL_BASE,
				GAUDI2_DCORE1_ENGINE_ID_DEC_0, "DEC2"},
		{RAZWI_INITIATOR_ID_X_Y(17, 4, 14), mmDCORE1_RTR7_CTRL_BASE,
				GAUDI2_DCORE1_ENGINE_ID_DEC_1, "DEC3"},
		{RAZWI_INITIATOR_ID_X_Y(2, 11, 0), mmDCORE2_RTR0_CTRL_BASE,
				GAUDI2_DCORE2_ENGINE_ID_DEC_0, "DEC4"},
		{RAZWI_INITIATOR_ID_X_Y(2, 11, 4), mmDCORE2_RTR0_CTRL_BASE,
				GAUDI2_DCORE2_ENGINE_ID_DEC_1, "DEC5"},
		{RAZWI_INITIATOR_ID_X_Y(17, 11, 18), mmDCORE3_RTR7_CTRL_BASE,
				GAUDI2_DCORE3_ENGINE_ID_DEC_0, "DEC6"},
		{RAZWI_INITIATOR_ID_X_Y(17, 11, 14), mmDCORE3_RTR7_CTRL_BASE,
				GAUDI2_DCORE3_ENGINE_ID_DEC_1, "DEC7"},
		{RAZWI_INITIATOR_ID_X_Y(2, 4, 6), mmDCORE0_RTR0_CTRL_BASE,
				GAUDI2_PCIE_ENGINE_ID_DEC_0, "DEC8"},
		{RAZWI_INITIATOR_ID_X_Y(2, 4, 7), mmDCORE0_RTR0_CTRL_BASE,
				GAUDI2_PCIE_ENGINE_ID_DEC_1, "DEC9"},
		{RAZWI_INITIATOR_ID_X_Y(3, 4, 2), mmDCORE0_RTR1_CTRL_BASE,
				GAUDI2_DCORE0_ENGINE_ID_TPC_0, "TPC0"},
		{RAZWI_INITIATOR_ID_X_Y(3, 4, 4), mmDCORE0_RTR1_CTRL_BASE,
				GAUDI2_DCORE0_ENGINE_ID_TPC_1, "TPC1"},
		{RAZWI_INITIATOR_ID_X_Y(4, 4, 2), mmDCORE0_RTR2_CTRL_BASE,
				GAUDI2_DCORE0_ENGINE_ID_TPC_2, "TPC2"},
		{RAZWI_INITIATOR_ID_X_Y(4, 4, 4), mmDCORE0_RTR2_CTRL_BASE,
				GAUDI2_DCORE0_ENGINE_ID_TPC_3, "TPC3"},
		{RAZWI_INITIATOR_ID_X_Y(5, 4, 2), mmDCORE0_RTR3_CTRL_BASE,
				GAUDI2_DCORE0_ENGINE_ID_TPC_4, "TPC4"},
		{RAZWI_INITIATOR_ID_X_Y(5, 4, 4), mmDCORE0_RTR3_CTRL_BASE,
				GAUDI2_DCORE0_ENGINE_ID_TPC_5, "TPC5"},
		{RAZWI_INITIATOR_ID_X_Y(16, 4, 14), mmDCORE1_RTR6_CTRL_BASE,
				GAUDI2_DCORE1_ENGINE_ID_TPC_0, "TPC6"},
		{RAZWI_INITIATOR_ID_X_Y(16, 4, 16), mmDCORE1_RTR6_CTRL_BASE,
				GAUDI2_DCORE1_ENGINE_ID_TPC_1, "TPC7"},
		{RAZWI_INITIATOR_ID_X_Y(15, 4, 14), mmDCORE1_RTR5_CTRL_BASE,
				GAUDI2_DCORE1_ENGINE_ID_TPC_2, "TPC8"},
		{RAZWI_INITIATOR_ID_X_Y(15, 4, 16), mmDCORE1_RTR5_CTRL_BASE,
				GAUDI2_DCORE1_ENGINE_ID_TPC_3, "TPC9"},
		{RAZWI_INITIATOR_ID_X_Y(14, 4, 14), mmDCORE1_RTR4_CTRL_BASE,
				GAUDI2_DCORE1_ENGINE_ID_TPC_4, "TPC10"},
		{RAZWI_INITIATOR_ID_X_Y(14, 4, 16), mmDCORE1_RTR4_CTRL_BASE,
				GAUDI2_DCORE1_ENGINE_ID_TPC_5, "TPC11"},
		{RAZWI_INITIATOR_ID_X_Y(5, 11, 2), mmDCORE2_RTR3_CTRL_BASE,
				GAUDI2_DCORE2_ENGINE_ID_TPC_0, "TPC12"},
		{RAZWI_INITIATOR_ID_X_Y(5, 11, 4), mmDCORE2_RTR3_CTRL_BASE,
				GAUDI2_DCORE2_ENGINE_ID_TPC_1, "TPC13"},
		{RAZWI_INITIATOR_ID_X_Y(4, 11, 2), mmDCORE2_RTR2_CTRL_BASE,
				GAUDI2_DCORE2_ENGINE_ID_TPC_2, "TPC14"},
		{RAZWI_INITIATOR_ID_X_Y(4, 11, 4), mmDCORE2_RTR2_CTRL_BASE,
				GAUDI2_DCORE2_ENGINE_ID_TPC_3, "TPC15"},
		{RAZWI_INITIATOR_ID_X_Y(3, 11, 2), mmDCORE2_RTR1_CTRL_BASE,
				GAUDI2_DCORE2_ENGINE_ID_TPC_4, "TPC16"},
		{RAZWI_INITIATOR_ID_X_Y(3, 11, 4), mmDCORE2_RTR1_CTRL_BASE,
				GAUDI2_DCORE2_ENGINE_ID_TPC_5, "TPC17"},
		{RAZWI_INITIATOR_ID_X_Y(14, 11, 14), mmDCORE3_RTR4_CTRL_BASE,
				GAUDI2_DCORE3_ENGINE_ID_TPC_0, "TPC18"},
		{RAZWI_INITIATOR_ID_X_Y(14, 11, 16), mmDCORE3_RTR4_CTRL_BASE,
				GAUDI2_DCORE3_ENGINE_ID_TPC_1, "TPC19"},
		{RAZWI_INITIATOR_ID_X_Y(15, 11, 14), mmDCORE3_RTR5_CTRL_BASE,
				GAUDI2_DCORE3_ENGINE_ID_TPC_2, "TPC20"},
		{RAZWI_INITIATOR_ID_X_Y(15, 11, 16), mmDCORE3_RTR5_CTRL_BASE,
				GAUDI2_DCORE3_ENGINE_ID_TPC_3, "TPC21"},
		{RAZWI_INITIATOR_ID_X_Y(16, 11, 14), mmDCORE3_RTR6_CTRL_BASE,
				GAUDI2_DCORE3_ENGINE_ID_TPC_4, "TPC22"},
		{RAZWI_INITIATOR_ID_X_Y(16, 11, 16), mmDCORE3_RTR6_CTRL_BASE,
				GAUDI2_DCORE3_ENGINE_ID_TPC_5, "TPC23"},
		{RAZWI_INITIATOR_ID_X_Y(2, 4, 2), mmDCORE0_RTR0_CTRL_BASE,
				GAUDI2_DCORE0_ENGINE_ID_TPC_6, "TPC24"},
		{RAZWI_INITIATOR_ID_X_Y(17, 4, 8), mmDCORE1_RTR7_CTRL_BASE,
				GAUDI2_ENGINE_ID_NIC0_0, "NIC0"},
		{RAZWI_INITIATOR_ID_X_Y(17, 4, 10), mmDCORE1_RTR7_CTRL_BASE,
				GAUDI2_ENGINE_ID_NIC0_1, "NIC1"},
		{RAZWI_INITIATOR_ID_X_Y(17, 4, 12), mmDCORE1_RTR7_CTRL_BASE,
				GAUDI2_ENGINE_ID_NIC1_0, "NIC2"},
		{RAZWI_INITIATOR_ID_X_Y(17, 4, 14), mmDCORE1_RTR7_CTRL_BASE,
				GAUDI2_ENGINE_ID_NIC1_1, "NIC3"},
		{RAZWI_INITIATOR_ID_X_Y(17, 4, 15), mmDCORE1_RTR7_CTRL_BASE,
				GAUDI2_ENGINE_ID_NIC2_0, "NIC4"},
		{RAZWI_INITIATOR_ID_X_Y(2, 11, 2), mmDCORE2_RTR0_CTRL_BASE,
				GAUDI2_ENGINE_ID_NIC2_1, "NIC5"},
		{RAZWI_INITIATOR_ID_X_Y(2, 11, 4), mmDCORE2_RTR0_CTRL_BASE,
				GAUDI2_ENGINE_ID_NIC3_0, "NIC6"},
		{RAZWI_INITIATOR_ID_X_Y(2, 11, 6), mmDCORE2_RTR0_CTRL_BASE,
				GAUDI2_ENGINE_ID_NIC3_1, "NIC7"},
		{RAZWI_INITIATOR_ID_X_Y(2, 11, 8), mmDCORE2_RTR0_CTRL_BASE,
				GAUDI2_ENGINE_ID_NIC4_0, "NIC8"},
		{RAZWI_INITIATOR_ID_X_Y(17, 11, 12), mmDCORE3_RTR7_CTRL_BASE,
				GAUDI2_ENGINE_ID_NIC4_1, "NIC9"},
		{RAZWI_INITIATOR_ID_X_Y(17, 11, 14), mmDCORE3_RTR7_CTRL_BASE,
				GAUDI2_ENGINE_ID_NIC5_0, "NIC10"},
		{RAZWI_INITIATOR_ID_X_Y(17, 11, 16), mmDCORE3_RTR7_CTRL_BASE,
				GAUDI2_ENGINE_ID_NIC5_1, "NIC11"},
		{RAZWI_INITIATOR_ID_X_Y(2, 4, 2), mmDCORE0_RTR0_CTRL_BASE,
				GAUDI2_ENGINE_ID_PDMA_0, "PDMA0"},
		{RAZWI_INITIATOR_ID_X_Y(2, 4, 3), mmDCORE0_RTR0_CTRL_BASE,
				GAUDI2_ENGINE_ID_PDMA_1, "PDMA1"},
		{RAZWI_INITIATOR_ID_X_Y(2, 4, 4), mmDCORE0_RTR0_CTRL_BASE,
				GAUDI2_ENGINE_ID_SIZE, "PMMU"},
		{RAZWI_INITIATOR_ID_X_Y(2, 4, 5), mmDCORE0_RTR0_CTRL_BASE,
				GAUDI2_ENGINE_ID_SIZE, "PCIE"},
		{RAZWI_INITIATOR_ID_X_Y(17, 4, 16), mmDCORE1_RTR7_CTRL_BASE,
				GAUDI2_ENGINE_ID_ARC_FARM, "ARC_FARM"},
		{RAZWI_INITIATOR_ID_X_Y(17, 4, 17), mmDCORE1_RTR7_CTRL_BASE,
				GAUDI2_ENGINE_ID_KDMA, "KDMA"},
		{RAZWI_INITIATOR_ID_X_Y(1, 5, 1), mmSFT0_HBW_RTR_IF1_RTR_CTRL_BASE,
				GAUDI2_DCORE0_ENGINE_ID_EDMA_0, "EDMA0"},
		{RAZWI_INITIATOR_ID_X_Y(1, 5, 1), mmSFT0_HBW_RTR_IF0_RTR_CTRL_BASE,
				GAUDI2_DCORE0_ENGINE_ID_EDMA_1, "EDMA1"},
		{RAZWI_INITIATOR_ID_X_Y(18, 5, 18), mmSFT1_HBW_RTR_IF1_RTR_CTRL_BASE,
				GAUDI2_DCORE1_ENGINE_ID_EDMA_0, "EDMA2"},
		{RAZWI_INITIATOR_ID_X_Y(18, 5, 18), mmSFT1_HBW_RTR_IF0_RTR_CTRL_BASE,
				GAUDI2_DCORE1_ENGINE_ID_EDMA_1, "EDMA3"},
		{RAZWI_INITIATOR_ID_X_Y(1, 10, 1), mmSFT2_HBW_RTR_IF0_RTR_CTRL_BASE,
				GAUDI2_DCORE2_ENGINE_ID_EDMA_0, "EDMA4"},
		{RAZWI_INITIATOR_ID_X_Y(1, 10, 1), mmSFT2_HBW_RTR_IF1_RTR_CTRL_BASE,
				GAUDI2_DCORE2_ENGINE_ID_EDMA_1, "EDMA5"},
		{RAZWI_INITIATOR_ID_X_Y(18, 10, 18), mmSFT3_HBW_RTR_IF0_RTR_CTRL_BASE,
				GAUDI2_DCORE3_ENGINE_ID_EDMA_0, "EDMA6"},
		{RAZWI_INITIATOR_ID_X_Y(18, 10, 18), mmSFT3_HBW_RTR_IF1_RTR_CTRL_BASE,
				GAUDI2_DCORE3_ENGINE_ID_EDMA_1, "EDMA7"},
		{RAZWI_INITIATOR_ID_X_Y(1, 5, 0), mmDCORE0_RTR0_CTRL_BASE,
				GAUDI2_ENGINE_ID_SIZE, "HMMU0"},
		{RAZWI_INITIATOR_ID_X_Y(18, 5, 19), mmDCORE1_RTR7_CTRL_BASE,
				GAUDI2_ENGINE_ID_SIZE, "HMMU1"},
		{RAZWI_INITIATOR_ID_X_Y(1, 5, 0), mmDCORE0_RTR0_CTRL_BASE,
				GAUDI2_ENGINE_ID_SIZE, "HMMU2"},
		{RAZWI_INITIATOR_ID_X_Y(18, 5, 19), mmDCORE1_RTR7_CTRL_BASE,
				GAUDI2_ENGINE_ID_SIZE, "HMMU3"},
		{RAZWI_INITIATOR_ID_X_Y(1, 5, 0), mmDCORE0_RTR0_CTRL_BASE,
				GAUDI2_ENGINE_ID_SIZE, "HMMU4"},
		{RAZWI_INITIATOR_ID_X_Y(18, 5, 19), mmDCORE1_RTR7_CTRL_BASE,
				GAUDI2_ENGINE_ID_SIZE, "HMMU5"},
		{RAZWI_INITIATOR_ID_X_Y(1, 5, 0), mmDCORE0_RTR0_CTRL_BASE,
				GAUDI2_ENGINE_ID_SIZE, "HMMU6"},
		{RAZWI_INITIATOR_ID_X_Y(18, 5, 19), mmDCORE1_RTR7_CTRL_BASE,
				GAUDI2_ENGINE_ID_SIZE, "HMMU7"},
		{RAZWI_INITIATOR_ID_X_Y(1, 10, 0), mmDCORE2_RTR0_CTRL_BASE,
				GAUDI2_ENGINE_ID_SIZE, "HMMU8"},
		{RAZWI_INITIATOR_ID_X_Y(18, 10, 19), mmDCORE3_RTR7_CTRL_BASE,
				GAUDI2_ENGINE_ID_SIZE, "HMMU9"},
		{RAZWI_INITIATOR_ID_X_Y(1, 10, 0), mmDCORE2_RTR0_CTRL_BASE,
				GAUDI2_ENGINE_ID_SIZE, "HMMU10"},
		{RAZWI_INITIATOR_ID_X_Y(18, 10, 19), mmDCORE3_RTR7_CTRL_BASE,
				GAUDI2_ENGINE_ID_SIZE, "HMMU11"},
		{RAZWI_INITIATOR_ID_X_Y(1, 10, 0), mmDCORE2_RTR0_CTRL_BASE,
				GAUDI2_ENGINE_ID_SIZE, "HMMU12"},
		{RAZWI_INITIATOR_ID_X_Y(18, 10, 19), mmDCORE3_RTR7_CTRL_BASE,
				GAUDI2_ENGINE_ID_SIZE, "HMMU13"},
		{RAZWI_INITIATOR_ID_X_Y(1, 10, 0), mmDCORE2_RTR0_CTRL_BASE,
				GAUDI2_ENGINE_ID_SIZE, "HMMU14"},
		{RAZWI_INITIATOR_ID_X_Y(18, 10, 19), mmDCORE3_RTR7_CTRL_BASE,
				GAUDI2_ENGINE_ID_SIZE, "HMMU15"},
		{RAZWI_INITIATOR_ID_X_Y(2, 11, 2), mmDCORE2_RTR0_CTRL_BASE,
				GAUDI2_ENGINE_ID_ROT_0, "ROT0"},
		{RAZWI_INITIATOR_ID_X_Y(17, 11, 16), mmDCORE3_RTR7_CTRL_BASE,
				GAUDI2_ENGINE_ID_ROT_1, "ROT1"},
		{RAZWI_INITIATOR_ID_X_Y(2, 11, 2), mmDCORE2_RTR0_CTRL_BASE,
				GAUDI2_ENGINE_ID_PSOC, "CPU"},
		{RAZWI_INITIATOR_ID_X_Y(17, 11, 11), mmDCORE3_RTR7_CTRL_BASE,
				GAUDI2_ENGINE_ID_PSOC, "PSOC"}
};

static struct gaudi2_razwi_info mme_razwi_info[] = {
		/* MME X high coordinate is N/A, hence using only low coordinates */
		{RAZWI_INITIATOR_ID_X_Y_LOW(7, 4), mmDCORE0_RTR5_CTRL_BASE,
				GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_WAP0"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(9, 4), mmDCORE0_RTR7_CTRL_BASE,
				GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_WAP1"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(8, 4), mmDCORE0_RTR6_CTRL_BASE,
				GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_CTRL_WR"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(9, 4), mmDCORE0_RTR7_CTRL_BASE,
				GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_CTRL_RD"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(6, 4), mmDCORE0_RTR4_CTRL_BASE,
				GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_SBTE0"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(6, 4), mmDCORE0_RTR4_CTRL_BASE,
				GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_SBTE1"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(7, 4), mmDCORE0_RTR5_CTRL_BASE,
				GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_SBTE2"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(8, 4), mmDCORE0_RTR6_CTRL_BASE,
				GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_SBTE3"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(9, 4), mmDCORE0_RTR7_CTRL_BASE,
				GAUDI2_DCORE0_ENGINE_ID_MME, "MME0_SBTE4"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(12, 4), mmDCORE1_RTR2_CTRL_BASE,
				GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_WAP0"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(10, 4), mmDCORE1_RTR0_CTRL_BASE,
				GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_WAP1"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(11, 4), mmDCORE1_RTR1_CTRL_BASE,
				GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_CTRL_WR"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(10, 4), mmDCORE1_RTR0_CTRL_BASE,
				GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_CTRL_RD"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(13, 4), mmDCORE1_RTR3_CTRL_BASE,
				GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_SBTE0"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(13, 4), mmDCORE1_RTR3_CTRL_BASE,
				GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_SBTE1"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(12, 4), mmDCORE1_RTR2_CTRL_BASE,
				GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_SBTE2"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(11, 4), mmDCORE1_RTR1_CTRL_BASE,
				GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_SBTE3"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(10, 4), mmDCORE1_RTR0_CTRL_BASE,
				GAUDI2_DCORE1_ENGINE_ID_MME, "MME1_SBTE4"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(7, 11), mmDCORE2_RTR5_CTRL_BASE,
				GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_WAP0"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(9, 11), mmDCORE2_RTR7_CTRL_BASE,
				GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_WAP1"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(8, 11), mmDCORE2_RTR6_CTRL_BASE,
				GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_CTRL_WR"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(9, 11), mmDCORE2_RTR7_CTRL_BASE,
				GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_CTRL_RD"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(6, 11), mmDCORE2_RTR4_CTRL_BASE,
				GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_SBTE0"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(6, 11), mmDCORE2_RTR4_CTRL_BASE,
				GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_SBTE1"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(7, 11), mmDCORE2_RTR5_CTRL_BASE,
				GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_SBTE2"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(8, 11), mmDCORE2_RTR6_CTRL_BASE,
				GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_SBTE3"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(9, 11), mmDCORE2_RTR7_CTRL_BASE,
				GAUDI2_DCORE2_ENGINE_ID_MME, "MME2_SBTE4"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(12, 11), mmDCORE3_RTR2_CTRL_BASE,
				GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_WAP0"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(10, 11), mmDCORE3_RTR0_CTRL_BASE,
				GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_WAP1"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(11, 11), mmDCORE3_RTR1_CTRL_BASE,
				GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_CTRL_WR"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(10, 11), mmDCORE3_RTR0_CTRL_BASE,
				GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_CTRL_RD"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(13, 11), mmDCORE3_RTR3_CTRL_BASE,
				GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_SBTE0"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(13, 11), mmDCORE3_RTR3_CTRL_BASE,
				GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_SBTE1"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(12, 11), mmDCORE3_RTR2_CTRL_BASE,
				GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_SBTE2"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(11, 11), mmDCORE3_RTR1_CTRL_BASE,
				GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_SBTE3"},
		{RAZWI_INITIATOR_ID_X_Y_LOW(10, 11), mmDCORE3_RTR0_CTRL_BASE,
				GAUDI2_DCORE3_ENGINE_ID_MME, "MME3_SBTE4"}
};

enum hl_pmmu_fatal_cause {
	LATENCY_RD_OUT_FIFO_OVERRUN,
	LATENCY_WR_OUT_FIFO_OVERRUN,
};

enum hl_pcie_drain_ind_cause {
	LBW_AXI_DRAIN_IND,
	HBW_AXI_DRAIN_IND
};

static const u32 cluster_hmmu_hif_enabled_mask[GAUDI2_HBM_NUM] = {
	[HBM_ID0] = 0xFFFC,
	[HBM_ID1] = 0xFFCF,
	[HBM_ID2] = 0xF7F7,
	[HBM_ID3] = 0x7F7F,
	[HBM_ID4] = 0xFCFF,
	[HBM_ID5] = 0xCFFF,
};

static const u8 xbar_edge_to_hbm_cluster[EDMA_ID_SIZE] = {
	[0] = HBM_ID0,
	[1] = HBM_ID1,
	[2] = HBM_ID4,
	[3] = HBM_ID5,
};

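/*
 * Association of each EDMA instance with an HBM cluster, used when binning
 * faulty EDMAs per cluster (see MAX_FAULTY_EDMAS above).
 */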
static const u8 edma_to_hbm_cluster[EDMA_ID_SIZE] = {
	[EDMA_ID_DCORE0_INSTANCE0] = HBM_ID0,
	[EDMA_ID_DCORE0_INSTANCE1] = HBM_ID2,
	[EDMA_ID_DCORE1_INSTANCE0] = HBM_ID1,
	[EDMA_ID_DCORE1_INSTANCE1] = HBM_ID3,
	[EDMA_ID_DCORE2_INSTANCE0] = HBM_ID2,
	[EDMA_ID_DCORE2_INSTANCE1] = HBM_ID4,
	[EDMA_ID_DCORE3_INSTANCE0] = HBM_ID3,
	[EDMA_ID_DCORE3_INSTANCE1] = HBM_ID5,
};

static const int gaudi2_qman_async_event_id[] = {
	[GAUDI2_QUEUE_ID_PDMA_0_0] = GAUDI2_EVENT_PDMA0_QM,
	[GAUDI2_QUEUE_ID_PDMA_0_1] = GAUDI2_EVENT_PDMA0_QM,
	[GAUDI2_QUEUE_ID_PDMA_0_2] = GAUDI2_EVENT_PDMA0_QM,
	[GAUDI2_QUEUE_ID_PDMA_0_3] = GAUDI2_EVENT_PDMA0_QM,
	[GAUDI2_QUEUE_ID_PDMA_1_0] = GAUDI2_EVENT_PDMA1_QM,
	[GAUDI2_QUEUE_ID_PDMA_1_1] = GAUDI2_EVENT_PDMA1_QM,
	[GAUDI2_QUEUE_ID_PDMA_1_2] = GAUDI2_EVENT_PDMA1_QM,
	[GAUDI2_QUEUE_ID_PDMA_1_3] = GAUDI2_EVENT_PDMA1_QM,
	[GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0] = GAUDI2_EVENT_HDMA0_QM,
	[GAUDI2_QUEUE_ID_DCORE0_EDMA_0_1] = GAUDI2_EVENT_HDMA0_QM,
	[GAUDI2_QUEUE_ID_DCORE0_EDMA_0_2] = GAUDI2_EVENT_HDMA0_QM,
	[GAUDI2_QUEUE_ID_DCORE0_EDMA_0_3] = GAUDI2_EVENT_HDMA0_QM,
	[GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0] = GAUDI2_EVENT_HDMA1_QM,
	[GAUDI2_QUEUE_ID_DCORE0_EDMA_1_1] = GAUDI2_EVENT_HDMA1_QM,
	[GAUDI2_QUEUE_ID_DCORE0_EDMA_1_2] = GAUDI2_EVENT_HDMA1_QM,
	[GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3] = GAUDI2_EVENT_HDMA1_QM,
	[GAUDI2_QUEUE_ID_DCORE0_MME_0_0] = GAUDI2_EVENT_MME0_QM,
	[GAUDI2_QUEUE_ID_DCORE0_MME_0_1] = GAUDI2_EVENT_MME0_QM,
	[GAUDI2_QUEUE_ID_DCORE0_MME_0_2] = GAUDI2_EVENT_MME0_QM,
	[GAUDI2_QUEUE_ID_DCORE0_MME_0_3] = GAUDI2_EVENT_MME0_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_0_0] = GAUDI2_EVENT_TPC0_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_0_1] = GAUDI2_EVENT_TPC0_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_0_2] = GAUDI2_EVENT_TPC0_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_0_3] = GAUDI2_EVENT_TPC0_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_1_0] = GAUDI2_EVENT_TPC1_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_1_1] = GAUDI2_EVENT_TPC1_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_1_2] = GAUDI2_EVENT_TPC1_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_1_3] = GAUDI2_EVENT_TPC1_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_2_0] = GAUDI2_EVENT_TPC2_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_2_1] = GAUDI2_EVENT_TPC2_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_2_2] = GAUDI2_EVENT_TPC2_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_2_3] = GAUDI2_EVENT_TPC2_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_3_0] = GAUDI2_EVENT_TPC3_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_3_1] = GAUDI2_EVENT_TPC3_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_3_2] = GAUDI2_EVENT_TPC3_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_3_3] = GAUDI2_EVENT_TPC3_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_4_0] = GAUDI2_EVENT_TPC4_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_4_1] = GAUDI2_EVENT_TPC4_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_4_2] = GAUDI2_EVENT_TPC4_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_4_3] = GAUDI2_EVENT_TPC4_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_5_0] = GAUDI2_EVENT_TPC5_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_5_1] = GAUDI2_EVENT_TPC5_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_5_2] = GAUDI2_EVENT_TPC5_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_5_3] = GAUDI2_EVENT_TPC5_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_6_0] = GAUDI2_EVENT_TPC24_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_6_1] = GAUDI2_EVENT_TPC24_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_6_2] = GAUDI2_EVENT_TPC24_QM,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_6_3] = GAUDI2_EVENT_TPC24_QM,
	[GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0] = GAUDI2_EVENT_HDMA2_QM,
	[GAUDI2_QUEUE_ID_DCORE1_EDMA_0_1] = GAUDI2_EVENT_HDMA2_QM,
	[GAUDI2_QUEUE_ID_DCORE1_EDMA_0_2] = GAUDI2_EVENT_HDMA2_QM,
	[GAUDI2_QUEUE_ID_DCORE1_EDMA_0_3] = GAUDI2_EVENT_HDMA2_QM,
	[GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0] = GAUDI2_EVENT_HDMA3_QM,
	[GAUDI2_QUEUE_ID_DCORE1_EDMA_1_1] = GAUDI2_EVENT_HDMA3_QM,
	[GAUDI2_QUEUE_ID_DCORE1_EDMA_1_2] = GAUDI2_EVENT_HDMA3_QM,
	[GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3] = GAUDI2_EVENT_HDMA3_QM,
	[GAUDI2_QUEUE_ID_DCORE1_MME_0_0] = GAUDI2_EVENT_MME1_QM,
	[GAUDI2_QUEUE_ID_DCORE1_MME_0_1] = GAUDI2_EVENT_MME1_QM,
	[GAUDI2_QUEUE_ID_DCORE1_MME_0_2] = GAUDI2_EVENT_MME1_QM,
	[GAUDI2_QUEUE_ID_DCORE1_MME_0_3] = GAUDI2_EVENT_MME1_QM,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_0_0] = GAUDI2_EVENT_TPC6_QM,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_0_1] = GAUDI2_EVENT_TPC6_QM,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_0_2] = GAUDI2_EVENT_TPC6_QM,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_0_3] = GAUDI2_EVENT_TPC6_QM,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_1_0] = GAUDI2_EVENT_TPC7_QM,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_1_1] = GAUDI2_EVENT_TPC7_QM,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_1_2] = GAUDI2_EVENT_TPC7_QM,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_1_3] = GAUDI2_EVENT_TPC7_QM,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_2_0] = GAUDI2_EVENT_TPC8_QM,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_2_1] = GAUDI2_EVENT_TPC8_QM,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_2_2] = GAUDI2_EVENT_TPC8_QM,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_2_3] = GAUDI2_EVENT_TPC8_QM,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_3_0] = GAUDI2_EVENT_TPC9_QM,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_3_1] = GAUDI2_EVENT_TPC9_QM,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_3_2] = GAUDI2_EVENT_TPC9_QM,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_3_3] = GAUDI2_EVENT_TPC9_QM,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_4_0] = GAUDI2_EVENT_TPC10_QM,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_4_1] = GAUDI2_EVENT_TPC10_QM,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_4_2] = GAUDI2_EVENT_TPC10_QM,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_4_3] = GAUDI2_EVENT_TPC10_QM,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_5_0] = GAUDI2_EVENT_TPC11_QM,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_5_1] = GAUDI2_EVENT_TPC11_QM,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_5_2] = GAUDI2_EVENT_TPC11_QM,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_5_3] = GAUDI2_EVENT_TPC11_QM,
	[GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0] = GAUDI2_EVENT_HDMA4_QM,
	[GAUDI2_QUEUE_ID_DCORE2_EDMA_0_1] = GAUDI2_EVENT_HDMA4_QM,
	[GAUDI2_QUEUE_ID_DCORE2_EDMA_0_2] = GAUDI2_EVENT_HDMA4_QM,
	[GAUDI2_QUEUE_ID_DCORE2_EDMA_0_3] = GAUDI2_EVENT_HDMA4_QM,
	[GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0] = GAUDI2_EVENT_HDMA5_QM,
	[GAUDI2_QUEUE_ID_DCORE2_EDMA_1_1] = GAUDI2_EVENT_HDMA5_QM,
	[GAUDI2_QUEUE_ID_DCORE2_EDMA_1_2] = GAUDI2_EVENT_HDMA5_QM,
	[GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3] = GAUDI2_EVENT_HDMA5_QM,
	[GAUDI2_QUEUE_ID_DCORE2_MME_0_0] = GAUDI2_EVENT_MME2_QM,
	[GAUDI2_QUEUE_ID_DCORE2_MME_0_1] = GAUDI2_EVENT_MME2_QM,
	[GAUDI2_QUEUE_ID_DCORE2_MME_0_2] = GAUDI2_EVENT_MME2_QM,
	[GAUDI2_QUEUE_ID_DCORE2_MME_0_3] = GAUDI2_EVENT_MME2_QM,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_0_0] = GAUDI2_EVENT_TPC12_QM,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_0_1] = GAUDI2_EVENT_TPC12_QM,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_0_2] = GAUDI2_EVENT_TPC12_QM,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_0_3] = GAUDI2_EVENT_TPC12_QM,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_1_0] = GAUDI2_EVENT_TPC13_QM,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_1_1] = GAUDI2_EVENT_TPC13_QM,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_1_2] = GAUDI2_EVENT_TPC13_QM,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_1_3] = GAUDI2_EVENT_TPC13_QM,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_2_0] = GAUDI2_EVENT_TPC14_QM,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_2_1] = GAUDI2_EVENT_TPC14_QM,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_2_2] = GAUDI2_EVENT_TPC14_QM,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_2_3] = GAUDI2_EVENT_TPC14_QM,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_3_0] = GAUDI2_EVENT_TPC15_QM,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_3_1] = GAUDI2_EVENT_TPC15_QM,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_3_2] = GAUDI2_EVENT_TPC15_QM,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_3_3] = GAUDI2_EVENT_TPC15_QM,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_4_0] = GAUDI2_EVENT_TPC16_QM,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_4_1] = GAUDI2_EVENT_TPC16_QM,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_4_2] = GAUDI2_EVENT_TPC16_QM,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_4_3] = GAUDI2_EVENT_TPC16_QM,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_5_0] = GAUDI2_EVENT_TPC17_QM,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_5_1] = GAUDI2_EVENT_TPC17_QM,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_5_2] = GAUDI2_EVENT_TPC17_QM,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_5_3] = GAUDI2_EVENT_TPC17_QM,
	[GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0] = GAUDI2_EVENT_HDMA6_QM,
	[GAUDI2_QUEUE_ID_DCORE3_EDMA_0_1] = GAUDI2_EVENT_HDMA6_QM,
	[GAUDI2_QUEUE_ID_DCORE3_EDMA_0_2] = GAUDI2_EVENT_HDMA6_QM,
	[GAUDI2_QUEUE_ID_DCORE3_EDMA_0_3] = GAUDI2_EVENT_HDMA6_QM,
	[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0] = GAUDI2_EVENT_HDMA7_QM,
	[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_1] = GAUDI2_EVENT_HDMA7_QM,
	[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_2] = GAUDI2_EVENT_HDMA7_QM,
	[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3] = GAUDI2_EVENT_HDMA7_QM,
	[GAUDI2_QUEUE_ID_DCORE3_MME_0_0] = GAUDI2_EVENT_MME3_QM,
	[GAUDI2_QUEUE_ID_DCORE3_MME_0_1] = GAUDI2_EVENT_MME3_QM,
	[GAUDI2_QUEUE_ID_DCORE3_MME_0_2] = GAUDI2_EVENT_MME3_QM,
	[GAUDI2_QUEUE_ID_DCORE3_MME_0_3] = GAUDI2_EVENT_MME3_QM,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_0_0] = GAUDI2_EVENT_TPC18_QM,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_0_1] = GAUDI2_EVENT_TPC18_QM,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_0_2] = GAUDI2_EVENT_TPC18_QM,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_0_3] = GAUDI2_EVENT_TPC18_QM,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_1_0] = GAUDI2_EVENT_TPC19_QM,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_1_1] = GAUDI2_EVENT_TPC19_QM,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_1_2] = GAUDI2_EVENT_TPC19_QM,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_1_3] = GAUDI2_EVENT_TPC19_QM,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_2_0] = GAUDI2_EVENT_TPC20_QM,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_2_1] = GAUDI2_EVENT_TPC20_QM,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_2_2] = GAUDI2_EVENT_TPC20_QM,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_2_3] = GAUDI2_EVENT_TPC20_QM,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_3_0] = GAUDI2_EVENT_TPC21_QM,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_3_1] = GAUDI2_EVENT_TPC21_QM,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_3_2] = GAUDI2_EVENT_TPC21_QM,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_3_3] = GAUDI2_EVENT_TPC21_QM,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_4_0] = GAUDI2_EVENT_TPC22_QM,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_4_1] = GAUDI2_EVENT_TPC22_QM,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_4_2] = GAUDI2_EVENT_TPC22_QM,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_4_3] = GAUDI2_EVENT_TPC22_QM,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_5_0] = GAUDI2_EVENT_TPC23_QM,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_5_1] = GAUDI2_EVENT_TPC23_QM,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_5_2] = GAUDI2_EVENT_TPC23_QM,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_5_3] = GAUDI2_EVENT_TPC23_QM,
	[GAUDI2_QUEUE_ID_NIC_0_0] = GAUDI2_EVENT_NIC0_QM0,
	[GAUDI2_QUEUE_ID_NIC_0_1] = GAUDI2_EVENT_NIC0_QM0,
	[GAUDI2_QUEUE_ID_NIC_0_2] = GAUDI2_EVENT_NIC0_QM0,
	[GAUDI2_QUEUE_ID_NIC_0_3] = GAUDI2_EVENT_NIC0_QM0,
	[GAUDI2_QUEUE_ID_NIC_1_0] = GAUDI2_EVENT_NIC0_QM1,
	[GAUDI2_QUEUE_ID_NIC_1_1] = GAUDI2_EVENT_NIC0_QM1,
	[GAUDI2_QUEUE_ID_NIC_1_2] = GAUDI2_EVENT_NIC0_QM1,
	[GAUDI2_QUEUE_ID_NIC_1_3] = GAUDI2_EVENT_NIC0_QM1,
	[GAUDI2_QUEUE_ID_NIC_2_0] = GAUDI2_EVENT_NIC1_QM0,
	[GAUDI2_QUEUE_ID_NIC_2_1] = GAUDI2_EVENT_NIC1_QM0,
	[GAUDI2_QUEUE_ID_NIC_2_2] = GAUDI2_EVENT_NIC1_QM0,
	[GAUDI2_QUEUE_ID_NIC_2_3] = GAUDI2_EVENT_NIC1_QM0,
	[GAUDI2_QUEUE_ID_NIC_3_0] = GAUDI2_EVENT_NIC1_QM1,
	[GAUDI2_QUEUE_ID_NIC_3_1] = GAUDI2_EVENT_NIC1_QM1,
	[GAUDI2_QUEUE_ID_NIC_3_2] = GAUDI2_EVENT_NIC1_QM1,
	[GAUDI2_QUEUE_ID_NIC_3_3] = GAUDI2_EVENT_NIC1_QM1,
	[GAUDI2_QUEUE_ID_NIC_4_0] = GAUDI2_EVENT_NIC2_QM0,
	[GAUDI2_QUEUE_ID_NIC_4_1] = GAUDI2_EVENT_NIC2_QM0,
	[GAUDI2_QUEUE_ID_NIC_4_2] = GAUDI2_EVENT_NIC2_QM0,
	[GAUDI2_QUEUE_ID_NIC_4_3] = GAUDI2_EVENT_NIC2_QM0,
	[GAUDI2_QUEUE_ID_NIC_5_0] = GAUDI2_EVENT_NIC2_QM1,
	[GAUDI2_QUEUE_ID_NIC_5_1] = GAUDI2_EVENT_NIC2_QM1,
	[GAUDI2_QUEUE_ID_NIC_5_2] = GAUDI2_EVENT_NIC2_QM1,
	[GAUDI2_QUEUE_ID_NIC_5_3] = GAUDI2_EVENT_NIC2_QM1,
	[GAUDI2_QUEUE_ID_NIC_6_0] = GAUDI2_EVENT_NIC3_QM0,
	[GAUDI2_QUEUE_ID_NIC_6_1] = GAUDI2_EVENT_NIC3_QM0,
	[GAUDI2_QUEUE_ID_NIC_6_2] = GAUDI2_EVENT_NIC3_QM0,
	[GAUDI2_QUEUE_ID_NIC_6_3] = GAUDI2_EVENT_NIC3_QM0,
	[GAUDI2_QUEUE_ID_NIC_7_0] = GAUDI2_EVENT_NIC3_QM1,
	[GAUDI2_QUEUE_ID_NIC_7_1] = GAUDI2_EVENT_NIC3_QM1,
	[GAUDI2_QUEUE_ID_NIC_7_2] = GAUDI2_EVENT_NIC3_QM1,
	[GAUDI2_QUEUE_ID_NIC_7_3] = GAUDI2_EVENT_NIC3_QM1,
	[GAUDI2_QUEUE_ID_NIC_8_0] = GAUDI2_EVENT_NIC4_QM0,
	[GAUDI2_QUEUE_ID_NIC_8_1] = GAUDI2_EVENT_NIC4_QM0,
	[GAUDI2_QUEUE_ID_NIC_8_2] = GAUDI2_EVENT_NIC4_QM0,
	[GAUDI2_QUEUE_ID_NIC_8_3] = GAUDI2_EVENT_NIC4_QM0,
	[GAUDI2_QUEUE_ID_NIC_9_0] = GAUDI2_EVENT_NIC4_QM1,
	[GAUDI2_QUEUE_ID_NIC_9_1] = GAUDI2_EVENT_NIC4_QM1,
	[GAUDI2_QUEUE_ID_NIC_9_2] = GAUDI2_EVENT_NIC4_QM1,
	[GAUDI2_QUEUE_ID_NIC_9_3] = GAUDI2_EVENT_NIC4_QM1,
	[GAUDI2_QUEUE_ID_NIC_10_0] = GAUDI2_EVENT_NIC5_QM0,
	[GAUDI2_QUEUE_ID_NIC_10_1] = GAUDI2_EVENT_NIC5_QM0,
	[GAUDI2_QUEUE_ID_NIC_10_2] = GAUDI2_EVENT_NIC5_QM0,
	[GAUDI2_QUEUE_ID_NIC_10_3] = GAUDI2_EVENT_NIC5_QM0,
	[GAUDI2_QUEUE_ID_NIC_11_0] = GAUDI2_EVENT_NIC5_QM1,
	[GAUDI2_QUEUE_ID_NIC_11_1] = GAUDI2_EVENT_NIC5_QM1,
	[GAUDI2_QUEUE_ID_NIC_11_2] = GAUDI2_EVENT_NIC5_QM1,
	[GAUDI2_QUEUE_ID_NIC_11_3] = GAUDI2_EVENT_NIC5_QM1,
	[GAUDI2_QUEUE_ID_NIC_12_0] = GAUDI2_EVENT_NIC6_QM0,
	[GAUDI2_QUEUE_ID_NIC_12_1] = GAUDI2_EVENT_NIC6_QM0,
	[GAUDI2_QUEUE_ID_NIC_12_2] = GAUDI2_EVENT_NIC6_QM0,
	[GAUDI2_QUEUE_ID_NIC_12_3] = GAUDI2_EVENT_NIC6_QM0,
	[GAUDI2_QUEUE_ID_NIC_13_0] = GAUDI2_EVENT_NIC6_QM1,
	[GAUDI2_QUEUE_ID_NIC_13_1] = GAUDI2_EVENT_NIC6_QM1,
	[GAUDI2_QUEUE_ID_NIC_13_2] = GAUDI2_EVENT_NIC6_QM1,
	[GAUDI2_QUEUE_ID_NIC_13_3] = GAUDI2_EVENT_NIC6_QM1,
	[GAUDI2_QUEUE_ID_NIC_14_0] = GAUDI2_EVENT_NIC7_QM0,
	[GAUDI2_QUEUE_ID_NIC_14_1] = GAUDI2_EVENT_NIC7_QM0,
	[GAUDI2_QUEUE_ID_NIC_14_2] = GAUDI2_EVENT_NIC7_QM0,
	[GAUDI2_QUEUE_ID_NIC_14_3] = GAUDI2_EVENT_NIC7_QM0,
	[GAUDI2_QUEUE_ID_NIC_15_0] = GAUDI2_EVENT_NIC7_QM1,
	[GAUDI2_QUEUE_ID_NIC_15_1] = GAUDI2_EVENT_NIC7_QM1,
	[GAUDI2_QUEUE_ID_NIC_15_2] = GAUDI2_EVENT_NIC7_QM1,
	[GAUDI2_QUEUE_ID_NIC_15_3] = GAUDI2_EVENT_NIC7_QM1,
	[GAUDI2_QUEUE_ID_NIC_16_0] = GAUDI2_EVENT_NIC8_QM0,
	[GAUDI2_QUEUE_ID_NIC_16_1] = GAUDI2_EVENT_NIC8_QM0,
	[GAUDI2_QUEUE_ID_NIC_16_2] = GAUDI2_EVENT_NIC8_QM0,
	[GAUDI2_QUEUE_ID_NIC_16_3] = GAUDI2_EVENT_NIC8_QM0,
	[GAUDI2_QUEUE_ID_NIC_17_0] = GAUDI2_EVENT_NIC8_QM1,
	[GAUDI2_QUEUE_ID_NIC_17_1] = GAUDI2_EVENT_NIC8_QM1,
	[GAUDI2_QUEUE_ID_NIC_17_2] = GAUDI2_EVENT_NIC8_QM1,
	[GAUDI2_QUEUE_ID_NIC_17_3] = GAUDI2_EVENT_NIC8_QM1,
	[GAUDI2_QUEUE_ID_NIC_18_0] = GAUDI2_EVENT_NIC9_QM0,
	[GAUDI2_QUEUE_ID_NIC_18_1] = GAUDI2_EVENT_NIC9_QM0,
	[GAUDI2_QUEUE_ID_NIC_18_2] = GAUDI2_EVENT_NIC9_QM0,
	[GAUDI2_QUEUE_ID_NIC_18_3] = GAUDI2_EVENT_NIC9_QM0,
	[GAUDI2_QUEUE_ID_NIC_19_0] = GAUDI2_EVENT_NIC9_QM1,
	[GAUDI2_QUEUE_ID_NIC_19_1] = GAUDI2_EVENT_NIC9_QM1,
	[GAUDI2_QUEUE_ID_NIC_19_2] = GAUDI2_EVENT_NIC9_QM1,
	[GAUDI2_QUEUE_ID_NIC_19_3] = GAUDI2_EVENT_NIC9_QM1,
	[GAUDI2_QUEUE_ID_NIC_20_0] = GAUDI2_EVENT_NIC10_QM0,
	[GAUDI2_QUEUE_ID_NIC_20_1] = GAUDI2_EVENT_NIC10_QM0,
	[GAUDI2_QUEUE_ID_NIC_20_2] = GAUDI2_EVENT_NIC10_QM0,
	[GAUDI2_QUEUE_ID_NIC_20_3] = GAUDI2_EVENT_NIC10_QM0,
	[GAUDI2_QUEUE_ID_NIC_21_0] = GAUDI2_EVENT_NIC10_QM1,
	[GAUDI2_QUEUE_ID_NIC_21_1] = GAUDI2_EVENT_NIC10_QM1,
	[GAUDI2_QUEUE_ID_NIC_21_2] = GAUDI2_EVENT_NIC10_QM1,
	[GAUDI2_QUEUE_ID_NIC_21_3] = GAUDI2_EVENT_NIC10_QM1,
	[GAUDI2_QUEUE_ID_NIC_22_0] = GAUDI2_EVENT_NIC11_QM0,
	[GAUDI2_QUEUE_ID_NIC_22_1] = GAUDI2_EVENT_NIC11_QM0,
	[GAUDI2_QUEUE_ID_NIC_22_2] = GAUDI2_EVENT_NIC11_QM0,
	[GAUDI2_QUEUE_ID_NIC_22_3] = GAUDI2_EVENT_NIC11_QM0,
	[GAUDI2_QUEUE_ID_NIC_23_0] = GAUDI2_EVENT_NIC11_QM1,
	[GAUDI2_QUEUE_ID_NIC_23_1] = GAUDI2_EVENT_NIC11_QM1,
	[GAUDI2_QUEUE_ID_NIC_23_2] = GAUDI2_EVENT_NIC11_QM1,
	[GAUDI2_QUEUE_ID_NIC_23_3] = GAUDI2_EVENT_NIC11_QM1,
	[GAUDI2_QUEUE_ID_ROT_0_0] = GAUDI2_EVENT_ROTATOR0_ROT0_QM,
	[GAUDI2_QUEUE_ID_ROT_0_1] = GAUDI2_EVENT_ROTATOR0_ROT0_QM,
	[GAUDI2_QUEUE_ID_ROT_0_2] = GAUDI2_EVENT_ROTATOR0_ROT0_QM,
	[GAUDI2_QUEUE_ID_ROT_0_3] = GAUDI2_EVENT_ROTATOR0_ROT0_QM,
	[GAUDI2_QUEUE_ID_ROT_1_0] = GAUDI2_EVENT_ROTATOR1_ROT1_QM,
	[GAUDI2_QUEUE_ID_ROT_1_1] = GAUDI2_EVENT_ROTATOR1_ROT1_QM,
	[GAUDI2_QUEUE_ID_ROT_1_2] = GAUDI2_EVENT_ROTATOR1_ROT1_QM,
	[GAUDI2_QUEUE_ID_ROT_1_3] = GAUDI2_EVENT_ROTATOR1_ROT1_QM
};

static const int gaudi2_dma_core_async_event_id[] = {
	[DMA_CORE_ID_EDMA0] = GAUDI2_EVENT_HDMA0_CORE,
	[DMA_CORE_ID_EDMA1] = GAUDI2_EVENT_HDMA1_CORE,
	[DMA_CORE_ID_EDMA2] = GAUDI2_EVENT_HDMA2_CORE,
	[DMA_CORE_ID_EDMA3] = GAUDI2_EVENT_HDMA3_CORE,
	[DMA_CORE_ID_EDMA4] = GAUDI2_EVENT_HDMA4_CORE,
	[DMA_CORE_ID_EDMA5] = GAUDI2_EVENT_HDMA5_CORE,
	[DMA_CORE_ID_EDMA6] = GAUDI2_EVENT_HDMA6_CORE,
	[DMA_CORE_ID_EDMA7] = GAUDI2_EVENT_HDMA7_CORE,
	[DMA_CORE_ID_PDMA0] = GAUDI2_EVENT_PDMA0_CORE,
	[DMA_CORE_ID_PDMA1] = GAUDI2_EVENT_PDMA1_CORE,
	[DMA_CORE_ID_KDMA] = GAUDI2_EVENT_KDMA0_CORE,
};

const char *gaudi2_engine_id_str[] = {
	__stringify(GAUDI2_DCORE0_ENGINE_ID_EDMA_0),
	__stringify(GAUDI2_DCORE0_ENGINE_ID_EDMA_1),
	__stringify(GAUDI2_DCORE0_ENGINE_ID_MME),
	__stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_0),
	__stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_1),
	__stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_2),
	__stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_3),
	__stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_4),
	__stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_5),
	__stringify(GAUDI2_DCORE0_ENGINE_ID_DEC_0),
	__stringify(GAUDI2_DCORE0_ENGINE_ID_DEC_1),
	__stringify(GAUDI2_DCORE1_ENGINE_ID_EDMA_0),
	__stringify(GAUDI2_DCORE1_ENGINE_ID_EDMA_1),
	__stringify(GAUDI2_DCORE1_ENGINE_ID_MME),
	__stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_0),
	__stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_1),
	__stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_2),
	__stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_3),
	__stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_4),
	__stringify(GAUDI2_DCORE1_ENGINE_ID_TPC_5),
	__stringify(GAUDI2_DCORE1_ENGINE_ID_DEC_0),
	__stringify(GAUDI2_DCORE1_ENGINE_ID_DEC_1),
	__stringify(GAUDI2_DCORE2_ENGINE_ID_EDMA_0),
	__stringify(GAUDI2_DCORE2_ENGINE_ID_EDMA_1),
	__stringify(GAUDI2_DCORE2_ENGINE_ID_MME),
	__stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_0),
	__stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_1),
	__stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_2),
	__stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_3),
	__stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_4),
	__stringify(GAUDI2_DCORE2_ENGINE_ID_TPC_5),
	__stringify(GAUDI2_DCORE2_ENGINE_ID_DEC_0),
	__stringify(GAUDI2_DCORE2_ENGINE_ID_DEC_1),
	__stringify(GAUDI2_DCORE3_ENGINE_ID_EDMA_0),
	__stringify(GAUDI2_DCORE3_ENGINE_ID_EDMA_1),
	__stringify(GAUDI2_DCORE3_ENGINE_ID_MME),
	__stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_0),
	__stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_1),
	__stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_2),
	__stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_3),
	__stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_4),
	__stringify(GAUDI2_DCORE3_ENGINE_ID_TPC_5),
	__stringify(GAUDI2_DCORE3_ENGINE_ID_DEC_0),
	__stringify(GAUDI2_DCORE3_ENGINE_ID_DEC_1),
	__stringify(GAUDI2_DCORE0_ENGINE_ID_TPC_6),
	__stringify(GAUDI2_ENGINE_ID_PDMA_0),
	__stringify(GAUDI2_ENGINE_ID_PDMA_1),
	__stringify(GAUDI2_ENGINE_ID_ROT_0),
	__stringify(GAUDI2_ENGINE_ID_ROT_1),
	__stringify(GAUDI2_PCIE_ENGINE_ID_DEC_0),
	__stringify(GAUDI2_PCIE_ENGINE_ID_DEC_1),
	__stringify(GAUDI2_ENGINE_ID_NIC0_0),
	__stringify(GAUDI2_ENGINE_ID_NIC0_1),
	__stringify(GAUDI2_ENGINE_ID_NIC1_0),
	__stringify(GAUDI2_ENGINE_ID_NIC1_1),
	__stringify(GAUDI2_ENGINE_ID_NIC2_0),
	__stringify(GAUDI2_ENGINE_ID_NIC2_1),
	__stringify(GAUDI2_ENGINE_ID_NIC3_0),
	__stringify(GAUDI2_ENGINE_ID_NIC3_1),
	__stringify(GAUDI2_ENGINE_ID_NIC4_0),
	__stringify(GAUDI2_ENGINE_ID_NIC4_1),
	__stringify(GAUDI2_ENGINE_ID_NIC5_0),
	__stringify(GAUDI2_ENGINE_ID_NIC5_1),
	__stringify(GAUDI2_ENGINE_ID_NIC6_0),
	__stringify(GAUDI2_ENGINE_ID_NIC6_1),
	__stringify(GAUDI2_ENGINE_ID_NIC7_0),
	__stringify(GAUDI2_ENGINE_ID_NIC7_1),
	__stringify(GAUDI2_ENGINE_ID_NIC8_0),
	__stringify(GAUDI2_ENGINE_ID_NIC8_1),
	__stringify(GAUDI2_ENGINE_ID_NIC9_0),
	__stringify(GAUDI2_ENGINE_ID_NIC9_1),
	__stringify(GAUDI2_ENGINE_ID_NIC10_0),
	__stringify(GAUDI2_ENGINE_ID_NIC10_1),
	__stringify(GAUDI2_ENGINE_ID_NIC11_0),
	__stringify(GAUDI2_ENGINE_ID_NIC11_1),
	__stringify(GAUDI2_ENGINE_ID_PCIE),
	__stringify(GAUDI2_ENGINE_ID_PSOC),
	__stringify(GAUDI2_ENGINE_ID_ARC_FARM),
	__stringify(GAUDI2_ENGINE_ID_KDMA),
	__stringify(GAUDI2_ENGINE_ID_SIZE),
};

const char *gaudi2_queue_id_str[] = {
	__stringify(GAUDI2_QUEUE_ID_PDMA_0_0),
	__stringify(GAUDI2_QUEUE_ID_PDMA_0_1),
	__stringify(GAUDI2_QUEUE_ID_PDMA_0_2),
	__stringify(GAUDI2_QUEUE_ID_PDMA_0_3),
	__stringify(GAUDI2_QUEUE_ID_PDMA_1_0),
	__stringify(GAUDI2_QUEUE_ID_PDMA_1_1),
	__stringify(GAUDI2_QUEUE_ID_PDMA_1_2),
	__stringify(GAUDI2_QUEUE_ID_PDMA_1_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_0_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_MME_0_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_0_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_1_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_2_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_3_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_4_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_5_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE0_TPC_6_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_0_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_MME_0_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_0_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_1_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_2_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_3_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_4_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE1_TPC_5_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_0_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_MME_0_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_0_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_1_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_2_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_3_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_4_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE2_TPC_5_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_0_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_MME_0_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_0_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_1_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_2_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_3_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_4_3),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_0),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_1),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_2),
	__stringify(GAUDI2_QUEUE_ID_DCORE3_TPC_5_3),
	__stringify(GAUDI2_QUEUE_ID_NIC_0_0),
	__stringify(GAUDI2_QUEUE_ID_NIC_0_1),
	__stringify(GAUDI2_QUEUE_ID_NIC_0_2),
	__stringify(GAUDI2_QUEUE_ID_NIC_0_3),
	__stringify(GAUDI2_QUEUE_ID_NIC_1_0),
	__stringify(GAUDI2_QUEUE_ID_NIC_1_1),
	__stringify(GAUDI2_QUEUE_ID_NIC_1_2),
	__stringify(GAUDI2_QUEUE_ID_NIC_1_3),
	__stringify(GAUDI2_QUEUE_ID_NIC_2_0),
	__stringify(GAUDI2_QUEUE_ID_NIC_2_1),
	__stringify(GAUDI2_QUEUE_ID_NIC_2_2),
	__stringify(GAUDI2_QUEUE_ID_NIC_2_3),
	__stringify(GAUDI2_QUEUE_ID_NIC_3_0),
	__stringify(GAUDI2_QUEUE_ID_NIC_3_1),
	__stringify(GAUDI2_QUEUE_ID_NIC_3_2),
	__stringify(GAUDI2_QUEUE_ID_NIC_3_3),
	__stringify(GAUDI2_QUEUE_ID_NIC_4_0),
	__stringify(GAUDI2_QUEUE_ID_NIC_4_1),
	__stringify(GAUDI2_QUEUE_ID_NIC_4_2),
	__stringify(GAUDI2_QUEUE_ID_NIC_4_3),
	__stringify(GAUDI2_QUEUE_ID_NIC_5_0),
	__stringify(GAUDI2_QUEUE_ID_NIC_5_1),
	__stringify(GAUDI2_QUEUE_ID_NIC_5_2),
	__stringify(GAUDI2_QUEUE_ID_NIC_5_3),
	__stringify(GAUDI2_QUEUE_ID_NIC_6_0),
	__stringify(GAUDI2_QUEUE_ID_NIC_6_1),
	__stringify(GAUDI2_QUEUE_ID_NIC_6_2),
	__stringify(GAUDI2_QUEUE_ID_NIC_6_3),
	__stringify(GAUDI2_QUEUE_ID_NIC_7_0),
	__stringify(GAUDI2_QUEUE_ID_NIC_7_1),
	__stringify(GAUDI2_QUEUE_ID_NIC_7_2),
	__stringify(GAUDI2_QUEUE_ID_NIC_7_3),
	__stringify(GAUDI2_QUEUE_ID_NIC_8_0),
	__stringify(GAUDI2_QUEUE_ID_NIC_8_1),
	__stringify(GAUDI2_QUEUE_ID_NIC_8_2),
	__stringify(GAUDI2_QUEUE_ID_NIC_8_3),
	__stringify(GAUDI2_QUEUE_ID_NIC_9_0),
	__stringify(GAUDI2_QUEUE_ID_NIC_9_1),
	__stringify(GAUDI2_QUEUE_ID_NIC_9_2),
	__stringify(GAUDI2_QUEUE_ID_NIC_9_3),
	__stringify(GAUDI2_QUEUE_ID_NIC_10_0),
	__stringify(GAUDI2_QUEUE_ID_NIC_10_1),
	__stringify(GAUDI2_QUEUE_ID_NIC_10_2),
	__stringify(GAUDI2_QUEUE_ID_NIC_10_3),
	__stringify(GAUDI2_QUEUE_ID_NIC_11_0),
	__stringify(GAUDI2_QUEUE_ID_NIC_11_1),
	__stringify(GAUDI2_QUEUE_ID_NIC_11_2),
	__stringify(GAUDI2_QUEUE_ID_NIC_11_3),
	__stringify(GAUDI2_QUEUE_ID_NIC_12_0),
	__stringify(GAUDI2_QUEUE_ID_NIC_12_1),
	__stringify(GAUDI2_QUEUE_ID_NIC_12_2),
	__stringify(GAUDI2_QUEUE_ID_NIC_12_3),
	__stringify(GAUDI2_QUEUE_ID_NIC_13_0),
	__stringify(GAUDI2_QUEUE_ID_NIC_13_1),
	__stringify(GAUDI2_QUEUE_ID_NIC_13_2),
	__stringify(GAUDI2_QUEUE_ID_NIC_13_3),
	__stringify(GAUDI2_QUEUE_ID_NIC_14_0),
	__stringify(GAUDI2_QUEUE_ID_NIC_14_1),
	__stringify(GAUDI2_QUEUE_ID_NIC_14_2),
	__stringify(GAUDI2_QUEUE_ID_NIC_14_3),
	__stringify(GAUDI2_QUEUE_ID_NIC_15_0),
	__stringify(GAUDI2_QUEUE_ID_NIC_15_1),
	__stringify(GAUDI2_QUEUE_ID_NIC_15_2),
	__stringify(GAUDI2_QUEUE_ID_NIC_15_3),
	__stringify(GAUDI2_QUEUE_ID_NIC_16_0),
	__stringify(GAUDI2_QUEUE_ID_NIC_16_1),
	__stringify(GAUDI2_QUEUE_ID_NIC_16_2),
	__stringify(GAUDI2_QUEUE_ID_NIC_16_3),
	__stringify(GAUDI2_QUEUE_ID_NIC_17_0),
	__stringify(GAUDI2_QUEUE_ID_NIC_17_1),
	__stringify(GAUDI2_QUEUE_ID_NIC_17_2),
	__stringify(GAUDI2_QUEUE_ID_NIC_17_3),
	__stringify(GAUDI2_QUEUE_ID_NIC_18_0),
	__stringify(GAUDI2_QUEUE_ID_NIC_18_1),
	__stringify(GAUDI2_QUEUE_ID_NIC_18_2),
	__stringify(GAUDI2_QUEUE_ID_NIC_18_3),
	__stringify(GAUDI2_QUEUE_ID_NIC_19_0),
	__stringify(GAUDI2_QUEUE_ID_NIC_19_1),
	__stringify(GAUDI2_QUEUE_ID_NIC_19_2),
	__stringify(GAUDI2_QUEUE_ID_NIC_19_3),
	__stringify(GAUDI2_QUEUE_ID_NIC_20_0),
	__stringify(GAUDI2_QUEUE_ID_NIC_20_1),
	__stringify(GAUDI2_QUEUE_ID_NIC_20_2),
	__stringify(GAUDI2_QUEUE_ID_NIC_20_3),
	__stringify(GAUDI2_QUEUE_ID_NIC_21_0),
	__stringify(GAUDI2_QUEUE_ID_NIC_21_1),
	__stringify(GAUDI2_QUEUE_ID_NIC_21_2),
	__stringify(GAUDI2_QUEUE_ID_NIC_21_3),
	__stringify(GAUDI2_QUEUE_ID_NIC_22_0),
	__stringify(GAUDI2_QUEUE_ID_NIC_22_1),
	__stringify(GAUDI2_QUEUE_ID_NIC_22_2),
	__stringify(GAUDI2_QUEUE_ID_NIC_22_3),
	__stringify(GAUDI2_QUEUE_ID_NIC_23_0),
	__stringify(GAUDI2_QUEUE_ID_NIC_23_1),
	__stringify(GAUDI2_QUEUE_ID_NIC_23_2),
	__stringify(GAUDI2_QUEUE_ID_NIC_23_3),
	__stringify(GAUDI2_QUEUE_ID_ROT_0_0),
	__stringify(GAUDI2_QUEUE_ID_ROT_0_1),
	__stringify(GAUDI2_QUEUE_ID_ROT_0_2),
	__stringify(GAUDI2_QUEUE_ID_ROT_0_3),
	__stringify(GAUDI2_QUEUE_ID_ROT_1_0),
	__stringify(GAUDI2_QUEUE_ID_ROT_1_1),
	__stringify(GAUDI2_QUEUE_ID_ROT_1_2),
	__stringify(GAUDI2_QUEUE_ID_ROT_1_3),
	__stringify(GAUDI2_QUEUE_ID_CPU_PQ),
	__stringify(GAUDI2_QUEUE_ID_SIZE),
};
| 1078 | |
| 1079 | static const char * const gaudi2_qm_sei_error_cause[GAUDI2_NUM_OF_QM_SEI_ERR_CAUSE] = { |
| 1080 | "qman sei intr" , |
| 1081 | "arc sei intr" |
| 1082 | }; |
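
/*
 * Illustrative sketch only (hypothetical helper, not part of the driver):
 * the cause tables in this file map bit i of a hardware cause register to
 * the string at index i, so decoding is a walk over the set bits.
 */
static inline void gaudi2_example_print_qm_sei_causes(u64 cause_map)
{
	int i;

	/* bit i in the cause bitmap selects string i in the table above */
	for (i = 0; i < GAUDI2_NUM_OF_QM_SEI_ERR_CAUSE; i++)
		if (cause_map & BIT_ULL(i))
			pr_err("QM SEI error cause: %s\n", gaudi2_qm_sei_error_cause[i]);
}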

static const char * const gaudi2_cpu_sei_error_cause[GAUDI2_NUM_OF_CPU_SEI_ERR_CAUSE] = {
	"AXI_TERMINATOR WR",
	"AXI_TERMINATOR RD",
	"AXI SPLIT SEI Status"
};

static const char * const gaudi2_arc_sei_error_cause[GAUDI2_NUM_OF_ARC_SEI_ERR_CAUSE] = {
	"cbu_bresp_sei_intr_cause",
	"cbu_rresp_sei_intr_cause",
	"lbu_bresp_sei_intr_cause",
	"lbu_rresp_sei_intr_cause",
	"cbu_axi_split_intr_cause",
	"lbu_axi_split_intr_cause",
	"arc_ip_excptn_sei_intr_cause",
	"dmi_bresp_sei_intr_cause",
	"aux2apb_err_sei_intr_cause",
	"cfg_lbw_wr_terminated_intr_cause",
	"cfg_lbw_rd_terminated_intr_cause",
	"cfg_dccm_wr_terminated_intr_cause",
	"cfg_dccm_rd_terminated_intr_cause",
	"cfg_hbw_rd_terminated_intr_cause"
};

static const char * const gaudi2_dec_error_cause[GAUDI2_NUM_OF_DEC_ERR_CAUSE] = {
	"msix_vcd_hbw_sei",
	"msix_l2c_hbw_sei",
	"msix_nrm_hbw_sei",
	"msix_abnrm_hbw_sei",
	"msix_vcd_lbw_sei",
	"msix_l2c_lbw_sei",
	"msix_nrm_lbw_sei",
	"msix_abnrm_lbw_sei",
	"apb_vcd_lbw_sei",
	"apb_l2c_lbw_sei",
	"apb_nrm_lbw_sei",
	"apb_abnrm_lbw_sei",
	"dec_sei",
	"dec_apb_sei",
	"trc_apb_sei",
	"lbw_mstr_if_sei",
	"axi_split_bresp_err_sei",
	"hbw_axi_wr_viol_sei",
	"hbw_axi_rd_viol_sei",
	"lbw_axi_wr_viol_sei",
	"lbw_axi_rd_viol_sei",
	"vcd_spi",
	"l2c_spi",
	"nrm_spi",
	"abnrm_spi",
};

static const char * const gaudi2_qman_error_cause[GAUDI2_NUM_OF_QM_ERR_CAUSE] = {
	"PQ AXI HBW error",
	"CQ AXI HBW error",
	"CP AXI HBW error",
	"CP error due to undefined OPCODE",
	"CP encountered STOP OPCODE",
	"CP AXI LBW error",
	"CP WRREG32 or WRBULK returned error",
	"N/A",
	"FENCE 0 inc over max value and clipped",
	"FENCE 1 inc over max value and clipped",
	"FENCE 2 inc over max value and clipped",
	"FENCE 3 inc over max value and clipped",
	"FENCE 0 dec under min value and clipped",
	"FENCE 1 dec under min value and clipped",
	"FENCE 2 dec under min value and clipped",
	"FENCE 3 dec under min value and clipped",
	"CPDMA Up overflow",
	"PQC L2H error"
};

static const char * const gaudi2_lower_qman_error_cause[GAUDI2_NUM_OF_LOWER_QM_ERR_CAUSE] = {
	"RSVD0",
	"CQ AXI HBW error",
	"CP AXI HBW error",
	"CP error due to undefined OPCODE",
	"CP encountered STOP OPCODE",
	"CP AXI LBW error",
	"CP WRREG32 or WRBULK returned error",
	"N/A",
	"FENCE 0 inc over max value and clipped",
	"FENCE 1 inc over max value and clipped",
	"FENCE 2 inc over max value and clipped",
	"FENCE 3 inc over max value and clipped",
	"FENCE 0 dec under min value and clipped",
	"FENCE 1 dec under min value and clipped",
	"FENCE 2 dec under min value and clipped",
	"FENCE 3 dec under min value and clipped",
	"CPDMA Up overflow",
	"RSVD17",
	"CQ_WR_IFIFO_CI_ERR",
	"CQ_WR_CTL_CI_ERR",
	"ARC_CQF_RD_ERR",
	"ARC_CQ_WR_IFIFO_CI_ERR",
	"ARC_CQ_WR_CTL_CI_ERR",
	"ARC_AXI_ERR",
	"CP_SWITCH_WDT_ERR"
};

static const char * const gaudi2_qman_arb_error_cause[GAUDI2_NUM_OF_QM_ARB_ERR_CAUSE] = {
	"Choice push while full error",
	"Choice Q watchdog error",
	"MSG AXI LBW returned with error"
};

static const char * const guadi2_rot_error_cause[GAUDI2_NUM_OF_ROT_ERR_CAUSE] = {
	"qm_axi_err",
	"qm_trace_fence_events",
	"qm_sw_err",
	"qm_cp_sw_stop",
	"lbw_mstr_rresp_err",
	"lbw_mstr_bresp_err",
	"lbw_msg_slverr",
	"hbw_msg_slverr",
	"wbc_slverr",
	"hbw_mstr_rresp_err",
	"hbw_mstr_bresp_err",
	"sb_resp_intr",
	"mrsb_resp_intr",
	"core_dw_status_0",
	"core_dw_status_1",
	"core_dw_status_2",
	"core_dw_status_3",
	"core_dw_status_4",
	"core_dw_status_5",
	"core_dw_status_6",
	"core_dw_status_7",
	"async_arc2cpu_sei_intr",
};

static const char * const gaudi2_tpc_interrupts_cause[GAUDI2_NUM_OF_TPC_INTR_CAUSE] = {
	"tpc_address_exceed_slm",
	"tpc_div_by_0",
	"tpc_spu_mac_overflow",
	"tpc_spu_addsub_overflow",
	"tpc_spu_abs_overflow",
	"tpc_spu_fma_fp_dst_nan",
	"tpc_spu_fma_fp_dst_inf",
	"tpc_spu_convert_fp_dst_nan",
	"tpc_spu_convert_fp_dst_inf",
	"tpc_spu_fp_dst_denorm",
	"tpc_vpu_mac_overflow",
	"tpc_vpu_addsub_overflow",
	"tpc_vpu_abs_overflow",
	"tpc_vpu_convert_fp_dst_nan",
	"tpc_vpu_convert_fp_dst_inf",
	"tpc_vpu_fma_fp_dst_nan",
	"tpc_vpu_fma_fp_dst_inf",
	"tpc_vpu_fp_dst_denorm",
	"tpc_assertions",
	"tpc_illegal_instruction",
	"tpc_pc_wrap_around",
	"tpc_qm_sw_err",
	"tpc_hbw_rresp_err",
	"tpc_hbw_bresp_err",
	"tpc_lbw_rresp_err",
	"tpc_lbw_bresp_err",
	"st_unlock_already_locked",
	"invalid_lock_access",
	"LD_L protection violation",
	"ST_L protection violation",
	"D$ L0CS mismatch",
};

static const char * const guadi2_mme_error_cause[GAUDI2_NUM_OF_MME_ERR_CAUSE] = {
	"agu_resp_intr",
	"qman_axi_err",
	"wap sei (wbc axi err)",
	"arc sei",
	"cfg access error",
	"qm_sw_err",
	"sbte_dbg_intr_0",
	"sbte_dbg_intr_1",
	"sbte_dbg_intr_2",
	"sbte_dbg_intr_3",
	"sbte_dbg_intr_4",
	"sbte_prtn_intr_0",
	"sbte_prtn_intr_1",
	"sbte_prtn_intr_2",
	"sbte_prtn_intr_3",
	"sbte_prtn_intr_4",
};

static const char * const guadi2_mme_wap_error_cause[GAUDI2_NUM_OF_MME_WAP_ERR_CAUSE] = {
	"WBC ERR RESP_0",
	"WBC ERR RESP_1",
	"AP SOURCE POS INF",
	"AP SOURCE NEG INF",
	"AP SOURCE NAN",
	"AP RESULT POS INF",
	"AP RESULT NEG INF",
};

static const char * const gaudi2_dma_core_interrupts_cause[GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE] = {
	"HBW Read returned with error RRESP",
	"HBW write returned with error BRESP",
	"LBW write returned with error BRESP",
	"descriptor_fifo_overflow",
	"KDMA SB LBW Read returned with error",
	"KDMA WBC LBW Write returned with error",
	"TRANSPOSE ENGINE DESC FIFO OVERFLOW",
	"WRONG CFG FOR COMMIT IN LIN DMA"
};

static const char * const gaudi2_kdma_core_interrupts_cause[GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE] = {
	"HBW/LBW Read returned with error RRESP",
	"HBW/LBW write returned with error BRESP",
	"LBW write returned with error BRESP",
	"descriptor_fifo_overflow",
	"KDMA SB LBW Read returned with error",
	"KDMA WBC LBW Write returned with error",
	"TRANSPOSE ENGINE DESC FIFO OVERFLOW",
	"WRONG CFG FOR COMMIT IN LIN DMA"
};

struct gaudi2_sm_sei_cause_data {
	const char *cause_name;
	const char *log_name;
};

static const struct gaudi2_sm_sei_cause_data
gaudi2_sm_sei_cause[GAUDI2_NUM_OF_SM_SEI_ERR_CAUSE] = {
	{"calculated SO value overflow/underflow", "SOB ID"},
	{"payload address of monitor is not aligned to 4B", "monitor addr"},
	{"armed monitor write got BRESP (SLVERR or DECERR)", "AXI id"},
};
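
/*
 * Illustrative sketch only (hypothetical helper): an SM SEI report carries
 * a cause index plus one extra log value; the table above names both the
 * cause and the meaning of that value so a report can print them together.
 */
static inline void gaudi2_example_print_sm_sei(u32 cause_idx, u32 log_val)
{
	if (cause_idx >= GAUDI2_NUM_OF_SM_SEI_ERR_CAUSE)
		return;

	pr_err("SM SEI: %s (%s: 0x%x)\n",
		gaudi2_sm_sei_cause[cause_idx].cause_name,
		gaudi2_sm_sei_cause[cause_idx].log_name, log_val);
}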

static const char * const
gaudi2_pmmu_fatal_interrupts_cause[GAUDI2_NUM_OF_PMMU_FATAL_ERR_CAUSE] = {
	"LATENCY_RD_OUT_FIFO_OVERRUN",
	"LATENCY_WR_OUT_FIFO_OVERRUN",
};

static const char * const
gaudi2_hif_fatal_interrupts_cause[GAUDI2_NUM_OF_HIF_FATAL_ERR_CAUSE] = {
	"LATENCY_RD_OUT_FIFO_OVERRUN",
	"LATENCY_WR_OUT_FIFO_OVERRUN",
};

static const char * const
gaudi2_psoc_axi_drain_interrupts_cause[GAUDI2_NUM_OF_AXI_DRAIN_ERR_CAUSE] = {
	"AXI drain HBW",
	"AXI drain LBW",
};

static const char * const
gaudi2_pcie_addr_dec_error_cause[GAUDI2_NUM_OF_PCIE_ADDR_DEC_ERR_CAUSE] = {
	"HBW error response",
	"LBW error response",
	"TLP is blocked by RR"
};

static const int gaudi2_queue_id_to_engine_id[] = {
	[GAUDI2_QUEUE_ID_PDMA_0_0...GAUDI2_QUEUE_ID_PDMA_0_3] = GAUDI2_ENGINE_ID_PDMA_0,
	[GAUDI2_QUEUE_ID_PDMA_1_0...GAUDI2_QUEUE_ID_PDMA_1_3] = GAUDI2_ENGINE_ID_PDMA_1,
	[GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE0_EDMA_0_3] =
			GAUDI2_DCORE0_ENGINE_ID_EDMA_0,
	[GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0...GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3] =
			GAUDI2_DCORE0_ENGINE_ID_EDMA_1,
	[GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE1_EDMA_0_3] =
			GAUDI2_DCORE1_ENGINE_ID_EDMA_0,
	[GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0...GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3] =
			GAUDI2_DCORE1_ENGINE_ID_EDMA_1,
	[GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE2_EDMA_0_3] =
			GAUDI2_DCORE2_ENGINE_ID_EDMA_0,
	[GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0...GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3] =
			GAUDI2_DCORE2_ENGINE_ID_EDMA_1,
	[GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE3_EDMA_0_3] =
			GAUDI2_DCORE3_ENGINE_ID_EDMA_0,
	[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0...GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3] =
			GAUDI2_DCORE3_ENGINE_ID_EDMA_1,
	[GAUDI2_QUEUE_ID_DCORE0_MME_0_0...GAUDI2_QUEUE_ID_DCORE0_MME_0_3] =
			GAUDI2_DCORE0_ENGINE_ID_MME,
	[GAUDI2_QUEUE_ID_DCORE1_MME_0_0...GAUDI2_QUEUE_ID_DCORE1_MME_0_3] =
			GAUDI2_DCORE1_ENGINE_ID_MME,
	[GAUDI2_QUEUE_ID_DCORE2_MME_0_0...GAUDI2_QUEUE_ID_DCORE2_MME_0_3] =
			GAUDI2_DCORE2_ENGINE_ID_MME,
	[GAUDI2_QUEUE_ID_DCORE3_MME_0_0...GAUDI2_QUEUE_ID_DCORE3_MME_0_3] =
			GAUDI2_DCORE3_ENGINE_ID_MME,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_0_0...GAUDI2_QUEUE_ID_DCORE0_TPC_0_3] =
			GAUDI2_DCORE0_ENGINE_ID_TPC_0,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_1_0...GAUDI2_QUEUE_ID_DCORE0_TPC_1_3] =
			GAUDI2_DCORE0_ENGINE_ID_TPC_1,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_2_0...GAUDI2_QUEUE_ID_DCORE0_TPC_2_3] =
			GAUDI2_DCORE0_ENGINE_ID_TPC_2,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_3_0...GAUDI2_QUEUE_ID_DCORE0_TPC_3_3] =
			GAUDI2_DCORE0_ENGINE_ID_TPC_3,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_4_0...GAUDI2_QUEUE_ID_DCORE0_TPC_4_3] =
			GAUDI2_DCORE0_ENGINE_ID_TPC_4,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_5_0...GAUDI2_QUEUE_ID_DCORE0_TPC_5_3] =
			GAUDI2_DCORE0_ENGINE_ID_TPC_5,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_6_0...GAUDI2_QUEUE_ID_DCORE0_TPC_6_3] =
			GAUDI2_DCORE0_ENGINE_ID_TPC_6,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_0_0...GAUDI2_QUEUE_ID_DCORE1_TPC_0_3] =
			GAUDI2_DCORE1_ENGINE_ID_TPC_0,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_1_0...GAUDI2_QUEUE_ID_DCORE1_TPC_1_3] =
			GAUDI2_DCORE1_ENGINE_ID_TPC_1,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_2_0...GAUDI2_QUEUE_ID_DCORE1_TPC_2_3] =
			GAUDI2_DCORE1_ENGINE_ID_TPC_2,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_3_0...GAUDI2_QUEUE_ID_DCORE1_TPC_3_3] =
			GAUDI2_DCORE1_ENGINE_ID_TPC_3,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_4_0...GAUDI2_QUEUE_ID_DCORE1_TPC_4_3] =
			GAUDI2_DCORE1_ENGINE_ID_TPC_4,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_5_0...GAUDI2_QUEUE_ID_DCORE1_TPC_5_3] =
			GAUDI2_DCORE1_ENGINE_ID_TPC_5,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_0_0...GAUDI2_QUEUE_ID_DCORE2_TPC_0_3] =
			GAUDI2_DCORE2_ENGINE_ID_TPC_0,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_1_0...GAUDI2_QUEUE_ID_DCORE2_TPC_1_3] =
			GAUDI2_DCORE2_ENGINE_ID_TPC_1,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_2_0...GAUDI2_QUEUE_ID_DCORE2_TPC_2_3] =
			GAUDI2_DCORE2_ENGINE_ID_TPC_2,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_3_0...GAUDI2_QUEUE_ID_DCORE2_TPC_3_3] =
			GAUDI2_DCORE2_ENGINE_ID_TPC_3,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_4_0...GAUDI2_QUEUE_ID_DCORE2_TPC_4_3] =
			GAUDI2_DCORE2_ENGINE_ID_TPC_4,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_5_0...GAUDI2_QUEUE_ID_DCORE2_TPC_5_3] =
			GAUDI2_DCORE2_ENGINE_ID_TPC_5,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_0_0...GAUDI2_QUEUE_ID_DCORE3_TPC_0_3] =
			GAUDI2_DCORE3_ENGINE_ID_TPC_0,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_1_0...GAUDI2_QUEUE_ID_DCORE3_TPC_1_3] =
			GAUDI2_DCORE3_ENGINE_ID_TPC_1,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_2_0...GAUDI2_QUEUE_ID_DCORE3_TPC_2_3] =
			GAUDI2_DCORE3_ENGINE_ID_TPC_2,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_3_0...GAUDI2_QUEUE_ID_DCORE3_TPC_3_3] =
			GAUDI2_DCORE3_ENGINE_ID_TPC_3,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_4_0...GAUDI2_QUEUE_ID_DCORE3_TPC_4_3] =
			GAUDI2_DCORE3_ENGINE_ID_TPC_4,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_5_0...GAUDI2_QUEUE_ID_DCORE3_TPC_5_3] =
			GAUDI2_DCORE3_ENGINE_ID_TPC_5,
	[GAUDI2_QUEUE_ID_NIC_0_0...GAUDI2_QUEUE_ID_NIC_0_3] = GAUDI2_ENGINE_ID_NIC0_0,
	[GAUDI2_QUEUE_ID_NIC_1_0...GAUDI2_QUEUE_ID_NIC_1_3] = GAUDI2_ENGINE_ID_NIC0_1,
	[GAUDI2_QUEUE_ID_NIC_2_0...GAUDI2_QUEUE_ID_NIC_2_3] = GAUDI2_ENGINE_ID_NIC1_0,
	[GAUDI2_QUEUE_ID_NIC_3_0...GAUDI2_QUEUE_ID_NIC_3_3] = GAUDI2_ENGINE_ID_NIC1_1,
	[GAUDI2_QUEUE_ID_NIC_4_0...GAUDI2_QUEUE_ID_NIC_4_3] = GAUDI2_ENGINE_ID_NIC2_0,
	[GAUDI2_QUEUE_ID_NIC_5_0...GAUDI2_QUEUE_ID_NIC_5_3] = GAUDI2_ENGINE_ID_NIC2_1,
	[GAUDI2_QUEUE_ID_NIC_6_0...GAUDI2_QUEUE_ID_NIC_6_3] = GAUDI2_ENGINE_ID_NIC3_0,
	[GAUDI2_QUEUE_ID_NIC_7_0...GAUDI2_QUEUE_ID_NIC_7_3] = GAUDI2_ENGINE_ID_NIC3_1,
	[GAUDI2_QUEUE_ID_NIC_8_0...GAUDI2_QUEUE_ID_NIC_8_3] = GAUDI2_ENGINE_ID_NIC4_0,
	[GAUDI2_QUEUE_ID_NIC_9_0...GAUDI2_QUEUE_ID_NIC_9_3] = GAUDI2_ENGINE_ID_NIC4_1,
	[GAUDI2_QUEUE_ID_NIC_10_0...GAUDI2_QUEUE_ID_NIC_10_3] = GAUDI2_ENGINE_ID_NIC5_0,
	[GAUDI2_QUEUE_ID_NIC_11_0...GAUDI2_QUEUE_ID_NIC_11_3] = GAUDI2_ENGINE_ID_NIC5_1,
	[GAUDI2_QUEUE_ID_NIC_12_0...GAUDI2_QUEUE_ID_NIC_12_3] = GAUDI2_ENGINE_ID_NIC6_0,
	[GAUDI2_QUEUE_ID_NIC_13_0...GAUDI2_QUEUE_ID_NIC_13_3] = GAUDI2_ENGINE_ID_NIC6_1,
	[GAUDI2_QUEUE_ID_NIC_14_0...GAUDI2_QUEUE_ID_NIC_14_3] = GAUDI2_ENGINE_ID_NIC7_0,
	[GAUDI2_QUEUE_ID_NIC_15_0...GAUDI2_QUEUE_ID_NIC_15_3] = GAUDI2_ENGINE_ID_NIC7_1,
	[GAUDI2_QUEUE_ID_NIC_16_0...GAUDI2_QUEUE_ID_NIC_16_3] = GAUDI2_ENGINE_ID_NIC8_0,
	[GAUDI2_QUEUE_ID_NIC_17_0...GAUDI2_QUEUE_ID_NIC_17_3] = GAUDI2_ENGINE_ID_NIC8_1,
	[GAUDI2_QUEUE_ID_NIC_18_0...GAUDI2_QUEUE_ID_NIC_18_3] = GAUDI2_ENGINE_ID_NIC9_0,
	[GAUDI2_QUEUE_ID_NIC_19_0...GAUDI2_QUEUE_ID_NIC_19_3] = GAUDI2_ENGINE_ID_NIC9_1,
	[GAUDI2_QUEUE_ID_NIC_20_0...GAUDI2_QUEUE_ID_NIC_20_3] = GAUDI2_ENGINE_ID_NIC10_0,
	[GAUDI2_QUEUE_ID_NIC_21_0...GAUDI2_QUEUE_ID_NIC_21_3] = GAUDI2_ENGINE_ID_NIC10_1,
	[GAUDI2_QUEUE_ID_NIC_22_0...GAUDI2_QUEUE_ID_NIC_22_3] = GAUDI2_ENGINE_ID_NIC11_0,
	[GAUDI2_QUEUE_ID_NIC_23_0...GAUDI2_QUEUE_ID_NIC_23_3] = GAUDI2_ENGINE_ID_NIC11_1,
	[GAUDI2_QUEUE_ID_ROT_0_0...GAUDI2_QUEUE_ID_ROT_0_3] = GAUDI2_ENGINE_ID_ROT_0,
	[GAUDI2_QUEUE_ID_ROT_1_0...GAUDI2_QUEUE_ID_ROT_1_3] = GAUDI2_ENGINE_ID_ROT_1,
};
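
/*
 * Illustrative sketch only (hypothetical helper): the map above is built
 * from designated ranges, so all four hardware queues of an engine resolve
 * to the same engine id. A lookup should still bound the index before
 * dereferencing, since ids past the last range are out of the array.
 */
static inline int gaudi2_example_queue_to_engine(u32 queue_id)
{
	if (queue_id >= ARRAY_SIZE(gaudi2_queue_id_to_engine_id))
		return -EINVAL;

	return gaudi2_queue_id_to_engine_id[queue_id];
}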

const u32 gaudi2_qm_blocks_bases[GAUDI2_QUEUE_ID_SIZE] = {
	[GAUDI2_QUEUE_ID_PDMA_0_0] = mmPDMA0_QM_BASE,
	[GAUDI2_QUEUE_ID_PDMA_0_1] = mmPDMA0_QM_BASE,
	[GAUDI2_QUEUE_ID_PDMA_0_2] = mmPDMA0_QM_BASE,
	[GAUDI2_QUEUE_ID_PDMA_0_3] = mmPDMA0_QM_BASE,
	[GAUDI2_QUEUE_ID_PDMA_1_0] = mmPDMA1_QM_BASE,
	[GAUDI2_QUEUE_ID_PDMA_1_1] = mmPDMA1_QM_BASE,
	[GAUDI2_QUEUE_ID_PDMA_1_2] = mmPDMA1_QM_BASE,
	[GAUDI2_QUEUE_ID_PDMA_1_3] = mmPDMA1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0] = mmDCORE0_EDMA0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_EDMA_0_1] = mmDCORE0_EDMA0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_EDMA_0_2] = mmDCORE0_EDMA0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_EDMA_0_3] = mmDCORE0_EDMA0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0] = mmDCORE0_EDMA1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_EDMA_1_1] = mmDCORE0_EDMA1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_EDMA_1_2] = mmDCORE0_EDMA1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3] = mmDCORE0_EDMA1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_MME_0_0] = mmDCORE0_MME_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_MME_0_1] = mmDCORE0_MME_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_MME_0_2] = mmDCORE0_MME_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_MME_0_3] = mmDCORE0_MME_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_0_0] = mmDCORE0_TPC0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_0_1] = mmDCORE0_TPC0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_0_2] = mmDCORE0_TPC0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_0_3] = mmDCORE0_TPC0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_1_0] = mmDCORE0_TPC1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_1_1] = mmDCORE0_TPC1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_1_2] = mmDCORE0_TPC1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_1_3] = mmDCORE0_TPC1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_2_0] = mmDCORE0_TPC2_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_2_1] = mmDCORE0_TPC2_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_2_2] = mmDCORE0_TPC2_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_2_3] = mmDCORE0_TPC2_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_3_0] = mmDCORE0_TPC3_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_3_1] = mmDCORE0_TPC3_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_3_2] = mmDCORE0_TPC3_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_3_3] = mmDCORE0_TPC3_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_4_0] = mmDCORE0_TPC4_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_4_1] = mmDCORE0_TPC4_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_4_2] = mmDCORE0_TPC4_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_4_3] = mmDCORE0_TPC4_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_5_0] = mmDCORE0_TPC5_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_5_1] = mmDCORE0_TPC5_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_5_2] = mmDCORE0_TPC5_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_5_3] = mmDCORE0_TPC5_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_6_0] = mmDCORE0_TPC6_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_6_1] = mmDCORE0_TPC6_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_6_2] = mmDCORE0_TPC6_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_6_3] = mmDCORE0_TPC6_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0] = mmDCORE1_EDMA0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_EDMA_0_1] = mmDCORE1_EDMA0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_EDMA_0_2] = mmDCORE1_EDMA0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_EDMA_0_3] = mmDCORE1_EDMA0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0] = mmDCORE1_EDMA1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_EDMA_1_1] = mmDCORE1_EDMA1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_EDMA_1_2] = mmDCORE1_EDMA1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3] = mmDCORE1_EDMA1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_MME_0_0] = mmDCORE1_MME_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_MME_0_1] = mmDCORE1_MME_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_MME_0_2] = mmDCORE1_MME_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_MME_0_3] = mmDCORE1_MME_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_0_0] = mmDCORE1_TPC0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_0_1] = mmDCORE1_TPC0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_0_2] = mmDCORE1_TPC0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_0_3] = mmDCORE1_TPC0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_1_0] = mmDCORE1_TPC1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_1_1] = mmDCORE1_TPC1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_1_2] = mmDCORE1_TPC1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_1_3] = mmDCORE1_TPC1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_2_0] = mmDCORE1_TPC2_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_2_1] = mmDCORE1_TPC2_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_2_2] = mmDCORE1_TPC2_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_2_3] = mmDCORE1_TPC2_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_3_0] = mmDCORE1_TPC3_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_3_1] = mmDCORE1_TPC3_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_3_2] = mmDCORE1_TPC3_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_3_3] = mmDCORE1_TPC3_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_4_0] = mmDCORE1_TPC4_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_4_1] = mmDCORE1_TPC4_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_4_2] = mmDCORE1_TPC4_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_4_3] = mmDCORE1_TPC4_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_5_0] = mmDCORE1_TPC5_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_5_1] = mmDCORE1_TPC5_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_5_2] = mmDCORE1_TPC5_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_5_3] = mmDCORE1_TPC5_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0] = mmDCORE2_EDMA0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_EDMA_0_1] = mmDCORE2_EDMA0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_EDMA_0_2] = mmDCORE2_EDMA0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_EDMA_0_3] = mmDCORE2_EDMA0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0] = mmDCORE2_EDMA1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_EDMA_1_1] = mmDCORE2_EDMA1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_EDMA_1_2] = mmDCORE2_EDMA1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3] = mmDCORE2_EDMA1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_MME_0_0] = mmDCORE2_MME_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_MME_0_1] = mmDCORE2_MME_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_MME_0_2] = mmDCORE2_MME_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_MME_0_3] = mmDCORE2_MME_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_0_0] = mmDCORE2_TPC0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_0_1] = mmDCORE2_TPC0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_0_2] = mmDCORE2_TPC0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_0_3] = mmDCORE2_TPC0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_1_0] = mmDCORE2_TPC1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_1_1] = mmDCORE2_TPC1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_1_2] = mmDCORE2_TPC1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_1_3] = mmDCORE2_TPC1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_2_0] = mmDCORE2_TPC2_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_2_1] = mmDCORE2_TPC2_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_2_2] = mmDCORE2_TPC2_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_2_3] = mmDCORE2_TPC2_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_3_0] = mmDCORE2_TPC3_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_3_1] = mmDCORE2_TPC3_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_3_2] = mmDCORE2_TPC3_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_3_3] = mmDCORE2_TPC3_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_4_0] = mmDCORE2_TPC4_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_4_1] = mmDCORE2_TPC4_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_4_2] = mmDCORE2_TPC4_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_4_3] = mmDCORE2_TPC4_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_5_0] = mmDCORE2_TPC5_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_5_1] = mmDCORE2_TPC5_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_5_2] = mmDCORE2_TPC5_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_5_3] = mmDCORE2_TPC5_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0] = mmDCORE3_EDMA0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_EDMA_0_1] = mmDCORE3_EDMA0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_EDMA_0_2] = mmDCORE3_EDMA0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_EDMA_0_3] = mmDCORE3_EDMA0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0] = mmDCORE3_EDMA1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_1] = mmDCORE3_EDMA1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_2] = mmDCORE3_EDMA1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3] = mmDCORE3_EDMA1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_MME_0_0] = mmDCORE3_MME_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_MME_0_1] = mmDCORE3_MME_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_MME_0_2] = mmDCORE3_MME_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_MME_0_3] = mmDCORE3_MME_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_0_0] = mmDCORE3_TPC0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_0_1] = mmDCORE3_TPC0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_0_2] = mmDCORE3_TPC0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_0_3] = mmDCORE3_TPC0_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_1_0] = mmDCORE3_TPC1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_1_1] = mmDCORE3_TPC1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_1_2] = mmDCORE3_TPC1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_1_3] = mmDCORE3_TPC1_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_2_0] = mmDCORE3_TPC2_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_2_1] = mmDCORE3_TPC2_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_2_2] = mmDCORE3_TPC2_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_2_3] = mmDCORE3_TPC2_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_3_0] = mmDCORE3_TPC3_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_3_1] = mmDCORE3_TPC3_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_3_2] = mmDCORE3_TPC3_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_3_3] = mmDCORE3_TPC3_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_4_0] = mmDCORE3_TPC4_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_4_1] = mmDCORE3_TPC4_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_4_2] = mmDCORE3_TPC4_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_4_3] = mmDCORE3_TPC4_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_5_0] = mmDCORE3_TPC5_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_5_1] = mmDCORE3_TPC5_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_5_2] = mmDCORE3_TPC5_QM_BASE,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_5_3] = mmDCORE3_TPC5_QM_BASE,
	[GAUDI2_QUEUE_ID_NIC_0_0] = mmNIC0_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_0_1] = mmNIC0_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_0_2] = mmNIC0_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_0_3] = mmNIC0_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_1_0] = mmNIC0_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_1_1] = mmNIC0_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_1_2] = mmNIC0_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_1_3] = mmNIC0_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_2_0] = mmNIC1_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_2_1] = mmNIC1_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_2_2] = mmNIC1_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_2_3] = mmNIC1_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_3_0] = mmNIC1_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_3_1] = mmNIC1_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_3_2] = mmNIC1_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_3_3] = mmNIC1_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_4_0] = mmNIC2_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_4_1] = mmNIC2_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_4_2] = mmNIC2_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_4_3] = mmNIC2_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_5_0] = mmNIC2_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_5_1] = mmNIC2_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_5_2] = mmNIC2_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_5_3] = mmNIC2_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_6_0] = mmNIC3_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_6_1] = mmNIC3_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_6_2] = mmNIC3_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_6_3] = mmNIC3_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_7_0] = mmNIC3_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_7_1] = mmNIC3_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_7_2] = mmNIC3_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_7_3] = mmNIC3_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_8_0] = mmNIC4_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_8_1] = mmNIC4_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_8_2] = mmNIC4_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_8_3] = mmNIC4_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_9_0] = mmNIC4_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_9_1] = mmNIC4_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_9_2] = mmNIC4_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_9_3] = mmNIC4_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_10_0] = mmNIC5_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_10_1] = mmNIC5_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_10_2] = mmNIC5_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_10_3] = mmNIC5_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_11_0] = mmNIC5_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_11_1] = mmNIC5_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_11_2] = mmNIC5_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_11_3] = mmNIC5_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_12_0] = mmNIC6_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_12_1] = mmNIC6_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_12_2] = mmNIC6_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_12_3] = mmNIC6_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_13_0] = mmNIC6_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_13_1] = mmNIC6_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_13_2] = mmNIC6_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_13_3] = mmNIC6_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_14_0] = mmNIC7_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_14_1] = mmNIC7_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_14_2] = mmNIC7_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_14_3] = mmNIC7_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_15_0] = mmNIC7_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_15_1] = mmNIC7_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_15_2] = mmNIC7_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_15_3] = mmNIC7_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_16_0] = mmNIC8_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_16_1] = mmNIC8_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_16_2] = mmNIC8_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_16_3] = mmNIC8_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_17_0] = mmNIC8_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_17_1] = mmNIC8_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_17_2] = mmNIC8_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_17_3] = mmNIC8_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_18_0] = mmNIC9_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_18_1] = mmNIC9_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_18_2] = mmNIC9_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_18_3] = mmNIC9_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_19_0] = mmNIC9_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_19_1] = mmNIC9_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_19_2] = mmNIC9_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_19_3] = mmNIC9_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_20_0] = mmNIC10_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_20_1] = mmNIC10_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_20_2] = mmNIC10_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_20_3] = mmNIC10_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_21_0] = mmNIC10_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_21_1] = mmNIC10_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_21_2] = mmNIC10_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_21_3] = mmNIC10_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_22_0] = mmNIC11_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_22_1] = mmNIC11_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_22_2] = mmNIC11_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_22_3] = mmNIC11_QM0_BASE,
	[GAUDI2_QUEUE_ID_NIC_23_0] = mmNIC11_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_23_1] = mmNIC11_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_23_2] = mmNIC11_QM1_BASE,
	[GAUDI2_QUEUE_ID_NIC_23_3] = mmNIC11_QM1_BASE,
	[GAUDI2_QUEUE_ID_ROT_0_0] = mmROT0_QM_BASE,
	[GAUDI2_QUEUE_ID_ROT_0_1] = mmROT0_QM_BASE,
	[GAUDI2_QUEUE_ID_ROT_0_2] = mmROT0_QM_BASE,
	[GAUDI2_QUEUE_ID_ROT_0_3] = mmROT0_QM_BASE,
	[GAUDI2_QUEUE_ID_ROT_1_0] = mmROT1_QM_BASE,
	[GAUDI2_QUEUE_ID_ROT_1_1] = mmROT1_QM_BASE,
	[GAUDI2_QUEUE_ID_ROT_1_2] = mmROT1_QM_BASE,
	[GAUDI2_QUEUE_ID_ROT_1_3] = mmROT1_QM_BASE
};
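
/*
 * Illustrative sketch only: all four queues of a QMAN share one block base,
 * so a per-queue register address is the base from the table above plus a
 * register offset. "reg_offset" is a placeholder here; real offsets come
 * from the generated register headers.
 */
static inline u32 gaudi2_example_qm_reg_addr(u32 queue_id, u32 reg_offset)
{
	return gaudi2_qm_blocks_bases[queue_id] + reg_offset;
}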

static const u32 gaudi2_arc_blocks_bases[NUM_ARC_CPUS] = {
	[CPU_ID_SCHED_ARC0] = mmARC_FARM_ARC0_AUX_BASE,
	[CPU_ID_SCHED_ARC1] = mmARC_FARM_ARC1_AUX_BASE,
	[CPU_ID_SCHED_ARC2] = mmARC_FARM_ARC2_AUX_BASE,
	[CPU_ID_SCHED_ARC3] = mmARC_FARM_ARC3_AUX_BASE,
	[CPU_ID_SCHED_ARC4] = mmDCORE1_MME_QM_ARC_AUX_BASE,
	[CPU_ID_SCHED_ARC5] = mmDCORE3_MME_QM_ARC_AUX_BASE,
	[CPU_ID_TPC_QMAN_ARC0] = mmDCORE0_TPC0_QM_ARC_AUX_BASE,
	[CPU_ID_TPC_QMAN_ARC1] = mmDCORE0_TPC1_QM_ARC_AUX_BASE,
	[CPU_ID_TPC_QMAN_ARC2] = mmDCORE0_TPC2_QM_ARC_AUX_BASE,
	[CPU_ID_TPC_QMAN_ARC3] = mmDCORE0_TPC3_QM_ARC_AUX_BASE,
	[CPU_ID_TPC_QMAN_ARC4] = mmDCORE0_TPC4_QM_ARC_AUX_BASE,
	[CPU_ID_TPC_QMAN_ARC5] = mmDCORE0_TPC5_QM_ARC_AUX_BASE,
	[CPU_ID_TPC_QMAN_ARC6] = mmDCORE1_TPC0_QM_ARC_AUX_BASE,
	[CPU_ID_TPC_QMAN_ARC7] = mmDCORE1_TPC1_QM_ARC_AUX_BASE,
	[CPU_ID_TPC_QMAN_ARC8] = mmDCORE1_TPC2_QM_ARC_AUX_BASE,
	[CPU_ID_TPC_QMAN_ARC9] = mmDCORE1_TPC3_QM_ARC_AUX_BASE,
	[CPU_ID_TPC_QMAN_ARC10] = mmDCORE1_TPC4_QM_ARC_AUX_BASE,
	[CPU_ID_TPC_QMAN_ARC11] = mmDCORE1_TPC5_QM_ARC_AUX_BASE,
	[CPU_ID_TPC_QMAN_ARC12] = mmDCORE2_TPC0_QM_ARC_AUX_BASE,
	[CPU_ID_TPC_QMAN_ARC13] = mmDCORE2_TPC1_QM_ARC_AUX_BASE,
	[CPU_ID_TPC_QMAN_ARC14] = mmDCORE2_TPC2_QM_ARC_AUX_BASE,
	[CPU_ID_TPC_QMAN_ARC15] = mmDCORE2_TPC3_QM_ARC_AUX_BASE,
	[CPU_ID_TPC_QMAN_ARC16] = mmDCORE2_TPC4_QM_ARC_AUX_BASE,
	[CPU_ID_TPC_QMAN_ARC17] = mmDCORE2_TPC5_QM_ARC_AUX_BASE,
	[CPU_ID_TPC_QMAN_ARC18] = mmDCORE3_TPC0_QM_ARC_AUX_BASE,
	[CPU_ID_TPC_QMAN_ARC19] = mmDCORE3_TPC1_QM_ARC_AUX_BASE,
	[CPU_ID_TPC_QMAN_ARC20] = mmDCORE3_TPC2_QM_ARC_AUX_BASE,
	[CPU_ID_TPC_QMAN_ARC21] = mmDCORE3_TPC3_QM_ARC_AUX_BASE,
	[CPU_ID_TPC_QMAN_ARC22] = mmDCORE3_TPC4_QM_ARC_AUX_BASE,
	[CPU_ID_TPC_QMAN_ARC23] = mmDCORE3_TPC5_QM_ARC_AUX_BASE,
	[CPU_ID_TPC_QMAN_ARC24] = mmDCORE0_TPC6_QM_ARC_AUX_BASE,
	[CPU_ID_MME_QMAN_ARC0] = mmDCORE0_MME_QM_ARC_AUX_BASE,
	[CPU_ID_MME_QMAN_ARC1] = mmDCORE2_MME_QM_ARC_AUX_BASE,
	[CPU_ID_EDMA_QMAN_ARC0] = mmDCORE0_EDMA0_QM_ARC_AUX_BASE,
	[CPU_ID_EDMA_QMAN_ARC1] = mmDCORE0_EDMA1_QM_ARC_AUX_BASE,
	[CPU_ID_EDMA_QMAN_ARC2] = mmDCORE1_EDMA0_QM_ARC_AUX_BASE,
	[CPU_ID_EDMA_QMAN_ARC3] = mmDCORE1_EDMA1_QM_ARC_AUX_BASE,
	[CPU_ID_EDMA_QMAN_ARC4] = mmDCORE2_EDMA0_QM_ARC_AUX_BASE,
	[CPU_ID_EDMA_QMAN_ARC5] = mmDCORE2_EDMA1_QM_ARC_AUX_BASE,
	[CPU_ID_EDMA_QMAN_ARC6] = mmDCORE3_EDMA0_QM_ARC_AUX_BASE,
	[CPU_ID_EDMA_QMAN_ARC7] = mmDCORE3_EDMA1_QM_ARC_AUX_BASE,
	[CPU_ID_PDMA_QMAN_ARC0] = mmPDMA0_QM_ARC_AUX_BASE,
	[CPU_ID_PDMA_QMAN_ARC1] = mmPDMA1_QM_ARC_AUX_BASE,
	[CPU_ID_ROT_QMAN_ARC0] = mmROT0_QM_ARC_AUX_BASE,
	[CPU_ID_ROT_QMAN_ARC1] = mmROT1_QM_ARC_AUX_BASE,
	[CPU_ID_NIC_QMAN_ARC0] = mmNIC0_QM_ARC_AUX0_BASE,
	[CPU_ID_NIC_QMAN_ARC1] = mmNIC0_QM_ARC_AUX1_BASE,
	[CPU_ID_NIC_QMAN_ARC2] = mmNIC1_QM_ARC_AUX0_BASE,
	[CPU_ID_NIC_QMAN_ARC3] = mmNIC1_QM_ARC_AUX1_BASE,
	[CPU_ID_NIC_QMAN_ARC4] = mmNIC2_QM_ARC_AUX0_BASE,
	[CPU_ID_NIC_QMAN_ARC5] = mmNIC2_QM_ARC_AUX1_BASE,
	[CPU_ID_NIC_QMAN_ARC6] = mmNIC3_QM_ARC_AUX0_BASE,
	[CPU_ID_NIC_QMAN_ARC7] = mmNIC3_QM_ARC_AUX1_BASE,
	[CPU_ID_NIC_QMAN_ARC8] = mmNIC4_QM_ARC_AUX0_BASE,
	[CPU_ID_NIC_QMAN_ARC9] = mmNIC4_QM_ARC_AUX1_BASE,
	[CPU_ID_NIC_QMAN_ARC10] = mmNIC5_QM_ARC_AUX0_BASE,
	[CPU_ID_NIC_QMAN_ARC11] = mmNIC5_QM_ARC_AUX1_BASE,
	[CPU_ID_NIC_QMAN_ARC12] = mmNIC6_QM_ARC_AUX0_BASE,
	[CPU_ID_NIC_QMAN_ARC13] = mmNIC6_QM_ARC_AUX1_BASE,
	[CPU_ID_NIC_QMAN_ARC14] = mmNIC7_QM_ARC_AUX0_BASE,
	[CPU_ID_NIC_QMAN_ARC15] = mmNIC7_QM_ARC_AUX1_BASE,
	[CPU_ID_NIC_QMAN_ARC16] = mmNIC8_QM_ARC_AUX0_BASE,
	[CPU_ID_NIC_QMAN_ARC17] = mmNIC8_QM_ARC_AUX1_BASE,
	[CPU_ID_NIC_QMAN_ARC18] = mmNIC9_QM_ARC_AUX0_BASE,
	[CPU_ID_NIC_QMAN_ARC19] = mmNIC9_QM_ARC_AUX1_BASE,
	[CPU_ID_NIC_QMAN_ARC20] = mmNIC10_QM_ARC_AUX0_BASE,
	[CPU_ID_NIC_QMAN_ARC21] = mmNIC10_QM_ARC_AUX1_BASE,
	[CPU_ID_NIC_QMAN_ARC22] = mmNIC11_QM_ARC_AUX0_BASE,
	[CPU_ID_NIC_QMAN_ARC23] = mmNIC11_QM_ARC_AUX1_BASE,
};

static const u32 gaudi2_arc_dccm_bases[NUM_ARC_CPUS] = {
	[CPU_ID_SCHED_ARC0] = mmARC_FARM_ARC0_DCCM0_BASE,
	[CPU_ID_SCHED_ARC1] = mmARC_FARM_ARC1_DCCM0_BASE,
	[CPU_ID_SCHED_ARC2] = mmARC_FARM_ARC2_DCCM0_BASE,
	[CPU_ID_SCHED_ARC3] = mmARC_FARM_ARC3_DCCM0_BASE,
	[CPU_ID_SCHED_ARC4] = mmDCORE1_MME_QM_ARC_DCCM_BASE,
	[CPU_ID_SCHED_ARC5] = mmDCORE3_MME_QM_ARC_DCCM_BASE,
	[CPU_ID_TPC_QMAN_ARC0] = mmDCORE0_TPC0_QM_DCCM_BASE,
	[CPU_ID_TPC_QMAN_ARC1] = mmDCORE0_TPC1_QM_DCCM_BASE,
	[CPU_ID_TPC_QMAN_ARC2] = mmDCORE0_TPC2_QM_DCCM_BASE,
	[CPU_ID_TPC_QMAN_ARC3] = mmDCORE0_TPC3_QM_DCCM_BASE,
	[CPU_ID_TPC_QMAN_ARC4] = mmDCORE0_TPC4_QM_DCCM_BASE,
	[CPU_ID_TPC_QMAN_ARC5] = mmDCORE0_TPC5_QM_DCCM_BASE,
	[CPU_ID_TPC_QMAN_ARC6] = mmDCORE1_TPC0_QM_DCCM_BASE,
	[CPU_ID_TPC_QMAN_ARC7] = mmDCORE1_TPC1_QM_DCCM_BASE,
	[CPU_ID_TPC_QMAN_ARC8] = mmDCORE1_TPC2_QM_DCCM_BASE,
	[CPU_ID_TPC_QMAN_ARC9] = mmDCORE1_TPC3_QM_DCCM_BASE,
	[CPU_ID_TPC_QMAN_ARC10] = mmDCORE1_TPC4_QM_DCCM_BASE,
	[CPU_ID_TPC_QMAN_ARC11] = mmDCORE1_TPC5_QM_DCCM_BASE,
	[CPU_ID_TPC_QMAN_ARC12] = mmDCORE2_TPC0_QM_DCCM_BASE,
	[CPU_ID_TPC_QMAN_ARC13] = mmDCORE2_TPC1_QM_DCCM_BASE,
	[CPU_ID_TPC_QMAN_ARC14] = mmDCORE2_TPC2_QM_DCCM_BASE,
	[CPU_ID_TPC_QMAN_ARC15] = mmDCORE2_TPC3_QM_DCCM_BASE,
	[CPU_ID_TPC_QMAN_ARC16] = mmDCORE2_TPC4_QM_DCCM_BASE,
	[CPU_ID_TPC_QMAN_ARC17] = mmDCORE2_TPC5_QM_DCCM_BASE,
	[CPU_ID_TPC_QMAN_ARC18] = mmDCORE3_TPC0_QM_DCCM_BASE,
	[CPU_ID_TPC_QMAN_ARC19] = mmDCORE3_TPC1_QM_DCCM_BASE,
	[CPU_ID_TPC_QMAN_ARC20] = mmDCORE3_TPC2_QM_DCCM_BASE,
	[CPU_ID_TPC_QMAN_ARC21] = mmDCORE3_TPC3_QM_DCCM_BASE,
	[CPU_ID_TPC_QMAN_ARC22] = mmDCORE3_TPC4_QM_DCCM_BASE,
	[CPU_ID_TPC_QMAN_ARC23] = mmDCORE3_TPC5_QM_DCCM_BASE,
	[CPU_ID_TPC_QMAN_ARC24] = mmDCORE0_TPC6_QM_DCCM_BASE,
	[CPU_ID_MME_QMAN_ARC0] = mmDCORE0_MME_QM_ARC_DCCM_BASE,
	[CPU_ID_MME_QMAN_ARC1] = mmDCORE2_MME_QM_ARC_DCCM_BASE,
	[CPU_ID_EDMA_QMAN_ARC0] = mmDCORE0_EDMA0_QM_DCCM_BASE,
	[CPU_ID_EDMA_QMAN_ARC1] = mmDCORE0_EDMA1_QM_DCCM_BASE,
	[CPU_ID_EDMA_QMAN_ARC2] = mmDCORE1_EDMA0_QM_DCCM_BASE,
	[CPU_ID_EDMA_QMAN_ARC3] = mmDCORE1_EDMA1_QM_DCCM_BASE,
	[CPU_ID_EDMA_QMAN_ARC4] = mmDCORE2_EDMA0_QM_DCCM_BASE,
	[CPU_ID_EDMA_QMAN_ARC5] = mmDCORE2_EDMA1_QM_DCCM_BASE,
	[CPU_ID_EDMA_QMAN_ARC6] = mmDCORE3_EDMA0_QM_DCCM_BASE,
	[CPU_ID_EDMA_QMAN_ARC7] = mmDCORE3_EDMA1_QM_DCCM_BASE,
	[CPU_ID_PDMA_QMAN_ARC0] = mmPDMA0_QM_ARC_DCCM_BASE,
	[CPU_ID_PDMA_QMAN_ARC1] = mmPDMA1_QM_ARC_DCCM_BASE,
	[CPU_ID_ROT_QMAN_ARC0] = mmROT0_QM_ARC_DCCM_BASE,
	[CPU_ID_ROT_QMAN_ARC1] = mmROT1_QM_ARC_DCCM_BASE,
	[CPU_ID_NIC_QMAN_ARC0] = mmNIC0_QM_DCCM0_BASE,
	[CPU_ID_NIC_QMAN_ARC1] = mmNIC0_QM_DCCM1_BASE,
	[CPU_ID_NIC_QMAN_ARC2] = mmNIC1_QM_DCCM0_BASE,
	[CPU_ID_NIC_QMAN_ARC3] = mmNIC1_QM_DCCM1_BASE,
	[CPU_ID_NIC_QMAN_ARC4] = mmNIC2_QM_DCCM0_BASE,
	[CPU_ID_NIC_QMAN_ARC5] = mmNIC2_QM_DCCM1_BASE,
	[CPU_ID_NIC_QMAN_ARC6] = mmNIC3_QM_DCCM0_BASE,
	[CPU_ID_NIC_QMAN_ARC7] = mmNIC3_QM_DCCM1_BASE,
	[CPU_ID_NIC_QMAN_ARC8] = mmNIC4_QM_DCCM0_BASE,
	[CPU_ID_NIC_QMAN_ARC9] = mmNIC4_QM_DCCM1_BASE,
	[CPU_ID_NIC_QMAN_ARC10] = mmNIC5_QM_DCCM0_BASE,
	[CPU_ID_NIC_QMAN_ARC11] = mmNIC5_QM_DCCM1_BASE,
	[CPU_ID_NIC_QMAN_ARC12] = mmNIC6_QM_DCCM0_BASE,
	[CPU_ID_NIC_QMAN_ARC13] = mmNIC6_QM_DCCM1_BASE,
	[CPU_ID_NIC_QMAN_ARC14] = mmNIC7_QM_DCCM0_BASE,
	[CPU_ID_NIC_QMAN_ARC15] = mmNIC7_QM_DCCM1_BASE,
	[CPU_ID_NIC_QMAN_ARC16] = mmNIC8_QM_DCCM0_BASE,
	[CPU_ID_NIC_QMAN_ARC17] = mmNIC8_QM_DCCM1_BASE,
	[CPU_ID_NIC_QMAN_ARC18] = mmNIC9_QM_DCCM0_BASE,
	[CPU_ID_NIC_QMAN_ARC19] = mmNIC9_QM_DCCM1_BASE,
	[CPU_ID_NIC_QMAN_ARC20] = mmNIC10_QM_DCCM0_BASE,
	[CPU_ID_NIC_QMAN_ARC21] = mmNIC10_QM_DCCM1_BASE,
	[CPU_ID_NIC_QMAN_ARC22] = mmNIC11_QM_DCCM0_BASE,
	[CPU_ID_NIC_QMAN_ARC23] = mmNIC11_QM_DCCM1_BASE,
};
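
/*
 * Illustrative sketch only (hypothetical helper): both ARC tables are
 * indexed by the same CPU_ID_* value, so one id fetches a core's AUX
 * control block and its DCCM window together.
 */
static inline void gaudi2_example_arc_bases(u32 cpu_id, u32 *aux_base, u32 *dccm_base)
{
	*aux_base = gaudi2_arc_blocks_bases[cpu_id];
	*dccm_base = gaudi2_arc_dccm_bases[cpu_id];
}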

const u32 gaudi2_mme_ctrl_lo_blocks_bases[MME_ID_SIZE] = {
	[MME_ID_DCORE0] = mmDCORE0_MME_CTRL_LO_BASE,
	[MME_ID_DCORE1] = mmDCORE1_MME_CTRL_LO_BASE,
	[MME_ID_DCORE2] = mmDCORE2_MME_CTRL_LO_BASE,
	[MME_ID_DCORE3] = mmDCORE3_MME_CTRL_LO_BASE,
};

static const u32 gaudi2_queue_id_to_arc_id[GAUDI2_QUEUE_ID_SIZE] = {
	[GAUDI2_QUEUE_ID_PDMA_0_0] = CPU_ID_PDMA_QMAN_ARC0,
	[GAUDI2_QUEUE_ID_PDMA_0_1] = CPU_ID_PDMA_QMAN_ARC0,
	[GAUDI2_QUEUE_ID_PDMA_0_2] = CPU_ID_PDMA_QMAN_ARC0,
	[GAUDI2_QUEUE_ID_PDMA_0_3] = CPU_ID_PDMA_QMAN_ARC0,
	[GAUDI2_QUEUE_ID_PDMA_1_0] = CPU_ID_PDMA_QMAN_ARC1,
	[GAUDI2_QUEUE_ID_PDMA_1_1] = CPU_ID_PDMA_QMAN_ARC1,
	[GAUDI2_QUEUE_ID_PDMA_1_2] = CPU_ID_PDMA_QMAN_ARC1,
	[GAUDI2_QUEUE_ID_PDMA_1_3] = CPU_ID_PDMA_QMAN_ARC1,
	[GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0] = CPU_ID_EDMA_QMAN_ARC0,
	[GAUDI2_QUEUE_ID_DCORE0_EDMA_0_1] = CPU_ID_EDMA_QMAN_ARC0,
	[GAUDI2_QUEUE_ID_DCORE0_EDMA_0_2] = CPU_ID_EDMA_QMAN_ARC0,
	[GAUDI2_QUEUE_ID_DCORE0_EDMA_0_3] = CPU_ID_EDMA_QMAN_ARC0,
	[GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0] = CPU_ID_EDMA_QMAN_ARC1,
	[GAUDI2_QUEUE_ID_DCORE0_EDMA_1_1] = CPU_ID_EDMA_QMAN_ARC1,
	[GAUDI2_QUEUE_ID_DCORE0_EDMA_1_2] = CPU_ID_EDMA_QMAN_ARC1,
	[GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3] = CPU_ID_EDMA_QMAN_ARC1,
	[GAUDI2_QUEUE_ID_DCORE0_MME_0_0] = CPU_ID_MME_QMAN_ARC0,
	[GAUDI2_QUEUE_ID_DCORE0_MME_0_1] = CPU_ID_MME_QMAN_ARC0,
	[GAUDI2_QUEUE_ID_DCORE0_MME_0_2] = CPU_ID_MME_QMAN_ARC0,
	[GAUDI2_QUEUE_ID_DCORE0_MME_0_3] = CPU_ID_MME_QMAN_ARC0,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_0_0] = CPU_ID_TPC_QMAN_ARC0,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_0_1] = CPU_ID_TPC_QMAN_ARC0,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_0_2] = CPU_ID_TPC_QMAN_ARC0,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_0_3] = CPU_ID_TPC_QMAN_ARC0,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_1_0] = CPU_ID_TPC_QMAN_ARC1,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_1_1] = CPU_ID_TPC_QMAN_ARC1,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_1_2] = CPU_ID_TPC_QMAN_ARC1,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_1_3] = CPU_ID_TPC_QMAN_ARC1,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_2_0] = CPU_ID_TPC_QMAN_ARC2,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_2_1] = CPU_ID_TPC_QMAN_ARC2,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_2_2] = CPU_ID_TPC_QMAN_ARC2,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_2_3] = CPU_ID_TPC_QMAN_ARC2,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_3_0] = CPU_ID_TPC_QMAN_ARC3,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_3_1] = CPU_ID_TPC_QMAN_ARC3,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_3_2] = CPU_ID_TPC_QMAN_ARC3,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_3_3] = CPU_ID_TPC_QMAN_ARC3,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_4_0] = CPU_ID_TPC_QMAN_ARC4,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_4_1] = CPU_ID_TPC_QMAN_ARC4,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_4_2] = CPU_ID_TPC_QMAN_ARC4,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_4_3] = CPU_ID_TPC_QMAN_ARC4,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_5_0] = CPU_ID_TPC_QMAN_ARC5,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_5_1] = CPU_ID_TPC_QMAN_ARC5,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_5_2] = CPU_ID_TPC_QMAN_ARC5,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_5_3] = CPU_ID_TPC_QMAN_ARC5,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_6_0] = CPU_ID_TPC_QMAN_ARC24,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_6_1] = CPU_ID_TPC_QMAN_ARC24,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_6_2] = CPU_ID_TPC_QMAN_ARC24,
	[GAUDI2_QUEUE_ID_DCORE0_TPC_6_3] = CPU_ID_TPC_QMAN_ARC24,
	[GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0] = CPU_ID_EDMA_QMAN_ARC2,
	[GAUDI2_QUEUE_ID_DCORE1_EDMA_0_1] = CPU_ID_EDMA_QMAN_ARC2,
	[GAUDI2_QUEUE_ID_DCORE1_EDMA_0_2] = CPU_ID_EDMA_QMAN_ARC2,
	[GAUDI2_QUEUE_ID_DCORE1_EDMA_0_3] = CPU_ID_EDMA_QMAN_ARC2,
	[GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0] = CPU_ID_EDMA_QMAN_ARC3,
	[GAUDI2_QUEUE_ID_DCORE1_EDMA_1_1] = CPU_ID_EDMA_QMAN_ARC3,
	[GAUDI2_QUEUE_ID_DCORE1_EDMA_1_2] = CPU_ID_EDMA_QMAN_ARC3,
	[GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3] = CPU_ID_EDMA_QMAN_ARC3,
	[GAUDI2_QUEUE_ID_DCORE1_MME_0_0] = CPU_ID_SCHED_ARC4,
	[GAUDI2_QUEUE_ID_DCORE1_MME_0_1] = CPU_ID_SCHED_ARC4,
	[GAUDI2_QUEUE_ID_DCORE1_MME_0_2] = CPU_ID_SCHED_ARC4,
	[GAUDI2_QUEUE_ID_DCORE1_MME_0_3] = CPU_ID_SCHED_ARC4,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_0_0] = CPU_ID_TPC_QMAN_ARC6,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_0_1] = CPU_ID_TPC_QMAN_ARC6,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_0_2] = CPU_ID_TPC_QMAN_ARC6,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_0_3] = CPU_ID_TPC_QMAN_ARC6,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_1_0] = CPU_ID_TPC_QMAN_ARC7,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_1_1] = CPU_ID_TPC_QMAN_ARC7,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_1_2] = CPU_ID_TPC_QMAN_ARC7,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_1_3] = CPU_ID_TPC_QMAN_ARC7,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_2_0] = CPU_ID_TPC_QMAN_ARC8,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_2_1] = CPU_ID_TPC_QMAN_ARC8,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_2_2] = CPU_ID_TPC_QMAN_ARC8,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_2_3] = CPU_ID_TPC_QMAN_ARC8,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_3_0] = CPU_ID_TPC_QMAN_ARC9,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_3_1] = CPU_ID_TPC_QMAN_ARC9,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_3_2] = CPU_ID_TPC_QMAN_ARC9,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_3_3] = CPU_ID_TPC_QMAN_ARC9,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_4_0] = CPU_ID_TPC_QMAN_ARC10,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_4_1] = CPU_ID_TPC_QMAN_ARC10,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_4_2] = CPU_ID_TPC_QMAN_ARC10,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_4_3] = CPU_ID_TPC_QMAN_ARC10,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_5_0] = CPU_ID_TPC_QMAN_ARC11,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_5_1] = CPU_ID_TPC_QMAN_ARC11,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_5_2] = CPU_ID_TPC_QMAN_ARC11,
	[GAUDI2_QUEUE_ID_DCORE1_TPC_5_3] = CPU_ID_TPC_QMAN_ARC11,
	[GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0] = CPU_ID_EDMA_QMAN_ARC4,
	[GAUDI2_QUEUE_ID_DCORE2_EDMA_0_1] = CPU_ID_EDMA_QMAN_ARC4,
	[GAUDI2_QUEUE_ID_DCORE2_EDMA_0_2] = CPU_ID_EDMA_QMAN_ARC4,
	[GAUDI2_QUEUE_ID_DCORE2_EDMA_0_3] = CPU_ID_EDMA_QMAN_ARC4,
	[GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0] = CPU_ID_EDMA_QMAN_ARC5,
	[GAUDI2_QUEUE_ID_DCORE2_EDMA_1_1] = CPU_ID_EDMA_QMAN_ARC5,
	[GAUDI2_QUEUE_ID_DCORE2_EDMA_1_2] = CPU_ID_EDMA_QMAN_ARC5,
	[GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3] = CPU_ID_EDMA_QMAN_ARC5,
	[GAUDI2_QUEUE_ID_DCORE2_MME_0_0] = CPU_ID_MME_QMAN_ARC1,
	[GAUDI2_QUEUE_ID_DCORE2_MME_0_1] = CPU_ID_MME_QMAN_ARC1,
	[GAUDI2_QUEUE_ID_DCORE2_MME_0_2] = CPU_ID_MME_QMAN_ARC1,
	[GAUDI2_QUEUE_ID_DCORE2_MME_0_3] = CPU_ID_MME_QMAN_ARC1,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_0_0] = CPU_ID_TPC_QMAN_ARC12,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_0_1] = CPU_ID_TPC_QMAN_ARC12,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_0_2] = CPU_ID_TPC_QMAN_ARC12,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_0_3] = CPU_ID_TPC_QMAN_ARC12,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_1_0] = CPU_ID_TPC_QMAN_ARC13,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_1_1] = CPU_ID_TPC_QMAN_ARC13,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_1_2] = CPU_ID_TPC_QMAN_ARC13,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_1_3] = CPU_ID_TPC_QMAN_ARC13,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_2_0] = CPU_ID_TPC_QMAN_ARC14,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_2_1] = CPU_ID_TPC_QMAN_ARC14,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_2_2] = CPU_ID_TPC_QMAN_ARC14,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_2_3] = CPU_ID_TPC_QMAN_ARC14,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_3_0] = CPU_ID_TPC_QMAN_ARC15,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_3_1] = CPU_ID_TPC_QMAN_ARC15,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_3_2] = CPU_ID_TPC_QMAN_ARC15,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_3_3] = CPU_ID_TPC_QMAN_ARC15,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_4_0] = CPU_ID_TPC_QMAN_ARC16,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_4_1] = CPU_ID_TPC_QMAN_ARC16,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_4_2] = CPU_ID_TPC_QMAN_ARC16,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_4_3] = CPU_ID_TPC_QMAN_ARC16,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_5_0] = CPU_ID_TPC_QMAN_ARC17,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_5_1] = CPU_ID_TPC_QMAN_ARC17,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_5_2] = CPU_ID_TPC_QMAN_ARC17,
	[GAUDI2_QUEUE_ID_DCORE2_TPC_5_3] = CPU_ID_TPC_QMAN_ARC17,
	[GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0] = CPU_ID_EDMA_QMAN_ARC6,
	[GAUDI2_QUEUE_ID_DCORE3_EDMA_0_1] = CPU_ID_EDMA_QMAN_ARC6,
	[GAUDI2_QUEUE_ID_DCORE3_EDMA_0_2] = CPU_ID_EDMA_QMAN_ARC6,
	[GAUDI2_QUEUE_ID_DCORE3_EDMA_0_3] = CPU_ID_EDMA_QMAN_ARC6,
	[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0] = CPU_ID_EDMA_QMAN_ARC7,
	[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_1] = CPU_ID_EDMA_QMAN_ARC7,
	[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_2] = CPU_ID_EDMA_QMAN_ARC7,
	[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3] = CPU_ID_EDMA_QMAN_ARC7,
	[GAUDI2_QUEUE_ID_DCORE3_MME_0_0] = CPU_ID_SCHED_ARC5,
	[GAUDI2_QUEUE_ID_DCORE3_MME_0_1] = CPU_ID_SCHED_ARC5,
	[GAUDI2_QUEUE_ID_DCORE3_MME_0_2] = CPU_ID_SCHED_ARC5,
	[GAUDI2_QUEUE_ID_DCORE3_MME_0_3] = CPU_ID_SCHED_ARC5,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_0_0] = CPU_ID_TPC_QMAN_ARC18,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_0_1] = CPU_ID_TPC_QMAN_ARC18,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_0_2] = CPU_ID_TPC_QMAN_ARC18,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_0_3] = CPU_ID_TPC_QMAN_ARC18,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_1_0] = CPU_ID_TPC_QMAN_ARC19,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_1_1] = CPU_ID_TPC_QMAN_ARC19,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_1_2] = CPU_ID_TPC_QMAN_ARC19,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_1_3] = CPU_ID_TPC_QMAN_ARC19,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_2_0] = CPU_ID_TPC_QMAN_ARC20,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_2_1] = CPU_ID_TPC_QMAN_ARC20,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_2_2] = CPU_ID_TPC_QMAN_ARC20,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_2_3] = CPU_ID_TPC_QMAN_ARC20,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_3_0] = CPU_ID_TPC_QMAN_ARC21,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_3_1] = CPU_ID_TPC_QMAN_ARC21,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_3_2] = CPU_ID_TPC_QMAN_ARC21,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_3_3] = CPU_ID_TPC_QMAN_ARC21,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_4_0] = CPU_ID_TPC_QMAN_ARC22,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_4_1] = CPU_ID_TPC_QMAN_ARC22,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_4_2] = CPU_ID_TPC_QMAN_ARC22,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_4_3] = CPU_ID_TPC_QMAN_ARC22,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_5_0] = CPU_ID_TPC_QMAN_ARC23,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_5_1] = CPU_ID_TPC_QMAN_ARC23,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_5_2] = CPU_ID_TPC_QMAN_ARC23,
	[GAUDI2_QUEUE_ID_DCORE3_TPC_5_3] = CPU_ID_TPC_QMAN_ARC23,
	[GAUDI2_QUEUE_ID_NIC_0_0] = CPU_ID_NIC_QMAN_ARC0,
	[GAUDI2_QUEUE_ID_NIC_0_1] = CPU_ID_NIC_QMAN_ARC0,
	[GAUDI2_QUEUE_ID_NIC_0_2] = CPU_ID_NIC_QMAN_ARC0,
	[GAUDI2_QUEUE_ID_NIC_0_3] = CPU_ID_NIC_QMAN_ARC0,
	[GAUDI2_QUEUE_ID_NIC_1_0] = CPU_ID_NIC_QMAN_ARC1,
	[GAUDI2_QUEUE_ID_NIC_1_1] = CPU_ID_NIC_QMAN_ARC1,
	[GAUDI2_QUEUE_ID_NIC_1_2] = CPU_ID_NIC_QMAN_ARC1,
	[GAUDI2_QUEUE_ID_NIC_1_3] = CPU_ID_NIC_QMAN_ARC1,
	[GAUDI2_QUEUE_ID_NIC_2_0] = CPU_ID_NIC_QMAN_ARC2,
	[GAUDI2_QUEUE_ID_NIC_2_1] = CPU_ID_NIC_QMAN_ARC2,
	[GAUDI2_QUEUE_ID_NIC_2_2] = CPU_ID_NIC_QMAN_ARC2,
	[GAUDI2_QUEUE_ID_NIC_2_3] = CPU_ID_NIC_QMAN_ARC2,
	[GAUDI2_QUEUE_ID_NIC_3_0] = CPU_ID_NIC_QMAN_ARC3,
	[GAUDI2_QUEUE_ID_NIC_3_1] = CPU_ID_NIC_QMAN_ARC3,
	[GAUDI2_QUEUE_ID_NIC_3_2] = CPU_ID_NIC_QMAN_ARC3,
	[GAUDI2_QUEUE_ID_NIC_3_3] = CPU_ID_NIC_QMAN_ARC3,
	[GAUDI2_QUEUE_ID_NIC_4_0] = CPU_ID_NIC_QMAN_ARC4,
	[GAUDI2_QUEUE_ID_NIC_4_1] = CPU_ID_NIC_QMAN_ARC4,
	[GAUDI2_QUEUE_ID_NIC_4_2] = CPU_ID_NIC_QMAN_ARC4,
	[GAUDI2_QUEUE_ID_NIC_4_3] = CPU_ID_NIC_QMAN_ARC4,
	[GAUDI2_QUEUE_ID_NIC_5_0] = CPU_ID_NIC_QMAN_ARC5,
	[GAUDI2_QUEUE_ID_NIC_5_1] = CPU_ID_NIC_QMAN_ARC5,
	[GAUDI2_QUEUE_ID_NIC_5_2] = CPU_ID_NIC_QMAN_ARC5,
	[GAUDI2_QUEUE_ID_NIC_5_3] = CPU_ID_NIC_QMAN_ARC5,
	[GAUDI2_QUEUE_ID_NIC_6_0] = CPU_ID_NIC_QMAN_ARC6,
	[GAUDI2_QUEUE_ID_NIC_6_1] = CPU_ID_NIC_QMAN_ARC6,
	[GAUDI2_QUEUE_ID_NIC_6_2] = CPU_ID_NIC_QMAN_ARC6,
	[GAUDI2_QUEUE_ID_NIC_6_3] = CPU_ID_NIC_QMAN_ARC6,
	[GAUDI2_QUEUE_ID_NIC_7_0] = CPU_ID_NIC_QMAN_ARC7,
	[GAUDI2_QUEUE_ID_NIC_7_1] = CPU_ID_NIC_QMAN_ARC7,
	[GAUDI2_QUEUE_ID_NIC_7_2] = CPU_ID_NIC_QMAN_ARC7,
	[GAUDI2_QUEUE_ID_NIC_7_3] = CPU_ID_NIC_QMAN_ARC7,
	[GAUDI2_QUEUE_ID_NIC_8_0] = CPU_ID_NIC_QMAN_ARC8,
	[GAUDI2_QUEUE_ID_NIC_8_1] = CPU_ID_NIC_QMAN_ARC8,
	[GAUDI2_QUEUE_ID_NIC_8_2] = CPU_ID_NIC_QMAN_ARC8,
	[GAUDI2_QUEUE_ID_NIC_8_3] = CPU_ID_NIC_QMAN_ARC8,
	[GAUDI2_QUEUE_ID_NIC_9_0] = CPU_ID_NIC_QMAN_ARC9,
	[GAUDI2_QUEUE_ID_NIC_9_1] = CPU_ID_NIC_QMAN_ARC9,
	[GAUDI2_QUEUE_ID_NIC_9_2] = CPU_ID_NIC_QMAN_ARC9,
	[GAUDI2_QUEUE_ID_NIC_9_3] = CPU_ID_NIC_QMAN_ARC9,
	[GAUDI2_QUEUE_ID_NIC_10_0] = CPU_ID_NIC_QMAN_ARC10,
	[GAUDI2_QUEUE_ID_NIC_10_1] = CPU_ID_NIC_QMAN_ARC10,
	[GAUDI2_QUEUE_ID_NIC_10_2] = CPU_ID_NIC_QMAN_ARC10,
	[GAUDI2_QUEUE_ID_NIC_10_3] = CPU_ID_NIC_QMAN_ARC10,
	[GAUDI2_QUEUE_ID_NIC_11_0] = CPU_ID_NIC_QMAN_ARC11,
	[GAUDI2_QUEUE_ID_NIC_11_1] = CPU_ID_NIC_QMAN_ARC11,
	[GAUDI2_QUEUE_ID_NIC_11_2] = CPU_ID_NIC_QMAN_ARC11,
	[GAUDI2_QUEUE_ID_NIC_11_3] = CPU_ID_NIC_QMAN_ARC11,
	[GAUDI2_QUEUE_ID_NIC_12_0] = CPU_ID_NIC_QMAN_ARC12,
	[GAUDI2_QUEUE_ID_NIC_12_1] = CPU_ID_NIC_QMAN_ARC12,
	[GAUDI2_QUEUE_ID_NIC_12_2] = CPU_ID_NIC_QMAN_ARC12,
	[GAUDI2_QUEUE_ID_NIC_12_3] = CPU_ID_NIC_QMAN_ARC12,
	[GAUDI2_QUEUE_ID_NIC_13_0] = CPU_ID_NIC_QMAN_ARC13,
	[GAUDI2_QUEUE_ID_NIC_13_1] = CPU_ID_NIC_QMAN_ARC13,
	[GAUDI2_QUEUE_ID_NIC_13_2] = CPU_ID_NIC_QMAN_ARC13,
	[GAUDI2_QUEUE_ID_NIC_13_3] = CPU_ID_NIC_QMAN_ARC13,
	[GAUDI2_QUEUE_ID_NIC_14_0] = CPU_ID_NIC_QMAN_ARC14,
	[GAUDI2_QUEUE_ID_NIC_14_1] = CPU_ID_NIC_QMAN_ARC14,
	[GAUDI2_QUEUE_ID_NIC_14_2] = CPU_ID_NIC_QMAN_ARC14,
	[GAUDI2_QUEUE_ID_NIC_14_3] = CPU_ID_NIC_QMAN_ARC14,
	[GAUDI2_QUEUE_ID_NIC_15_0] = CPU_ID_NIC_QMAN_ARC15,
	[GAUDI2_QUEUE_ID_NIC_15_1] = CPU_ID_NIC_QMAN_ARC15,
	[GAUDI2_QUEUE_ID_NIC_15_2] = CPU_ID_NIC_QMAN_ARC15,
	[GAUDI2_QUEUE_ID_NIC_15_3] = CPU_ID_NIC_QMAN_ARC15,
	[GAUDI2_QUEUE_ID_NIC_16_0] = CPU_ID_NIC_QMAN_ARC16,
	[GAUDI2_QUEUE_ID_NIC_16_1] = CPU_ID_NIC_QMAN_ARC16,
	[GAUDI2_QUEUE_ID_NIC_16_2] = CPU_ID_NIC_QMAN_ARC16,
	[GAUDI2_QUEUE_ID_NIC_16_3] = CPU_ID_NIC_QMAN_ARC16,
	[GAUDI2_QUEUE_ID_NIC_17_0] = CPU_ID_NIC_QMAN_ARC17,
| 2082 | [GAUDI2_QUEUE_ID_NIC_17_1] = CPU_ID_NIC_QMAN_ARC17, |
| 2083 | [GAUDI2_QUEUE_ID_NIC_17_2] = CPU_ID_NIC_QMAN_ARC17, |
| 2084 | [GAUDI2_QUEUE_ID_NIC_17_3] = CPU_ID_NIC_QMAN_ARC17, |
| 2085 | [GAUDI2_QUEUE_ID_NIC_18_0] = CPU_ID_NIC_QMAN_ARC18, |
| 2086 | [GAUDI2_QUEUE_ID_NIC_18_1] = CPU_ID_NIC_QMAN_ARC18, |
| 2087 | [GAUDI2_QUEUE_ID_NIC_18_2] = CPU_ID_NIC_QMAN_ARC18, |
| 2088 | [GAUDI2_QUEUE_ID_NIC_18_3] = CPU_ID_NIC_QMAN_ARC18, |
| 2089 | [GAUDI2_QUEUE_ID_NIC_19_0] = CPU_ID_NIC_QMAN_ARC19, |
| 2090 | [GAUDI2_QUEUE_ID_NIC_19_1] = CPU_ID_NIC_QMAN_ARC19, |
| 2091 | [GAUDI2_QUEUE_ID_NIC_19_2] = CPU_ID_NIC_QMAN_ARC19, |
| 2092 | [GAUDI2_QUEUE_ID_NIC_19_3] = CPU_ID_NIC_QMAN_ARC19, |
| 2093 | [GAUDI2_QUEUE_ID_NIC_20_0] = CPU_ID_NIC_QMAN_ARC20, |
| 2094 | [GAUDI2_QUEUE_ID_NIC_20_1] = CPU_ID_NIC_QMAN_ARC20, |
| 2095 | [GAUDI2_QUEUE_ID_NIC_20_2] = CPU_ID_NIC_QMAN_ARC20, |
| 2096 | [GAUDI2_QUEUE_ID_NIC_20_3] = CPU_ID_NIC_QMAN_ARC20, |
| 2097 | [GAUDI2_QUEUE_ID_NIC_21_0] = CPU_ID_NIC_QMAN_ARC21, |
| 2098 | [GAUDI2_QUEUE_ID_NIC_21_1] = CPU_ID_NIC_QMAN_ARC21, |
| 2099 | [GAUDI2_QUEUE_ID_NIC_21_2] = CPU_ID_NIC_QMAN_ARC21, |
| 2100 | [GAUDI2_QUEUE_ID_NIC_21_3] = CPU_ID_NIC_QMAN_ARC21, |
| 2101 | [GAUDI2_QUEUE_ID_NIC_22_0] = CPU_ID_NIC_QMAN_ARC22, |
| 2102 | [GAUDI2_QUEUE_ID_NIC_22_1] = CPU_ID_NIC_QMAN_ARC22, |
| 2103 | [GAUDI2_QUEUE_ID_NIC_22_2] = CPU_ID_NIC_QMAN_ARC22, |
| 2104 | [GAUDI2_QUEUE_ID_NIC_22_3] = CPU_ID_NIC_QMAN_ARC22, |
| 2105 | [GAUDI2_QUEUE_ID_NIC_23_0] = CPU_ID_NIC_QMAN_ARC23, |
| 2106 | [GAUDI2_QUEUE_ID_NIC_23_1] = CPU_ID_NIC_QMAN_ARC23, |
| 2107 | [GAUDI2_QUEUE_ID_NIC_23_2] = CPU_ID_NIC_QMAN_ARC23, |
| 2108 | [GAUDI2_QUEUE_ID_NIC_23_3] = CPU_ID_NIC_QMAN_ARC23, |
| 2109 | [GAUDI2_QUEUE_ID_ROT_0_0] = CPU_ID_ROT_QMAN_ARC0, |
| 2110 | [GAUDI2_QUEUE_ID_ROT_0_1] = CPU_ID_ROT_QMAN_ARC0, |
| 2111 | [GAUDI2_QUEUE_ID_ROT_0_2] = CPU_ID_ROT_QMAN_ARC0, |
| 2112 | [GAUDI2_QUEUE_ID_ROT_0_3] = CPU_ID_ROT_QMAN_ARC0, |
| 2113 | [GAUDI2_QUEUE_ID_ROT_1_0] = CPU_ID_ROT_QMAN_ARC1, |
| 2114 | [GAUDI2_QUEUE_ID_ROT_1_1] = CPU_ID_ROT_QMAN_ARC1, |
| 2115 | [GAUDI2_QUEUE_ID_ROT_1_2] = CPU_ID_ROT_QMAN_ARC1, |
| 2116 | [GAUDI2_QUEUE_ID_ROT_1_3] = CPU_ID_ROT_QMAN_ARC1 |
| 2117 | }; |
| 2118 | |
| 2119 | const u32 gaudi2_dma_core_blocks_bases[DMA_CORE_ID_SIZE] = { |
| 2120 | [DMA_CORE_ID_PDMA0] = mmPDMA0_CORE_BASE, |
| 2121 | [DMA_CORE_ID_PDMA1] = mmPDMA1_CORE_BASE, |
| 2122 | [DMA_CORE_ID_EDMA0] = mmDCORE0_EDMA0_CORE_BASE, |
| 2123 | [DMA_CORE_ID_EDMA1] = mmDCORE0_EDMA1_CORE_BASE, |
| 2124 | [DMA_CORE_ID_EDMA2] = mmDCORE1_EDMA0_CORE_BASE, |
| 2125 | [DMA_CORE_ID_EDMA3] = mmDCORE1_EDMA1_CORE_BASE, |
| 2126 | [DMA_CORE_ID_EDMA4] = mmDCORE2_EDMA0_CORE_BASE, |
| 2127 | [DMA_CORE_ID_EDMA5] = mmDCORE2_EDMA1_CORE_BASE, |
| 2128 | [DMA_CORE_ID_EDMA6] = mmDCORE3_EDMA0_CORE_BASE, |
| 2129 | [DMA_CORE_ID_EDMA7] = mmDCORE3_EDMA1_CORE_BASE, |
| 2130 | [DMA_CORE_ID_KDMA] = mmARC_FARM_KDMA_BASE |
| 2131 | }; |
| 2132 | |
| 2133 | const u32 gaudi2_mme_acc_blocks_bases[MME_ID_SIZE] = { |
| 2134 | [MME_ID_DCORE0] = mmDCORE0_MME_ACC_BASE, |
| 2135 | [MME_ID_DCORE1] = mmDCORE1_MME_ACC_BASE, |
| 2136 | [MME_ID_DCORE2] = mmDCORE2_MME_ACC_BASE, |
| 2137 | [MME_ID_DCORE3] = mmDCORE3_MME_ACC_BASE |
| 2138 | }; |
| 2139 | |
| 2140 | static const u32 gaudi2_tpc_cfg_blocks_bases[TPC_ID_SIZE] = { |
| 2141 | [TPC_ID_DCORE0_TPC0] = mmDCORE0_TPC0_CFG_BASE, |
| 2142 | [TPC_ID_DCORE0_TPC1] = mmDCORE0_TPC1_CFG_BASE, |
| 2143 | [TPC_ID_DCORE0_TPC2] = mmDCORE0_TPC2_CFG_BASE, |
| 2144 | [TPC_ID_DCORE0_TPC3] = mmDCORE0_TPC3_CFG_BASE, |
| 2145 | [TPC_ID_DCORE0_TPC4] = mmDCORE0_TPC4_CFG_BASE, |
| 2146 | [TPC_ID_DCORE0_TPC5] = mmDCORE0_TPC5_CFG_BASE, |
| 2147 | [TPC_ID_DCORE1_TPC0] = mmDCORE1_TPC0_CFG_BASE, |
| 2148 | [TPC_ID_DCORE1_TPC1] = mmDCORE1_TPC1_CFG_BASE, |
| 2149 | [TPC_ID_DCORE1_TPC2] = mmDCORE1_TPC2_CFG_BASE, |
| 2150 | [TPC_ID_DCORE1_TPC3] = mmDCORE1_TPC3_CFG_BASE, |
| 2151 | [TPC_ID_DCORE1_TPC4] = mmDCORE1_TPC4_CFG_BASE, |
| 2152 | [TPC_ID_DCORE1_TPC5] = mmDCORE1_TPC5_CFG_BASE, |
| 2153 | [TPC_ID_DCORE2_TPC0] = mmDCORE2_TPC0_CFG_BASE, |
| 2154 | [TPC_ID_DCORE2_TPC1] = mmDCORE2_TPC1_CFG_BASE, |
| 2155 | [TPC_ID_DCORE2_TPC2] = mmDCORE2_TPC2_CFG_BASE, |
| 2156 | [TPC_ID_DCORE2_TPC3] = mmDCORE2_TPC3_CFG_BASE, |
| 2157 | [TPC_ID_DCORE2_TPC4] = mmDCORE2_TPC4_CFG_BASE, |
| 2158 | [TPC_ID_DCORE2_TPC5] = mmDCORE2_TPC5_CFG_BASE, |
| 2159 | [TPC_ID_DCORE3_TPC0] = mmDCORE3_TPC0_CFG_BASE, |
| 2160 | [TPC_ID_DCORE3_TPC1] = mmDCORE3_TPC1_CFG_BASE, |
| 2161 | [TPC_ID_DCORE3_TPC2] = mmDCORE3_TPC2_CFG_BASE, |
| 2162 | [TPC_ID_DCORE3_TPC3] = mmDCORE3_TPC3_CFG_BASE, |
| 2163 | [TPC_ID_DCORE3_TPC4] = mmDCORE3_TPC4_CFG_BASE, |
| 2164 | [TPC_ID_DCORE3_TPC5] = mmDCORE3_TPC5_CFG_BASE, |
| 2165 | [TPC_ID_DCORE0_TPC6] = mmDCORE0_TPC6_CFG_BASE, |
| 2166 | }; |
| 2167 | |
| 2168 | static const u32 gaudi2_tpc_eml_cfg_blocks_bases[TPC_ID_SIZE] = { |
| 2169 | [TPC_ID_DCORE0_TPC0] = mmDCORE0_TPC0_EML_CFG_BASE, |
| 2170 | [TPC_ID_DCORE0_TPC1] = mmDCORE0_TPC1_EML_CFG_BASE, |
| 2171 | [TPC_ID_DCORE0_TPC2] = mmDCORE0_TPC2_EML_CFG_BASE, |
| 2172 | [TPC_ID_DCORE0_TPC3] = mmDCORE0_TPC3_EML_CFG_BASE, |
| 2173 | [TPC_ID_DCORE0_TPC4] = mmDCORE0_TPC4_EML_CFG_BASE, |
| 2174 | [TPC_ID_DCORE0_TPC5] = mmDCORE0_TPC5_EML_CFG_BASE, |
| 2175 | [TPC_ID_DCORE1_TPC0] = mmDCORE1_TPC0_EML_CFG_BASE, |
| 2176 | [TPC_ID_DCORE1_TPC1] = mmDCORE1_TPC1_EML_CFG_BASE, |
| 2177 | [TPC_ID_DCORE1_TPC2] = mmDCORE1_TPC2_EML_CFG_BASE, |
| 2178 | [TPC_ID_DCORE1_TPC3] = mmDCORE1_TPC3_EML_CFG_BASE, |
| 2179 | [TPC_ID_DCORE1_TPC4] = mmDCORE1_TPC4_EML_CFG_BASE, |
| 2180 | [TPC_ID_DCORE1_TPC5] = mmDCORE1_TPC5_EML_CFG_BASE, |
| 2181 | [TPC_ID_DCORE2_TPC0] = mmDCORE2_TPC0_EML_CFG_BASE, |
| 2182 | [TPC_ID_DCORE2_TPC1] = mmDCORE2_TPC1_EML_CFG_BASE, |
| 2183 | [TPC_ID_DCORE2_TPC2] = mmDCORE2_TPC2_EML_CFG_BASE, |
| 2184 | [TPC_ID_DCORE2_TPC3] = mmDCORE2_TPC3_EML_CFG_BASE, |
| 2185 | [TPC_ID_DCORE2_TPC4] = mmDCORE2_TPC4_EML_CFG_BASE, |
| 2186 | [TPC_ID_DCORE2_TPC5] = mmDCORE2_TPC5_EML_CFG_BASE, |
| 2187 | [TPC_ID_DCORE3_TPC0] = mmDCORE3_TPC0_EML_CFG_BASE, |
| 2188 | [TPC_ID_DCORE3_TPC1] = mmDCORE3_TPC1_EML_CFG_BASE, |
| 2189 | [TPC_ID_DCORE3_TPC2] = mmDCORE3_TPC2_EML_CFG_BASE, |
| 2190 | [TPC_ID_DCORE3_TPC3] = mmDCORE3_TPC3_EML_CFG_BASE, |
| 2191 | [TPC_ID_DCORE3_TPC4] = mmDCORE3_TPC4_EML_CFG_BASE, |
| 2192 | [TPC_ID_DCORE3_TPC5] = mmDCORE3_TPC5_EML_CFG_BASE, |
| 2193 | [TPC_ID_DCORE0_TPC6] = mmDCORE0_TPC6_EML_CFG_BASE, |
| 2194 | }; |
| 2195 | |
| 2196 | const u32 gaudi2_rot_blocks_bases[ROTATOR_ID_SIZE] = { |
| 2197 | [ROTATOR_ID_0] = mmROT0_BASE, |
| 2198 | [ROTATOR_ID_1] = mmROT1_BASE |
| 2199 | }; |
| 2200 | |
| 2201 | static const u32 gaudi2_tpc_id_to_queue_id[TPC_ID_SIZE] = { |
| 2202 | [TPC_ID_DCORE0_TPC0] = GAUDI2_QUEUE_ID_DCORE0_TPC_0_0, |
| 2203 | [TPC_ID_DCORE0_TPC1] = GAUDI2_QUEUE_ID_DCORE0_TPC_1_0, |
| 2204 | [TPC_ID_DCORE0_TPC2] = GAUDI2_QUEUE_ID_DCORE0_TPC_2_0, |
| 2205 | [TPC_ID_DCORE0_TPC3] = GAUDI2_QUEUE_ID_DCORE0_TPC_3_0, |
| 2206 | [TPC_ID_DCORE0_TPC4] = GAUDI2_QUEUE_ID_DCORE0_TPC_4_0, |
| 2207 | [TPC_ID_DCORE0_TPC5] = GAUDI2_QUEUE_ID_DCORE0_TPC_5_0, |
| 2208 | [TPC_ID_DCORE1_TPC0] = GAUDI2_QUEUE_ID_DCORE1_TPC_0_0, |
| 2209 | [TPC_ID_DCORE1_TPC1] = GAUDI2_QUEUE_ID_DCORE1_TPC_1_0, |
| 2210 | [TPC_ID_DCORE1_TPC2] = GAUDI2_QUEUE_ID_DCORE1_TPC_2_0, |
| 2211 | [TPC_ID_DCORE1_TPC3] = GAUDI2_QUEUE_ID_DCORE1_TPC_3_0, |
| 2212 | [TPC_ID_DCORE1_TPC4] = GAUDI2_QUEUE_ID_DCORE1_TPC_4_0, |
| 2213 | [TPC_ID_DCORE1_TPC5] = GAUDI2_QUEUE_ID_DCORE1_TPC_5_0, |
| 2214 | [TPC_ID_DCORE2_TPC0] = GAUDI2_QUEUE_ID_DCORE2_TPC_0_0, |
| 2215 | [TPC_ID_DCORE2_TPC1] = GAUDI2_QUEUE_ID_DCORE2_TPC_1_0, |
| 2216 | [TPC_ID_DCORE2_TPC2] = GAUDI2_QUEUE_ID_DCORE2_TPC_2_0, |
| 2217 | [TPC_ID_DCORE2_TPC3] = GAUDI2_QUEUE_ID_DCORE2_TPC_3_0, |
| 2218 | [TPC_ID_DCORE2_TPC4] = GAUDI2_QUEUE_ID_DCORE2_TPC_4_0, |
| 2219 | [TPC_ID_DCORE2_TPC5] = GAUDI2_QUEUE_ID_DCORE2_TPC_5_0, |
| 2220 | [TPC_ID_DCORE3_TPC0] = GAUDI2_QUEUE_ID_DCORE3_TPC_0_0, |
| 2221 | [TPC_ID_DCORE3_TPC1] = GAUDI2_QUEUE_ID_DCORE3_TPC_1_0, |
| 2222 | [TPC_ID_DCORE3_TPC2] = GAUDI2_QUEUE_ID_DCORE3_TPC_2_0, |
| 2223 | [TPC_ID_DCORE3_TPC3] = GAUDI2_QUEUE_ID_DCORE3_TPC_3_0, |
| 2224 | [TPC_ID_DCORE3_TPC4] = GAUDI2_QUEUE_ID_DCORE3_TPC_4_0, |
| 2225 | [TPC_ID_DCORE3_TPC5] = GAUDI2_QUEUE_ID_DCORE3_TPC_5_0, |
| 2226 | [TPC_ID_DCORE0_TPC6] = GAUDI2_QUEUE_ID_DCORE0_TPC_6_0, |
| 2227 | }; |
| 2228 | |
| 2229 | static const u32 gaudi2_rot_id_to_queue_id[ROTATOR_ID_SIZE] = { |
| 2230 | [ROTATOR_ID_0] = GAUDI2_QUEUE_ID_ROT_0_0, |
| 2231 | [ROTATOR_ID_1] = GAUDI2_QUEUE_ID_ROT_1_0, |
| 2232 | }; |
| 2233 | |
| 2234 | static const u32 gaudi2_tpc_engine_id_to_tpc_id[] = { |
| 2235 | [GAUDI2_DCORE0_ENGINE_ID_TPC_0] = TPC_ID_DCORE0_TPC0, |
| 2236 | [GAUDI2_DCORE0_ENGINE_ID_TPC_1] = TPC_ID_DCORE0_TPC1, |
| 2237 | [GAUDI2_DCORE0_ENGINE_ID_TPC_2] = TPC_ID_DCORE0_TPC2, |
| 2238 | [GAUDI2_DCORE0_ENGINE_ID_TPC_3] = TPC_ID_DCORE0_TPC3, |
| 2239 | [GAUDI2_DCORE0_ENGINE_ID_TPC_4] = TPC_ID_DCORE0_TPC4, |
| 2240 | [GAUDI2_DCORE0_ENGINE_ID_TPC_5] = TPC_ID_DCORE0_TPC5, |
| 2241 | [GAUDI2_DCORE1_ENGINE_ID_TPC_0] = TPC_ID_DCORE1_TPC0, |
| 2242 | [GAUDI2_DCORE1_ENGINE_ID_TPC_1] = TPC_ID_DCORE1_TPC1, |
| 2243 | [GAUDI2_DCORE1_ENGINE_ID_TPC_2] = TPC_ID_DCORE1_TPC2, |
| 2244 | [GAUDI2_DCORE1_ENGINE_ID_TPC_3] = TPC_ID_DCORE1_TPC3, |
| 2245 | [GAUDI2_DCORE1_ENGINE_ID_TPC_4] = TPC_ID_DCORE1_TPC4, |
| 2246 | [GAUDI2_DCORE1_ENGINE_ID_TPC_5] = TPC_ID_DCORE1_TPC5, |
| 2247 | [GAUDI2_DCORE2_ENGINE_ID_TPC_0] = TPC_ID_DCORE2_TPC0, |
| 2248 | [GAUDI2_DCORE2_ENGINE_ID_TPC_1] = TPC_ID_DCORE2_TPC1, |
| 2249 | [GAUDI2_DCORE2_ENGINE_ID_TPC_2] = TPC_ID_DCORE2_TPC2, |
| 2250 | [GAUDI2_DCORE2_ENGINE_ID_TPC_3] = TPC_ID_DCORE2_TPC3, |
| 2251 | [GAUDI2_DCORE2_ENGINE_ID_TPC_4] = TPC_ID_DCORE2_TPC4, |
| 2252 | [GAUDI2_DCORE2_ENGINE_ID_TPC_5] = TPC_ID_DCORE2_TPC5, |
| 2253 | [GAUDI2_DCORE3_ENGINE_ID_TPC_0] = TPC_ID_DCORE3_TPC0, |
| 2254 | [GAUDI2_DCORE3_ENGINE_ID_TPC_1] = TPC_ID_DCORE3_TPC1, |
| 2255 | [GAUDI2_DCORE3_ENGINE_ID_TPC_2] = TPC_ID_DCORE3_TPC2, |
| 2256 | [GAUDI2_DCORE3_ENGINE_ID_TPC_3] = TPC_ID_DCORE3_TPC3, |
| 2257 | [GAUDI2_DCORE3_ENGINE_ID_TPC_4] = TPC_ID_DCORE3_TPC4, |
| 2258 | [GAUDI2_DCORE3_ENGINE_ID_TPC_5] = TPC_ID_DCORE3_TPC5, |
	/* the PCI TPC is placed last (mapped like the HW) */
| 2260 | [GAUDI2_DCORE0_ENGINE_ID_TPC_6] = TPC_ID_DCORE0_TPC6, |
| 2261 | }; |
| 2262 | |
| 2263 | static const u32 gaudi2_mme_engine_id_to_mme_id[] = { |
| 2264 | [GAUDI2_DCORE0_ENGINE_ID_MME] = MME_ID_DCORE0, |
| 2265 | [GAUDI2_DCORE1_ENGINE_ID_MME] = MME_ID_DCORE1, |
| 2266 | [GAUDI2_DCORE2_ENGINE_ID_MME] = MME_ID_DCORE2, |
| 2267 | [GAUDI2_DCORE3_ENGINE_ID_MME] = MME_ID_DCORE3, |
| 2268 | }; |
| 2269 | |
| 2270 | static const u32 gaudi2_edma_engine_id_to_edma_id[] = { |
| 2271 | [GAUDI2_ENGINE_ID_PDMA_0] = DMA_CORE_ID_PDMA0, |
| 2272 | [GAUDI2_ENGINE_ID_PDMA_1] = DMA_CORE_ID_PDMA1, |
| 2273 | [GAUDI2_DCORE0_ENGINE_ID_EDMA_0] = DMA_CORE_ID_EDMA0, |
| 2274 | [GAUDI2_DCORE0_ENGINE_ID_EDMA_1] = DMA_CORE_ID_EDMA1, |
| 2275 | [GAUDI2_DCORE1_ENGINE_ID_EDMA_0] = DMA_CORE_ID_EDMA2, |
| 2276 | [GAUDI2_DCORE1_ENGINE_ID_EDMA_1] = DMA_CORE_ID_EDMA3, |
| 2277 | [GAUDI2_DCORE2_ENGINE_ID_EDMA_0] = DMA_CORE_ID_EDMA4, |
| 2278 | [GAUDI2_DCORE2_ENGINE_ID_EDMA_1] = DMA_CORE_ID_EDMA5, |
| 2279 | [GAUDI2_DCORE3_ENGINE_ID_EDMA_0] = DMA_CORE_ID_EDMA6, |
| 2280 | [GAUDI2_DCORE3_ENGINE_ID_EDMA_1] = DMA_CORE_ID_EDMA7, |
| 2281 | [GAUDI2_ENGINE_ID_KDMA] = DMA_CORE_ID_KDMA, |
| 2282 | }; |
| 2283 | |
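/* first stream (queue) ID of each EDMA engine, in EDMA sequence order */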
| 2284 | const u32 edma_stream_base[NUM_OF_EDMA_PER_DCORE * NUM_OF_DCORES] = { |
| 2285 | GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0, |
| 2286 | GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0, |
| 2287 | GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0, |
| 2288 | GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0, |
| 2289 | GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0, |
| 2290 | GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0, |
| 2291 | GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0, |
| 2292 | GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0, |
| 2293 | }; |
| 2294 | |
static const char gaudi2_vdec_irq_name[GAUDI2_VDEC_MSIX_ENTRIES][GAUDI2_MAX_STRING_LEN] = {
	"gaudi2 vdec 0_0", "gaudi2 vdec 0_0 abnormal",
	"gaudi2 vdec 0_1", "gaudi2 vdec 0_1 abnormal",
	"gaudi2 vdec 1_0", "gaudi2 vdec 1_0 abnormal",
	"gaudi2 vdec 1_1", "gaudi2 vdec 1_1 abnormal",
	"gaudi2 vdec 2_0", "gaudi2 vdec 2_0 abnormal",
	"gaudi2 vdec 2_1", "gaudi2 vdec 2_1 abnormal",
	"gaudi2 vdec 3_0", "gaudi2 vdec 3_0 abnormal",
	"gaudi2 vdec 3_1", "gaudi2 vdec 3_1 abnormal",
	"gaudi2 vdec s_0", "gaudi2 vdec s_0 abnormal",
	"gaudi2 vdec s_1", "gaudi2 vdec s_1 abnormal"
| 2306 | }; |
| 2307 | |
| 2308 | enum rtr_id { |
| 2309 | DCORE0_RTR0, |
| 2310 | DCORE0_RTR1, |
| 2311 | DCORE0_RTR2, |
| 2312 | DCORE0_RTR3, |
| 2313 | DCORE0_RTR4, |
| 2314 | DCORE0_RTR5, |
| 2315 | DCORE0_RTR6, |
| 2316 | DCORE0_RTR7, |
| 2317 | DCORE1_RTR0, |
| 2318 | DCORE1_RTR1, |
| 2319 | DCORE1_RTR2, |
| 2320 | DCORE1_RTR3, |
| 2321 | DCORE1_RTR4, |
| 2322 | DCORE1_RTR5, |
| 2323 | DCORE1_RTR6, |
| 2324 | DCORE1_RTR7, |
| 2325 | DCORE2_RTR0, |
| 2326 | DCORE2_RTR1, |
| 2327 | DCORE2_RTR2, |
| 2328 | DCORE2_RTR3, |
| 2329 | DCORE2_RTR4, |
| 2330 | DCORE2_RTR5, |
| 2331 | DCORE2_RTR6, |
| 2332 | DCORE2_RTR7, |
| 2333 | DCORE3_RTR0, |
| 2334 | DCORE3_RTR1, |
| 2335 | DCORE3_RTR2, |
| 2336 | DCORE3_RTR3, |
| 2337 | DCORE3_RTR4, |
| 2338 | DCORE3_RTR5, |
| 2339 | DCORE3_RTR6, |
| 2340 | DCORE3_RTR7, |
| 2341 | }; |
| 2342 | |
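/*
 * The initiator-to-router tables below are indexed by the engine's sequence
 * number. For TPCs there is one extra last entry which covers the PCI TPC
 * (DCORE0_TPC6), matching the engine-ID mapping above.
 */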
| 2343 | static const u32 gaudi2_tpc_initiator_hbw_rtr_id[NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES + 1] = { |
| 2344 | DCORE0_RTR1, DCORE0_RTR1, DCORE0_RTR2, DCORE0_RTR2, DCORE0_RTR3, DCORE0_RTR3, |
| 2345 | DCORE1_RTR6, DCORE1_RTR6, DCORE1_RTR5, DCORE1_RTR5, DCORE1_RTR4, DCORE1_RTR4, |
| 2346 | DCORE2_RTR3, DCORE2_RTR3, DCORE2_RTR2, DCORE2_RTR2, DCORE2_RTR1, DCORE2_RTR1, |
| 2347 | DCORE3_RTR4, DCORE3_RTR4, DCORE3_RTR5, DCORE3_RTR5, DCORE3_RTR6, DCORE3_RTR6, |
| 2348 | DCORE0_RTR0 |
| 2349 | }; |
| 2350 | |
| 2351 | static const u32 gaudi2_tpc_initiator_lbw_rtr_id[NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES + 1] = { |
| 2352 | DCORE0_RTR1, DCORE0_RTR1, DCORE0_RTR1, DCORE0_RTR1, DCORE0_RTR2, DCORE0_RTR2, |
| 2353 | DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR6, DCORE1_RTR6, DCORE1_RTR5, DCORE1_RTR5, |
| 2354 | DCORE2_RTR2, DCORE2_RTR2, DCORE2_RTR1, DCORE2_RTR1, DCORE2_RTR0, DCORE2_RTR0, |
| 2355 | DCORE3_RTR5, DCORE3_RTR5, DCORE3_RTR6, DCORE3_RTR6, DCORE3_RTR7, DCORE3_RTR7, |
| 2356 | DCORE0_RTR0 |
| 2357 | }; |
| 2358 | |
| 2359 | static const u32 gaudi2_dec_initiator_hbw_rtr_id[NUMBER_OF_DEC] = { |
| 2360 | DCORE0_RTR0, DCORE0_RTR0, DCORE1_RTR7, DCORE1_RTR7, DCORE2_RTR0, DCORE2_RTR0, |
| 2361 | DCORE3_RTR7, DCORE3_RTR7, DCORE0_RTR0, DCORE0_RTR0 |
| 2362 | }; |
| 2363 | |
| 2364 | static const u32 gaudi2_dec_initiator_lbw_rtr_id[NUMBER_OF_DEC] = { |
| 2365 | DCORE0_RTR1, DCORE0_RTR1, DCORE1_RTR6, DCORE1_RTR6, DCORE2_RTR1, DCORE2_RTR1, |
| 2366 | DCORE3_RTR6, DCORE3_RTR6, DCORE0_RTR0, DCORE0_RTR0 |
| 2367 | }; |
| 2368 | |
| 2369 | static const u32 gaudi2_nic_initiator_hbw_rtr_id[NIC_NUMBER_OF_MACROS] = { |
| 2370 | DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE2_RTR0, |
| 2371 | DCORE2_RTR0, DCORE2_RTR0, DCORE2_RTR0, DCORE3_RTR7, DCORE3_RTR7, DCORE3_RTR7 |
| 2372 | }; |
| 2373 | |
| 2374 | static const u32 gaudi2_nic_initiator_lbw_rtr_id[NIC_NUMBER_OF_MACROS] = { |
| 2375 | DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE1_RTR7, DCORE2_RTR0, |
| 2376 | DCORE2_RTR0, DCORE2_RTR0, DCORE2_RTR0, DCORE3_RTR7, DCORE3_RTR7, DCORE3_RTR7 |
| 2377 | }; |
| 2378 | |
| 2379 | static const u32 gaudi2_edma_initiator_hbw_sft[NUM_OF_EDMA_PER_DCORE * NUM_OF_DCORES] = { |
| 2380 | mmSFT0_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_BASE, |
| 2381 | mmSFT0_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE, |
| 2382 | mmSFT1_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_BASE, |
| 2383 | mmSFT1_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE, |
| 2384 | mmSFT2_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE, |
| 2385 | mmSFT2_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_BASE, |
| 2386 | mmSFT3_HBW_RTR_IF0_MSTR_IF_RR_SHRD_HBW_BASE, |
| 2387 | mmSFT3_HBW_RTR_IF1_MSTR_IF_RR_SHRD_HBW_BASE |
| 2388 | }; |
| 2389 | |
| 2390 | static const u32 gaudi2_pdma_initiator_hbw_rtr_id[NUM_OF_PDMA] = { |
| 2391 | DCORE0_RTR0, DCORE0_RTR0 |
| 2392 | }; |
| 2393 | |
| 2394 | static const u32 gaudi2_pdma_initiator_lbw_rtr_id[NUM_OF_PDMA] = { |
| 2395 | DCORE0_RTR2, DCORE0_RTR2 |
| 2396 | }; |
| 2397 | |
| 2398 | static const u32 gaudi2_rot_initiator_hbw_rtr_id[NUM_OF_ROT] = { |
| 2399 | DCORE2_RTR0, DCORE3_RTR7 |
| 2400 | }; |
| 2401 | |
| 2402 | static const u32 gaudi2_rot_initiator_lbw_rtr_id[NUM_OF_ROT] = { |
| 2403 | DCORE2_RTR2, DCORE3_RTR5 |
| 2404 | }; |
| 2405 | |
| 2406 | struct mme_initiators_rtr_id { |
| 2407 | u32 wap0; |
| 2408 | u32 wap1; |
| 2409 | u32 write; |
| 2410 | u32 read; |
| 2411 | u32 sbte0; |
| 2412 | u32 sbte1; |
| 2413 | u32 sbte2; |
| 2414 | u32 sbte3; |
| 2415 | u32 sbte4; |
| 2416 | }; |
| 2417 | |
| 2418 | enum mme_initiators { |
| 2419 | MME_WAP0 = 0, |
| 2420 | MME_WAP1, |
| 2421 | MME_WRITE, |
| 2422 | MME_READ, |
| 2423 | MME_SBTE0, |
| 2424 | MME_SBTE1, |
| 2425 | MME_SBTE2, |
| 2426 | MME_SBTE3, |
| 2427 | MME_SBTE4, |
| 2428 | MME_INITIATORS_MAX |
| 2429 | }; |
| 2430 | |
| 2431 | static const struct mme_initiators_rtr_id |
| 2432 | gaudi2_mme_initiator_rtr_id[NUM_OF_MME_PER_DCORE * NUM_OF_DCORES] = { |
| 2433 | { .wap0 = 5, .wap1 = 7, .write = 6, .read = 7, |
| 2434 | .sbte0 = 7, .sbte1 = 4, .sbte2 = 4, .sbte3 = 5, .sbte4 = 6}, |
| 2435 | { .wap0 = 10, .wap1 = 8, .write = 9, .read = 8, |
| 2436 | .sbte0 = 11, .sbte1 = 11, .sbte2 = 10, .sbte3 = 9, .sbte4 = 8}, |
| 2437 | { .wap0 = 21, .wap1 = 23, .write = 22, .read = 23, |
| 2438 | .sbte0 = 20, .sbte1 = 20, .sbte2 = 21, .sbte3 = 22, .sbte4 = 23}, |
| 2439 | { .wap0 = 30, .wap1 = 28, .write = 29, .read = 30, |
| 2440 | .sbte0 = 31, .sbte1 = 31, .sbte2 = 30, .sbte3 = 29, .sbte4 = 28}, |
| 2441 | }; |
| 2442 | |
| 2443 | enum razwi_event_sources { |
| 2444 | RAZWI_TPC, |
| 2445 | RAZWI_MME, |
| 2446 | RAZWI_EDMA, |
| 2447 | RAZWI_PDMA, |
| 2448 | RAZWI_NIC, |
| 2449 | RAZWI_DEC, |
| 2450 | RAZWI_ROT, |
| 2451 | RAZWI_ARC_FARM |
| 2452 | }; |
| 2453 | |
| 2454 | struct hbm_mc_error_causes { |
| 2455 | u32 mask; |
| 2456 | char cause[50]; |
| 2457 | }; |
| 2458 | |
| 2459 | static struct hl_special_block_info gaudi2_special_blocks[] = GAUDI2_SPECIAL_BLOCKS; |
| 2460 | |
/* The special blocks iterator is currently used to configure security
 * protection bits and to read global errors. Most HW blocks are addressable;
 * those which aren't (N/A) must be skipped. The following configurations are
 * used for both the PB config and the global error reading, since they
 * currently share the same settings. Once that changes, separate
 * configurations must be used for each.
 */
| 2467 | static int gaudi2_iterator_skip_block_types[] = { |
| 2468 | GAUDI2_BLOCK_TYPE_PLL, |
| 2469 | GAUDI2_BLOCK_TYPE_EU_BIST, |
| 2470 | GAUDI2_BLOCK_TYPE_HBM, |
| 2471 | GAUDI2_BLOCK_TYPE_XFT |
| 2472 | }; |
| 2473 | |
| 2474 | static struct range gaudi2_iterator_skip_block_ranges[] = { |
| 2475 | /* Skip all PSOC blocks except for PSOC_GLOBAL_CONF */ |
| 2476 | {mmPSOC_I2C_M0_BASE, mmPSOC_EFUSE_BASE}, |
| 2477 | {mmPSOC_BTL_BASE, mmPSOC_MSTR_IF_RR_SHRD_HBW_BASE}, |
| 2478 | /* Skip all CPU blocks except for CPU_IF */ |
| 2479 | {mmCPU_CA53_CFG_BASE, mmCPU_CA53_CFG_BASE}, |
| 2480 | {mmCPU_TIMESTAMP_BASE, mmCPU_MSTR_IF_RR_SHRD_HBW_BASE} |
| 2481 | }; |
| 2482 | |
| 2483 | static struct hbm_mc_error_causes hbm_mc_spi[GAUDI2_NUM_OF_HBM_MC_SPI_CAUSE] = { |
	{HBM_MC_SPI_TEMP_PIN_CHG_MASK, "temperature pins changed"},
	{HBM_MC_SPI_THR_ENG_MASK, "temperature-based throttling engaged"},
	{HBM_MC_SPI_THR_DIS_ENG_MASK, "temperature-based throttling disengaged"},
	{HBM_MC_SPI_IEEE1500_COMP_MASK, "IEEE1500 op comp"},
	{HBM_MC_SPI_IEEE1500_PAUSED_MASK, "IEEE1500 op paused"},
| 2489 | }; |
| 2490 | |
| 2491 | static const char * const hbm_mc_sei_cause[GAUDI2_NUM_OF_HBM_SEI_CAUSE] = { |
	[HBM_SEI_CMD_PARITY_EVEN] = "SEI C/A parity even",
	[HBM_SEI_CMD_PARITY_ODD] = "SEI C/A parity odd",
	[HBM_SEI_READ_ERR] = "SEI read data error",
	[HBM_SEI_WRITE_DATA_PARITY_ERR] = "SEI write data parity error",
	[HBM_SEI_CATTRIP] = "SEI CATTRIP asserted",
	[HBM_SEI_MEM_BIST_FAIL] = "SEI memory BIST fail",
	[HBM_SEI_DFI] = "SEI DFI error",
	[HBM_SEI_INV_TEMP_READ_OUT] = "SEI invalid temp read",
	[HBM_SEI_BIST_FAIL] = "SEI BIST fail"
| 2501 | }; |
| 2502 | |
| 2503 | struct mmu_spi_sei_cause { |
| 2504 | char cause[50]; |
| 2505 | int clear_bit; |
| 2506 | }; |
| 2507 | |
static const struct mmu_spi_sei_cause gaudi2_mmu_spi_sei[GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE] = {
	{"page fault", 1},		/* INTERRUPT_CLR[1] */
	{"page access", 1},		/* INTERRUPT_CLR[1] */
	{"bypass ddr", 2},		/* INTERRUPT_CLR[2] */
	{"multi hit", 2},		/* INTERRUPT_CLR[2] */
	{"mmu rei0", -1},		/* no clear register bit */
	{"mmu rei1", -1},		/* no clear register bit */
	{"stlb rei0", -1},		/* no clear register bit */
	{"stlb rei1", -1},		/* no clear register bit */
	{"rr privileged write hit", 2},	/* INTERRUPT_CLR[2] */
	{"rr privileged read hit", 2},	/* INTERRUPT_CLR[2] */
	{"rr secure write hit", 2},	/* INTERRUPT_CLR[2] */
	{"rr secure read hit", 2},	/* INTERRUPT_CLR[2] */
	{"bist_fail no use", 2},	/* INTERRUPT_CLR[2] */
	{"bist_fail no use", 2},	/* INTERRUPT_CLR[2] */
	{"bist_fail no use", 2},	/* INTERRUPT_CLR[2] */
	{"bist_fail no use", 2},	/* INTERRUPT_CLR[2] */
	{"slave error", 16},		/* INTERRUPT_CLR[16] */
	{"dec error", 17},		/* INTERRUPT_CLR[17] */
	{"burst fifo full", 2}		/* INTERRUPT_CLR[2] */
| 2528 | }; |
| 2529 | |
| 2530 | struct gaudi2_cache_invld_params { |
| 2531 | u64 start_va; |
| 2532 | u64 end_va; |
| 2533 | u32 inv_start_val; |
| 2534 | u32 flags; |
| 2535 | bool range_invalidation; |
| 2536 | }; |
| 2537 | |
| 2538 | struct gaudi2_tpc_idle_data { |
| 2539 | struct engines_data *e; |
| 2540 | unsigned long *mask; |
| 2541 | bool *is_idle; |
| 2542 | const char *tpc_fmt; |
| 2543 | }; |
| 2544 | |
| 2545 | struct gaudi2_tpc_mmu_data { |
| 2546 | u32 rw_asid; |
| 2547 | }; |
| 2548 | |
| 2549 | static s64 gaudi2_state_dump_specs_props[SP_MAX] = {0}; |
| 2550 | |
| 2551 | static int gaudi2_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size, u64 val); |
| 2552 | static bool gaudi2_is_queue_enabled(struct hl_device *hdev, u32 hw_queue_id); |
| 2553 | static bool gaudi2_is_arc_enabled(struct hl_device *hdev, u64 arc_id); |
| 2554 | static void gaudi2_clr_arc_id_cap(struct hl_device *hdev, u64 arc_id); |
| 2555 | static void gaudi2_set_arc_id_cap(struct hl_device *hdev, u64 arc_id); |
| 2556 | static void gaudi2_memset_device_lbw(struct hl_device *hdev, u32 addr, u32 size, u32 val); |
| 2557 | static int gaudi2_send_job_to_kdma(struct hl_device *hdev, u64 src_addr, u64 dst_addr, u32 size, |
| 2558 | bool is_memset); |
| 2559 | static bool gaudi2_get_tpc_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len, |
| 2560 | struct engines_data *e); |
| 2561 | static bool gaudi2_get_mme_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len, |
| 2562 | struct engines_data *e); |
| 2563 | static bool gaudi2_get_edma_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len, |
| 2564 | struct engines_data *e); |
| 2565 | static u64 gaudi2_mmu_scramble_addr(struct hl_device *hdev, u64 raw_addr); |
| 2566 | static u64 gaudi2_mmu_descramble_addr(struct hl_device *hdev, u64 scrambled_addr); |
| 2567 | |
| 2568 | static void gaudi2_init_scrambler_hbm(struct hl_device *hdev) |
| 2569 | { |
| 2570 | |
| 2571 | } |
| 2572 | |
| 2573 | static u32 gaudi2_get_signal_cb_size(struct hl_device *hdev) |
| 2574 | { |
| 2575 | return sizeof(struct packet_msg_short); |
| 2576 | } |
| 2577 | |
| 2578 | static u32 gaudi2_get_wait_cb_size(struct hl_device *hdev) |
| 2579 | { |
| 2580 | return sizeof(struct packet_msg_short) * 4 + sizeof(struct packet_fence); |
| 2581 | } |
| 2582 | |
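/**
 * gaudi2_iterate_tpcs() - iterate over all enabled TPC engines.
 * @hdev: habanalabs device structure.
 * @ctx: iterator context, holding the per-TPC callback (ctx->fn) and its
 *       return code (ctx->rc).
 *
 * Invokes ctx->fn for every TPC whose bit is set in the enabled mask, going
 * over the per-dcore TPCs first and the PCI TPC (DCORE0_TPC6) last, and
 * stops at the first callback failure, which is reported through ctx->rc.
 */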
| 2583 | void gaudi2_iterate_tpcs(struct hl_device *hdev, struct iterate_module_ctx *ctx) |
| 2584 | { |
| 2585 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 2586 | int dcore, inst, tpc_seq; |
| 2587 | u32 offset; |
| 2588 | |
| 2589 | /* init the return code */ |
| 2590 | ctx->rc = 0; |
| 2591 | |
| 2592 | for (dcore = 0; dcore < NUM_OF_DCORES; dcore++) { |
| 2593 | for (inst = 0; inst < NUM_OF_TPC_PER_DCORE; inst++) { |
| 2594 | tpc_seq = dcore * NUM_OF_TPC_PER_DCORE + inst; |
| 2595 | |
| 2596 | if (!(prop->tpc_enabled_mask & BIT(tpc_seq))) |
| 2597 | continue; |
| 2598 | |
| 2599 | offset = (DCORE_OFFSET * dcore) + (DCORE_TPC_OFFSET * inst); |
| 2600 | |
| 2601 | ctx->fn(hdev, dcore, inst, offset, ctx); |
| 2602 | if (ctx->rc) { |
				dev_err(hdev->dev, "TPC iterator failed for DCORE%d TPC%d\n",
						dcore, inst);
| 2605 | return; |
| 2606 | } |
| 2607 | } |
| 2608 | } |
| 2609 | |
| 2610 | if (!(prop->tpc_enabled_mask & BIT(TPC_ID_DCORE0_TPC6))) |
| 2611 | return; |
| 2612 | |
| 2613 | /* special check for PCI TPC (DCORE0_TPC6) */ |
| 2614 | offset = DCORE_TPC_OFFSET * (NUM_DCORE0_TPC - 1); |
| 2615 | ctx->fn(hdev, 0, NUM_DCORE0_TPC - 1, offset, ctx); |
| 2616 | if (ctx->rc) |
		dev_err(hdev->dev, "TPC iterator failed for DCORE0 TPC6\n");
| 2618 | } |
| 2619 | |
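/*
 * The host physical address space is split into two ranges; an address is
 * valid as long as it does not fall into the hole between them, i.e. the
 * only rejected addresses are those at/above the end of range 0 and below
 * the base of range 1.
 */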
| 2620 | static bool gaudi2_host_phys_addr_valid(u64 addr) |
| 2621 | { |
| 2622 | if ((addr < HOST_PHYS_BASE_0 + HOST_PHYS_SIZE_0) || (addr >= HOST_PHYS_BASE_1)) |
| 2623 | return true; |
| 2624 | |
| 2625 | return false; |
| 2626 | } |
| 2627 | |
| 2628 | static int set_number_of_functional_hbms(struct hl_device *hdev) |
| 2629 | { |
| 2630 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 2631 | u8 faulty_hbms = hweight64(hdev->dram_binning); |
| 2632 | |
| 2633 | /* check if all HBMs should be used */ |
| 2634 | if (!faulty_hbms) { |
		dev_dbg(hdev->dev, "All HBMs are in use (no binning)\n");
| 2636 | prop->num_functional_hbms = GAUDI2_HBM_NUM; |
| 2637 | return 0; |
| 2638 | } |
| 2639 | |
| 2640 | /* |
| 2641 | * check for error condition in which number of binning |
| 2642 | * candidates is higher than the maximum supported by the |
| 2643 | * driver (in which case binning mask shall be ignored and driver will |
| 2644 | * set the default) |
| 2645 | */ |
| 2646 | if (faulty_hbms > MAX_FAULTY_HBMS) { |
| 2647 | dev_err(hdev->dev, |
| 2648 | "HBM binning supports max of %d faulty HBMs, supplied mask 0x%llx.\n" , |
| 2649 | MAX_FAULTY_HBMS, hdev->dram_binning); |
| 2650 | return -EINVAL; |
| 2651 | } |
| 2652 | |
| 2653 | /* |
| 2654 | * by default, number of functional HBMs in Gaudi2 is always |
| 2655 | * GAUDI2_HBM_NUM - 1. |
| 2656 | */ |
| 2657 | prop->num_functional_hbms = GAUDI2_HBM_NUM - faulty_hbms; |
| 2658 | return 0; |
| 2659 | } |
| 2660 | |
| 2661 | static bool gaudi2_is_edma_queue_id(u32 queue_id) |
| 2662 | { |
| 2663 | |
| 2664 | switch (queue_id) { |
| 2665 | case GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3: |
| 2666 | case GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3: |
| 2667 | case GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3: |
| 2668 | case GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3: |
| 2669 | return true; |
| 2670 | default: |
| 2671 | return false; |
| 2672 | } |
| 2673 | } |
| 2674 | |
| 2675 | static int gaudi2_set_dram_properties(struct hl_device *hdev) |
| 2676 | { |
| 2677 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 2678 | u64 hbm_drv_base_offset = 0, edma_pq_base_addr; |
| 2679 | u32 basic_hbm_page_size, edma_idx = 0; |
| 2680 | int rc, i; |
| 2681 | |
| 2682 | rc = set_number_of_functional_hbms(hdev); |
| 2683 | if (rc) |
| 2684 | return -EINVAL; |
| 2685 | |
| 2686 | /* |
| 2687 | * Due to HW bug in which TLB size is x16 smaller than expected we use a workaround |
| 2688 | * in which we are using x16 bigger page size to be able to populate the entire |
| 2689 | * HBM mappings in the TLB |
| 2690 | */ |
| 2691 | basic_hbm_page_size = prop->num_functional_hbms * SZ_8M; |
| 2692 | prop->dram_page_size = GAUDI2_COMPENSATE_TLB_PAGE_SIZE_FACTOR * basic_hbm_page_size; |
| 2693 | prop->device_mem_alloc_default_page_size = prop->dram_page_size; |
| 2694 | prop->dram_size = prop->num_functional_hbms * SZ_16G; |
| 2695 | prop->dram_base_address = DRAM_PHYS_BASE; |
| 2696 | prop->dram_end_address = prop->dram_base_address + prop->dram_size; |
| 2697 | prop->dram_supports_virtual_memory = true; |
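
	/*
	 * Worked example (assuming GAUDI2_HBM_NUM == 6 and a compensation
	 * factor of 16, per the comment above): with all six HBMs functional,
	 * basic_hbm_page_size = 6 * 8MB = 48MB and dram_page_size =
	 * 16 * 48MB = 768MB; with one HBM binned out they are 40MB and 640MB
	 * respectively, and dram_size is 96GB or 80GB accordingly.
	 */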
| 2698 | |
| 2699 | prop->dram_user_base_address = DRAM_PHYS_BASE + prop->dram_page_size; |
| 2700 | prop->dram_hints_align_mask = ~GAUDI2_HBM_MMU_SCRM_ADDRESS_MASK; |
| 2701 | prop->hints_dram_reserved_va_range.start_addr = RESERVED_VA_RANGE_FOR_ARC_ON_HBM_START; |
| 2702 | prop->hints_dram_reserved_va_range.end_addr = RESERVED_VA_RANGE_FOR_ARC_ON_HBM_END; |
| 2703 | |
	/* since the DRAM page size differs from the DMMU page size, we need to
	 * allocate DRAM memory in units of dram_page_size and map this memory
	 * in units of the DMMU page size. we overcome this size mismatch using
	 * a scrambling routine which takes a DRAM page and converts it to a
	 * DMMU page.
	 * We therefore:
	 * 1. partition the virtual address space to DRAM-page (whole) pages.
	 *    (suppose we get n such pages)
	 * 2. limit the amount of virtual address space we got from 1 above to
	 *    a multiple of 64M as we don't want the scrambled address to cross
	 *    the DRAM virtual address space.
	 *    ( m = (n * DRAM_page_size) / DMMU_page_size).
	 * 3. determine the end address accordingly:
	 *    end_addr = start_addr + m * 48M
	 *
	 * the DRAM address MSBs (63:48) are not part of the roundup calculation
	 */
| 2721 | prop->dmmu.start_addr = prop->dram_base_address + |
| 2722 | (prop->dram_page_size * |
| 2723 | DIV_ROUND_UP_SECTOR_T(prop->dram_size, prop->dram_page_size)); |
	prop->dmmu.end_addr = prop->dmmu.start_addr + prop->dram_page_size *
			div_u64((VA_HBM_SPACE_END - prop->dmmu.start_addr), prop->dmmu.page_size);
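
	/*
	 * Under the same assumptions as above (six functional HBMs):
	 * dram_size = 96GB and dram_page_size = 768MB, so the roundup yields
	 * DIV_ROUND_UP(96GB, 768MB) = 128 whole DRAM pages and
	 * dmmu.start_addr = dram_base_address + 128 * 768MB, i.e. the DMMU
	 * range starts right after the HBM physical range.
	 */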
| 2726 | /* |
| 2727 | * Driver can't share an (48MB) HBM page with the F/W in order to prevent FW to block |
| 2728 | * the driver part by range register, so it must start at the next (48MB) page |
| 2729 | */ |
| 2730 | hbm_drv_base_offset = roundup(CPU_FW_IMAGE_SIZE, prop->num_functional_hbms * SZ_8M); |
| 2731 | |
| 2732 | /* |
| 2733 | * The NIC driver section size and the HMMU page tables section in the HBM needs |
| 2734 | * to be the remaining size in the first dram page after taking into |
| 2735 | * account the F/W image size |
| 2736 | */ |
| 2737 | |
| 2738 | /* Reserve region in HBM for HMMU page tables */ |
| 2739 | prop->mmu_pgt_addr = DRAM_PHYS_BASE + hbm_drv_base_offset + |
| 2740 | ((prop->dram_page_size - hbm_drv_base_offset) - |
| 2741 | (HMMU_PAGE_TABLES_SIZE + EDMA_PQS_SIZE + EDMA_SCRATCHPAD_SIZE)); |
| 2742 | |
| 2743 | /* Set EDMA PQs HBM addresses */ |
| 2744 | edma_pq_base_addr = prop->mmu_pgt_addr + HMMU_PAGE_TABLES_SIZE; |
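
	/*
	 * Resulting carve-out at the top of the first DRAM page (a sketch;
	 * sizes per the defines used above):
	 *	mmu_pgt_addr      : HMMU_PAGE_TABLES_SIZE
	 *	edma_pq_base_addr : EDMA_PQS_SIZE, one HL_QUEUE_SIZE_IN_BYTES
	 *	                    slot per EDMA queue (see the loop below)
	 *	scratchpad        : EDMA_SCRATCHPAD_SIZE, ending at the first
	 *	                    DRAM page boundary
	 */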
| 2745 | |
| 2746 | for (i = 0 ; i < GAUDI2_QUEUE_ID_CPU_PQ ; i++) { |
		if (gaudi2_is_edma_queue_id(i)) {
| 2748 | prop->hw_queues_props[i].q_dram_bd_address = edma_pq_base_addr + |
| 2749 | (edma_idx * HL_QUEUE_SIZE_IN_BYTES); |
| 2750 | edma_idx++; |
| 2751 | } |
| 2752 | } |
| 2753 | |
| 2754 | return 0; |
| 2755 | } |
| 2756 | |
| 2757 | static int gaudi2_set_fixed_properties(struct hl_device *hdev) |
| 2758 | { |
| 2759 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 2760 | struct hw_queue_properties *q_props; |
| 2761 | u32 num_sync_stream_queues = 0; |
| 2762 | int i, rc; |
| 2763 | |
| 2764 | prop->max_queues = GAUDI2_QUEUE_ID_SIZE; |
| 2765 | prop->hw_queues_props = kcalloc(prop->max_queues, sizeof(struct hw_queue_properties), |
| 2766 | GFP_KERNEL); |
| 2767 | |
| 2768 | if (!prop->hw_queues_props) |
| 2769 | return -ENOMEM; |
| 2770 | |
| 2771 | q_props = prop->hw_queues_props; |
| 2772 | |
| 2773 | for (i = 0 ; i < GAUDI2_QUEUE_ID_CPU_PQ ; i++) { |
| 2774 | q_props[i].type = QUEUE_TYPE_HW; |
| 2775 | q_props[i].driver_only = 0; |
| 2776 | |
| 2777 | if (i >= GAUDI2_QUEUE_ID_NIC_0_0 && i <= GAUDI2_QUEUE_ID_NIC_23_3) { |
| 2778 | q_props[i].supports_sync_stream = 0; |
| 2779 | } else { |
| 2780 | q_props[i].supports_sync_stream = 1; |
| 2781 | num_sync_stream_queues++; |
| 2782 | } |
| 2783 | |
| 2784 | q_props[i].cb_alloc_flags = CB_ALLOC_USER; |
| 2785 | |
		if (gaudi2_is_edma_queue_id(i))
| 2787 | q_props[i].dram_bd = 1; |
| 2788 | } |
| 2789 | |
| 2790 | q_props[GAUDI2_QUEUE_ID_CPU_PQ].type = QUEUE_TYPE_CPU; |
| 2791 | q_props[GAUDI2_QUEUE_ID_CPU_PQ].driver_only = 1; |
| 2792 | q_props[GAUDI2_QUEUE_ID_CPU_PQ].cb_alloc_flags = CB_ALLOC_KERNEL; |
| 2793 | |
| 2794 | prop->cache_line_size = DEVICE_CACHE_LINE_SIZE; |
| 2795 | prop->cfg_base_address = CFG_BASE; |
| 2796 | prop->device_dma_offset_for_host_access = HOST_PHYS_BASE_0; |
| 2797 | prop->host_base_address = HOST_PHYS_BASE_0; |
| 2798 | prop->host_end_address = prop->host_base_address + HOST_PHYS_SIZE_0; |
| 2799 | prop->max_pending_cs = GAUDI2_MAX_PENDING_CS; |
| 2800 | prop->completion_queues_count = GAUDI2_RESERVED_CQ_NUMBER; |
| 2801 | prop->user_dec_intr_count = NUMBER_OF_DEC; |
| 2802 | prop->user_interrupt_count = GAUDI2_IRQ_NUM_USER_LAST - GAUDI2_IRQ_NUM_USER_FIRST + 1; |
| 2803 | prop->completion_mode = HL_COMPLETION_MODE_CS; |
| 2804 | prop->sync_stream_first_sob = GAUDI2_RESERVED_SOB_NUMBER; |
| 2805 | prop->sync_stream_first_mon = GAUDI2_RESERVED_MON_NUMBER; |
| 2806 | |
| 2807 | prop->sram_base_address = SRAM_BASE_ADDR; |
| 2808 | prop->sram_size = SRAM_SIZE; |
| 2809 | prop->sram_end_address = prop->sram_base_address + prop->sram_size; |
| 2810 | prop->sram_user_base_address = prop->sram_base_address + SRAM_USER_BASE_OFFSET; |
| 2811 | |
| 2812 | prop->hints_range_reservation = true; |
| 2813 | |
| 2814 | prop->rotator_enabled_mask = BIT(NUM_OF_ROT) - 1; |
| 2815 | |
| 2816 | prop->max_asid = 2; |
| 2817 | |
| 2818 | prop->dmmu.pgt_size = HMMU_PAGE_TABLES_SIZE; |
| 2819 | prop->mmu_pte_size = HL_PTE_SIZE; |
| 2820 | |
| 2821 | prop->dmmu.hop_shifts[MMU_HOP0] = DHOP0_SHIFT; |
| 2822 | prop->dmmu.hop_shifts[MMU_HOP1] = DHOP1_SHIFT; |
| 2823 | prop->dmmu.hop_shifts[MMU_HOP2] = DHOP2_SHIFT; |
| 2824 | prop->dmmu.hop_shifts[MMU_HOP3] = DHOP3_SHIFT; |
| 2825 | prop->dmmu.hop_masks[MMU_HOP0] = DHOP0_MASK; |
| 2826 | prop->dmmu.hop_masks[MMU_HOP1] = DHOP1_MASK; |
| 2827 | prop->dmmu.hop_masks[MMU_HOP2] = DHOP2_MASK; |
| 2828 | prop->dmmu.hop_masks[MMU_HOP3] = DHOP3_MASK; |
| 2829 | prop->dmmu.page_size = PAGE_SIZE_1GB; |
| 2830 | prop->dmmu.num_hops = MMU_ARCH_4_HOPS; |
| 2831 | prop->dmmu.last_mask = LAST_MASK; |
| 2832 | prop->dmmu.host_resident = 0; |
| 2833 | prop->dmmu.hop_table_size = HOP_TABLE_SIZE_512_PTE; |
| 2834 | prop->dmmu.hop0_tables_total_size = HOP_TABLE_SIZE_512_PTE * prop->max_asid; |
| 2835 | |
	/* As we need to set the page table address in DRAM for HMMU init, we
	 * cannot wait for the F/W cpucp info to set the DRAM properties, since
	 * MMU init comes before HW init
	 */
| 2840 | rc = hdev->asic_funcs->set_dram_properties(hdev); |
| 2841 | if (rc) |
| 2842 | goto free_qprops; |
| 2843 | |
| 2844 | prop->mmu_pgt_size = PMMU_PAGE_TABLES_SIZE; |
| 2845 | |
| 2846 | prop->pmmu.pgt_size = prop->mmu_pgt_size; |
| 2847 | hdev->pmmu_huge_range = true; |
| 2848 | prop->pmmu.host_resident = 1; |
| 2849 | prop->pmmu.num_hops = MMU_ARCH_6_HOPS; |
| 2850 | prop->pmmu.last_mask = LAST_MASK; |
| 2851 | prop->pmmu.hop_table_size = HOP_TABLE_SIZE_512_PTE; |
| 2852 | prop->pmmu.hop0_tables_total_size = HOP_TABLE_SIZE_512_PTE * prop->max_asid; |
| 2853 | |
| 2854 | prop->hints_host_reserved_va_range.start_addr = RESERVED_VA_FOR_VIRTUAL_MSIX_DOORBELL_START; |
| 2855 | prop->hints_host_reserved_va_range.end_addr = RESERVED_VA_RANGE_FOR_ARC_ON_HOST_END; |
| 2856 | prop->hints_host_hpage_reserved_va_range.start_addr = |
| 2857 | RESERVED_VA_RANGE_FOR_ARC_ON_HOST_HPAGE_START; |
| 2858 | prop->hints_host_hpage_reserved_va_range.end_addr = |
| 2859 | RESERVED_VA_RANGE_FOR_ARC_ON_HOST_HPAGE_END; |
| 2860 | |
| 2861 | if (PAGE_SIZE == SZ_64K) { |
| 2862 | prop->pmmu.hop_shifts[MMU_HOP0] = HOP0_SHIFT_64K; |
| 2863 | prop->pmmu.hop_shifts[MMU_HOP1] = HOP1_SHIFT_64K; |
| 2864 | prop->pmmu.hop_shifts[MMU_HOP2] = HOP2_SHIFT_64K; |
| 2865 | prop->pmmu.hop_shifts[MMU_HOP3] = HOP3_SHIFT_64K; |
| 2866 | prop->pmmu.hop_shifts[MMU_HOP4] = HOP4_SHIFT_64K; |
| 2867 | prop->pmmu.hop_shifts[MMU_HOP5] = HOP5_SHIFT_64K; |
| 2868 | prop->pmmu.hop_masks[MMU_HOP0] = HOP0_MASK_64K; |
| 2869 | prop->pmmu.hop_masks[MMU_HOP1] = HOP1_MASK_64K; |
| 2870 | prop->pmmu.hop_masks[MMU_HOP2] = HOP2_MASK_64K; |
| 2871 | prop->pmmu.hop_masks[MMU_HOP3] = HOP3_MASK_64K; |
| 2872 | prop->pmmu.hop_masks[MMU_HOP4] = HOP4_MASK_64K; |
| 2873 | prop->pmmu.hop_masks[MMU_HOP5] = HOP5_MASK_64K; |
| 2874 | prop->pmmu.start_addr = VA_HOST_SPACE_PAGE_START; |
| 2875 | prop->pmmu.end_addr = VA_HOST_SPACE_PAGE_END; |
| 2876 | prop->pmmu.page_size = PAGE_SIZE_64KB; |
| 2877 | |
| 2878 | /* shifts and masks are the same in PMMU and HPMMU */ |
| 2879 | memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu)); |
| 2880 | prop->pmmu_huge.page_size = PAGE_SIZE_16MB; |
| 2881 | prop->pmmu_huge.start_addr = VA_HOST_SPACE_HPAGE_START; |
| 2882 | prop->pmmu_huge.end_addr = VA_HOST_SPACE_HPAGE_END; |
| 2883 | } else { |
| 2884 | prop->pmmu.hop_shifts[MMU_HOP0] = HOP0_SHIFT_4K; |
| 2885 | prop->pmmu.hop_shifts[MMU_HOP1] = HOP1_SHIFT_4K; |
| 2886 | prop->pmmu.hop_shifts[MMU_HOP2] = HOP2_SHIFT_4K; |
| 2887 | prop->pmmu.hop_shifts[MMU_HOP3] = HOP3_SHIFT_4K; |
| 2888 | prop->pmmu.hop_shifts[MMU_HOP4] = HOP4_SHIFT_4K; |
| 2889 | prop->pmmu.hop_shifts[MMU_HOP5] = HOP5_SHIFT_4K; |
| 2890 | prop->pmmu.hop_masks[MMU_HOP0] = HOP0_MASK_4K; |
| 2891 | prop->pmmu.hop_masks[MMU_HOP1] = HOP1_MASK_4K; |
| 2892 | prop->pmmu.hop_masks[MMU_HOP2] = HOP2_MASK_4K; |
| 2893 | prop->pmmu.hop_masks[MMU_HOP3] = HOP3_MASK_4K; |
| 2894 | prop->pmmu.hop_masks[MMU_HOP4] = HOP4_MASK_4K; |
| 2895 | prop->pmmu.hop_masks[MMU_HOP5] = HOP5_MASK_4K; |
| 2896 | prop->pmmu.start_addr = VA_HOST_SPACE_PAGE_START; |
| 2897 | prop->pmmu.end_addr = VA_HOST_SPACE_PAGE_END; |
| 2898 | prop->pmmu.page_size = PAGE_SIZE_4KB; |
| 2899 | |
| 2900 | /* shifts and masks are the same in PMMU and HPMMU */ |
| 2901 | memcpy(&prop->pmmu_huge, &prop->pmmu, sizeof(prop->pmmu)); |
| 2902 | prop->pmmu_huge.page_size = PAGE_SIZE_2MB; |
| 2903 | prop->pmmu_huge.start_addr = VA_HOST_SPACE_HPAGE_START; |
| 2904 | prop->pmmu_huge.end_addr = VA_HOST_SPACE_HPAGE_END; |
| 2905 | } |
| 2906 | |
| 2907 | prop->max_num_of_engines = GAUDI2_ENGINE_ID_SIZE; |
| 2908 | prop->num_engine_cores = CPU_ID_MAX; |
| 2909 | prop->cfg_size = CFG_SIZE; |
| 2910 | prop->num_of_events = GAUDI2_EVENT_SIZE; |
| 2911 | |
| 2912 | prop->supports_engine_modes = true; |
| 2913 | |
| 2914 | prop->dc_power_default = DC_POWER_DEFAULT; |
| 2915 | |
| 2916 | prop->cb_pool_cb_cnt = GAUDI2_CB_POOL_CB_CNT; |
| 2917 | prop->cb_pool_cb_size = GAUDI2_CB_POOL_CB_SIZE; |
| 2918 | prop->pcie_dbi_base_address = CFG_BASE + mmPCIE_DBI_BASE; |
| 2919 | prop->pcie_aux_dbi_reg_addr = CFG_BASE + mmPCIE_AUX_DBI; |
| 2920 | |
| 2921 | strscpy_pad(prop->cpucp_info.card_name, GAUDI2_DEFAULT_CARD_NAME, CARD_NAME_MAX_LEN); |
| 2922 | |
| 2923 | prop->mme_master_slave_mode = 1; |
| 2924 | |
| 2925 | prop->first_available_user_sob[0] = GAUDI2_RESERVED_SOB_NUMBER + |
| 2926 | (num_sync_stream_queues * HL_RSVD_SOBS); |
| 2927 | |
| 2928 | prop->first_available_user_mon[0] = GAUDI2_RESERVED_MON_NUMBER + |
| 2929 | (num_sync_stream_queues * HL_RSVD_MONS); |
| 2930 | |
| 2931 | prop->first_available_user_interrupt = GAUDI2_IRQ_NUM_USER_FIRST; |
| 2932 | prop->tpc_interrupt_id = GAUDI2_IRQ_NUM_TPC_ASSERT; |
| 2933 | prop->eq_interrupt_id = GAUDI2_IRQ_NUM_EVENT_QUEUE; |
| 2934 | |
| 2935 | prop->first_available_cq[0] = GAUDI2_RESERVED_CQ_NUMBER; |
| 2936 | |
| 2937 | prop->fw_cpu_boot_dev_sts0_valid = false; |
| 2938 | prop->fw_cpu_boot_dev_sts1_valid = false; |
| 2939 | prop->hard_reset_done_by_fw = false; |
| 2940 | prop->gic_interrupts_enable = true; |
| 2941 | |
| 2942 | prop->server_type = HL_SERVER_TYPE_UNKNOWN; |
| 2943 | |
| 2944 | prop->max_dec = NUMBER_OF_DEC; |
| 2945 | |
| 2946 | prop->clk_pll_index = HL_GAUDI2_MME_PLL; |
| 2947 | |
| 2948 | prop->dma_mask = 64; |
| 2949 | |
| 2950 | prop->hbw_flush_reg = mmPCIE_WRAP_SPECIAL_GLBL_SPARE_0; |
| 2951 | |
| 2952 | prop->supports_advanced_cpucp_rc = true; |
| 2953 | |
| 2954 | return 0; |
| 2955 | |
| 2956 | free_qprops: |
	kfree(prop->hw_queues_props);
| 2958 | return rc; |
| 2959 | } |
| 2960 | |
| 2961 | static int gaudi2_pci_bars_map(struct hl_device *hdev) |
| 2962 | { |
	static const char * const name[] = {"CFG_SRAM", "MSIX", "DRAM"};
| 2964 | bool is_wc[3] = {false, false, true}; |
| 2965 | int rc; |
| 2966 | |
| 2967 | rc = hl_pci_bars_map(hdev, name, is_wc); |
| 2968 | if (rc) |
| 2969 | return rc; |
| 2970 | |
| 2971 | hdev->rmmio = hdev->pcie_bar[SRAM_CFG_BAR_ID] + (CFG_BASE - STM_FLASH_BASE_ADDR); |
| 2972 | |
| 2973 | return 0; |
| 2974 | } |
| 2975 | |
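/*
 * Re-point the DRAM BAR so that @addr becomes accessible through it.
 * Returns the previous BAR base on success, so the caller can restore it
 * later, or U64_MAX if the iATU is owned by the F/W or the inbound region
 * could not be reconfigured.
 */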
| 2976 | static u64 gaudi2_set_hbm_bar_base(struct hl_device *hdev, u64 addr) |
| 2977 | { |
| 2978 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 2979 | struct hl_inbound_pci_region pci_region; |
| 2980 | u64 old_addr = addr; |
| 2981 | int rc; |
| 2982 | |
| 2983 | if ((gaudi2) && (gaudi2->dram_bar_cur_addr == addr)) |
| 2984 | return old_addr; |
| 2985 | |
| 2986 | if (hdev->asic_prop.iatu_done_by_fw) |
| 2987 | return U64_MAX; |
| 2988 | |
| 2989 | /* Inbound Region 2 - Bar 4 - Point to DRAM */ |
| 2990 | pci_region.mode = PCI_BAR_MATCH_MODE; |
| 2991 | pci_region.bar = DRAM_BAR_ID; |
| 2992 | pci_region.addr = addr; |
	rc = hl_pci_set_inbound_region(hdev, 2, &pci_region);
| 2994 | if (rc) |
| 2995 | return U64_MAX; |
| 2996 | |
| 2997 | if (gaudi2) { |
| 2998 | old_addr = gaudi2->dram_bar_cur_addr; |
| 2999 | gaudi2->dram_bar_cur_addr = addr; |
| 3000 | } |
| 3001 | |
| 3002 | return old_addr; |
| 3003 | } |
| 3004 | |
| 3005 | static int gaudi2_init_iatu(struct hl_device *hdev) |
| 3006 | { |
| 3007 | struct hl_inbound_pci_region inbound_region; |
| 3008 | struct hl_outbound_pci_region outbound_region; |
| 3009 | u32 bar_addr_low, bar_addr_high; |
| 3010 | int rc; |
| 3011 | |
| 3012 | if (hdev->asic_prop.iatu_done_by_fw) |
| 3013 | return 0; |
| 3014 | |
| 3015 | /* Temporary inbound Region 0 - Bar 0 - Point to CFG |
| 3016 | * We must map this region in BAR match mode in order to |
| 3017 | * fetch BAR physical base address |
| 3018 | */ |
| 3019 | inbound_region.mode = PCI_BAR_MATCH_MODE; |
| 3020 | inbound_region.bar = SRAM_CFG_BAR_ID; |
| 3021 | /* Base address must be aligned to Bar size which is 256 MB */ |
| 3022 | inbound_region.addr = STM_FLASH_BASE_ADDR - STM_FLASH_ALIGNED_OFF; |
	rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region);
| 3024 | if (rc) |
| 3025 | return rc; |
| 3026 | |
| 3027 | /* Fetch physical BAR address */ |
| 3028 | bar_addr_high = RREG32(mmPCIE_DBI_BAR1_REG + STM_FLASH_ALIGNED_OFF); |
| 3029 | bar_addr_low = RREG32(mmPCIE_DBI_BAR0_REG + STM_FLASH_ALIGNED_OFF) & ~0xF; |
| 3030 | |
| 3031 | hdev->pcie_bar_phys[SRAM_CFG_BAR_ID] = (u64)bar_addr_high << 32 | bar_addr_low; |
| 3032 | |
| 3033 | /* Inbound Region 0 - Bar 0 - Point to CFG */ |
| 3034 | inbound_region.mode = PCI_ADDRESS_MATCH_MODE; |
| 3035 | inbound_region.bar = SRAM_CFG_BAR_ID; |
| 3036 | inbound_region.offset_in_bar = 0; |
| 3037 | inbound_region.addr = STM_FLASH_BASE_ADDR; |
| 3038 | inbound_region.size = CFG_REGION_SIZE; |
	rc = hl_pci_set_inbound_region(hdev, 0, &inbound_region);
| 3040 | if (rc) |
| 3041 | return rc; |
| 3042 | |
| 3043 | /* Inbound Region 1 - Bar 0 - Point to BAR0_RESERVED + SRAM */ |
| 3044 | inbound_region.mode = PCI_ADDRESS_MATCH_MODE; |
| 3045 | inbound_region.bar = SRAM_CFG_BAR_ID; |
| 3046 | inbound_region.offset_in_bar = CFG_REGION_SIZE; |
| 3047 | inbound_region.addr = BAR0_RSRVD_BASE_ADDR; |
| 3048 | inbound_region.size = BAR0_RSRVD_SIZE + SRAM_SIZE; |
	rc = hl_pci_set_inbound_region(hdev, 1, &inbound_region);
| 3050 | if (rc) |
| 3051 | return rc; |
| 3052 | |
| 3053 | /* Inbound Region 2 - Bar 4 - Point to DRAM */ |
| 3054 | inbound_region.mode = PCI_BAR_MATCH_MODE; |
| 3055 | inbound_region.bar = DRAM_BAR_ID; |
| 3056 | inbound_region.addr = DRAM_PHYS_BASE; |
	rc = hl_pci_set_inbound_region(hdev, 2, &inbound_region);
| 3058 | if (rc) |
| 3059 | return rc; |
| 3060 | |
| 3061 | /* Outbound Region 0 - Point to Host */ |
| 3062 | outbound_region.addr = HOST_PHYS_BASE_0; |
| 3063 | outbound_region.size = HOST_PHYS_SIZE_0; |
	rc = hl_pci_set_outbound_region(hdev, &outbound_region);
| 3065 | |
| 3066 | return rc; |
| 3067 | } |
| 3068 | |
| 3069 | static enum hl_device_hw_state gaudi2_get_hw_state(struct hl_device *hdev) |
| 3070 | { |
| 3071 | return RREG32(mmHW_STATE); |
| 3072 | } |
| 3073 | |
| 3074 | static int gaudi2_tpc_binning_init_prop(struct hl_device *hdev) |
| 3075 | { |
| 3076 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 3077 | |
| 3078 | /* |
| 3079 | * check for error condition in which number of binning candidates |
| 3080 | * is higher than the maximum supported by the driver |
| 3081 | */ |
| 3082 | if (hweight64(hdev->tpc_binning) > MAX_CLUSTER_BINNING_FAULTY_TPCS) { |
		dev_err(hdev->dev, "TPC binning is supported for max of %d faulty TPCs, provided mask 0x%llx\n",
			MAX_CLUSTER_BINNING_FAULTY_TPCS, hdev->tpc_binning);
| 3086 | return -EINVAL; |
| 3087 | } |
| 3088 | |
| 3089 | prop->tpc_binning_mask = hdev->tpc_binning; |
| 3090 | prop->tpc_enabled_mask = GAUDI2_TPC_FULL_MASK; |
| 3091 | |
| 3092 | return 0; |
| 3093 | } |
| 3094 | |
| 3095 | static int gaudi2_set_tpc_binning_masks(struct hl_device *hdev) |
| 3096 | { |
| 3097 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 3098 | struct hw_queue_properties *q_props = prop->hw_queues_props; |
| 3099 | u64 tpc_binning_mask; |
| 3100 | u8 subst_idx = 0; |
| 3101 | int i, rc; |
| 3102 | |
| 3103 | rc = gaudi2_tpc_binning_init_prop(hdev); |
| 3104 | if (rc) |
| 3105 | return rc; |
| 3106 | |
| 3107 | tpc_binning_mask = prop->tpc_binning_mask; |
| 3108 | |
| 3109 | for (i = 0 ; i < MAX_FAULTY_TPCS ; i++) { |
| 3110 | u8 subst_seq, binned, qid_base; |
| 3111 | |
| 3112 | if (tpc_binning_mask == 0) |
| 3113 | break; |
| 3114 | |
| 3115 | if (subst_idx == 0) { |
| 3116 | subst_seq = TPC_ID_DCORE0_TPC6; |
| 3117 | qid_base = GAUDI2_QUEUE_ID_DCORE0_TPC_6_0; |
| 3118 | } else { |
| 3119 | subst_seq = TPC_ID_DCORE3_TPC5; |
| 3120 | qid_base = GAUDI2_QUEUE_ID_DCORE3_TPC_5_0; |
| 3121 | } |
| 3122 | |
| 3123 | |
| 3124 | /* clear bit from mask */ |
| 3125 | binned = __ffs(tpc_binning_mask); |
| 3126 | /* |
| 3127 | * Coverity complains about possible out-of-bound access in |
| 3128 | * clear_bit |
| 3129 | */ |
| 3130 | if (binned >= TPC_ID_SIZE) { |
			dev_err(hdev->dev,
				"Invalid binned TPC (binning mask: %llx)\n",
				tpc_binning_mask);
			return -EINVAL;
		}
		clear_bit(binned, (unsigned long *)&tpc_binning_mask);
| 3137 | |
| 3138 | /* also clear replacing TPC bit from enabled mask */ |
		clear_bit(subst_seq, (unsigned long *)&prop->tpc_enabled_mask);
| 3140 | |
		/* bin the substitute TPC's queues */
| 3142 | q_props[qid_base].binned = 1; |
| 3143 | q_props[qid_base + 1].binned = 1; |
| 3144 | q_props[qid_base + 2].binned = 1; |
| 3145 | q_props[qid_base + 3].binned = 1; |
| 3146 | |
| 3147 | subst_idx++; |
| 3148 | } |
| 3149 | |
| 3150 | return 0; |
| 3151 | } |
| 3152 | |
| 3153 | static int gaudi2_set_dec_binning_masks(struct hl_device *hdev) |
| 3154 | { |
| 3155 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 3156 | u8 num_faulty; |
| 3157 | |
| 3158 | num_faulty = hweight32(hdev->decoder_binning); |
| 3159 | |
| 3160 | /* |
| 3161 | * check for error condition in which number of binning candidates |
| 3162 | * is higher than the maximum supported by the driver |
| 3163 | */ |
| 3164 | if (num_faulty > MAX_FAULTY_DECODERS) { |
		dev_err(hdev->dev, "decoder binning is supported for a max of a single faulty decoder, provided mask 0x%x\n",
			hdev->decoder_binning);
| 3167 | return -EINVAL; |
| 3168 | } |
| 3169 | |
| 3170 | prop->decoder_binning_mask = (hdev->decoder_binning & GAUDI2_DECODER_FULL_MASK); |
| 3171 | |
| 3172 | if (prop->decoder_binning_mask) |
| 3173 | prop->decoder_enabled_mask = (GAUDI2_DECODER_FULL_MASK & ~BIT(DEC_ID_PCIE_VDEC1)); |
| 3174 | else |
| 3175 | prop->decoder_enabled_mask = GAUDI2_DECODER_FULL_MASK; |
| 3176 | |
| 3177 | return 0; |
| 3178 | } |
| 3179 | |
| 3180 | static void gaudi2_set_dram_binning_masks(struct hl_device *hdev) |
| 3181 | { |
| 3182 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 3183 | |
| 3184 | /* check if we should override default binning */ |
| 3185 | if (!hdev->dram_binning) { |
| 3186 | prop->dram_binning_mask = 0; |
| 3187 | prop->dram_enabled_mask = GAUDI2_DRAM_FULL_MASK; |
| 3188 | return; |
| 3189 | } |
| 3190 | |
| 3191 | /* set DRAM binning constraints */ |
| 3192 | prop->faulty_dram_cluster_map |= hdev->dram_binning; |
| 3193 | prop->dram_binning_mask = hdev->dram_binning; |
| 3194 | prop->dram_enabled_mask = GAUDI2_DRAM_FULL_MASK & ~BIT(HBM_ID5); |
| 3195 | } |
| 3196 | |
| 3197 | static int gaudi2_set_edma_binning_masks(struct hl_device *hdev) |
| 3198 | { |
| 3199 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 3200 | struct hw_queue_properties *q_props; |
| 3201 | u8 seq, num_faulty; |
| 3202 | |
| 3203 | num_faulty = hweight32(hdev->edma_binning); |
| 3204 | |
| 3205 | /* |
| 3206 | * check for error condition in which number of binning candidates |
| 3207 | * is higher than the maximum supported by the driver |
| 3208 | */ |
| 3209 | if (num_faulty > MAX_FAULTY_EDMAS) { |
		dev_err(hdev->dev,
			"EDMA binning is supported for a max of a single faulty EDMA, provided mask 0x%x\n",
			hdev->edma_binning);
| 3213 | return -EINVAL; |
| 3214 | } |
| 3215 | |
| 3216 | if (!hdev->edma_binning) { |
| 3217 | prop->edma_binning_mask = 0; |
| 3218 | prop->edma_enabled_mask = GAUDI2_EDMA_FULL_MASK; |
| 3219 | return 0; |
| 3220 | } |
| 3221 | |
| 3222 | seq = __ffs((unsigned long)hdev->edma_binning); |
| 3223 | |
| 3224 | /* set binning constraints */ |
| 3225 | prop->faulty_dram_cluster_map |= BIT(edma_to_hbm_cluster[seq]); |
| 3226 | prop->edma_binning_mask = hdev->edma_binning; |
| 3227 | prop->edma_enabled_mask = GAUDI2_EDMA_FULL_MASK & ~BIT(EDMA_ID_DCORE3_INSTANCE1); |
| 3228 | |
	/* bin the substitute EDMA's queues */
| 3230 | q_props = prop->hw_queues_props; |
| 3231 | q_props[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0].binned = 1; |
| 3232 | q_props[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_1].binned = 1; |
| 3233 | q_props[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_2].binned = 1; |
| 3234 | q_props[GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3].binned = 1; |
| 3235 | |
| 3236 | return 0; |
| 3237 | } |
| 3238 | |
| 3239 | static int gaudi2_set_xbar_edge_enable_mask(struct hl_device *hdev, u32 xbar_edge_iso_mask) |
| 3240 | { |
| 3241 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 3242 | u8 num_faulty, seq; |
| 3243 | |
| 3244 | /* check if we should override default binning */ |
| 3245 | if (!xbar_edge_iso_mask) { |
| 3246 | prop->xbar_edge_enabled_mask = GAUDI2_XBAR_EDGE_FULL_MASK; |
| 3247 | return 0; |
| 3248 | } |
| 3249 | |
| 3250 | /* |
| 3251 | * note that it can be set to value other than 0 only after cpucp packet (i.e. |
| 3252 | * only the FW can set a redundancy value). for user it'll always be 0. |
| 3253 | */ |
| 3254 | num_faulty = hweight32(xbar_edge_iso_mask); |
| 3255 | |
| 3256 | /* |
| 3257 | * check for error condition in which number of binning candidates |
| 3258 | * is higher than the maximum supported by the driver |
| 3259 | */ |
| 3260 | if (num_faulty > MAX_FAULTY_XBARS) { |
		dev_err(hdev->dev, "we cannot have more than %d faulty XBAR EDGEs\n",
			MAX_FAULTY_XBARS);
| 3263 | return -EINVAL; |
| 3264 | } |
| 3265 | |
| 3266 | seq = __ffs((unsigned long)xbar_edge_iso_mask); |
| 3267 | |
| 3268 | /* set binning constraints */ |
| 3269 | prop->faulty_dram_cluster_map |= BIT(xbar_edge_to_hbm_cluster[seq]); |
| 3270 | prop->xbar_edge_enabled_mask = (~xbar_edge_iso_mask) & GAUDI2_XBAR_EDGE_FULL_MASK; |
| 3271 | |
| 3272 | return 0; |
| 3273 | } |
| 3274 | |
| 3275 | static int gaudi2_set_cluster_binning_masks_common(struct hl_device *hdev, u8 xbar_edge_iso_mask) |
| 3276 | { |
| 3277 | int rc; |
| 3278 | |
| 3279 | /* |
| 3280 | * mark all clusters as good, each component will "fail" cluster |
| 3281 | * based on eFuse/user values. |
| 3282 | * If more than single cluster is faulty- the chip is unusable |
| 3283 | */ |
| 3284 | hdev->asic_prop.faulty_dram_cluster_map = 0; |
| 3285 | |
| 3286 | gaudi2_set_dram_binning_masks(hdev); |
| 3287 | |
| 3288 | rc = gaudi2_set_edma_binning_masks(hdev); |
| 3289 | if (rc) |
| 3290 | return rc; |
| 3291 | |
| 3292 | rc = gaudi2_set_xbar_edge_enable_mask(hdev, xbar_edge_iso_mask); |
| 3293 | if (rc) |
return rc;

/* always initially set to full mask */
| 3298 | hdev->asic_prop.hmmu_hif_enabled_mask = GAUDI2_HIF_HMMU_FULL_MASK; |
| 3299 | |
| 3300 | return 0; |
| 3301 | } |
| 3302 | |
| 3303 | static int gaudi2_set_cluster_binning_masks(struct hl_device *hdev) |
| 3304 | { |
| 3305 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 3306 | int rc; |
| 3307 | |
rc = gaudi2_set_cluster_binning_masks_common(hdev, prop->cpucp_info.xbar_binning_mask);
| 3309 | if (rc) |
| 3310 | return rc; |
| 3311 | |
| 3312 | /* if we have DRAM binning reported by FW we should perform cluster config */ |
| 3313 | if (prop->faulty_dram_cluster_map) { |
| 3314 | u8 cluster_seq = __ffs((unsigned long)prop->faulty_dram_cluster_map); |
| 3315 | |
| 3316 | prop->hmmu_hif_enabled_mask = cluster_hmmu_hif_enabled_mask[cluster_seq]; |
| 3317 | } |
| 3318 | |
| 3319 | return 0; |
| 3320 | } |
| 3321 | |
| 3322 | static int gaudi2_set_binning_masks(struct hl_device *hdev) |
| 3323 | { |
| 3324 | int rc; |
| 3325 | |
| 3326 | rc = gaudi2_set_cluster_binning_masks(hdev); |
| 3327 | if (rc) |
| 3328 | return rc; |
| 3329 | |
| 3330 | rc = gaudi2_set_tpc_binning_masks(hdev); |
| 3331 | if (rc) |
| 3332 | return rc; |
| 3333 | |
| 3334 | rc = gaudi2_set_dec_binning_masks(hdev); |
| 3335 | if (rc) |
| 3336 | return rc; |
| 3337 | |
| 3338 | return 0; |
| 3339 | } |
| 3340 | |
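/*
 * Handshake with the device CPU and cache the properties it reports: DRAM
 * size, card name, binning masks and max power. Skipped during compute reset,
 * since the device CPU is not reset then and the data cannot change.
 */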
| 3341 | static int gaudi2_cpucp_info_get(struct hl_device *hdev) |
| 3342 | { |
| 3343 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 3344 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 3345 | long max_power; |
| 3346 | u64 dram_size; |
| 3347 | int rc; |
| 3348 | |
| 3349 | if (!(gaudi2->hw_cap_initialized & HW_CAP_CPU_Q)) |
| 3350 | return 0; |
| 3351 | |
/* No point in asking for this information again when not doing hard reset, as the device
 * CPU hasn't been reset
 */
| 3355 | if (hdev->reset_info.in_compute_reset) |
| 3356 | return 0; |
| 3357 | |
| 3358 | rc = hl_fw_cpucp_handshake(hdev, mmCPU_BOOT_DEV_STS0, mmCPU_BOOT_DEV_STS1, mmCPU_BOOT_ERR0, |
| 3359 | mmCPU_BOOT_ERR1); |
| 3360 | if (rc) |
| 3361 | return rc; |
| 3362 | |
| 3363 | dram_size = le64_to_cpu(prop->cpucp_info.dram_size); |
| 3364 | if (dram_size) { |
/* we can have either 5 or 6 HBMs. other values are invalid */
| 3366 | |
| 3367 | if ((dram_size != ((GAUDI2_HBM_NUM - 1) * SZ_16G)) && |
| 3368 | (dram_size != (GAUDI2_HBM_NUM * SZ_16G))) { |
| 3369 | dev_err(hdev->dev, |
| 3370 | "F/W reported invalid DRAM size %llu. Trying to use default size %llu\n" , |
| 3371 | dram_size, prop->dram_size); |
| 3372 | dram_size = prop->dram_size; |
| 3373 | } |
| 3374 | |
| 3375 | prop->dram_size = dram_size; |
| 3376 | prop->dram_end_address = prop->dram_base_address + dram_size; |
| 3377 | } |
| 3378 | |
| 3379 | if (!strlen(prop->cpucp_info.card_name)) |
| 3380 | strscpy_pad(prop->cpucp_info.card_name, GAUDI2_DEFAULT_CARD_NAME, |
| 3381 | CARD_NAME_MAX_LEN); |
| 3382 | |
| 3383 | /* Overwrite binning masks with the actual binning values from F/W */ |
| 3384 | hdev->dram_binning = prop->cpucp_info.dram_binning_mask; |
| 3385 | hdev->edma_binning = prop->cpucp_info.edma_binning_mask; |
| 3386 | hdev->tpc_binning = le64_to_cpu(prop->cpucp_info.tpc_binning_mask); |
| 3387 | hdev->decoder_binning = lower_32_bits(le64_to_cpu(prop->cpucp_info.decoder_binning_mask)); |
| 3388 | |
| 3389 | dev_dbg(hdev->dev, "Read binning masks: tpc: 0x%llx, dram: 0x%llx, edma: 0x%x, dec: 0x%x\n" , |
| 3390 | hdev->tpc_binning, hdev->dram_binning, hdev->edma_binning, |
| 3391 | hdev->decoder_binning); |
| 3392 | |
| 3393 | /* |
| 3394 | * at this point the DRAM parameters need to be updated according to data obtained |
| 3395 | * from the FW |
| 3396 | */ |
| 3397 | rc = hdev->asic_funcs->set_dram_properties(hdev); |
| 3398 | if (rc) |
| 3399 | return rc; |
| 3400 | |
| 3401 | rc = hdev->asic_funcs->set_binning_masks(hdev); |
| 3402 | if (rc) |
| 3403 | return rc; |
| 3404 | |
| 3405 | max_power = hl_fw_get_max_power(hdev); |
| 3406 | if (max_power < 0) |
| 3407 | return max_power; |
| 3408 | |
| 3409 | prop->max_power_default = (u64) max_power; |
| 3410 | |
| 3411 | return 0; |
| 3412 | } |
| 3413 | |
| 3414 | static int gaudi2_fetch_psoc_frequency(struct hl_device *hdev) |
| 3415 | { |
| 3416 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 3417 | u16 pll_freq_arr[HL_PLL_NUM_OUTPUTS]; |
| 3418 | int rc; |
| 3419 | |
| 3420 | if (!(gaudi2->hw_cap_initialized & HW_CAP_CPU_Q)) |
| 3421 | return 0; |
| 3422 | |
rc = hl_fw_cpucp_pll_info_get(hdev, HL_GAUDI2_CPU_PLL, pll_freq_arr);
| 3424 | if (rc) |
| 3425 | return rc; |
| 3426 | |
| 3427 | hdev->asic_prop.psoc_timestamp_frequency = pll_freq_arr[3]; |
| 3428 | |
| 3429 | return 0; |
| 3430 | } |
| 3431 | |
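/*
 * Zero the device-resident MMU page-tables region. A no-op if the MMU is not
 * initialized yet or if the DMMU page tables are host-resident.
 */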
| 3432 | static int gaudi2_mmu_clear_pgt_range(struct hl_device *hdev) |
| 3433 | { |
| 3434 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 3435 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 3436 | int rc; |
| 3437 | |
| 3438 | if (!(gaudi2->hw_cap_initialized & HW_CAP_MMU_MASK)) |
| 3439 | return 0; |
| 3440 | |
| 3441 | if (prop->dmmu.host_resident) |
| 3442 | return 0; |
| 3443 | |
rc = gaudi2_memset_device_memory(hdev, prop->mmu_pgt_addr, prop->dmmu.pgt_size, 0);
if (rc)
dev_err(hdev->dev, "Failed to clear mmu pgt");
| 3447 | |
| 3448 | return rc; |
| 3449 | } |
| 3450 | |
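/*
 * Early init: set the fixed properties, sanity-check the config and MSI-X BAR
 * sizes, initialize PCI and read the preboot status. A dirty H/W state
 * triggers a hard reset followed by a second preboot status read.
 */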
| 3451 | static int gaudi2_early_init(struct hl_device *hdev) |
| 3452 | { |
| 3453 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 3454 | struct pci_dev *pdev = hdev->pdev; |
| 3455 | resource_size_t pci_bar_size; |
| 3456 | int rc; |
| 3457 | |
| 3458 | rc = gaudi2_set_fixed_properties(hdev); |
| 3459 | if (rc) |
| 3460 | return rc; |
| 3461 | |
| 3462 | /* Check BAR sizes */ |
| 3463 | pci_bar_size = pci_resource_len(pdev, SRAM_CFG_BAR_ID); |
| 3464 | |
| 3465 | if (pci_bar_size != CFG_BAR_SIZE) { |
| 3466 | dev_err(hdev->dev, "Not " HL_NAME "? BAR %d size %pa, expecting %llu\n" , |
| 3467 | SRAM_CFG_BAR_ID, &pci_bar_size, CFG_BAR_SIZE); |
| 3468 | rc = -ENODEV; |
| 3469 | goto free_queue_props; |
| 3470 | } |
| 3471 | |
| 3472 | pci_bar_size = pci_resource_len(pdev, MSIX_BAR_ID); |
| 3473 | if (pci_bar_size != MSIX_BAR_SIZE) { |
| 3474 | dev_err(hdev->dev, "Not " HL_NAME "? BAR %d size %pa, expecting %llu\n" , |
| 3475 | MSIX_BAR_ID, &pci_bar_size, MSIX_BAR_SIZE); |
| 3476 | rc = -ENODEV; |
| 3477 | goto free_queue_props; |
| 3478 | } |
| 3479 | |
| 3480 | prop->dram_pci_bar_size = pci_resource_len(pdev, DRAM_BAR_ID); |
| 3481 | hdev->dram_pci_bar_start = pci_resource_start(pdev, DRAM_BAR_ID); |
| 3482 | |
| 3483 | /* |
| 3484 | * Only in pldm driver config iATU |
| 3485 | */ |
| 3486 | if (hdev->pldm) |
| 3487 | hdev->asic_prop.iatu_done_by_fw = false; |
| 3488 | else |
| 3489 | hdev->asic_prop.iatu_done_by_fw = true; |
| 3490 | |
| 3491 | rc = hl_pci_init(hdev); |
| 3492 | if (rc) |
| 3493 | goto free_queue_props; |
| 3494 | |
| 3495 | /* Before continuing in the initialization, we need to read the preboot |
| 3496 | * version to determine whether we run with a security-enabled firmware |
| 3497 | */ |
| 3498 | rc = hl_fw_read_preboot_status(hdev); |
| 3499 | if (rc) { |
| 3500 | if (hdev->reset_on_preboot_fail) |
| 3501 | hdev->asic_funcs->hw_fini(hdev, true, false); |
| 3502 | goto pci_fini; |
| 3503 | } |
| 3504 | |
| 3505 | if (gaudi2_get_hw_state(hdev) == HL_DEVICE_HW_STATE_DIRTY) { |
| 3506 | dev_dbg(hdev->dev, "H/W state is dirty, must reset before initializing\n" ); |
| 3507 | rc = hdev->asic_funcs->hw_fini(hdev, true, false); |
| 3508 | if (rc) { |
| 3509 | dev_err(hdev->dev, "failed to reset HW in dirty state (%d)\n" , rc); |
| 3510 | goto pci_fini; |
| 3511 | } |
| 3512 | |
| 3513 | rc = hl_fw_read_preboot_status(hdev); |
| 3514 | if (rc) { |
| 3515 | if (hdev->reset_on_preboot_fail) |
| 3516 | hdev->asic_funcs->hw_fini(hdev, true, false); |
| 3517 | goto pci_fini; |
| 3518 | } |
| 3519 | } |
| 3520 | |
| 3521 | return 0; |
| 3522 | |
| 3523 | pci_fini: |
| 3524 | hl_pci_fini(hdev); |
| 3525 | free_queue_props: |
kfree(hdev->asic_prop.hw_queues_props);
| 3527 | return rc; |
| 3528 | } |
| 3529 | |
| 3530 | static int gaudi2_early_fini(struct hl_device *hdev) |
| 3531 | { |
kfree(hdev->asic_prop.hw_queues_props);
| 3533 | hl_pci_fini(hdev); |
| 3534 | |
| 3535 | return 0; |
| 3536 | } |
| 3537 | |
| 3538 | static bool gaudi2_is_arc_nic_owned(u64 arc_id) |
| 3539 | { |
| 3540 | switch (arc_id) { |
case CPU_ID_NIC_QMAN_ARC0 ... CPU_ID_NIC_QMAN_ARC23:
| 3542 | return true; |
| 3543 | default: |
| 3544 | return false; |
| 3545 | } |
| 3546 | } |
| 3547 | |
| 3548 | static bool gaudi2_is_arc_tpc_owned(u64 arc_id) |
| 3549 | { |
| 3550 | switch (arc_id) { |
case CPU_ID_TPC_QMAN_ARC0 ... CPU_ID_TPC_QMAN_ARC24:
| 3552 | return true; |
| 3553 | default: |
| 3554 | return false; |
| 3555 | } |
| 3556 | } |
| 3557 | |
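/*
 * Mark the capability bit of every active ARC: the scheduler ARCs and the
 * ARCs behind each enabled queue, excluding NIC/TPC ARCs whose engines are
 * masked off. Also fetch the engine-core interrupt register address from the
 * FW dynamic registers.
 */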
| 3558 | static void gaudi2_init_arcs(struct hl_device *hdev) |
| 3559 | { |
| 3560 | struct cpu_dyn_regs *dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; |
| 3561 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 3562 | u64 arc_id; |
| 3563 | u32 i; |
| 3564 | |
| 3565 | for (i = CPU_ID_SCHED_ARC0 ; i <= CPU_ID_SCHED_ARC3 ; i++) { |
if (gaudi2_is_arc_enabled(hdev, i))
continue;

gaudi2_set_arc_id_cap(hdev, i);
| 3570 | } |
| 3571 | |
| 3572 | for (i = GAUDI2_QUEUE_ID_PDMA_0_0 ; i < GAUDI2_QUEUE_ID_CPU_PQ ; i += 4) { |
if (!gaudi2_is_queue_enabled(hdev, i))
| 3574 | continue; |
| 3575 | |
| 3576 | arc_id = gaudi2_queue_id_to_arc_id[i]; |
| 3577 | if (gaudi2_is_arc_enabled(hdev, arc_id)) |
| 3578 | continue; |
| 3579 | |
| 3580 | if (gaudi2_is_arc_nic_owned(arc_id) && |
| 3581 | !(hdev->nic_ports_mask & BIT_ULL(arc_id - CPU_ID_NIC_QMAN_ARC0))) |
| 3582 | continue; |
| 3583 | |
| 3584 | if (gaudi2_is_arc_tpc_owned(arc_id) && !(gaudi2->tpc_hw_cap_initialized & |
| 3585 | BIT_ULL(arc_id - CPU_ID_TPC_QMAN_ARC0))) |
| 3586 | continue; |
| 3587 | |
| 3588 | gaudi2_set_arc_id_cap(hdev, arc_id); |
| 3589 | } |
| 3590 | |
| 3591 | /* Fetch ARC scratchpad address */ |
| 3592 | hdev->asic_prop.engine_core_interrupt_reg_addr = |
| 3593 | CFG_BASE + le32_to_cpu(dyn_regs->eng_arc_irq_ctrl); |
| 3594 | } |
| 3595 | |
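/*
 * Scrub a single ARC's DCCM by issuing a KDMA memset job. Scheduler ARCs 0-3
 * own two consecutive DCCM blocks, while scheduler ARCs 4-5 and the MME QMAN
 * ARCs have separate lower/upper blocks that are selected through the
 * ARC_DCCM_UPPER_EN register, so each block is scrubbed in turn.
 */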
| 3596 | static int gaudi2_scrub_arc_dccm(struct hl_device *hdev, u32 cpu_id) |
| 3597 | { |
| 3598 | u32 reg_base, reg_val; |
| 3599 | int rc; |
| 3600 | |
| 3601 | switch (cpu_id) { |
| 3602 | case CPU_ID_SCHED_ARC0 ... CPU_ID_SCHED_ARC3: |
| 3603 | /* Each ARC scheduler has 2 consecutive DCCM blocks */ |
rc = gaudi2_send_job_to_kdma(hdev, 0, CFG_BASE + gaudi2_arc_dccm_bases[cpu_id],
ARC_DCCM_BLOCK_SIZE * 2, true);
| 3606 | if (rc) |
| 3607 | return rc; |
| 3608 | break; |
| 3609 | case CPU_ID_SCHED_ARC4: |
| 3610 | case CPU_ID_SCHED_ARC5: |
| 3611 | case CPU_ID_MME_QMAN_ARC0: |
| 3612 | case CPU_ID_MME_QMAN_ARC1: |
| 3613 | reg_base = gaudi2_arc_blocks_bases[cpu_id]; |
| 3614 | |
| 3615 | /* Scrub lower DCCM block */ |
rc = gaudi2_send_job_to_kdma(hdev, 0, CFG_BASE + gaudi2_arc_dccm_bases[cpu_id],
ARC_DCCM_BLOCK_SIZE, true);
| 3618 | if (rc) |
| 3619 | return rc; |
| 3620 | |
| 3621 | /* Switch to upper DCCM block */ |
| 3622 | reg_val = FIELD_PREP(ARC_FARM_ARC0_AUX_MME_ARC_UPPER_DCCM_EN_VAL_MASK, 1); |
| 3623 | WREG32(reg_base + ARC_DCCM_UPPER_EN_OFFSET, reg_val); |
| 3624 | |
| 3625 | /* Scrub upper DCCM block */ |
rc = gaudi2_send_job_to_kdma(hdev, 0, CFG_BASE + gaudi2_arc_dccm_bases[cpu_id],
ARC_DCCM_BLOCK_SIZE, true);
| 3628 | if (rc) |
| 3629 | return rc; |
| 3630 | |
| 3631 | /* Switch to lower DCCM block */ |
| 3632 | reg_val = FIELD_PREP(ARC_FARM_ARC0_AUX_MME_ARC_UPPER_DCCM_EN_VAL_MASK, 0); |
| 3633 | WREG32(reg_base + ARC_DCCM_UPPER_EN_OFFSET, reg_val); |
| 3634 | break; |
| 3635 | default: |
rc = gaudi2_send_job_to_kdma(hdev, 0, CFG_BASE + gaudi2_arc_dccm_bases[cpu_id],
ARC_DCCM_BLOCK_SIZE, true);
| 3638 | if (rc) |
| 3639 | return rc; |
| 3640 | } |
| 3641 | |
| 3642 | return 0; |
| 3643 | } |
| 3644 | |
| 3645 | static int gaudi2_scrub_arcs_dccm(struct hl_device *hdev) |
| 3646 | { |
| 3647 | u16 arc_id; |
| 3648 | int rc; |
| 3649 | |
| 3650 | for (arc_id = CPU_ID_SCHED_ARC0 ; arc_id < CPU_ID_MAX ; arc_id++) { |
| 3651 | if (!gaudi2_is_arc_enabled(hdev, arc_id)) |
| 3652 | continue; |
| 3653 | |
rc = gaudi2_scrub_arc_dccm(hdev, arc_id);
| 3655 | if (rc) |
| 3656 | return rc; |
| 3657 | } |
| 3658 | |
| 3659 | return 0; |
| 3660 | } |
| 3661 | |
| 3662 | static int gaudi2_late_init(struct hl_device *hdev) |
| 3663 | { |
| 3664 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 3665 | int rc; |
| 3666 | |
rc = hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_ENABLE_PCI_ACCESS,
gaudi2->virt_msix_db_dma_addr);
| 3669 | if (rc) |
| 3670 | return rc; |
| 3671 | |
| 3672 | rc = gaudi2_fetch_psoc_frequency(hdev); |
| 3673 | if (rc) { |
| 3674 | dev_err(hdev->dev, "Failed to fetch psoc frequency\n" ); |
| 3675 | goto disable_pci_access; |
| 3676 | } |
| 3677 | |
| 3678 | rc = gaudi2_mmu_clear_pgt_range(hdev); |
| 3679 | if (rc) { |
| 3680 | dev_err(hdev->dev, "Failed to clear MMU page tables range\n" ); |
| 3681 | goto disable_pci_access; |
| 3682 | } |
| 3683 | |
| 3684 | gaudi2_init_arcs(hdev); |
| 3685 | |
| 3686 | rc = gaudi2_scrub_arcs_dccm(hdev); |
| 3687 | if (rc) { |
| 3688 | dev_err(hdev->dev, "Failed to scrub arcs DCCM\n" ); |
| 3689 | goto disable_pci_access; |
| 3690 | } |
| 3691 | |
| 3692 | gaudi2_init_security(hdev); |
| 3693 | |
| 3694 | return 0; |
| 3695 | |
| 3696 | disable_pci_access: |
hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
| 3698 | |
| 3699 | return rc; |
| 3700 | } |
| 3701 | |
| 3702 | static void gaudi2_late_fini(struct hl_device *hdev) |
| 3703 | { |
| 3704 | hl_hwmon_release_resources(hdev); |
| 3705 | } |
| 3706 | |
| 3707 | static void gaudi2_user_mapped_dec_init(struct gaudi2_device *gaudi2, u32 start_idx) |
| 3708 | { |
| 3709 | struct user_mapped_block *blocks = gaudi2->mapped_blocks; |
| 3710 | |
| 3711 | HL_USR_MAPPED_BLK_INIT(&blocks[start_idx++], mmDCORE0_DEC0_CMD_BASE, HL_BLOCK_SIZE); |
| 3712 | HL_USR_MAPPED_BLK_INIT(&blocks[start_idx++], mmDCORE0_DEC1_CMD_BASE, HL_BLOCK_SIZE); |
| 3713 | HL_USR_MAPPED_BLK_INIT(&blocks[start_idx++], mmDCORE1_DEC0_CMD_BASE, HL_BLOCK_SIZE); |
| 3714 | HL_USR_MAPPED_BLK_INIT(&blocks[start_idx++], mmDCORE1_DEC1_CMD_BASE, HL_BLOCK_SIZE); |
| 3715 | HL_USR_MAPPED_BLK_INIT(&blocks[start_idx++], mmDCORE2_DEC0_CMD_BASE, HL_BLOCK_SIZE); |
| 3716 | HL_USR_MAPPED_BLK_INIT(&blocks[start_idx++], mmDCORE2_DEC1_CMD_BASE, HL_BLOCK_SIZE); |
| 3717 | HL_USR_MAPPED_BLK_INIT(&blocks[start_idx++], mmDCORE3_DEC0_CMD_BASE, HL_BLOCK_SIZE); |
| 3718 | HL_USR_MAPPED_BLK_INIT(&blocks[start_idx++], mmDCORE3_DEC1_CMD_BASE, HL_BLOCK_SIZE); |
| 3719 | HL_USR_MAPPED_BLK_INIT(&blocks[start_idx++], mmPCIE_DEC0_CMD_BASE, HL_BLOCK_SIZE); |
| 3720 | HL_USR_MAPPED_BLK_INIT(&blocks[start_idx], mmPCIE_DEC1_CMD_BASE, HL_BLOCK_SIZE); |
| 3721 | } |
| 3722 | |
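/*
 * Build the table of H/W blocks that user space is allowed to map: the ARC
 * DCCMs, the ACP engine blocks, the NIC UMR doorbell blocks, the decoder
 * command blocks and the sync manager blocks of DCOREs 1-3.
 */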
| 3723 | static void gaudi2_user_mapped_blocks_init(struct hl_device *hdev) |
| 3724 | { |
| 3725 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 3726 | struct user_mapped_block *blocks = gaudi2->mapped_blocks; |
| 3727 | u32 block_size, umr_start_idx, num_umr_blocks; |
| 3728 | int i; |
| 3729 | |
| 3730 | for (i = 0 ; i < NUM_ARC_CPUS ; i++) { |
| 3731 | if (i >= CPU_ID_SCHED_ARC0 && i <= CPU_ID_SCHED_ARC3) |
| 3732 | block_size = ARC_DCCM_BLOCK_SIZE * 2; |
| 3733 | else |
| 3734 | block_size = ARC_DCCM_BLOCK_SIZE; |
| 3735 | |
| 3736 | blocks[i].address = gaudi2_arc_dccm_bases[i]; |
| 3737 | blocks[i].size = block_size; |
| 3738 | } |
| 3739 | |
| 3740 | blocks[NUM_ARC_CPUS].address = mmARC_FARM_ARC0_ACP_ENG_BASE; |
| 3741 | blocks[NUM_ARC_CPUS].size = HL_BLOCK_SIZE; |
| 3742 | |
| 3743 | blocks[NUM_ARC_CPUS + 1].address = mmARC_FARM_ARC1_ACP_ENG_BASE; |
| 3744 | blocks[NUM_ARC_CPUS + 1].size = HL_BLOCK_SIZE; |
| 3745 | |
| 3746 | blocks[NUM_ARC_CPUS + 2].address = mmARC_FARM_ARC2_ACP_ENG_BASE; |
| 3747 | blocks[NUM_ARC_CPUS + 2].size = HL_BLOCK_SIZE; |
| 3748 | |
| 3749 | blocks[NUM_ARC_CPUS + 3].address = mmARC_FARM_ARC3_ACP_ENG_BASE; |
| 3750 | blocks[NUM_ARC_CPUS + 3].size = HL_BLOCK_SIZE; |
| 3751 | |
| 3752 | blocks[NUM_ARC_CPUS + 4].address = mmDCORE0_MME_QM_ARC_ACP_ENG_BASE; |
| 3753 | blocks[NUM_ARC_CPUS + 4].size = HL_BLOCK_SIZE; |
| 3754 | |
| 3755 | blocks[NUM_ARC_CPUS + 5].address = mmDCORE1_MME_QM_ARC_ACP_ENG_BASE; |
| 3756 | blocks[NUM_ARC_CPUS + 5].size = HL_BLOCK_SIZE; |
| 3757 | |
| 3758 | blocks[NUM_ARC_CPUS + 6].address = mmDCORE2_MME_QM_ARC_ACP_ENG_BASE; |
| 3759 | blocks[NUM_ARC_CPUS + 6].size = HL_BLOCK_SIZE; |
| 3760 | |
| 3761 | blocks[NUM_ARC_CPUS + 7].address = mmDCORE3_MME_QM_ARC_ACP_ENG_BASE; |
| 3762 | blocks[NUM_ARC_CPUS + 7].size = HL_BLOCK_SIZE; |
| 3763 | |
| 3764 | umr_start_idx = NUM_ARC_CPUS + NUM_OF_USER_ACP_BLOCKS; |
| 3765 | num_umr_blocks = NIC_NUMBER_OF_ENGINES * NUM_OF_USER_NIC_UMR_BLOCKS; |
| 3766 | for (i = 0 ; i < num_umr_blocks ; i++) { |
| 3767 | u8 nic_id, umr_block_id; |
| 3768 | |
| 3769 | nic_id = i / NUM_OF_USER_NIC_UMR_BLOCKS; |
| 3770 | umr_block_id = i % NUM_OF_USER_NIC_UMR_BLOCKS; |
| 3771 | |
| 3772 | blocks[umr_start_idx + i].address = |
| 3773 | mmNIC0_UMR0_0_UNSECURE_DOORBELL0_BASE + |
| 3774 | (nic_id / NIC_NUMBER_OF_QM_PER_MACRO) * NIC_OFFSET + |
| 3775 | (nic_id % NIC_NUMBER_OF_QM_PER_MACRO) * NIC_QM_OFFSET + |
| 3776 | umr_block_id * NIC_UMR_OFFSET; |
| 3777 | blocks[umr_start_idx + i].size = HL_BLOCK_SIZE; |
| 3778 | } |
| 3779 | |
| 3780 | /* Expose decoder HW configuration block to user */ |
| 3781 | gaudi2_user_mapped_dec_init(gaudi2, USR_MAPPED_BLK_DEC_START_IDX); |
| 3782 | |
| 3783 | for (i = 1; i < NUM_OF_DCORES; ++i) { |
| 3784 | blocks[USR_MAPPED_BLK_SM_START_IDX + 2 * (i - 1)].size = SM_OBJS_BLOCK_SIZE; |
| 3785 | blocks[USR_MAPPED_BLK_SM_START_IDX + 2 * (i - 1) + 1].size = HL_BLOCK_SIZE; |
| 3786 | |
| 3787 | blocks[USR_MAPPED_BLK_SM_START_IDX + 2 * (i - 1)].address = |
| 3788 | mmDCORE0_SYNC_MNGR_OBJS_BASE + i * DCORE_OFFSET; |
| 3789 | |
| 3790 | blocks[USR_MAPPED_BLK_SM_START_IDX + 2 * (i - 1) + 1].address = |
| 3791 | mmDCORE0_SYNC_MNGR_GLBL_BASE + i * DCORE_OFFSET; |
| 3792 | } |
| 3793 | } |
| 3794 | |
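/*
 * Allocate the CPU-accessible memory area with up to
 * GAUDI2_ALLOC_CPU_MEM_RETRY_CNT attempts, keeping only an allocation whose
 * PCI MSB extension bits are uniform across the whole range, and freeing the
 * rejected attempts.
 */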
| 3795 | static int gaudi2_alloc_cpu_accessible_dma_mem(struct hl_device *hdev) |
| 3796 | { |
| 3797 | dma_addr_t dma_addr_arr[GAUDI2_ALLOC_CPU_MEM_RETRY_CNT] = {}, end_addr; |
| 3798 | void *virt_addr_arr[GAUDI2_ALLOC_CPU_MEM_RETRY_CNT] = {}; |
| 3799 | int i, j, rc = 0; |
| 3800 | |
/* The device ARC works with 32-bit addresses, and because there is a single HW register
| 3802 | * that holds the extension bits (49..28), these bits must be identical in all the allocated |
| 3803 | * range. |
| 3804 | */ |
| 3805 | |
| 3806 | for (i = 0 ; i < GAUDI2_ALLOC_CPU_MEM_RETRY_CNT ; i++) { |
| 3807 | virt_addr_arr[i] = hl_asic_dma_alloc_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, |
| 3808 | &dma_addr_arr[i], GFP_KERNEL | __GFP_ZERO); |
| 3809 | if (!virt_addr_arr[i]) { |
| 3810 | rc = -ENOMEM; |
| 3811 | goto free_dma_mem_arr; |
| 3812 | } |
| 3813 | |
| 3814 | end_addr = dma_addr_arr[i] + HL_CPU_ACCESSIBLE_MEM_SIZE - 1; |
| 3815 | if (GAUDI2_ARC_PCI_MSB_ADDR(dma_addr_arr[i]) == GAUDI2_ARC_PCI_MSB_ADDR(end_addr)) |
| 3816 | break; |
| 3817 | } |
| 3818 | |
| 3819 | if (i == GAUDI2_ALLOC_CPU_MEM_RETRY_CNT) { |
| 3820 | dev_err(hdev->dev, |
| 3821 | "MSB of ARC accessible DMA memory are not identical in all range\n" ); |
| 3822 | rc = -EFAULT; |
| 3823 | goto free_dma_mem_arr; |
| 3824 | } |
| 3825 | |
| 3826 | hdev->cpu_accessible_dma_mem = virt_addr_arr[i]; |
| 3827 | hdev->cpu_accessible_dma_address = dma_addr_arr[i]; |
| 3828 | |
| 3829 | free_dma_mem_arr: |
| 3830 | for (j = 0 ; j < i ; j++) |
| 3831 | hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, virt_addr_arr[j], |
| 3832 | dma_addr_arr[j]); |
| 3833 | |
| 3834 | return rc; |
| 3835 | } |
| 3836 | |
| 3837 | static void gaudi2_set_pci_memory_regions(struct hl_device *hdev) |
| 3838 | { |
| 3839 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 3840 | struct pci_mem_region *region; |
| 3841 | |
| 3842 | /* CFG */ |
| 3843 | region = &hdev->pci_mem_region[PCI_REGION_CFG]; |
| 3844 | region->region_base = CFG_BASE; |
| 3845 | region->region_size = CFG_SIZE; |
| 3846 | region->offset_in_bar = CFG_BASE - STM_FLASH_BASE_ADDR; |
| 3847 | region->bar_size = CFG_BAR_SIZE; |
| 3848 | region->bar_id = SRAM_CFG_BAR_ID; |
| 3849 | region->used = 1; |
| 3850 | |
| 3851 | /* SRAM */ |
| 3852 | region = &hdev->pci_mem_region[PCI_REGION_SRAM]; |
| 3853 | region->region_base = SRAM_BASE_ADDR; |
| 3854 | region->region_size = SRAM_SIZE; |
| 3855 | region->offset_in_bar = CFG_REGION_SIZE + BAR0_RSRVD_SIZE; |
| 3856 | region->bar_size = CFG_BAR_SIZE; |
| 3857 | region->bar_id = SRAM_CFG_BAR_ID; |
| 3858 | region->used = 1; |
| 3859 | |
| 3860 | /* DRAM */ |
| 3861 | region = &hdev->pci_mem_region[PCI_REGION_DRAM]; |
| 3862 | region->region_base = DRAM_PHYS_BASE; |
| 3863 | region->region_size = hdev->asic_prop.dram_size; |
| 3864 | region->offset_in_bar = 0; |
| 3865 | region->bar_size = prop->dram_pci_bar_size; |
| 3866 | region->bar_id = DRAM_BAR_ID; |
| 3867 | region->used = 1; |
| 3868 | } |
| 3869 | |
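/*
 * Initialize the TPC assert, unexpected error, common user CQ and common
 * decoder interrupt objects, then fill the user_interrupt array with the
 * decoder normal interrupts followed by the user CQ interrupts.
 */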
| 3870 | static void gaudi2_user_interrupt_setup(struct hl_device *hdev) |
| 3871 | { |
| 3872 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 3873 | int i, j, k; |
| 3874 | |
| 3875 | /* Initialize TPC interrupt */ |
| 3876 | HL_USR_INTR_STRUCT_INIT(hdev->tpc_interrupt, hdev, 0, HL_USR_INTERRUPT_TPC); |
| 3877 | |
| 3878 | /* Initialize unexpected error interrupt */ |
| 3879 | HL_USR_INTR_STRUCT_INIT(hdev->unexpected_error_interrupt, hdev, 0, |
| 3880 | HL_USR_INTERRUPT_UNEXPECTED); |
| 3881 | |
| 3882 | /* Initialize common user CQ interrupt */ |
| 3883 | HL_USR_INTR_STRUCT_INIT(hdev->common_user_cq_interrupt, hdev, |
| 3884 | HL_COMMON_USER_CQ_INTERRUPT_ID, HL_USR_INTERRUPT_CQ); |
| 3885 | |
| 3886 | /* Initialize common decoder interrupt */ |
| 3887 | HL_USR_INTR_STRUCT_INIT(hdev->common_decoder_interrupt, hdev, |
| 3888 | HL_COMMON_DEC_INTERRUPT_ID, HL_USR_INTERRUPT_DECODER); |
| 3889 | |
| 3890 | /* User interrupts structure holds both decoder and user interrupts from various engines. |
| 3891 | * We first initialize the decoder interrupts and then we add the user interrupts. |
| 3892 | * The only limitation is that the last decoder interrupt id must be smaller |
* than GAUDI2_IRQ_NUM_USER_FIRST. This is checked at compilation time.
| 3894 | */ |
| 3895 | |
/* Initialize decoder interrupts; expose only the normal interrupts,
 * as the error interrupts are handled by the driver
 */
| 3899 | for (i = GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM, j = 0 ; i <= GAUDI2_IRQ_NUM_SHARED_DEC1_NRM; |
| 3900 | i += 2, j++) |
| 3901 | HL_USR_INTR_STRUCT_INIT(hdev->user_interrupt[j], hdev, i, |
| 3902 | HL_USR_INTERRUPT_DECODER); |
| 3903 | |
| 3904 | for (i = GAUDI2_IRQ_NUM_USER_FIRST, k = 0 ; k < prop->user_interrupt_count; i++, j++, k++) |
| 3905 | HL_USR_INTR_STRUCT_INIT(hdev->user_interrupt[j], hdev, i, HL_USR_INTERRUPT_CQ); |
| 3906 | } |
| 3907 | |
| 3908 | static inline int gaudi2_get_non_zero_random_int(void) |
| 3909 | { |
| 3910 | int rand = get_random_u32(); |
| 3911 | |
| 3912 | return rand ? rand : 1; |
| 3913 | } |
| 3914 | |
| 3915 | static void gaudi2_special_blocks_free(struct hl_device *hdev) |
| 3916 | { |
| 3917 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 3918 | struct hl_skip_blocks_cfg *skip_special_blocks_cfg = |
| 3919 | &prop->skip_special_blocks_cfg; |
| 3920 | |
kfree(prop->special_blocks);
kfree(skip_special_blocks_cfg->block_types);
kfree(skip_special_blocks_cfg->block_ranges);
| 3924 | } |
| 3925 | |
| 3926 | static void gaudi2_special_blocks_iterator_free(struct hl_device *hdev) |
| 3927 | { |
| 3928 | gaudi2_special_blocks_free(hdev); |
| 3929 | } |
| 3930 | |
| 3931 | static bool gaudi2_special_block_skip(struct hl_device *hdev, |
| 3932 | struct hl_special_blocks_cfg *special_blocks_cfg, |
| 3933 | u32 blk_idx, u32 major, u32 minor, u32 sub_minor) |
| 3934 | { |
| 3935 | return false; |
| 3936 | } |
| 3937 | |
| 3938 | static int gaudi2_special_blocks_config(struct hl_device *hdev) |
| 3939 | { |
| 3940 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 3941 | int i, rc; |
| 3942 | |
| 3943 | /* Configure Special blocks */ |
| 3944 | prop->glbl_err_max_cause_num = GAUDI2_GLBL_ERR_MAX_CAUSE_NUM; |
| 3945 | prop->num_of_special_blocks = ARRAY_SIZE(gaudi2_special_blocks); |
| 3946 | prop->special_blocks = kmalloc_array(prop->num_of_special_blocks, |
| 3947 | sizeof(*prop->special_blocks), GFP_KERNEL); |
| 3948 | if (!prop->special_blocks) |
| 3949 | return -ENOMEM; |
| 3950 | |
| 3951 | for (i = 0 ; i < prop->num_of_special_blocks ; i++) |
| 3952 | memcpy(&prop->special_blocks[i], &gaudi2_special_blocks[i], |
| 3953 | sizeof(*prop->special_blocks)); |
| 3954 | |
| 3955 | /* Configure when to skip Special blocks */ |
| 3956 | memset(&prop->skip_special_blocks_cfg, 0, sizeof(prop->skip_special_blocks_cfg)); |
| 3957 | prop->skip_special_blocks_cfg.skip_block_hook = gaudi2_special_block_skip; |
| 3958 | |
| 3959 | if (ARRAY_SIZE(gaudi2_iterator_skip_block_types)) { |
| 3960 | prop->skip_special_blocks_cfg.block_types = |
| 3961 | kmalloc_array(ARRAY_SIZE(gaudi2_iterator_skip_block_types), |
| 3962 | sizeof(gaudi2_iterator_skip_block_types[0]), GFP_KERNEL); |
| 3963 | if (!prop->skip_special_blocks_cfg.block_types) { |
| 3964 | rc = -ENOMEM; |
| 3965 | goto free_special_blocks; |
| 3966 | } |
| 3967 | |
| 3968 | memcpy(prop->skip_special_blocks_cfg.block_types, gaudi2_iterator_skip_block_types, |
| 3969 | sizeof(gaudi2_iterator_skip_block_types)); |
| 3970 | |
| 3971 | prop->skip_special_blocks_cfg.block_types_len = |
| 3972 | ARRAY_SIZE(gaudi2_iterator_skip_block_types); |
| 3973 | } |
| 3974 | |
| 3975 | if (ARRAY_SIZE(gaudi2_iterator_skip_block_ranges)) { |
| 3976 | prop->skip_special_blocks_cfg.block_ranges = |
| 3977 | kmalloc_array(ARRAY_SIZE(gaudi2_iterator_skip_block_ranges), |
| 3978 | sizeof(gaudi2_iterator_skip_block_ranges[0]), GFP_KERNEL); |
| 3979 | if (!prop->skip_special_blocks_cfg.block_ranges) { |
| 3980 | rc = -ENOMEM; |
| 3981 | goto free_skip_special_blocks_types; |
| 3982 | } |
| 3983 | |
| 3984 | for (i = 0 ; i < ARRAY_SIZE(gaudi2_iterator_skip_block_ranges) ; i++) |
| 3985 | memcpy(&prop->skip_special_blocks_cfg.block_ranges[i], |
| 3986 | &gaudi2_iterator_skip_block_ranges[i], |
| 3987 | sizeof(struct range)); |
| 3988 | |
| 3989 | prop->skip_special_blocks_cfg.block_ranges_len = |
| 3990 | ARRAY_SIZE(gaudi2_iterator_skip_block_ranges); |
| 3991 | } |
| 3992 | |
| 3993 | return 0; |
| 3994 | |
| 3995 | free_skip_special_blocks_types: |
kfree(prop->skip_special_blocks_cfg.block_types);
free_special_blocks:
kfree(prop->special_blocks);
| 3999 | |
| 4000 | return rc; |
| 4001 | } |
| 4002 | |
| 4003 | static int gaudi2_special_blocks_iterator_config(struct hl_device *hdev) |
| 4004 | { |
| 4005 | return gaudi2_special_blocks_config(hdev); |
| 4006 | } |
| 4007 | |
| 4008 | static void gaudi2_test_queues_msgs_free(struct hl_device *hdev) |
| 4009 | { |
| 4010 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 4011 | struct gaudi2_queues_test_info *msg_info = gaudi2->queues_test_info; |
| 4012 | int i; |
| 4013 | |
| 4014 | for (i = 0 ; i < GAUDI2_NUM_TESTED_QS ; i++) { |
| 4015 | /* bail-out if this is an allocation failure point */ |
| 4016 | if (!msg_info[i].kern_addr) |
| 4017 | break; |
| 4018 | |
| 4019 | hl_asic_dma_pool_free(hdev, msg_info[i].kern_addr, msg_info[i].dma_addr); |
| 4020 | msg_info[i].kern_addr = NULL; |
| 4021 | } |
| 4022 | } |
| 4023 | |
| 4024 | static int gaudi2_test_queues_msgs_alloc(struct hl_device *hdev) |
| 4025 | { |
| 4026 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 4027 | struct gaudi2_queues_test_info *msg_info = gaudi2->queues_test_info; |
| 4028 | int i, rc; |
| 4029 | |
| 4030 | /* allocate a message-short buf for each Q we intend to test */ |
| 4031 | for (i = 0 ; i < GAUDI2_NUM_TESTED_QS ; i++) { |
| 4032 | msg_info[i].kern_addr = |
| 4033 | (void *)hl_asic_dma_pool_zalloc(hdev, sizeof(struct packet_msg_short), |
| 4034 | GFP_KERNEL, &msg_info[i].dma_addr); |
| 4035 | if (!msg_info[i].kern_addr) { |
| 4036 | dev_err(hdev->dev, |
| 4037 | "Failed to allocate dma memory for H/W queue %d testing\n" , i); |
| 4038 | rc = -ENOMEM; |
| 4039 | goto err_exit; |
| 4040 | } |
| 4041 | } |
| 4042 | |
| 4043 | return 0; |
| 4044 | |
| 4045 | err_exit: |
| 4046 | gaudi2_test_queues_msgs_free(hdev); |
| 4047 | return rc; |
| 4048 | } |
| 4049 | |
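/*
 * S/W init: allocate the ASIC-specific structure, build the list of valid
 * H/W events, create the DMA pools and the CPU-accessible memory area,
 * allocate the virtual MSI-X doorbell and configure the user mapped blocks,
 * user interrupts and special blocks.
 */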
| 4050 | static int gaudi2_sw_init(struct hl_device *hdev) |
| 4051 | { |
| 4052 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 4053 | struct gaudi2_device *gaudi2; |
| 4054 | int i, rc; |
| 4055 | |
| 4056 | /* Allocate device structure */ |
| 4057 | gaudi2 = kzalloc(sizeof(*gaudi2), GFP_KERNEL); |
| 4058 | if (!gaudi2) |
| 4059 | return -ENOMEM; |
| 4060 | |
| 4061 | for (i = 0 ; i < ARRAY_SIZE(gaudi2_irq_map_table) ; i++) { |
| 4062 | if (gaudi2_irq_map_table[i].msg || !gaudi2_irq_map_table[i].valid) |
| 4063 | continue; |
| 4064 | |
| 4065 | if (gaudi2->num_of_valid_hw_events == GAUDI2_EVENT_SIZE) { |
| 4066 | dev_err(hdev->dev, "H/W events array exceeds the limit of %u events\n" , |
| 4067 | GAUDI2_EVENT_SIZE); |
| 4068 | rc = -EINVAL; |
| 4069 | goto free_gaudi2_device; |
| 4070 | } |
| 4071 | |
| 4072 | gaudi2->hw_events[gaudi2->num_of_valid_hw_events++] = gaudi2_irq_map_table[i].fc_id; |
| 4073 | } |
| 4074 | |
| 4075 | for (i = 0 ; i < MME_NUM_OF_LFSR_SEEDS ; i++) |
| 4076 | gaudi2->lfsr_rand_seeds[i] = gaudi2_get_non_zero_random_int(); |
| 4077 | |
| 4078 | gaudi2->cpucp_info_get = gaudi2_cpucp_info_get; |
| 4079 | |
| 4080 | hdev->asic_specific = gaudi2; |
| 4081 | |
| 4082 | /* Create DMA pool for small allocations. |
| 4083 | * Use DEVICE_CACHE_LINE_SIZE for alignment since the NIC memory-mapped |
| 4084 | * PI/CI registers allocated from this pool have this restriction |
| 4085 | */ |
hdev->dma_pool = dma_pool_create(dev_name(hdev->dev), &hdev->pdev->dev,
GAUDI2_DMA_POOL_BLK_SIZE, DEVICE_CACHE_LINE_SIZE, 0);
| 4088 | if (!hdev->dma_pool) { |
| 4089 | dev_err(hdev->dev, "failed to create DMA pool\n" ); |
| 4090 | rc = -ENOMEM; |
| 4091 | goto free_gaudi2_device; |
| 4092 | } |
| 4093 | |
| 4094 | rc = gaudi2_alloc_cpu_accessible_dma_mem(hdev); |
| 4095 | if (rc) |
| 4096 | goto free_dma_pool; |
| 4097 | |
| 4098 | hdev->cpu_accessible_dma_pool = gen_pool_create(ilog2(32), -1); |
| 4099 | if (!hdev->cpu_accessible_dma_pool) { |
| 4100 | dev_err(hdev->dev, "Failed to create CPU accessible DMA pool\n" ); |
| 4101 | rc = -ENOMEM; |
| 4102 | goto free_cpu_dma_mem; |
| 4103 | } |
| 4104 | |
rc = gen_pool_add(hdev->cpu_accessible_dma_pool, (uintptr_t) hdev->cpu_accessible_dma_mem,
HL_CPU_ACCESSIBLE_MEM_SIZE, -1);
| 4107 | if (rc) { |
| 4108 | dev_err(hdev->dev, "Failed to add memory to CPU accessible DMA pool\n" ); |
| 4109 | rc = -EFAULT; |
| 4110 | goto free_cpu_accessible_dma_pool; |
| 4111 | } |
| 4112 | |
gaudi2->virt_msix_db_cpu_addr = hl_cpu_accessible_dma_pool_alloc(hdev, prop->pmmu.page_size,
&gaudi2->virt_msix_db_dma_addr);
| 4115 | if (!gaudi2->virt_msix_db_cpu_addr) { |
| 4116 | dev_err(hdev->dev, "Failed to allocate DMA memory for virtual MSI-X doorbell\n" ); |
| 4117 | rc = -ENOMEM; |
| 4118 | goto free_cpu_accessible_dma_pool; |
| 4119 | } |
| 4120 | |
| 4121 | spin_lock_init(&gaudi2->hw_queues_lock); |
| 4122 | |
| 4123 | gaudi2->scratchpad_bus_address = prop->mmu_pgt_addr + HMMU_PAGE_TABLES_SIZE + EDMA_PQS_SIZE; |
| 4124 | |
| 4125 | gaudi2_user_mapped_blocks_init(hdev); |
| 4126 | |
| 4127 | /* Initialize user interrupts */ |
| 4128 | gaudi2_user_interrupt_setup(hdev); |
| 4129 | |
| 4130 | hdev->supports_coresight = true; |
| 4131 | hdev->supports_sync_stream = true; |
| 4132 | hdev->supports_cb_mapping = true; |
| 4133 | hdev->supports_wait_for_multi_cs = false; |
| 4134 | |
| 4135 | prop->supports_compute_reset = true; |
| 4136 | |
| 4137 | /* Event queue sanity check added in FW version 1.11 */ |
if (hl_fw_version_cmp(hdev, 1, 11, 0) < 0)
| 4139 | hdev->event_queue.check_eqe_index = false; |
| 4140 | else |
| 4141 | hdev->event_queue.check_eqe_index = true; |
| 4142 | |
| 4143 | hdev->asic_funcs->set_pci_memory_regions(hdev); |
| 4144 | |
| 4145 | rc = gaudi2_special_blocks_iterator_config(hdev); |
| 4146 | if (rc) |
| 4147 | goto free_virt_msix_db_mem; |
| 4148 | |
| 4149 | rc = gaudi2_test_queues_msgs_alloc(hdev); |
| 4150 | if (rc) |
| 4151 | goto special_blocks_free; |
| 4152 | |
| 4153 | hdev->heartbeat_debug_info.cpu_queue_id = GAUDI2_QUEUE_ID_CPU_PQ; |
| 4154 | |
| 4155 | return 0; |
| 4156 | |
| 4157 | special_blocks_free: |
| 4158 | gaudi2_special_blocks_iterator_free(hdev); |
| 4159 | free_virt_msix_db_mem: |
hl_cpu_accessible_dma_pool_free(hdev, prop->pmmu.page_size, gaudi2->virt_msix_db_cpu_addr);
| 4161 | free_cpu_accessible_dma_pool: |
| 4162 | gen_pool_destroy(hdev->cpu_accessible_dma_pool); |
| 4163 | free_cpu_dma_mem: |
| 4164 | hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem, |
| 4165 | hdev->cpu_accessible_dma_address); |
| 4166 | free_dma_pool: |
dma_pool_destroy(hdev->dma_pool);
| 4168 | free_gaudi2_device: |
kfree(gaudi2);
| 4170 | return rc; |
| 4171 | } |
| 4172 | |
| 4173 | static int gaudi2_sw_fini(struct hl_device *hdev) |
| 4174 | { |
| 4175 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 4176 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 4177 | |
| 4178 | gaudi2_test_queues_msgs_free(hdev); |
| 4179 | |
| 4180 | gaudi2_special_blocks_iterator_free(hdev); |
| 4181 | |
hl_cpu_accessible_dma_pool_free(hdev, prop->pmmu.page_size, gaudi2->virt_msix_db_cpu_addr);
| 4183 | |
| 4184 | gen_pool_destroy(hdev->cpu_accessible_dma_pool); |
| 4185 | |
| 4186 | hl_asic_dma_free_coherent(hdev, HL_CPU_ACCESSIBLE_MEM_SIZE, hdev->cpu_accessible_dma_mem, |
| 4187 | hdev->cpu_accessible_dma_address); |
| 4188 | |
dma_pool_destroy(hdev->dma_pool);
| 4190 | |
kfree(gaudi2);
| 4192 | |
| 4193 | return 0; |
| 4194 | } |
| 4195 | |
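/* Stop the PQF, CQF and CP engines of a queue manager, including its ARC CQF */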
| 4196 | static void gaudi2_stop_qman_common(struct hl_device *hdev, u32 reg_base) |
| 4197 | { |
| 4198 | WREG32(reg_base + QM_GLBL_CFG1_OFFSET, QM_GLBL_CFG1_PQF_STOP | |
| 4199 | QM_GLBL_CFG1_CQF_STOP | |
| 4200 | QM_GLBL_CFG1_CP_STOP); |
| 4201 | |
/* also stop the ARC */
| 4203 | WREG32(reg_base + QM_GLBL_CFG2_OFFSET, QM_GLBL_CFG2_ARC_CQF_STOP); |
| 4204 | } |
| 4205 | |
| 4206 | static void gaudi2_flush_qman_common(struct hl_device *hdev, u32 reg_base) |
| 4207 | { |
| 4208 | WREG32(reg_base + QM_GLBL_CFG1_OFFSET, QM_GLBL_CFG1_PQF_FLUSH | |
| 4209 | QM_GLBL_CFG1_CQF_FLUSH | |
| 4210 | QM_GLBL_CFG1_CP_FLUSH); |
| 4211 | } |
| 4212 | |
| 4213 | static void gaudi2_flush_qman_arc_common(struct hl_device *hdev, u32 reg_base) |
| 4214 | { |
| 4215 | WREG32(reg_base + QM_GLBL_CFG2_OFFSET, QM_GLBL_CFG2_ARC_CQF_FLUSH); |
| 4216 | } |
| 4217 | |
| 4218 | /** |
| 4219 | * gaudi2_clear_qm_fence_counters_common - clear QM's fence counters |
| 4220 | * |
| 4221 | * @hdev: pointer to the habanalabs device structure |
* @queue_id: queue whose fence counters should be cleared
* @skip_fence: if true, set the maximum fence value to all fence counters to
*              avoid getting stuck on any fence value. Otherwise set all fence
*              counters to 0 (standard clear of fence counters).
| 4226 | */ |
| 4227 | static void gaudi2_clear_qm_fence_counters_common(struct hl_device *hdev, u32 queue_id, |
| 4228 | bool skip_fence) |
| 4229 | { |
| 4230 | u32 size, reg_base; |
| 4231 | u32 addr, val; |
| 4232 | |
| 4233 | reg_base = gaudi2_qm_blocks_bases[queue_id]; |
| 4234 | |
| 4235 | addr = reg_base + QM_CP_FENCE0_CNT_0_OFFSET; |
| 4236 | size = mmPDMA0_QM_CP_BARRIER_CFG - mmPDMA0_QM_CP_FENCE0_CNT_0; |
| 4237 | |
| 4238 | /* |
| 4239 | * in case we want to make sure that QM that is stuck on a fence will |
| 4240 | * be released we should set the fence counter to a higher value that |
| 4241 | * the value the QM waiting for. to comply with any fence counter of |
| 4242 | * any value we set maximum fence value to all counters |
| 4243 | */ |
| 4244 | val = skip_fence ? U32_MAX : 0; |
| 4245 | gaudi2_memset_device_lbw(hdev, addr, size, val); |
| 4246 | } |
| 4247 | |
| 4248 | static void gaudi2_qman_manual_flush_common(struct hl_device *hdev, u32 queue_id) |
| 4249 | { |
| 4250 | u32 reg_base = gaudi2_qm_blocks_bases[queue_id]; |
| 4251 | |
gaudi2_clear_qm_fence_counters_common(hdev, queue_id, true);
| 4253 | gaudi2_flush_qman_common(hdev, reg_base); |
| 4254 | gaudi2_flush_qman_arc_common(hdev, reg_base); |
| 4255 | } |
| 4256 | |
| 4257 | static void gaudi2_stop_dma_qmans(struct hl_device *hdev) |
| 4258 | { |
| 4259 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 4260 | int dcore, inst; |
| 4261 | |
| 4262 | if (!(gaudi2->hw_cap_initialized & HW_CAP_PDMA_MASK)) |
| 4263 | goto stop_edma_qmans; |
| 4264 | |
| 4265 | /* Stop CPs of PDMA QMANs */ |
| 4266 | gaudi2_stop_qman_common(hdev, mmPDMA0_QM_BASE); |
| 4267 | gaudi2_stop_qman_common(hdev, mmPDMA1_QM_BASE); |
| 4268 | |
| 4269 | stop_edma_qmans: |
| 4270 | if (!(gaudi2->hw_cap_initialized & HW_CAP_EDMA_MASK)) |
| 4271 | return; |
| 4272 | |
| 4273 | for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) { |
| 4274 | for (inst = 0 ; inst < NUM_OF_EDMA_PER_DCORE ; inst++) { |
| 4275 | u8 seq = dcore * NUM_OF_EDMA_PER_DCORE + inst; |
| 4276 | u32 qm_base; |
| 4277 | |
| 4278 | if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_EDMA_SHIFT + seq))) |
| 4279 | continue; |
| 4280 | |
| 4281 | qm_base = mmDCORE0_EDMA0_QM_BASE + dcore * DCORE_OFFSET + |
| 4282 | inst * DCORE_EDMA_OFFSET; |
| 4283 | |
| 4284 | /* Stop CPs of EDMA QMANs */ |
gaudi2_stop_qman_common(hdev, qm_base);
| 4286 | } |
| 4287 | } |
| 4288 | } |
| 4289 | |
| 4290 | static void gaudi2_stop_mme_qmans(struct hl_device *hdev) |
| 4291 | { |
| 4292 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 4293 | u32 offset, i; |
| 4294 | |
| 4295 | offset = mmDCORE1_MME_QM_BASE - mmDCORE0_MME_QM_BASE; |
| 4296 | |
| 4297 | for (i = 0 ; i < NUM_OF_DCORES ; i++) { |
| 4298 | if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_MME_SHIFT + i))) |
| 4299 | continue; |
| 4300 | |
| 4301 | gaudi2_stop_qman_common(hdev, mmDCORE0_MME_QM_BASE + (i * offset)); |
| 4302 | } |
| 4303 | } |
| 4304 | |
| 4305 | static void gaudi2_stop_tpc_qmans(struct hl_device *hdev) |
| 4306 | { |
| 4307 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 4308 | u32 reg_base; |
| 4309 | int i; |
| 4310 | |
| 4311 | if (!(gaudi2->tpc_hw_cap_initialized & HW_CAP_TPC_MASK)) |
| 4312 | return; |
| 4313 | |
| 4314 | for (i = 0 ; i < TPC_ID_SIZE ; i++) { |
| 4315 | if (!(gaudi2->tpc_hw_cap_initialized & BIT_ULL(HW_CAP_TPC_SHIFT + i))) |
| 4316 | continue; |
| 4317 | |
| 4318 | reg_base = gaudi2_qm_blocks_bases[gaudi2_tpc_id_to_queue_id[i]]; |
| 4319 | gaudi2_stop_qman_common(hdev, reg_base); |
| 4320 | } |
| 4321 | } |
| 4322 | |
| 4323 | static void gaudi2_stop_rot_qmans(struct hl_device *hdev) |
| 4324 | { |
| 4325 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 4326 | u32 reg_base; |
| 4327 | int i; |
| 4328 | |
| 4329 | if (!(gaudi2->hw_cap_initialized & HW_CAP_ROT_MASK)) |
| 4330 | return; |
| 4331 | |
| 4332 | for (i = 0 ; i < ROTATOR_ID_SIZE ; i++) { |
| 4333 | if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_ROT_SHIFT + i))) |
| 4334 | continue; |
| 4335 | |
| 4336 | reg_base = gaudi2_qm_blocks_bases[gaudi2_rot_id_to_queue_id[i]]; |
| 4337 | gaudi2_stop_qman_common(hdev, reg_base); |
| 4338 | } |
| 4339 | } |
| 4340 | |
| 4341 | static void gaudi2_stop_nic_qmans(struct hl_device *hdev) |
| 4342 | { |
| 4343 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 4344 | u32 reg_base, queue_id; |
| 4345 | int i; |
| 4346 | |
| 4347 | if (!(gaudi2->nic_hw_cap_initialized & HW_CAP_NIC_MASK)) |
| 4348 | return; |
| 4349 | |
| 4350 | queue_id = GAUDI2_QUEUE_ID_NIC_0_0; |
| 4351 | |
| 4352 | for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++, queue_id += NUM_OF_PQ_PER_QMAN) { |
| 4353 | if (!(hdev->nic_ports_mask & BIT(i))) |
| 4354 | continue; |
| 4355 | |
| 4356 | reg_base = gaudi2_qm_blocks_bases[queue_id]; |
| 4357 | gaudi2_stop_qman_common(hdev, reg_base); |
| 4358 | } |
| 4359 | } |
| 4360 | |
| 4361 | static void gaudi2_stall_dma_common(struct hl_device *hdev, u32 reg_base) |
| 4362 | { |
| 4363 | u32 reg_val; |
| 4364 | |
| 4365 | reg_val = FIELD_PREP(PDMA0_CORE_CFG_1_HALT_MASK, 0x1); |
| 4366 | WREG32(reg_base + DMA_CORE_CFG_1_OFFSET, reg_val); |
| 4367 | } |
| 4368 | |
| 4369 | static void gaudi2_dma_stall(struct hl_device *hdev) |
| 4370 | { |
| 4371 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 4372 | int dcore, inst; |
| 4373 | |
| 4374 | if (!(gaudi2->hw_cap_initialized & HW_CAP_PDMA_MASK)) |
| 4375 | goto stall_edma; |
| 4376 | |
| 4377 | gaudi2_stall_dma_common(hdev, mmPDMA0_CORE_BASE); |
| 4378 | gaudi2_stall_dma_common(hdev, mmPDMA1_CORE_BASE); |
| 4379 | |
| 4380 | stall_edma: |
| 4381 | if (!(gaudi2->hw_cap_initialized & HW_CAP_EDMA_MASK)) |
| 4382 | return; |
| 4383 | |
| 4384 | for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) { |
| 4385 | for (inst = 0 ; inst < NUM_OF_EDMA_PER_DCORE ; inst++) { |
| 4386 | u8 seq = dcore * NUM_OF_EDMA_PER_DCORE + inst; |
| 4387 | u32 core_base; |
| 4388 | |
| 4389 | if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_EDMA_SHIFT + seq))) |
| 4390 | continue; |
| 4391 | |
| 4392 | core_base = mmDCORE0_EDMA0_CORE_BASE + dcore * DCORE_OFFSET + |
| 4393 | inst * DCORE_EDMA_OFFSET; |
| 4394 | |
/* Stall the EDMA cores */
gaudi2_stall_dma_common(hdev, core_base);
| 4397 | } |
| 4398 | } |
| 4399 | } |
| 4400 | |
| 4401 | static void gaudi2_mme_stall(struct hl_device *hdev) |
| 4402 | { |
| 4403 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 4404 | u32 offset, i; |
| 4405 | |
| 4406 | offset = mmDCORE1_MME_CTRL_LO_QM_STALL - mmDCORE0_MME_CTRL_LO_QM_STALL; |
| 4407 | |
| 4408 | for (i = 0 ; i < NUM_OF_DCORES ; i++) |
| 4409 | if (gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_MME_SHIFT + i)) |
| 4410 | WREG32(mmDCORE0_MME_CTRL_LO_QM_STALL + (i * offset), 1); |
| 4411 | } |
| 4412 | |
| 4413 | static void gaudi2_tpc_stall(struct hl_device *hdev) |
| 4414 | { |
| 4415 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 4416 | u32 reg_base; |
| 4417 | int i; |
| 4418 | |
| 4419 | if (!(gaudi2->tpc_hw_cap_initialized & HW_CAP_TPC_MASK)) |
| 4420 | return; |
| 4421 | |
| 4422 | for (i = 0 ; i < TPC_ID_SIZE ; i++) { |
| 4423 | if (!(gaudi2->tpc_hw_cap_initialized & BIT_ULL(HW_CAP_TPC_SHIFT + i))) |
| 4424 | continue; |
| 4425 | |
| 4426 | reg_base = gaudi2_tpc_cfg_blocks_bases[i]; |
| 4427 | WREG32(reg_base + TPC_CFG_STALL_OFFSET, 1); |
| 4428 | } |
| 4429 | } |
| 4430 | |
| 4431 | static void gaudi2_rotator_stall(struct hl_device *hdev) |
| 4432 | { |
| 4433 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 4434 | u32 reg_val; |
| 4435 | int i; |
| 4436 | |
| 4437 | if (!(gaudi2->hw_cap_initialized & HW_CAP_ROT_MASK)) |
| 4438 | return; |
| 4439 | |
| 4440 | reg_val = FIELD_PREP(ROT_MSS_HALT_WBC_MASK, 0x1) | |
| 4441 | FIELD_PREP(ROT_MSS_HALT_RSB_MASK, 0x1) | |
| 4442 | FIELD_PREP(ROT_MSS_HALT_MRSB_MASK, 0x1); |
| 4443 | |
| 4444 | for (i = 0 ; i < ROTATOR_ID_SIZE ; i++) { |
| 4445 | if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_ROT_SHIFT + i))) |
| 4446 | continue; |
| 4447 | |
| 4448 | WREG32(mmROT0_MSS_HALT + i * ROT_OFFSET, reg_val); |
| 4449 | } |
| 4450 | } |
| 4451 | |
| 4452 | static void gaudi2_disable_qman_common(struct hl_device *hdev, u32 reg_base) |
| 4453 | { |
| 4454 | WREG32(reg_base + QM_GLBL_CFG0_OFFSET, 0); |
| 4455 | } |
| 4456 | |
| 4457 | static void gaudi2_disable_dma_qmans(struct hl_device *hdev) |
| 4458 | { |
| 4459 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 4460 | int dcore, inst; |
| 4461 | |
| 4462 | if (!(gaudi2->hw_cap_initialized & HW_CAP_PDMA_MASK)) |
| 4463 | goto stop_edma_qmans; |
| 4464 | |
| 4465 | gaudi2_disable_qman_common(hdev, mmPDMA0_QM_BASE); |
| 4466 | gaudi2_disable_qman_common(hdev, mmPDMA1_QM_BASE); |
| 4467 | |
| 4468 | stop_edma_qmans: |
| 4469 | if (!(gaudi2->hw_cap_initialized & HW_CAP_EDMA_MASK)) |
| 4470 | return; |
| 4471 | |
| 4472 | for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) { |
| 4473 | for (inst = 0 ; inst < NUM_OF_EDMA_PER_DCORE ; inst++) { |
| 4474 | u8 seq = dcore * NUM_OF_EDMA_PER_DCORE + inst; |
| 4475 | u32 qm_base; |
| 4476 | |
| 4477 | if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_EDMA_SHIFT + seq))) |
| 4478 | continue; |
| 4479 | |
| 4480 | qm_base = mmDCORE0_EDMA0_QM_BASE + dcore * DCORE_OFFSET + |
| 4481 | inst * DCORE_EDMA_OFFSET; |
| 4482 | |
| 4483 | /* Disable CPs of EDMA QMANs */ |
gaudi2_disable_qman_common(hdev, qm_base);
| 4485 | } |
| 4486 | } |
| 4487 | } |
| 4488 | |
| 4489 | static void gaudi2_disable_mme_qmans(struct hl_device *hdev) |
| 4490 | { |
| 4491 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 4492 | u32 offset, i; |
| 4493 | |
| 4494 | offset = mmDCORE1_MME_QM_BASE - mmDCORE0_MME_QM_BASE; |
| 4495 | |
| 4496 | for (i = 0 ; i < NUM_OF_DCORES ; i++) |
| 4497 | if (gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_MME_SHIFT + i)) |
| 4498 | gaudi2_disable_qman_common(hdev, mmDCORE0_MME_QM_BASE + (i * offset)); |
| 4499 | } |
| 4500 | |
| 4501 | static void gaudi2_disable_tpc_qmans(struct hl_device *hdev) |
| 4502 | { |
| 4503 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 4504 | u32 reg_base; |
| 4505 | int i; |
| 4506 | |
| 4507 | if (!(gaudi2->tpc_hw_cap_initialized & HW_CAP_TPC_MASK)) |
| 4508 | return; |
| 4509 | |
| 4510 | for (i = 0 ; i < TPC_ID_SIZE ; i++) { |
| 4511 | if (!(gaudi2->tpc_hw_cap_initialized & BIT_ULL(HW_CAP_TPC_SHIFT + i))) |
| 4512 | continue; |
| 4513 | |
| 4514 | reg_base = gaudi2_qm_blocks_bases[gaudi2_tpc_id_to_queue_id[i]]; |
| 4515 | gaudi2_disable_qman_common(hdev, reg_base); |
| 4516 | } |
| 4517 | } |
| 4518 | |
| 4519 | static void gaudi2_disable_rot_qmans(struct hl_device *hdev) |
| 4520 | { |
| 4521 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 4522 | u32 reg_base; |
| 4523 | int i; |
| 4524 | |
| 4525 | if (!(gaudi2->hw_cap_initialized & HW_CAP_ROT_MASK)) |
| 4526 | return; |
| 4527 | |
| 4528 | for (i = 0 ; i < ROTATOR_ID_SIZE ; i++) { |
| 4529 | if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_ROT_SHIFT + i))) |
| 4530 | continue; |
| 4531 | |
| 4532 | reg_base = gaudi2_qm_blocks_bases[gaudi2_rot_id_to_queue_id[i]]; |
| 4533 | gaudi2_disable_qman_common(hdev, reg_base); |
| 4534 | } |
| 4535 | } |
| 4536 | |
| 4537 | static void gaudi2_disable_nic_qmans(struct hl_device *hdev) |
| 4538 | { |
| 4539 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 4540 | u32 reg_base, queue_id; |
| 4541 | int i; |
| 4542 | |
| 4543 | if (!(gaudi2->nic_hw_cap_initialized & HW_CAP_NIC_MASK)) |
| 4544 | return; |
| 4545 | |
| 4546 | queue_id = GAUDI2_QUEUE_ID_NIC_0_0; |
| 4547 | |
| 4548 | for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++, queue_id += NUM_OF_PQ_PER_QMAN) { |
| 4549 | if (!(hdev->nic_ports_mask & BIT(i))) |
| 4550 | continue; |
| 4551 | |
| 4552 | reg_base = gaudi2_qm_blocks_bases[queue_id]; |
| 4553 | gaudi2_disable_qman_common(hdev, reg_base); |
| 4554 | } |
| 4555 | } |
| 4556 | |
| 4557 | static void gaudi2_enable_timestamp(struct hl_device *hdev) |
| 4558 | { |
| 4559 | /* Disable the timestamp counter */ |
| 4560 | WREG32(mmPSOC_TIMESTAMP_BASE, 0); |
| 4561 | |
| 4562 | /* Zero the lower/upper parts of the 64-bit counter */ |
| 4563 | WREG32(mmPSOC_TIMESTAMP_BASE + 0xC, 0); |
| 4564 | WREG32(mmPSOC_TIMESTAMP_BASE + 0x8, 0); |
| 4565 | |
| 4566 | /* Enable the counter */ |
| 4567 | WREG32(mmPSOC_TIMESTAMP_BASE, 1); |
| 4568 | } |
| 4569 | |
| 4570 | static void gaudi2_disable_timestamp(struct hl_device *hdev) |
| 4571 | { |
| 4572 | /* Disable the timestamp counter */ |
| 4573 | WREG32(mmPSOC_TIMESTAMP_BASE, 0); |
| 4574 | } |
| 4575 | |
| 4576 | static const char *gaudi2_irq_name(u16 irq_number) |
| 4577 | { |
| 4578 | switch (irq_number) { |
case GAUDI2_IRQ_NUM_EVENT_QUEUE:
return "gaudi2 cpu eq";
case GAUDI2_IRQ_NUM_COMPLETION:
return "gaudi2 completion";
case GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM ... GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM:
return gaudi2_vdec_irq_name[irq_number - GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM];
case GAUDI2_IRQ_NUM_TPC_ASSERT:
return "gaudi2 tpc assert";
case GAUDI2_IRQ_NUM_UNEXPECTED_ERROR:
return "gaudi2 unexpected error";
case GAUDI2_IRQ_NUM_USER_FIRST ... GAUDI2_IRQ_NUM_USER_LAST:
return "gaudi2 user completion";
case GAUDI2_IRQ_NUM_EQ_ERROR:
return "gaudi2 eq error";
default:
return "invalid";
| 4595 | } |
| 4596 | } |
| 4597 | |
| 4598 | static void gaudi2_dec_disable_msix(struct hl_device *hdev, u32 max_irq_num) |
| 4599 | { |
| 4600 | int i, irq, relative_idx; |
| 4601 | struct hl_dec *dec; |
| 4602 | |
| 4603 | for (i = GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM ; i < max_irq_num ; i++) { |
irq = pci_irq_vector(hdev->pdev, i);
| 4605 | relative_idx = i - GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM; |
| 4606 | |
| 4607 | dec = hdev->dec + relative_idx / 2; |
| 4608 | |
| 4609 | /* We pass different structures depending on the irq handler. For the abnormal |
| 4610 | * interrupt we pass hl_dec and for the regular interrupt we pass the relevant |
| 4611 | * user_interrupt entry |
| 4612 | */ |
| 4613 | free_irq(irq, ((relative_idx % 2) ? |
| 4614 | (void *) dec : |
| 4615 | (void *) &hdev->user_interrupt[dec->core_id])); |
| 4616 | } |
| 4617 | } |
| 4618 | |
| 4619 | static int gaudi2_dec_enable_msix(struct hl_device *hdev) |
| 4620 | { |
| 4621 | int rc, i, irq_init_cnt, irq, relative_idx; |
| 4622 | struct hl_dec *dec; |
| 4623 | |
| 4624 | for (i = GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM, irq_init_cnt = 0; |
| 4625 | i <= GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM; |
| 4626 | i++, irq_init_cnt++) { |
| 4627 | |
irq = pci_irq_vector(hdev->pdev, i);
| 4629 | relative_idx = i - GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM; |
| 4630 | |
| 4631 | /* We pass different structures depending on the irq handler. For the abnormal |
| 4632 | * interrupt we pass hl_dec and for the regular interrupt we pass the relevant |
| 4633 | * user_interrupt entry |
| 4634 | * |
| 4635 | * TODO: change the dec abnrm to threaded irq |
| 4636 | */ |
| 4637 | |
| 4638 | dec = hdev->dec + relative_idx / 2; |
| 4639 | if (relative_idx % 2) { |
rc = request_irq(irq, hl_irq_handler_dec_abnrm, 0,
gaudi2_irq_name(i), (void *) dec);
} else {
rc = request_irq(irq, hl_irq_user_interrupt_handler, 0, gaudi2_irq_name(i),
(void *) &hdev->user_interrupt[dec->core_id]);
}

if (rc) {
dev_err(hdev->dev, "Failed to request IRQ %d", irq);
| 4649 | goto free_dec_irqs; |
| 4650 | } |
| 4651 | } |
| 4652 | |
| 4653 | return 0; |
| 4654 | |
| 4655 | free_dec_irqs: |
gaudi2_dec_disable_msix(hdev, (GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM + irq_init_cnt));
| 4657 | return rc; |
| 4658 | } |
| 4659 | |
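/*
 * Allocate all MSI-X vectors and request the handlers: completion, event
 * queue, decoders, TPC assert, unexpected error, user interrupts and EQ
 * error. On failure, everything requested so far is unwound in reverse order.
 */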
| 4660 | static int gaudi2_enable_msix(struct hl_device *hdev) |
| 4661 | { |
| 4662 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 4663 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 4664 | int rc, irq, i, j, user_irq_init_cnt; |
| 4665 | struct hl_cq *cq; |
| 4666 | |
| 4667 | if (gaudi2->hw_cap_initialized & HW_CAP_MSIX) |
| 4668 | return 0; |
| 4669 | |
| 4670 | hl_init_cpu_for_irq(hdev); |
| 4671 | |
rc = pci_alloc_irq_vectors(hdev->pdev, GAUDI2_MSIX_ENTRIES, GAUDI2_MSIX_ENTRIES,
PCI_IRQ_MSIX);
if (rc < 0) {
dev_err(hdev->dev, "MSI-X: Failed to enable support -- %d/%d\n",
| 4676 | GAUDI2_MSIX_ENTRIES, rc); |
| 4677 | return rc; |
| 4678 | } |
| 4679 | |
irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_COMPLETION);
cq = &hdev->completion_queue[GAUDI2_RESERVED_CQ_CS_COMPLETION];
rc = request_irq(irq, hl_irq_handler_cq, 0, gaudi2_irq_name(GAUDI2_IRQ_NUM_COMPLETION), cq);
if (rc) {
dev_err(hdev->dev, "Failed to request IRQ %d", irq);
goto free_irq_vectors;
}

irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_EVENT_QUEUE);
rc = request_irq(irq, hl_irq_handler_eq, 0, gaudi2_irq_name(GAUDI2_IRQ_NUM_EVENT_QUEUE),
&hdev->event_queue);
if (rc) {
dev_err(hdev->dev, "Failed to request IRQ %d", irq);
| 4693 | goto free_completion_irq; |
| 4694 | } |
| 4695 | |
| 4696 | rc = gaudi2_dec_enable_msix(hdev); |
| 4697 | if (rc) { |
| 4698 | dev_err(hdev->dev, "Failed to enable decoder IRQ" ); |
| 4699 | goto free_event_irq; |
| 4700 | } |
| 4701 | |
irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_TPC_ASSERT);
rc = request_threaded_irq(irq, NULL, hl_irq_user_interrupt_thread_handler, IRQF_ONESHOT,
gaudi2_irq_name(GAUDI2_IRQ_NUM_TPC_ASSERT),
&hdev->tpc_interrupt);
if (rc) {
dev_err(hdev->dev, "Failed to request IRQ %d", irq);
| 4708 | goto free_dec_irq; |
| 4709 | } |
| 4710 | |
irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_UNEXPECTED_ERROR);
rc = request_threaded_irq(irq, NULL, hl_irq_user_interrupt_thread_handler, IRQF_ONESHOT,
gaudi2_irq_name(GAUDI2_IRQ_NUM_UNEXPECTED_ERROR),
&hdev->unexpected_error_interrupt);
if (rc) {
dev_err(hdev->dev, "Failed to request IRQ %d", irq);
| 4717 | goto free_tpc_irq; |
| 4718 | } |
| 4719 | |
| 4720 | for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = prop->user_dec_intr_count, user_irq_init_cnt = 0; |
| 4721 | user_irq_init_cnt < prop->user_interrupt_count; |
| 4722 | i++, j++, user_irq_init_cnt++) { |
| 4723 | |
irq = pci_irq_vector(hdev->pdev, i);
hl_set_irq_affinity(hdev, irq);
rc = request_irq(irq, hl_irq_user_interrupt_handler, 0, gaudi2_irq_name(i),
&hdev->user_interrupt[j]);
if (rc) {
dev_err(hdev->dev, "Failed to request IRQ %d", irq);
| 4730 | goto free_user_irq; |
| 4731 | } |
| 4732 | } |
| 4733 | |
irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_EQ_ERROR);
rc = request_threaded_irq(irq, NULL, hl_irq_eq_error_interrupt_thread_handler,
IRQF_ONESHOT, gaudi2_irq_name(GAUDI2_IRQ_NUM_EQ_ERROR),
hdev);
if (rc) {
dev_err(hdev->dev, "Failed to request IRQ %d", irq);
| 4740 | goto free_user_irq; |
| 4741 | } |
| 4742 | |
| 4743 | gaudi2->hw_cap_initialized |= HW_CAP_MSIX; |
| 4744 | |
| 4745 | return 0; |
| 4746 | |
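| | /* Error unwind: release whatever was requested, in reverse order */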
| 4747 | free_user_irq: |
| 4748 | for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = prop->user_dec_intr_count; |
| 4749 | i < GAUDI2_IRQ_NUM_USER_FIRST + user_irq_init_cnt ; i++, j++) { |
| 4750 | |
| 4751 | irq = pci_irq_vector(hdev->pdev, i);
| 4752 | irq_set_affinity_and_hint(irq, NULL); |
| 4753 | free_irq(irq, &hdev->user_interrupt[j]); |
| 4754 | } |
| 4755 | irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_UNEXPECTED_ERROR);
| 4756 | free_irq(irq, &hdev->unexpected_error_interrupt); |
| 4757 | free_tpc_irq: |
| 4758 | irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_TPC_ASSERT);
| 4759 | free_irq(irq, &hdev->tpc_interrupt); |
| 4760 | free_dec_irq: |
| 4761 | gaudi2_dec_disable_msix(hdev, GAUDI2_IRQ_NUM_DEC_LAST + 1);
| 4762 | free_event_irq: |
| 4763 | irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_EVENT_QUEUE);
| 4764 | free_irq(irq, &hdev->event_queue);
| 4765 | |
| 4766 | free_completion_irq: |
| 4767 | irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_COMPLETION);
| 4768 | free_irq(irq, cq); |
| 4769 | |
| 4770 | free_irq_vectors: |
| 4771 | pci_free_irq_vectors(hdev->pdev);
| 4772 | |
| 4773 | return rc; |
| 4774 | } |
| 4775 | |
| 4776 | static void gaudi2_sync_irqs(struct hl_device *hdev) |
| 4777 | { |
| 4778 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 4779 | int i, j; |
| 4780 | int irq; |
| 4781 | |
| 4782 | if (!(gaudi2->hw_cap_initialized & HW_CAP_MSIX)) |
| 4783 | return; |
| 4784 | |
| 4785 | /* Wait for all pending IRQs to be finished */ |
| 4786 | synchronize_irq(pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_COMPLETION));
| 4787 | |
| 4788 | for (i = GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM ; i <= GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM ; i++) { |
| 4789 | irq = pci_irq_vector(hdev->pdev, i);
| 4790 | synchronize_irq(irq); |
| 4791 | } |
| 4792 | |
| 4793 | synchronize_irq(pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_TPC_ASSERT));
| 4794 | synchronize_irq(pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_UNEXPECTED_ERROR));
| 4795 | |
| 4796 | for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = 0 ; j < hdev->asic_prop.user_interrupt_count; |
| 4797 | i++, j++) { |
| 4798 | irq = pci_irq_vector(hdev->pdev, i);
| 4799 | synchronize_irq(irq); |
| 4800 | } |
| 4801 | |
| 4802 | synchronize_irq(pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_EVENT_QUEUE));
| 4803 | synchronize_irq(pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_EQ_ERROR));
| 4804 | } |
| 4805 | |
| 4806 | static void gaudi2_disable_msix(struct hl_device *hdev) |
| 4807 | { |
| 4808 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 4809 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 4810 | struct hl_cq *cq; |
| 4811 | int irq, i, j, k; |
| 4812 | |
| 4813 | if (!(gaudi2->hw_cap_initialized & HW_CAP_MSIX)) |
| 4814 | return; |
| 4815 | |
| 4816 | gaudi2_sync_irqs(hdev); |
| 4817 | |
| 4818 | irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_EVENT_QUEUE);
| 4819 | free_irq(irq, &hdev->event_queue);
| 4820 | 
| 4821 | gaudi2_dec_disable_msix(hdev, GAUDI2_IRQ_NUM_SHARED_DEC1_ABNRM + 1);
| 4822 | 
| 4823 | irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_TPC_ASSERT);
| 4824 | free_irq(irq, &hdev->tpc_interrupt);
| 4825 | 
| 4826 | irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_UNEXPECTED_ERROR);
| 4827 | free_irq(irq, &hdev->unexpected_error_interrupt);
| 4828 | |
| 4829 | for (i = GAUDI2_IRQ_NUM_USER_FIRST, j = prop->user_dec_intr_count, k = 0; |
| 4830 | k < hdev->asic_prop.user_interrupt_count ; i++, j++, k++) { |
| 4831 | |
| 4832 | irq = pci_irq_vector(hdev->pdev, i);
| 4833 | irq_set_affinity_and_hint(irq, NULL); |
| 4834 | free_irq(irq, &hdev->user_interrupt[j]); |
| 4835 | } |
| 4836 | |
| 4837 | irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_COMPLETION);
| 4838 | cq = &hdev->completion_queue[GAUDI2_RESERVED_CQ_CS_COMPLETION];
| 4839 | free_irq(irq, cq);
| 4840 | 
| 4841 | irq = pci_irq_vector(hdev->pdev, GAUDI2_IRQ_NUM_EQ_ERROR);
| 4842 | free_irq(irq, hdev);
| 4843 | 
| 4844 | pci_free_irq_vectors(hdev->pdev);
| 4845 | |
| 4846 | gaudi2->hw_cap_initialized &= ~HW_CAP_MSIX; |
| 4847 | } |
| 4848 | |
| 4849 | static void gaudi2_stop_dcore_dec(struct hl_device *hdev, int dcore_id) |
| 4850 | { |
| 4851 | u32 reg_val = FIELD_PREP(DCORE0_VDEC0_BRDG_CTRL_GRACEFUL_STOP_MASK, 0x1); |
| 4852 | u32 graceful_pend_mask = DCORE0_VDEC0_BRDG_CTRL_GRACEFUL_PEND_MASK; |
| 4853 | u32 timeout_usec, dec_id, dec_bit, offset, graceful; |
| 4854 | int rc; |
| 4855 | |
| 4856 | if (hdev->pldm) |
| 4857 | timeout_usec = GAUDI2_PLDM_VDEC_TIMEOUT_USEC; |
| 4858 | else |
| 4859 | timeout_usec = GAUDI2_VDEC_TIMEOUT_USEC; |
| 4860 | |
| 4861 | for (dec_id = 0 ; dec_id < NUM_OF_DEC_PER_DCORE ; dec_id++) { |
| 4862 | dec_bit = dcore_id * NUM_OF_DEC_PER_DCORE + dec_id; |
| 4863 | if (!(hdev->asic_prop.decoder_enabled_mask & BIT(dec_bit))) |
| 4864 | continue; |
| 4865 | |
| 4866 | offset = dcore_id * DCORE_OFFSET + dec_id * DCORE_VDEC_OFFSET; |
| 4867 | |
| 4868 | WREG32(mmDCORE0_DEC0_CMD_SWREG16 + offset, 0); |
| 4869 | |
| 4870 | WREG32(mmDCORE0_VDEC0_BRDG_CTRL_GRACEFUL + offset, reg_val); |
| 4871 | |
| 4872 | /* Wait until all traffic from the decoder stops
| 4873 |  * before applying core reset.
| 4874 |  */
| 4875 | rc = hl_poll_timeout( |
| 4876 | hdev, |
| 4877 | mmDCORE0_VDEC0_BRDG_CTRL_GRACEFUL + offset, |
| 4878 | graceful, |
| 4879 | (graceful & graceful_pend_mask), |
| 4880 | 100, |
| 4881 | timeout_usec); |
| 4882 | if (rc) |
| 4883 | dev_err(hdev->dev, |
| 4884 | "Failed to stop traffic from DCORE%d Decoder %d\n" , |
| 4885 | dcore_id, dec_id); |
| 4886 | } |
| 4887 | } |
| 4888 | |
| 4889 | static void gaudi2_stop_pcie_dec(struct hl_device *hdev) |
| 4890 | { |
| 4891 | u32 reg_val = FIELD_PREP(DCORE0_VDEC0_BRDG_CTRL_GRACEFUL_STOP_MASK, 0x1); |
| 4892 | u32 graceful_pend_mask = PCIE_VDEC0_BRDG_CTRL_GRACEFUL_PEND_MASK; |
| 4893 | u32 timeout_usec, dec_id, dec_bit, offset, graceful; |
| 4894 | int rc; |
| 4895 | |
| 4896 | if (hdev->pldm) |
| 4897 | timeout_usec = GAUDI2_PLDM_VDEC_TIMEOUT_USEC; |
| 4898 | else |
| 4899 | timeout_usec = GAUDI2_VDEC_TIMEOUT_USEC; |
| 4900 | |
| 4901 | for (dec_id = 0 ; dec_id < NUM_OF_DEC_PER_DCORE ; dec_id++) { |
| 4902 | dec_bit = PCIE_DEC_SHIFT + dec_id; |
| 4903 | if (!(hdev->asic_prop.decoder_enabled_mask & BIT(dec_bit))) |
| 4904 | continue; |
| 4905 | |
| 4906 | offset = dec_id * PCIE_VDEC_OFFSET; |
| 4907 | |
| 4908 | WREG32(mmPCIE_DEC0_CMD_SWREG16 + offset, 0); |
| 4909 | |
| 4910 | WREG32(mmPCIE_VDEC0_BRDG_CTRL_GRACEFUL + offset, reg_val); |
| 4911 | |
| 4912 | /* Wait until all traffic from the decoder stops
| 4913 |  * before applying core reset.
| 4914 |  */
| 4915 | rc = hl_poll_timeout( |
| 4916 | hdev, |
| 4917 | mmPCIE_VDEC0_BRDG_CTRL_GRACEFUL + offset, |
| 4918 | graceful, |
| 4919 | (graceful & graceful_pend_mask), |
| 4920 | 100, |
| 4921 | timeout_usec); |
| 4922 | if (rc) |
| 4923 | dev_err(hdev->dev, |
| 4924 | "Failed to stop traffic from PCIe Decoder %d\n" , |
| 4925 | dec_id); |
| 4926 | } |
| 4927 | } |
| 4928 | |
| 4929 | static void gaudi2_stop_dec(struct hl_device *hdev) |
| 4930 | { |
| 4931 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 4932 | int dcore_id; |
| 4933 | |
| 4934 | if ((gaudi2->dec_hw_cap_initialized & HW_CAP_DEC_MASK) == 0) |
| 4935 | return; |
| 4936 | |
| 4937 | for (dcore_id = 0 ; dcore_id < NUM_OF_DCORES ; dcore_id++) |
| 4938 | gaudi2_stop_dcore_dec(hdev, dcore_id); |
| 4939 | |
| 4940 | gaudi2_stop_pcie_dec(hdev); |
| 4941 | } |
| 4942 | |
| 4943 | static void gaudi2_set_arc_running_mode(struct hl_device *hdev, u32 cpu_id, u32 run_mode) |
| 4944 | { |
| 4945 | u32 reg_base, reg_val; |
| 4946 | |
| 4947 | reg_base = gaudi2_arc_blocks_bases[cpu_id]; |
| 4948 | if (run_mode == HL_ENGINE_CORE_RUN) |
| 4949 | reg_val = FIELD_PREP(ARC_FARM_ARC0_AUX_RUN_HALT_REQ_RUN_REQ_MASK, 1); |
| 4950 | else |
| 4951 | reg_val = FIELD_PREP(ARC_FARM_ARC0_AUX_RUN_HALT_REQ_HALT_REQ_MASK, 1); |
| 4952 | |
| 4953 | WREG32(reg_base + ARC_HALT_REQ_OFFSET, reg_val); |
| 4954 | } |
| 4955 | |
| 4956 | static void gaudi2_halt_arcs(struct hl_device *hdev) |
| 4957 | { |
| 4958 | u16 arc_id; |
| 4959 | |
| 4960 | for (arc_id = CPU_ID_SCHED_ARC0; arc_id < CPU_ID_MAX; arc_id++) { |
| 4961 | if (gaudi2_is_arc_enabled(hdev, arc_id)) |
| 4962 | gaudi2_set_arc_running_mode(hdev, arc_id, HL_ENGINE_CORE_HALT);
| 4963 | } |
| 4964 | } |
| 4965 | |
| 4966 | static int gaudi2_verify_arc_running_mode(struct hl_device *hdev, u32 cpu_id, u32 run_mode) |
| 4967 | { |
| 4968 | int rc; |
| 4969 | u32 reg_base, val, ack_mask, timeout_usec = 100000; |
| 4970 | |
| 4971 | if (hdev->pldm) |
| 4972 | timeout_usec *= 100; |
| 4973 | |
| 4974 | reg_base = gaudi2_arc_blocks_bases[cpu_id]; |
| 4975 | if (run_mode == HL_ENGINE_CORE_RUN) |
| 4976 | ack_mask = ARC_FARM_ARC0_AUX_RUN_HALT_ACK_RUN_ACK_MASK; |
| 4977 | else |
| 4978 | ack_mask = ARC_FARM_ARC0_AUX_RUN_HALT_ACK_HALT_ACK_MASK; |
| 4979 | |
| 4980 | rc = hl_poll_timeout(hdev, reg_base + ARC_HALT_ACK_OFFSET, |
| 4981 | val, ((val & ack_mask) == ack_mask), |
| 4982 | 1000, timeout_usec); |
| 4983 | |
| 4984 | if (!rc) { |
| 4985 | /* Clear */ |
| 4986 | val = FIELD_PREP(ARC_FARM_ARC0_AUX_RUN_HALT_REQ_RUN_REQ_MASK, 0); |
| 4987 | WREG32(reg_base + ARC_HALT_REQ_OFFSET, val); |
| 4988 | } |
| 4989 | |
| 4990 | return rc; |
| 4991 | } |
| 4992 | |
| 4993 | static void gaudi2_reset_arcs(struct hl_device *hdev) |
| 4994 | { |
| 4995 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 4996 | u16 arc_id; |
| 4997 | |
| 4998 | if (!gaudi2) |
| 4999 | return; |
| 5000 | |
| 5001 | for (arc_id = CPU_ID_SCHED_ARC0; arc_id < CPU_ID_MAX; arc_id++) |
| 5002 | if (gaudi2_is_arc_enabled(hdev, arc_id)) |
| 5003 | gaudi2_clr_arc_id_cap(hdev, arc_id); |
| 5004 | } |
| 5005 | |
| 5006 | static void gaudi2_nic_qmans_manual_flush(struct hl_device *hdev) |
| 5007 | { |
| 5008 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 5009 | u32 queue_id; |
| 5010 | int i; |
| 5011 | |
| 5012 | if (!(gaudi2->nic_hw_cap_initialized & HW_CAP_NIC_MASK)) |
| 5013 | return; |
| 5014 | |
| 5015 | queue_id = GAUDI2_QUEUE_ID_NIC_0_0; |
| 5016 | |
| 5017 | for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++, queue_id += NUM_OF_PQ_PER_QMAN) { |
| 5018 | if (!(hdev->nic_ports_mask & BIT(i))) |
| 5019 | continue; |
| 5020 | |
| 5021 | gaudi2_qman_manual_flush_common(hdev, queue_id); |
| 5022 | } |
| 5023 | } |
| 5024 | |
| 5025 | static int gaudi2_set_engine_cores(struct hl_device *hdev, u32 *core_ids, |
| 5026 | u32 num_cores, u32 core_command) |
| 5027 | { |
| 5028 | int i, rc; |
| 5029 | |
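| | /*
| |  * Two passes: first issue the run/halt request to every core, then poll
| |  * each one for an ACK, so the cores transition in parallel instead of
| |  * waiting serially per core.
| |  */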
| 5030 | for (i = 0 ; i < num_cores ; i++) { |
| 5031 | if (gaudi2_is_arc_enabled(hdev, core_ids[i]))
| 5032 | gaudi2_set_arc_running_mode(hdev, core_ids[i], core_command);
| 5033 | } |
| 5034 | |
| 5035 | for (i = 0 ; i < num_cores ; i++) { |
| 5036 | if (gaudi2_is_arc_enabled(hdev, core_ids[i])) {
| 5037 | rc = gaudi2_verify_arc_running_mode(hdev, core_ids[i], core_command);
| 5038 | |
| 5039 | if (rc) { |
| 5040 | dev_err(hdev->dev, "failed to %s arc: %d\n" , |
| 5041 | (core_command == HL_ENGINE_CORE_HALT) ? |
| 5042 | "HALT" : "RUN" , core_ids[i]); |
| 5043 | return -1; |
| 5044 | } |
| 5045 | } |
| 5046 | } |
| 5047 | |
| 5048 | return 0; |
| 5049 | } |
| 5050 | |
| 5051 | static int gaudi2_set_tpc_engine_mode(struct hl_device *hdev, u32 engine_id, u32 engine_command) |
| 5052 | { |
| 5053 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 5054 | u32 reg_base, reg_addr, reg_val, tpc_id; |
| 5055 | |
| 5056 | if (!(gaudi2->tpc_hw_cap_initialized & HW_CAP_TPC_MASK)) |
| 5057 | return 0; |
| 5058 | |
| 5059 | tpc_id = gaudi2_tpc_engine_id_to_tpc_id[engine_id]; |
| 5060 | if (!(gaudi2->tpc_hw_cap_initialized & BIT_ULL(HW_CAP_TPC_SHIFT + tpc_id))) |
| 5061 | return 0; |
| 5062 | |
| 5063 | reg_base = gaudi2_tpc_cfg_blocks_bases[tpc_id]; |
| 5064 | reg_addr = reg_base + TPC_CFG_STALL_OFFSET; |
| 5065 | reg_val = FIELD_PREP(DCORE0_TPC0_CFG_TPC_STALL_V_MASK, |
| 5066 | (engine_command == HL_ENGINE_STALL) ? 1 : 0); |
| 5067 | WREG32(reg_addr, reg_val); |
| 5068 | |
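| | /* On resume, also toggle the debug-exit bit so a TPC halted in a debug state is released as well */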
| 5069 | if (engine_command == HL_ENGINE_RESUME) { |
| 5070 | reg_base = gaudi2_tpc_eml_cfg_blocks_bases[tpc_id]; |
| 5071 | reg_addr = reg_base + TPC_EML_CFG_DBG_CNT_OFFSET; |
| 5072 | RMWREG32(reg_addr, 0x1, DCORE0_TPC0_EML_CFG_DBG_CNT_DBG_EXIT_MASK); |
| 5073 | } |
| 5074 | |
| 5075 | return 0; |
| 5076 | } |
| 5077 | |
| 5078 | static int gaudi2_set_mme_engine_mode(struct hl_device *hdev, u32 engine_id, u32 engine_command) |
| 5079 | { |
| 5080 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 5081 | u32 reg_base, reg_addr, reg_val, mme_id; |
| 5082 | |
| 5083 | mme_id = gaudi2_mme_engine_id_to_mme_id[engine_id]; |
| 5084 | if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_MME_SHIFT + mme_id))) |
| 5085 | return 0; |
| 5086 | |
| 5087 | reg_base = gaudi2_mme_ctrl_lo_blocks_bases[mme_id]; |
| 5088 | reg_addr = reg_base + MME_CTRL_LO_QM_STALL_OFFSET; |
| 5089 | reg_val = FIELD_PREP(DCORE0_MME_CTRL_LO_QM_STALL_V_MASK, |
| 5090 | (engine_command == HL_ENGINE_STALL) ? 1 : 0); |
| 5091 | WREG32(reg_addr, reg_val); |
| 5092 | |
| 5093 | return 0; |
| 5094 | } |
| 5095 | |
| 5096 | static int gaudi2_set_edma_engine_mode(struct hl_device *hdev, u32 engine_id, u32 engine_command) |
| 5097 | { |
| 5098 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 5099 | u32 reg_base, reg_addr, reg_val, edma_id; |
| 5100 | |
| 5101 | if (!(gaudi2->hw_cap_initialized & HW_CAP_EDMA_MASK)) |
| 5102 | return 0; |
| 5103 | |
| 5104 | edma_id = gaudi2_edma_engine_id_to_edma_id[engine_id]; |
| 5105 | if (!(gaudi2->hw_cap_initialized & BIT_ULL(HW_CAP_EDMA_SHIFT + edma_id))) |
| 5106 | return 0; |
| 5107 | |
| 5108 | reg_base = gaudi2_dma_core_blocks_bases[edma_id]; |
| 5109 | reg_addr = reg_base + EDMA_CORE_CFG_STALL_OFFSET; |
| 5110 | reg_val = FIELD_PREP(DCORE0_EDMA0_CORE_CFG_1_HALT_MASK, |
| 5111 | (engine_command == HL_ENGINE_STALL) ? 1 : 0); |
| 5112 | WREG32(reg_addr, reg_val); |
| 5113 | |
| 5114 | if (engine_command == HL_ENGINE_STALL) { |
| 5115 | reg_val = FIELD_PREP(DCORE0_EDMA0_CORE_CFG_1_HALT_MASK, 0x1) | |
| 5116 | FIELD_PREP(DCORE0_EDMA0_CORE_CFG_1_FLUSH_MASK, 0x1); |
| 5117 | WREG32(reg_addr, reg_val); |
| 5118 | } |
| 5119 | |
| 5120 | return 0; |
| 5121 | } |
| 5122 | |
| 5123 | static int gaudi2_set_engine_modes(struct hl_device *hdev, |
| 5124 | u32 *engine_ids, u32 num_engines, u32 engine_command) |
| 5125 | { |
| 5126 | int i, rc; |
| 5127 | |
| 5128 | for (i = 0 ; i < num_engines ; ++i) { |
| 5129 | switch (engine_ids[i]) { |
| 5130 | case GAUDI2_DCORE0_ENGINE_ID_TPC_0 ... GAUDI2_DCORE0_ENGINE_ID_TPC_5: |
| 5131 | case GAUDI2_DCORE1_ENGINE_ID_TPC_0 ... GAUDI2_DCORE1_ENGINE_ID_TPC_5: |
| 5132 | case GAUDI2_DCORE2_ENGINE_ID_TPC_0 ... GAUDI2_DCORE2_ENGINE_ID_TPC_5: |
| 5133 | case GAUDI2_DCORE3_ENGINE_ID_TPC_0 ... GAUDI2_DCORE3_ENGINE_ID_TPC_5: |
| 5134 | rc = gaudi2_set_tpc_engine_mode(hdev, engine_ids[i], engine_command);
| 5135 | if (rc) |
| 5136 | return rc; |
| 5137 | |
| 5138 | break; |
| 5139 | case GAUDI2_DCORE0_ENGINE_ID_MME: |
| 5140 | case GAUDI2_DCORE1_ENGINE_ID_MME: |
| 5141 | case GAUDI2_DCORE2_ENGINE_ID_MME: |
| 5142 | case GAUDI2_DCORE3_ENGINE_ID_MME: |
| 5143 | rc = gaudi2_set_mme_engine_mode(hdev, engine_ids[i], engine_command);
| 5144 | if (rc) |
| 5145 | return rc; |
| 5146 | |
| 5147 | break; |
| 5148 | case GAUDI2_DCORE0_ENGINE_ID_EDMA_0 ... GAUDI2_DCORE0_ENGINE_ID_EDMA_1: |
| 5149 | case GAUDI2_DCORE1_ENGINE_ID_EDMA_0 ... GAUDI2_DCORE1_ENGINE_ID_EDMA_1: |
| 5150 | case GAUDI2_DCORE2_ENGINE_ID_EDMA_0 ... GAUDI2_DCORE2_ENGINE_ID_EDMA_1: |
| 5151 | case GAUDI2_DCORE3_ENGINE_ID_EDMA_0 ... GAUDI2_DCORE3_ENGINE_ID_EDMA_1: |
| 5152 | rc = gaudi2_set_edma_engine_mode(hdev, engine_ids[i], engine_command);
| 5153 | if (rc) |
| 5154 | return rc; |
| 5155 | |
| 5156 | break; |
| 5157 | default: |
| 5158 | dev_err(hdev->dev, "Invalid engine ID %u\n" , engine_ids[i]); |
| 5159 | return -EINVAL; |
| 5160 | } |
| 5161 | } |
| 5162 | |
| 5163 | return 0; |
| 5164 | } |
| 5165 | |
| 5166 | static int gaudi2_set_engines(struct hl_device *hdev, u32 *engine_ids, |
| 5167 | u32 num_engines, u32 engine_command) |
| 5168 | { |
| 5169 | switch (engine_command) { |
| 5170 | case HL_ENGINE_CORE_HALT: |
| 5171 | case HL_ENGINE_CORE_RUN: |
| 5172 | return gaudi2_set_engine_cores(hdev, engine_ids, num_engines, engine_command);
| 5173 | |
| 5174 | case HL_ENGINE_STALL: |
| 5175 | case HL_ENGINE_RESUME: |
| 5176 | return gaudi2_set_engine_modes(hdev, engine_ids, num_engines, engine_command); |
| 5177 | |
| 5178 | default: |
| 5179 | dev_err(hdev->dev, "failed to execute command id %u\n" , engine_command); |
| 5180 | return -EINVAL; |
| 5181 | } |
| 5182 | } |
| 5183 | |
| 5184 | static void gaudi2_halt_engines(struct hl_device *hdev, bool hard_reset, bool fw_reset) |
| 5185 | { |
| 5186 | u32 wait_timeout_ms; |
| 5187 | |
| 5188 | if (hdev->pldm) |
| 5189 | wait_timeout_ms = GAUDI2_PLDM_RESET_WAIT_MSEC; |
| 5190 | else |
| 5191 | wait_timeout_ms = GAUDI2_RESET_WAIT_MSEC; |
| 5192 | |
| 5193 | if (fw_reset || hdev->cpld_shutdown) |
| 5194 | goto skip_engines; |
| 5195 | |
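| | /*
| |  * Halt order: first stop the QMANs from issuing new work, then stall the
| |  * engines themselves, and only then disable the QMANs, with a grace wait
| |  * between the stages.
| |  */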
| 5196 | gaudi2_stop_dma_qmans(hdev); |
| 5197 | gaudi2_stop_mme_qmans(hdev); |
| 5198 | gaudi2_stop_tpc_qmans(hdev); |
| 5199 | gaudi2_stop_rot_qmans(hdev); |
| 5200 | gaudi2_stop_nic_qmans(hdev); |
| 5201 | msleep(wait_timeout_ms);
| 5202 | |
| 5203 | gaudi2_halt_arcs(hdev); |
| 5204 | gaudi2_dma_stall(hdev); |
| 5205 | gaudi2_mme_stall(hdev); |
| 5206 | gaudi2_tpc_stall(hdev); |
| 5207 | gaudi2_rotator_stall(hdev); |
| 5208 | |
| 5209 | msleep(wait_timeout_ms);
| 5210 | |
| 5211 | gaudi2_stop_dec(hdev); |
| 5212 | |
| 5213 | /* |
| 5214 |  * in case of soft reset, do a manual flush for QMANs (currently done
| 5215 |  * only for NIC QMANs)
| 5216 | */ |
| 5217 | if (!hard_reset) |
| 5218 | gaudi2_nic_qmans_manual_flush(hdev); |
| 5219 | |
| 5220 | gaudi2_disable_dma_qmans(hdev); |
| 5221 | gaudi2_disable_mme_qmans(hdev); |
| 5222 | gaudi2_disable_tpc_qmans(hdev); |
| 5223 | gaudi2_disable_rot_qmans(hdev); |
| 5224 | gaudi2_disable_nic_qmans(hdev); |
| 5225 | gaudi2_disable_timestamp(hdev); |
| 5226 | |
| 5227 | skip_engines: |
| 5228 | if (hard_reset) { |
| 5229 | gaudi2_disable_msix(hdev); |
| 5230 | return; |
| 5231 | } |
| 5232 | |
| 5233 | gaudi2_sync_irqs(hdev); |
| 5234 | } |
| 5235 | |
| 5236 | static void gaudi2_init_firmware_preload_params(struct hl_device *hdev) |
| 5237 | { |
| 5238 | struct pre_fw_load_props *pre_fw_load = &hdev->fw_loader.pre_fw_load; |
| 5239 | |
| 5240 | pre_fw_load->cpu_boot_status_reg = mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS; |
| 5241 | pre_fw_load->sts_boot_dev_sts0_reg = mmCPU_BOOT_DEV_STS0; |
| 5242 | pre_fw_load->sts_boot_dev_sts1_reg = mmCPU_BOOT_DEV_STS1; |
| 5243 | pre_fw_load->boot_err0_reg = mmCPU_BOOT_ERR0; |
| 5244 | pre_fw_load->boot_err1_reg = mmCPU_BOOT_ERR1; |
| 5245 | pre_fw_load->wait_for_preboot_timeout = GAUDI2_PREBOOT_REQ_TIMEOUT_USEC; |
| 5246 | pre_fw_load->wait_for_preboot_extended_timeout = |
| 5247 | GAUDI2_PREBOOT_EXTENDED_REQ_TIMEOUT_USEC; |
| 5248 | } |
| 5249 | |
| 5250 | static void gaudi2_init_firmware_loader(struct hl_device *hdev) |
| 5251 | { |
| 5252 | struct fw_load_mgr *fw_loader = &hdev->fw_loader; |
| 5253 | struct dynamic_fw_load_mgr *dynamic_loader; |
| 5254 | struct cpu_dyn_regs *dyn_regs; |
| 5255 | |
| 5256 | /* fill common fields */ |
| 5257 | fw_loader->fw_comp_loaded = FW_TYPE_NONE; |
| 5258 | fw_loader->boot_fit_img.image_name = GAUDI2_BOOT_FIT_FILE; |
| 5259 | fw_loader->linux_img.image_name = GAUDI2_LINUX_FW_FILE; |
| 5260 | fw_loader->boot_fit_timeout = GAUDI2_BOOT_FIT_REQ_TIMEOUT_USEC; |
| 5261 | fw_loader->skip_bmc = false; |
| 5262 | fw_loader->sram_bar_id = SRAM_CFG_BAR_ID; |
| 5263 | fw_loader->dram_bar_id = DRAM_BAR_ID; |
| 5264 | fw_loader->cpu_timeout = GAUDI2_CPU_TIMEOUT_USEC; |
| 5265 | |
| 5266 | /* here we set initial values for a few specific dynamic regs (before
| 5267 |  * reading the first descriptor from FW, those values have to be
| 5268 |  * hard-coded). in later stages of the protocol those values will be
| 5269 |  * updated automatically by reading the FW descriptor, so the data there
| 5270 |  * will always be up-to-date
| 5271 |  */
| 5272 | dynamic_loader = &hdev->fw_loader.dynamic_loader; |
| 5273 | dyn_regs = &dynamic_loader->comm_desc.cpu_dyn_regs; |
| 5274 | dyn_regs->kmd_msg_to_cpu = cpu_to_le32(mmPSOC_GLOBAL_CONF_KMD_MSG_TO_CPU); |
| 5275 | dyn_regs->cpu_cmd_status_to_host = cpu_to_le32(mmCPU_CMD_STATUS_TO_HOST); |
| 5276 | dynamic_loader->wait_for_bl_timeout = GAUDI2_WAIT_FOR_BL_TIMEOUT_USEC; |
| 5277 | } |
| 5278 | |
| 5279 | static int gaudi2_init_cpu(struct hl_device *hdev) |
| 5280 | { |
| 5281 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 5282 | int rc; |
| 5283 | |
| 5284 | if (!(hdev->fw_components & FW_TYPE_PREBOOT_CPU)) |
| 5285 | return 0; |
| 5286 | |
| 5287 | if (gaudi2->hw_cap_initialized & HW_CAP_CPU) |
| 5288 | return 0; |
| 5289 | |
| 5290 | rc = hl_fw_init_cpu(hdev); |
| 5291 | if (rc) |
| 5292 | return rc; |
| 5293 | |
| 5294 | gaudi2->hw_cap_initialized |= HW_CAP_CPU; |
| 5295 | |
| 5296 | return 0; |
| 5297 | } |
| 5298 | |
| 5299 | static int gaudi2_init_cpu_queues(struct hl_device *hdev, u32 cpu_timeout) |
| 5300 | { |
| 5301 | struct hl_hw_queue *cpu_pq = &hdev->kernel_queues[GAUDI2_QUEUE_ID_CPU_PQ]; |
| 5302 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 5303 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 5304 | struct cpu_dyn_regs *dyn_regs; |
| 5305 | struct hl_eq *eq; |
| 5306 | u32 status; |
| 5307 | int err; |
| 5308 | |
| 5309 | if (!hdev->cpu_queues_enable) |
| 5310 | return 0; |
| 5311 | |
| 5312 | if (gaudi2->hw_cap_initialized & HW_CAP_CPU_Q) |
| 5313 | return 0; |
| 5314 | |
| 5315 | eq = &hdev->event_queue; |
| 5316 | |
| 5317 | dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; |
| 5318 | |
| 5319 | WREG32(mmCPU_IF_PQ_BASE_ADDR_LOW, lower_32_bits(cpu_pq->bus_address)); |
| 5320 | WREG32(mmCPU_IF_PQ_BASE_ADDR_HIGH, upper_32_bits(cpu_pq->bus_address)); |
| 5321 | |
| 5322 | WREG32(mmCPU_IF_EQ_BASE_ADDR_LOW, lower_32_bits(eq->bus_address)); |
| 5323 | WREG32(mmCPU_IF_EQ_BASE_ADDR_HIGH, upper_32_bits(eq->bus_address)); |
| 5324 | |
| 5325 | WREG32(mmCPU_IF_CQ_BASE_ADDR_LOW, lower_32_bits(hdev->cpu_accessible_dma_address)); |
| 5326 | WREG32(mmCPU_IF_CQ_BASE_ADDR_HIGH, upper_32_bits(hdev->cpu_accessible_dma_address)); |
| 5327 | |
| 5328 | WREG32(mmCPU_IF_PQ_LENGTH, HL_QUEUE_SIZE_IN_BYTES); |
| 5329 | WREG32(mmCPU_IF_EQ_LENGTH, HL_EQ_SIZE_IN_BYTES); |
| 5330 | WREG32(mmCPU_IF_CQ_LENGTH, HL_CPU_ACCESSIBLE_MEM_SIZE); |
| 5331 | |
| 5332 | /* Used for EQ CI */ |
| 5333 | WREG32(mmCPU_IF_EQ_RD_OFFS, 0); |
| 5334 | |
| 5335 | WREG32(mmCPU_IF_PF_PQ_PI, 0); |
| 5336 | |
| 5337 | WREG32(mmCPU_IF_QUEUE_INIT, PQ_INIT_STATUS_READY_FOR_CP); |
| 5338 | |
| 5339 | /* Let the ARC know we are ready as it is now handling those queues */ |
| 5340 | |
| 5341 | WREG32(le32_to_cpu(dyn_regs->gic_host_pi_upd_irq), |
| 5342 | gaudi2_irq_map_table[GAUDI2_EVENT_CPU_PI_UPDATE].cpu_id); |
| 5343 | |
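| | /* We wrote READY_FOR_CP above; the handshake completes when the device CPU flips the status to READY_FOR_HOST */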
| 5344 | err = hl_poll_timeout( |
| 5345 | hdev, |
| 5346 | mmCPU_IF_QUEUE_INIT, |
| 5347 | status, |
| 5348 | (status == PQ_INIT_STATUS_READY_FOR_HOST), |
| 5349 | 1000, |
| 5350 | cpu_timeout); |
| 5351 | |
| 5352 | if (err) { |
| 5353 | dev_err(hdev->dev, "Failed to communicate with device CPU (timeout)\n" ); |
| 5354 | return -EIO; |
| 5355 | } |
| 5356 | |
| 5357 | /* update FW application security bits */ |
| 5358 | if (prop->fw_cpu_boot_dev_sts0_valid) |
| 5359 | prop->fw_app_cpu_boot_dev_sts0 = RREG32(mmCPU_BOOT_DEV_STS0); |
| 5360 | |
| 5361 | if (prop->fw_cpu_boot_dev_sts1_valid) |
| 5362 | prop->fw_app_cpu_boot_dev_sts1 = RREG32(mmCPU_BOOT_DEV_STS1); |
| 5363 | |
| 5364 | gaudi2->hw_cap_initialized |= HW_CAP_CPU_Q; |
| 5365 | return 0; |
| 5366 | } |
| 5367 | |
| 5368 | static void gaudi2_init_qman_pq(struct hl_device *hdev, u32 reg_base, |
| 5369 | u32 queue_id_base) |
| 5370 | { |
| 5371 | struct hl_hw_queue *q; |
| 5372 | u32 pq_id, pq_offset; |
| 5373 | |
| 5374 | for (pq_id = 0 ; pq_id < NUM_OF_PQ_PER_QMAN ; pq_id++) { |
| 5375 | q = &hdev->kernel_queues[queue_id_base + pq_id]; |
| 5376 | pq_offset = pq_id * 4; |
| 5377 | |
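| | /* A PQ is backed either by device DRAM (dram_bd) or by host memory; program the matching base address */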
| 5378 | if (q->dram_bd) { |
| 5379 | WREG32(reg_base + QM_PQ_BASE_LO_0_OFFSET + pq_offset, |
| 5380 | lower_32_bits(q->pq_dram_address)); |
| 5381 | WREG32(reg_base + QM_PQ_BASE_HI_0_OFFSET + pq_offset, |
| 5382 | upper_32_bits(q->pq_dram_address)); |
| 5383 | } else { |
| 5384 | WREG32(reg_base + QM_PQ_BASE_LO_0_OFFSET + pq_offset, |
| 5385 | lower_32_bits(q->bus_address)); |
| 5386 | WREG32(reg_base + QM_PQ_BASE_HI_0_OFFSET + pq_offset, |
| 5387 | upper_32_bits(q->bus_address)); |
| 5388 | } |
| 5389 | WREG32(reg_base + QM_PQ_SIZE_0_OFFSET + pq_offset, ilog2(HL_QUEUE_LENGTH)); |
| 5390 | WREG32(reg_base + QM_PQ_PI_0_OFFSET + pq_offset, 0); |
| 5391 | WREG32(reg_base + QM_PQ_CI_0_OFFSET + pq_offset, 0); |
| 5392 | } |
| 5393 | } |
| 5394 | |
| 5395 | static void gaudi2_init_qman_cp(struct hl_device *hdev, u32 reg_base) |
| 5396 | { |
| 5397 | u32 cp_id, cp_offset, mtr_base_lo, mtr_base_hi, so_base_lo, so_base_hi; |
| 5398 | |
| 5399 | mtr_base_lo = lower_32_bits(CFG_BASE + mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); |
| 5400 | mtr_base_hi = upper_32_bits(CFG_BASE + mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0); |
| 5401 | so_base_lo = lower_32_bits(CFG_BASE + mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0); |
| 5402 | so_base_hi = upper_32_bits(CFG_BASE + mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0); |
| 5403 | |
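| | /* Hand each CP the SM monitor-payload and SOB base addresses, used as the address bases for its message (fence/signal) packets */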
| 5404 | for (cp_id = 0 ; cp_id < NUM_OF_CP_PER_QMAN; cp_id++) { |
| 5405 | cp_offset = cp_id * 4; |
| 5406 | |
| 5407 | WREG32(reg_base + QM_CP_MSG_BASE0_ADDR_LO_0_OFFSET + cp_offset, mtr_base_lo); |
| 5408 | WREG32(reg_base + QM_CP_MSG_BASE0_ADDR_HI_0_OFFSET + cp_offset, mtr_base_hi); |
| 5409 | WREG32(reg_base + QM_CP_MSG_BASE1_ADDR_LO_0_OFFSET + cp_offset, so_base_lo); |
| 5410 | WREG32(reg_base + QM_CP_MSG_BASE1_ADDR_HI_0_OFFSET + cp_offset, so_base_hi); |
| 5411 | } |
| 5412 | |
| 5413 | /* allow QMANs to accept work from ARC CQF */ |
| 5414 | WREG32(reg_base + QM_CP_CFG_OFFSET, FIELD_PREP(PDMA0_QM_CP_CFG_SWITCH_EN_MASK, 0x1)); |
| 5415 | } |
| 5416 | |
| 5417 | static void gaudi2_init_qman_pqc(struct hl_device *hdev, u32 reg_base, |
| 5418 | u32 queue_id_base) |
| 5419 | { |
| 5420 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 5421 | u32 pq_id, pq_offset, so_base_lo, so_base_hi; |
| 5422 | |
| 5423 | so_base_lo = lower_32_bits(CFG_BASE + mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0); |
| 5424 | so_base_hi = upper_32_bits(CFG_BASE + mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0); |
| 5425 | |
| 5426 | for (pq_id = 0 ; pq_id < NUM_OF_PQ_PER_QMAN ; pq_id++) { |
| 5427 | pq_offset = pq_id * 4; |
| 5428 | |
| 5429 | /* Point the PQC HBW completion writes at the scratchpad buffer, as that data is not needed */
| 5430 | WREG32(reg_base + QM_PQC_HBW_BASE_LO_0_OFFSET + pq_offset, |
| 5431 | lower_32_bits(gaudi2->scratchpad_bus_address)); |
| 5432 | WREG32(reg_base + QM_PQC_HBW_BASE_HI_0_OFFSET + pq_offset, |
| 5433 | upper_32_bits(gaudi2->scratchpad_bus_address)); |
| 5434 | WREG32(reg_base + QM_PQC_SIZE_0_OFFSET + pq_offset, |
| 5435 | ilog2(PAGE_SIZE / sizeof(struct hl_cq_entry))); |
| 5436 | |
| 5437 | WREG32(reg_base + QM_PQC_PI_0_OFFSET + pq_offset, 0); |
| 5438 | WREG32(reg_base + QM_PQC_LBW_WDATA_0_OFFSET + pq_offset, QM_PQC_LBW_WDATA); |
| 5439 | WREG32(reg_base + QM_PQC_LBW_BASE_LO_0_OFFSET + pq_offset, so_base_lo); |
| 5440 | WREG32(reg_base + QM_PQC_LBW_BASE_HI_0_OFFSET + pq_offset, so_base_hi); |
| 5441 | } |
| 5442 | |
| 5443 | /* Enable QMAN H/W completion */ |
| 5444 | WREG32(reg_base + QM_PQC_CFG_OFFSET, 1 << PDMA0_QM_PQC_CFG_EN_SHIFT); |
| 5445 | } |
| 5446 | |
| 5447 | static u32 gaudi2_get_dyn_sp_reg(struct hl_device *hdev, u32 queue_id_base) |
| 5448 | { |
| 5449 | struct cpu_dyn_regs *dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; |
| 5450 | u32 sp_reg_addr; |
| 5451 | |
| 5452 | switch (queue_id_base) { |
| 5453 | case GAUDI2_QUEUE_ID_PDMA_0_0...GAUDI2_QUEUE_ID_PDMA_1_3: |
| 5454 | fallthrough; |
| 5455 | case GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3: |
| 5456 | fallthrough; |
| 5457 | case GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3: |
| 5458 | fallthrough; |
| 5459 | case GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3: |
| 5460 | fallthrough; |
| 5461 | case GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3: |
| 5462 | sp_reg_addr = le32_to_cpu(dyn_regs->gic_dma_qm_irq_ctrl); |
| 5463 | break; |
| 5464 | case GAUDI2_QUEUE_ID_DCORE0_MME_0_0...GAUDI2_QUEUE_ID_DCORE0_MME_0_3: |
| 5465 | fallthrough; |
| 5466 | case GAUDI2_QUEUE_ID_DCORE1_MME_0_0...GAUDI2_QUEUE_ID_DCORE1_MME_0_3: |
| 5467 | fallthrough; |
| 5468 | case GAUDI2_QUEUE_ID_DCORE2_MME_0_0...GAUDI2_QUEUE_ID_DCORE2_MME_0_3: |
| 5469 | fallthrough; |
| 5470 | case GAUDI2_QUEUE_ID_DCORE3_MME_0_0...GAUDI2_QUEUE_ID_DCORE3_MME_0_3: |
| 5471 | sp_reg_addr = le32_to_cpu(dyn_regs->gic_mme_qm_irq_ctrl); |
| 5472 | break; |
| 5473 | case GAUDI2_QUEUE_ID_DCORE0_TPC_0_0 ... GAUDI2_QUEUE_ID_DCORE0_TPC_6_3: |
| 5474 | fallthrough; |
| 5475 | case GAUDI2_QUEUE_ID_DCORE1_TPC_0_0 ... GAUDI2_QUEUE_ID_DCORE1_TPC_5_3: |
| 5476 | fallthrough; |
| 5477 | case GAUDI2_QUEUE_ID_DCORE2_TPC_0_0 ... GAUDI2_QUEUE_ID_DCORE2_TPC_5_3: |
| 5478 | fallthrough; |
| 5479 | case GAUDI2_QUEUE_ID_DCORE3_TPC_0_0 ... GAUDI2_QUEUE_ID_DCORE3_TPC_5_3: |
| 5480 | sp_reg_addr = le32_to_cpu(dyn_regs->gic_tpc_qm_irq_ctrl); |
| 5481 | break; |
| 5482 | case GAUDI2_QUEUE_ID_ROT_0_0...GAUDI2_QUEUE_ID_ROT_1_3: |
| 5483 | sp_reg_addr = le32_to_cpu(dyn_regs->gic_rot_qm_irq_ctrl); |
| 5484 | break; |
| 5485 | case GAUDI2_QUEUE_ID_NIC_0_0...GAUDI2_QUEUE_ID_NIC_23_3: |
| 5486 | sp_reg_addr = le32_to_cpu(dyn_regs->gic_nic_qm_irq_ctrl); |
| 5487 | break; |
| 5488 | default: |
| 5489 | dev_err(hdev->dev, "Unexpected h/w queue %d\n" , queue_id_base); |
| 5490 | return 0; |
| 5491 | } |
| 5492 | |
| 5493 | return sp_reg_addr; |
| 5494 | } |
| 5495 | |
| 5496 | static void gaudi2_init_qman_common(struct hl_device *hdev, u32 reg_base, |
| 5497 | u32 queue_id_base) |
| 5498 | { |
| 5499 | u32 glbl_prot = QMAN_MAKE_TRUSTED, irq_handler_offset; |
| 5500 | int map_table_entry; |
| 5501 | |
| 5502 | WREG32(reg_base + QM_GLBL_PROT_OFFSET, glbl_prot); |
| 5503 | |
| 5504 | irq_handler_offset = gaudi2_get_dyn_sp_reg(hdev, queue_id_base); |
| 5505 | WREG32(reg_base + QM_GLBL_ERR_ADDR_LO_OFFSET, lower_32_bits(CFG_BASE + irq_handler_offset)); |
| 5506 | WREG32(reg_base + QM_GLBL_ERR_ADDR_HI_OFFSET, upper_32_bits(CFG_BASE + irq_handler_offset)); |
| 5507 | |
| 5508 | map_table_entry = gaudi2_qman_async_event_id[queue_id_base]; |
| 5509 | WREG32(reg_base + QM_GLBL_ERR_WDATA_OFFSET, |
| 5510 | gaudi2_irq_map_table[map_table_entry].cpu_id); |
| 5511 | |
| 5512 | WREG32(reg_base + QM_ARB_ERR_MSG_EN_OFFSET, QM_ARB_ERR_MSG_EN_MASK); |
| 5513 | |
| 5514 | WREG32(reg_base + QM_ARB_SLV_CHOISE_WDT_OFFSET, GAUDI2_ARB_WDT_TIMEOUT); |
| 5515 | WREG32(reg_base + QM_GLBL_CFG1_OFFSET, 0); |
| 5516 | WREG32(reg_base + QM_GLBL_CFG2_OFFSET, 0); |
| 5517 | |
| 5518 | /* Enable the QMAN channel. |
| 5519 |  * PDMA QMAN configuration is different, as we do not allow the user to
| 5520 | * access some of the CPs. |
| 5521 | * PDMA0: CP2/3 are reserved for the ARC usage. |
| 5522 | * PDMA1: CP1/2/3 are reserved for the ARC usage. |
| 5523 | */ |
| 5524 | if (reg_base == gaudi2_qm_blocks_bases[GAUDI2_QUEUE_ID_PDMA_1_0]) |
| 5525 | WREG32(reg_base + QM_GLBL_CFG0_OFFSET, PDMA1_QMAN_ENABLE); |
| 5526 | else if (reg_base == gaudi2_qm_blocks_bases[GAUDI2_QUEUE_ID_PDMA_0_0]) |
| 5527 | WREG32(reg_base + QM_GLBL_CFG0_OFFSET, PDMA0_QMAN_ENABLE); |
| 5528 | else |
| 5529 | WREG32(reg_base + QM_GLBL_CFG0_OFFSET, QMAN_ENABLE); |
| 5530 | } |
| 5531 | |
| 5532 | static void gaudi2_init_qman(struct hl_device *hdev, u32 reg_base, |
| 5533 | u32 queue_id_base) |
| 5534 | { |
| 5535 | u32 pq_id; |
| 5536 | |
| 5537 | for (pq_id = 0 ; pq_id < NUM_OF_PQ_PER_QMAN ; pq_id++) |
| 5538 | hdev->kernel_queues[queue_id_base + pq_id].cq_id = GAUDI2_RESERVED_CQ_CS_COMPLETION; |
| 5539 | |
| 5540 | gaudi2_init_qman_pq(hdev, reg_base, queue_id_base); |
| 5541 | gaudi2_init_qman_cp(hdev, reg_base); |
| 5542 | gaudi2_init_qman_pqc(hdev, reg_base, queue_id_base); |
| 5543 | gaudi2_init_qman_common(hdev, reg_base, queue_id_base); |
| 5544 | } |
| 5545 | |
| 5546 | static void gaudi2_init_dma_core(struct hl_device *hdev, u32 reg_base, |
| 5547 | u32 dma_core_id, bool is_secure) |
| 5548 | { |
| 5549 | u32 prot, irq_handler_offset; |
| 5550 | struct cpu_dyn_regs *dyn_regs; |
| 5551 | int map_table_entry; |
| 5552 | |
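| | /* Always enable protection-error reporting; the privileged bit is set only for secure cores (KDMA is the only such user here) */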
| 5553 | prot = 1 << ARC_FARM_KDMA_PROT_ERR_VAL_SHIFT; |
| 5554 | if (is_secure) |
| 5555 | prot |= 1 << ARC_FARM_KDMA_PROT_VAL_SHIFT; |
| 5556 | |
| 5557 | WREG32(reg_base + DMA_CORE_PROT_OFFSET, prot); |
| 5558 | |
| 5559 | dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; |
| 5560 | irq_handler_offset = le32_to_cpu(dyn_regs->gic_dma_core_irq_ctrl); |
| 5561 | |
| 5562 | WREG32(reg_base + DMA_CORE_ERRMSG_ADDR_LO_OFFSET, |
| 5563 | lower_32_bits(CFG_BASE + irq_handler_offset)); |
| 5564 | |
| 5565 | WREG32(reg_base + DMA_CORE_ERRMSG_ADDR_HI_OFFSET, |
| 5566 | upper_32_bits(CFG_BASE + irq_handler_offset)); |
| 5567 | |
| 5568 | map_table_entry = gaudi2_dma_core_async_event_id[dma_core_id]; |
| 5569 | WREG32(reg_base + DMA_CORE_ERRMSG_WDATA_OFFSET, |
| 5570 | gaudi2_irq_map_table[map_table_entry].cpu_id); |
| 5571 | |
| 5572 | /* Enable the DMA channel */ |
| 5573 | WREG32(reg_base + DMA_CORE_CFG_0_OFFSET, 1 << ARC_FARM_KDMA_CFG_0_EN_SHIFT); |
| 5574 | } |
| 5575 | |
| 5576 | static void gaudi2_init_kdma(struct hl_device *hdev) |
| 5577 | { |
| 5578 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 5579 | u32 reg_base; |
| 5580 | |
| 5581 | if ((gaudi2->hw_cap_initialized & HW_CAP_KDMA) == HW_CAP_KDMA) |
| 5582 | return; |
| 5583 | |
| 5584 | reg_base = gaudi2_dma_core_blocks_bases[DMA_CORE_ID_KDMA]; |
| 5585 | |
| 5586 | gaudi2_init_dma_core(hdev, reg_base, DMA_CORE_ID_KDMA, true);
| 5587 | |
| 5588 | gaudi2->hw_cap_initialized |= HW_CAP_KDMA; |
| 5589 | } |
| 5590 | |
| 5591 | static void gaudi2_init_pdma(struct hl_device *hdev) |
| 5592 | { |
| 5593 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 5594 | u32 reg_base; |
| 5595 | |
| 5596 | if ((gaudi2->hw_cap_initialized & HW_CAP_PDMA_MASK) == HW_CAP_PDMA_MASK) |
| 5597 | return; |
| 5598 | |
| 5599 | reg_base = gaudi2_dma_core_blocks_bases[DMA_CORE_ID_PDMA0]; |
| 5600 | gaudi2_init_dma_core(hdev, reg_base, DMA_CORE_ID_PDMA0, false);
| 5601 | 
| 5602 | reg_base = gaudi2_qm_blocks_bases[GAUDI2_QUEUE_ID_PDMA_0_0];
| 5603 | gaudi2_init_qman(hdev, reg_base, GAUDI2_QUEUE_ID_PDMA_0_0);
| 5604 | 
| 5605 | reg_base = gaudi2_dma_core_blocks_bases[DMA_CORE_ID_PDMA1];
| 5606 | gaudi2_init_dma_core(hdev, reg_base, DMA_CORE_ID_PDMA1, false);
| 5607 | 
| 5608 | reg_base = gaudi2_qm_blocks_bases[GAUDI2_QUEUE_ID_PDMA_1_0];
| 5609 | gaudi2_init_qman(hdev, reg_base, GAUDI2_QUEUE_ID_PDMA_1_0);
| 5610 | |
| 5611 | gaudi2->hw_cap_initialized |= HW_CAP_PDMA_MASK; |
| 5612 | } |
| 5613 | |
| 5614 | static void gaudi2_init_edma_instance(struct hl_device *hdev, u8 seq) |
| 5615 | { |
| 5616 | u32 reg_base, base_edma_core_id, base_edma_qman_id; |
| 5617 | |
| 5618 | base_edma_core_id = DMA_CORE_ID_EDMA0 + seq; |
| 5619 | base_edma_qman_id = edma_stream_base[seq]; |
| 5620 | |
| 5621 | reg_base = gaudi2_dma_core_blocks_bases[base_edma_core_id]; |
| 5622 | gaudi2_init_dma_core(hdev, reg_base, base_edma_core_id, false);
| 5623 | 
| 5624 | reg_base = gaudi2_qm_blocks_bases[base_edma_qman_id];
| 5625 | gaudi2_init_qman(hdev, reg_base, base_edma_qman_id);
| 5626 | } |
| 5627 | |
| 5628 | static void gaudi2_init_edma(struct hl_device *hdev) |
| 5629 | { |
| 5630 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 5631 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 5632 | int dcore, inst; |
| 5633 | |
| 5634 | if ((gaudi2->hw_cap_initialized & HW_CAP_EDMA_MASK) == HW_CAP_EDMA_MASK) |
| 5635 | return; |
| 5636 | |
| 5637 | for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) { |
| 5638 | for (inst = 0 ; inst < NUM_OF_EDMA_PER_DCORE ; inst++) { |
| 5639 | u8 seq = dcore * NUM_OF_EDMA_PER_DCORE + inst; |
| 5640 | |
| 5641 | if (!(prop->edma_enabled_mask & BIT(seq))) |
| 5642 | continue; |
| 5643 | |
| 5644 | gaudi2_init_edma_instance(hdev, seq); |
| 5645 | |
| 5646 | gaudi2->hw_cap_initialized |= BIT_ULL(HW_CAP_EDMA_SHIFT + seq); |
| 5647 | } |
| 5648 | } |
| 5649 | } |
| 5650 | |
| 5651 | /* |
| 5652 | * gaudi2_arm_monitors_for_virt_msix_db() - Arm monitors for writing to the virtual MSI-X doorbell. |
| 5653 | * @hdev: pointer to habanalabs device structure. |
| 5654 | * @sob_id: sync object ID. |
| 5655 | * @first_mon_id: ID of first monitor out of 3 consecutive monitors. |
| 5656 | * @interrupt_id: interrupt ID. |
| 5657 | * |
| 5658 | * Some initiators cannot have HBW address in their completion address registers, and thus cannot |
| 5659 | * write directly to the HBW host memory of the virtual MSI-X doorbell. |
| 5660 | * Instead, they are configured to LBW write to a sync object, and a monitor will do the HBW write. |
| 5661 | * |
| 5662 | * The mechanism in the sync manager block is composed of a master monitor with 3 messages. |
| 5663 | * In addition to the HBW write, the other 2 messages are for preparing the monitor to next |
| 5664 | * completion, by decrementing the sync object value and re-arming the monitor. |
| 5665 | */ |
| 5666 | static void gaudi2_arm_monitors_for_virt_msix_db(struct hl_device *hdev, u32 sob_id, |
| 5667 | u32 first_mon_id, u32 interrupt_id) |
| 5668 | { |
| 5669 | u32 sob_offset, first_mon_offset, mon_offset, payload, sob_group, mode, arm, config; |
| 5670 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 5671 | u64 addr; |
| 5672 | u8 mask; |
| 5673 | |
| 5674 | /* Reset the SOB value */ |
| 5675 | sob_offset = sob_id * sizeof(u32); |
| 5676 | WREG32(mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_offset, 0); |
| 5677 | |
| 5678 | /* Configure 3 monitors: |
| 5679 | * 1. Write interrupt ID to the virtual MSI-X doorbell (master monitor) |
| 5680 | * 2. Decrement SOB value by 1. |
| 5681 | * 3. Re-arm the master monitor. |
| 5682 | */ |
| 5683 | |
| 5684 | first_mon_offset = first_mon_id * sizeof(u32); |
| 5685 | |
| 5686 | /* 2nd monitor: Decrement SOB value by 1 */ |
| 5687 | mon_offset = first_mon_offset + sizeof(u32); |
| 5688 | |
| 5689 | addr = CFG_BASE + mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_offset; |
| 5690 | WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + mon_offset, lower_32_bits(addr)); |
| 5691 | WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 + mon_offset, upper_32_bits(addr)); |
| 5692 | |
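| | /* With the INC flag set, the sign+value fields form a signed add operand: sign=1 with val=0x7FFF encodes -1, i.e. a decrement by one */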
| 5693 | payload = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_VAL_MASK, 0x7FFF) | /* "-1" */ |
| 5694 | FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_SIGN_MASK, 1) | |
| 5695 | FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_INC_MASK, 1); |
| 5696 | WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + mon_offset, payload); |
| 5697 | |
| 5698 | /* 3rd monitor: Re-arm the master monitor */ |
| 5699 | mon_offset = first_mon_offset + 2 * sizeof(u32); |
| 5700 | |
| 5701 | addr = CFG_BASE + mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_0 + first_mon_offset; |
| 5702 | WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + mon_offset, lower_32_bits(addr)); |
| 5703 | WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 + mon_offset, upper_32_bits(addr)); |
| 5704 | |
| 5705 | sob_group = sob_id / 8; |
| 5706 | mask = ~BIT(sob_id & 0x7); |
| 5707 | mode = 0; /* comparison mode is "greater than or equal to" */ |
| 5708 | arm = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_ARM_SID_MASK, sob_group) | |
| 5709 | FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_ARM_MASK_MASK, mask) | |
| 5710 | FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_ARM_SOP_MASK, mode) | |
| 5711 | FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_ARM_SOD_MASK, 1); |
| 5712 | |
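| | /* The ARM value selects the 8-SOB group (SID), leaves only the target SOB unmasked (a clear MASK bit enables a SOB), and fires when its value is >= 1 (SOP=0, SOD=1) */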
| 5713 | payload = arm; |
| 5714 | WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + mon_offset, payload); |
| 5715 | |
| 5716 | /* 1st monitor (master): Write interrupt ID to the virtual MSI-X doorbell */ |
| 5717 | mon_offset = first_mon_offset; |
| 5718 | |
| 5719 | config = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_CONFIG_WR_NUM_MASK, 2); /* "2": 3 writes */ |
| 5720 | WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_0 + mon_offset, config); |
| 5721 | |
| 5722 | addr = gaudi2->virt_msix_db_dma_addr; |
| 5723 | WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + mon_offset, lower_32_bits(addr)); |
| 5724 | WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 + mon_offset, upper_32_bits(addr)); |
| 5725 | |
| 5726 | payload = interrupt_id; |
| 5727 | WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + mon_offset, payload); |
| 5728 | |
| 5729 | WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_0 + mon_offset, arm); |
| 5730 | } |
| 5731 | |
| 5732 | static void gaudi2_prepare_sm_for_virt_msix_db(struct hl_device *hdev) |
| 5733 | { |
| 5734 | u32 decoder_id, sob_id, first_mon_id, interrupt_id; |
| 5735 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 5736 | |
| 5737 | /* Decoder normal/abnormal interrupts */ |
| 5738 | for (decoder_id = 0 ; decoder_id < NUMBER_OF_DEC ; ++decoder_id) { |
| 5739 | if (!(prop->decoder_enabled_mask & BIT(decoder_id))) |
| 5740 | continue; |
| 5741 | |
| 5742 | sob_id = GAUDI2_RESERVED_SOB_DEC_NRM_FIRST + decoder_id; |
| 5743 | first_mon_id = GAUDI2_RESERVED_MON_DEC_NRM_FIRST + 3 * decoder_id; |
| 5744 | interrupt_id = GAUDI2_IRQ_NUM_DCORE0_DEC0_NRM + 2 * decoder_id; |
| 5745 | gaudi2_arm_monitors_for_virt_msix_db(hdev, sob_id, first_mon_id, interrupt_id); |
| 5746 | |
| 5747 | sob_id = GAUDI2_RESERVED_SOB_DEC_ABNRM_FIRST + decoder_id; |
| 5748 | first_mon_id = GAUDI2_RESERVED_MON_DEC_ABNRM_FIRST + 3 * decoder_id; |
| 5749 | interrupt_id += 1; |
| 5750 | gaudi2_arm_monitors_for_virt_msix_db(hdev, sob_id, first_mon_id, interrupt_id); |
| 5751 | } |
| 5752 | } |
| 5753 | |
| 5754 | static void gaudi2_init_sm(struct hl_device *hdev) |
| 5755 | { |
| 5756 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 5757 | u64 cq_address; |
| 5758 | u32 reg_val; |
| 5759 | int i; |
| 5760 | |
| 5761 | /* Enable HBW/LBW CQ for completion monitors */ |
| 5762 | reg_val = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_CONFIG_CQ_EN_MASK, 1); |
| 5763 | reg_val |= FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_CONFIG_LBW_EN_MASK, 1); |
| 5764 | |
| 5765 | for (i = 0 ; i < GAUDI2_MAX_PENDING_CS ; i++) |
| 5766 | WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_0 + (4 * i), reg_val); |
| 5767 | |
| 5768 | /* Enable only HBW CQ for KDMA completion monitor */ |
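| | /* 'i' retains GAUDI2_MAX_PENDING_CS from the loop above, so this configures the next monitor in line */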
| 5769 | reg_val = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_CONFIG_CQ_EN_MASK, 1); |
| 5770 | WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_0 + (4 * i), reg_val); |
| 5771 | |
| 5772 | /* Init CQ0 DB - configure the monitor to trigger MSI-X interrupt */ |
| 5773 | WREG32(mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_0, lower_32_bits(gaudi2->virt_msix_db_dma_addr)); |
| 5774 | WREG32(mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_0, upper_32_bits(gaudi2->virt_msix_db_dma_addr)); |
| 5775 | WREG32(mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_0, GAUDI2_IRQ_NUM_COMPLETION); |
| 5776 | |
| 5777 | for (i = 0 ; i < GAUDI2_RESERVED_CQ_NUMBER ; i++) {
| 5778 | cq_address = hdev->completion_queue[i].bus_address;
| 5780 | |
| 5781 | WREG32(mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_0 + (4 * i), |
| 5782 | lower_32_bits(cq_address)); |
| 5783 | WREG32(mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_0 + (4 * i), |
| 5784 | upper_32_bits(cq_address)); |
| 5785 | WREG32(mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_0 + (4 * i), |
| 5786 | ilog2(HL_CQ_SIZE_IN_BYTES)); |
| 5787 | } |
| 5788 | |
| 5789 | /* Configure kernel ASID and MMU BP */
| 5790 | WREG32(mmDCORE0_SYNC_MNGR_GLBL_ASID_SEC, 0x10000); |
| 5791 | WREG32(mmDCORE0_SYNC_MNGR_GLBL_ASID_NONE_SEC_PRIV, 0); |
| 5792 | |
| 5793 | /* Initialize sync objects and monitors which are used for the virtual MSI-X doorbell */ |
| 5794 | gaudi2_prepare_sm_for_virt_msix_db(hdev); |
| 5795 | } |
| 5796 | |
| 5797 | static void gaudi2_init_mme_acc(struct hl_device *hdev, u32 reg_base) |
| 5798 | { |
| 5799 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 5800 | u32 reg_val; |
| 5801 | int i; |
| 5802 | |
| 5803 | reg_val = FIELD_PREP(MME_ACC_INTR_MASK_WBC_ERR_RESP_MASK, 0); |
| 5804 | reg_val |= FIELD_PREP(MME_ACC_INTR_MASK_AP_SRC_POS_INF_MASK, 1); |
| 5805 | reg_val |= FIELD_PREP(MME_ACC_INTR_MASK_AP_SRC_NEG_INF_MASK, 1); |
| 5806 | reg_val |= FIELD_PREP(MME_ACC_INTR_MASK_AP_SRC_NAN_MASK, 1); |
| 5807 | reg_val |= FIELD_PREP(MME_ACC_INTR_MASK_AP_RESULT_POS_INF_MASK, 1); |
| 5808 | reg_val |= FIELD_PREP(MME_ACC_INTR_MASK_AP_RESULT_NEG_INF_MASK, 1); |
| 5809 | |
| 5810 | WREG32(reg_base + MME_ACC_INTR_MASK_OFFSET, reg_val); |
| 5811 | WREG32(reg_base + MME_ACC_AP_LFSR_POLY_OFFSET, 0x80DEADAF); |
| 5812 | |
| 5813 | for (i = 0 ; i < MME_NUM_OF_LFSR_SEEDS ; i++) { |
| 5814 | WREG32(reg_base + MME_ACC_AP_LFSR_SEED_SEL_OFFSET, i); |
| 5815 | WREG32(reg_base + MME_ACC_AP_LFSR_SEED_WDATA_OFFSET, gaudi2->lfsr_rand_seeds[i]); |
| 5816 | } |
| 5817 | } |
| 5818 | |
| 5819 | static void gaudi2_init_dcore_mme(struct hl_device *hdev, int dcore_id, |
| 5820 | bool config_qman_only) |
| 5821 | { |
| 5822 | u32 queue_id_base, reg_base; |
| 5823 | |
| 5824 | switch (dcore_id) { |
| 5825 | case 0: |
| 5826 | queue_id_base = GAUDI2_QUEUE_ID_DCORE0_MME_0_0; |
| 5827 | break; |
| 5828 | case 1: |
| 5829 | queue_id_base = GAUDI2_QUEUE_ID_DCORE1_MME_0_0; |
| 5830 | break; |
| 5831 | case 2: |
| 5832 | queue_id_base = GAUDI2_QUEUE_ID_DCORE2_MME_0_0; |
| 5833 | break; |
| 5834 | case 3: |
| 5835 | queue_id_base = GAUDI2_QUEUE_ID_DCORE3_MME_0_0; |
| 5836 | break; |
| 5837 | default: |
| 5838 | dev_err(hdev->dev, "Invalid dcore id %u\n" , dcore_id); |
| 5839 | return; |
| 5840 | } |
| 5841 | |
| 5842 | if (!config_qman_only) { |
| 5843 | reg_base = gaudi2_mme_acc_blocks_bases[dcore_id]; |
| 5844 | gaudi2_init_mme_acc(hdev, reg_base); |
| 5845 | } |
| 5846 | |
| 5847 | reg_base = gaudi2_qm_blocks_bases[queue_id_base]; |
| 5848 | gaudi2_init_qman(hdev, reg_base, queue_id_base); |
| 5849 | } |
| 5850 | |
| 5851 | static void gaudi2_init_mme(struct hl_device *hdev) |
| 5852 | { |
| 5853 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 5854 | int i; |
| 5855 | |
| 5856 | if ((gaudi2->hw_cap_initialized & HW_CAP_MME_MASK) == HW_CAP_MME_MASK) |
| 5857 | return; |
| 5858 | |
| 5859 | for (i = 0 ; i < NUM_OF_DCORES ; i++) { |
| 5860 | gaudi2_init_dcore_mme(hdev, i, false);
| 5861 | |
| 5862 | gaudi2->hw_cap_initialized |= BIT_ULL(HW_CAP_MME_SHIFT + i); |
| 5863 | } |
| 5864 | } |
| 5865 | |
| 5866 | static void gaudi2_init_tpc_cfg(struct hl_device *hdev, u32 reg_base) |
| 5867 | { |
| 5868 | /* Mask arithmetic and QM interrupts in TPC */ |
| 5869 | WREG32(reg_base + TPC_CFG_TPC_INTR_MASK_OFFSET, 0x23FFFE); |
| 5870 | |
| 5871 | /* Set 16 cache lines */ |
| 5872 | WREG32(reg_base + TPC_CFG_MSS_CONFIG_OFFSET, |
| 5873 | 2 << DCORE0_TPC0_CFG_MSS_CONFIG_ICACHE_FETCH_LINE_NUM_SHIFT); |
| 5874 | } |
| 5875 | |
| 5876 | struct gaudi2_tpc_init_cfg_data { |
| 5877 | enum gaudi2_queue_id dcore_tpc_qid_base[NUM_OF_DCORES]; |
| 5878 | }; |
| 5879 | |
| 5880 | static void gaudi2_init_tpc_config(struct hl_device *hdev, int dcore, int inst, |
| 5881 | u32 offset, struct iterate_module_ctx *ctx) |
| 5882 | { |
| 5883 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 5884 | struct gaudi2_tpc_init_cfg_data *cfg_data = ctx->data; |
| 5885 | u32 queue_id_base; |
| 5886 | u8 seq; |
| 5887 | |
| 5888 | queue_id_base = cfg_data->dcore_tpc_qid_base[dcore] + (inst * NUM_OF_PQ_PER_QMAN); |
| 5889 | |
| 5890 | if (dcore == 0 && inst == (NUM_DCORE0_TPC - 1)) |
| 5891 | /* the extra DCORE0 TPC gets the last sequence number */
| 5892 | seq = NUM_OF_DCORES * NUM_OF_TPC_PER_DCORE; |
| 5893 | else |
| 5894 | seq = dcore * NUM_OF_TPC_PER_DCORE + inst; |
| 5895 | |
| 5896 | gaudi2_init_tpc_cfg(hdev, mmDCORE0_TPC0_CFG_BASE + offset); |
| 5897 | gaudi2_init_qman(hdev, mmDCORE0_TPC0_QM_BASE + offset, queue_id_base); |
| 5898 | |
| 5899 | gaudi2->tpc_hw_cap_initialized |= BIT_ULL(HW_CAP_TPC_SHIFT + seq); |
| 5900 | } |
| 5901 | |
| 5902 | static void gaudi2_init_tpc(struct hl_device *hdev) |
| 5903 | { |
| 5904 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 5905 | struct gaudi2_tpc_init_cfg_data init_cfg_data; |
| 5906 | struct iterate_module_ctx tpc_iter; |
| 5907 | |
| 5908 | if (!hdev->asic_prop.tpc_enabled_mask) |
| 5909 | return; |
| 5910 | |
| 5911 | if ((gaudi2->tpc_hw_cap_initialized & HW_CAP_TPC_MASK) == HW_CAP_TPC_MASK) |
| 5912 | return; |
| 5913 | |
| 5914 | init_cfg_data.dcore_tpc_qid_base[0] = GAUDI2_QUEUE_ID_DCORE0_TPC_0_0; |
| 5915 | init_cfg_data.dcore_tpc_qid_base[1] = GAUDI2_QUEUE_ID_DCORE1_TPC_0_0; |
| 5916 | init_cfg_data.dcore_tpc_qid_base[2] = GAUDI2_QUEUE_ID_DCORE2_TPC_0_0; |
| 5917 | init_cfg_data.dcore_tpc_qid_base[3] = GAUDI2_QUEUE_ID_DCORE3_TPC_0_0; |
| 5918 | tpc_iter.fn = &gaudi2_init_tpc_config; |
| 5919 | tpc_iter.data = &init_cfg_data; |
| 5920 | gaudi2_iterate_tpcs(hdev, &tpc_iter);
| 5921 | } |
| 5922 | |
| 5923 | static void gaudi2_init_rotator(struct hl_device *hdev) |
| 5924 | { |
| 5925 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 5926 | u32 i, reg_base, queue_id; |
| 5927 | |
| 5928 | queue_id = GAUDI2_QUEUE_ID_ROT_0_0; |
| 5929 | |
| 5930 | for (i = 0 ; i < NUM_OF_ROT ; i++, queue_id += NUM_OF_PQ_PER_QMAN) { |
| 5931 | reg_base = gaudi2_qm_blocks_bases[queue_id]; |
| 5932 | gaudi2_init_qman(hdev, reg_base, queue_id);
| 5933 | |
| 5934 | gaudi2->hw_cap_initialized |= BIT_ULL(HW_CAP_ROT_SHIFT + i); |
| 5935 | } |
| 5936 | } |
| 5937 | |
| 5938 | static void gaudi2_init_vdec_brdg_ctrl(struct hl_device *hdev, u64 base_addr, u32 decoder_id) |
| 5939 | { |
| 5940 | u32 sob_id; |
| 5941 | |
| 5942 | /* VCMD normal interrupt */ |
| 5943 | sob_id = GAUDI2_RESERVED_SOB_DEC_NRM_FIRST + decoder_id; |
| 5944 | WREG32(base_addr + BRDG_CTRL_NRM_MSIX_LBW_AWADDR, |
| 5945 | mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_id * sizeof(u32)); |
| 5946 | WREG32(base_addr + BRDG_CTRL_NRM_MSIX_LBW_WDATA, GAUDI2_SOB_INCREMENT_BY_ONE); |
| 5947 | |
| 5948 | /* VCMD abnormal interrupt */ |
| 5949 | sob_id = GAUDI2_RESERVED_SOB_DEC_ABNRM_FIRST + decoder_id; |
| 5950 | WREG32(base_addr + BRDG_CTRL_ABNRM_MSIX_LBW_AWADDR, |
| 5951 | mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_id * sizeof(u32)); |
| 5952 | WREG32(base_addr + BRDG_CTRL_ABNRM_MSIX_LBW_WDATA, GAUDI2_SOB_INCREMENT_BY_ONE); |
| 5953 | } |
| 5954 | |
| 5955 | static void gaudi2_init_dec(struct hl_device *hdev) |
| 5956 | { |
| 5957 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 5958 | u32 dcore_id, dec_id, dec_bit; |
| 5959 | u64 base_addr; |
| 5960 | |
| 5961 | if (!hdev->asic_prop.decoder_enabled_mask) |
| 5962 | return; |
| 5963 | |
| 5964 | if ((gaudi2->dec_hw_cap_initialized & HW_CAP_DEC_MASK) == HW_CAP_DEC_MASK) |
| 5965 | return; |
| 5966 | |
| 5967 | for (dcore_id = 0 ; dcore_id < NUM_OF_DCORES ; dcore_id++) |
| 5968 | for (dec_id = 0 ; dec_id < NUM_OF_DEC_PER_DCORE ; dec_id++) { |
| 5969 | dec_bit = dcore_id * NUM_OF_DEC_PER_DCORE + dec_id; |
| 5970 | |
| 5971 | if (!(hdev->asic_prop.decoder_enabled_mask & BIT(dec_bit))) |
| 5972 | continue; |
| 5973 | |
| 5974 | base_addr = mmDCORE0_DEC0_CMD_BASE + |
| 5975 | BRDG_CTRL_BLOCK_OFFSET + |
| 5976 | dcore_id * DCORE_OFFSET + |
| 5977 | dec_id * DCORE_VDEC_OFFSET; |
| 5978 | |
| 5979 | gaudi2_init_vdec_brdg_ctrl(hdev, base_addr, dec_bit);
| 5980 | |
| 5981 | gaudi2->dec_hw_cap_initialized |= BIT_ULL(HW_CAP_DEC_SHIFT + dec_bit); |
| 5982 | } |
| 5983 | |
| 5984 | for (dec_id = 0 ; dec_id < NUM_OF_PCIE_VDEC ; dec_id++) { |
| 5985 | dec_bit = PCIE_DEC_SHIFT + dec_id; |
| 5986 | if (!(hdev->asic_prop.decoder_enabled_mask & BIT(dec_bit))) |
| 5987 | continue; |
| 5988 | |
| 5989 | base_addr = mmPCIE_DEC0_CMD_BASE + BRDG_CTRL_BLOCK_OFFSET + |
| 5990 | dec_id * DCORE_VDEC_OFFSET; |
| 5991 | |
| 5992 | gaudi2_init_vdec_brdg_ctrl(hdev, base_addr, dec_bit);
| 5993 | |
| 5994 | gaudi2->dec_hw_cap_initialized |= BIT_ULL(HW_CAP_DEC_SHIFT + dec_bit); |
| 5995 | } |
| 5996 | } |
| 5997 | |
| 5998 | static int gaudi2_mmu_update_asid_hop0_addr(struct hl_device *hdev, |
| 5999 | u32 stlb_base, u32 asid, u64 phys_addr) |
| 6000 | { |
| 6001 | u32 status, timeout_usec; |
| 6002 | int rc; |
| 6003 | |
| 6004 | if (hdev->pldm || !hdev->pdev) |
| 6005 | timeout_usec = GAUDI2_PLDM_MMU_TIMEOUT_USEC; |
| 6006 | else |
| 6007 | timeout_usec = MMU_CONFIG_TIMEOUT_USEC; |
| 6008 | |
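| | /* Program the hop0 PA for this ASID, then set the busy bit (bit 31); the poll below waits for HW to clear it */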
| 6009 | WREG32(stlb_base + STLB_ASID_OFFSET, asid); |
| 6010 | WREG32(stlb_base + STLB_HOP0_PA43_12_OFFSET, phys_addr >> MMU_HOP0_PA43_12_SHIFT); |
| 6011 | WREG32(stlb_base + STLB_HOP0_PA63_44_OFFSET, phys_addr >> MMU_HOP0_PA63_44_SHIFT); |
| 6012 | WREG32(stlb_base + STLB_BUSY_OFFSET, 0x80000000); |
| 6013 | |
| 6014 | rc = hl_poll_timeout( |
| 6015 | hdev, |
| 6016 | stlb_base + STLB_BUSY_OFFSET, |
| 6017 | status, |
| 6018 | !(status & 0x80000000), |
| 6019 | 1000, |
| 6020 | timeout_usec); |
| 6021 | |
| 6022 | if (rc) { |
| 6023 | dev_err(hdev->dev, "Timeout during MMU hop0 config of asid %d\n" , asid); |
| 6024 | return rc; |
| 6025 | } |
| 6026 | |
| 6027 | return 0; |
| 6028 | } |
| 6029 | |
| 6030 | static void gaudi2_mmu_send_invalidate_cache_cmd(struct hl_device *hdev, u32 stlb_base, |
| 6031 | u32 start_offset, u32 inv_start_val, |
| 6032 | u32 flags) |
| 6033 | { |
| 6034 | /* clear PMMU mem line cache (only needed in mmu range invalidation) */ |
| 6035 | if (flags & MMU_OP_CLEAR_MEMCACHE) |
| 6036 | WREG32(mmPMMU_HBW_STLB_MEM_CACHE_INVALIDATION, 0x1); |
| 6037 | |
| 6038 | if (flags & MMU_OP_SKIP_LOW_CACHE_INV) |
| 6039 | return; |
| 6040 | |
| 6041 | WREG32(stlb_base + start_offset, inv_start_val); |
| 6042 | } |
| 6043 | |
| 6044 | static int gaudi2_mmu_invalidate_cache_status_poll(struct hl_device *hdev, u32 stlb_base, |
| 6045 | struct gaudi2_cache_invld_params *inv_params) |
| 6046 | { |
| 6047 | u32 status, timeout_usec, start_offset; |
| 6048 | int rc; |
| 6049 | |
| 6050 | timeout_usec = (hdev->pldm) ? GAUDI2_PLDM_MMU_TIMEOUT_USEC : |
| 6051 | GAUDI2_MMU_CACHE_INV_TIMEOUT_USEC; |
| 6052 | |
| 6053 | /* poll PMMU mem line cache (only needed in mmu range invalidation) */ |
| 6054 | if (inv_params->flags & MMU_OP_CLEAR_MEMCACHE) { |
| 6055 | rc = hl_poll_timeout( |
| 6056 | hdev, |
| 6057 | mmPMMU_HBW_STLB_MEM_CACHE_INV_STATUS, |
| 6058 | status, |
| 6059 | status & 0x1, |
| 6060 | 1000, |
| 6061 | timeout_usec); |
| 6062 | |
| 6063 | if (rc) |
| 6064 | return rc; |
| 6065 | |
| 6066 | /* Need to manually reset the status to 0 */ |
| 6067 | WREG32(mmPMMU_HBW_STLB_MEM_CACHE_INV_STATUS, 0x0); |
| 6068 | } |
| 6069 | |
| 6070 | /* Lower cache does not work with cache lines, hence we can skip its |
| 6071 | * invalidation upon map and invalidate only upon unmap |
| 6072 | */ |
| 6073 | if (inv_params->flags & MMU_OP_SKIP_LOW_CACHE_INV) |
| 6074 | return 0; |
| 6075 | |
| 6076 | start_offset = inv_params->range_invalidation ? |
| 6077 | STLB_RANGE_CACHE_INVALIDATION_OFFSET : STLB_INV_ALL_START_OFFSET; |
| 6078 | |
| 6079 | rc = hl_poll_timeout( |
| 6080 | hdev, |
| 6081 | stlb_base + start_offset, |
| 6082 | status, |
| 6083 | !(status & 0x1), |
| 6084 | 1000, |
| 6085 | timeout_usec); |
| 6086 | |
| 6087 | return rc; |
| 6088 | } |
| 6089 | |
| 6090 | bool gaudi2_is_hmmu_enabled(struct hl_device *hdev, int dcore_id, int hmmu_id) |
| 6091 | { |
| 6092 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 6093 | u32 hw_cap; |
| 6094 | |
| 6095 | hw_cap = HW_CAP_DCORE0_DMMU0 << (NUM_OF_HMMU_PER_DCORE * dcore_id + hmmu_id); |
| 6096 | |
| 6097 | if (gaudi2->hw_cap_initialized & hw_cap) |
| 6098 | return true; |
| 6099 | |
| 6100 | return false; |
| 6101 | } |
| 6102 | |
| 6103 | /* this function shall be called only for HMMUs for which capability bit is set */ |
| 6104 | static inline u32 get_hmmu_stlb_base(int dcore_id, int hmmu_id) |
| 6105 | { |
| 6106 | u32 offset; |
| 6107 | |
| 6108 | offset = (u32) (dcore_id * DCORE_OFFSET + hmmu_id * DCORE_HMMU_OFFSET); |
| 6109 | return (u32)(mmDCORE0_HMMU0_STLB_BASE + offset); |
| 6110 | } |
| 6111 | |
| 6112 | static void gaudi2_mmu_invalidate_cache_trigger(struct hl_device *hdev, u32 stlb_base, |
| 6113 | struct gaudi2_cache_invld_params *inv_params) |
| 6114 | { |
| 6115 | u32 start_offset; |
| 6116 | |
| 6117 | if (inv_params->range_invalidation) { |
| 6118 | /* Set the addresses range.
| 6119 | * Note: by design, the start address we write to the register is not
| 6120 | * included in the invalidation range.
| 6121 | * That's why we need to set an address lower than the first one we
| 6122 | * actually want to be included in the range invalidation.
| 6123 | */
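|      | /* Worked example (address is illustrative only): for VA 0x1000 to be
|      | * the first address in the invalidated range, the register must be
|      | * programmed with start = 0x1000 - 1 = 0xFFF.
|      | */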
| 6124 | u64 start = inv_params->start_va - 1; |
| 6125 | |
| 6126 | start_offset = STLB_RANGE_CACHE_INVALIDATION_OFFSET; |
| 6127 | |
| 6128 | WREG32(stlb_base + STLB_RANGE_INV_START_LSB_OFFSET, |
| 6129 | start >> MMU_RANGE_INV_VA_LSB_SHIFT); |
| 6130 | |
| 6131 | WREG32(stlb_base + STLB_RANGE_INV_START_MSB_OFFSET, |
| 6132 | start >> MMU_RANGE_INV_VA_MSB_SHIFT); |
| 6133 | |
| 6134 | WREG32(stlb_base + STLB_RANGE_INV_END_LSB_OFFSET, |
| 6135 | inv_params->end_va >> MMU_RANGE_INV_VA_LSB_SHIFT); |
| 6136 | |
| 6137 | WREG32(stlb_base + STLB_RANGE_INV_END_MSB_OFFSET, |
| 6138 | inv_params->end_va >> MMU_RANGE_INV_VA_MSB_SHIFT); |
| 6139 | } else { |
| 6140 | start_offset = STLB_INV_ALL_START_OFFSET; |
| 6141 | } |
| 6142 | |
| 6143 | gaudi2_mmu_send_invalidate_cache_cmd(hdev, stlb_base, start_offset, |
| 6144 | inv_params->inv_start_val, inv_params->flags);
| 6145 | } |
| 6146 | |
| 6147 | static inline void gaudi2_hmmu_invalidate_cache_trigger(struct hl_device *hdev, |
| 6148 | int dcore_id, int hmmu_id, |
| 6149 | struct gaudi2_cache_invld_params *inv_params) |
| 6150 | { |
| 6151 | u32 stlb_base = get_hmmu_stlb_base(dcore_id, hmmu_id); |
| 6152 | |
| 6153 | gaudi2_mmu_invalidate_cache_trigger(hdev, stlb_base, inv_params); |
| 6154 | } |
| 6155 | |
| 6156 | static inline int gaudi2_hmmu_invalidate_cache_status_poll(struct hl_device *hdev, |
| 6157 | int dcore_id, int hmmu_id, |
| 6158 | struct gaudi2_cache_invld_params *inv_params) |
| 6159 | { |
| 6160 | u32 stlb_base = get_hmmu_stlb_base(dcore_id, hmmu_id); |
| 6161 | |
| 6162 | return gaudi2_mmu_invalidate_cache_status_poll(hdev, stlb_base, inv_params); |
| 6163 | } |
| 6164 | |
| 6165 | static int gaudi2_hmmus_invalidate_cache(struct hl_device *hdev, |
| 6166 | struct gaudi2_cache_invld_params *inv_params) |
| 6167 | { |
| 6168 | int dcore_id, hmmu_id; |
| 6169 | |
| 6170 | /* first send all invalidation commands */ |
| 6171 | for (dcore_id = 0 ; dcore_id < NUM_OF_DCORES ; dcore_id++) { |
| 6172 | for (hmmu_id = 0 ; hmmu_id < NUM_OF_HMMU_PER_DCORE ; hmmu_id++) { |
| 6173 | if (!gaudi2_is_hmmu_enabled(hdev, dcore_id, hmmu_id)) |
| 6174 | continue; |
| 6175 | |
| 6176 | gaudi2_hmmu_invalidate_cache_trigger(hdev, dcore_id, hmmu_id, inv_params); |
| 6177 | } |
| 6178 | } |
| 6179 | |
| 6180 | /* next, poll all invalidations status */ |
| 6181 | for (dcore_id = 0 ; dcore_id < NUM_OF_DCORES ; dcore_id++) { |
| 6182 | for (hmmu_id = 0 ; hmmu_id < NUM_OF_HMMU_PER_DCORE ; hmmu_id++) { |
| 6183 | int rc; |
| 6184 | |
| 6185 | if (!gaudi2_is_hmmu_enabled(hdev, dcore_id, hmmu_id)) |
| 6186 | continue; |
| 6187 | |
| 6188 | rc = gaudi2_hmmu_invalidate_cache_status_poll(hdev, dcore_id, hmmu_id, |
| 6189 | inv_params); |
| 6190 | if (rc) |
| 6191 | return rc; |
| 6192 | } |
| 6193 | } |
| 6194 | |
| 6195 | return 0; |
| 6196 | } |
| 6197 | |
| 6198 | static int gaudi2_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags) |
| 6199 | { |
| 6200 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 6201 | struct gaudi2_cache_invld_params invld_params; |
| 6202 | int rc = 0; |
| 6203 | |
| 6204 | if (hdev->reset_info.hard_reset_pending) |
| 6205 | return rc; |
| 6206 | |
| 6207 | invld_params.range_invalidation = false; |
| 6208 | invld_params.inv_start_val = 1; |
| 6209 | |
| 6210 | if ((flags & MMU_OP_USERPTR) && (gaudi2->hw_cap_initialized & HW_CAP_PMMU)) { |
| 6211 | invld_params.flags = flags; |
| 6212 | gaudi2_mmu_invalidate_cache_trigger(hdev, mmPMMU_HBW_STLB_BASE, &invld_params);
| 6213 | rc = gaudi2_mmu_invalidate_cache_status_poll(hdev, mmPMMU_HBW_STLB_BASE, |
| 6214 | inv_params: &invld_params); |
| 6215 | } else if (flags & MMU_OP_PHYS_PACK) { |
| 6216 | invld_params.flags = 0; |
| 6217 | rc = gaudi2_hmmus_invalidate_cache(hdev, &invld_params);
| 6218 | } |
| 6219 | |
| 6220 | return rc; |
| 6221 | } |
| 6222 | |
| 6223 | static int gaudi2_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard, |
| 6224 | u32 flags, u32 asid, u64 va, u64 size) |
| 6225 | { |
| 6226 | struct gaudi2_cache_invld_params invld_params = {0}; |
| 6227 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 6228 | u64 start_va, end_va; |
| 6229 | u32 inv_start_val; |
| 6230 | int rc = 0; |
| 6231 | |
| 6232 | if (hdev->reset_info.hard_reset_pending) |
| 6233 | return 0; |
| 6234 | |
| 6235 | inv_start_val = (1 << MMU_RANGE_INV_EN_SHIFT | |
| 6236 | 1 << MMU_RANGE_INV_ASID_EN_SHIFT | |
| 6237 | asid << MMU_RANGE_INV_ASID_SHIFT); |
| 6238 | start_va = va; |
| 6239 | end_va = start_va + size; |
| 6240 | |
| 6241 | if ((flags & MMU_OP_USERPTR) && (gaudi2->hw_cap_initialized & HW_CAP_PMMU)) { |
| 6242 | /* As range invalidation does not support a zero start address, we
| 6243 | * do a full invalidation in this case
| 6244 | */ |
| 6245 | if (start_va) { |
| 6246 | invld_params.range_invalidation = true; |
| 6247 | invld_params.start_va = start_va; |
| 6248 | invld_params.end_va = end_va; |
| 6249 | invld_params.inv_start_val = inv_start_val; |
| 6250 | invld_params.flags = flags | MMU_OP_CLEAR_MEMCACHE; |
| 6251 | } else { |
| 6252 | invld_params.range_invalidation = false; |
| 6253 | invld_params.inv_start_val = 1; |
| 6254 | invld_params.flags = flags; |
| 6255 | } |
| 6256 | 
| 6258 | gaudi2_mmu_invalidate_cache_trigger(hdev, mmPMMU_HBW_STLB_BASE, &invld_params);
| 6259 | rc = gaudi2_mmu_invalidate_cache_status_poll(hdev, mmPMMU_HBW_STLB_BASE, |
| 6260 | inv_params: &invld_params); |
| 6261 | if (rc) |
| 6262 | return rc; |
| 6263 | |
| 6264 | } else if (flags & MMU_OP_PHYS_PACK) { |
| 6265 | invld_params.start_va = gaudi2_mmu_scramble_addr(hdev, start_va);
| 6266 | invld_params.end_va = gaudi2_mmu_scramble_addr(hdev, end_va);
| 6267 | invld_params.inv_start_val = inv_start_val; |
| 6268 | invld_params.flags = flags; |
| 6269 | rc = gaudi2_hmmus_invalidate_cache(hdev, &invld_params);
| 6270 | } |
| 6271 | |
| 6272 | return rc; |
| 6273 | } |
| 6274 | |
| 6275 | static int gaudi2_mmu_update_hop0_addr(struct hl_device *hdev, u32 stlb_base, |
| 6276 | bool host_resident_pgt) |
| 6277 | { |
| 6278 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 6279 | u64 hop0_addr; |
| 6280 | u32 asid, max_asid = prop->max_asid; |
| 6281 | int rc; |
| 6282 | |
| 6283 | /* it takes too much time to init all of the ASIDs on palladium */ |
| 6284 | if (hdev->pldm) |
| 6285 | max_asid = min((u32) 8, max_asid); |
| 6286 | |
| 6287 | for (asid = 0 ; asid < max_asid ; asid++) { |
| 6288 | if (host_resident_pgt) |
| 6289 | hop0_addr = hdev->mmu_priv.hr.mmu_asid_hop0[asid].phys_addr; |
| 6290 | else |
| 6291 | hop0_addr = prop->mmu_pgt_addr + (asid * prop->dmmu.hop_table_size); |
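|      | /* Worked example (hop_table_size is device-specific; 0x1000 is only an
|      | * illustration): asid 3 would get its hop0 table at
|      | * mmu_pgt_addr + 3 * 0x1000 = mmu_pgt_addr + 0x3000.
|      | */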
| 6292 | |
| 6293 | rc = gaudi2_mmu_update_asid_hop0_addr(hdev, stlb_base, asid, hop0_addr);
| 6294 | if (rc) {
| 6295 | dev_err(hdev->dev, "failed to set hop0 addr for asid %d\n", asid);
| 6296 | return rc; |
| 6297 | } |
| 6298 | } |
| 6299 | |
| 6300 | return 0; |
| 6301 | } |
| 6302 | |
| 6303 | static int gaudi2_mmu_init_common(struct hl_device *hdev, u32 mmu_base, u32 stlb_base, |
| 6304 | bool host_resident_pgt) |
| 6305 | { |
| 6306 | u32 status, timeout_usec; |
| 6307 | int rc; |
| 6308 | |
| 6309 | if (hdev->pldm || !hdev->pdev) |
| 6310 | timeout_usec = GAUDI2_PLDM_MMU_TIMEOUT_USEC; |
| 6311 | else |
| 6312 | timeout_usec = GAUDI2_MMU_CACHE_INV_TIMEOUT_USEC; |
| 6313 | |
| 6314 | WREG32(stlb_base + STLB_INV_ALL_START_OFFSET, 1); |
| 6315 | |
| 6316 | rc = hl_poll_timeout( |
| 6317 | hdev, |
| 6318 | stlb_base + STLB_SRAM_INIT_OFFSET, |
| 6319 | status, |
| 6320 | !status, |
| 6321 | 1000, |
| 6322 | timeout_usec); |
| 6323 | |
| 6324 | if (rc) |
| 6325 | dev_notice_ratelimited(hdev->dev, "Timeout when waiting for MMU SRAM init\n" ); |
| 6326 | |
| 6327 | rc = gaudi2_mmu_update_hop0_addr(hdev, stlb_base, host_resident_pgt); |
| 6328 | if (rc) |
| 6329 | return rc; |
| 6330 | |
| 6331 | WREG32(mmu_base + MMU_BYPASS_OFFSET, 0); |
| 6332 | |
| 6333 | rc = hl_poll_timeout( |
| 6334 | hdev, |
| 6335 | stlb_base + STLB_INV_ALL_START_OFFSET, |
| 6336 | status, |
| 6337 | !status, |
| 6338 | 1000, |
| 6339 | timeout_usec); |
| 6340 | |
| 6341 | if (rc) |
| 6342 | dev_notice_ratelimited(hdev->dev, "Timeout when waiting for MMU invalidate all\n" ); |
| 6343 | |
| 6344 | WREG32(mmu_base + MMU_ENABLE_OFFSET, 1); |
| 6345 | |
| 6346 | return rc; |
| 6347 | } |
| 6348 | |
| 6349 | static int gaudi2_pci_mmu_init(struct hl_device *hdev) |
| 6350 | { |
| 6351 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 6352 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 6353 | u32 mmu_base, stlb_base; |
| 6354 | int rc; |
| 6355 | |
| 6356 | if (gaudi2->hw_cap_initialized & HW_CAP_PMMU) |
| 6357 | return 0; |
| 6358 | |
| 6359 | mmu_base = mmPMMU_HBW_MMU_BASE; |
| 6360 | stlb_base = mmPMMU_HBW_STLB_BASE; |
| 6361 | |
| 6362 | RMWREG32_SHIFTED(stlb_base + STLB_HOP_CONFIGURATION_OFFSET, |
| 6363 | (0 << PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_HOP_SHIFT) | |
| 6364 | (5 << PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_SMALL_P_SHIFT) | |
| 6365 | (4 << PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_LARGE_P_SHIFT) | |
| 6366 | (5 << PMMU_HBW_STLB_HOP_CONFIGURATION_LAST_HOP_SHIFT) | |
| 6367 | (5 << PMMU_HBW_STLB_HOP_CONFIGURATION_FOLLOWER_HOP_SHIFT), |
| 6368 | PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_HOP_MASK | |
| 6369 | PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_SMALL_P_MASK | |
| 6370 | PMMU_HBW_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_LARGE_P_MASK | |
| 6371 | PMMU_HBW_STLB_HOP_CONFIGURATION_LAST_HOP_MASK | |
| 6372 | PMMU_HBW_STLB_HOP_CONFIGURATION_FOLLOWER_HOP_MASK); |
| 6373 | |
| 6374 | WREG32(stlb_base + STLB_LL_LOOKUP_MASK_63_32_OFFSET, 0); |
| 6375 | |
| 6376 | if (PAGE_SIZE == SZ_64K) { |
| 6377 | /* Set page sizes to 64K on hop5 and 16M on hop4 + enable 8 bit hops */ |
| 6378 | RMWREG32_SHIFTED(mmu_base + MMU_STATIC_MULTI_PAGE_SIZE_OFFSET, |
| 6379 | FIELD_PREP(DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP5_PAGE_SIZE_MASK, 4) | |
| 6380 | FIELD_PREP(DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP4_PAGE_SIZE_MASK, 3) | |
| 6381 | FIELD_PREP( |
| 6382 | DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_CFG_8_BITS_HOP_MODE_EN_MASK, |
| 6383 | 1), |
| 6384 | DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP5_PAGE_SIZE_MASK | |
| 6385 | DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_HOP4_PAGE_SIZE_MASK | |
| 6386 | DCORE0_HMMU0_MMU_STATIC_MULTI_PAGE_SIZE_CFG_8_BITS_HOP_MODE_EN_MASK); |
| 6387 | } |
| 6388 | |
| 6389 | WREG32(mmu_base + MMU_SPI_SEI_MASK_OFFSET, GAUDI2_PMMU_SPI_SEI_ENABLE_MASK); |
| 6390 | |
| 6391 | rc = gaudi2_mmu_init_common(hdev, mmu_base, stlb_base, prop->pmmu.host_resident);
| 6392 | if (rc) |
| 6393 | return rc; |
| 6394 | |
| 6395 | gaudi2->hw_cap_initialized |= HW_CAP_PMMU; |
| 6396 | |
| 6397 | return 0; |
| 6398 | } |
| 6399 | |
| 6400 | static int gaudi2_dcore_hmmu_init(struct hl_device *hdev, int dcore_id, |
| 6401 | int hmmu_id) |
| 6402 | { |
| 6403 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 6404 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 6405 | u32 offset, mmu_base, stlb_base, hw_cap; |
| 6406 | u8 dmmu_seq; |
| 6407 | int rc; |
| 6408 | |
| 6409 | dmmu_seq = NUM_OF_HMMU_PER_DCORE * dcore_id + hmmu_id; |
| 6410 | hw_cap = HW_CAP_DCORE0_DMMU0 << dmmu_seq; |
| 6411 | |
| 6412 | /* |
| 6413 | * return if DMMU is already initialized or if it's not out of |
| 6414 | * isolation (due to cluster binning) |
| 6415 | */ |
| 6416 | if ((gaudi2->hw_cap_initialized & hw_cap) || !(prop->hmmu_hif_enabled_mask & BIT(dmmu_seq))) |
| 6417 | return 0; |
| 6418 | |
| 6419 | offset = (u32) (dcore_id * DCORE_OFFSET + hmmu_id * DCORE_HMMU_OFFSET); |
| 6420 | mmu_base = mmDCORE0_HMMU0_MMU_BASE + offset; |
| 6421 | stlb_base = mmDCORE0_HMMU0_STLB_BASE + offset; |
| 6422 | |
| 6423 | RMWREG32(mmu_base + MMU_STATIC_MULTI_PAGE_SIZE_OFFSET, 5 /* 64MB */, |
| 6424 | MMU_STATIC_MULTI_PAGE_SIZE_HOP4_PAGE_SIZE_MASK); |
| 6425 | |
| 6426 | RMWREG32_SHIFTED(stlb_base + STLB_HOP_CONFIGURATION_OFFSET, |
| 6427 | FIELD_PREP(DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_HOP_MASK, 0) | |
| 6428 | FIELD_PREP(DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_SMALL_P_MASK, 3) | |
| 6429 | FIELD_PREP(DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_LARGE_P_MASK, 3) | |
| 6430 | FIELD_PREP(DCORE0_HMMU0_STLB_HOP_CONFIGURATION_LAST_HOP_MASK, 3) | |
| 6431 | FIELD_PREP(DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FOLLOWER_HOP_MASK, 3), |
| 6432 | DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_HOP_MASK | |
| 6433 | DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_SMALL_P_MASK | |
| 6434 | DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FIRST_LOOKUP_HOP_LARGE_P_MASK | |
| 6435 | DCORE0_HMMU0_STLB_HOP_CONFIGURATION_LAST_HOP_MASK | |
| 6436 | DCORE0_HMMU0_STLB_HOP_CONFIGURATION_FOLLOWER_HOP_MASK); |
| 6437 | |
| 6438 | RMWREG32(stlb_base + STLB_HOP_CONFIGURATION_OFFSET, 1, |
| 6439 | STLB_HOP_CONFIGURATION_ONLY_LARGE_PAGE_MASK); |
| 6440 | |
| 6441 | WREG32(mmu_base + MMU_SPI_SEI_MASK_OFFSET, GAUDI2_HMMU_SPI_SEI_ENABLE_MASK); |
| 6442 | |
| 6443 | rc = gaudi2_mmu_init_common(hdev, mmu_base, stlb_base, prop->dmmu.host_resident);
| 6444 | if (rc) |
| 6445 | return rc; |
| 6446 | |
| 6447 | gaudi2->hw_cap_initialized |= hw_cap; |
| 6448 | |
| 6449 | return 0; |
| 6450 | } |
| 6451 | |
| 6452 | static int gaudi2_hbm_mmu_init(struct hl_device *hdev) |
| 6453 | { |
| 6454 | int rc, dcore_id, hmmu_id; |
| 6455 | |
| 6456 | for (dcore_id = 0 ; dcore_id < NUM_OF_DCORES ; dcore_id++) |
| 6457 | for (hmmu_id = 0 ; hmmu_id < NUM_OF_HMMU_PER_DCORE; hmmu_id++) { |
| 6458 | rc = gaudi2_dcore_hmmu_init(hdev, dcore_id, hmmu_id); |
| 6459 | if (rc) |
| 6460 | return rc; |
| 6461 | } |
| 6462 | |
| 6463 | return 0; |
| 6464 | } |
| 6465 | |
| 6466 | static int gaudi2_mmu_init(struct hl_device *hdev) |
| 6467 | { |
| 6468 | int rc; |
| 6469 | |
| 6470 | rc = gaudi2_pci_mmu_init(hdev); |
| 6471 | if (rc) |
| 6472 | return rc; |
| 6473 | |
| 6474 | rc = gaudi2_hbm_mmu_init(hdev); |
| 6475 | if (rc) |
| 6476 | return rc; |
| 6477 | |
| 6478 | return 0; |
| 6479 | } |
| 6480 | |
| 6481 | static int gaudi2_hw_init(struct hl_device *hdev) |
| 6482 | { |
| 6483 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 6484 | int rc; |
| 6485 | |
| 6486 | /* Let's mark in the H/W that we have reached this point. We check |
| 6487 | * this value in the reset_before_init function to understand whether |
| 6488 | * we need to reset the chip before doing H/W init. This register is |
| 6489 | * cleared by the H/W upon H/W reset |
| 6490 | */ |
| 6491 | WREG32(mmHW_STATE, HL_DEVICE_HW_STATE_DIRTY); |
| 6492 | |
| 6493 | /* Perform read from the device to make sure device is up */ |
| 6494 | RREG32(mmHW_STATE); |
| 6495 | |
| 6496 | /* If iATU is done by FW, the HBM bar ALWAYS points to DRAM_PHYS_BASE. |
| 6497 | * So we set it here and if anyone tries to move it later to |
| 6498 | * a different address, there will be an error |
| 6499 | */ |
| 6500 | if (hdev->asic_prop.iatu_done_by_fw) |
| 6501 | gaudi2->dram_bar_cur_addr = DRAM_PHYS_BASE; |
| 6502 | |
| 6503 | /* |
| 6504 | * Before pushing u-boot/linux to the device, we need to set the HBM bar to
| 6505 | * the base address of the DRAM
| 6506 | */ |
| 6507 | if (gaudi2_set_hbm_bar_base(hdev, DRAM_PHYS_BASE) == U64_MAX) { |
| 6508 | dev_err(hdev->dev, "failed to map HBM bar to DRAM base address\n" ); |
| 6509 | return -EIO; |
| 6510 | } |
| 6511 | |
| 6512 | rc = gaudi2_init_cpu(hdev); |
| 6513 | if (rc) { |
| 6514 | dev_err(hdev->dev, "failed to initialize CPU\n" ); |
| 6515 | return rc; |
| 6516 | } |
| 6517 | |
| 6518 | gaudi2_init_scrambler_hbm(hdev); |
| 6519 | gaudi2_init_kdma(hdev); |
| 6520 | |
| 6521 | rc = gaudi2_init_cpu_queues(hdev, GAUDI2_CPU_TIMEOUT_USEC); |
| 6522 | if (rc) { |
| 6523 | dev_err(hdev->dev, "failed to initialize CPU H/W queues %d\n" , rc); |
| 6524 | return rc; |
| 6525 | } |
| 6526 | |
| 6527 | rc = gaudi2->cpucp_info_get(hdev); |
| 6528 | if (rc) { |
| 6529 | dev_err(hdev->dev, "Failed to get cpucp info\n" ); |
| 6530 | return rc; |
| 6531 | } |
| 6532 | |
| 6533 | rc = gaudi2_mmu_init(hdev); |
| 6534 | if (rc) |
| 6535 | return rc; |
| 6536 | |
| 6537 | gaudi2_init_pdma(hdev); |
| 6538 | gaudi2_init_edma(hdev); |
| 6539 | gaudi2_init_sm(hdev); |
| 6540 | gaudi2_init_tpc(hdev); |
| 6541 | gaudi2_init_mme(hdev); |
| 6542 | gaudi2_init_rotator(hdev); |
| 6543 | gaudi2_init_dec(hdev); |
| 6544 | gaudi2_enable_timestamp(hdev); |
| 6545 | |
| 6546 | rc = gaudi2_coresight_init(hdev); |
| 6547 | if (rc) |
| 6548 | goto disable_queues; |
| 6549 | |
| 6550 | rc = gaudi2_enable_msix(hdev); |
| 6551 | if (rc) |
| 6552 | goto disable_queues; |
| 6553 | |
| 6554 | /* Perform read from the device to flush all configuration */ |
| 6555 | RREG32(mmHW_STATE); |
| 6556 | |
| 6557 | return 0; |
| 6558 | |
| 6559 | disable_queues: |
| 6560 | gaudi2_disable_dma_qmans(hdev); |
| 6561 | gaudi2_disable_mme_qmans(hdev); |
| 6562 | gaudi2_disable_tpc_qmans(hdev); |
| 6563 | gaudi2_disable_rot_qmans(hdev); |
| 6564 | gaudi2_disable_nic_qmans(hdev); |
| 6565 | |
| 6566 | gaudi2_disable_timestamp(hdev); |
| 6567 | |
| 6568 | return rc; |
| 6569 | } |
| 6570 | |
| 6571 | /** |
| 6572 | * gaudi2_send_hard_reset_cmd - common function to handle reset |
| 6573 | * |
| 6574 | * @hdev: pointer to the habanalabs device structure |
| 6575 | * |
| 6576 | * This function handles the various possible scenarios for reset. |
| 6577 | * It considers whether reset is handled by the driver or the FW, and which FW components are loaded
| 6578 | */ |
| 6579 | static void gaudi2_send_hard_reset_cmd(struct hl_device *hdev) |
| 6580 | { |
| 6581 | struct cpu_dyn_regs *dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; |
| 6582 | bool heartbeat_reset, preboot_only, cpu_initialized = false; |
| 6583 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 6584 | u32 cpu_boot_status; |
| 6585 | |
| 6586 | preboot_only = (hdev->fw_loader.fw_comp_loaded == FW_TYPE_PREBOOT_CPU); |
| 6587 | heartbeat_reset = (hdev->reset_info.curr_reset_cause == HL_RESET_CAUSE_HEARTBEAT); |
| 6588 | |
| 6589 | /* |
| 6590 | * Handle the corner case where the failure occurred while loading the cpu
| 6591 | * management app, and the driver didn't detect any failure while loading the FW.
| 6592 | * In such a scenario the driver will send only HALT_MACHINE,
| 6593 | * and no one will respond to this request since the FW is already back in preboot
| 6594 | * and cannot handle such a cmd.
| 6595 | * In this case, the next time the management app loads it'll check the events register,
| 6596 | * which will still have the halt indication, and will reboot the device.
| 6597 | * The solution is to let preboot clear all relevant registers before the next boot,
| 6598 | * once the driver sends COMMS_RST_DEV.
| 6599 | */ |
| 6600 | cpu_boot_status = RREG32(mmPSOC_GLOBAL_CONF_CPU_BOOT_STATUS); |
| 6601 | |
| 6602 | if (gaudi2 && (gaudi2->hw_cap_initialized & HW_CAP_CPU) && |
| 6603 | (cpu_boot_status == CPU_BOOT_STATUS_SRAM_AVAIL)) |
| 6604 | cpu_initialized = true; |
| 6605 | |
| 6606 | /* |
| 6607 | * when Linux/Bootfit exists, this write to the SP can be interpreted in 2 ways:
| 6608 | * 1. FW reset: FW initiates the reset sequence
| 6609 | * 2. driver reset: FW will start the HALT sequence (the preparations for the
| 6610 | * reset, but not the reset itself, as it is not implemented
| 6611 | * on its part) and LKD will wait to let FW complete the
| 6612 | * sequence before issuing the reset
| 6613 | */ |
| 6614 | if (!preboot_only && cpu_initialized) { |
| 6615 | WREG32(le32_to_cpu(dyn_regs->gic_host_halt_irq), |
| 6616 | gaudi2_irq_map_table[GAUDI2_EVENT_CPU_HALT_MACHINE].cpu_id); |
| 6617 | |
| 6618 | msleep(GAUDI2_CPU_RESET_WAIT_MSEC); |
| 6619 | } |
| 6620 | |
| 6621 | /* |
| 6622 | * When working with preboot (without Linux/Boot fit) we can |
| 6623 | * communicate only using the COMMS commands to issue halt/reset. |
| 6624 | * |
| 6625 | * For the case in which we are working with Linux/Bootfit this is a hail-mary |
| 6626 | * attempt to revive the card in the small chance that the f/w has |
| 6627 | * experienced a watchdog event, which caused it to return back to preboot. |
| 6628 | * In that case, triggering reset through GIC won't help. We need to |
| 6629 | * trigger the reset as if Linux wasn't loaded. |
| 6630 | * |
| 6631 | * We do it only if the reset cause was HB, because that would be the |
| 6632 | * indication of such an event. |
| 6633 | * |
| 6634 | * In case the watchdog hasn't expired but we still got a HB failure, this won't
| 6635 | * do any damage.
| 6636 | */ |
| 6637 | |
| 6638 | if (heartbeat_reset || preboot_only || !cpu_initialized) { |
| 6639 | if (hdev->asic_prop.hard_reset_done_by_fw) |
| 6640 | hl_fw_ask_hard_reset_without_linux(hdev); |
| 6641 | else |
| 6642 | hl_fw_ask_halt_machine_without_linux(hdev); |
| 6643 | } |
| 6644 | } |
| 6645 | |
| 6646 | /** |
| 6647 | * gaudi2_execute_hard_reset - execute hard reset by driver/FW |
| 6648 | * |
| 6649 | * @hdev: pointer to the habanalabs device structure |
| 6650 | * |
| 6651 | * This function executes a hard reset, based on whether the driver or the FW should perform it
| 6652 | */ |
| 6653 | static void gaudi2_execute_hard_reset(struct hl_device *hdev) |
| 6654 | { |
| 6655 | if (hdev->asic_prop.hard_reset_done_by_fw) { |
| 6656 | gaudi2_send_hard_reset_cmd(hdev); |
| 6657 | return; |
| 6658 | } |
| 6659 | |
| 6660 | /* Set device to handle FLR by H/W as we will put the device |
| 6661 | * CPU to halt mode |
| 6662 | */ |
| 6663 | WREG32(mmPCIE_AUX_FLR_CTRL, |
| 6664 | (PCIE_AUX_FLR_CTRL_HW_CTRL_MASK | PCIE_AUX_FLR_CTRL_INT_MASK_MASK)); |
| 6665 | |
| 6666 | gaudi2_send_hard_reset_cmd(hdev); |
| 6667 | |
| 6668 | WREG32(mmPSOC_RESET_CONF_SW_ALL_RST, 1); |
| 6669 | } |
| 6670 | |
| 6671 | /** |
| 6672 | * gaudi2_execute_soft_reset - execute soft reset by driver/FW |
| 6673 | * |
| 6674 | * @hdev: pointer to the habanalabs device structure |
| 6675 | * @driver_performs_reset: true if driver should perform reset instead of f/w. |
| 6676 | * @poll_timeout_us: time to wait for response from f/w. |
| 6677 | * |
| 6678 | * This function executes a soft reset, based on whether the driver or the FW should perform it
| 6679 | */ |
| 6680 | static int gaudi2_execute_soft_reset(struct hl_device *hdev, bool driver_performs_reset, |
| 6681 | u32 poll_timeout_us) |
| 6682 | { |
| 6683 | if (!driver_performs_reset) |
| 6684 | return hl_fw_send_soft_reset(hdev); |
| 6685 | |
| 6686 | /* Block access to engines, QMANs and SM during reset, these |
| 6687 | * RRs will be reconfigured after soft reset. |
| 6688 | * PCIE_MSIX is left unsecured to allow NIC packets processing during the reset. |
| 6689 | */ |
| 6690 | gaudi2_write_rr_to_all_lbw_rtrs(hdev, RR_TYPE_LONG, NUM_LONG_LBW_RR - 1, |
| 6691 | mmDCORE0_TPC0_QM_DCCM_BASE, mmPCIE_MSIX_BASE); |
| 6692 | |
| 6693 | gaudi2_write_rr_to_all_lbw_rtrs(hdev, RR_TYPE_LONG, NUM_LONG_LBW_RR - 2, |
| 6694 | mmPCIE_MSIX_BASE + HL_BLOCK_SIZE, |
| 6695 | mmPCIE_VDEC1_MSTR_IF_RR_SHRD_HBW_BASE + HL_BLOCK_SIZE); |
| 6696 | |
| 6697 | WREG32(mmPSOC_RESET_CONF_SOFT_RST, 1); |
| 6698 | return 0; |
| 6699 | } |
| 6700 | |
| 6701 | static void gaudi2_poll_btm_indication(struct hl_device *hdev, u32 poll_timeout_us) |
| 6702 | { |
| 6703 | int i, rc = 0; |
| 6704 | u32 reg_val; |
| 6705 | |
| 6706 | /* We poll the BTM done indication multiple times after reset due to |
| 6707 | * a HW errata 'GAUDI2_0300' |
| 6708 | */ |
| 6709 | for (i = 0 ; i < GAUDI2_RESET_POLL_CNT ; i++) |
| 6710 | rc = hl_poll_timeout( |
| 6711 | hdev, |
| 6712 | mmPSOC_GLOBAL_CONF_BTM_FSM, |
| 6713 | reg_val, |
| 6714 | reg_val == 0, |
| 6715 | 1000, |
| 6716 | poll_timeout_us); |
| 6717 | |
| 6718 | if (rc) |
| 6719 | dev_err(hdev->dev, "Timeout while waiting for device to reset 0x%x\n" , reg_val); |
| 6720 | } |
| 6721 | |
| 6722 | static int gaudi2_hw_fini(struct hl_device *hdev, bool hard_reset, bool fw_reset) |
| 6723 | { |
| 6724 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 6725 | u32 poll_timeout_us, reset_sleep_ms; |
| 6726 | bool driver_performs_reset = false; |
| 6727 | int rc; |
| 6728 | |
| 6729 | if (hdev->pldm) { |
| 6730 | reset_sleep_ms = hard_reset ? GAUDI2_PLDM_HRESET_TIMEOUT_MSEC : |
| 6731 | GAUDI2_PLDM_SRESET_TIMEOUT_MSEC; |
| 6732 | poll_timeout_us = GAUDI2_PLDM_RESET_POLL_TIMEOUT_USEC; |
| 6733 | } else { |
| 6734 | reset_sleep_ms = GAUDI2_RESET_TIMEOUT_MSEC; |
| 6735 | poll_timeout_us = GAUDI2_RESET_POLL_TIMEOUT_USEC; |
| 6736 | } |
| 6737 | |
| 6738 | if (fw_reset) |
| 6739 | goto skip_reset; |
| 6740 | |
| 6741 | gaudi2_reset_arcs(hdev); |
| 6742 | |
| 6743 | if (hard_reset) { |
| 6744 | driver_performs_reset = !hdev->asic_prop.hard_reset_done_by_fw; |
| 6745 | gaudi2_execute_hard_reset(hdev); |
| 6746 | } else { |
| 6747 | /* |
| 6748 | * As we also have to support working with preboot only (which does not support
| 6749 | * soft reset), we have to make sure that security is disabled before letting the
| 6750 | * driver do the reset. The user shall control the BFE flags to avoid requesting a
| 6751 | * soft reset on a secured device that has preboot only.
| 6752 | */ |
| 6753 | driver_performs_reset = (hdev->fw_components == FW_TYPE_PREBOOT_CPU && |
| 6754 | !hdev->asic_prop.fw_security_enabled); |
| 6755 | rc = gaudi2_execute_soft_reset(hdev, driver_performs_reset, poll_timeout_us); |
| 6756 | if (rc) |
| 6757 | return rc; |
| 6758 | } |
| 6759 | |
| 6760 | skip_reset: |
| 6761 | if (driver_performs_reset || hard_reset) { |
| 6762 | /* |
| 6763 | * Instead of waiting for BTM indication we should wait for preboot ready: |
| 6764 | * Consider the below scenario: |
| 6765 | * 1. FW update is being triggered |
| 6766 | * - setting the dirty bit |
| 6767 | * 2. hard reset will be triggered due to the dirty bit |
| 6768 | * 3. FW initiates the reset: |
| 6769 | * - dirty bit cleared |
| 6770 | * - BTM indication cleared |
| 6771 | * - preboot ready indication cleared |
| 6772 | * 4. during hard reset: |
| 6773 | * - BTM indication will be set |
| 6774 | * - BIST test performed and another reset triggered |
| 6775 | * 5. only after this reset will preboot set the preboot ready indication
| 6776 | *
| 6777 | * when polling on the BTM indication alone we can lose sync with the FW while
| 6778 | * trying to communicate with it during its reset flow.
| 6779 | * to overcome this we will always wait for the preboot ready indication
| 6780 | */ |
| 6781 | |
| 6782 | /* without this sleep reset will not work */ |
| 6783 | msleep(reset_sleep_ms);
| 6784 | |
| 6785 | if (hdev->fw_components & FW_TYPE_PREBOOT_CPU) |
| 6786 | hl_fw_wait_preboot_ready(hdev); |
| 6787 | else |
| 6788 | gaudi2_poll_btm_indication(hdev, poll_timeout_us); |
| 6789 | } |
| 6790 | |
| 6791 | if (!gaudi2) |
| 6792 | return 0; |
| 6793 | |
| 6794 | gaudi2->dec_hw_cap_initialized &= ~(HW_CAP_DEC_MASK); |
| 6795 | gaudi2->tpc_hw_cap_initialized &= ~(HW_CAP_TPC_MASK); |
| 6796 | |
| 6797 | /* |
| 6798 | * Clear NIC capability mask in order for driver to re-configure |
| 6799 | * NIC QMANs. NIC ports will not be re-configured during soft |
| 6800 | * reset as we call gaudi2_nic_init only during hard reset |
| 6801 | */ |
| 6802 | gaudi2->nic_hw_cap_initialized &= ~(HW_CAP_NIC_MASK); |
| 6803 | |
| 6804 | if (hard_reset) { |
| 6805 | gaudi2->hw_cap_initialized &= |
| 6806 | ~(HW_CAP_DRAM | HW_CAP_CLK_GATE | HW_CAP_HBM_SCRAMBLER_MASK | |
| 6807 | HW_CAP_PMMU | HW_CAP_CPU | HW_CAP_CPU_Q | |
| 6808 | HW_CAP_SRAM_SCRAMBLER | HW_CAP_DMMU_MASK | |
| 6809 | HW_CAP_PDMA_MASK | HW_CAP_EDMA_MASK | HW_CAP_KDMA | |
| 6810 | HW_CAP_MME_MASK | HW_CAP_ROT_MASK); |
| 6811 | |
| 6812 | memset(gaudi2->events_stat, 0, sizeof(gaudi2->events_stat)); |
| 6813 | } else { |
| 6814 | gaudi2->hw_cap_initialized &= |
| 6815 | ~(HW_CAP_CLK_GATE | HW_CAP_HBM_SCRAMBLER_SW_RESET | |
| 6816 | HW_CAP_PDMA_MASK | HW_CAP_EDMA_MASK | HW_CAP_MME_MASK | |
| 6817 | HW_CAP_ROT_MASK); |
| 6818 | } |
| 6819 | return 0; |
| 6820 | } |
| 6821 | |
| 6822 | static int gaudi2_suspend(struct hl_device *hdev) |
| 6823 | { |
| 6824 | return hl_fw_send_pci_access_msg(hdev, CPUCP_PACKET_DISABLE_PCI_ACCESS, 0x0);
| 6825 | } |
| 6826 | |
| 6827 | static int gaudi2_resume(struct hl_device *hdev) |
| 6828 | { |
| 6829 | return gaudi2_init_iatu(hdev); |
| 6830 | } |
| 6831 | |
| 6832 | static int gaudi2_mmap(struct hl_device *hdev, struct vm_area_struct *vma, |
| 6833 | void *cpu_addr, dma_addr_t dma_addr, size_t size) |
| 6834 | { |
| 6835 | int rc; |
| 6836 | |
| 6837 | vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | |
| 6838 | VM_DONTCOPY | VM_NORESERVE); |
| 6839 | |
| 6840 | #ifdef _HAS_DMA_MMAP_COHERENT |
| 6841 | /* |
| 6842 | * If dma_alloc_coherent() returns a vmalloc address, set VM_MIXEDMAP |
| 6843 | * so vm_insert_page() can handle it safely. Without this, the kernel |
| 6844 | * may BUG_ON due to VM_PFNMAP. |
| 6845 | */ |
| 6846 | if (is_vmalloc_addr(cpu_addr)) |
| 6847 | vm_flags_set(vma, VM_MIXEDMAP); |
| 6848 | |
| 6849 | rc = dma_mmap_coherent(hdev->dev, vma, cpu_addr, dma_addr, size); |
| 6850 | if (rc) |
| 6851 | dev_err(hdev->dev, "dma_mmap_coherent error %d" , rc); |
| 6852 | |
| 6853 | #else |
| 6854 | |
| 6855 | rc = remap_pfn_range(vma, vma->vm_start,
| 6856 | virt_to_phys(cpu_addr) >> PAGE_SHIFT,
| 6857 | size, vma->vm_page_prot);
| 6858 | if (rc) |
| 6859 | dev_err(hdev->dev, "remap_pfn_range error %d" , rc); |
| 6860 | |
| 6861 | #endif |
| 6862 | |
| 6863 | return rc; |
| 6864 | } |
| 6865 | |
| 6866 | static bool gaudi2_is_queue_enabled(struct hl_device *hdev, u32 hw_queue_id) |
| 6867 | { |
| 6868 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 6869 | u64 hw_cap_mask = 0; |
| 6870 | u64 hw_tpc_cap_bit = 0; |
| 6871 | u64 hw_nic_cap_bit = 0; |
| 6872 | u64 hw_test_cap_bit = 0; |
| 6873 | |
| 6874 | switch (hw_queue_id) { |
| 6875 | case GAUDI2_QUEUE_ID_PDMA_0_0: |
| 6876 | case GAUDI2_QUEUE_ID_PDMA_0_1: |
| 6877 | case GAUDI2_QUEUE_ID_PDMA_1_0: |
| 6878 | hw_cap_mask = HW_CAP_PDMA_MASK; |
| 6879 | break; |
| 6880 | case GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE0_EDMA_1_3: |
| 6881 | hw_test_cap_bit = HW_CAP_EDMA_SHIFT + |
| 6882 | ((hw_queue_id - GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0) >> 2); |
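|      | /* Each EDMA engine exposes 4 consecutive queue IDs, so shifting the
|      | * queue offset right by 2 yields the engine index; e.g. an offset of 4
|      | * (the fifth EDMA queue of the dcore) maps to cap bit HW_CAP_EDMA_SHIFT + 1.
|      | */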
| 6883 | break; |
| 6884 | case GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE1_EDMA_1_3: |
| 6885 | hw_test_cap_bit = HW_CAP_EDMA_SHIFT + NUM_OF_EDMA_PER_DCORE + |
| 6886 | ((hw_queue_id - GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0) >> 2); |
| 6887 | break; |
| 6888 | case GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE2_EDMA_1_3: |
| 6889 | hw_test_cap_bit = HW_CAP_EDMA_SHIFT + 2 * NUM_OF_EDMA_PER_DCORE + |
| 6890 | ((hw_queue_id - GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0) >> 2); |
| 6891 | break; |
| 6892 | case GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0...GAUDI2_QUEUE_ID_DCORE3_EDMA_1_3: |
| 6893 | hw_test_cap_bit = HW_CAP_EDMA_SHIFT + 3 * NUM_OF_EDMA_PER_DCORE + |
| 6894 | ((hw_queue_id - GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0) >> 2); |
| 6895 | break; |
| 6896 | |
| 6897 | case GAUDI2_QUEUE_ID_DCORE0_MME_0_0 ... GAUDI2_QUEUE_ID_DCORE0_MME_0_3: |
| 6898 | hw_test_cap_bit = HW_CAP_MME_SHIFT; |
| 6899 | break; |
| 6900 | |
| 6901 | case GAUDI2_QUEUE_ID_DCORE1_MME_0_0 ... GAUDI2_QUEUE_ID_DCORE1_MME_0_3: |
| 6902 | hw_test_cap_bit = HW_CAP_MME_SHIFT + 1; |
| 6903 | break; |
| 6904 | |
| 6905 | case GAUDI2_QUEUE_ID_DCORE2_MME_0_0 ... GAUDI2_QUEUE_ID_DCORE2_MME_0_3: |
| 6906 | hw_test_cap_bit = HW_CAP_MME_SHIFT + 2; |
| 6907 | break; |
| 6908 | |
| 6909 | case GAUDI2_QUEUE_ID_DCORE3_MME_0_0 ... GAUDI2_QUEUE_ID_DCORE3_MME_0_3: |
| 6910 | hw_test_cap_bit = HW_CAP_MME_SHIFT + 3; |
| 6911 | break; |
| 6912 | |
| 6913 | case GAUDI2_QUEUE_ID_DCORE0_TPC_0_0 ... GAUDI2_QUEUE_ID_DCORE0_TPC_5_3: |
| 6914 | hw_tpc_cap_bit = HW_CAP_TPC_SHIFT + |
| 6915 | ((hw_queue_id - GAUDI2_QUEUE_ID_DCORE0_TPC_0_0) >> 2); |
| 6916 | |
| 6917 | /* special case: the cap bit computed for the first queue id is 0, which is also the "unused" sentinel checked after the switch, so answer it here */
| 6918 | if (!hw_tpc_cap_bit) |
| 6919 | return !!(gaudi2->tpc_hw_cap_initialized & BIT_ULL(0)); |
| 6920 | break; |
| 6921 | |
| 6922 | case GAUDI2_QUEUE_ID_DCORE1_TPC_0_0 ... GAUDI2_QUEUE_ID_DCORE1_TPC_5_3: |
| 6923 | hw_tpc_cap_bit = HW_CAP_TPC_SHIFT + NUM_OF_TPC_PER_DCORE + |
| 6924 | ((hw_queue_id - GAUDI2_QUEUE_ID_DCORE1_TPC_0_0) >> 2); |
| 6925 | break; |
| 6926 | |
| 6927 | case GAUDI2_QUEUE_ID_DCORE2_TPC_0_0 ... GAUDI2_QUEUE_ID_DCORE2_TPC_5_3: |
| 6928 | hw_tpc_cap_bit = HW_CAP_TPC_SHIFT + (2 * NUM_OF_TPC_PER_DCORE) + |
| 6929 | ((hw_queue_id - GAUDI2_QUEUE_ID_DCORE2_TPC_0_0) >> 2); |
| 6930 | break; |
| 6931 | |
| 6932 | case GAUDI2_QUEUE_ID_DCORE3_TPC_0_0 ... GAUDI2_QUEUE_ID_DCORE3_TPC_5_3: |
| 6933 | hw_tpc_cap_bit = HW_CAP_TPC_SHIFT + (3 * NUM_OF_TPC_PER_DCORE) + |
| 6934 | ((hw_queue_id - GAUDI2_QUEUE_ID_DCORE3_TPC_0_0) >> 2); |
| 6935 | break; |
| 6936 | |
| 6937 | case GAUDI2_QUEUE_ID_DCORE0_TPC_6_0 ... GAUDI2_QUEUE_ID_DCORE0_TPC_6_3: |
| 6938 | hw_tpc_cap_bit = HW_CAP_TPC_SHIFT + (4 * NUM_OF_TPC_PER_DCORE); |
| 6939 | break; |
| 6940 | |
| 6941 | case GAUDI2_QUEUE_ID_ROT_0_0 ... GAUDI2_QUEUE_ID_ROT_1_3: |
| 6942 | hw_test_cap_bit = HW_CAP_ROT_SHIFT + ((hw_queue_id - GAUDI2_QUEUE_ID_ROT_0_0) >> 2); |
| 6943 | break; |
| 6944 | |
| 6945 | case GAUDI2_QUEUE_ID_NIC_0_0 ... GAUDI2_QUEUE_ID_NIC_23_3: |
| 6946 | hw_nic_cap_bit = HW_CAP_NIC_SHIFT + ((hw_queue_id - GAUDI2_QUEUE_ID_NIC_0_0) >> 2); |
| 6947 | |
| 6948 | /* special case: the cap bit computed for the first queue id is 0, which is also the "unused" sentinel checked after the switch, so answer it here */
| 6949 | if (!hw_nic_cap_bit) |
| 6950 | return !!(gaudi2->nic_hw_cap_initialized & BIT_ULL(0)); |
| 6951 | break; |
| 6952 | |
| 6953 | case GAUDI2_QUEUE_ID_CPU_PQ: |
| 6954 | return !!(gaudi2->hw_cap_initialized & HW_CAP_CPU_Q); |
| 6955 | |
| 6956 | default: |
| 6957 | return false; |
| 6958 | } |
| 6959 | |
| 6960 | if (hw_tpc_cap_bit) |
| 6961 | return !!(gaudi2->tpc_hw_cap_initialized & BIT_ULL(hw_tpc_cap_bit)); |
| 6962 | |
| 6963 | if (hw_nic_cap_bit) |
| 6964 | return !!(gaudi2->nic_hw_cap_initialized & BIT_ULL(hw_nic_cap_bit)); |
| 6965 | |
| 6966 | if (hw_test_cap_bit) |
| 6967 | hw_cap_mask = BIT_ULL(hw_test_cap_bit); |
| 6968 | |
| 6969 | return !!(gaudi2->hw_cap_initialized & hw_cap_mask); |
| 6970 | } |
| 6971 | |
| 6972 | static bool gaudi2_is_arc_enabled(struct hl_device *hdev, u64 arc_id) |
| 6973 | { |
| 6974 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 6975 | |
| 6976 | switch (arc_id) { |
| 6977 | case CPU_ID_SCHED_ARC0 ... CPU_ID_SCHED_ARC5: |
| 6978 | case CPU_ID_MME_QMAN_ARC0...CPU_ID_ROT_QMAN_ARC1: |
| 6979 | return !!(gaudi2->active_hw_arc & BIT_ULL(arc_id)); |
| 6980 | |
| 6981 | case CPU_ID_TPC_QMAN_ARC0...CPU_ID_TPC_QMAN_ARC24: |
| 6982 | return !!(gaudi2->active_tpc_arc & BIT_ULL(arc_id - CPU_ID_TPC_QMAN_ARC0)); |
| 6983 | |
| 6984 | case CPU_ID_NIC_QMAN_ARC0...CPU_ID_NIC_QMAN_ARC23: |
| 6985 | return !!(gaudi2->active_nic_arc & BIT_ULL(arc_id - CPU_ID_NIC_QMAN_ARC0)); |
| 6986 | |
| 6987 | default: |
| 6988 | return false; |
| 6989 | } |
| 6990 | } |
| 6991 | |
| 6992 | static void gaudi2_clr_arc_id_cap(struct hl_device *hdev, u64 arc_id) |
| 6993 | { |
| 6994 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 6995 | |
| 6996 | switch (arc_id) { |
| 6997 | case CPU_ID_SCHED_ARC0 ... CPU_ID_SCHED_ARC5: |
| 6998 | case CPU_ID_MME_QMAN_ARC0...CPU_ID_ROT_QMAN_ARC1: |
| 6999 | gaudi2->active_hw_arc &= ~(BIT_ULL(arc_id)); |
| 7000 | break; |
| 7001 | |
| 7002 | case CPU_ID_TPC_QMAN_ARC0...CPU_ID_TPC_QMAN_ARC24: |
| 7003 | gaudi2->active_tpc_arc &= ~(BIT_ULL(arc_id - CPU_ID_TPC_QMAN_ARC0)); |
| 7004 | break; |
| 7005 | |
| 7006 | case CPU_ID_NIC_QMAN_ARC0...CPU_ID_NIC_QMAN_ARC23: |
| 7007 | gaudi2->active_nic_arc &= ~(BIT_ULL(arc_id - CPU_ID_NIC_QMAN_ARC0)); |
| 7008 | break; |
| 7009 | |
| 7010 | default: |
| 7011 | return; |
| 7012 | } |
| 7013 | } |
| 7014 | |
| 7015 | static void gaudi2_set_arc_id_cap(struct hl_device *hdev, u64 arc_id) |
| 7016 | { |
| 7017 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 7018 | |
| 7019 | switch (arc_id) { |
| 7020 | case CPU_ID_SCHED_ARC0 ... CPU_ID_SCHED_ARC5: |
| 7021 | case CPU_ID_MME_QMAN_ARC0...CPU_ID_ROT_QMAN_ARC1: |
| 7022 | gaudi2->active_hw_arc |= BIT_ULL(arc_id); |
| 7023 | break; |
| 7024 | |
| 7025 | case CPU_ID_TPC_QMAN_ARC0...CPU_ID_TPC_QMAN_ARC24: |
| 7026 | gaudi2->active_tpc_arc |= BIT_ULL(arc_id - CPU_ID_TPC_QMAN_ARC0); |
| 7027 | break; |
| 7028 | |
| 7029 | case CPU_ID_NIC_QMAN_ARC0...CPU_ID_NIC_QMAN_ARC23: |
| 7030 | gaudi2->active_nic_arc |= BIT_ULL(arc_id - CPU_ID_NIC_QMAN_ARC0); |
| 7031 | break; |
| 7032 | |
| 7033 | default: |
| 7034 | return; |
| 7035 | } |
| 7036 | } |
| 7037 | |
| 7038 | static void gaudi2_ring_doorbell(struct hl_device *hdev, u32 hw_queue_id, u32 pi) |
| 7039 | { |
| 7040 | struct cpu_dyn_regs *dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; |
| 7041 | u32 pq_offset, reg_base, db_reg_offset, db_value; |
| 7042 | |
| 7043 | if (hw_queue_id != GAUDI2_QUEUE_ID_CPU_PQ) { |
| 7044 | /* |
| 7045 | * QMAN has 4 successive PQ_PI registers, 1 for each of the QMAN PQs. |
| 7046 | * Masking the H/W queue ID with 0x3 extracts the QMAN internal PQ |
| 7047 | * number. |
| 7048 | */ |
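|      | /*
|      | * Worked example (queue ID value is illustrative): a H/W queue whose ID
|      | * is 13 maps to internal PQ 13 & 0x3 = 1, so pq_offset = 1 * 4 = 4 and
|      | * the doorbell lands on the second PQ_PI register of that QMAN.
|      | */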
| 7049 | pq_offset = (hw_queue_id & 0x3) * 4; |
| 7050 | reg_base = gaudi2_qm_blocks_bases[hw_queue_id]; |
| 7051 | db_reg_offset = reg_base + QM_PQ_PI_0_OFFSET + pq_offset; |
| 7052 | } else { |
| 7053 | db_reg_offset = mmCPU_IF_PF_PQ_PI; |
| 7054 | } |
| 7055 | |
| 7056 | db_value = pi; |
| 7057 | |
| 7058 | /* ring the doorbell */ |
| 7059 | WREG32(db_reg_offset, db_value); |
| 7060 | |
| 7061 | if (hw_queue_id == GAUDI2_QUEUE_ID_CPU_PQ) { |
| 7062 | /* make sure device CPU will read latest data from host */ |
| 7063 | mb(); |
| 7064 | WREG32(le32_to_cpu(dyn_regs->gic_host_pi_upd_irq), |
| 7065 | gaudi2_irq_map_table[GAUDI2_EVENT_CPU_PI_UPDATE].cpu_id); |
| 7066 | } |
| 7067 | } |
| 7068 | |
| 7069 | static void gaudi2_pqe_write(struct hl_device *hdev, __le64 *pqe, struct hl_bd *bd) |
| 7070 | { |
| 7071 | __le64 *pbd = (__le64 *) bd; |
| 7072 | |
| 7073 | /* The QMANs are on the host memory so a simple copy suffices */
| 7074 | pqe[0] = pbd[0]; |
| 7075 | pqe[1] = pbd[1]; |
| 7076 | } |
| 7077 | |
| 7078 | static void *gaudi2_dma_alloc_coherent(struct hl_device *hdev, size_t size, |
| 7079 | dma_addr_t *dma_handle, gfp_t flags) |
| 7080 | { |
| 7081 | return dma_alloc_coherent(&hdev->pdev->dev, size, dma_handle, flags);
| 7082 | } |
| 7083 | |
| 7084 | static void gaudi2_dma_free_coherent(struct hl_device *hdev, size_t size, |
| 7085 | void *cpu_addr, dma_addr_t dma_handle) |
| 7086 | { |
| 7087 | dma_free_coherent(&hdev->pdev->dev, size, cpu_addr, dma_handle);
| 7088 | } |
| 7089 | |
| 7090 | static int gaudi2_send_cpu_message(struct hl_device *hdev, u32 *msg, u16 len, |
| 7091 | u32 timeout, u64 *result) |
| 7092 | { |
| 7093 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 7094 | |
| 7095 | if (!(gaudi2->hw_cap_initialized & HW_CAP_CPU_Q)) { |
| 7096 | if (result) |
| 7097 | *result = 0; |
| 7098 | return 0; |
| 7099 | } |
| 7100 | |
| 7101 | if (!timeout) |
| 7102 | timeout = GAUDI2_MSG_TO_CPU_TIMEOUT_USEC; |
| 7103 | |
| 7104 | return hl_fw_send_cpu_message(hdev, GAUDI2_QUEUE_ID_CPU_PQ, msg, len, timeout, result);
| 7105 | } |
| 7106 | |
| 7107 | static void *gaudi2_dma_pool_zalloc(struct hl_device *hdev, size_t size, |
| 7108 | gfp_t mem_flags, dma_addr_t *dma_handle) |
| 7109 | { |
| 7110 | if (size > GAUDI2_DMA_POOL_BLK_SIZE) |
| 7111 | return NULL; |
| 7112 | |
| 7113 | return dma_pool_zalloc(hdev->dma_pool, mem_flags, dma_handle);
| 7114 | } |
| 7115 | |
| 7116 | static void gaudi2_dma_pool_free(struct hl_device *hdev, void *vaddr, dma_addr_t dma_addr) |
| 7117 | { |
| 7118 | dma_pool_free(hdev->dma_pool, vaddr, dma_addr);
| 7119 | } |
| 7120 | |
| 7121 | static void *gaudi2_cpu_accessible_dma_pool_alloc(struct hl_device *hdev, size_t size, |
| 7122 | dma_addr_t *dma_handle) |
| 7123 | { |
| 7124 | return hl_fw_cpu_accessible_dma_pool_alloc(hdev, size, dma_handle); |
| 7125 | } |
| 7126 | |
| 7127 | static void gaudi2_cpu_accessible_dma_pool_free(struct hl_device *hdev, size_t size, void *vaddr) |
| 7128 | { |
| 7129 | hl_fw_cpu_accessible_dma_pool_free(hdev, size, vaddr); |
| 7130 | } |
| 7131 | |
| 7132 | static int gaudi2_validate_cb_address(struct hl_device *hdev, struct hl_cs_parser *parser) |
| 7133 | { |
| 7134 | struct asic_fixed_properties *asic_prop = &hdev->asic_prop; |
| 7135 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 7136 | |
| 7137 | if (!gaudi2_is_queue_enabled(hdev, parser->hw_queue_id)) {
| 7138 | dev_err(hdev->dev, "h/w queue %s is disabled\n",
| 7139 | GAUDI2_QUEUE_ID_TO_STR(parser->hw_queue_id)); |
| 7140 | return -EINVAL; |
| 7141 | } |
| 7142 | |
| 7143 | /* Just check if CB address is valid */ |
| 7144 | |
| 7145 | if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
| 7146 | parser->user_cb_size,
| 7147 | asic_prop->sram_user_base_address,
| 7148 | asic_prop->sram_end_address))
| 7149 | return 0; |
| 7150 | |
| 7151 | if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
| 7152 | parser->user_cb_size,
| 7153 | asic_prop->dram_user_base_address,
| 7154 | asic_prop->dram_end_address))
| 7155 | return 0; |
| 7156 | |
| 7157 | if ((gaudi2->hw_cap_initialized & HW_CAP_DMMU_MASK) && |
| 7158 | hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
| 7159 | parser->user_cb_size,
| 7160 | asic_prop->dmmu.start_addr,
| 7161 | asic_prop->dmmu.end_addr))
| 7162 | return 0; |
| 7163 | |
| 7164 | if (gaudi2->hw_cap_initialized & HW_CAP_PMMU) { |
| 7165 | if (hl_mem_area_inside_range((u64) (uintptr_t) parser->user_cb,
| 7166 | parser->user_cb_size,
| 7167 | asic_prop->pmmu.start_addr,
| 7168 | asic_prop->pmmu.end_addr) ||
| 7169 | hl_mem_area_inside_range(
| 7170 | (u64) (uintptr_t) parser->user_cb,
| 7171 | parser->user_cb_size,
| 7172 | asic_prop->pmmu_huge.start_addr,
| 7173 | asic_prop->pmmu_huge.end_addr))
| 7174 | return 0;
| 7175 | 
| 7176 | } else if (gaudi2_host_phys_addr_valid((u64) (uintptr_t) parser->user_cb)) {
| 7177 | if (!hdev->pdev)
| 7178 | return 0;
| 7179 | 
| 7180 | if (!device_iommu_mapped(&hdev->pdev->dev))
| 7181 | return 0; |
| 7182 | } |
| 7183 | |
| 7184 | dev_err(hdev->dev, "CB address %p + 0x%x for internal QMAN is not valid\n" , |
| 7185 | parser->user_cb, parser->user_cb_size); |
| 7186 | |
| 7187 | return -EFAULT; |
| 7188 | } |
| 7189 | |
| 7190 | static int gaudi2_cs_parser(struct hl_device *hdev, struct hl_cs_parser *parser) |
| 7191 | { |
| 7192 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 7193 | |
| 7194 | if (!parser->is_kernel_allocated_cb) |
| 7195 | return gaudi2_validate_cb_address(hdev, parser); |
| 7196 | |
| 7197 | if (!(gaudi2->hw_cap_initialized & HW_CAP_PMMU)) { |
| 7198 | dev_err(hdev->dev, "PMMU not initialized - Unsupported mode in Gaudi2\n" ); |
| 7199 | return -EINVAL; |
| 7200 | } |
| 7201 | |
| 7202 | return 0; |
| 7203 | } |
| 7204 | |
| 7205 | static int gaudi2_send_heartbeat(struct hl_device *hdev) |
| 7206 | { |
| 7207 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 7208 | |
| 7209 | if (!(gaudi2->hw_cap_initialized & HW_CAP_CPU_Q)) |
| 7210 | return 0; |
| 7211 | |
| 7212 | return hl_fw_send_heartbeat(hdev); |
| 7213 | } |
| 7214 | |
| 7215 | /* This is an internal helper function, used to update the KDMA mmu props. |
| 7216 | * Must be called while holding the proper KDMA lock.
| 7217 | */ |
| 7218 | static void gaudi2_kdma_set_mmbp_asid(struct hl_device *hdev, |
| 7219 | bool mmu_bypass, u32 asid) |
| 7220 | { |
| 7221 | u32 rw_asid, rw_mmu_bp; |
| 7222 | |
| 7223 | rw_asid = (asid << ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_RD_SHIFT) | |
| 7224 | (asid << ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_WR_SHIFT); |
| 7225 | |
| 7226 | rw_mmu_bp = (!!mmu_bypass << ARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP_RD_SHIFT) | |
| 7227 | (!!mmu_bypass << ARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP_WR_SHIFT); |
| 7228 | |
| 7229 | WREG32(mmARC_FARM_KDMA_CTX_AXUSER_HB_ASID, rw_asid); |
| 7230 | WREG32(mmARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP, rw_mmu_bp); |
| 7231 | } |
| 7232 | |
| 7233 | static void gaudi2_arm_cq_monitor(struct hl_device *hdev, u32 sob_id, u32 mon_id, u32 cq_id, |
| 7234 | u32 mon_payload, u32 sync_value) |
| 7235 | { |
| 7236 | u32 sob_offset, mon_offset, sync_group_id, mode, mon_arm; |
| 7237 | u8 mask; |
| 7238 | |
| 7239 | sob_offset = sob_id * 4; |
| 7240 | mon_offset = mon_id * 4; |
| 7241 | |
| 7242 | /* Reset the SOB value */ |
| 7243 | WREG32(mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_offset, 0); |
| 7244 | |
| 7245 | /* Configure this address with CQ_ID 0 because CQ_EN is set */ |
| 7246 | WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + mon_offset, cq_id); |
| 7247 | |
| 7248 | /* Configure this address with CS index because CQ_EN is set */ |
| 7249 | WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + mon_offset, mon_payload); |
| 7250 | |
| 7251 | sync_group_id = sob_id / 8; |
| 7252 | mask = ~(1 << (sob_id & 0x7)); |
| 7253 | mode = 1; /* comparison mode is "equal to" */ |
| 7254 | |
| 7255 | mon_arm = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_ARM_SOD_MASK, sync_value); |
| 7256 | mon_arm |= FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_ARM_SOP_MASK, mode); |
| 7257 | mon_arm |= FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_ARM_MASK_MASK, mask); |
| 7258 | mon_arm |= FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_MON_ARM_SID_MASK, sync_group_id); |
| 7259 | WREG32(mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_0 + mon_offset, mon_arm); |
| 7260 | } |
| 7261 | |
| 7262 | /* Synchronously copy or memset a buffer using the KDMA engine and wait for completion */
| 7263 | static int gaudi2_send_job_to_kdma(struct hl_device *hdev, |
| 7264 | u64 src_addr, u64 dst_addr, |
| 7265 | u32 size, bool is_memset) |
| 7266 | { |
| 7267 | u32 comp_val, commit_mask, *polling_addr, timeout, status = 0; |
| 7268 | struct hl_cq_entry *cq_base; |
| 7269 | struct hl_cq *cq; |
| 7270 | u64 comp_addr; |
| 7271 | int rc; |
| 7272 | |
| 7273 | gaudi2_arm_cq_monitor(hdev, GAUDI2_RESERVED_SOB_KDMA_COMPLETION,
| 7274 | GAUDI2_RESERVED_MON_KDMA_COMPLETION,
| 7275 | GAUDI2_RESERVED_CQ_KDMA_COMPLETION, 1, 1);
| 7276 | |
| 7277 | comp_addr = CFG_BASE + mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + |
| 7278 | (GAUDI2_RESERVED_SOB_KDMA_COMPLETION * sizeof(u32)); |
| 7279 | |
| 7280 | comp_val = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_INC_MASK, 1) | |
| 7281 | FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_VAL_MASK, 1); |
| 7282 | |
| 7283 | WREG32(mmARC_FARM_KDMA_CTX_SRC_BASE_LO, lower_32_bits(src_addr)); |
| 7284 | WREG32(mmARC_FARM_KDMA_CTX_SRC_BASE_HI, upper_32_bits(src_addr)); |
| 7285 | WREG32(mmARC_FARM_KDMA_CTX_DST_BASE_LO, lower_32_bits(dst_addr)); |
| 7286 | WREG32(mmARC_FARM_KDMA_CTX_DST_BASE_HI, upper_32_bits(dst_addr)); |
| 7287 | WREG32(mmARC_FARM_KDMA_CTX_WR_COMP_ADDR_LO, lower_32_bits(comp_addr)); |
| 7288 | WREG32(mmARC_FARM_KDMA_CTX_WR_COMP_ADDR_HI, upper_32_bits(comp_addr)); |
| 7289 | WREG32(mmARC_FARM_KDMA_CTX_WR_COMP_WDATA, comp_val); |
| 7290 | WREG32(mmARC_FARM_KDMA_CTX_DST_TSIZE_0, size); |
| 7291 | |
| 7292 | commit_mask = FIELD_PREP(ARC_FARM_KDMA_CTX_COMMIT_LIN_MASK, 1) | |
| 7293 | FIELD_PREP(ARC_FARM_KDMA_CTX_COMMIT_WR_COMP_EN_MASK, 1); |
| 7294 | |
| 7295 | if (is_memset) |
| 7296 | commit_mask |= FIELD_PREP(ARC_FARM_KDMA_CTX_COMMIT_MEM_SET_MASK, 1); |
| 7297 | |
| 7298 | WREG32(mmARC_FARM_KDMA_CTX_COMMIT, commit_mask); |
| 7299 | |
| 7300 | /* Wait for completion */ |
| 7301 | cq = &hdev->completion_queue[GAUDI2_RESERVED_CQ_KDMA_COMPLETION]; |
| 7302 | cq_base = cq->kernel_address; |
| 7303 | polling_addr = (u32 *)&cq_base[cq->ci]; |
| 7304 | |
| 7305 | if (hdev->pldm) |
| 7306 | /* allow 20 seconds of timeout for each 1MB of transfer size */
| 7307 | timeout = ((size / SZ_1M) + 1) * USEC_PER_SEC * 20; |
| 7308 | else |
| 7309 | timeout = KDMA_TIMEOUT_USEC; |
| 7310 | |
| 7311 | /* Polling */ |
| 7312 | rc = hl_poll_timeout_memory( |
| 7313 | hdev, |
| 7314 | polling_addr, |
| 7315 | status, |
| 7316 | (status == 1), |
| 7317 | 1000, |
| 7318 | timeout, |
| 7319 | true); |
| 7320 | |
| 7321 | *polling_addr = 0; |
| 7322 | |
| 7323 | if (rc) { |
| 7324 | dev_err(hdev->dev, "Timeout while waiting for KDMA to be idle\n" ); |
| 7325 | WREG32(mmARC_FARM_KDMA_CFG_1, 1 << ARC_FARM_KDMA_CFG_1_HALT_SHIFT); |
| 7326 | return rc; |
| 7327 | } |
| 7328 | |
| 7329 | cq->ci = hl_cq_inc_ptr(cq->ci);
| 7330 | |
| 7331 | return 0; |
| 7332 | } |
| 7333 | |
| 7334 | static void gaudi2_memset_device_lbw(struct hl_device *hdev, u32 addr, u32 size, u32 val) |
| 7335 | { |
| 7336 | u32 i; |
| 7337 | |
| 7338 | for (i = 0 ; i < size ; i += sizeof(u32)) |
| 7339 | WREG32(addr + i, val); |
| 7340 | } |
| 7341 | |
| 7342 | static void gaudi2_qman_set_test_mode(struct hl_device *hdev, u32 hw_queue_id, bool enable) |
| 7343 | { |
| 7344 | u32 reg_base = gaudi2_qm_blocks_bases[hw_queue_id]; |
| 7345 | |
| 7346 | if (enable) { |
| 7347 | WREG32(reg_base + QM_GLBL_PROT_OFFSET, QMAN_MAKE_TRUSTED_TEST_MODE); |
| 7348 | WREG32(reg_base + QM_PQC_CFG_OFFSET, 0); |
| 7349 | } else { |
| 7350 | WREG32(reg_base + QM_GLBL_PROT_OFFSET, QMAN_MAKE_TRUSTED); |
| 7351 | WREG32(reg_base + QM_PQC_CFG_OFFSET, 1 << PDMA0_QM_PQC_CFG_EN_SHIFT); |
| 7352 | } |
| 7353 | } |
| 7354 | |
| 7355 | static inline u32 gaudi2_test_queue_hw_queue_id_to_sob_id(struct hl_device *hdev, u32 hw_queue_id) |
| 7356 | { |
| 7357 | return hdev->asic_prop.first_available_user_sob[0] + |
| 7358 | hw_queue_id - GAUDI2_QUEUE_ID_PDMA_0_0; |
| 7359 | } |
| 7360 | |
| 7361 | static void gaudi2_test_queue_clear(struct hl_device *hdev, u32 hw_queue_id) |
| 7362 | { |
| 7363 | u32 sob_offset = gaudi2_test_queue_hw_queue_id_to_sob_id(hdev, hw_queue_id) * 4; |
| 7364 | u32 sob_addr = mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_offset; |
| 7365 | |
| 7366 | /* Reset the SOB value */ |
| 7367 | WREG32(sob_addr, 0); |
| 7368 | } |
| 7369 | |
| 7370 | static int gaudi2_test_queue_send_msg_short(struct hl_device *hdev, u32 hw_queue_id, u32 sob_val, |
| 7371 | struct gaudi2_queues_test_info *msg_info) |
| 7372 | { |
| 7373 | u32 sob_offset = gaudi2_test_queue_hw_queue_id_to_sob_id(hdev, hw_queue_id) * 4; |
| 7374 | u32 tmp, sob_base = 1; |
| 7375 | struct packet_msg_short *msg_short_pkt = msg_info->kern_addr; |
| 7376 | size_t pkt_size = sizeof(struct packet_msg_short); |
| 7377 | int rc; |
| 7378 | |
| 7379 | tmp = (PACKET_MSG_SHORT << GAUDI2_PKT_CTL_OPCODE_SHIFT) | |
| 7380 | (1 << GAUDI2_PKT_CTL_EB_SHIFT) | |
| 7381 | (1 << GAUDI2_PKT_CTL_MB_SHIFT) | |
| 7382 | (sob_base << GAUDI2_PKT_SHORT_CTL_BASE_SHIFT) | |
| 7383 | (sob_offset << GAUDI2_PKT_SHORT_CTL_ADDR_SHIFT); |
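|      | /* This control word, together with sob_val below, forms a MSG_SHORT
|      | * packet that makes the QMAN write sob_val into the SOB at sob_offset;
|      | * gaudi2_test_queue_wait_completion() later polls that SOB for the same
|      | * value.
|      | */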
| 7384 | |
| 7385 | msg_short_pkt->value = cpu_to_le32(sob_val); |
| 7386 | msg_short_pkt->ctl = cpu_to_le32(tmp); |
| 7387 | |
| 7388 | rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, pkt_size, msg_info->dma_addr);
| 7389 | if (rc) |
| 7390 | dev_err(hdev->dev, |
| 7391 | "Failed to send msg_short packet to H/W queue %s\n" , |
| 7392 | GAUDI2_QUEUE_ID_TO_STR(hw_queue_id)); |
| 7393 | |
| 7394 | return rc; |
| 7395 | } |
| 7396 | |
| 7397 | static int gaudi2_test_queue_wait_completion(struct hl_device *hdev, u32 hw_queue_id, u32 sob_val) |
| 7398 | { |
| 7399 | u32 sob_offset = gaudi2_test_queue_hw_queue_id_to_sob_id(hdev, hw_queue_id) * 4; |
| 7400 | u32 sob_addr = mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_offset; |
| 7401 | u32 timeout_usec, tmp; |
| 7402 | int rc; |
| 7403 | |
| 7404 | if (hdev->pldm) |
| 7405 | timeout_usec = GAUDI2_PLDM_TEST_QUEUE_WAIT_USEC; |
| 7406 | else |
| 7407 | timeout_usec = GAUDI2_TEST_QUEUE_WAIT_USEC; |
| 7408 | |
| 7409 | rc = hl_poll_timeout( |
| 7410 | hdev, |
| 7411 | sob_addr, |
| 7412 | tmp, |
| 7413 | (tmp == sob_val), |
| 7414 | 1000, |
| 7415 | timeout_usec); |
| 7416 | |
| 7417 | if (rc == -ETIMEDOUT) { |
| 7418 | dev_err(hdev->dev, "H/W queue %s test failed (SOB_OBJ_0 == 0x%x)\n" , |
| 7419 | GAUDI2_QUEUE_ID_TO_STR(hw_queue_id), tmp); |
| 7420 | rc = -EIO; |
| 7421 | } |
| 7422 | |
| 7423 | return rc; |
| 7424 | } |
| 7425 | |
| 7426 | static int gaudi2_test_cpu_queue(struct hl_device *hdev) |
| 7427 | { |
| 7428 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 7429 | |
| 7430 | /* |
| 7431 | * check the capability here, as send_cpu_message() won't update the result
| 7432 | * value if the capability is not set
| 7433 | */ |
| 7434 | if (!(gaudi2->hw_cap_initialized & HW_CAP_CPU_Q)) |
| 7435 | return 0; |
| 7436 | |
| 7437 | return hl_fw_test_cpu_queue(hdev); |
| 7438 | } |
| 7439 | |
| 7440 | static int gaudi2_test_queues(struct hl_device *hdev) |
| 7441 | { |
| 7442 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 7443 | struct gaudi2_queues_test_info *msg_info; |
| 7444 | u32 sob_val = 0x5a5a; |
| 7445 | int i, rc; |
| 7446 | |
| 7447 | /* send test message on all enabled Qs */ |
| 7448 | for (i = GAUDI2_QUEUE_ID_PDMA_0_0 ; i < GAUDI2_QUEUE_ID_CPU_PQ; i++) { |
| 7449 | if (!gaudi2_is_queue_enabled(hdev, i) || gaudi2_is_edma_queue_id(i))
| 7450 | continue;
| 7451 | 
| 7452 | msg_info = &gaudi2->queues_test_info[i - GAUDI2_QUEUE_ID_PDMA_0_0];
| 7453 | gaudi2_qman_set_test_mode(hdev, i, true);
| 7454 | gaudi2_test_queue_clear(hdev, i);
| 7455 | rc = gaudi2_test_queue_send_msg_short(hdev, i, sob_val, msg_info);
| 7456 | if (rc) |
| 7457 | goto done; |
| 7458 | } |
| 7459 | |
| 7460 | rc = gaudi2_test_cpu_queue(hdev); |
| 7461 | if (rc) |
| 7462 | goto done; |
| 7463 | |
| 7464 | /* verify that all messages were processed */ |
| 7465 | for (i = GAUDI2_QUEUE_ID_PDMA_0_0 ; i < GAUDI2_QUEUE_ID_CPU_PQ; i++) { |
| 7466 | if (!gaudi2_is_queue_enabled(hdev, i) || gaudi2_is_edma_queue_id(i))
| 7467 | continue;
| 7468 | 
| 7469 | rc = gaudi2_test_queue_wait_completion(hdev, i, sob_val);
| 7470 | if (rc)
| 7471 | /* chip is not usable, no need for cleanups, just bail-out with error */
| 7472 | goto done;
| 7473 | 
| 7474 | gaudi2_test_queue_clear(hdev, i);
| 7475 | gaudi2_qman_set_test_mode(hdev, i, false);
| 7476 | } |
| 7477 | |
| 7478 | done: |
| 7479 | return rc; |
| 7480 | } |
| 7481 | |
| 7482 | static int gaudi2_compute_reset_late_init(struct hl_device *hdev) |
| 7483 | { |
| 7484 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 7485 | size_t irq_arr_size; |
| 7486 | int rc; |
| 7487 | |
| 7488 | gaudi2_init_arcs(hdev); |
| 7489 | |
| 7490 | rc = gaudi2_scrub_arcs_dccm(hdev); |
| 7491 | if (rc) { |
| 7492 | dev_err(hdev->dev, "Failed to scrub arcs DCCM\n" ); |
| 7493 | return rc; |
| 7494 | } |
| 7495 | |
| 7496 | gaudi2_init_security(hdev); |
| 7497 | |
| 7498 | /* Unmask all IRQs since some could have been received during the soft reset */ |
| 7499 | irq_arr_size = gaudi2->num_of_valid_hw_events * sizeof(gaudi2->hw_events[0]); |
| 7500 | return hl_fw_unmask_irq_arr(hdev, gaudi2->hw_events, irq_arr_size);
| 7501 | } |
| 7502 | |
| 7503 | static bool gaudi2_get_edma_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len, |
| 7504 | struct engines_data *e) |
| 7505 | { |
| 7506 | u32 qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts, dma_core_sts0, dma_core_sts1; |
| 7507 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 7508 | unsigned long *mask = (unsigned long *) mask_arr; |
| 7509 | const char *edma_fmt = "%-6d%-6d%-9s%#-14x%#-15x%#x\n";
| 7510 | bool is_idle = true, is_eng_idle; |
| 7511 | int engine_idx, i, j; |
| 7512 | u64 offset; |
| 7513 | |
| 7514 | if (e) |
| 7515 | hl_engine_data_sprintf(e, |
| 7516 | fmt: "\nCORE EDMA is_idle QM_GLBL_STS0 DMA_CORE_STS0 DMA_CORE_STS1\n" |
| 7517 | "---- ---- ------- ------------ ------------- -------------\n" ); |
| 7518 | |
| 7519 | for (i = 0; i < NUM_OF_DCORES; i++) { |
| 7520 | for (j = 0 ; j < NUM_OF_EDMA_PER_DCORE ; j++) { |
| 7521 | int seq = i * NUM_OF_EDMA_PER_DCORE + j; |
| 7522 | |
| 7523 | if (!(prop->edma_enabled_mask & BIT(seq))) |
| 7524 | continue; |
| 7525 | |
| 7526 | engine_idx = GAUDI2_DCORE0_ENGINE_ID_EDMA_0 + |
| 7527 | i * GAUDI2_ENGINE_ID_DCORE_OFFSET + j; |
| 7528 | offset = i * DCORE_OFFSET + j * DCORE_EDMA_OFFSET; |
| 7529 | |
| 7530 | dma_core_sts0 = RREG32(mmDCORE0_EDMA0_CORE_STS0 + offset); |
| 7531 | dma_core_sts1 = RREG32(mmDCORE0_EDMA0_CORE_STS1 + offset); |
| 7532 | |
| 7533 | qm_glbl_sts0 = RREG32(mmDCORE0_EDMA0_QM_GLBL_STS0 + offset); |
| 7534 | qm_glbl_sts1 = RREG32(mmDCORE0_EDMA0_QM_GLBL_STS1 + offset); |
| 7535 | qm_cgm_sts = RREG32(mmDCORE0_EDMA0_QM_CGM_STS + offset); |
| 7536 | |
| 7537 | is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts) && |
| 7538 | IS_DMA_IDLE(dma_core_sts0) && !IS_DMA_HALTED(dma_core_sts1); |
| 7539 | is_idle &= is_eng_idle; |
| 7540 | |
| 7541 | if (mask && !is_eng_idle) |
| 7542 | set_bit(engine_idx, mask);
| 7543 | |
| 7544 | if (e) |
| 7545 | hl_engine_data_sprintf(e, edma_fmt, i, j, is_eng_idle ? "Y" : "N",
| 7546 | qm_glbl_sts0, dma_core_sts0, dma_core_sts1); |
| 7547 | } |
| 7548 | } |
| 7549 | |
| 7550 | return is_idle; |
| 7551 | } |
| 7552 | |
| 7553 | static bool gaudi2_get_pdma_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len, |
| 7554 | struct engines_data *e) |
| 7555 | { |
| 7556 | u32 qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts, dma_core_sts0, dma_core_sts1; |
| 7557 | unsigned long *mask = (unsigned long *) mask_arr; |
| 7558 | const char *pdma_fmt = "%-6d%-9s%#-14x%#-15x%#x\n";
| 7559 | bool is_idle = true, is_eng_idle; |
| 7560 | int engine_idx, i; |
| 7561 | u64 offset; |
| 7562 | |
| 7563 | if (e) |
| 7564 | hl_engine_data_sprintf(e,
| 7565 | "\nPDMA is_idle QM_GLBL_STS0 DMA_CORE_STS0 DMA_CORE_STS1\n"
| 7566 | "---- ------- ------------ ------------- -------------\n");
| 7567 | |
| 7568 | for (i = 0 ; i < NUM_OF_PDMA ; i++) { |
| 7569 | engine_idx = GAUDI2_ENGINE_ID_PDMA_0 + i; |
| 7570 | offset = i * PDMA_OFFSET; |
| 7571 | dma_core_sts0 = RREG32(mmPDMA0_CORE_STS0 + offset); |
| 7572 | dma_core_sts1 = RREG32(mmPDMA0_CORE_STS1 + offset); |
| 7573 | |
| 7574 | qm_glbl_sts0 = RREG32(mmPDMA0_QM_GLBL_STS0 + offset); |
| 7575 | qm_glbl_sts1 = RREG32(mmPDMA0_QM_GLBL_STS1 + offset); |
| 7576 | qm_cgm_sts = RREG32(mmPDMA0_QM_CGM_STS + offset); |
| 7577 | |
| 7578 | is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts) && |
| 7579 | IS_DMA_IDLE(dma_core_sts0) && !IS_DMA_HALTED(dma_core_sts1); |
| 7580 | is_idle &= is_eng_idle; |
| 7581 | |
| 7582 | if (mask && !is_eng_idle) |
| 7583 | set_bit(engine_idx, mask);
| 7584 | |
| 7585 | if (e) |
| 7586 | hl_engine_data_sprintf(e, pdma_fmt, i, is_eng_idle ? "Y" : "N",
| 7587 | qm_glbl_sts0, dma_core_sts0, dma_core_sts1); |
| 7588 | } |
| 7589 | |
| 7590 | return is_idle; |
| 7591 | } |
| 7592 | |
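| | /*
| | * Each NIC macro holds two QMANs: an even port starts a new macro offset and
| | * an odd port adds NIC_QM_OFFSET on top of it. The offset is advanced even
| | * for masked-out ports so the register base stays correct.
| | */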
| 7593 | static bool gaudi2_get_nic_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len, |
| 7594 | struct engines_data *e) |
| 7595 | { |
| 7596 | unsigned long *mask = (unsigned long *) mask_arr; |
| 7597 | const char *nic_fmt = "%-5d%-9s%#-14x%#-12x\n";
| 7598 | u32 qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts; |
| 7599 | bool is_idle = true, is_eng_idle; |
| 7600 | int engine_idx, i; |
| 7601 | u64 offset = 0; |
| 7602 | |
| 7603 | /* NIC, twelve macros in a full chip */
| 7604 | if (e && hdev->nic_ports_mask) |
| 7605 | hl_engine_data_sprintf(e,
| 7606 | "\nNIC is_idle QM_GLBL_STS0 QM_CGM_STS\n"
| 7607 | "--- ------- ------------ ----------\n");
| 7608 | |
| 7609 | for (i = 0 ; i < NIC_NUMBER_OF_ENGINES ; i++) { |
| 7610 | if (!(i & 1)) |
| 7611 | offset = i / 2 * NIC_OFFSET; |
| 7612 | else |
| 7613 | offset += NIC_QM_OFFSET; |
| 7614 | |
| 7615 | if (!(hdev->nic_ports_mask & BIT(i))) |
| 7616 | continue; |
| 7617 | |
| 7618 | engine_idx = GAUDI2_ENGINE_ID_NIC0_0 + i; |
| 7619 | |
| 7621 | qm_glbl_sts0 = RREG32(mmNIC0_QM0_GLBL_STS0 + offset); |
| 7622 | qm_glbl_sts1 = RREG32(mmNIC0_QM0_GLBL_STS1 + offset); |
| 7623 | qm_cgm_sts = RREG32(mmNIC0_QM0_CGM_STS + offset); |
| 7624 | |
| 7625 | is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts); |
| 7626 | is_idle &= is_eng_idle; |
| 7627 | |
| 7628 | if (mask && !is_eng_idle) |
| 7629 | set_bit(engine_idx, mask);
| 7630 | |
| 7631 | if (e) |
| 7632 | hl_engine_data_sprintf(e, nic_fmt, i, is_eng_idle ? "Y" : "N",
| 7633 | qm_glbl_sts0, qm_cgm_sts); |
| 7634 | } |
| 7635 | |
| 7636 | return is_idle; |
| 7637 | } |
| 7638 | |
| 7639 | static bool gaudi2_get_mme_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len, |
| 7640 | struct engines_data *e) |
| 7641 | { |
| 7642 | u32 qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts, mme_arch_sts; |
| 7643 | unsigned long *mask = (unsigned long *) mask_arr; |
| 7644 | const char *mme_fmt = "%-5d%-6s%-9s%#-14x%#x\n";
| 7645 | bool is_idle = true, is_eng_idle; |
| 7646 | int engine_idx, i; |
| 7647 | u64 offset; |
| 7648 | |
| 7649 | if (e) |
| 7650 | hl_engine_data_sprintf(e,
| 7651 | "\nMME Stub is_idle QM_GLBL_STS0 MME_ARCH_STATUS\n"
| 7652 | "--- ---- ------- ------------ ---------------\n");
| 7653 | /* MME, one per Dcore */ |
| 7654 | for (i = 0 ; i < NUM_OF_DCORES ; i++) { |
| 7655 | engine_idx = GAUDI2_DCORE0_ENGINE_ID_MME + i * GAUDI2_ENGINE_ID_DCORE_OFFSET; |
| 7656 | offset = i * DCORE_OFFSET; |
| 7657 | |
| 7658 | qm_glbl_sts0 = RREG32(mmDCORE0_MME_QM_GLBL_STS0 + offset); |
| 7659 | qm_glbl_sts1 = RREG32(mmDCORE0_MME_QM_GLBL_STS1 + offset); |
| 7660 | qm_cgm_sts = RREG32(mmDCORE0_MME_QM_CGM_STS + offset); |
| 7661 | |
| 7662 | is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts); |
| 7663 | is_idle &= is_eng_idle; |
| 7664 | |
| 7665 | mme_arch_sts = RREG32(mmDCORE0_MME_CTRL_LO_ARCH_STATUS + offset); |
| 7666 | is_eng_idle &= IS_MME_IDLE(mme_arch_sts); |
| 7667 | is_idle &= is_eng_idle; |
| 7668 | |
| 7669 | if (e) |
| 7670 | hl_engine_data_sprintf(e, mme_fmt, i, "N",
| 7671 | is_eng_idle ? "Y" : "N",
| 7672 | qm_glbl_sts0, |
| 7673 | mme_arch_sts); |
| 7674 | |
| 7675 | if (mask && !is_eng_idle) |
| 7676 | set_bit(engine_idx, mask);
| 7677 | } |
| 7678 | |
| 7679 | return is_idle; |
| 7680 | } |
| 7681 | |
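| | /*
| | * Per-TPC callback for gaudi2_iterate_tpcs(). The last TPC instance on dcore 0
| | * maps to the dedicated TPC_6 engine ID; all others are derived from their
| | * dcore and instance numbers.
| | */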
| 7682 | static void gaudi2_is_tpc_engine_idle(struct hl_device *hdev, int dcore, int inst, u32 offset, |
| 7683 | struct iterate_module_ctx *ctx) |
| 7684 | { |
| 7685 | struct gaudi2_tpc_idle_data *idle_data = ctx->data; |
| 7686 | u32 tpc_cfg_sts, qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts; |
| 7687 | bool is_eng_idle; |
| 7688 | int engine_idx; |
| 7689 | |
| 7690 | if ((dcore == 0) && (inst == (NUM_DCORE0_TPC - 1))) |
| 7691 | engine_idx = GAUDI2_DCORE0_ENGINE_ID_TPC_6; |
| 7692 | else |
| 7693 | engine_idx = GAUDI2_DCORE0_ENGINE_ID_TPC_0 + |
| 7694 | dcore * GAUDI2_ENGINE_ID_DCORE_OFFSET + inst; |
| 7695 | |
| 7696 | tpc_cfg_sts = RREG32(mmDCORE0_TPC0_CFG_STATUS + offset); |
| 7697 | qm_glbl_sts0 = RREG32(mmDCORE0_TPC0_QM_GLBL_STS0 + offset); |
| 7698 | qm_glbl_sts1 = RREG32(mmDCORE0_TPC0_QM_GLBL_STS1 + offset); |
| 7699 | qm_cgm_sts = RREG32(mmDCORE0_TPC0_QM_CGM_STS + offset); |
| 7700 | |
| 7701 | is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts) && |
| 7702 | IS_TPC_IDLE(tpc_cfg_sts); |
| 7703 | *(idle_data->is_idle) &= is_eng_idle; |
| 7704 | |
| 7705 | if (idle_data->mask && !is_eng_idle) |
| 7706 | set_bit(engine_idx, idle_data->mask);
| 7707 | |
| 7708 | if (idle_data->e) |
| 7709 | hl_engine_data_sprintf(idle_data->e,
| 7710 | idle_data->tpc_fmt, dcore, inst,
| 7711 | is_eng_idle ? "Y" : "N",
| 7712 | qm_glbl_sts0, qm_cgm_sts, tpc_cfg_sts); |
| 7713 | } |
| 7714 | |
| 7715 | static bool gaudi2_get_tpc_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len, |
| 7716 | struct engines_data *e) |
| 7717 | { |
| 7718 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 7719 | unsigned long *mask = (unsigned long *) mask_arr; |
| 7720 | bool is_idle = true; |
| 7721 | |
| 7722 | struct gaudi2_tpc_idle_data tpc_idle_data = { |
| 7723 | .tpc_fmt = "%-6d%-5d%-9s%#-14x%#-12x%#x\n",
| 7724 | .e = e, |
| 7725 | .mask = mask, |
| 7726 | .is_idle = &is_idle, |
| 7727 | }; |
| 7728 | struct iterate_module_ctx tpc_iter = { |
| 7729 | .fn = &gaudi2_is_tpc_engine_idle, |
| 7730 | .data = &tpc_idle_data, |
| 7731 | }; |
| 7732 | |
| 7733 | if (e && prop->tpc_enabled_mask) |
| 7734 | hl_engine_data_sprintf(e,
| 7735 | "\nCORE TPC is_idle QM_GLBL_STS0 QM_CGM_STS STATUS\n"
| 7736 | "---- --- ------- ------------ ---------- ------\n");
| 7737 | |
| 7738 | gaudi2_iterate_tpcs(hdev, &tpc_iter);
| 7739 | |
| 7740 | return *tpc_idle_data.is_idle; |
| 7741 | } |
| 7742 | |
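| | /*
| | * Decoder idleness is derived from the VSI_CMD_SWREG15 value. The per-dcore
| | * decoders are checked first, followed by the two shared PCIe decoders.
| | */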
| 7743 | static bool gaudi2_get_decoder_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len, |
| 7744 | struct engines_data *e) |
| 7745 | { |
| 7746 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 7747 | unsigned long *mask = (unsigned long *) mask_arr; |
| 7748 | const char *pcie_dec_fmt = "%-10d%-9s%#x\n";
| 7749 | const char *dec_fmt = "%-6d%-5d%-9s%#x\n";
| 7750 | bool is_idle = true, is_eng_idle; |
| 7751 | u32 dec_swreg15, dec_enabled_bit; |
| 7752 | int engine_idx, i, j; |
| 7753 | u64 offset; |
| 7754 | |
| 7755 | /* Decoders, two per Dcore and two shared PCIe decoders */
| 7756 | if (e && (prop->decoder_enabled_mask & (~PCIE_DEC_EN_MASK))) |
| 7757 | hl_engine_data_sprintf(e,
| 7758 | "\nCORE DEC is_idle VSI_CMD_SWREG15\n"
| 7759 | "---- --- ------- ---------------\n");
| 7760 | |
| 7761 | for (i = 0 ; i < NUM_OF_DCORES ; i++) { |
| 7762 | for (j = 0 ; j < NUM_OF_DEC_PER_DCORE ; j++) { |
| 7763 | dec_enabled_bit = 1 << (i * NUM_OF_DEC_PER_DCORE + j); |
| 7764 | if (!(prop->decoder_enabled_mask & dec_enabled_bit)) |
| 7765 | continue; |
| 7766 | |
| 7767 | engine_idx = GAUDI2_DCORE0_ENGINE_ID_DEC_0 + |
| 7768 | i * GAUDI2_ENGINE_ID_DCORE_OFFSET + j; |
| 7769 | offset = i * DCORE_OFFSET + j * DCORE_DEC_OFFSET; |
| 7770 | |
| 7771 | dec_swreg15 = RREG32(mmDCORE0_DEC0_CMD_SWREG15 + offset); |
| 7772 | is_eng_idle = IS_DEC_IDLE(dec_swreg15); |
| 7773 | is_idle &= is_eng_idle; |
| 7774 | |
| 7775 | if (mask && !is_eng_idle) |
| 7776 | set_bit(engine_idx, mask);
| 7777 | |
| 7778 | if (e) |
| 7779 | hl_engine_data_sprintf(e, dec_fmt, i, j,
| 7780 | is_eng_idle ? "Y" : "N", dec_swreg15);
| 7781 | } |
| 7782 | } |
| 7783 | |
| 7784 | if (e && (prop->decoder_enabled_mask & PCIE_DEC_EN_MASK)) |
| 7785 | hl_engine_data_sprintf(e,
| 7786 | "\nPCIe DEC is_idle VSI_CMD_SWREG15\n"
| 7787 | "-------- ------- ---------------\n");
| 7788 | |
| 7789 | /* Check the shared (PCIe) decoders */
| 7790 | for (i = 0 ; i < NUM_OF_DEC_PER_DCORE ; i++) { |
| 7791 | dec_enabled_bit = PCIE_DEC_SHIFT + i; |
| 7792 | if (!(prop->decoder_enabled_mask & BIT(dec_enabled_bit))) |
| 7793 | continue; |
| 7794 | |
| 7795 | engine_idx = GAUDI2_PCIE_ENGINE_ID_DEC_0 + i; |
| 7796 | offset = i * DCORE_DEC_OFFSET; |
| 7797 | dec_swreg15 = RREG32(mmPCIE_DEC0_CMD_SWREG15 + offset); |
| 7798 | is_eng_idle = IS_DEC_IDLE(dec_swreg15); |
| 7799 | is_idle &= is_eng_idle; |
| 7800 | |
| 7801 | if (mask && !is_eng_idle) |
| 7802 | set_bit(engine_idx, mask);
| 7803 | |
| 7804 | if (e) |
| 7805 | hl_engine_data_sprintf(e, pcie_dec_fmt, i,
| 7806 | is_eng_idle ? "Y" : "N", dec_swreg15);
| 7807 | } |
| 7808 | |
| 7809 | return is_idle; |
| 7810 | } |
| 7811 | |
| 7812 | static bool gaudi2_get_rotator_idle_status(struct hl_device *hdev, u64 *mask_arr, u8 mask_len, |
| 7813 | struct engines_data *e) |
| 7814 | { |
| 7815 | const char *rot_fmt = "%-6d%-5d%-9s%#-14x%#-14x%#x\n";
| 7816 | unsigned long *mask = (unsigned long *) mask_arr; |
| 7817 | u32 qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts; |
| 7818 | bool is_idle = true, is_eng_idle; |
| 7819 | int engine_idx, i; |
| 7820 | u64 offset; |
| 7821 | |
| 7822 | if (e) |
| 7823 | hl_engine_data_sprintf(e,
| 7824 | "\nCORE ROT is_idle QM_GLBL_STS0 QM_GLBL_STS1 QM_CGM_STS\n"
| 7825 | "---- --- ------- ------------ ------------ ----------\n");
| 7826 | |
| 7827 | for (i = 0 ; i < NUM_OF_ROT ; i++) { |
| 7828 | engine_idx = GAUDI2_ENGINE_ID_ROT_0 + i; |
| 7829 | |
| 7830 | offset = i * ROT_OFFSET; |
| 7831 | |
| 7832 | qm_glbl_sts0 = RREG32(mmROT0_QM_GLBL_STS0 + offset); |
| 7833 | qm_glbl_sts1 = RREG32(mmROT0_QM_GLBL_STS1 + offset); |
| 7834 | qm_cgm_sts = RREG32(mmROT0_QM_CGM_STS + offset); |
| 7835 | |
| 7836 | is_eng_idle = IS_QM_IDLE(qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts); |
| 7837 | is_idle &= is_eng_idle; |
| 7838 | |
| 7839 | if (mask && !is_eng_idle) |
| 7840 | set_bit(engine_idx, mask);
| 7841 | |
| 7842 | if (e) |
| 7843 | hl_engine_data_sprintf(e, rot_fmt, i, 0, is_eng_idle ? "Y" : "N",
| 7844 | qm_glbl_sts0, qm_glbl_sts1, qm_cgm_sts); |
| 7845 | } |
| 7846 | |
| 7847 | return is_idle; |
| 7848 | } |
| 7849 | |
| 7850 | static bool gaudi2_is_device_idle(struct hl_device *hdev, u64 *mask_arr, u8 mask_len, |
| 7851 | struct engines_data *e) |
| 7852 | { |
| 7853 | bool is_idle = true; |
| 7854 | |
| 7855 | is_idle &= gaudi2_get_edma_idle_status(hdev, mask_arr, mask_len, e); |
| 7856 | is_idle &= gaudi2_get_pdma_idle_status(hdev, mask_arr, mask_len, e); |
| 7857 | is_idle &= gaudi2_get_nic_idle_status(hdev, mask_arr, mask_len, e); |
| 7858 | is_idle &= gaudi2_get_mme_idle_status(hdev, mask_arr, mask_len, e); |
| 7859 | is_idle &= gaudi2_get_tpc_idle_status(hdev, mask_arr, mask_len, e); |
| 7860 | is_idle &= gaudi2_get_decoder_idle_status(hdev, mask_arr, mask_len, e); |
| 7861 | is_idle &= gaudi2_get_rotator_idle_status(hdev, mask_arr, mask_len, e); |
| 7862 | |
| 7863 | return is_idle; |
| 7864 | } |
| 7865 | |
| 7866 | static void gaudi2_hw_queues_lock(struct hl_device *hdev) |
| 7867 | __acquires(&gaudi2->hw_queues_lock) |
| 7868 | { |
| 7869 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 7870 | |
| 7871 | spin_lock(&gaudi2->hw_queues_lock);
| 7872 | } |
| 7873 | |
| 7874 | static void gaudi2_hw_queues_unlock(struct hl_device *hdev) |
| 7875 | __releases(&gaudi2->hw_queues_lock) |
| 7876 | { |
| 7877 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 7878 | |
| 7879 | spin_unlock(&gaudi2->hw_queues_lock);
| 7880 | } |
| 7881 | |
| 7882 | static u32 gaudi2_get_pci_id(struct hl_device *hdev) |
| 7883 | { |
| 7884 | return hdev->pdev->device; |
| 7885 | } |
| 7886 | |
| 7887 | static int gaudi2_get_eeprom_data(struct hl_device *hdev, void *data, size_t max_size) |
| 7888 | { |
| 7889 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 7890 | |
| 7891 | if (!(gaudi2->hw_cap_initialized & HW_CAP_CPU_Q)) |
| 7892 | return 0; |
| 7893 | |
| 7894 | return hl_fw_get_eeprom_data(hdev, data, max_size); |
| 7895 | } |
| 7896 | |
| 7897 | static void gaudi2_update_eq_ci(struct hl_device *hdev, u32 val) |
| 7898 | { |
| 7899 | WREG32(mmCPU_IF_EQ_RD_OFFS, val); |
| 7900 | } |
| 7901 | |
| 7902 | static void *gaudi2_get_events_stat(struct hl_device *hdev, bool aggregate, u32 *size) |
| 7903 | { |
| 7904 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 7905 | |
| 7906 | if (aggregate) { |
| 7907 | *size = (u32) sizeof(gaudi2->events_stat_aggregate); |
| 7908 | return gaudi2->events_stat_aggregate; |
| 7909 | } |
| 7910 | |
| 7911 | *size = (u32) sizeof(gaudi2->events_stat); |
| 7912 | return gaudi2->events_stat; |
| 7913 | } |
| 7914 | |
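| | /*
| | * Configure MMU bypass and ASID for a single dcore video decoder, covering
| | * the decoder AXUSER interface and all of its MSI-X interfaces.
| | */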
| 7915 | static void gaudi2_mmu_vdec_dcore_prepare(struct hl_device *hdev, int dcore_id, |
| 7916 | int dcore_vdec_id, u32 rw_asid, u32 rw_mmu_bp) |
| 7917 | { |
| 7918 | u32 offset = (mmDCORE0_VDEC1_BRDG_CTRL_BASE - mmDCORE0_VDEC0_BRDG_CTRL_BASE) * |
| 7919 | dcore_vdec_id + DCORE_OFFSET * dcore_id; |
| 7920 | |
| 7921 | WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_MMU_BP + offset, rw_mmu_bp); |
| 7922 | WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_ASID + offset, rw_asid); |
| 7923 | |
| 7924 | WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_MMU_BP + offset, rw_mmu_bp); |
| 7925 | WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_ASID + offset, rw_asid); |
| 7926 | |
| 7927 | WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_MMU_BP + offset, rw_mmu_bp); |
| 7928 | WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_ASID + offset, rw_asid); |
| 7929 | |
| 7930 | WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_MMU_BP + offset, rw_mmu_bp); |
| 7931 | WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_ASID + offset, rw_asid); |
| 7932 | |
| 7933 | WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_MMU_BP + offset, rw_mmu_bp); |
| 7934 | WREG32(mmDCORE0_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_ASID + offset, rw_asid); |
| 7935 | } |
| 7936 | |
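| | /*
| | * Set the ASID (with MMU bypass cleared) for all engines of a single dcore:
| | * the enabled EDMAs, the sync manager, the MME QM/SBTE/WB ports and the
| | * enabled decoders.
| | */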
| 7937 | static void gaudi2_mmu_dcore_prepare(struct hl_device *hdev, int dcore_id, u32 asid) |
| 7938 | { |
| 7939 | u32 rw_asid = (asid << ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_RD_SHIFT) | |
| 7940 | (asid << ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_WR_SHIFT); |
| 7941 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 7942 | u32 dcore_offset = dcore_id * DCORE_OFFSET; |
| 7943 | u32 vdec_id, i, ports_offset, reg_val; |
| 7944 | u8 edma_seq_base; |
| 7945 | |
| 7946 | /* EDMA */ |
| 7947 | edma_seq_base = dcore_id * NUM_OF_EDMA_PER_DCORE; |
| 7948 | if (prop->edma_enabled_mask & BIT(edma_seq_base)) { |
| 7949 | WREG32(mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_HB_MMU_BP + dcore_offset, 0); |
| 7950 | WREG32(mmDCORE0_EDMA0_QM_AXUSER_NONSECURED_HB_ASID + dcore_offset, rw_asid); |
| 7951 | WREG32(mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_MMU_BP + dcore_offset, 0); |
| 7952 | WREG32(mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_ASID + dcore_offset, rw_asid); |
| 7953 | } |
| 7954 | |
| 7955 | if (prop->edma_enabled_mask & BIT(edma_seq_base + 1)) { |
| 7956 | WREG32(mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_HB_MMU_BP + dcore_offset, 0); |
| 7957 | WREG32(mmDCORE0_EDMA1_QM_AXUSER_NONSECURED_HB_ASID + dcore_offset, rw_asid); |
| 7958 | WREG32(mmDCORE0_EDMA1_CORE_CTX_AXUSER_HB_ASID + dcore_offset, rw_asid); |
| 7959 | WREG32(mmDCORE0_EDMA1_CORE_CTX_AXUSER_HB_MMU_BP + dcore_offset, 0); |
| 7960 | } |
| 7961 | |
| 7962 | /* Sync Mngr */ |
| 7963 | WREG32(mmDCORE0_SYNC_MNGR_GLBL_ASID_NONE_SEC_PRIV + dcore_offset, asid); |
| 7964 | /* |
| 7965 | * Sync Mngrs on dcores 1 - 3 are exposed to the user, so they must use the
| 7966 | * user ASID for any access type
| 7967 | */ |
| 7968 | if (dcore_id > 0) { |
| 7969 | reg_val = (asid << DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_ASID_RD_SHIFT) | |
| 7970 | (asid << DCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_ASID_WR_SHIFT); |
| 7971 | WREG32(mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_ASID + dcore_offset, reg_val); |
| 7972 | WREG32(mmDCORE0_SYNC_MNGR_MSTR_IF_AXUSER_HB_MMU_BP + dcore_offset, 0); |
| 7973 | } |
| 7974 | |
| 7975 | WREG32(mmDCORE0_MME_CTRL_LO_MME_AXUSER_HB_MMU_BP + dcore_offset, 0); |
| 7976 | WREG32(mmDCORE0_MME_CTRL_LO_MME_AXUSER_HB_ASID + dcore_offset, rw_asid); |
| 7977 | |
| 7978 | for (i = 0 ; i < NUM_OF_MME_SBTE_PORTS ; i++) { |
| 7979 | ports_offset = i * DCORE_MME_SBTE_OFFSET; |
| 7980 | WREG32(mmDCORE0_MME_SBTE0_MSTR_IF_AXUSER_HB_MMU_BP + |
| 7981 | dcore_offset + ports_offset, 0); |
| 7982 | WREG32(mmDCORE0_MME_SBTE0_MSTR_IF_AXUSER_HB_ASID + |
| 7983 | dcore_offset + ports_offset, rw_asid); |
| 7984 | } |
| 7985 | |
| 7986 | for (i = 0 ; i < NUM_OF_MME_WB_PORTS ; i++) { |
| 7987 | ports_offset = i * DCORE_MME_WB_OFFSET; |
| 7988 | WREG32(mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_MMU_BP + |
| 7989 | dcore_offset + ports_offset, 0); |
| 7990 | WREG32(mmDCORE0_MME_WB0_MSTR_IF_AXUSER_HB_ASID + |
| 7991 | dcore_offset + ports_offset, rw_asid); |
| 7992 | } |
| 7993 | |
| 7994 | WREG32(mmDCORE0_MME_QM_AXUSER_NONSECURED_HB_MMU_BP + dcore_offset, 0); |
| 7995 | WREG32(mmDCORE0_MME_QM_AXUSER_NONSECURED_HB_ASID + dcore_offset, rw_asid); |
| 7996 | |
| 7997 | /* |
| 7998 | * Decoders |
| 7999 | */ |
| 8000 | for (vdec_id = 0 ; vdec_id < NUM_OF_DEC_PER_DCORE ; vdec_id++) { |
| 8001 | if (prop->decoder_enabled_mask & BIT(dcore_id * NUM_OF_DEC_PER_DCORE + vdec_id)) |
| 8002 | gaudi2_mmu_vdec_dcore_prepare(hdev, dcore_id, vdec_id, rw_asid, 0);
| 8003 | } |
| 8004 | } |
| 8005 | |
| 8006 | static void gudi2_mmu_vdec_shared_prepare(struct hl_device *hdev, |
| 8007 | int shared_vdec_id, u32 rw_asid, u32 rw_mmu_bp) |
| 8008 | { |
| 8009 | u32 offset = (mmPCIE_VDEC1_BRDG_CTRL_BASE - mmPCIE_VDEC0_BRDG_CTRL_BASE) * shared_vdec_id; |
| 8010 | |
| 8011 | WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_MMU_BP + offset, rw_mmu_bp); |
| 8012 | WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_DEC_HB_ASID + offset, rw_asid); |
| 8013 | |
| 8014 | WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_MMU_BP + offset, rw_mmu_bp); |
| 8015 | WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_ABNRM_HB_ASID + offset, rw_asid); |
| 8016 | |
| 8017 | WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_MMU_BP + offset, rw_mmu_bp); |
| 8018 | WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_L2C_HB_ASID + offset, rw_asid); |
| 8019 | |
| 8020 | WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_MMU_BP + offset, rw_mmu_bp); |
| 8021 | WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_NRM_HB_ASID + offset, rw_asid); |
| 8022 | |
| 8023 | WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_MMU_BP + offset, rw_mmu_bp); |
| 8024 | WREG32(mmPCIE_VDEC0_BRDG_CTRL_AXUSER_MSIX_VCD_HB_ASID + offset, rw_asid); |
| 8025 | } |
| 8026 | |
| 8027 | static void gudi2_mmu_arc_farm_arc_dup_eng_prepare(struct hl_device *hdev, int arc_farm_id, |
| 8028 | u32 rw_asid, u32 rw_mmu_bp) |
| 8029 | { |
| 8030 | u32 offset = (mmARC_FARM_ARC1_DUP_ENG_BASE - mmARC_FARM_ARC0_DUP_ENG_BASE) * arc_farm_id; |
| 8031 | |
| 8032 | WREG32(mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_MMU_BP + offset, rw_mmu_bp); |
| 8033 | WREG32(mmARC_FARM_ARC0_DUP_ENG_AXUSER_HB_ASID + offset, rw_asid); |
| 8034 | } |
| 8035 | |
| 8036 | static void gaudi2_arc_mmu_prepare(struct hl_device *hdev, u32 cpu_id, u32 asid) |
| 8037 | { |
| 8038 | u32 reg_base, reg_offset, reg_val = 0; |
| 8039 | |
| 8040 | reg_base = gaudi2_arc_blocks_bases[cpu_id]; |
| 8041 | |
| 8042 | /* Enable MMU and configure asid for all relevant ARC regions */ |
| 8043 | reg_val = FIELD_PREP(ARC_FARM_ARC0_AUX_ARC_REGION_CFG_MMU_BP_MASK, 0); |
| 8044 | reg_val |= FIELD_PREP(ARC_FARM_ARC0_AUX_ARC_REGION_CFG_0_ASID_MASK, asid); |
| 8045 | |
| 8046 | reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION3_GENERAL); |
| 8047 | WREG32(reg_base + reg_offset, reg_val); |
| 8048 | |
| 8049 | reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION4_HBM0_FW); |
| 8050 | WREG32(reg_base + reg_offset, reg_val); |
| 8051 | |
| 8052 | reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION5_HBM1_GC_DATA); |
| 8053 | WREG32(reg_base + reg_offset, reg_val); |
| 8054 | |
| 8055 | reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION6_HBM2_GC_DATA); |
| 8056 | WREG32(reg_base + reg_offset, reg_val); |
| 8057 | |
| 8058 | reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION7_HBM3_GC_DATA); |
| 8059 | WREG32(reg_base + reg_offset, reg_val); |
| 8060 | |
| 8061 | reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION9_PCIE); |
| 8062 | WREG32(reg_base + reg_offset, reg_val); |
| 8063 | |
| 8064 | reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION10_GENERAL); |
| 8065 | WREG32(reg_base + reg_offset, reg_val); |
| 8066 | |
| 8067 | reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION11_GENERAL); |
| 8068 | WREG32(reg_base + reg_offset, reg_val); |
| 8069 | |
| 8070 | reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION12_GENERAL); |
| 8071 | WREG32(reg_base + reg_offset, reg_val); |
| 8072 | |
| 8073 | reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION13_GENERAL); |
| 8074 | WREG32(reg_base + reg_offset, reg_val); |
| 8075 | |
| 8076 | reg_offset = ARC_REGION_CFG_OFFSET(ARC_REGION14_GENERAL); |
| 8077 | WREG32(reg_base + reg_offset, reg_val); |
| 8078 | } |
| 8079 | |
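| | /*
| | * When the boot CPU f/w is in use, the engine-cores ASID is set through
| | * CPUCP. Otherwise, configure the scheduler ARCs and the ARC of every
| | * enabled queue directly; queues are walked in steps of 4 since the 4
| | * streams of a QMAN share a single ARC.
| | */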
| 8080 | static int gaudi2_arc_mmu_prepare_all(struct hl_device *hdev, u32 asid) |
| 8081 | { |
| 8082 | int i; |
| 8083 | |
| 8084 | if (hdev->fw_components & FW_TYPE_BOOT_CPU) |
| 8085 | return hl_fw_cpucp_engine_core_asid_set(hdev, asid); |
| 8086 | |
| 8087 | for (i = CPU_ID_SCHED_ARC0 ; i < NUM_OF_ARC_FARMS_ARC ; i++) |
| 8088 | gaudi2_arc_mmu_prepare(hdev, i, asid);
| 8089 | |
| 8090 | for (i = GAUDI2_QUEUE_ID_PDMA_0_0 ; i < GAUDI2_QUEUE_ID_CPU_PQ ; i += 4) { |
| 8091 | if (!gaudi2_is_queue_enabled(hdev, i))
| 8092 | continue; |
| 8093 | |
| 8094 | gaudi2_arc_mmu_prepare(hdev, gaudi2_queue_id_to_arc_id[i], asid);
| 8095 | } |
| 8096 | |
| 8097 | return 0; |
| 8098 | } |
| 8099 | |
| 8100 | static int gaudi2_mmu_shared_prepare(struct hl_device *hdev, u32 asid) |
| 8101 | { |
| 8102 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 8103 | u32 rw_asid, offset; |
| 8104 | int rc, i; |
| 8105 | |
| 8106 | rw_asid = FIELD_PREP(ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_RD_MASK, asid) | |
| 8107 | FIELD_PREP(ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_WR_MASK, asid); |
| 8108 | |
| 8109 | WREG32(mmPDMA0_QM_AXUSER_NONSECURED_HB_ASID, rw_asid); |
| 8110 | WREG32(mmPDMA0_QM_AXUSER_NONSECURED_HB_MMU_BP, 0); |
| 8111 | WREG32(mmPDMA0_CORE_CTX_AXUSER_HB_ASID, rw_asid); |
| 8112 | WREG32(mmPDMA0_CORE_CTX_AXUSER_HB_MMU_BP, 0); |
| 8113 | |
| 8114 | WREG32(mmPDMA1_QM_AXUSER_NONSECURED_HB_ASID, rw_asid); |
| 8115 | WREG32(mmPDMA1_QM_AXUSER_NONSECURED_HB_MMU_BP, 0); |
| 8116 | WREG32(mmPDMA1_CORE_CTX_AXUSER_HB_ASID, rw_asid); |
| 8117 | WREG32(mmPDMA1_CORE_CTX_AXUSER_HB_MMU_BP, 0); |
| 8118 | |
| 8119 | /* ROT */ |
| 8120 | for (i = 0 ; i < NUM_OF_ROT ; i++) { |
| 8121 | offset = i * ROT_OFFSET; |
| 8122 | WREG32(mmROT0_QM_AXUSER_NONSECURED_HB_ASID + offset, rw_asid); |
| 8123 | WREG32(mmROT0_QM_AXUSER_NONSECURED_HB_MMU_BP + offset, 0); |
| 8124 | RMWREG32(mmROT0_CPL_QUEUE_AWUSER + offset, asid, MMUBP_ASID_MASK); |
| 8125 | RMWREG32(mmROT0_DESC_HBW_ARUSER_LO + offset, asid, MMUBP_ASID_MASK); |
| 8126 | RMWREG32(mmROT0_DESC_HBW_AWUSER_LO + offset, asid, MMUBP_ASID_MASK); |
| 8127 | } |
| 8128 | |
| 8129 | /* Shared Decoders are the last bits in the decoders mask */ |
| 8130 | if (prop->decoder_enabled_mask & BIT(NUM_OF_DCORES * NUM_OF_DEC_PER_DCORE + 0)) |
| 8131 | gudi2_mmu_vdec_shared_prepare(hdev, 0, rw_asid, 0);
| 8132 | |
| 8133 | if (prop->decoder_enabled_mask & BIT(NUM_OF_DCORES * NUM_OF_DEC_PER_DCORE + 1)) |
| 8134 | gudi2_mmu_vdec_shared_prepare(hdev, 1, rw_asid, 0);
| 8135 | |
| 8136 | /* arc farm arc dup eng */ |
| 8137 | for (i = 0 ; i < NUM_OF_ARC_FARMS_ARC ; i++) |
| 8138 | gudi2_mmu_arc_farm_arc_dup_eng_prepare(hdev, i, rw_asid, 0);
| 8139 | |
| 8140 | rc = gaudi2_arc_mmu_prepare_all(hdev, asid); |
| 8141 | if (rc) |
| 8142 | return rc; |
| 8143 | |
| 8144 | return 0; |
| 8145 | } |
| 8146 | |
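| | /* Per-TPC callback: clear MMU bypass and set the ASID for the TPC CFG and QM */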
| 8147 | static void gaudi2_tpc_mmu_prepare(struct hl_device *hdev, int dcore, int inst, u32 offset, |
| 8148 | struct iterate_module_ctx *ctx) |
| 8149 | { |
| 8150 | struct gaudi2_tpc_mmu_data *mmu_data = ctx->data; |
| 8151 | |
| 8152 | WREG32(mmDCORE0_TPC0_CFG_AXUSER_HB_MMU_BP + offset, 0); |
| 8153 | WREG32(mmDCORE0_TPC0_CFG_AXUSER_HB_ASID + offset, mmu_data->rw_asid); |
| 8154 | WREG32(mmDCORE0_TPC0_QM_AXUSER_NONSECURED_HB_MMU_BP + offset, 0); |
| 8155 | WREG32(mmDCORE0_TPC0_QM_AXUSER_NONSECURED_HB_ASID + offset, mmu_data->rw_asid); |
| 8156 | } |
| 8157 | |
| 8158 | /* zero the MMUBP and set the ASID */ |
| 8159 | static int gaudi2_mmu_prepare(struct hl_device *hdev, u32 asid) |
| 8160 | { |
| 8161 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 8162 | struct gaudi2_tpc_mmu_data tpc_mmu_data; |
| 8163 | struct iterate_module_ctx tpc_iter = { |
| 8164 | .fn = &gaudi2_tpc_mmu_prepare, |
| 8165 | .data = &tpc_mmu_data, |
| 8166 | }; |
| 8167 | int rc, i; |
| 8168 | |
| 8169 | if (asid & ~DCORE0_HMMU0_STLB_ASID_ASID_MASK) { |
| 8170 | dev_crit(hdev->dev, "asid %u is too big\n", asid);
| 8171 | return -EINVAL; |
| 8172 | } |
| 8173 | |
| 8174 | if (!(gaudi2->hw_cap_initialized & HW_CAP_MMU_MASK)) |
| 8175 | return 0; |
| 8176 | |
| 8177 | rc = gaudi2_mmu_shared_prepare(hdev, asid); |
| 8178 | if (rc) |
| 8179 | return rc; |
| 8180 | |
| 8181 | /* configure DCORE MMUs */ |
| 8182 | tpc_mmu_data.rw_asid = (asid << ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_RD_SHIFT) | |
| 8183 | (asid << ARC_FARM_KDMA_CTX_AXUSER_HB_ASID_WR_SHIFT); |
| 8184 | gaudi2_iterate_tpcs(hdev, &tpc_iter);
| 8185 | for (i = 0 ; i < NUM_OF_DCORES ; i++) |
| 8186 | gaudi2_mmu_dcore_prepare(hdev, i, asid);
| 8187 | |
| 8188 | return 0; |
| 8189 | } |
| 8190 | |
| 8191 | static inline bool is_info_event(u32 event) |
| 8192 | { |
| 8193 | switch (event) { |
| 8194 | case GAUDI2_EVENT_CPU_CPLD_SHUTDOWN_CAUSE: |
| 8195 | case GAUDI2_EVENT_CPU_FIX_POWER_ENV_S ... GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_E: |
| 8196 | case GAUDI2_EVENT_ARC_PWR_BRK_ENTRY ... GAUDI2_EVENT_ARC_PWR_RD_MODE3: |
| 8197 | |
| 8198 | /* return in case of NIC status event - these events are received periodically and not as |
| 8199 | * an indication of an error.
| 8200 | */ |
| 8201 | case GAUDI2_EVENT_CPU0_STATUS_NIC0_ENG0 ... GAUDI2_EVENT_CPU11_STATUS_NIC11_ENG1: |
| 8202 | case GAUDI2_EVENT_ARC_EQ_HEARTBEAT: |
| 8203 | return true; |
| 8204 | default: |
| 8205 | return false; |
| 8206 | } |
| 8207 | } |
| 8208 | |
| 8209 | static void gaudi2_print_event(struct hl_device *hdev, u16 event_type, |
| 8210 | bool ratelimited, const char *fmt, ...) |
| 8211 | { |
| 8212 | struct va_format vaf; |
| 8213 | va_list args; |
| 8214 | |
| 8215 | va_start(args, fmt); |
| 8216 | vaf.fmt = fmt; |
| 8217 | vaf.va = &args; |
| 8218 | |
| 8219 | if (ratelimited) |
| 8220 | dev_err_ratelimited(hdev->dev, "%s: %pV\n",
| 8221 | gaudi2_irq_map_table[event_type].valid ?
| 8222 | gaudi2_irq_map_table[event_type].name : "N/A Event", &vaf);
| 8223 | else |
| 8224 | dev_err(hdev->dev, "%s: %pV\n",
| 8225 | gaudi2_irq_map_table[event_type].valid ?
| 8226 | gaudi2_irq_map_table[event_type].name : "N/A Event", &vaf);
| 8227 | |
| 8228 | va_end(args); |
| 8229 | } |
| 8230 | |
| 8231 | static bool gaudi2_handle_ecc_event(struct hl_device *hdev, u16 event_type, |
| 8232 | struct hl_eq_ecc_data *ecc_data) |
| 8233 | { |
| 8234 | u64 ecc_address = 0, ecc_syndrome = 0; |
| 8235 | u8 memory_wrapper_idx = 0; |
| 8236 | bool has_block_id = false; |
| 8237 | u16 block_id; |
| 8238 | |
| 8239 | if (hl_fw_version_cmp(hdev, 1, 12, 0) >= 0)
| 8240 | has_block_id = true; |
| 8241 | |
| 8242 | ecc_address = le64_to_cpu(ecc_data->ecc_address); |
| 8243 | ecc_syndrome = le64_to_cpu(ecc_data->ecc_syndrom); |
| 8244 | memory_wrapper_idx = ecc_data->memory_wrapper_idx; |
| 8245 | |
| 8246 | if (has_block_id) { |
| 8247 | block_id = le16_to_cpu(ecc_data->block_id); |
| 8248 | gaudi2_print_event(hdev, event_type, !ecc_data->is_critical,
| 8249 | "ECC error detected. address: %#llx. Syndrome: %#llx. wrapper id %u. block id %#x. critical %u.",
| 8250 | ecc_address, ecc_syndrome, memory_wrapper_idx, block_id, |
| 8251 | ecc_data->is_critical); |
| 8252 | } else { |
| 8253 | gaudi2_print_event(hdev, event_type, !ecc_data->is_critical,
| 8254 | "ECC error detected. address: %#llx. Syndrome: %#llx. wrapper id %u. critical %u.",
| 8255 | ecc_address, ecc_syndrome, memory_wrapper_idx, ecc_data->is_critical); |
| 8256 | } |
| 8257 | |
| 8258 | return !!ecc_data->is_critical; |
| 8259 | } |
| 8260 | |
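| | /*
| | * On a lower-CP error, report which CQ was in use (legacy or ARC) together
| | * with its pointer and size plus the currently executing instruction, and
| | * capture that state into the undefined-opcode info if capturing is enabled.
| | */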
| 8261 | static void handle_lower_qman_data_on_err(struct hl_device *hdev, u64 qman_base, u32 engine_id) |
| 8262 | { |
| 8263 | struct undefined_opcode_info *undef_opcode = &hdev->captured_err_info.undef_opcode; |
| 8264 | u64 cq_ptr, cp_current_inst; |
| 8265 | u32 lo, hi, cq_size, cp_sts; |
| 8266 | bool is_arc_cq; |
| 8267 | |
| 8268 | cp_sts = RREG32(qman_base + QM_CP_STS_4_OFFSET); |
| 8269 | is_arc_cq = FIELD_GET(PDMA0_QM_CP_STS_CUR_CQ_MASK, cp_sts); /* 0 - legacy CQ, 1 - ARC_CQ */ |
| 8270 | |
| 8271 | if (is_arc_cq) { |
| 8272 | lo = RREG32(qman_base + QM_ARC_CQ_PTR_LO_STS_OFFSET); |
| 8273 | hi = RREG32(qman_base + QM_ARC_CQ_PTR_HI_STS_OFFSET); |
| 8274 | cq_ptr = ((u64) hi) << 32 | lo; |
| 8275 | cq_size = RREG32(qman_base + QM_ARC_CQ_TSIZE_STS_OFFSET); |
| 8276 | } else { |
| 8277 | lo = RREG32(qman_base + QM_CQ_PTR_LO_STS_4_OFFSET); |
| 8278 | hi = RREG32(qman_base + QM_CQ_PTR_HI_STS_4_OFFSET); |
| 8279 | cq_ptr = ((u64) hi) << 32 | lo; |
| 8280 | cq_size = RREG32(qman_base + QM_CQ_TSIZE_STS_4_OFFSET); |
| 8281 | } |
| 8282 | |
| 8283 | lo = RREG32(qman_base + QM_CP_CURRENT_INST_LO_4_OFFSET); |
| 8284 | hi = RREG32(qman_base + QM_CP_CURRENT_INST_HI_4_OFFSET); |
| 8285 | cp_current_inst = ((u64) hi) << 32 | lo; |
| 8286 | |
| 8287 | dev_info(hdev->dev, |
| 8288 | "LowerQM. %sCQ: {ptr %#llx, size %u}, CP: {instruction %#018llx}\n" , |
| 8289 | is_arc_cq ? "ARC_" : "" , cq_ptr, cq_size, cp_current_inst); |
| 8290 | |
| 8291 | if (undef_opcode->write_enable) { |
| 8292 | memset(undef_opcode, 0, sizeof(*undef_opcode)); |
| 8293 | undef_opcode->timestamp = ktime_get(); |
| 8294 | undef_opcode->cq_addr = cq_ptr; |
| 8295 | undef_opcode->cq_size = cq_size; |
| 8296 | undef_opcode->engine_id = engine_id; |
| 8297 | undef_opcode->stream_id = QMAN_STREAMS; |
| 8298 | undef_opcode->write_enable = 0; |
| 8299 | } |
| 8300 | } |
| 8301 | |
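| | /*
| | * Walk the GLBL_ERR_STS register of each upper-CP stream and of the lower CP,
| | * print and count every set error cause, then do the same for the QM ARB
| | * error cause register. Returns the total number of errors found.
| | */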
| 8302 | static int gaudi2_handle_qman_err_generic(struct hl_device *hdev, u16 event_type, |
| 8303 | u64 qman_base, u32 qid_base, u64 *event_mask) |
| 8304 | { |
| 8305 | u32 i, j, glbl_sts_val, arb_err_val, num_error_causes, error_count = 0; |
| 8306 | u64 glbl_sts_addr, arb_err_addr; |
| 8307 | char reg_desc[32]; |
| 8308 | |
| 8309 | glbl_sts_addr = qman_base + (mmDCORE0_TPC0_QM_GLBL_ERR_STS_0 - mmDCORE0_TPC0_QM_BASE); |
| 8310 | arb_err_addr = qman_base + (mmDCORE0_TPC0_QM_ARB_ERR_CAUSE - mmDCORE0_TPC0_QM_BASE); |
| 8311 | |
| 8312 | /* Iterate through all stream GLBL_ERR_STS registers + Lower CP */ |
| 8313 | for (i = 0 ; i < QMAN_STREAMS + 1 ; i++) { |
| 8314 | glbl_sts_val = RREG32(glbl_sts_addr + 4 * i); |
| 8315 | |
| 8316 | if (!glbl_sts_val) |
| 8317 | continue; |
| 8318 | |
| 8319 | if (i == QMAN_STREAMS) { |
| 8320 | snprintf(reg_desc, ARRAY_SIZE(reg_desc), "LowerQM");
| 8321 | num_error_causes = GAUDI2_NUM_OF_LOWER_QM_ERR_CAUSE; |
| 8322 | } else { |
| 8323 | snprintf(reg_desc, ARRAY_SIZE(reg_desc), "stream%u", i);
| 8324 | num_error_causes = GAUDI2_NUM_OF_QM_ERR_CAUSE; |
| 8325 | } |
| 8326 | |
| 8327 | for (j = 0 ; j < num_error_causes ; j++) |
| 8328 | if (glbl_sts_val & BIT(j)) { |
| 8329 | gaudi2_print_event(hdev, event_type, true,
| 8330 | "%s. err cause: %s", reg_desc,
| 8331 | i == QMAN_STREAMS ? |
| 8332 | gaudi2_lower_qman_error_cause[j] : |
| 8333 | gaudi2_qman_error_cause[j]); |
| 8334 | error_count++; |
| 8335 | } |
| 8336 | |
| 8337 | /* Check for undefined opcode error in lower QM */ |
| 8338 | if ((i == QMAN_STREAMS) && |
| 8339 | (glbl_sts_val & PDMA0_QM_GLBL_ERR_STS_CP_UNDEF_CMD_ERR_MASK)) { |
| 8340 | handle_lower_qman_data_on_err(hdev, qman_base, |
| 8341 | gaudi2_queue_id_to_engine_id[qid_base]);
| 8342 | *event_mask |= HL_NOTIFIER_EVENT_UNDEFINED_OPCODE; |
| 8343 | } |
| 8344 | } |
| 8345 | |
| 8346 | arb_err_val = RREG32(arb_err_addr); |
| 8347 | |
| 8348 | if (!arb_err_val) |
| 8349 | goto out; |
| 8350 | |
| 8351 | for (j = 0 ; j < GAUDI2_NUM_OF_QM_ARB_ERR_CAUSE ; j++) { |
| 8352 | if (arb_err_val & BIT(j)) { |
| 8353 | gaudi2_print_event(hdev, event_type, true,
| 8354 | "ARB_ERR. err cause: %s",
| 8355 | gaudi2_qman_arb_error_cause[j]); |
| 8356 | error_count++; |
| 8357 | } |
| 8358 | } |
| 8359 | |
| 8360 | out: |
| 8361 | return error_count; |
| 8362 | } |
| 8363 | |
| 8364 | static void gaudi2_razwi_rr_hbw_shared_printf_info(struct hl_device *hdev, |
| 8365 | u64 rtr_mstr_if_base_addr, bool is_write, char *name, |
| 8366 | enum gaudi2_engine_id id, u64 *event_mask) |
| 8367 | { |
| 8368 | u32 razwi_hi, razwi_lo, razwi_xy; |
| 8369 | u16 eng_id = id; |
| 8370 | u8 rd_wr_flag; |
| 8371 | |
| 8372 | if (is_write) { |
| 8373 | razwi_hi = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HI); |
| 8374 | razwi_lo = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_LO); |
| 8375 | razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_XY); |
| 8376 | rd_wr_flag = HL_RAZWI_WRITE; |
| 8377 | } else { |
| 8378 | razwi_hi = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HI); |
| 8379 | razwi_lo = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_LO); |
| 8380 | razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_XY); |
| 8381 | rd_wr_flag = HL_RAZWI_READ; |
| 8382 | } |
| 8383 | |
| 8384 | hl_handle_razwi(hdev, (u64)razwi_hi << 32 | razwi_lo, &eng_id, 1,
| 8385 | rd_wr_flag | HL_RAZWI_HBW, event_mask);
| 8386 | |
| 8387 | dev_err_ratelimited(hdev->dev, |
| 8388 | "%s-RAZWI SHARED RR HBW %s error, address %#llx, Initiator coordinates 0x%x\n" , |
| 8389 | name, is_write ? "WR" : "RD", (u64)razwi_hi << 32 | razwi_lo, razwi_xy);
| 8390 | } |
| 8391 | |
| 8392 | static void gaudi2_razwi_rr_lbw_shared_printf_info(struct hl_device *hdev, |
| 8393 | u64 rtr_mstr_if_base_addr, bool is_write, char *name, |
| 8394 | enum gaudi2_engine_id id, u64 *event_mask) |
| 8395 | { |
| 8396 | u64 razwi_addr = CFG_BASE; |
| 8397 | u32 razwi_xy; |
| 8398 | u16 eng_id = id; |
| 8399 | u8 rd_wr_flag; |
| 8400 | |
| 8401 | if (is_write) { |
| 8402 | razwi_addr += RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI); |
| 8403 | razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_XY); |
| 8404 | rd_wr_flag = HL_RAZWI_WRITE; |
| 8405 | } else { |
| 8406 | razwi_addr += RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI); |
| 8407 | razwi_xy = RREG32(rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_XY); |
| 8408 | rd_wr_flag = HL_RAZWI_READ; |
| 8409 | } |
| 8410 | |
| 8411 | hl_handle_razwi(hdev, razwi_addr, &eng_id, 1, rd_wr_flag | HL_RAZWI_LBW, event_mask);
| 8412 | dev_err_ratelimited(hdev->dev, |
| 8413 | "%s-RAZWI SHARED RR LBW %s error, mstr_if 0x%llx, captured address 0x%llX Initiator coordinates 0x%x\n" , |
| 8414 | name, is_write ? "WR" : "RD", rtr_mstr_if_base_addr, razwi_addr,
| 8415 | razwi_xy); |
| 8416 | } |
| 8417 | |
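| | /*
| | * Translate a RAZWI initiator module and its instance index to the matching
| | * gaudi2_engine_id. Returns GAUDI2_ENGINE_ID_SIZE for an unknown module.
| | */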
| 8418 | static enum gaudi2_engine_id gaudi2_razwi_calc_engine_id(struct hl_device *hdev, |
| 8419 | enum razwi_event_sources module, u8 module_idx) |
| 8420 | { |
| 8421 | switch (module) { |
| 8422 | case RAZWI_TPC: |
| 8423 | if (module_idx == (NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES)) |
| 8424 | return GAUDI2_DCORE0_ENGINE_ID_TPC_6; |
| 8425 | return (((module_idx / NUM_OF_TPC_PER_DCORE) * ENGINE_ID_DCORE_OFFSET) + |
| 8426 | (module_idx % NUM_OF_TPC_PER_DCORE) + |
| 8427 | (GAUDI2_DCORE0_ENGINE_ID_TPC_0 - GAUDI2_DCORE0_ENGINE_ID_EDMA_0)); |
| 8428 | |
| 8429 | case RAZWI_MME: |
| 8430 | return ((GAUDI2_DCORE0_ENGINE_ID_MME - GAUDI2_DCORE0_ENGINE_ID_EDMA_0) + |
| 8431 | (module_idx * ENGINE_ID_DCORE_OFFSET)); |
| 8432 | |
| 8433 | case RAZWI_EDMA: |
| 8434 | return (((module_idx / NUM_OF_EDMA_PER_DCORE) * ENGINE_ID_DCORE_OFFSET) + |
| 8435 | (module_idx % NUM_OF_EDMA_PER_DCORE)); |
| 8436 | |
| 8437 | case RAZWI_PDMA: |
| 8438 | return (GAUDI2_ENGINE_ID_PDMA_0 + module_idx); |
| 8439 | |
| 8440 | case RAZWI_NIC: |
| 8441 | return (GAUDI2_ENGINE_ID_NIC0_0 + (NIC_NUMBER_OF_QM_PER_MACRO * module_idx)); |
| 8442 | |
| 8443 | case RAZWI_DEC: |
| 8444 | if (module_idx == 8) |
| 8445 | return GAUDI2_PCIE_ENGINE_ID_DEC_0; |
| 8446 | |
| 8447 | if (module_idx == 9) |
| 8448 | return GAUDI2_PCIE_ENGINE_ID_DEC_1; |
| 8449 |
| 8450 | return (((module_idx / NUM_OF_DEC_PER_DCORE) * ENGINE_ID_DCORE_OFFSET) + |
| 8451 | (module_idx % NUM_OF_DEC_PER_DCORE) + |
| 8452 | (GAUDI2_DCORE0_ENGINE_ID_DEC_0 - GAUDI2_DCORE0_ENGINE_ID_EDMA_0)); |
| 8453 | |
| 8454 | case RAZWI_ROT: |
| 8455 | return GAUDI2_ENGINE_ID_ROT_0 + module_idx; |
| 8456 | |
| 8457 | case RAZWI_ARC_FARM: |
| 8458 | return GAUDI2_ENGINE_ID_ARC_FARM; |
| 8459 | |
| 8460 | default: |
| 8461 | return GAUDI2_ENGINE_ID_SIZE; |
| 8462 | } |
| 8463 | } |
| 8464 | |
| 8465 | /* |
| 8466 | * This function handles RR (Range Register) hit events
| 8467 | * raised by initiators, not PSOC RAZWI.
| 8468 | */ |
| 8469 | static void gaudi2_ack_module_razwi_event_handler(struct hl_device *hdev, |
| 8470 | enum razwi_event_sources module, u8 module_idx, |
| 8471 | u8 module_sub_idx, u64 *event_mask) |
| 8472 | { |
| 8473 | bool via_sft = false; |
| 8474 | u32 hbw_rtr_id, lbw_rtr_id, dcore_id, dcore_rtr_id, eng_id, binned_idx; |
| 8475 | u64 hbw_rtr_mstr_if_base_addr, lbw_rtr_mstr_if_base_addr; |
| 8476 | u32 hbw_shrd_aw = 0, hbw_shrd_ar = 0; |
| 8477 | u32 lbw_shrd_aw = 0, lbw_shrd_ar = 0; |
| 8478 | char initiator_name[64]; |
| 8479 | |
| 8480 | switch (module) { |
| 8481 | case RAZWI_TPC: |
| 8482 | sprintf(initiator_name, "TPC_%u", module_idx);
| 8483 | if (hdev->tpc_binning) { |
| 8484 | binned_idx = __ffs(hdev->tpc_binning); |
| 8485 | if (binned_idx == module_idx) |
| 8486 | module_idx = TPC_ID_DCORE0_TPC6; |
| 8487 | } |
| 8488 | |
| 8489 | hbw_rtr_id = gaudi2_tpc_initiator_hbw_rtr_id[module_idx]; |
| 8490 | lbw_rtr_id = gaudi2_tpc_initiator_lbw_rtr_id[module_idx]; |
| 8491 | break; |
| 8492 | case RAZWI_MME: |
| 8493 | sprintf(initiator_name, "MME_%u", module_idx);
| 8494 | switch (module_sub_idx) { |
| 8495 | case MME_WAP0: |
| 8496 | hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].wap0; |
| 8497 | break; |
| 8498 | case MME_WAP1: |
| 8499 | hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].wap1; |
| 8500 | break; |
| 8501 | case MME_WRITE: |
| 8502 | hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].write; |
| 8503 | break; |
| 8504 | case MME_READ: |
| 8505 | hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].read; |
| 8506 | break; |
| 8507 | case MME_SBTE0: |
| 8508 | hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte0; |
| 8509 | break; |
| 8510 | case MME_SBTE1: |
| 8511 | hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte1; |
| 8512 | break; |
| 8513 | case MME_SBTE2: |
| 8514 | hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte2; |
| 8515 | break; |
| 8516 | case MME_SBTE3: |
| 8517 | hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte3; |
| 8518 | break; |
| 8519 | case MME_SBTE4: |
| 8520 | hbw_rtr_id = gaudi2_mme_initiator_rtr_id[module_idx].sbte4; |
| 8521 | break; |
| 8522 | default: |
| 8523 | return; |
| 8524 | } |
| 8525 | lbw_rtr_id = hbw_rtr_id; |
| 8526 | break; |
| 8527 | case RAZWI_EDMA: |
| 8528 | hbw_rtr_mstr_if_base_addr = gaudi2_edma_initiator_hbw_sft[module_idx]; |
| 8529 | dcore_id = module_idx / NUM_OF_EDMA_PER_DCORE; |
| 8530 | /* The SFT has a separate MSTR_IF for LBW; only there can we
| 8531 | * read the LBW RAZWI-related registers
| 8532 | */
| 8533 | lbw_rtr_mstr_if_base_addr = mmSFT0_LBW_RTR_IF_MSTR_IF_RR_SHRD_HBW_BASE + |
| 8534 | dcore_id * SFT_DCORE_OFFSET; |
| 8535 | via_sft = true; |
| 8536 | sprintf(initiator_name, "EDMA_%u", module_idx);
| 8537 | break; |
| 8538 | case RAZWI_PDMA: |
| 8539 | hbw_rtr_id = gaudi2_pdma_initiator_hbw_rtr_id[module_idx]; |
| 8540 | lbw_rtr_id = gaudi2_pdma_initiator_lbw_rtr_id[module_idx]; |
| 8541 | sprintf(initiator_name, "PDMA_%u", module_idx);
| 8542 | break; |
| 8543 | case RAZWI_NIC: |
| 8544 | hbw_rtr_id = gaudi2_nic_initiator_hbw_rtr_id[module_idx]; |
| 8545 | lbw_rtr_id = gaudi2_nic_initiator_lbw_rtr_id[module_idx]; |
| 8546 | sprintf(initiator_name, "NIC_%u", module_idx);
| 8547 | break; |
| 8548 | case RAZWI_DEC: |
| 8549 | sprintf(initiator_name, "DEC_%u", module_idx);
| 8550 | if (hdev->decoder_binning) { |
| 8551 | binned_idx = __ffs(hdev->decoder_binning); |
| 8552 | if (binned_idx == module_idx) |
| 8553 | module_idx = DEC_ID_PCIE_VDEC1; |
| 8554 | } |
| 8555 | hbw_rtr_id = gaudi2_dec_initiator_hbw_rtr_id[module_idx]; |
| 8556 | lbw_rtr_id = gaudi2_dec_initiator_lbw_rtr_id[module_idx]; |
| 8557 | break; |
| 8558 | case RAZWI_ROT: |
| 8559 | hbw_rtr_id = gaudi2_rot_initiator_hbw_rtr_id[module_idx]; |
| 8560 | lbw_rtr_id = gaudi2_rot_initiator_lbw_rtr_id[module_idx]; |
| 8561 | sprintf(initiator_name, "ROT_%u", module_idx);
| 8562 | break; |
| 8563 | case RAZWI_ARC_FARM: |
| 8564 | lbw_rtr_id = DCORE1_RTR5; |
| 8565 | hbw_rtr_id = DCORE1_RTR7; |
| 8566 | sprintf(initiator_name, "ARC_FARM_%u", module_idx);
| 8567 | break; |
| 8568 | default: |
| 8569 | return; |
| 8570 | } |
| 8571 | |
| 8572 | /* Find router mstr_if register base */ |
| 8573 | if (!via_sft) { |
| 8574 | dcore_id = hbw_rtr_id / NUM_OF_RTR_PER_DCORE; |
| 8575 | dcore_rtr_id = hbw_rtr_id % NUM_OF_RTR_PER_DCORE; |
| 8576 | hbw_rtr_mstr_if_base_addr = mmDCORE0_RTR0_CTRL_BASE + |
| 8577 | dcore_id * DCORE_OFFSET + |
| 8578 | dcore_rtr_id * DCORE_RTR_OFFSET + |
| 8579 | RTR_MSTR_IF_OFFSET; |
| 8580 | lbw_rtr_mstr_if_base_addr = hbw_rtr_mstr_if_base_addr + |
| 8581 | (((s32)lbw_rtr_id - hbw_rtr_id) * DCORE_RTR_OFFSET); |
| 8582 | } |
| 8583 | |
| 8584 | /* Find out event cause by reading "RAZWI_HAPPENED" registers */ |
| 8585 | hbw_shrd_aw = RREG32(hbw_rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HAPPENED); |
| 8586 | hbw_shrd_ar = RREG32(hbw_rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HAPPENED); |
| 8587 | lbw_shrd_aw = RREG32(lbw_rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_HAPPENED); |
| 8588 | lbw_shrd_ar = RREG32(lbw_rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_HAPPENED); |
| 8589 | |
| 8590 | eng_id = gaudi2_razwi_calc_engine_id(hdev, module, module_idx); |
| 8591 | if (hbw_shrd_aw) { |
| 8592 | gaudi2_razwi_rr_hbw_shared_printf_info(hdev, hbw_rtr_mstr_if_base_addr, true,
| 8593 | initiator_name, eng_id, event_mask);
| 8594 | |
| 8595 | /* Clear event indication */ |
| 8596 | WREG32(hbw_rtr_mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HAPPENED, hbw_shrd_aw); |
| 8597 | } |
| 8598 | |
| 8599 | if (hbw_shrd_ar) { |
| 8600 | gaudi2_razwi_rr_hbw_shared_printf_info(hdev, hbw_rtr_mstr_if_base_addr, false,
| 8601 | initiator_name, eng_id, event_mask);
| 8602 | |
| 8603 | /* Clear event indication */ |
| 8604 | WREG32(hbw_rtr_mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HAPPENED, hbw_shrd_ar); |
| 8605 | } |
| 8606 | |
| 8607 | if (lbw_shrd_aw) { |
| 8608 | gaudi2_razwi_rr_lbw_shared_printf_info(hdev, lbw_rtr_mstr_if_base_addr, true,
| 8609 | initiator_name, eng_id, event_mask);
| 8610 | |
| 8611 | /* Clear event indication */ |
| 8612 | WREG32(lbw_rtr_mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_HAPPENED, lbw_shrd_aw); |
| 8613 | } |
| 8614 | |
| 8615 | if (lbw_shrd_ar) { |
| 8616 | gaudi2_razwi_rr_lbw_shared_printf_info(hdev, lbw_rtr_mstr_if_base_addr, false,
| 8617 | initiator_name, eng_id, event_mask);
| 8618 | |
| 8619 | /* Clear event indication */ |
| 8620 | WREG32(lbw_rtr_mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_HAPPENED, lbw_shrd_ar); |
| 8621 | } |
| 8622 | } |
| 8623 | |
| 8624 | static void gaudi2_check_if_razwi_happened(struct hl_device *hdev) |
| 8625 | { |
| 8626 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 8627 | u8 mod_idx, sub_mod; |
| 8628 | |
| 8629 | /* check all TPCs */ |
| 8630 | for (mod_idx = 0 ; mod_idx < (NUM_OF_TPC_PER_DCORE * NUM_OF_DCORES + 1) ; mod_idx++) { |
| 8631 | if (prop->tpc_enabled_mask & BIT(mod_idx)) |
| 8632 | gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_TPC, mod_idx, 0, NULL);
| 8633 | } |
| 8634 | |
| 8635 | /* check all MMEs */ |
| 8636 | for (mod_idx = 0 ; mod_idx < (NUM_OF_MME_PER_DCORE * NUM_OF_DCORES) ; mod_idx++) |
| 8637 | for (sub_mod = MME_WAP0 ; sub_mod < MME_INITIATORS_MAX ; sub_mod++) |
| 8638 | gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mod_idx,
| 8639 | sub_mod, NULL);
| 8640 | |
| 8641 | /* check all EDMAs */ |
| 8642 | for (mod_idx = 0 ; mod_idx < (NUM_OF_EDMA_PER_DCORE * NUM_OF_DCORES) ; mod_idx++) |
| 8643 | if (prop->edma_enabled_mask & BIT(mod_idx)) |
| 8644 | gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_EDMA, mod_idx, 0, NULL);
| 8645 | |
| 8646 | /* check all PDMAs */ |
| 8647 | for (mod_idx = 0 ; mod_idx < NUM_OF_PDMA ; mod_idx++) |
| 8648 | gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_PDMA, mod_idx, 0, NULL);
| 8649 | |
| 8650 | /* check all NICs */ |
| 8651 | for (mod_idx = 0 ; mod_idx < NIC_NUMBER_OF_PORTS ; mod_idx++) |
| 8652 | if (hdev->nic_ports_mask & BIT(mod_idx)) |
| 8653 | gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_NIC, mod_idx >> 1, 0,
| 8654 | NULL); |
| 8655 | |
| 8656 | /* check all DECs */ |
| 8657 | for (mod_idx = 0 ; mod_idx < NUMBER_OF_DEC ; mod_idx++) |
| 8658 | if (prop->decoder_enabled_mask & BIT(mod_idx)) |
| 8659 | gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_DEC, mod_idx, 0, NULL);
| 8660 | |
| 8661 | /* check all ROTs */ |
| 8662 | for (mod_idx = 0 ; mod_idx < NUM_OF_ROT ; mod_idx++) |
| 8663 | gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, mod_idx, 0, NULL);
| 8664 | } |
| 8665 | |
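| | /*
| | * Collect every entry in the given razwi_info table whose AXUSER XY value
| | * matches the captured one. Several engines may share a router, so all
| | * matching engine IDs and router bases are returned, along with a combined
| | * name string.
| | */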
| 8666 | static int gaudi2_psoc_razwi_get_engines(struct gaudi2_razwi_info *razwi_info, u32 array_size, |
| 8667 | u32 axuser_xy, u32 *base, u16 *eng_id, |
| 8668 | char *eng_name) |
| 8669 | { |
| 8671 | int i, num_of_eng = 0; |
| 8672 | u16 str_size = 0; |
| 8673 | |
| 8674 | for (i = 0 ; i < array_size ; i++) { |
| 8675 | if (axuser_xy != razwi_info[i].axuser_xy) |
| 8676 | continue; |
| 8677 | |
| 8678 | eng_id[num_of_eng] = razwi_info[i].eng_id; |
| 8679 | base[num_of_eng] = razwi_info[i].rtr_ctrl; |
| 8680 | if (!num_of_eng) |
| 8681 | str_size += scnprintf(eng_name + str_size,
| 8682 | PSOC_RAZWI_ENG_STR_SIZE - str_size, "%s",
| 8683 | razwi_info[i].eng_name); |
| 8684 | else |
| 8685 | str_size += scnprintf(eng_name + str_size,
| 8686 | PSOC_RAZWI_ENG_STR_SIZE - str_size, " or %s",
| 8687 | razwi_info[i].eng_name); |
| 8688 | num_of_eng++; |
| 8689 | } |
| 8690 | |
| 8691 | return num_of_eng; |
| 8692 | } |
| 8693 | |
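| | /*
| | * Match the captured AXUSER coordinates against the common initiators table
| | * first, falling back to the MME table, then read each candidate router's
| | * HBW/LBW capture registers to report the offending address.
| | */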
| 8694 | static bool gaudi2_handle_psoc_razwi_happened(struct hl_device *hdev, u32 razwi_reg, |
| 8695 | u64 *event_mask) |
| 8696 | { |
| 8697 | u32 axuser_xy = RAZWI_GET_AXUSER_XY(razwi_reg), addr_hi = 0, addr_lo = 0; |
| 8698 | u32 base[PSOC_RAZWI_MAX_ENG_PER_RTR]; |
| 8699 | u16 num_of_eng, eng_id[PSOC_RAZWI_MAX_ENG_PER_RTR]; |
| 8700 | char eng_name_str[PSOC_RAZWI_ENG_STR_SIZE]; |
| 8701 | bool razwi_happened = false; |
| 8702 | u64 addr; |
| 8703 | int i; |
| 8704 | |
| 8705 | num_of_eng = gaudi2_psoc_razwi_get_engines(common_razwi_info, ARRAY_SIZE(common_razwi_info),
| 8706 | axuser_xy, base, eng_id, eng_name_str);
| 8707 | |
| 8708 | /* If no match for XY coordinates, try to find it in MME razwi table */ |
| 8709 | if (!num_of_eng) { |
| 8710 | axuser_xy = RAZWI_GET_AXUSER_LOW_XY(razwi_reg); |
| 8711 | num_of_eng = gaudi2_psoc_razwi_get_engines(mme_razwi_info,
| 8712 | ARRAY_SIZE(mme_razwi_info),
| 8713 | axuser_xy, base, eng_id,
| 8714 | eng_name_str);
| 8715 | } |
| 8716 | |
| 8717 | for (i = 0 ; i < num_of_eng ; i++) { |
| 8718 | if (RREG32(base[i] + DEC_RAZWI_HBW_AW_SET)) { |
| 8719 | addr_hi = RREG32(base[i] + DEC_RAZWI_HBW_AW_ADDR_HI); |
| 8720 | addr_lo = RREG32(base[i] + DEC_RAZWI_HBW_AW_ADDR_LO); |
| 8721 | addr = ((u64)addr_hi << 32) + addr_lo; |
| 8722 | if (addr) { |
| 8723 | dev_err(hdev->dev, |
| 8724 | "PSOC HBW AW RAZWI: %s, address (aligned to 128 byte): 0x%llX\n" , |
| 8725 | eng_name_str, addr); |
| 8726 | hl_handle_razwi(hdev, addr, &eng_id[0],
| 8727 | num_of_eng, HL_RAZWI_HBW | HL_RAZWI_WRITE, event_mask);
| 8728 | razwi_happened = true; |
| 8729 | } |
| 8730 | } |
| 8731 | |
| 8732 | if (RREG32(base[i] + DEC_RAZWI_HBW_AR_SET)) { |
| 8733 | addr_hi = RREG32(base[i] + DEC_RAZWI_HBW_AR_ADDR_HI); |
| 8734 | addr_lo = RREG32(base[i] + DEC_RAZWI_HBW_AR_ADDR_LO); |
| 8735 | addr = ((u64)addr_hi << 32) + addr_lo; |
| 8736 | if (addr) { |
| 8737 | dev_err(hdev->dev, |
| 8738 | "PSOC HBW AR RAZWI: %s, address (aligned to 128 byte): 0x%llX\n" , |
| 8739 | eng_name_str, addr); |
| 8740 | hl_handle_razwi(hdev, addr, &eng_id[0],
| 8741 | num_of_eng, HL_RAZWI_HBW | HL_RAZWI_READ, event_mask);
| 8742 | razwi_happened = true; |
| 8743 | } |
| 8744 | } |
| 8745 | |
| 8746 | if (RREG32(base[i] + DEC_RAZWI_LBW_AW_SET)) { |
| 8747 | addr_lo = RREG32(base[i] + DEC_RAZWI_LBW_AW_ADDR); |
| 8748 | if (addr_lo) { |
| 8749 | dev_err(hdev->dev, |
| 8750 | "PSOC LBW AW RAZWI: %s, address (aligned to 128 byte): 0x%X\n" , |
| 8751 | eng_name_str, addr_lo); |
| 8752 | hl_handle_razwi(hdev, addr_lo, &eng_id[0],
| 8753 | num_of_eng, HL_RAZWI_LBW | HL_RAZWI_WRITE, event_mask);
| 8754 | razwi_happened = true; |
| 8755 | } |
| 8756 | } |
| 8757 | |
| 8758 | if (RREG32(base[i] + DEC_RAZWI_LBW_AR_SET)) { |
| 8759 | addr_lo = RREG32(base[i] + DEC_RAZWI_LBW_AR_ADDR); |
| 8760 | if (addr_lo) { |
| 8761 | dev_err(hdev->dev, |
| 8762 | "PSOC LBW AR RAZWI: %s, address (aligned to 128 byte): 0x%X\n" , |
| 8763 | eng_name_str, addr_lo); |
| 8764 | hl_handle_razwi(hdev, addr_lo, &eng_id[0],
| 8765 | num_of_eng, HL_RAZWI_LBW | HL_RAZWI_READ, event_mask);
| 8766 | razwi_happened = true; |
| 8767 | } |
| 8768 | } |
| 8769 | /* In the common case the loop breaks here, since there is only one engine ID or
| 8770 | * several engines sharing the same router. The exception is a PSOC RAZWI from
| 8771 | * EDMA, where the axuser ID may match two routers (the two interfaces of the
| 8772 | * SFT router). In that case the first router may not hold the info and we
| 8773 | * need to iterate over the other router as well.
| 8774 | */
| 8775 | if (razwi_happened) |
| 8776 | break; |
| 8777 | } |
| 8778 | |
| 8779 | return razwi_happened; |
| 8780 | } |
| 8781 | |
| 8782 | /* PSOC RAZWI interrupt occurs only when trying to access a bad address */ |
| 8783 | static int gaudi2_ack_psoc_razwi_event_handler(struct hl_device *hdev, u64 *event_mask) |
| 8784 | { |
| 8785 | u32 razwi_mask_info, razwi_intr = 0, error_count = 0; |
| 8786 | |
| 8787 | if (hdev->pldm || !(hdev->fw_components & FW_TYPE_LINUX)) { |
| 8788 | razwi_intr = RREG32(mmPSOC_GLOBAL_CONF_RAZWI_INTERRUPT); |
| 8789 | if (!razwi_intr) |
| 8790 | return 0; |
| 8791 | } |
| 8792 | |
| 8793 | razwi_mask_info = RREG32(mmPSOC_GLOBAL_CONF_RAZWI_MASK_INFO); |
| 8794 | |
| 8795 | dev_err_ratelimited(hdev->dev, |
| 8796 | "PSOC RAZWI interrupt: Mask %d, AR %d, AW %d, AXUSER_L 0x%x AXUSER_H 0x%x\n" , |
| 8797 | FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_MASK_MASK, razwi_mask_info), |
| 8798 | FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AR_MASK, razwi_mask_info), |
| 8799 | FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_WAS_AW_MASK, razwi_mask_info), |
| 8800 | FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_L_MASK, razwi_mask_info), |
| 8801 | FIELD_GET(PSOC_GLOBAL_CONF_RAZWI_MASK_INFO_AXUSER_H_MASK, razwi_mask_info)); |
| 8802 | |
| 8803 | if (gaudi2_handle_psoc_razwi_happened(hdev, razwi_mask_info, event_mask))
| 8804 | error_count++; |
| 8805 | else |
| 8806 | dev_err_ratelimited(hdev->dev, |
| 8807 | "PSOC RAZWI interrupt: invalid razwi info (0x%x)\n" , |
| 8808 | razwi_mask_info); |
| 8809 | |
| 8810 | /* Clear Interrupts only on pldm or if f/w doesn't handle interrupts */ |
| 8811 | if (hdev->pldm || !(hdev->fw_components & FW_TYPE_LINUX)) |
| 8812 | WREG32(mmPSOC_GLOBAL_CONF_RAZWI_INTERRUPT, razwi_intr); |
| 8813 | |
| 8814 | return error_count; |
| 8815 | } |
| 8816 | |
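| | /* Print and count the set bits in the QM SEI status register, then clear them */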
| 8817 | static int _gaudi2_handle_qm_sei_err(struct hl_device *hdev, u64 qman_base, u16 event_type) |
| 8818 | { |
| 8819 | u32 i, sts_val, sts_clr_val = 0, error_count = 0; |
| 8820 | |
| 8821 | sts_val = RREG32(qman_base + QM_SEI_STATUS_OFFSET); |
| 8822 | |
| 8823 | for (i = 0 ; i < GAUDI2_NUM_OF_QM_SEI_ERR_CAUSE ; i++) { |
| 8824 | if (sts_val & BIT(i)) { |
| 8825 | gaudi2_print_event(hdev, event_type, true,
| 8826 | "err cause: %s", gaudi2_qm_sei_error_cause[i]);
| 8827 | sts_clr_val |= BIT(i); |
| 8828 | error_count++; |
| 8829 | } |
| 8830 | } |
| 8831 | |
| 8832 | WREG32(qman_base + QM_SEI_STATUS_OFFSET, sts_clr_val); |
| 8833 | |
| 8834 | return error_count; |
| 8835 | } |
| 8836 | |
| 8837 | static int gaudi2_handle_qm_sei_err(struct hl_device *hdev, u16 event_type, |
| 8838 | bool extended_err_check, u64 *event_mask) |
| 8839 | { |
| 8840 | enum razwi_event_sources module; |
| 8841 | u32 error_count = 0; |
| 8842 | u64 qman_base; |
| 8843 | u8 index; |
| 8844 | |
| 8845 | switch (event_type) { |
| 8846 | case GAUDI2_EVENT_TPC0_AXI_ERR_RSP ... GAUDI2_EVENT_TPC23_AXI_ERR_RSP: |
| 8847 | index = event_type - GAUDI2_EVENT_TPC0_AXI_ERR_RSP; |
| 8848 | qman_base = mmDCORE0_TPC0_QM_BASE + |
| 8849 | (index / NUM_OF_TPC_PER_DCORE) * DCORE_OFFSET + |
| 8850 | (index % NUM_OF_TPC_PER_DCORE) * DCORE_TPC_OFFSET; |
| 8851 | module = RAZWI_TPC; |
| 8852 | break; |
| 8853 | case GAUDI2_EVENT_TPC24_AXI_ERR_RSP: |
| 8854 | qman_base = mmDCORE0_TPC6_QM_BASE; |
| 8855 | module = RAZWI_TPC; |
| 8856 | break; |
| 8857 | case GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE: |
| 8858 | case GAUDI2_EVENT_MME1_CTRL_AXI_ERROR_RESPONSE: |
| 8859 | case GAUDI2_EVENT_MME2_CTRL_AXI_ERROR_RESPONSE: |
| 8860 | case GAUDI2_EVENT_MME3_CTRL_AXI_ERROR_RESPONSE: |
| 8861 | index = (event_type - GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE) / |
| 8862 | (GAUDI2_EVENT_MME1_CTRL_AXI_ERROR_RESPONSE - |
| 8863 | GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE); |
| 8864 | qman_base = mmDCORE0_MME_QM_BASE + index * DCORE_OFFSET; |
| 8865 | module = RAZWI_MME; |
| 8866 | break; |
| 8867 | case GAUDI2_EVENT_PDMA_CH0_AXI_ERR_RSP: |
| 8868 | case GAUDI2_EVENT_PDMA_CH1_AXI_ERR_RSP: |
| 8869 | index = event_type - GAUDI2_EVENT_PDMA_CH0_AXI_ERR_RSP; |
| 8870 | qman_base = mmPDMA0_QM_BASE + index * PDMA_OFFSET; |
| 8871 | module = RAZWI_PDMA; |
| 8872 | break; |
| 8873 | case GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE: |
| 8874 | case GAUDI2_EVENT_ROTATOR1_AXI_ERROR_RESPONSE: |
| 8875 | index = event_type - GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE; |
| 8876 | qman_base = mmROT0_QM_BASE + index * ROT_OFFSET; |
| 8877 | module = RAZWI_ROT; |
| 8878 | break; |
| 8879 | default: |
| 8880 | return 0; |
| 8881 | } |
| 8882 | |
| 8883 | error_count = _gaudi2_handle_qm_sei_err(hdev, qman_base, event_type); |
| 8884 | |
| 8885 | /* There is a single event per NIC macro, so should check its both QMAN blocks */ |
| 8886 | if (event_type >= GAUDI2_EVENT_NIC0_AXI_ERROR_RESPONSE && |
| 8887 | event_type <= GAUDI2_EVENT_NIC11_AXI_ERROR_RESPONSE) |
		error_count += _gaudi2_handle_qm_sei_err(hdev,
					qman_base + NIC_QM_OFFSET, event_type);
| 8890 | |
| 8891 | if (extended_err_check) { |
| 8892 | /* check if RAZWI happened */ |
		gaudi2_ack_module_razwi_event_handler(hdev, module, 0, 0, event_mask);
| 8894 | hl_check_for_glbl_errors(hdev); |
| 8895 | } |
| 8896 | |
| 8897 | return error_count; |
| 8898 | } |
| 8899 | |
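/* Map a QM error event to its base queue id and QMAN block base address,
 * then run the generic QMAN error handling flow on that block.
 */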
| 8900 | static int gaudi2_handle_qman_err(struct hl_device *hdev, u16 event_type, u64 *event_mask) |
| 8901 | { |
| 8902 | u32 qid_base, error_count = 0; |
| 8903 | u64 qman_base; |
| 8904 | u8 index = 0; |
| 8905 | |
| 8906 | switch (event_type) { |
| 8907 | case GAUDI2_EVENT_TPC0_QM ... GAUDI2_EVENT_TPC5_QM: |
| 8908 | index = event_type - GAUDI2_EVENT_TPC0_QM; |
| 8909 | qid_base = GAUDI2_QUEUE_ID_DCORE0_TPC_0_0 + index * QMAN_STREAMS; |
| 8910 | qman_base = mmDCORE0_TPC0_QM_BASE + index * DCORE_TPC_OFFSET; |
| 8911 | break; |
| 8912 | case GAUDI2_EVENT_TPC6_QM ... GAUDI2_EVENT_TPC11_QM: |
| 8913 | index = event_type - GAUDI2_EVENT_TPC6_QM; |
| 8914 | qid_base = GAUDI2_QUEUE_ID_DCORE1_TPC_0_0 + index * QMAN_STREAMS; |
| 8915 | qman_base = mmDCORE1_TPC0_QM_BASE + index * DCORE_TPC_OFFSET; |
| 8916 | break; |
| 8917 | case GAUDI2_EVENT_TPC12_QM ... GAUDI2_EVENT_TPC17_QM: |
| 8918 | index = event_type - GAUDI2_EVENT_TPC12_QM; |
| 8919 | qid_base = GAUDI2_QUEUE_ID_DCORE2_TPC_0_0 + index * QMAN_STREAMS; |
| 8920 | qman_base = mmDCORE2_TPC0_QM_BASE + index * DCORE_TPC_OFFSET; |
| 8921 | break; |
| 8922 | case GAUDI2_EVENT_TPC18_QM ... GAUDI2_EVENT_TPC23_QM: |
| 8923 | index = event_type - GAUDI2_EVENT_TPC18_QM; |
| 8924 | qid_base = GAUDI2_QUEUE_ID_DCORE3_TPC_0_0 + index * QMAN_STREAMS; |
| 8925 | qman_base = mmDCORE3_TPC0_QM_BASE + index * DCORE_TPC_OFFSET; |
| 8926 | break; |
| 8927 | case GAUDI2_EVENT_TPC24_QM: |
| 8928 | qid_base = GAUDI2_QUEUE_ID_DCORE0_TPC_6_0; |
| 8929 | qman_base = mmDCORE0_TPC6_QM_BASE; |
| 8930 | break; |
| 8931 | case GAUDI2_EVENT_MME0_QM: |
| 8932 | qid_base = GAUDI2_QUEUE_ID_DCORE0_MME_0_0; |
| 8933 | qman_base = mmDCORE0_MME_QM_BASE; |
| 8934 | break; |
| 8935 | case GAUDI2_EVENT_MME1_QM: |
| 8936 | qid_base = GAUDI2_QUEUE_ID_DCORE1_MME_0_0; |
| 8937 | qman_base = mmDCORE1_MME_QM_BASE; |
| 8938 | break; |
| 8939 | case GAUDI2_EVENT_MME2_QM: |
| 8940 | qid_base = GAUDI2_QUEUE_ID_DCORE2_MME_0_0; |
| 8941 | qman_base = mmDCORE2_MME_QM_BASE; |
| 8942 | break; |
| 8943 | case GAUDI2_EVENT_MME3_QM: |
| 8944 | qid_base = GAUDI2_QUEUE_ID_DCORE3_MME_0_0; |
| 8945 | qman_base = mmDCORE3_MME_QM_BASE; |
| 8946 | break; |
| 8947 | case GAUDI2_EVENT_HDMA0_QM: |
| 8948 | index = 0; |
| 8949 | qid_base = GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0; |
| 8950 | qman_base = mmDCORE0_EDMA0_QM_BASE; |
| 8951 | break; |
| 8952 | case GAUDI2_EVENT_HDMA1_QM: |
| 8953 | index = 1; |
| 8954 | qid_base = GAUDI2_QUEUE_ID_DCORE0_EDMA_1_0; |
| 8955 | qman_base = mmDCORE0_EDMA1_QM_BASE; |
| 8956 | break; |
| 8957 | case GAUDI2_EVENT_HDMA2_QM: |
| 8958 | index = 2; |
| 8959 | qid_base = GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0; |
| 8960 | qman_base = mmDCORE1_EDMA0_QM_BASE; |
| 8961 | break; |
| 8962 | case GAUDI2_EVENT_HDMA3_QM: |
| 8963 | index = 3; |
| 8964 | qid_base = GAUDI2_QUEUE_ID_DCORE1_EDMA_1_0; |
| 8965 | qman_base = mmDCORE1_EDMA1_QM_BASE; |
| 8966 | break; |
| 8967 | case GAUDI2_EVENT_HDMA4_QM: |
| 8968 | index = 4; |
| 8969 | qid_base = GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0; |
| 8970 | qman_base = mmDCORE2_EDMA0_QM_BASE; |
| 8971 | break; |
| 8972 | case GAUDI2_EVENT_HDMA5_QM: |
| 8973 | index = 5; |
| 8974 | qid_base = GAUDI2_QUEUE_ID_DCORE2_EDMA_1_0; |
| 8975 | qman_base = mmDCORE2_EDMA1_QM_BASE; |
| 8976 | break; |
| 8977 | case GAUDI2_EVENT_HDMA6_QM: |
| 8978 | index = 6; |
| 8979 | qid_base = GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0; |
| 8980 | qman_base = mmDCORE3_EDMA0_QM_BASE; |
| 8981 | break; |
| 8982 | case GAUDI2_EVENT_HDMA7_QM: |
| 8983 | index = 7; |
| 8984 | qid_base = GAUDI2_QUEUE_ID_DCORE3_EDMA_1_0; |
| 8985 | qman_base = mmDCORE3_EDMA1_QM_BASE; |
| 8986 | break; |
| 8987 | case GAUDI2_EVENT_PDMA0_QM: |
| 8988 | qid_base = GAUDI2_QUEUE_ID_PDMA_0_0; |
| 8989 | qman_base = mmPDMA0_QM_BASE; |
| 8990 | break; |
| 8991 | case GAUDI2_EVENT_PDMA1_QM: |
| 8992 | qid_base = GAUDI2_QUEUE_ID_PDMA_1_0; |
| 8993 | qman_base = mmPDMA1_QM_BASE; |
| 8994 | break; |
| 8995 | case GAUDI2_EVENT_ROTATOR0_ROT0_QM: |
| 8996 | qid_base = GAUDI2_QUEUE_ID_ROT_0_0; |
| 8997 | qman_base = mmROT0_QM_BASE; |
| 8998 | break; |
| 8999 | case GAUDI2_EVENT_ROTATOR1_ROT1_QM: |
| 9000 | qid_base = GAUDI2_QUEUE_ID_ROT_1_0; |
| 9001 | qman_base = mmROT1_QM_BASE; |
| 9002 | break; |
| 9003 | default: |
| 9004 | return 0; |
| 9005 | } |
| 9006 | |
| 9007 | error_count = gaudi2_handle_qman_err_generic(hdev, event_type, qman_base, |
| 9008 | qid_base, event_mask); |
| 9009 | |
| 9010 | /* Handle EDMA QM SEI here because there is no AXI error response event for EDMA */ |
| 9011 | if (event_type >= GAUDI2_EVENT_HDMA2_QM && event_type <= GAUDI2_EVENT_HDMA5_QM) { |
| 9012 | error_count += _gaudi2_handle_qm_sei_err(hdev, qman_base, event_type); |
		gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_EDMA, index, 0, event_mask);
| 9014 | } |
| 9015 | |
| 9016 | hl_check_for_glbl_errors(hdev); |
| 9017 | |
| 9018 | return error_count; |
| 9019 | } |
| 9020 | |
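/* Report and clear the pending SEI error causes of all ARC farm ARCs. */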
| 9021 | static int gaudi2_handle_arc_farm_sei_err(struct hl_device *hdev, u16 event_type, u64 *event_mask) |
| 9022 | { |
| 9023 | u32 i, sts_val, sts_clr_val, error_count = 0, arc_farm; |
| 9024 | |
| 9025 | for (arc_farm = 0 ; arc_farm < NUM_OF_ARC_FARMS_ARC ; arc_farm++) { |
| 9026 | sts_clr_val = 0; |
| 9027 | sts_val = RREG32(mmARC_FARM_ARC0_AUX_ARC_SEI_INTR_STS + |
| 9028 | (arc_farm * ARC_FARM_OFFSET)); |
| 9029 | |
| 9030 | for (i = 0 ; i < GAUDI2_NUM_OF_ARC_SEI_ERR_CAUSE ; i++) { |
| 9031 | if (sts_val & BIT(i)) { |
				gaudi2_print_event(hdev, event_type, true,
					"ARC FARM ARC %u err cause: %s",
					arc_farm, gaudi2_arc_sei_error_cause[i]);
| 9035 | sts_clr_val |= BIT(i); |
| 9036 | error_count++; |
| 9037 | } |
| 9038 | } |
| 9039 | WREG32(mmARC_FARM_ARC0_AUX_ARC_SEI_INTR_CLR + (arc_farm * ARC_FARM_OFFSET), |
| 9040 | sts_clr_val); |
| 9041 | } |
| 9042 | |
	gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ARC_FARM, 0, 0, event_mask);
| 9044 | hl_check_for_glbl_errors(hdev); |
| 9045 | |
| 9046 | return error_count; |
| 9047 | } |
| 9048 | |
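/* Report and clear the SEI error causes latched by the CPU interface. */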
| 9049 | static int gaudi2_handle_cpu_sei_err(struct hl_device *hdev, u16 event_type) |
| 9050 | { |
| 9051 | u32 i, sts_val, sts_clr_val = 0, error_count = 0; |
| 9052 | |
| 9053 | sts_val = RREG32(mmCPU_IF_CPU_SEI_INTR_STS); |
| 9054 | |
| 9055 | for (i = 0 ; i < GAUDI2_NUM_OF_CPU_SEI_ERR_CAUSE ; i++) { |
| 9056 | if (sts_val & BIT(i)) { |
			gaudi2_print_event(hdev, event_type, true,
				"err cause: %s", gaudi2_cpu_sei_error_cause[i]);
| 9059 | sts_clr_val |= BIT(i); |
| 9060 | error_count++; |
| 9061 | } |
| 9062 | } |
| 9063 | |
| 9064 | hl_check_for_glbl_errors(hdev); |
| 9065 | |
| 9066 | WREG32(mmCPU_IF_CPU_SEI_INTR_CLR, sts_clr_val); |
| 9067 | |
| 9068 | return error_count; |
| 9069 | } |
| 9070 | |
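/* Report the rotator error causes delivered in the EQ entry payload and
 * check whether a RAZWI was triggered by this rotator.
 */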
| 9071 | static int gaudi2_handle_rot_err(struct hl_device *hdev, u8 rot_index, u16 event_type, |
| 9072 | struct hl_eq_razwi_with_intr_cause *razwi_with_intr_cause, |
| 9073 | u64 *event_mask) |
| 9074 | { |
| 9075 | u64 intr_cause_data = le64_to_cpu(razwi_with_intr_cause->intr_cause.intr_cause_data); |
| 9076 | u32 error_count = 0; |
| 9077 | int i; |
| 9078 | |
| 9079 | for (i = 0 ; i < GAUDI2_NUM_OF_ROT_ERR_CAUSE ; i++) |
| 9080 | if (intr_cause_data & BIT(i)) { |
			gaudi2_print_event(hdev, event_type, true,
				"err cause: %s", guadi2_rot_error_cause[i]);
| 9083 | error_count++; |
| 9084 | } |
| 9085 | |
| 9086 | /* check if RAZWI happened */ |
	gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_ROT, rot_index, 0, event_mask);
| 9088 | hl_check_for_glbl_errors(hdev); |
| 9089 | |
| 9090 | return error_count; |
| 9091 | } |
| 9092 | |
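/* Report the TPC interrupt causes delivered in the EQ entry payload and
 * check whether a RAZWI was triggered by this TPC.
 */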
| 9093 | static int gaudi2_tpc_ack_interrupts(struct hl_device *hdev, u8 tpc_index, u16 event_type, |
| 9094 | struct hl_eq_razwi_with_intr_cause *razwi_with_intr_cause, |
| 9095 | u64 *event_mask) |
| 9096 | { |
| 9097 | u64 intr_cause_data = le64_to_cpu(razwi_with_intr_cause->intr_cause.intr_cause_data); |
| 9098 | u32 error_count = 0; |
| 9099 | int i; |
| 9100 | |
| 9101 | for (i = 0 ; i < GAUDI2_NUM_OF_TPC_INTR_CAUSE ; i++) |
| 9102 | if (intr_cause_data & BIT(i)) { |
			gaudi2_print_event(hdev, event_type, true,
				"interrupt cause: %s", gaudi2_tpc_interrupts_cause[i]);
| 9105 | error_count++; |
| 9106 | } |
| 9107 | |
| 9108 | /* check if RAZWI happened */ |
	gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_TPC, tpc_index, 0, event_mask);
| 9110 | hl_check_for_glbl_errors(hdev); |
| 9111 | |
| 9112 | return error_count; |
| 9113 | } |
| 9114 | |
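/* Report and clear the decoder error causes. dec_index covers the DCORE
 * decoders first and the PCIE decoders after them.
 */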
| 9115 | static int gaudi2_handle_dec_err(struct hl_device *hdev, u8 dec_index, u16 event_type, |
| 9116 | u64 *event_mask) |
| 9117 | { |
| 9118 | u32 sts_addr, sts_val, sts_clr_val = 0, error_count = 0; |
| 9119 | int i; |
| 9120 | |
| 9121 | if (dec_index < NUM_OF_VDEC_PER_DCORE * NUM_OF_DCORES) |
| 9122 | /* DCORE DEC */ |
| 9123 | sts_addr = mmDCORE0_VDEC0_BRDG_CTRL_CAUSE_INTR + |
| 9124 | DCORE_OFFSET * (dec_index / NUM_OF_DEC_PER_DCORE) + |
| 9125 | DCORE_VDEC_OFFSET * (dec_index % NUM_OF_DEC_PER_DCORE); |
| 9126 | else |
| 9127 | /* PCIE DEC */ |
| 9128 | sts_addr = mmPCIE_VDEC0_BRDG_CTRL_CAUSE_INTR + PCIE_VDEC_OFFSET * |
| 9129 | (dec_index - NUM_OF_VDEC_PER_DCORE * NUM_OF_DCORES); |
| 9130 | |
| 9131 | sts_val = RREG32(sts_addr); |
| 9132 | |
| 9133 | for (i = 0 ; i < GAUDI2_NUM_OF_DEC_ERR_CAUSE ; i++) { |
| 9134 | if (sts_val & BIT(i)) { |
			gaudi2_print_event(hdev, event_type, true,
				"err cause: %s", gaudi2_dec_error_cause[i]);
| 9137 | sts_clr_val |= BIT(i); |
| 9138 | error_count++; |
| 9139 | } |
| 9140 | } |
| 9141 | |
| 9142 | /* check if RAZWI happened */ |
	gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_DEC, dec_index, 0, event_mask);
| 9144 | hl_check_for_glbl_errors(hdev); |
| 9145 | |
	/* Write 1 to clear the errors */
| 9147 | WREG32(sts_addr, sts_clr_val); |
| 9148 | |
| 9149 | return error_count; |
| 9150 | } |
| 9151 | |
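/* Report and clear the MME control error causes, then check each MME
 * initiator for a RAZWI indication.
 */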
| 9152 | static int gaudi2_handle_mme_err(struct hl_device *hdev, u8 mme_index, u16 event_type, |
| 9153 | u64 *event_mask) |
| 9154 | { |
| 9155 | u32 sts_addr, sts_val, sts_clr_addr, sts_clr_val = 0, error_count = 0; |
| 9156 | int i; |
| 9157 | |
| 9158 | sts_addr = mmDCORE0_MME_CTRL_LO_INTR_CAUSE + DCORE_OFFSET * mme_index; |
| 9159 | sts_clr_addr = mmDCORE0_MME_CTRL_LO_INTR_CLEAR + DCORE_OFFSET * mme_index; |
| 9160 | |
| 9161 | sts_val = RREG32(sts_addr); |
| 9162 | |
| 9163 | for (i = 0 ; i < GAUDI2_NUM_OF_MME_ERR_CAUSE ; i++) { |
| 9164 | if (sts_val & BIT(i)) { |
			gaudi2_print_event(hdev, event_type, true,
				"err cause: %s", guadi2_mme_error_cause[i]);
| 9167 | sts_clr_val |= BIT(i); |
| 9168 | error_count++; |
| 9169 | } |
| 9170 | } |
| 9171 | |
| 9172 | /* check if RAZWI happened */ |
| 9173 | for (i = MME_WRITE ; i < MME_INITIATORS_MAX ; i++) |
		gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, i, event_mask);
| 9175 | |
| 9176 | hl_check_for_glbl_errors(hdev); |
| 9177 | |
| 9178 | WREG32(sts_clr_addr, sts_clr_val); |
| 9179 | |
| 9180 | return error_count; |
| 9181 | } |
| 9182 | |
| 9183 | static int gaudi2_handle_mme_sbte_err(struct hl_device *hdev, u16 event_type) |
| 9184 | { |
| 9185 | /* |
| 9186 | * We have a single error cause here but the report mechanism is |
| 9187 | * buggy. Hence there is no good reason to fetch the cause so we |
| 9188 | * just check for glbl_errors and exit. |
| 9189 | */ |
| 9190 | hl_check_for_glbl_errors(hdev); |
| 9191 | |
| 9192 | return GAUDI2_NA_EVENT_CAUSE; |
| 9193 | } |
| 9194 | |
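/* Report and clear the MME ACC (WAP) error causes, then check WAP0/WAP1
 * for a RAZWI indication.
 */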
| 9195 | static int gaudi2_handle_mme_wap_err(struct hl_device *hdev, u8 mme_index, u16 event_type, |
| 9196 | u64 *event_mask) |
| 9197 | { |
| 9198 | u32 sts_addr, sts_val, sts_clr_addr, sts_clr_val = 0, error_count = 0; |
| 9199 | int i; |
| 9200 | |
| 9201 | sts_addr = mmDCORE0_MME_ACC_INTR_CAUSE + DCORE_OFFSET * mme_index; |
| 9202 | sts_clr_addr = mmDCORE0_MME_ACC_INTR_CLEAR + DCORE_OFFSET * mme_index; |
| 9203 | |
| 9204 | sts_val = RREG32(sts_addr); |
| 9205 | |
| 9206 | for (i = 0 ; i < GAUDI2_NUM_OF_MME_WAP_ERR_CAUSE ; i++) { |
| 9207 | if (sts_val & BIT(i)) { |
			gaudi2_print_event(hdev, event_type, true,
				"err cause: %s", guadi2_mme_wap_error_cause[i]);
| 9210 | sts_clr_val |= BIT(i); |
| 9211 | error_count++; |
| 9212 | } |
| 9213 | } |
| 9214 | |
| 9215 | /* check if RAZWI happened on WAP0/1 */ |
	gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, MME_WAP0, event_mask);
	gaudi2_ack_module_razwi_event_handler(hdev, RAZWI_MME, mme_index, MME_WAP1, event_mask);
| 9218 | hl_check_for_glbl_errors(hdev); |
| 9219 | |
| 9220 | WREG32(sts_clr_addr, sts_clr_val); |
| 9221 | |
| 9222 | return error_count; |
| 9223 | } |
| 9224 | |
| 9225 | static int gaudi2_handle_kdma_core_event(struct hl_device *hdev, u16 event_type, |
| 9226 | u64 intr_cause_data) |
| 9227 | { |
| 9228 | u32 error_count = 0; |
| 9229 | int i; |
| 9230 | |
	/* If an AXI read or write error is received, an error is reported and
	 * an interrupt message is sent. Due to a HW errata, when reading the
	 * cause register of the KDMA engine, the reported error is always HBW
	 * even if the actual error was caused by an LBW KDMA transaction.
	 */
| 9236 | for (i = 0 ; i < GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE ; i++) |
| 9237 | if (intr_cause_data & BIT(i)) { |
			gaudi2_print_event(hdev, event_type, true,
				"err cause: %s", gaudi2_kdma_core_interrupts_cause[i]);
| 9240 | error_count++; |
| 9241 | } |
| 9242 | |
| 9243 | hl_check_for_glbl_errors(hdev); |
| 9244 | |
| 9245 | return error_count; |
| 9246 | } |
| 9247 | |
| 9248 | static int gaudi2_handle_dma_core_event(struct hl_device *hdev, u16 event_type, u64 intr_cause) |
| 9249 | { |
| 9250 | u32 error_count = 0; |
| 9251 | int i; |
| 9252 | |
| 9253 | for (i = 0 ; i < GAUDI2_NUM_OF_DMA_CORE_INTR_CAUSE ; i++) |
| 9254 | if (intr_cause & BIT(i)) { |
			gaudi2_print_event(hdev, event_type, true,
				"err cause: %s", gaudi2_dma_core_interrupts_cause[i]);
| 9257 | error_count++; |
| 9258 | } |
| 9259 | |
| 9260 | hl_check_for_glbl_errors(hdev); |
| 9261 | |
| 9262 | return error_count; |
| 9263 | } |
| 9264 | |
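/* Check all four "RAZWI happened" indications of the PCIE shared range
 * registers (HBW/LBW x AW/AR), print the captured info and clear every
 * indication that was set.
 */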
| 9265 | static void gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(struct hl_device *hdev, u64 *event_mask) |
| 9266 | { |
| 9267 | u32 mstr_if_base_addr = mmPCIE_MSTR_RR_MSTR_IF_RR_SHRD_HBW_BASE, razwi_happened_addr; |
| 9268 | |
| 9269 | razwi_happened_addr = mstr_if_base_addr + RR_SHRD_HBW_AW_RAZWI_HAPPENED; |
| 9270 | if (RREG32(razwi_happened_addr)) { |
		gaudi2_razwi_rr_hbw_shared_printf_info(hdev, mstr_if_base_addr, true, "PCIE",
						GAUDI2_ENGINE_ID_PCIE, event_mask);
| 9273 | WREG32(razwi_happened_addr, 0x1); |
| 9274 | } |
| 9275 | |
| 9276 | razwi_happened_addr = mstr_if_base_addr + RR_SHRD_HBW_AR_RAZWI_HAPPENED; |
| 9277 | if (RREG32(razwi_happened_addr)) { |
		gaudi2_razwi_rr_hbw_shared_printf_info(hdev, mstr_if_base_addr, false, "PCIE",
						GAUDI2_ENGINE_ID_PCIE, event_mask);
| 9280 | WREG32(razwi_happened_addr, 0x1); |
| 9281 | } |
| 9282 | |
| 9283 | razwi_happened_addr = mstr_if_base_addr + RR_SHRD_LBW_AW_RAZWI_HAPPENED; |
| 9284 | if (RREG32(razwi_happened_addr)) { |
		gaudi2_razwi_rr_lbw_shared_printf_info(hdev, mstr_if_base_addr, true, "PCIE",
						GAUDI2_ENGINE_ID_PCIE, event_mask);
| 9287 | WREG32(razwi_happened_addr, 0x1); |
| 9288 | } |
| 9289 | |
| 9290 | razwi_happened_addr = mstr_if_base_addr + RR_SHRD_LBW_AR_RAZWI_HAPPENED; |
| 9291 | if (RREG32(razwi_happened_addr)) { |
		gaudi2_razwi_rr_lbw_shared_printf_info(hdev, mstr_if_base_addr, false, "PCIE",
						GAUDI2_ENGINE_ID_PCIE, event_mask);
| 9294 | WREG32(razwi_happened_addr, 0x1); |
| 9295 | } |
| 9296 | } |
| 9297 | |
| 9298 | static int gaudi2_print_pcie_addr_dec_info(struct hl_device *hdev, u16 event_type, |
| 9299 | u64 intr_cause_data, u64 *event_mask) |
| 9300 | { |
| 9301 | u32 error_count = 0; |
| 9302 | int i; |
| 9303 | |
| 9304 | for (i = 0 ; i < GAUDI2_NUM_OF_PCIE_ADDR_DEC_ERR_CAUSE ; i++) { |
| 9305 | if (!(intr_cause_data & BIT_ULL(i))) |
| 9306 | continue; |
| 9307 | |
		gaudi2_print_event(hdev, event_type, true,
			"err cause: %s", gaudi2_pcie_addr_dec_error_cause[i]);
| 9310 | error_count++; |
| 9311 | |
| 9312 | switch (intr_cause_data & BIT_ULL(i)) { |
| 9313 | case PCIE_WRAP_PCIE_IC_SEI_INTR_IND_AXI_LBW_ERR_INTR_MASK: |
| 9314 | hl_check_for_glbl_errors(hdev); |
| 9315 | break; |
| 9316 | case PCIE_WRAP_PCIE_IC_SEI_INTR_IND_BAD_ACCESS_INTR_MASK: |
| 9317 | gaudi2_print_pcie_mstr_rr_mstr_if_razwi_info(hdev, event_mask); |
| 9318 | break; |
| 9319 | } |
| 9320 | } |
| 9321 | |
| 9322 | return error_count; |
| 9323 | } |
| 9324 | |
| 9325 | static int gaudi2_handle_pif_fatal(struct hl_device *hdev, u16 event_type, |
| 9326 | u64 intr_cause_data) |
| 9327 | |
| 9328 | { |
| 9329 | u32 error_count = 0; |
| 9330 | int i; |
| 9331 | |
| 9332 | for (i = 0 ; i < GAUDI2_NUM_OF_PMMU_FATAL_ERR_CAUSE ; i++) { |
| 9333 | if (intr_cause_data & BIT_ULL(i)) { |
			gaudi2_print_event(hdev, event_type, true,
				"err cause: %s", gaudi2_pmmu_fatal_interrupts_cause[i]);
| 9336 | error_count++; |
| 9337 | } |
| 9338 | } |
| 9339 | |
| 9340 | return error_count; |
| 9341 | } |
| 9342 | |
| 9343 | static int gaudi2_handle_hif_fatal(struct hl_device *hdev, u16 event_type, u64 intr_cause_data) |
| 9344 | { |
| 9345 | u32 error_count = 0; |
| 9346 | int i; |
| 9347 | |
| 9348 | for (i = 0 ; i < GAUDI2_NUM_OF_HIF_FATAL_ERR_CAUSE ; i++) { |
| 9349 | if (intr_cause_data & BIT_ULL(i)) { |
			gaudi2_print_event(hdev, event_type, true,
				"err cause: %s", gaudi2_hif_fatal_interrupts_cause[i]);
| 9352 | error_count++; |
| 9353 | } |
| 9354 | } |
| 9355 | |
| 9356 | return error_count; |
| 9357 | } |
| 9358 | |
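/* If the MMU latched a page error, extract the faulting VA (descrambled for
 * HMMU), report it and clear the valid-entry indication.
 */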
| 9359 | static void gaudi2_handle_page_error(struct hl_device *hdev, u64 mmu_base, bool is_pmmu, |
| 9360 | u64 *event_mask) |
| 9361 | { |
| 9362 | u32 valid, val; |
| 9363 | u64 addr; |
| 9364 | |
| 9365 | valid = RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID)); |
| 9366 | |
| 9367 | if (!(valid & DCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID_PAGE_ERR_VALID_ENTRY_MASK)) |
| 9368 | return; |
| 9369 | |
| 9370 | val = RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE)); |
| 9371 | addr = val & DCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE_VA_63_32_MASK; |
| 9372 | addr <<= 32; |
| 9373 | addr |= RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_PAGE_ERROR_CAPTURE_VA)); |
| 9374 | |
| 9375 | if (is_pmmu) { |
| 9376 | dev_err_ratelimited(hdev->dev, "PMMU page fault on va 0x%llx\n" , addr); |
| 9377 | } else { |
		addr = gaudi2_mmu_descramble_addr(hdev, addr);
| 9379 | addr &= HW_UNSCRAMBLED_BITS_MASK; |
| 9380 | dev_err_ratelimited(hdev->dev, "HMMU page fault on va range 0x%llx - 0x%llx\n" , |
| 9381 | addr, addr + ~HW_UNSCRAMBLED_BITS_MASK); |
| 9382 | } |
| 9383 | |
	hl_handle_page_fault(hdev, addr, 0, is_pmmu, event_mask);
| 9385 | |
| 9386 | WREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID), 0); |
| 9387 | } |
| 9388 | |
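/* Same flow as gaudi2_handle_page_error() but for the MMU access error
 * capture registers.
 */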
| 9389 | static void gaudi2_handle_access_error(struct hl_device *hdev, u64 mmu_base, bool is_pmmu) |
| 9390 | { |
| 9391 | u32 valid, val; |
| 9392 | u64 addr; |
| 9393 | |
| 9394 | valid = RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID)); |
| 9395 | |
| 9396 | if (!(valid & DCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID_ACCESS_ERR_VALID_ENTRY_MASK)) |
| 9397 | return; |
| 9398 | |
| 9399 | val = RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_ERROR_CAPTURE)); |
| 9400 | addr = val & DCORE0_HMMU0_MMU_ACCESS_ERROR_CAPTURE_VA_63_32_MASK; |
| 9401 | addr <<= 32; |
| 9402 | addr |= RREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_ERROR_CAPTURE_VA)); |
| 9403 | |
| 9404 | if (!is_pmmu) |
		addr = gaudi2_mmu_descramble_addr(hdev, addr);
| 9406 | |
| 9407 | dev_err_ratelimited(hdev->dev, "%s access error on va 0x%llx\n" , |
| 9408 | is_pmmu ? "PMMU" : "HMMU" , addr); |
| 9409 | WREG32(mmu_base + MMU_OFFSET(mmDCORE0_HMMU0_MMU_ACCESS_PAGE_ERROR_VALID), 0); |
| 9410 | } |
| 9411 | |
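/* Common MMU SPI/SEI flow: report each set cause (causes 0/1 also latch a
 * page/access error capture), then clear the cause and interrupt registers.
 */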
| 9412 | static int gaudi2_handle_mmu_spi_sei_generic(struct hl_device *hdev, u16 event_type, |
| 9413 | u64 mmu_base, bool is_pmmu, u64 *event_mask) |
| 9414 | { |
| 9415 | u32 spi_sei_cause, interrupt_clr = 0x0, error_count = 0; |
| 9416 | int i; |
| 9417 | |
| 9418 | spi_sei_cause = RREG32(mmu_base + MMU_SPI_SEI_CAUSE_OFFSET); |
| 9419 | |
| 9420 | for (i = 0 ; i < GAUDI2_NUM_OF_MMU_SPI_SEI_CAUSE ; i++) { |
| 9421 | if (spi_sei_cause & BIT(i)) { |
			gaudi2_print_event(hdev, event_type, true,
				"err cause: %s", gaudi2_mmu_spi_sei[i].cause);
| 9424 | |
| 9425 | if (i == 0) |
| 9426 | gaudi2_handle_page_error(hdev, mmu_base, is_pmmu, event_mask); |
| 9427 | else if (i == 1) |
| 9428 | gaudi2_handle_access_error(hdev, mmu_base, is_pmmu); |
| 9429 | |
| 9430 | if (gaudi2_mmu_spi_sei[i].clear_bit >= 0) |
| 9431 | interrupt_clr |= BIT(gaudi2_mmu_spi_sei[i].clear_bit); |
| 9432 | |
| 9433 | error_count++; |
| 9434 | } |
| 9435 | } |
| 9436 | |
| 9437 | /* Clear cause */ |
| 9438 | WREG32_AND(mmu_base + MMU_SPI_SEI_CAUSE_OFFSET, ~spi_sei_cause); |
| 9439 | |
| 9440 | /* Clear interrupt */ |
| 9441 | WREG32(mmu_base + MMU_INTERRUPT_CLR_OFFSET, interrupt_clr); |
| 9442 | |
| 9443 | return error_count; |
| 9444 | } |
| 9445 | |
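/* Handle a sync manager error: decode the SM SEI cause and any pending CQ
 * interrupt, clearing both after they are reported.
 */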
| 9446 | static int gaudi2_handle_sm_err(struct hl_device *hdev, u16 event_type, u8 sm_index) |
| 9447 | { |
| 9448 | u32 sei_cause_addr, sei_cause_val, sei_cause_cause, sei_cause_log, |
| 9449 | cq_intr_addr, cq_intr_val, cq_intr_queue_index, error_count = 0; |
| 9450 | int i; |
| 9451 | |
| 9452 | sei_cause_addr = mmDCORE0_SYNC_MNGR_GLBL_SM_SEI_CAUSE + DCORE_OFFSET * sm_index; |
| 9453 | cq_intr_addr = mmDCORE0_SYNC_MNGR_GLBL_CQ_INTR + DCORE_OFFSET * sm_index; |
| 9454 | |
| 9455 | sei_cause_val = RREG32(sei_cause_addr); |
| 9456 | sei_cause_cause = FIELD_GET(DCORE0_SYNC_MNGR_GLBL_SM_SEI_CAUSE_CAUSE_MASK, sei_cause_val); |
| 9457 | cq_intr_val = RREG32(cq_intr_addr); |
| 9458 | |
| 9459 | /* SEI interrupt */ |
| 9460 | if (sei_cause_cause) { |
| 9461 | /* There are corresponding SEI_CAUSE_log bits for every SEI_CAUSE_cause bit */ |
| 9462 | sei_cause_log = FIELD_GET(DCORE0_SYNC_MNGR_GLBL_SM_SEI_CAUSE_LOG_MASK, |
| 9463 | sei_cause_val); |
| 9464 | |
| 9465 | for (i = 0 ; i < GAUDI2_NUM_OF_SM_SEI_ERR_CAUSE ; i++) { |
| 9466 | if (!(sei_cause_cause & BIT(i))) |
| 9467 | continue; |
| 9468 | |
			gaudi2_print_event(hdev, event_type, true,
				"err cause: %s. %s: 0x%X",
| 9471 | gaudi2_sm_sei_cause[i].cause_name, |
| 9472 | gaudi2_sm_sei_cause[i].log_name, |
| 9473 | sei_cause_log); |
| 9474 | error_count++; |
| 9475 | break; |
| 9476 | } |
| 9477 | |
| 9478 | /* Clear SM_SEI_CAUSE */ |
| 9479 | WREG32(sei_cause_addr, 0); |
| 9480 | } |
| 9481 | |
| 9482 | /* CQ interrupt */ |
| 9483 | if (cq_intr_val & DCORE0_SYNC_MNGR_GLBL_CQ_INTR_CQ_SEC_INTR_MASK) { |
| 9484 | cq_intr_queue_index = |
| 9485 | FIELD_GET(DCORE0_SYNC_MNGR_GLBL_CQ_INTR_CQ_INTR_QUEUE_INDEX_MASK, |
| 9486 | cq_intr_val); |
| 9487 | |
| 9488 | dev_err_ratelimited(hdev->dev, "SM%u err. err cause: CQ_INTR. queue index: %u\n" , |
| 9489 | sm_index, cq_intr_queue_index); |
| 9490 | error_count++; |
| 9491 | |
| 9492 | /* Clear CQ_INTR */ |
| 9493 | WREG32(cq_intr_addr, 0); |
| 9494 | } |
| 9495 | |
| 9496 | hl_check_for_glbl_errors(hdev); |
| 9497 | |
| 9498 | return error_count; |
| 9499 | } |
| 9500 | |
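/* Translate an HMMU event to the matching MMU block base address. The HMMU
 * number in the event name does not grow monotonically with the (dcore,
 * index-in-dcore) pair, hence the explicit per-event mapping.
 */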
| 9501 | static u64 get_hmmu_base(u16 event_type) |
| 9502 | { |
| 9503 | u8 dcore, index_in_dcore; |
| 9504 | |
| 9505 | switch (event_type) { |
| 9506 | case GAUDI2_EVENT_HMMU_0_AXI_ERR_RSP: |
| 9507 | case GAUDI2_EVENT_HMMU0_SPI_BASE ... GAUDI2_EVENT_HMMU0_SECURITY_ERROR: |
| 9508 | dcore = 0; |
| 9509 | index_in_dcore = 0; |
| 9510 | break; |
| 9511 | case GAUDI2_EVENT_HMMU_1_AXI_ERR_RSP: |
| 9512 | case GAUDI2_EVENT_HMMU1_SPI_BASE ... GAUDI2_EVENT_HMMU1_SECURITY_ERROR: |
| 9513 | dcore = 1; |
| 9514 | index_in_dcore = 0; |
| 9515 | break; |
| 9516 | case GAUDI2_EVENT_HMMU_2_AXI_ERR_RSP: |
| 9517 | case GAUDI2_EVENT_HMMU2_SPI_BASE ... GAUDI2_EVENT_HMMU2_SECURITY_ERROR: |
| 9518 | dcore = 0; |
| 9519 | index_in_dcore = 1; |
| 9520 | break; |
| 9521 | case GAUDI2_EVENT_HMMU_3_AXI_ERR_RSP: |
| 9522 | case GAUDI2_EVENT_HMMU3_SPI_BASE ... GAUDI2_EVENT_HMMU3_SECURITY_ERROR: |
| 9523 | dcore = 1; |
| 9524 | index_in_dcore = 1; |
| 9525 | break; |
| 9526 | case GAUDI2_EVENT_HMMU_4_AXI_ERR_RSP: |
| 9527 | case GAUDI2_EVENT_HMMU4_SPI_BASE ... GAUDI2_EVENT_HMMU4_SECURITY_ERROR: |
| 9528 | dcore = 3; |
| 9529 | index_in_dcore = 2; |
| 9530 | break; |
| 9531 | case GAUDI2_EVENT_HMMU_5_AXI_ERR_RSP: |
| 9532 | case GAUDI2_EVENT_HMMU5_SPI_BASE ... GAUDI2_EVENT_HMMU5_SECURITY_ERROR: |
| 9533 | dcore = 2; |
| 9534 | index_in_dcore = 2; |
| 9535 | break; |
| 9536 | case GAUDI2_EVENT_HMMU_6_AXI_ERR_RSP: |
| 9537 | case GAUDI2_EVENT_HMMU6_SPI_BASE ... GAUDI2_EVENT_HMMU6_SECURITY_ERROR: |
| 9538 | dcore = 3; |
| 9539 | index_in_dcore = 3; |
| 9540 | break; |
| 9541 | case GAUDI2_EVENT_HMMU_7_AXI_ERR_RSP: |
| 9542 | case GAUDI2_EVENT_HMMU7_SPI_BASE ... GAUDI2_EVENT_HMMU7_SECURITY_ERROR: |
| 9543 | dcore = 2; |
| 9544 | index_in_dcore = 3; |
| 9545 | break; |
| 9546 | case GAUDI2_EVENT_HMMU_8_AXI_ERR_RSP: |
| 9547 | case GAUDI2_EVENT_HMMU8_SPI_BASE ... GAUDI2_EVENT_HMMU8_SECURITY_ERROR: |
| 9548 | dcore = 0; |
| 9549 | index_in_dcore = 2; |
| 9550 | break; |
| 9551 | case GAUDI2_EVENT_HMMU_9_AXI_ERR_RSP: |
| 9552 | case GAUDI2_EVENT_HMMU9_SPI_BASE ... GAUDI2_EVENT_HMMU9_SECURITY_ERROR: |
| 9553 | dcore = 1; |
| 9554 | index_in_dcore = 2; |
| 9555 | break; |
| 9556 | case GAUDI2_EVENT_HMMU_10_AXI_ERR_RSP: |
| 9557 | case GAUDI2_EVENT_HMMU10_SPI_BASE ... GAUDI2_EVENT_HMMU10_SECURITY_ERROR: |
| 9558 | dcore = 0; |
| 9559 | index_in_dcore = 3; |
| 9560 | break; |
| 9561 | case GAUDI2_EVENT_HMMU_11_AXI_ERR_RSP: |
| 9562 | case GAUDI2_EVENT_HMMU11_SPI_BASE ... GAUDI2_EVENT_HMMU11_SECURITY_ERROR: |
| 9563 | dcore = 1; |
| 9564 | index_in_dcore = 3; |
| 9565 | break; |
| 9566 | case GAUDI2_EVENT_HMMU_12_AXI_ERR_RSP: |
| 9567 | case GAUDI2_EVENT_HMMU12_SPI_BASE ... GAUDI2_EVENT_HMMU12_SECURITY_ERROR: |
| 9568 | dcore = 3; |
| 9569 | index_in_dcore = 0; |
| 9570 | break; |
| 9571 | case GAUDI2_EVENT_HMMU_13_AXI_ERR_RSP: |
| 9572 | case GAUDI2_EVENT_HMMU13_SPI_BASE ... GAUDI2_EVENT_HMMU13_SECURITY_ERROR: |
| 9573 | dcore = 2; |
| 9574 | index_in_dcore = 0; |
| 9575 | break; |
| 9576 | case GAUDI2_EVENT_HMMU_14_AXI_ERR_RSP: |
| 9577 | case GAUDI2_EVENT_HMMU14_SPI_BASE ... GAUDI2_EVENT_HMMU14_SECURITY_ERROR: |
| 9578 | dcore = 3; |
| 9579 | index_in_dcore = 1; |
| 9580 | break; |
| 9581 | case GAUDI2_EVENT_HMMU_15_AXI_ERR_RSP: |
| 9582 | case GAUDI2_EVENT_HMMU15_SPI_BASE ... GAUDI2_EVENT_HMMU15_SECURITY_ERROR: |
| 9583 | dcore = 2; |
| 9584 | index_in_dcore = 1; |
| 9585 | break; |
| 9586 | default: |
| 9587 | return ULONG_MAX; |
| 9588 | } |
| 9589 | |
| 9590 | return mmDCORE0_HMMU0_MMU_BASE + dcore * DCORE_OFFSET + index_in_dcore * DCORE_HMMU_OFFSET; |
| 9591 | } |
| 9592 | |
| 9593 | static int gaudi2_handle_mmu_spi_sei_err(struct hl_device *hdev, u16 event_type, u64 *event_mask) |
| 9594 | { |
| 9595 | bool is_pmmu = false; |
| 9596 | u32 error_count = 0; |
| 9597 | u64 mmu_base; |
| 9598 | |
| 9599 | switch (event_type) { |
| 9600 | case GAUDI2_EVENT_HMMU_0_AXI_ERR_RSP ... GAUDI2_EVENT_HMMU_12_AXI_ERR_RSP: |
| 9601 | case GAUDI2_EVENT_HMMU0_SPI_BASE ... GAUDI2_EVENT_HMMU12_SECURITY_ERROR: |
| 9602 | mmu_base = get_hmmu_base(event_type); |
| 9603 | break; |
| 9604 | |
| 9605 | case GAUDI2_EVENT_PMMU0_PAGE_FAULT_WR_PERM ... GAUDI2_EVENT_PMMU0_SECURITY_ERROR: |
| 9606 | case GAUDI2_EVENT_PMMU_AXI_ERR_RSP_0: |
| 9607 | is_pmmu = true; |
| 9608 | mmu_base = mmPMMU_HBW_MMU_BASE; |
| 9609 | break; |
| 9610 | default: |
| 9611 | return 0; |
| 9612 | } |
| 9613 | |
| 9614 | if (mmu_base == ULONG_MAX) |
| 9615 | return 0; |
| 9616 | |
| 9617 | error_count = gaudi2_handle_mmu_spi_sei_generic(hdev, event_type, mmu_base, |
| 9618 | is_pmmu, event_mask); |
| 9619 | hl_check_for_glbl_errors(hdev); |
| 9620 | |
| 9621 | return error_count; |
| 9622 | } |
| 9623 | |
| 9624 | |
| 9625 | /* returns true if hard reset is required (ECC DERR or Read parity), false otherwise (ECC SERR) */ |
| 9626 | static bool gaudi2_hbm_sei_handle_read_err(struct hl_device *hdev, |
| 9627 | struct hl_eq_hbm_sei_read_err_intr_info *rd_err_data, u32 err_cnt) |
| 9628 | { |
| 9629 | bool require_hard_reset = false; |
| 9630 | u32 addr, beat, beat_shift; |
| 9631 | |
| 9632 | dev_err_ratelimited(hdev->dev, |
| 9633 | "READ ERROR count: ECC SERR: %d, ECC DERR: %d, RD_PARITY: %d\n" , |
| 9634 | FIELD_GET(HBM_ECC_SERR_CNTR_MASK, err_cnt), |
| 9635 | FIELD_GET(HBM_ECC_DERR_CNTR_MASK, err_cnt), |
| 9636 | FIELD_GET(HBM_RD_PARITY_CNTR_MASK, err_cnt)); |
| 9637 | |
| 9638 | addr = le32_to_cpu(rd_err_data->dbg_rd_err_addr.rd_addr_val); |
| 9639 | dev_err_ratelimited(hdev->dev, |
| 9640 | "READ ERROR address: sid(%u), bg(%u), ba(%u), col(%u), row(%u)\n" , |
| 9641 | FIELD_GET(HBM_RD_ADDR_SID_MASK, addr), |
| 9642 | FIELD_GET(HBM_RD_ADDR_BG_MASK, addr), |
| 9643 | FIELD_GET(HBM_RD_ADDR_BA_MASK, addr), |
| 9644 | FIELD_GET(HBM_RD_ADDR_COL_MASK, addr), |
| 9645 | FIELD_GET(HBM_RD_ADDR_ROW_MASK, addr)); |
| 9646 | |
| 9647 | /* For each beat (RDQS edge), look for possible errors and print relevant info */ |
| 9648 | for (beat = 0 ; beat < 4 ; beat++) { |
| 9649 | if (le32_to_cpu(rd_err_data->dbg_rd_err_misc) & |
| 9650 | (HBM_RD_ERR_SERR_BEAT0_MASK << beat)) |
| 9651 | dev_err_ratelimited(hdev->dev, "Beat%d ECC SERR: DM: %#x, Syndrome: %#x\n" , |
| 9652 | beat, |
| 9653 | le32_to_cpu(rd_err_data->dbg_rd_err_dm), |
| 9654 | le32_to_cpu(rd_err_data->dbg_rd_err_syndrome)); |
| 9655 | |
| 9656 | if (le32_to_cpu(rd_err_data->dbg_rd_err_misc) & |
| 9657 | (HBM_RD_ERR_DERR_BEAT0_MASK << beat)) { |
| 9658 | dev_err_ratelimited(hdev->dev, "Beat%d ECC DERR: DM: %#x, Syndrome: %#x\n" , |
| 9659 | beat, |
| 9660 | le32_to_cpu(rd_err_data->dbg_rd_err_dm), |
| 9661 | le32_to_cpu(rd_err_data->dbg_rd_err_syndrome)); |
| 9662 | require_hard_reset = true; |
| 9663 | } |
| 9664 | |
| 9665 | beat_shift = beat * HBM_RD_ERR_BEAT_SHIFT; |
| 9666 | if (le32_to_cpu(rd_err_data->dbg_rd_err_misc) & |
| 9667 | (HBM_RD_ERR_PAR_ERR_BEAT0_MASK << beat_shift)) { |
| 9668 | dev_err_ratelimited(hdev->dev, |
| 9669 | "Beat%d read PARITY: DM: %#x, PAR data: %#x\n" , |
| 9670 | beat, |
| 9671 | le32_to_cpu(rd_err_data->dbg_rd_err_dm), |
| 9672 | (le32_to_cpu(rd_err_data->dbg_rd_err_misc) & |
| 9673 | (HBM_RD_ERR_PAR_DATA_BEAT0_MASK << beat_shift)) >> |
| 9674 | (HBM_RD_ERR_PAR_DATA_BEAT0_SHIFT + beat_shift)); |
| 9675 | require_hard_reset = true; |
| 9676 | } |
| 9677 | |
| 9678 | dev_err_ratelimited(hdev->dev, "Beat%d DQ data:\n" , beat); |
| 9679 | dev_err_ratelimited(hdev->dev, "\t0x%08x\n" , |
| 9680 | le32_to_cpu(rd_err_data->dbg_rd_err_data[beat * 2])); |
| 9681 | dev_err_ratelimited(hdev->dev, "\t0x%08x\n" , |
| 9682 | le32_to_cpu(rd_err_data->dbg_rd_err_data[beat * 2 + 1])); |
| 9683 | } |
| 9684 | |
| 9685 | return require_hard_reset; |
| 9686 | } |
| 9687 | |
| 9688 | static void gaudi2_hbm_sei_print_wr_par_info(struct hl_device *hdev, |
| 9689 | struct hl_eq_hbm_sei_wr_par_intr_info *wr_par_err_data, u32 err_cnt) |
| 9690 | { |
| 9691 | struct hbm_sei_wr_cmd_address *wr_cmd_addr = wr_par_err_data->dbg_last_wr_cmds; |
| 9692 | u32 i, curr_addr, derr = wr_par_err_data->dbg_derr; |
| 9693 | |
| 9694 | dev_err_ratelimited(hdev->dev, "WRITE PARITY ERROR count: %d\n" , err_cnt); |
| 9695 | |
| 9696 | dev_err_ratelimited(hdev->dev, "CK-0 DERR: 0x%02x, CK-1 DERR: 0x%02x\n" , |
| 9697 | derr & 0x3, derr & 0xc); |
| 9698 | |
| 9699 | /* JIRA H6-3286 - the following prints may not be valid */ |
| 9700 | dev_err_ratelimited(hdev->dev, "Last latched write commands addresses:\n" ); |
| 9701 | for (i = 0 ; i < HBM_WR_PAR_CMD_LIFO_LEN ; i++) { |
| 9702 | curr_addr = le32_to_cpu(wr_cmd_addr[i].dbg_wr_cmd_addr); |
| 9703 | dev_err_ratelimited(hdev->dev, |
| 9704 | "\twrite cmd[%u]: Address: SID(%u) BG(%u) BA(%u) COL(%u).\n" , |
| 9705 | i, |
| 9706 | FIELD_GET(WR_PAR_LAST_CMD_SID_MASK, curr_addr), |
| 9707 | FIELD_GET(WR_PAR_LAST_CMD_BG_MASK, curr_addr), |
| 9708 | FIELD_GET(WR_PAR_LAST_CMD_BA_MASK, curr_addr), |
| 9709 | FIELD_GET(WR_PAR_LAST_CMD_COL_MASK, curr_addr)); |
| 9710 | } |
| 9711 | } |
| 9712 | |
| 9713 | static void gaudi2_hbm_sei_print_ca_par_info(struct hl_device *hdev, |
| 9714 | struct hl_eq_hbm_sei_ca_par_intr_info *ca_par_err_data, u32 err_cnt) |
| 9715 | { |
| 9716 | __le32 *col_cmd = ca_par_err_data->dbg_col; |
| 9717 | __le16 *row_cmd = ca_par_err_data->dbg_row; |
| 9718 | u32 i; |
| 9719 | |
| 9720 | dev_err_ratelimited(hdev->dev, "CA ERROR count: %d\n" , err_cnt); |
| 9721 | |
| 9722 | dev_err_ratelimited(hdev->dev, "Last latched C&R bus commands:\n" ); |
| 9723 | for (i = 0 ; i < HBM_CA_ERR_CMD_LIFO_LEN ; i++) |
| 9724 | dev_err_ratelimited(hdev->dev, "cmd%u: ROW(0x%04x) COL(0x%05x)\n" , i, |
| 9725 | le16_to_cpu(row_cmd[i]) & (u16)GENMASK(13, 0), |
| 9726 | le32_to_cpu(col_cmd[i]) & (u32)GENMASK(17, 0)); |
| 9727 | } |
| 9728 | |
| 9729 | /* Returns true if hard reset is needed or false otherwise */ |
| 9730 | static bool gaudi2_handle_hbm_mc_sei_err(struct hl_device *hdev, u16 event_type, |
| 9731 | struct hl_eq_hbm_sei_data *sei_data) |
| 9732 | { |
| 9733 | bool require_hard_reset = false; |
| 9734 | u32 hbm_id, mc_id, cause_idx; |
| 9735 | |
| 9736 | hbm_id = (event_type - GAUDI2_EVENT_HBM0_MC0_SEI_SEVERE) / 4; |
| 9737 | mc_id = ((event_type - GAUDI2_EVENT_HBM0_MC0_SEI_SEVERE) / 2) % 2; |
| 9738 | |
| 9739 | cause_idx = sei_data->hdr.sei_cause; |
| 9740 | if (cause_idx > GAUDI2_NUM_OF_HBM_SEI_CAUSE - 1) { |
		gaudi2_print_event(hdev, event_type, true,
			"err cause: Invalid HBM SEI event cause (%d) provided by FW",
			cause_idx);
| 9744 | return true; |
| 9745 | } |
| 9746 | |
	gaudi2_print_event(hdev, event_type, !sei_data->hdr.is_critical,
		"System %s Error Interrupt - HBM(%u) MC(%u) MC_CH(%u) MC_PC(%u). Error cause: %s",
		sei_data->hdr.is_critical ? "Critical" : "Non-critical",
| 9750 | hbm_id, mc_id, sei_data->hdr.mc_channel, sei_data->hdr.mc_pseudo_channel, |
| 9751 | hbm_mc_sei_cause[cause_idx]); |
| 9752 | |
| 9753 | /* Print error-specific info */ |
| 9754 | switch (cause_idx) { |
| 9755 | case HBM_SEI_CATTRIP: |
| 9756 | require_hard_reset = true; |
| 9757 | break; |
| 9758 | |
| 9759 | case HBM_SEI_CMD_PARITY_EVEN: |
		gaudi2_hbm_sei_print_ca_par_info(hdev, &sei_data->ca_parity_even_info,
| 9761 | le32_to_cpu(sei_data->hdr.cnt)); |
| 9762 | require_hard_reset = true; |
| 9763 | break; |
| 9764 | |
| 9765 | case HBM_SEI_CMD_PARITY_ODD: |
		gaudi2_hbm_sei_print_ca_par_info(hdev, &sei_data->ca_parity_odd_info,
| 9767 | le32_to_cpu(sei_data->hdr.cnt)); |
| 9768 | require_hard_reset = true; |
| 9769 | break; |
| 9770 | |
| 9771 | case HBM_SEI_WRITE_DATA_PARITY_ERR: |
		gaudi2_hbm_sei_print_wr_par_info(hdev, &sei_data->wr_parity_info,
| 9773 | le32_to_cpu(sei_data->hdr.cnt)); |
| 9774 | require_hard_reset = true; |
| 9775 | break; |
| 9776 | |
| 9777 | case HBM_SEI_READ_ERR: |
| 9778 | /* Unlike other SEI events, read error requires further processing of the |
| 9779 | * raw data in order to determine the root cause. |
| 9780 | */ |
		require_hard_reset = gaudi2_hbm_sei_handle_read_err(hdev,
				&sei_data->read_err_info,
| 9783 | le32_to_cpu(sei_data->hdr.cnt)); |
| 9784 | break; |
| 9785 | |
| 9786 | default: |
| 9787 | break; |
| 9788 | } |
| 9789 | |
| 9790 | require_hard_reset |= !!sei_data->hdr.is_critical; |
| 9791 | |
| 9792 | return require_hard_reset; |
| 9793 | } |
| 9794 | |
| 9795 | static int gaudi2_handle_hbm_cattrip(struct hl_device *hdev, u16 event_type, |
| 9796 | u64 intr_cause_data) |
| 9797 | { |
| 9798 | if (intr_cause_data) { |
		gaudi2_print_event(hdev, event_type, true,
			"temperature error cause: %#llx", intr_cause_data);
| 9801 | return 1; |
| 9802 | } |
| 9803 | |
| 9804 | return 0; |
| 9805 | } |
| 9806 | |
| 9807 | static int gaudi2_handle_hbm_mc_spi(struct hl_device *hdev, u64 intr_cause_data) |
| 9808 | { |
| 9809 | u32 i, error_count = 0; |
| 9810 | |
| 9811 | for (i = 0 ; i < GAUDI2_NUM_OF_HBM_MC_SPI_CAUSE ; i++) |
| 9812 | if (intr_cause_data & hbm_mc_spi[i].mask) { |
| 9813 | dev_dbg(hdev->dev, "HBM spi event: notification cause(%s)\n" , |
| 9814 | hbm_mc_spi[i].cause); |
| 9815 | error_count++; |
| 9816 | } |
| 9817 | |
| 9818 | return error_count; |
| 9819 | } |
| 9820 | |
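/* Track the clock throttling state reported by the FW and time-stamp the
 * start/end of each throttling window.
 */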
| 9821 | static void gaudi2_print_clk_change_info(struct hl_device *hdev, u16 event_type, u64 *event_mask) |
| 9822 | { |
	ktime_t zero_time = ktime_set(0, 0);
| 9824 | |
| 9825 | mutex_lock(&hdev->clk_throttling.lock); |
| 9826 | |
| 9827 | switch (event_type) { |
| 9828 | case GAUDI2_EVENT_CPU_FIX_POWER_ENV_S: |
| 9829 | hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_POWER; |
| 9830 | hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_POWER; |
| 9831 | hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].start = ktime_get(); |
| 9832 | hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = zero_time; |
| 9833 | dev_dbg_ratelimited(hdev->dev, "Clock throttling due to power consumption\n" ); |
| 9834 | break; |
| 9835 | |
| 9836 | case GAUDI2_EVENT_CPU_FIX_POWER_ENV_E: |
| 9837 | hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_POWER; |
| 9838 | hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_POWER].end = ktime_get(); |
| 9839 | dev_dbg_ratelimited(hdev->dev, "Power envelop is safe, back to optimal clock\n" ); |
| 9840 | break; |
| 9841 | |
| 9842 | case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_S: |
| 9843 | hdev->clk_throttling.current_reason |= HL_CLK_THROTTLE_THERMAL; |
| 9844 | hdev->clk_throttling.aggregated_reason |= HL_CLK_THROTTLE_THERMAL; |
| 9845 | hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].start = ktime_get(); |
| 9846 | hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = zero_time; |
| 9847 | *event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; |
| 9848 | dev_info_ratelimited(hdev->dev, "Clock throttling due to overheating\n" ); |
| 9849 | break; |
| 9850 | |
| 9851 | case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_E: |
| 9852 | hdev->clk_throttling.current_reason &= ~HL_CLK_THROTTLE_THERMAL; |
| 9853 | hdev->clk_throttling.timestamp[HL_CLK_THROTTLE_TYPE_THERMAL].end = ktime_get(); |
| 9854 | *event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; |
| 9855 | dev_info_ratelimited(hdev->dev, "Thermal envelop is safe, back to optimal clock\n" ); |
| 9856 | break; |
| 9857 | |
| 9858 | default: |
| 9859 | dev_err(hdev->dev, "Received invalid clock change event %d\n" , event_type); |
| 9860 | break; |
| 9861 | } |
| 9862 | |
	mutex_unlock(&hdev->clk_throttling.lock);
| 9864 | } |
| 9865 | |
| 9866 | static void gaudi2_print_out_of_sync_info(struct hl_device *hdev, u16 event_type, |
| 9867 | struct cpucp_pkt_sync_err *sync_err) |
| 9868 | { |
| 9869 | struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI2_QUEUE_ID_CPU_PQ]; |
| 9870 | |
	gaudi2_print_event(hdev, event_type, false,
		"FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d",
		le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci),
		q->pi, atomic_read(&q->ci));
| 9875 | } |
| 9876 | |
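/* Report and clear the PCIE P2P and MSI-X gateway security interrupts. */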
| 9877 | static int gaudi2_handle_pcie_p2p_msix(struct hl_device *hdev, u16 event_type) |
| 9878 | { |
| 9879 | u32 p2p_intr, msix_gw_intr, error_count = 0; |
| 9880 | |
| 9881 | p2p_intr = RREG32(mmPCIE_WRAP_P2P_INTR); |
| 9882 | msix_gw_intr = RREG32(mmPCIE_WRAP_MSIX_GW_INTR); |
| 9883 | |
| 9884 | if (p2p_intr) { |
		gaudi2_print_event(hdev, event_type, true,
			"pcie p2p transaction terminated due to security, req_id(0x%x)",
| 9887 | RREG32(mmPCIE_WRAP_P2P_REQ_ID)); |
| 9888 | |
| 9889 | WREG32(mmPCIE_WRAP_P2P_INTR, 0x1); |
| 9890 | error_count++; |
| 9891 | } |
| 9892 | |
| 9893 | if (msix_gw_intr) { |
		gaudi2_print_event(hdev, event_type, true,
			"pcie msi-x gen denied due to vector num check failure, vec(0x%X)",
| 9896 | RREG32(mmPCIE_WRAP_MSIX_GW_VEC)); |
| 9897 | |
| 9898 | WREG32(mmPCIE_WRAP_MSIX_GW_INTR, 0x1); |
| 9899 | error_count++; |
| 9900 | } |
| 9901 | |
| 9902 | return error_count; |
| 9903 | } |
| 9904 | |
| 9905 | static int gaudi2_handle_pcie_drain(struct hl_device *hdev, |
| 9906 | struct hl_eq_pcie_drain_ind_data *drain_data) |
| 9907 | { |
| 9908 | u64 cause, error_count = 0; |
| 9909 | |
| 9910 | cause = le64_to_cpu(drain_data->intr_cause.intr_cause_data); |
| 9911 | |
| 9912 | if (cause & BIT_ULL(0)) { |
| 9913 | dev_err_ratelimited(hdev->dev, "PCIE AXI drain LBW completed\n" ); |
| 9914 | error_count++; |
| 9915 | } |
| 9916 | |
| 9917 | if (cause & BIT_ULL(1)) { |
| 9918 | dev_err_ratelimited(hdev->dev, "PCIE AXI drain HBW completed\n" ); |
| 9919 | error_count++; |
| 9920 | } |
| 9921 | |
| 9922 | return error_count; |
| 9923 | } |
| 9924 | |
| 9925 | static int gaudi2_handle_psoc_drain(struct hl_device *hdev, u64 intr_cause_data) |
| 9926 | { |
| 9927 | u32 error_count = 0; |
| 9928 | int i; |
| 9929 | |
| 9930 | for (i = 0 ; i < GAUDI2_NUM_OF_AXI_DRAIN_ERR_CAUSE ; i++) { |
| 9931 | if (intr_cause_data & BIT_ULL(i)) { |
| 9932 | dev_err_ratelimited(hdev->dev, "PSOC %s completed\n" , |
| 9933 | gaudi2_psoc_axi_drain_interrupts_cause[i]); |
| 9934 | error_count++; |
| 9935 | } |
| 9936 | } |
| 9937 | |
| 9938 | hl_check_for_glbl_errors(hdev); |
| 9939 | |
| 9940 | return error_count; |
| 9941 | } |
| 9942 | |
| 9943 | static void gaudi2_print_cpu_pkt_failure_info(struct hl_device *hdev, u16 event_type, |
| 9944 | struct cpucp_pkt_sync_err *sync_err) |
| 9945 | { |
| 9946 | struct hl_hw_queue *q = &hdev->kernel_queues[GAUDI2_QUEUE_ID_CPU_PQ]; |
| 9947 | |
	gaudi2_print_event(hdev, event_type, false,
		"FW reported sanity check failure, FW: pi=%u, ci=%u, LKD: pi=%u, ci=%d",
		le32_to_cpu(sync_err->pi), le32_to_cpu(sync_err->ci), q->pi, atomic_read(&q->ci));
| 9951 | } |
| 9952 | |
| 9953 | static int hl_arc_event_handle(struct hl_device *hdev, u16 event_type, |
| 9954 | struct hl_eq_engine_arc_intr_data *data) |
| 9955 | { |
| 9956 | struct hl_engine_arc_dccm_queue_full_irq *q; |
| 9957 | u32 intr_type, engine_id; |
| 9958 | u64 payload; |
| 9959 | |
| 9960 | intr_type = le32_to_cpu(data->intr_type); |
| 9961 | engine_id = le32_to_cpu(data->engine_id); |
| 9962 | payload = le64_to_cpu(data->payload); |
| 9963 | |
| 9964 | switch (intr_type) { |
| 9965 | case ENGINE_ARC_DCCM_QUEUE_FULL_IRQ: |
| 9966 | q = (struct hl_engine_arc_dccm_queue_full_irq *) &payload; |
| 9967 | |
		gaudi2_print_event(hdev, event_type, true,
			"ARC DCCM Full event: Eng: %s, Intr_type: %u, Qidx: %u",
| 9970 | GAUDI2_ENG_ID_TO_STR(engine_id), intr_type, q->queue_index); |
| 9971 | return 1; |
| 9972 | default: |
		gaudi2_print_event(hdev, event_type, true, "Unknown ARC event type");
| 9974 | return 0; |
| 9975 | } |
| 9976 | } |
| 9977 | |
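/* Best-effort translation of an event type to the engine id it relates to,
 * for error reporting. Returns U16_MAX for events that are not engine
 * specific.
 */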
| 9978 | static u16 event_id_to_engine_id(struct hl_device *hdev, u16 event_type) |
| 9979 | { |
| 9980 | enum gaudi2_block_types type = GAUDI2_BLOCK_TYPE_MAX; |
| 9981 | u16 index; |
| 9982 | |
| 9983 | switch (event_type) { |
| 9984 | case GAUDI2_EVENT_TPC0_AXI_ERR_RSP ... GAUDI2_EVENT_TPC24_AXI_ERR_RSP: |
| 9985 | index = event_type - GAUDI2_EVENT_TPC0_AXI_ERR_RSP; |
| 9986 | type = GAUDI2_BLOCK_TYPE_TPC; |
| 9987 | break; |
| 9988 | case GAUDI2_EVENT_TPC0_QM ... GAUDI2_EVENT_TPC24_QM: |
| 9989 | index = event_type - GAUDI2_EVENT_TPC0_QM; |
| 9990 | type = GAUDI2_BLOCK_TYPE_TPC; |
| 9991 | break; |
| 9992 | case GAUDI2_EVENT_MME0_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE: |
| 9993 | case GAUDI2_EVENT_MME0_SPI_BASE ... GAUDI2_EVENT_MME0_WAP_SOURCE_RESULT_INVALID: |
| 9994 | case GAUDI2_EVENT_MME0_QM: |
| 9995 | index = 0; |
| 9996 | type = GAUDI2_BLOCK_TYPE_MME; |
| 9997 | break; |
| 9998 | case GAUDI2_EVENT_MME1_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME1_CTRL_AXI_ERROR_RESPONSE: |
| 9999 | case GAUDI2_EVENT_MME1_SPI_BASE ... GAUDI2_EVENT_MME1_WAP_SOURCE_RESULT_INVALID: |
| 10000 | case GAUDI2_EVENT_MME1_QM: |
| 10001 | index = 1; |
| 10002 | type = GAUDI2_BLOCK_TYPE_MME; |
| 10003 | break; |
| 10004 | case GAUDI2_EVENT_MME2_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME2_CTRL_AXI_ERROR_RESPONSE: |
| 10005 | case GAUDI2_EVENT_MME2_SPI_BASE ... GAUDI2_EVENT_MME2_WAP_SOURCE_RESULT_INVALID: |
| 10006 | case GAUDI2_EVENT_MME2_QM: |
| 10007 | index = 2; |
| 10008 | type = GAUDI2_BLOCK_TYPE_MME; |
| 10009 | break; |
| 10010 | case GAUDI2_EVENT_MME3_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME3_CTRL_AXI_ERROR_RESPONSE: |
| 10011 | case GAUDI2_EVENT_MME3_SPI_BASE ... GAUDI2_EVENT_MME3_WAP_SOURCE_RESULT_INVALID: |
| 10012 | case GAUDI2_EVENT_MME3_QM: |
| 10013 | index = 3; |
| 10014 | type = GAUDI2_BLOCK_TYPE_MME; |
| 10015 | break; |
| 10016 | case GAUDI2_EVENT_KDMA_CH0_AXI_ERR_RSP: |
| 10017 | case GAUDI2_EVENT_KDMA_BM_SPMU: |
| 10018 | case GAUDI2_EVENT_KDMA0_CORE: |
| 10019 | return GAUDI2_ENGINE_ID_KDMA; |
| 10020 | case GAUDI2_EVENT_PDMA_CH0_AXI_ERR_RSP: |
| 10021 | case GAUDI2_EVENT_PDMA0_CORE: |
| 10022 | case GAUDI2_EVENT_PDMA0_BM_SPMU: |
| 10023 | case GAUDI2_EVENT_PDMA0_QM: |
| 10024 | return GAUDI2_ENGINE_ID_PDMA_0; |
| 10025 | case GAUDI2_EVENT_PDMA_CH1_AXI_ERR_RSP: |
| 10026 | case GAUDI2_EVENT_PDMA1_CORE: |
| 10027 | case GAUDI2_EVENT_PDMA1_BM_SPMU: |
| 10028 | case GAUDI2_EVENT_PDMA1_QM: |
| 10029 | return GAUDI2_ENGINE_ID_PDMA_1; |
| 10030 | case GAUDI2_EVENT_DEC0_AXI_ERR_RSPONSE ... GAUDI2_EVENT_DEC9_AXI_ERR_RSPONSE: |
| 10031 | index = event_type - GAUDI2_EVENT_DEC0_AXI_ERR_RSPONSE; |
| 10032 | type = GAUDI2_BLOCK_TYPE_DEC; |
| 10033 | break; |
| 10034 | case GAUDI2_EVENT_DEC0_SPI ... GAUDI2_EVENT_DEC9_BMON_SPMU: |
| 10035 | index = (event_type - GAUDI2_EVENT_DEC0_SPI) >> 1; |
| 10036 | type = GAUDI2_BLOCK_TYPE_DEC; |
| 10037 | break; |
| 10038 | case GAUDI2_EVENT_NIC0_AXI_ERROR_RESPONSE ... GAUDI2_EVENT_NIC11_AXI_ERROR_RESPONSE: |
| 10039 | index = event_type - GAUDI2_EVENT_NIC0_AXI_ERROR_RESPONSE; |
| 10040 | return GAUDI2_ENGINE_ID_NIC0_0 + (index * 2); |
| 10041 | case GAUDI2_EVENT_NIC0_QM0 ... GAUDI2_EVENT_NIC11_QM1: |
| 10042 | index = event_type - GAUDI2_EVENT_NIC0_QM0; |
| 10043 | return GAUDI2_ENGINE_ID_NIC0_0 + index; |
| 10044 | case GAUDI2_EVENT_NIC0_BMON_SPMU ... GAUDI2_EVENT_NIC11_SW_ERROR: |
| 10045 | index = event_type - GAUDI2_EVENT_NIC0_BMON_SPMU; |
| 10046 | return GAUDI2_ENGINE_ID_NIC0_0 + (index * 2); |
| 10047 | case GAUDI2_EVENT_TPC0_BMON_SPMU ... GAUDI2_EVENT_TPC24_KERNEL_ERR: |
| 10048 | index = (event_type - GAUDI2_EVENT_TPC0_BMON_SPMU) >> 1; |
| 10049 | type = GAUDI2_BLOCK_TYPE_TPC; |
| 10050 | break; |
| 10051 | case GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE: |
| 10052 | case GAUDI2_EVENT_ROTATOR0_BMON_SPMU: |
| 10053 | case GAUDI2_EVENT_ROTATOR0_ROT0_QM: |
| 10054 | return GAUDI2_ENGINE_ID_ROT_0; |
| 10055 | case GAUDI2_EVENT_ROTATOR1_AXI_ERROR_RESPONSE: |
| 10056 | case GAUDI2_EVENT_ROTATOR1_BMON_SPMU: |
| 10057 | case GAUDI2_EVENT_ROTATOR1_ROT1_QM: |
| 10058 | return GAUDI2_ENGINE_ID_ROT_1; |
| 10059 | case GAUDI2_EVENT_HDMA0_BM_SPMU: |
| 10060 | case GAUDI2_EVENT_HDMA0_QM: |
| 10061 | case GAUDI2_EVENT_HDMA0_CORE: |
| 10062 | return GAUDI2_DCORE0_ENGINE_ID_EDMA_0; |
| 10063 | case GAUDI2_EVENT_HDMA1_BM_SPMU: |
| 10064 | case GAUDI2_EVENT_HDMA1_QM: |
| 10065 | case GAUDI2_EVENT_HDMA1_CORE: |
| 10066 | return GAUDI2_DCORE0_ENGINE_ID_EDMA_1; |
| 10067 | case GAUDI2_EVENT_HDMA2_BM_SPMU: |
| 10068 | case GAUDI2_EVENT_HDMA2_QM: |
| 10069 | case GAUDI2_EVENT_HDMA2_CORE: |
| 10070 | return GAUDI2_DCORE1_ENGINE_ID_EDMA_0; |
| 10071 | case GAUDI2_EVENT_HDMA3_BM_SPMU: |
| 10072 | case GAUDI2_EVENT_HDMA3_QM: |
| 10073 | case GAUDI2_EVENT_HDMA3_CORE: |
| 10074 | return GAUDI2_DCORE1_ENGINE_ID_EDMA_1; |
| 10075 | case GAUDI2_EVENT_HDMA4_BM_SPMU: |
| 10076 | case GAUDI2_EVENT_HDMA4_QM: |
| 10077 | case GAUDI2_EVENT_HDMA4_CORE: |
| 10078 | return GAUDI2_DCORE2_ENGINE_ID_EDMA_0; |
| 10079 | case GAUDI2_EVENT_HDMA5_BM_SPMU: |
| 10080 | case GAUDI2_EVENT_HDMA5_QM: |
| 10081 | case GAUDI2_EVENT_HDMA5_CORE: |
| 10082 | return GAUDI2_DCORE2_ENGINE_ID_EDMA_1; |
| 10083 | case GAUDI2_EVENT_HDMA6_BM_SPMU: |
| 10084 | case GAUDI2_EVENT_HDMA6_QM: |
| 10085 | case GAUDI2_EVENT_HDMA6_CORE: |
| 10086 | return GAUDI2_DCORE3_ENGINE_ID_EDMA_0; |
| 10087 | case GAUDI2_EVENT_HDMA7_BM_SPMU: |
| 10088 | case GAUDI2_EVENT_HDMA7_QM: |
| 10089 | case GAUDI2_EVENT_HDMA7_CORE: |
| 10090 | return GAUDI2_DCORE3_ENGINE_ID_EDMA_1; |
| 10091 | default: |
| 10092 | break; |
| 10093 | } |
| 10094 | |
| 10095 | switch (type) { |
| 10096 | case GAUDI2_BLOCK_TYPE_TPC: |
| 10097 | switch (index) { |
| 10098 | case TPC_ID_DCORE0_TPC0 ... TPC_ID_DCORE0_TPC5: |
| 10099 | return GAUDI2_DCORE0_ENGINE_ID_TPC_0 + index; |
| 10100 | case TPC_ID_DCORE1_TPC0 ... TPC_ID_DCORE1_TPC5: |
| 10101 | return GAUDI2_DCORE1_ENGINE_ID_TPC_0 + index - TPC_ID_DCORE1_TPC0; |
| 10102 | case TPC_ID_DCORE2_TPC0 ... TPC_ID_DCORE2_TPC5: |
| 10103 | return GAUDI2_DCORE2_ENGINE_ID_TPC_0 + index - TPC_ID_DCORE2_TPC0; |
| 10104 | case TPC_ID_DCORE3_TPC0 ... TPC_ID_DCORE3_TPC5: |
| 10105 | return GAUDI2_DCORE3_ENGINE_ID_TPC_0 + index - TPC_ID_DCORE3_TPC0; |
| 10106 | default: |
| 10107 | break; |
| 10108 | } |
| 10109 | break; |
| 10110 | case GAUDI2_BLOCK_TYPE_MME: |
| 10111 | switch (index) { |
| 10112 | case MME_ID_DCORE0: return GAUDI2_DCORE0_ENGINE_ID_MME; |
| 10113 | case MME_ID_DCORE1: return GAUDI2_DCORE1_ENGINE_ID_MME; |
| 10114 | case MME_ID_DCORE2: return GAUDI2_DCORE2_ENGINE_ID_MME; |
| 10115 | case MME_ID_DCORE3: return GAUDI2_DCORE3_ENGINE_ID_MME; |
| 10116 | default: |
| 10117 | break; |
| 10118 | } |
| 10119 | break; |
| 10120 | case GAUDI2_BLOCK_TYPE_DEC: |
| 10121 | switch (index) { |
| 10122 | case DEC_ID_DCORE0_DEC0: return GAUDI2_DCORE0_ENGINE_ID_DEC_0; |
| 10123 | case DEC_ID_DCORE0_DEC1: return GAUDI2_DCORE0_ENGINE_ID_DEC_1; |
| 10124 | case DEC_ID_DCORE1_DEC0: return GAUDI2_DCORE1_ENGINE_ID_DEC_0; |
| 10125 | case DEC_ID_DCORE1_DEC1: return GAUDI2_DCORE1_ENGINE_ID_DEC_1; |
| 10126 | case DEC_ID_DCORE2_DEC0: return GAUDI2_DCORE2_ENGINE_ID_DEC_0; |
| 10127 | case DEC_ID_DCORE2_DEC1: return GAUDI2_DCORE2_ENGINE_ID_DEC_1; |
| 10128 | case DEC_ID_DCORE3_DEC0: return GAUDI2_DCORE3_ENGINE_ID_DEC_0; |
| 10129 | case DEC_ID_DCORE3_DEC1: return GAUDI2_DCORE3_ENGINE_ID_DEC_1; |
| 10130 | case DEC_ID_PCIE_VDEC0: return GAUDI2_PCIE_ENGINE_ID_DEC_0; |
| 10131 | case DEC_ID_PCIE_VDEC1: return GAUDI2_PCIE_ENGINE_ID_DEC_1; |
| 10132 | default: |
| 10133 | break; |
| 10134 | } |
| 10135 | break; |
| 10136 | default: |
| 10137 | break; |
| 10138 | } |
| 10139 | |
| 10140 | return U16_MAX; |
| 10141 | } |
| 10142 | |
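/* Main event queue handler: decode the event type, update the event
 * statistics and route the event to the matching handler while accumulating
 * the error count and the notifier event mask.
 */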
| 10143 | static void gaudi2_handle_eqe(struct hl_device *hdev, struct hl_eq_entry *eq_entry) |
| 10144 | { |
| 10145 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 10146 | bool reset_required = false, is_critical = false; |
| 10147 | u32 index, ctl, reset_flags = 0, error_count = 0; |
| 10148 | u64 event_mask = 0; |
| 10149 | u16 event_type; |
| 10150 | |
| 10151 | ctl = le32_to_cpu(eq_entry->hdr.ctl); |
| 10152 | event_type = ((ctl & EQ_CTL_EVENT_TYPE_MASK) >> EQ_CTL_EVENT_TYPE_SHIFT); |
| 10153 | |
| 10154 | if (event_type >= GAUDI2_EVENT_SIZE) { |
| 10155 | dev_err(hdev->dev, "Event type %u exceeds maximum of %u" , |
| 10156 | event_type, GAUDI2_EVENT_SIZE - 1); |
| 10157 | return; |
| 10158 | } |
| 10159 | |
| 10160 | gaudi2->events_stat[event_type]++; |
| 10161 | gaudi2->events_stat_aggregate[event_type]++; |
| 10162 | |
| 10163 | switch (event_type) { |
| 10164 | case GAUDI2_EVENT_PCIE_CORE_SERR ... GAUDI2_EVENT_ARC0_ECC_DERR: |
| 10165 | fallthrough; |
| 10166 | case GAUDI2_EVENT_ROTATOR0_SERR ... GAUDI2_EVENT_ROTATOR1_DERR: |
| 10167 | reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; |
| 10168 | event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; |
		reset_required = gaudi2_handle_ecc_event(hdev, event_type, &eq_entry->ecc_data);
| 10170 | is_critical = eq_entry->ecc_data.is_critical; |
| 10171 | error_count++; |
| 10172 | break; |
| 10173 | |
| 10174 | case GAUDI2_EVENT_TPC0_QM ... GAUDI2_EVENT_PDMA1_QM: |
| 10175 | fallthrough; |
| 10176 | case GAUDI2_EVENT_ROTATOR0_ROT0_QM ... GAUDI2_EVENT_ROTATOR1_ROT1_QM: |
| 10177 | fallthrough; |
| 10178 | case GAUDI2_EVENT_NIC0_QM0 ... GAUDI2_EVENT_NIC11_QM1: |
		error_count = gaudi2_handle_qman_err(hdev, event_type, &event_mask);
| 10180 | event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; |
| 10181 | break; |
| 10182 | |
| 10183 | case GAUDI2_EVENT_ARC_AXI_ERROR_RESPONSE_0: |
		error_count = gaudi2_handle_arc_farm_sei_err(hdev, event_type, &event_mask);
| 10185 | event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; |
| 10186 | break; |
| 10187 | |
| 10188 | case GAUDI2_EVENT_CPU_AXI_ERR_RSP: |
| 10189 | error_count = gaudi2_handle_cpu_sei_err(hdev, event_type); |
| 10190 | reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; |
| 10191 | event_mask |= HL_NOTIFIER_EVENT_CRITICL_FW_ERR; |
| 10192 | break; |
| 10193 | |
| 10194 | case GAUDI2_EVENT_PDMA_CH0_AXI_ERR_RSP: |
| 10195 | case GAUDI2_EVENT_PDMA_CH1_AXI_ERR_RSP: |
error_count = gaudi2_handle_qm_sei_err(hdev, event_type, true, &event_mask);
| 10197 | event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; |
| 10198 | break; |
| 10199 | |
| 10200 | case GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE: |
| 10201 | case GAUDI2_EVENT_ROTATOR1_AXI_ERROR_RESPONSE: |
| 10202 | index = event_type - GAUDI2_EVENT_ROTATOR0_AXI_ERROR_RESPONSE; |
error_count = gaudi2_handle_rot_err(hdev, index, event_type,
&eq_entry->razwi_with_intr_cause, &event_mask);
error_count += gaudi2_handle_qm_sei_err(hdev, event_type, false, &event_mask);
| 10206 | event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; |
| 10207 | break; |
| 10208 | |
| 10209 | case GAUDI2_EVENT_TPC0_AXI_ERR_RSP ... GAUDI2_EVENT_TPC24_AXI_ERR_RSP: |
| 10210 | index = event_type - GAUDI2_EVENT_TPC0_AXI_ERR_RSP; |
error_count = gaudi2_tpc_ack_interrupts(hdev, index, event_type,
&eq_entry->razwi_with_intr_cause, &event_mask);
error_count += gaudi2_handle_qm_sei_err(hdev, event_type, false, &event_mask);
| 10214 | event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; |
| 10215 | break; |
| 10216 | |
| 10217 | case GAUDI2_EVENT_DEC0_AXI_ERR_RSPONSE ... GAUDI2_EVENT_DEC9_AXI_ERR_RSPONSE: |
| 10218 | index = event_type - GAUDI2_EVENT_DEC0_AXI_ERR_RSPONSE; |
error_count = gaudi2_handle_dec_err(hdev, index, event_type, &event_mask);
| 10220 | event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; |
| 10221 | break; |
| 10222 | |
| 10223 | case GAUDI2_EVENT_TPC0_KERNEL_ERR: |
| 10224 | case GAUDI2_EVENT_TPC1_KERNEL_ERR: |
| 10225 | case GAUDI2_EVENT_TPC2_KERNEL_ERR: |
| 10226 | case GAUDI2_EVENT_TPC3_KERNEL_ERR: |
| 10227 | case GAUDI2_EVENT_TPC4_KERNEL_ERR: |
| 10228 | case GAUDI2_EVENT_TPC5_KERNEL_ERR: |
| 10229 | case GAUDI2_EVENT_TPC6_KERNEL_ERR: |
| 10230 | case GAUDI2_EVENT_TPC7_KERNEL_ERR: |
| 10231 | case GAUDI2_EVENT_TPC8_KERNEL_ERR: |
| 10232 | case GAUDI2_EVENT_TPC9_KERNEL_ERR: |
| 10233 | case GAUDI2_EVENT_TPC10_KERNEL_ERR: |
| 10234 | case GAUDI2_EVENT_TPC11_KERNEL_ERR: |
| 10235 | case GAUDI2_EVENT_TPC12_KERNEL_ERR: |
| 10236 | case GAUDI2_EVENT_TPC13_KERNEL_ERR: |
| 10237 | case GAUDI2_EVENT_TPC14_KERNEL_ERR: |
| 10238 | case GAUDI2_EVENT_TPC15_KERNEL_ERR: |
| 10239 | case GAUDI2_EVENT_TPC16_KERNEL_ERR: |
| 10240 | case GAUDI2_EVENT_TPC17_KERNEL_ERR: |
| 10241 | case GAUDI2_EVENT_TPC18_KERNEL_ERR: |
| 10242 | case GAUDI2_EVENT_TPC19_KERNEL_ERR: |
| 10243 | case GAUDI2_EVENT_TPC20_KERNEL_ERR: |
| 10244 | case GAUDI2_EVENT_TPC21_KERNEL_ERR: |
| 10245 | case GAUDI2_EVENT_TPC22_KERNEL_ERR: |
| 10246 | case GAUDI2_EVENT_TPC23_KERNEL_ERR: |
| 10247 | case GAUDI2_EVENT_TPC24_KERNEL_ERR: |
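/* TPC kernel-error events are evenly strided, so index = (event - base) / stride. */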
| 10248 | index = (event_type - GAUDI2_EVENT_TPC0_KERNEL_ERR) / |
| 10249 | (GAUDI2_EVENT_TPC1_KERNEL_ERR - GAUDI2_EVENT_TPC0_KERNEL_ERR); |
error_count = gaudi2_tpc_ack_interrupts(hdev, index, event_type,
&eq_entry->razwi_with_intr_cause, &event_mask);
| 10252 | event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; |
| 10253 | break; |
| 10254 | |
| 10255 | case GAUDI2_EVENT_DEC0_SPI: |
| 10256 | case GAUDI2_EVENT_DEC1_SPI: |
| 10257 | case GAUDI2_EVENT_DEC2_SPI: |
| 10258 | case GAUDI2_EVENT_DEC3_SPI: |
| 10259 | case GAUDI2_EVENT_DEC4_SPI: |
| 10260 | case GAUDI2_EVENT_DEC5_SPI: |
| 10261 | case GAUDI2_EVENT_DEC6_SPI: |
| 10262 | case GAUDI2_EVENT_DEC7_SPI: |
| 10263 | case GAUDI2_EVENT_DEC8_SPI: |
| 10264 | case GAUDI2_EVENT_DEC9_SPI: |
| 10265 | index = (event_type - GAUDI2_EVENT_DEC0_SPI) / |
| 10266 | (GAUDI2_EVENT_DEC1_SPI - GAUDI2_EVENT_DEC0_SPI); |
error_count = gaudi2_handle_dec_err(hdev, index, event_type, &event_mask);
| 10268 | event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; |
| 10269 | break; |
| 10270 | |
| 10271 | case GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE: |
| 10272 | case GAUDI2_EVENT_MME1_CTRL_AXI_ERROR_RESPONSE: |
| 10273 | case GAUDI2_EVENT_MME2_CTRL_AXI_ERROR_RESPONSE: |
| 10274 | case GAUDI2_EVENT_MME3_CTRL_AXI_ERROR_RESPONSE: |
| 10275 | index = (event_type - GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE) / |
| 10276 | (GAUDI2_EVENT_MME1_CTRL_AXI_ERROR_RESPONSE - |
| 10277 | GAUDI2_EVENT_MME0_CTRL_AXI_ERROR_RESPONSE); |
error_count = gaudi2_handle_mme_err(hdev, index, event_type, &event_mask);
error_count += gaudi2_handle_qm_sei_err(hdev, event_type, false, &event_mask);
| 10280 | event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; |
| 10281 | break; |
| 10282 | |
| 10283 | case GAUDI2_EVENT_MME0_QMAN_SW_ERROR: |
| 10284 | case GAUDI2_EVENT_MME1_QMAN_SW_ERROR: |
| 10285 | case GAUDI2_EVENT_MME2_QMAN_SW_ERROR: |
| 10286 | case GAUDI2_EVENT_MME3_QMAN_SW_ERROR: |
| 10287 | index = (event_type - GAUDI2_EVENT_MME0_QMAN_SW_ERROR) / |
| 10288 | (GAUDI2_EVENT_MME1_QMAN_SW_ERROR - |
| 10289 | GAUDI2_EVENT_MME0_QMAN_SW_ERROR); |
error_count = gaudi2_handle_mme_err(hdev, index, event_type, &event_mask);
| 10291 | event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; |
| 10292 | break; |
| 10293 | |
| 10294 | case GAUDI2_EVENT_MME0_WAP_SOURCE_RESULT_INVALID: |
| 10295 | case GAUDI2_EVENT_MME1_WAP_SOURCE_RESULT_INVALID: |
| 10296 | case GAUDI2_EVENT_MME2_WAP_SOURCE_RESULT_INVALID: |
| 10297 | case GAUDI2_EVENT_MME3_WAP_SOURCE_RESULT_INVALID: |
| 10298 | index = (event_type - GAUDI2_EVENT_MME0_WAP_SOURCE_RESULT_INVALID) / |
| 10299 | (GAUDI2_EVENT_MME1_WAP_SOURCE_RESULT_INVALID - |
| 10300 | GAUDI2_EVENT_MME0_WAP_SOURCE_RESULT_INVALID); |
error_count = gaudi2_handle_mme_wap_err(hdev, index, event_type, &event_mask);
| 10302 | event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; |
| 10303 | break; |
| 10304 | |
| 10305 | case GAUDI2_EVENT_KDMA_CH0_AXI_ERR_RSP: |
| 10306 | case GAUDI2_EVENT_KDMA0_CORE: |
| 10307 | error_count = gaudi2_handle_kdma_core_event(hdev, event_type, |
| 10308 | le64_to_cpu(eq_entry->intr_cause.intr_cause_data)); |
| 10309 | event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; |
| 10310 | break; |
| 10311 | |
| 10312 | case GAUDI2_EVENT_HDMA2_CORE ... GAUDI2_EVENT_HDMA5_CORE: |
| 10313 | error_count = gaudi2_handle_dma_core_event(hdev, event_type, |
| 10314 | le64_to_cpu(eq_entry->intr_cause.intr_cause_data)); |
| 10315 | event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; |
| 10316 | break; |
| 10317 | |
| 10318 | case GAUDI2_EVENT_PDMA0_CORE ... GAUDI2_EVENT_PDMA1_CORE: |
| 10319 | error_count = gaudi2_handle_dma_core_event(hdev, event_type, |
| 10320 | le64_to_cpu(eq_entry->intr_cause.intr_cause_data)); |
| 10321 | event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; |
| 10322 | break; |
| 10323 | |
| 10324 | case GAUDI2_EVENT_PCIE_ADDR_DEC_ERR: |
| 10325 | error_count = gaudi2_print_pcie_addr_dec_info(hdev, event_type, |
le64_to_cpu(eq_entry->intr_cause.intr_cause_data), &event_mask);
| 10327 | reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; |
| 10328 | event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; |
| 10329 | break; |
| 10330 | |
| 10331 | case GAUDI2_EVENT_HMMU0_PAGE_FAULT_OR_WR_PERM ... GAUDI2_EVENT_HMMU12_SECURITY_ERROR: |
| 10332 | case GAUDI2_EVENT_HMMU_0_AXI_ERR_RSP ... GAUDI2_EVENT_HMMU_12_AXI_ERR_RSP: |
| 10333 | case GAUDI2_EVENT_PMMU0_PAGE_FAULT_WR_PERM ... GAUDI2_EVENT_PMMU0_SECURITY_ERROR: |
| 10334 | case GAUDI2_EVENT_PMMU_AXI_ERR_RSP_0: |
error_count = gaudi2_handle_mmu_spi_sei_err(hdev, event_type, &event_mask);
| 10336 | reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; |
| 10337 | event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; |
| 10338 | break; |
| 10339 | |
| 10340 | case GAUDI2_EVENT_HIF0_FATAL ... GAUDI2_EVENT_HIF12_FATAL: |
| 10341 | error_count = gaudi2_handle_hif_fatal(hdev, event_type, |
| 10342 | le64_to_cpu(eq_entry->intr_cause.intr_cause_data)); |
| 10343 | reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; |
| 10344 | event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; |
| 10345 | break; |
| 10346 | |
| 10347 | case GAUDI2_EVENT_PMMU_FATAL_0: |
| 10348 | error_count = gaudi2_handle_pif_fatal(hdev, event_type, |
| 10349 | le64_to_cpu(eq_entry->intr_cause.intr_cause_data)); |
| 10350 | reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; |
| 10351 | event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; |
| 10352 | break; |
| 10353 | |
| 10354 | case GAUDI2_EVENT_PSOC63_RAZWI_OR_PID_MIN_MAX_INTERRUPT: |
error_count = gaudi2_ack_psoc_razwi_event_handler(hdev, &event_mask);
| 10356 | event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; |
| 10357 | break; |
| 10358 | |
| 10359 | case GAUDI2_EVENT_HBM0_MC0_SEI_SEVERE ... GAUDI2_EVENT_HBM5_MC1_SEI_NON_SEVERE: |
| 10360 | event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; |
if (gaudi2_handle_hbm_mc_sei_err(hdev, event_type, &eq_entry->sei_data)) {
| 10362 | reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; |
| 10363 | reset_required = true; |
| 10364 | is_critical = eq_entry->sei_data.hdr.is_critical; |
| 10365 | } |
| 10366 | error_count++; |
| 10367 | break; |
| 10368 | |
| 10369 | case GAUDI2_EVENT_HBM_CATTRIP_0 ... GAUDI2_EVENT_HBM_CATTRIP_5: |
| 10370 | error_count = gaudi2_handle_hbm_cattrip(hdev, event_type, |
| 10371 | le64_to_cpu(eq_entry->intr_cause.intr_cause_data)); |
| 10372 | event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; |
| 10373 | break; |
| 10374 | |
| 10375 | case GAUDI2_EVENT_HBM0_MC0_SPI ... GAUDI2_EVENT_HBM5_MC1_SPI: |
| 10376 | error_count = gaudi2_handle_hbm_mc_spi(hdev, |
| 10377 | le64_to_cpu(eq_entry->intr_cause.intr_cause_data)); |
| 10378 | event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; |
| 10379 | break; |
| 10380 | |
| 10381 | case GAUDI2_EVENT_PCIE_DRAIN_COMPLETE: |
error_count = gaudi2_handle_pcie_drain(hdev, &eq_entry->pcie_drain_ind_data);
| 10383 | reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; |
| 10384 | event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; |
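/* From FW version 1.13 onward, a PCIe drain completion is treated as critical. */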
if (hl_fw_version_cmp(hdev, 1, 13, 0) >= 0)
| 10386 | is_critical = true; |
| 10387 | break; |
| 10388 | |
| 10389 | case GAUDI2_EVENT_PSOC59_RPM_ERROR_OR_DRAIN: |
| 10390 | error_count = gaudi2_handle_psoc_drain(hdev, |
| 10391 | le64_to_cpu(eq_entry->intr_cause.intr_cause_data)); |
| 10392 | reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; |
| 10393 | event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; |
| 10394 | break; |
| 10395 | |
| 10396 | case GAUDI2_EVENT_CPU_AXI_ECC: |
| 10397 | error_count = GAUDI2_NA_EVENT_CAUSE; |
| 10398 | reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; |
| 10399 | event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; |
| 10400 | break; |
| 10401 | case GAUDI2_EVENT_CPU_L2_RAM_ECC: |
| 10402 | error_count = GAUDI2_NA_EVENT_CAUSE; |
| 10403 | reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; |
| 10404 | event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; |
| 10405 | break; |
| 10406 | case GAUDI2_EVENT_MME0_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME0_SBTE4_AXI_ERR_RSP: |
| 10407 | case GAUDI2_EVENT_MME1_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME1_SBTE4_AXI_ERR_RSP: |
| 10408 | case GAUDI2_EVENT_MME2_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME2_SBTE4_AXI_ERR_RSP: |
| 10409 | case GAUDI2_EVENT_MME3_SBTE0_AXI_ERR_RSP ... GAUDI2_EVENT_MME3_SBTE4_AXI_ERR_RSP: |
| 10410 | error_count = gaudi2_handle_mme_sbte_err(hdev, event_type); |
| 10411 | event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; |
| 10412 | break; |
| 10413 | case GAUDI2_EVENT_VM0_ALARM_A ... GAUDI2_EVENT_VM3_ALARM_B: |
| 10414 | error_count = GAUDI2_NA_EVENT_CAUSE; |
| 10415 | reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; |
| 10416 | event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; |
| 10417 | break; |
| 10418 | case GAUDI2_EVENT_PSOC_AXI_ERR_RSP: |
| 10419 | error_count = GAUDI2_NA_EVENT_CAUSE; |
| 10420 | reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; |
| 10421 | event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; |
| 10422 | break; |
| 10423 | case GAUDI2_EVENT_PSOC_PRSTN_FALL: |
| 10424 | error_count = GAUDI2_NA_EVENT_CAUSE; |
| 10425 | event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; |
| 10426 | break; |
| 10427 | case GAUDI2_EVENT_PCIE_APB_TIMEOUT: |
| 10428 | error_count = GAUDI2_NA_EVENT_CAUSE; |
| 10429 | reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; |
| 10430 | event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; |
| 10431 | break; |
| 10432 | case GAUDI2_EVENT_PCIE_FATAL_ERR: |
| 10433 | error_count = GAUDI2_NA_EVENT_CAUSE; |
| 10434 | reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; |
| 10435 | event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; |
| 10436 | break; |
| 10437 | case GAUDI2_EVENT_TPC0_BMON_SPMU: |
| 10438 | case GAUDI2_EVENT_TPC1_BMON_SPMU: |
| 10439 | case GAUDI2_EVENT_TPC2_BMON_SPMU: |
| 10440 | case GAUDI2_EVENT_TPC3_BMON_SPMU: |
| 10441 | case GAUDI2_EVENT_TPC4_BMON_SPMU: |
| 10442 | case GAUDI2_EVENT_TPC5_BMON_SPMU: |
| 10443 | case GAUDI2_EVENT_TPC6_BMON_SPMU: |
| 10444 | case GAUDI2_EVENT_TPC7_BMON_SPMU: |
| 10445 | case GAUDI2_EVENT_TPC8_BMON_SPMU: |
| 10446 | case GAUDI2_EVENT_TPC9_BMON_SPMU: |
| 10447 | case GAUDI2_EVENT_TPC10_BMON_SPMU: |
| 10448 | case GAUDI2_EVENT_TPC11_BMON_SPMU: |
| 10449 | case GAUDI2_EVENT_TPC12_BMON_SPMU: |
| 10450 | case GAUDI2_EVENT_TPC13_BMON_SPMU: |
| 10451 | case GAUDI2_EVENT_TPC14_BMON_SPMU: |
| 10452 | case GAUDI2_EVENT_TPC15_BMON_SPMU: |
| 10453 | case GAUDI2_EVENT_TPC16_BMON_SPMU: |
| 10454 | case GAUDI2_EVENT_TPC17_BMON_SPMU: |
| 10455 | case GAUDI2_EVENT_TPC18_BMON_SPMU: |
| 10456 | case GAUDI2_EVENT_TPC19_BMON_SPMU: |
| 10457 | case GAUDI2_EVENT_TPC20_BMON_SPMU: |
| 10458 | case GAUDI2_EVENT_TPC21_BMON_SPMU: |
| 10459 | case GAUDI2_EVENT_TPC22_BMON_SPMU: |
| 10460 | case GAUDI2_EVENT_TPC23_BMON_SPMU: |
| 10461 | case GAUDI2_EVENT_TPC24_BMON_SPMU: |
| 10462 | case GAUDI2_EVENT_MME0_CTRL_BMON_SPMU: |
| 10463 | case GAUDI2_EVENT_MME0_SBTE_BMON_SPMU: |
| 10464 | case GAUDI2_EVENT_MME0_WAP_BMON_SPMU: |
| 10465 | case GAUDI2_EVENT_MME1_CTRL_BMON_SPMU: |
| 10466 | case GAUDI2_EVENT_MME1_SBTE_BMON_SPMU: |
| 10467 | case GAUDI2_EVENT_MME1_WAP_BMON_SPMU: |
| 10468 | case GAUDI2_EVENT_MME2_CTRL_BMON_SPMU: |
| 10469 | case GAUDI2_EVENT_MME2_SBTE_BMON_SPMU: |
| 10470 | case GAUDI2_EVENT_MME2_WAP_BMON_SPMU: |
| 10471 | case GAUDI2_EVENT_MME3_CTRL_BMON_SPMU: |
| 10472 | case GAUDI2_EVENT_MME3_SBTE_BMON_SPMU: |
| 10473 | case GAUDI2_EVENT_MME3_WAP_BMON_SPMU: |
| 10474 | case GAUDI2_EVENT_HDMA2_BM_SPMU ... GAUDI2_EVENT_PDMA1_BM_SPMU: |
| 10475 | fallthrough; |
| 10476 | case GAUDI2_EVENT_DEC0_BMON_SPMU: |
| 10477 | case GAUDI2_EVENT_DEC1_BMON_SPMU: |
| 10478 | case GAUDI2_EVENT_DEC2_BMON_SPMU: |
| 10479 | case GAUDI2_EVENT_DEC3_BMON_SPMU: |
| 10480 | case GAUDI2_EVENT_DEC4_BMON_SPMU: |
| 10481 | case GAUDI2_EVENT_DEC5_BMON_SPMU: |
| 10482 | case GAUDI2_EVENT_DEC6_BMON_SPMU: |
| 10483 | case GAUDI2_EVENT_DEC7_BMON_SPMU: |
| 10484 | case GAUDI2_EVENT_DEC8_BMON_SPMU: |
| 10485 | case GAUDI2_EVENT_DEC9_BMON_SPMU: |
| 10486 | case GAUDI2_EVENT_ROTATOR0_BMON_SPMU ... GAUDI2_EVENT_SM3_BMON_SPMU: |
| 10487 | error_count = GAUDI2_NA_EVENT_CAUSE; |
| 10488 | event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; |
| 10489 | break; |
| 10490 | |
| 10491 | case GAUDI2_EVENT_CPU_FIX_POWER_ENV_S: |
| 10492 | case GAUDI2_EVENT_CPU_FIX_POWER_ENV_E: |
| 10493 | case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_S: |
| 10494 | case GAUDI2_EVENT_CPU_FIX_THERMAL_ENV_E: |
gaudi2_print_clk_change_info(hdev, event_type, &event_mask);
| 10496 | error_count = GAUDI2_NA_EVENT_CAUSE; |
| 10497 | break; |
| 10498 | |
| 10499 | case GAUDI2_EVENT_CPU_PKT_QUEUE_OUT_SYNC: |
gaudi2_print_out_of_sync_info(hdev, event_type, &eq_entry->pkt_sync_err);
| 10501 | error_count = GAUDI2_NA_EVENT_CAUSE; |
| 10502 | reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; |
| 10503 | event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; |
| 10504 | break; |
| 10505 | |
| 10506 | case GAUDI2_EVENT_PCIE_FLR_REQUESTED: |
| 10507 | event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; |
| 10508 | error_count = GAUDI2_NA_EVENT_CAUSE; |
| 10509 | /* Do nothing- FW will handle it */ |
| 10510 | break; |
| 10511 | |
| 10512 | case GAUDI2_EVENT_PCIE_P2P_MSIX: |
| 10513 | error_count = gaudi2_handle_pcie_p2p_msix(hdev, event_type); |
| 10514 | event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; |
| 10515 | break; |
| 10516 | |
| 10517 | case GAUDI2_EVENT_SM0_AXI_ERROR_RESPONSE ... GAUDI2_EVENT_SM3_AXI_ERROR_RESPONSE: |
| 10518 | index = event_type - GAUDI2_EVENT_SM0_AXI_ERROR_RESPONSE; |
error_count = gaudi2_handle_sm_err(hdev, event_type, index);
| 10520 | event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; |
| 10521 | break; |
| 10522 | |
| 10523 | case GAUDI2_EVENT_PSOC_MME_PLL_LOCK_ERR ... GAUDI2_EVENT_DCORE2_HBM_PLL_LOCK_ERR: |
| 10524 | error_count = GAUDI2_NA_EVENT_CAUSE; |
| 10525 | event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; |
| 10526 | break; |
| 10527 | |
| 10528 | case GAUDI2_EVENT_CPU_CPLD_SHUTDOWN_CAUSE: |
| 10529 | dev_info(hdev->dev, "CPLD shutdown cause, reset reason: 0x%llx\n" , |
| 10530 | le64_to_cpu(eq_entry->data[0])); |
| 10531 | error_count = GAUDI2_NA_EVENT_CAUSE; |
| 10532 | event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; |
| 10533 | break; |
| 10534 | case GAUDI2_EVENT_CPU_CPLD_SHUTDOWN_EVENT: |
| 10535 | dev_err(hdev->dev, "CPLD shutdown event, reset reason: 0x%llx\n" , |
| 10536 | le64_to_cpu(eq_entry->data[0])); |
| 10537 | error_count = GAUDI2_NA_EVENT_CAUSE; |
hl_eq_cpld_shutdown_event_handle(hdev, event_type, &event_mask);
| 10539 | break; |
| 10540 | |
| 10541 | case GAUDI2_EVENT_CPU_PKT_SANITY_FAILED: |
gaudi2_print_cpu_pkt_failure_info(hdev, event_type, &eq_entry->pkt_sync_err);
| 10543 | error_count = GAUDI2_NA_EVENT_CAUSE; |
| 10544 | reset_flags |= HL_DRV_RESET_FW_FATAL_ERR; |
| 10545 | event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; |
| 10546 | break; |
| 10547 | |
| 10548 | case GAUDI2_EVENT_ARC_DCCM_FULL: |
error_count = hl_arc_event_handle(hdev, event_type, &eq_entry->arc_data);
| 10550 | event_mask |= HL_NOTIFIER_EVENT_USER_ENGINE_ERR; |
| 10551 | break; |
| 10552 | |
| 10553 | case GAUDI2_EVENT_CPU_FP32_NOT_SUPPORTED: |
| 10554 | case GAUDI2_EVENT_CPU_DEV_RESET_REQ: |
| 10555 | event_mask |= HL_NOTIFIER_EVENT_GENERAL_HW_ERR; |
| 10556 | error_count = GAUDI2_NA_EVENT_CAUSE; |
| 10557 | is_critical = true; |
| 10558 | break; |
| 10559 | |
| 10560 | case GAUDI2_EVENT_ARC_PWR_BRK_ENTRY: |
| 10561 | case GAUDI2_EVENT_ARC_PWR_BRK_EXT: |
| 10562 | case GAUDI2_EVENT_ARC_PWR_RD_MODE0: |
| 10563 | case GAUDI2_EVENT_ARC_PWR_RD_MODE1: |
| 10564 | case GAUDI2_EVENT_ARC_PWR_RD_MODE2: |
| 10565 | case GAUDI2_EVENT_ARC_PWR_RD_MODE3: |
| 10566 | error_count = GAUDI2_NA_EVENT_CAUSE; |
dev_info_ratelimited(hdev->dev, "%s event received\n",
| 10568 | gaudi2_irq_map_table[event_type].name); |
| 10569 | break; |
| 10570 | |
| 10571 | case GAUDI2_EVENT_ARC_EQ_HEARTBEAT: |
| 10572 | hl_eq_heartbeat_event_handle(hdev); |
| 10573 | error_count = GAUDI2_NA_EVENT_CAUSE; |
| 10574 | break; |
| 10575 | default: |
| 10576 | if (gaudi2_irq_map_table[event_type].valid) { |
dev_err_ratelimited(hdev->dev, "Cannot find handler for event %d\n",
| 10578 | event_type); |
| 10579 | error_count = GAUDI2_NA_EVENT_CAUSE; |
| 10580 | } |
| 10581 | } |
| 10582 | |
| 10583 | if (event_mask & HL_NOTIFIER_EVENT_USER_ENGINE_ERR) |
hl_capture_engine_err(hdev, event_id_to_engine_id(hdev, event_type), error_count);
| 10585 | |
| 10586 | /* Make sure to dump an error in case no error cause was printed so far. |
| 10587 | * Note that although we have counted the errors, we use this number as |
| 10588 | * a boolean. |
| 10589 | */ |
if (error_count == GAUDI2_NA_EVENT_CAUSE && !is_info_event(event_type))
gaudi2_print_event(hdev, event_type, true, "%d", event_type);
else if (error_count == 0)
gaudi2_print_event(hdev, event_type, true,
"No error cause for H/W event %u", event_type);
| 10595 | |
| 10596 | if ((gaudi2_irq_map_table[event_type].reset != EVENT_RESET_TYPE_NONE) || reset_required) { |
| 10597 | if (reset_required || |
| 10598 | (gaudi2_irq_map_table[event_type].reset == EVENT_RESET_TYPE_HARD)) |
| 10599 | reset_flags |= HL_DRV_RESET_HARD; |
| 10600 | |
| 10601 | if (hdev->hard_reset_on_fw_events || |
| 10602 | (hdev->asic_prop.fw_security_enabled && is_critical)) |
| 10603 | goto reset_device; |
| 10604 | } |
| 10605 | |
| 10606 | /* Send unmask irq only for interrupts not classified as MSG */ |
| 10607 | if (!gaudi2_irq_map_table[event_type].msg) |
| 10608 | hl_fw_unmask_irq(hdev, event_type); |
| 10609 | |
| 10610 | if (event_mask) |
| 10611 | hl_notifier_event_send_all(hdev, event_mask); |
| 10612 | |
| 10613 | return; |
| 10614 | |
| 10615 | reset_device: |
| 10616 | if (hdev->asic_prop.fw_security_enabled && is_critical) { |
| 10617 | reset_flags |= HL_DRV_RESET_BYPASS_REQ_TO_FW; |
| 10618 | event_mask |= HL_NOTIFIER_EVENT_DEVICE_UNAVAILABLE; |
| 10619 | } else { |
| 10620 | reset_flags |= HL_DRV_RESET_DELAY; |
| 10621 | } |
| 10622 | /* escalate general hw errors to critical/fatal error */ |
| 10623 | if (event_mask & HL_NOTIFIER_EVENT_GENERAL_HW_ERR) |
hl_handle_critical_hw_err(hdev, event_type, &event_mask);
| 10625 | |
| 10626 | hl_debugfs_cfg_access_history_dump(hdev); |
| 10627 | event_mask |= HL_NOTIFIER_EVENT_DEVICE_RESET; |
hl_device_cond_reset(hdev, reset_flags, event_mask);
| 10629 | } |
| 10630 | |
| 10631 | static int gaudi2_memset_memory_chunk_using_edma_qm(struct hl_device *hdev, |
| 10632 | struct packet_lin_dma *lin_dma_pkt, |
| 10633 | u64 phys_addr, u32 hw_queue_id, u32 size, u64 addr, u32 val) |
| 10634 | { |
| 10635 | u32 ctl, pkt_size; |
| 10636 | int rc = 0, i; |
| 10637 | |
| 10638 | ctl = FIELD_PREP(GAUDI2_PKT_CTL_OPCODE_MASK, PACKET_LIN_DMA); |
| 10639 | ctl |= FIELD_PREP(GAUDI2_PKT_LIN_DMA_CTL_MEMSET_MASK, 1); |
| 10640 | ctl |= FIELD_PREP(GAUDI2_PKT_LIN_DMA_CTL_WRCOMP_MASK, 1); |
| 10641 | ctl |= FIELD_PREP(GAUDI2_PKT_CTL_EB_MASK, 1); |
| 10642 | |
| 10643 | lin_dma_pkt->ctl = cpu_to_le32(ctl); |
| 10644 | lin_dma_pkt->src_addr = cpu_to_le64(val); |
| 10645 | lin_dma_pkt->dst_addr = cpu_to_le64(addr); |
| 10646 | lin_dma_pkt->tsize = cpu_to_le32(size); |
| 10647 | |
| 10648 | pkt_size = sizeof(struct packet_lin_dma); |
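
/*
 * The loop below copies the packet to HBM as three u64 words, using the
 * debugfs memory-access helper.
 */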
| 10649 | |
| 10650 | for (i = 0; i < 3; i++) { |
| 10651 | rc = hdev->asic_funcs->access_dev_mem(hdev, PCI_REGION_DRAM, |
| 10652 | phys_addr + (i * sizeof(u64)), |
| 10653 | ((u64 *)(lin_dma_pkt)) + i, DEBUGFS_WRITE64); |
| 10654 | if (rc) { |
| 10655 | dev_err(hdev->dev, "Failed to copy lin_dma packet to HBM (%#llx)\n" , |
| 10656 | phys_addr); |
| 10657 | return rc; |
| 10658 | } |
| 10659 | } |
| 10660 | |
rc = hl_hw_queue_send_cb_no_cmpl(hdev, hw_queue_id, pkt_size, phys_addr);
| 10662 | if (rc) |
| 10663 | dev_err(hdev->dev, "Failed to send lin_dma packet to H/W queue %s\n" , |
| 10664 | GAUDI2_QUEUE_ID_TO_STR(hw_queue_id)); |
| 10665 | |
| 10666 | return rc; |
| 10667 | } |
| 10668 | |
| 10669 | static int gaudi2_memset_device_memory(struct hl_device *hdev, u64 addr, u64 size, u64 val) |
| 10670 | { |
| 10671 | u32 edma_queues_id[] = {GAUDI2_QUEUE_ID_DCORE0_EDMA_0_0, |
| 10672 | GAUDI2_QUEUE_ID_DCORE1_EDMA_0_0, |
| 10673 | GAUDI2_QUEUE_ID_DCORE2_EDMA_0_0, |
| 10674 | GAUDI2_QUEUE_ID_DCORE3_EDMA_0_0}; |
| 10675 | u32 chunk_size, dcore, edma_idx, sob_offset, sob_addr, comp_val, |
| 10676 | old_mmubp, mmubp, num_of_pkts, busy, pkt_size, cb_len; |
| 10677 | u64 comp_addr, cur_addr = addr, end_addr = addr + size; |
| 10678 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 10679 | int rc = 0, dma_num = 0, i; |
| 10680 | void *lin_dma_pkts_arr; |
| 10681 | |
| 10682 | if (prop->edma_enabled_mask == 0) { |
| 10683 | dev_info(hdev->dev, "non of the EDMA engines is enabled - skip dram scrubbing\n" ); |
| 10684 | return -EIO; |
| 10685 | } |
| 10686 | |
| 10687 | sob_offset = hdev->asic_prop.first_available_user_sob[0] * 4; |
| 10688 | sob_addr = mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + sob_offset; |
| 10689 | comp_addr = CFG_BASE + sob_addr; |
| 10690 | comp_val = FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_INC_MASK, 1) | |
| 10691 | FIELD_PREP(DCORE0_SYNC_MNGR_OBJS_SOB_OBJ_VAL_MASK, 1); |
| 10692 | mmubp = FIELD_PREP(ARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP_WR_MASK, 1) | |
| 10693 | FIELD_PREP(ARC_FARM_KDMA_CTX_AXUSER_HB_MMU_BP_RD_MASK, 1); |
| 10694 | |
| 10695 | /* Calculate how many lin dma pkts we'll need */ |
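/* Each LIN_DMA packet scrubs at most 2GB, hence one packet per 2GB chunk. */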
| 10696 | num_of_pkts = div64_u64(round_up(size, SZ_2G), SZ_2G); |
| 10697 | pkt_size = sizeof(struct packet_lin_dma); |
| 10698 | cb_len = pkt_size * num_of_pkts; |
| 10699 | |
| 10700 | /* |
| 10701 | * if we're not scrubing HMMU or NIC reserved sections in hbm, |
| 10702 | * then it the scrubing of the user section, as we use the start of the user section |
| 10703 | * to store the CB of the EDMA QM, so shift the start address of the scrubbing accordingly |
| 10704 | * and scrub the CB section before leaving this function. |
| 10705 | */ |
| 10706 | if ((addr >= prop->dram_user_base_address) && |
| 10707 | (addr < prop->dram_user_base_address + cb_len)) |
| 10708 | cur_addr += (prop->dram_user_base_address + cb_len) - addr; |
| 10709 | |
| 10710 | lin_dma_pkts_arr = kvcalloc(num_of_pkts, pkt_size, GFP_KERNEL); |
| 10711 | if (!lin_dma_pkts_arr) |
| 10712 | return -ENOMEM; |
| 10713 | |
| 10714 | /* |
| 10715 | * set mmu bypass for the scrubbing - all ddmas are configured the same so save |
| 10716 | * only the first one to restore later |
| 10717 | * also set the sob addr for all edma cores for completion. |
| 10718 | * set QM as trusted to allow it to access physical address with MMU bp. |
| 10719 | */ |
| 10720 | old_mmubp = RREG32(mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_MMU_BP); |
| 10721 | for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) { |
| 10722 | for (edma_idx = 0 ; edma_idx < NUM_OF_EDMA_PER_DCORE ; edma_idx++) { |
| 10723 | u32 edma_offset = dcore * DCORE_OFFSET + edma_idx * DCORE_EDMA_OFFSET; |
| 10724 | u32 edma_bit = dcore * NUM_OF_EDMA_PER_DCORE + edma_idx; |
| 10725 | |
| 10726 | if (!(prop->edma_enabled_mask & BIT(edma_bit))) |
| 10727 | continue; |
| 10728 | |
| 10729 | WREG32(mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_MMU_BP + |
| 10730 | edma_offset, mmubp); |
| 10731 | WREG32(mmDCORE0_EDMA0_CORE_CTX_WR_COMP_ADDR_LO + edma_offset, |
| 10732 | lower_32_bits(comp_addr)); |
| 10733 | WREG32(mmDCORE0_EDMA0_CORE_CTX_WR_COMP_ADDR_HI + edma_offset, |
| 10734 | upper_32_bits(comp_addr)); |
| 10735 | WREG32(mmDCORE0_EDMA0_CORE_CTX_WR_COMP_WDATA + edma_offset, |
| 10736 | comp_val); |
gaudi2_qman_set_test_mode(hdev,
edma_queues_id[dcore] + 4 * edma_idx, true);
| 10739 | } |
| 10740 | } |
| 10741 | |
| 10742 | WREG32(sob_addr, 0); |
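/*
 * The SOB starts at zero; every EDMA write-completion increments it, so it
 * counts finished packets for the poll at edma_wait below.
 */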
| 10743 | |
| 10744 | while (cur_addr < end_addr) { |
| 10745 | for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) { |
| 10746 | for (edma_idx = 0 ; edma_idx < NUM_OF_EDMA_PER_DCORE ; edma_idx++) { |
| 10747 | u32 edma_bit = dcore * NUM_OF_EDMA_PER_DCORE + edma_idx; |
| 10748 | |
| 10749 | if (!(prop->edma_enabled_mask & BIT(edma_bit))) |
| 10750 | continue; |
| 10751 | |
| 10752 | chunk_size = min_t(u64, SZ_2G, end_addr - cur_addr); |
| 10753 | |
rc = gaudi2_memset_memory_chunk_using_edma_qm(hdev,
(struct packet_lin_dma *)lin_dma_pkts_arr + dma_num,
prop->dram_user_base_address + (dma_num * pkt_size),
edma_queues_id[dcore] + edma_idx * 4,
chunk_size, cur_addr, val);
| 10759 | if (rc) |
| 10760 | goto end; |
| 10761 | |
| 10762 | dma_num++; |
| 10763 | cur_addr += chunk_size; |
| 10764 | if (cur_addr == end_addr) |
| 10765 | goto edma_wait; |
| 10766 | } |
| 10767 | } |
| 10768 | } |
| 10769 | |
| 10770 | edma_wait: |
| 10771 | rc = hl_poll_timeout(hdev, sob_addr, busy, (busy == dma_num), 1000, 1000000); |
| 10772 | if (rc) { |
| 10773 | dev_err(hdev->dev, "DMA Timeout during HBM scrubbing(sob: 0x%x, dma_num: 0x%x)\n" , |
| 10774 | busy, dma_num); |
| 10775 | goto end; |
| 10776 | } |
| 10777 | end: |
| 10778 | for (dcore = 0 ; dcore < NUM_OF_DCORES ; dcore++) { |
| 10779 | for (edma_idx = 0 ; edma_idx < NUM_OF_EDMA_PER_DCORE ; edma_idx++) { |
| 10780 | u32 edma_offset = dcore * DCORE_OFFSET + edma_idx * DCORE_EDMA_OFFSET; |
| 10781 | u32 edma_bit = dcore * NUM_OF_EDMA_PER_DCORE + edma_idx; |
| 10782 | |
| 10783 | if (!(prop->edma_enabled_mask & BIT(edma_bit))) |
| 10784 | continue; |
| 10785 | |
| 10786 | WREG32(mmDCORE0_EDMA0_CORE_CTX_AXUSER_HB_MMU_BP + edma_offset, old_mmubp); |
| 10787 | WREG32(mmDCORE0_EDMA0_CORE_CTX_WR_COMP_ADDR_LO + edma_offset, 0); |
| 10788 | WREG32(mmDCORE0_EDMA0_CORE_CTX_WR_COMP_ADDR_HI + edma_offset, 0); |
| 10789 | WREG32(mmDCORE0_EDMA0_CORE_CTX_WR_COMP_WDATA + edma_offset, 0); |
gaudi2_qman_set_test_mode(hdev,
edma_queues_id[dcore] + 4 * edma_idx, false);
| 10792 | } |
| 10793 | } |
| 10794 | |
| 10795 | memset(lin_dma_pkts_arr, 0, sizeof(u64)); |
| 10796 | |
| 10797 | /* Zero the HBM area where we copied the CB */ |
for (i = 0; i < cb_len; i += sizeof(u64))
| 10799 | rc = hdev->asic_funcs->access_dev_mem(hdev, PCI_REGION_DRAM, |
| 10800 | prop->dram_user_base_address + i, |
| 10801 | (u64 *)(lin_dma_pkts_arr), DEBUGFS_WRITE64); |
| 10802 | WREG32(sob_addr, 0); |
| 10803 | |
kvfree(lin_dma_pkts_arr);
| 10805 | |
| 10806 | return rc; |
| 10807 | } |
| 10808 | |
| 10809 | static int gaudi2_scrub_device_dram(struct hl_device *hdev, u64 val) |
| 10810 | { |
| 10811 | int rc; |
| 10812 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 10813 | u64 size = prop->dram_end_address - prop->dram_user_base_address; |
| 10814 | |
rc = gaudi2_memset_device_memory(hdev, prop->dram_user_base_address, size, val);
| 10816 | |
| 10817 | if (rc) |
| 10818 | dev_err(hdev->dev, "Failed to scrub dram, address: 0x%llx size: %llu\n" , |
| 10819 | prop->dram_user_base_address, size); |
| 10820 | return rc; |
| 10821 | } |
| 10822 | |
| 10823 | static int gaudi2_scrub_device_mem(struct hl_device *hdev) |
| 10824 | { |
| 10825 | int rc; |
| 10826 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 10827 | u64 val = hdev->memory_scrub_val; |
| 10828 | u64 addr, size; |
| 10829 | |
| 10830 | if (!hdev->memory_scrub) |
| 10831 | return 0; |
| 10832 | |
| 10833 | /* scrub SRAM */ |
| 10834 | addr = prop->sram_user_base_address; |
| 10835 | size = hdev->pldm ? 0x10000 : (prop->sram_size - SRAM_USER_BASE_OFFSET); |
| 10836 | dev_dbg(hdev->dev, "Scrubbing SRAM: 0x%09llx - 0x%09llx, val: 0x%llx\n" , |
| 10837 | addr, addr + size, val); |
| 10838 | rc = gaudi2_memset_device_memory(hdev, addr, size, val); |
| 10839 | if (rc) { |
| 10840 | dev_err(hdev->dev, "scrubbing SRAM failed (%d)\n" , rc); |
| 10841 | return rc; |
| 10842 | } |
| 10843 | |
| 10844 | /* scrub DRAM */ |
| 10845 | rc = gaudi2_scrub_device_dram(hdev, val); |
| 10846 | if (rc) { |
| 10847 | dev_err(hdev->dev, "scrubbing DRAM failed (%d)\n" , rc); |
| 10848 | return rc; |
| 10849 | } |
| 10850 | return 0; |
| 10851 | } |
| 10852 | |
| 10853 | static void gaudi2_restore_user_sm_registers(struct hl_device *hdev) |
| 10854 | { |
| 10855 | u64 addr, mon_sts_addr, mon_cfg_addr, cq_lbw_l_addr, cq_lbw_h_addr, |
| 10856 | cq_lbw_data_addr, cq_base_l_addr, cq_base_h_addr, cq_size_addr; |
| 10857 | u32 val, size, offset; |
| 10858 | int dcore_id; |
| 10859 | |
| 10860 | offset = hdev->asic_prop.first_available_cq[0] * 4; |
| 10861 | cq_lbw_l_addr = mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_0 + offset; |
| 10862 | cq_lbw_h_addr = mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_0 + offset; |
| 10863 | cq_lbw_data_addr = mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_0 + offset; |
| 10864 | cq_base_l_addr = mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_0 + offset; |
| 10865 | cq_base_h_addr = mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_0 + offset; |
| 10866 | cq_size_addr = mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_0 + offset; |
| 10867 | size = mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_0 - |
| 10868 | (mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_0 + offset); |
| 10869 | |
| 10870 | /* memset dcore0 CQ registers */ |
gaudi2_memset_device_lbw(hdev, cq_lbw_l_addr, size, 0);
gaudi2_memset_device_lbw(hdev, cq_lbw_h_addr, size, 0);
gaudi2_memset_device_lbw(hdev, cq_lbw_data_addr, size, 0);
gaudi2_memset_device_lbw(hdev, cq_base_l_addr, size, 0);
gaudi2_memset_device_lbw(hdev, cq_base_h_addr, size, 0);
gaudi2_memset_device_lbw(hdev, cq_size_addr, size, 0);
| 10877 | |
| 10878 | cq_lbw_l_addr = mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_0 + DCORE_OFFSET; |
| 10879 | cq_lbw_h_addr = mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_0 + DCORE_OFFSET; |
| 10880 | cq_lbw_data_addr = mmDCORE0_SYNC_MNGR_GLBL_LBW_DATA_0 + DCORE_OFFSET; |
| 10881 | cq_base_l_addr = mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_L_0 + DCORE_OFFSET; |
| 10882 | cq_base_h_addr = mmDCORE0_SYNC_MNGR_GLBL_CQ_BASE_ADDR_H_0 + DCORE_OFFSET; |
| 10883 | cq_size_addr = mmDCORE0_SYNC_MNGR_GLBL_CQ_SIZE_LOG2_0 + DCORE_OFFSET; |
| 10884 | size = mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_H_0 - mmDCORE0_SYNC_MNGR_GLBL_LBW_ADDR_L_0; |
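
/* Unlike dcore0, the remaining dcores are cleared over their full CQ register range. */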
| 10885 | |
| 10886 | for (dcore_id = 1 ; dcore_id < NUM_OF_DCORES ; dcore_id++) { |
gaudi2_memset_device_lbw(hdev, cq_lbw_l_addr, size, 0);
gaudi2_memset_device_lbw(hdev, cq_lbw_h_addr, size, 0);
gaudi2_memset_device_lbw(hdev, cq_lbw_data_addr, size, 0);
gaudi2_memset_device_lbw(hdev, cq_base_l_addr, size, 0);
gaudi2_memset_device_lbw(hdev, cq_base_h_addr, size, 0);
gaudi2_memset_device_lbw(hdev, cq_size_addr, size, 0);
| 10893 | |
| 10894 | cq_lbw_l_addr += DCORE_OFFSET; |
| 10895 | cq_lbw_h_addr += DCORE_OFFSET; |
| 10896 | cq_lbw_data_addr += DCORE_OFFSET; |
| 10897 | cq_base_l_addr += DCORE_OFFSET; |
| 10898 | cq_base_h_addr += DCORE_OFFSET; |
| 10899 | cq_size_addr += DCORE_OFFSET; |
| 10900 | } |
| 10901 | |
| 10902 | offset = hdev->asic_prop.first_available_user_mon[0] * 4; |
| 10903 | addr = mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_0 + offset; |
| 10904 | val = 1 << DCORE0_SYNC_MNGR_OBJS_MON_STATUS_PROT_SHIFT; |
| 10905 | size = mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_0 - (mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_0 + offset); |
| 10906 | |
| 10907 | /* memset dcore0 monitors */ |
| 10908 | gaudi2_memset_device_lbw(hdev, addr, size, val); |
| 10909 | |
| 10910 | addr = mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_0 + offset; |
gaudi2_memset_device_lbw(hdev, addr, size, 0);
| 10912 | |
| 10913 | mon_sts_addr = mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_0 + DCORE_OFFSET; |
| 10914 | mon_cfg_addr = mmDCORE0_SYNC_MNGR_OBJS_MON_CONFIG_0 + DCORE_OFFSET; |
| 10915 | size = mmDCORE0_SYNC_MNGR_OBJS_SM_SEC_0 - mmDCORE0_SYNC_MNGR_OBJS_MON_STATUS_0; |
| 10916 | |
| 10917 | for (dcore_id = 1 ; dcore_id < NUM_OF_DCORES ; dcore_id++) { |
gaudi2_memset_device_lbw(hdev, mon_sts_addr, size, val);
gaudi2_memset_device_lbw(hdev, mon_cfg_addr, size, 0);
| 10920 | mon_sts_addr += DCORE_OFFSET; |
| 10921 | mon_cfg_addr += DCORE_OFFSET; |
| 10922 | } |
| 10923 | |
| 10924 | offset = hdev->asic_prop.first_available_user_sob[0] * 4; |
| 10925 | addr = mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + offset; |
| 10926 | val = 0; |
| 10927 | size = mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 - |
| 10928 | (mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + offset); |
| 10929 | |
| 10930 | /* memset dcore0 sobs */ |
| 10931 | gaudi2_memset_device_lbw(hdev, addr, size, val); |
| 10932 | |
| 10933 | addr = mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + DCORE_OFFSET; |
| 10934 | size = mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 - mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0; |
| 10935 | |
| 10936 | for (dcore_id = 1 ; dcore_id < NUM_OF_DCORES ; dcore_id++) { |
| 10937 | gaudi2_memset_device_lbw(hdev, addr, size, val); |
| 10938 | addr += DCORE_OFFSET; |
| 10939 | } |
| 10940 | |
| 10941 | /* Flush all WREG to prevent race */ |
| 10942 | val = RREG32(mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + offset); |
| 10943 | } |
| 10944 | |
| 10945 | static void gaudi2_restore_user_qm_registers(struct hl_device *hdev) |
| 10946 | { |
| 10947 | u32 reg_base, hw_queue_id; |
| 10948 | |
| 10949 | for (hw_queue_id = GAUDI2_QUEUE_ID_PDMA_0_0 ; hw_queue_id <= GAUDI2_QUEUE_ID_ROT_1_0; |
| 10950 | hw_queue_id += NUM_OF_PQ_PER_QMAN) { |
| 10951 | if (!gaudi2_is_queue_enabled(hdev, hw_queue_id)) |
| 10952 | continue; |
| 10953 | |
gaudi2_clear_qm_fence_counters_common(hdev, hw_queue_id, false);
| 10955 | |
| 10956 | reg_base = gaudi2_qm_blocks_bases[hw_queue_id]; |
| 10957 | WREG32(reg_base + QM_ARB_CFG_0_OFFSET, 0); |
| 10958 | } |
| 10959 | |
| 10960 | /* Flush all WREG to prevent race */ |
| 10961 | RREG32(mmPDMA0_QM_ARB_CFG_0); |
| 10962 | } |
| 10963 | |
| 10964 | static void gaudi2_restore_nic_qm_registers(struct hl_device *hdev) |
| 10965 | { |
| 10966 | u32 reg_base, hw_queue_id; |
| 10967 | |
| 10968 | for (hw_queue_id = GAUDI2_QUEUE_ID_NIC_0_0 ; hw_queue_id <= GAUDI2_QUEUE_ID_NIC_23_3; |
| 10969 | hw_queue_id += NUM_OF_PQ_PER_QMAN) { |
| 10970 | if (!gaudi2_is_queue_enabled(hdev, hw_queue_id)) |
| 10971 | continue; |
| 10972 | |
gaudi2_clear_qm_fence_counters_common(hdev, hw_queue_id, false);
| 10974 | |
| 10975 | reg_base = gaudi2_qm_blocks_bases[hw_queue_id]; |
| 10976 | WREG32(reg_base + QM_ARB_CFG_0_OFFSET, 0); |
| 10977 | } |
| 10978 | |
| 10979 | /* Flush all WREG to prevent race */ |
| 10980 | RREG32(mmPDMA0_QM_ARB_CFG_0); |
| 10981 | } |
| 10982 | |
| 10983 | static int gaudi2_context_switch(struct hl_device *hdev, u32 asid) |
| 10984 | { |
| 10985 | return 0; |
| 10986 | } |
| 10987 | |
| 10988 | static void gaudi2_restore_phase_topology(struct hl_device *hdev) |
| 10989 | { |
| 10990 | } |
| 10991 | |
| 10992 | static void gaudi2_init_block_instances(struct hl_device *hdev, u32 block_idx, |
| 10993 | struct dup_block_ctx *cfg_ctx) |
| 10994 | { |
| 10995 | u64 block_base = cfg_ctx->base + block_idx * cfg_ctx->block_off; |
| 10996 | u8 seq; |
| 10997 | int i; |
| 10998 | |
| 10999 | for (i = 0 ; i < cfg_ctx->instances ; i++) { |
| 11000 | seq = block_idx * cfg_ctx->instances + i; |
| 11001 | |
| 11002 | /* skip disabled instance */ |
| 11003 | if (!(cfg_ctx->enabled_mask & BIT_ULL(seq))) |
| 11004 | continue; |
| 11005 | |
| 11006 | cfg_ctx->instance_cfg_fn(hdev, block_base + i * cfg_ctx->instance_off, |
| 11007 | cfg_ctx->data); |
| 11008 | } |
| 11009 | } |
| 11010 | |
| 11011 | static void gaudi2_init_blocks_with_mask(struct hl_device *hdev, struct dup_block_ctx *cfg_ctx, |
| 11012 | u64 mask) |
| 11013 | { |
| 11014 | int i; |
| 11015 | |
| 11016 | cfg_ctx->enabled_mask = mask; |
| 11017 | |
| 11018 | for (i = 0 ; i < cfg_ctx->blocks ; i++) |
gaudi2_init_block_instances(hdev, i, cfg_ctx);
| 11020 | } |
| 11021 | |
| 11022 | void gaudi2_init_blocks(struct hl_device *hdev, struct dup_block_ctx *cfg_ctx) |
| 11023 | { |
| 11024 | gaudi2_init_blocks_with_mask(hdev, cfg_ctx, U64_MAX); |
| 11025 | } |
| 11026 | |
| 11027 | static int gaudi2_debugfs_read_dma(struct hl_device *hdev, u64 addr, u32 size, void *blob_addr) |
| 11028 | { |
| 11029 | void *host_mem_virtual_addr; |
| 11030 | dma_addr_t host_mem_dma_addr; |
| 11031 | u64 reserved_va_base; |
| 11032 | u32 pos, size_left, size_to_dma; |
| 11033 | struct hl_ctx *ctx; |
| 11034 | int rc = 0; |
| 11035 | |
| 11036 | /* Fetch the ctx */ |
| 11037 | ctx = hl_get_compute_ctx(hdev); |
| 11038 | if (!ctx) { |
| 11039 | dev_err(hdev->dev, "No ctx available\n" ); |
| 11040 | return -EINVAL; |
| 11041 | } |
| 11042 | |
| 11043 | /* Allocate buffers for read and for poll */ |
| 11044 | host_mem_virtual_addr = hl_asic_dma_alloc_coherent(hdev, SZ_2M, &host_mem_dma_addr, |
| 11045 | GFP_KERNEL | __GFP_ZERO); |
| 11046 | if (host_mem_virtual_addr == NULL) { |
| 11047 | dev_err(hdev->dev, "Failed to allocate memory for KDMA read\n" ); |
| 11048 | rc = -ENOMEM; |
| 11049 | goto put_ctx; |
| 11050 | } |
| 11051 | |
| 11052 | /* Reserve VM region on asic side */ |
reserved_va_base = hl_reserve_va_block(hdev, ctx, HL_VA_RANGE_TYPE_HOST, SZ_2M,
| 11054 | HL_MMU_VA_ALIGNMENT_NOT_NEEDED); |
| 11055 | if (!reserved_va_base) { |
| 11056 | dev_err(hdev->dev, "Failed to reserve vmem on asic\n" ); |
| 11057 | rc = -ENOMEM; |
| 11058 | goto free_data_buffer; |
| 11059 | } |
| 11060 | |
| 11061 | /* Create mapping on asic side */ |
| 11062 | mutex_lock(&hdev->mmu_lock); |
| 11063 | |
rc = hl_mmu_map_contiguous(ctx, reserved_va_base, host_mem_dma_addr, SZ_2M);
| 11065 | if (rc) { |
| 11066 | dev_err(hdev->dev, "Failed to create mapping on asic mmu\n" ); |
| 11067 | goto unreserve_va; |
| 11068 | } |
| 11069 | |
rc = hl_mmu_invalidate_cache_range(hdev, false,
MMU_OP_USERPTR | MMU_OP_SKIP_LOW_CACHE_INV,
ctx->asid, reserved_va_base, SZ_2M);
| 11073 | if (rc) { |
hl_mmu_unmap_contiguous(ctx, reserved_va_base, SZ_2M);
| 11075 | goto unreserve_va; |
| 11076 | } |
| 11077 | |
mutex_unlock(&hdev->mmu_lock);
| 11079 | |
| 11080 | /* Enable MMU on KDMA */ |
gaudi2_kdma_set_mmbp_asid(hdev, false, ctx->asid);
| 11082 | |
| 11083 | pos = 0; |
| 11084 | size_left = size; |
| 11085 | size_to_dma = SZ_2M; |
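
/*
 * Copy in 2MB chunks: KDMA transfers device memory into the host buffer
 * mapped above, and each chunk is then copied into the caller's blob.
 */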
| 11086 | |
| 11087 | while (size_left > 0) { |
| 11088 | if (size_left < SZ_2M) |
| 11089 | size_to_dma = size_left; |
| 11090 | |
rc = gaudi2_send_job_to_kdma(hdev, addr, reserved_va_base, size_to_dma, false);
| 11092 | if (rc) |
| 11093 | break; |
| 11094 | |
| 11095 | memcpy(blob_addr + pos, host_mem_virtual_addr, size_to_dma); |
| 11096 | |
| 11097 | if (size_left <= SZ_2M) |
| 11098 | break; |
| 11099 | |
| 11100 | pos += SZ_2M; |
| 11101 | addr += SZ_2M; |
| 11102 | size_left -= SZ_2M; |
| 11103 | } |
| 11104 | |
gaudi2_kdma_set_mmbp_asid(hdev, true, HL_KERNEL_ASID_ID);
| 11106 | |
| 11107 | mutex_lock(&hdev->mmu_lock); |
| 11108 | |
rc = hl_mmu_unmap_contiguous(ctx, reserved_va_base, SZ_2M);
| 11110 | if (rc) |
| 11111 | goto unreserve_va; |
| 11112 | |
rc = hl_mmu_invalidate_cache_range(hdev, false, MMU_OP_USERPTR,
ctx->asid, reserved_va_base, SZ_2M);
| 11115 | |
| 11116 | unreserve_va: |
mutex_unlock(&hdev->mmu_lock);
hl_unreserve_va_block(hdev, ctx, reserved_va_base, SZ_2M);
| 11119 | free_data_buffer: |
| 11120 | hl_asic_dma_free_coherent(hdev, SZ_2M, host_mem_virtual_addr, host_mem_dma_addr); |
| 11121 | put_ctx: |
| 11122 | hl_ctx_put(ctx); |
| 11123 | |
| 11124 | return rc; |
| 11125 | } |
| 11126 | |
| 11127 | static int gaudi2_internal_cb_pool_init(struct hl_device *hdev, struct hl_ctx *ctx) |
| 11128 | { |
| 11129 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 11130 | int min_alloc_order, rc; |
| 11131 | |
| 11132 | if (!(gaudi2->hw_cap_initialized & HW_CAP_PMMU)) |
| 11133 | return 0; |
| 11134 | |
| 11135 | hdev->internal_cb_pool_virt_addr = hl_asic_dma_alloc_coherent(hdev, |
| 11136 | HOST_SPACE_INTERNAL_CB_SZ, |
| 11137 | &hdev->internal_cb_pool_dma_addr, |
| 11138 | GFP_KERNEL | __GFP_ZERO); |
| 11139 | |
| 11140 | if (!hdev->internal_cb_pool_virt_addr) |
| 11141 | return -ENOMEM; |
| 11142 | |
| 11143 | min_alloc_order = ilog2(min(gaudi2_get_signal_cb_size(hdev), |
| 11144 | gaudi2_get_wait_cb_size(hdev))); |
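/* Use the smaller of the signal/wait CB sizes as the pool allocation granularity. */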
| 11145 | |
| 11146 | hdev->internal_cb_pool = gen_pool_create(min_alloc_order, -1); |
| 11147 | if (!hdev->internal_cb_pool) { |
| 11148 | dev_err(hdev->dev, "Failed to create internal CB pool\n" ); |
| 11149 | rc = -ENOMEM; |
| 11150 | goto free_internal_cb_pool; |
| 11151 | } |
| 11152 | |
rc = gen_pool_add(hdev->internal_cb_pool, (uintptr_t) hdev->internal_cb_pool_virt_addr,
HOST_SPACE_INTERNAL_CB_SZ, -1);
| 11155 | if (rc) { |
| 11156 | dev_err(hdev->dev, "Failed to add memory to internal CB pool\n" ); |
| 11157 | rc = -EFAULT; |
| 11158 | goto destroy_internal_cb_pool; |
| 11159 | } |
| 11160 | |
hdev->internal_cb_va_base = hl_reserve_va_block(hdev, ctx, HL_VA_RANGE_TYPE_HOST,
| 11162 | HOST_SPACE_INTERNAL_CB_SZ, HL_MMU_VA_ALIGNMENT_NOT_NEEDED); |
| 11163 | |
| 11164 | if (!hdev->internal_cb_va_base) { |
| 11165 | rc = -ENOMEM; |
| 11166 | goto destroy_internal_cb_pool; |
| 11167 | } |
| 11168 | |
| 11169 | mutex_lock(&hdev->mmu_lock); |
| 11170 | |
rc = hl_mmu_map_contiguous(ctx, hdev->internal_cb_va_base, hdev->internal_cb_pool_dma_addr,
| 11172 | HOST_SPACE_INTERNAL_CB_SZ); |
| 11173 | if (rc) |
| 11174 | goto unreserve_internal_cb_pool; |
| 11175 | |
rc = hl_mmu_invalidate_cache(hdev, false, MMU_OP_USERPTR);
| 11177 | if (rc) |
| 11178 | goto unmap_internal_cb_pool; |
| 11179 | |
mutex_unlock(&hdev->mmu_lock);
| 11181 | |
| 11182 | return 0; |
| 11183 | |
| 11184 | unmap_internal_cb_pool: |
hl_mmu_unmap_contiguous(ctx, hdev->internal_cb_va_base, HOST_SPACE_INTERNAL_CB_SZ);
| 11186 | unreserve_internal_cb_pool: |
mutex_unlock(&hdev->mmu_lock);
hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base, HOST_SPACE_INTERNAL_CB_SZ);
| 11189 | destroy_internal_cb_pool: |
| 11190 | gen_pool_destroy(hdev->internal_cb_pool); |
| 11191 | free_internal_cb_pool: |
| 11192 | hl_asic_dma_free_coherent(hdev, HOST_SPACE_INTERNAL_CB_SZ, hdev->internal_cb_pool_virt_addr, |
| 11193 | hdev->internal_cb_pool_dma_addr); |
| 11194 | |
| 11195 | return rc; |
| 11196 | } |
| 11197 | |
| 11198 | static void gaudi2_internal_cb_pool_fini(struct hl_device *hdev, struct hl_ctx *ctx) |
| 11199 | { |
| 11200 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 11201 | |
| 11202 | if (!(gaudi2->hw_cap_initialized & HW_CAP_PMMU)) |
| 11203 | return; |
| 11204 | |
| 11205 | mutex_lock(&hdev->mmu_lock); |
hl_mmu_unmap_contiguous(ctx, hdev->internal_cb_va_base, HOST_SPACE_INTERNAL_CB_SZ);
hl_unreserve_va_block(hdev, ctx, hdev->internal_cb_va_base, HOST_SPACE_INTERNAL_CB_SZ);
hl_mmu_invalidate_cache(hdev, true, MMU_OP_USERPTR);
mutex_unlock(&hdev->mmu_lock);
| 11210 | |
| 11211 | gen_pool_destroy(hdev->internal_cb_pool); |
| 11212 | |
| 11213 | hl_asic_dma_free_coherent(hdev, HOST_SPACE_INTERNAL_CB_SZ, hdev->internal_cb_pool_virt_addr, |
| 11214 | hdev->internal_cb_pool_dma_addr); |
| 11215 | } |
| 11216 | |
| 11217 | static void gaudi2_restore_user_registers(struct hl_device *hdev) |
| 11218 | { |
| 11219 | gaudi2_restore_user_sm_registers(hdev); |
| 11220 | gaudi2_restore_user_qm_registers(hdev); |
| 11221 | } |
| 11222 | |
| 11223 | static int gaudi2_map_virtual_msix_doorbell_memory(struct hl_ctx *ctx) |
| 11224 | { |
| 11225 | struct hl_device *hdev = ctx->hdev; |
| 11226 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 11227 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 11228 | int rc; |
| 11229 | |
rc = hl_mmu_map_page(ctx, RESERVED_VA_FOR_VIRTUAL_MSIX_DOORBELL_START,
gaudi2->virt_msix_db_dma_addr, prop->pmmu.page_size, true);
| 11232 | if (rc) |
| 11233 | dev_err(hdev->dev, "Failed to map VA %#llx for virtual MSI-X doorbell memory\n" , |
| 11234 | RESERVED_VA_FOR_VIRTUAL_MSIX_DOORBELL_START); |
| 11235 | |
| 11236 | return rc; |
| 11237 | } |
| 11238 | |
| 11239 | static void gaudi2_unmap_virtual_msix_doorbell_memory(struct hl_ctx *ctx) |
| 11240 | { |
| 11241 | struct hl_device *hdev = ctx->hdev; |
| 11242 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 11243 | int rc; |
| 11244 | |
rc = hl_mmu_unmap_page(ctx, RESERVED_VA_FOR_VIRTUAL_MSIX_DOORBELL_START,
prop->pmmu.page_size, true);
| 11247 | if (rc) |
| 11248 | dev_err(hdev->dev, "Failed to unmap VA %#llx of virtual MSI-X doorbell memory\n" , |
| 11249 | RESERVED_VA_FOR_VIRTUAL_MSIX_DOORBELL_START); |
| 11250 | } |
| 11251 | |
| 11252 | static int gaudi2_ctx_init(struct hl_ctx *ctx) |
| 11253 | { |
| 11254 | int rc; |
| 11255 | |
| 11256 | if (ctx->asid == HL_KERNEL_ASID_ID) |
| 11257 | return 0; |
| 11258 | |
rc = gaudi2_mmu_prepare(ctx->hdev, ctx->asid);
| 11260 | if (rc) |
| 11261 | return rc; |
| 11262 | |
/* No need to clear user registers if the device has just
 * performed reset; in that case we restore only the NIC QM registers.
 */
| 11266 | if (ctx->hdev->reset_upon_device_release) |
gaudi2_restore_nic_qm_registers(ctx->hdev);
else
gaudi2_restore_user_registers(ctx->hdev);
| 11270 | |
rc = gaudi2_internal_cb_pool_init(ctx->hdev, ctx);
| 11272 | if (rc) |
| 11273 | return rc; |
| 11274 | |
| 11275 | rc = gaudi2_map_virtual_msix_doorbell_memory(ctx); |
| 11276 | if (rc) |
gaudi2_internal_cb_pool_fini(ctx->hdev, ctx);
| 11278 | |
| 11279 | return rc; |
| 11280 | } |
| 11281 | |
| 11282 | static void gaudi2_ctx_fini(struct hl_ctx *ctx) |
| 11283 | { |
| 11284 | if (ctx->asid == HL_KERNEL_ASID_ID) |
| 11285 | return; |
| 11286 | |
gaudi2_internal_cb_pool_fini(ctx->hdev, ctx);
| 11288 | |
| 11289 | gaudi2_unmap_virtual_msix_doorbell_memory(ctx); |
| 11290 | } |
| 11291 | |
| 11292 | static int gaudi2_pre_schedule_cs(struct hl_cs *cs) |
| 11293 | { |
| 11294 | struct hl_device *hdev = cs->ctx->hdev; |
| 11295 | int index = cs->sequence & (hdev->asic_prop.max_pending_cs - 1); |
| 11296 | u32 mon_payload, sob_id, mon_id; |
| 11297 | |
| 11298 | if (!cs_needs_completion(cs)) |
| 11299 | return 0; |
| 11300 | |
| 11301 | /* |
* First 64 SOB/MON are reserved for the driver's QMAN auto-completion
* mechanism. Each SOB/MON pair is used for a pending CS with the same
* cyclic index. The SOB value is increased when each of the CS jobs is
* completed. When the SOB reaches the number of CS jobs, the monitor
* generates an MSI-X interrupt.
| 11307 | */ |
| 11308 | |
| 11309 | sob_id = mon_id = index; |
| 11310 | mon_payload = (1 << CQ_ENTRY_SHADOW_INDEX_VALID_SHIFT) | |
| 11311 | (1 << CQ_ENTRY_READY_SHIFT) | index; |
| 11312 | |
gaudi2_arm_cq_monitor(hdev, sob_id, mon_id, GAUDI2_RESERVED_CQ_CS_COMPLETION, mon_payload,
cs->jobs_cnt);
| 11315 | |
| 11316 | return 0; |
| 11317 | } |
| 11318 | |
| 11319 | static u32 gaudi2_get_queue_id_for_cq(struct hl_device *hdev, u32 cq_idx) |
| 11320 | { |
| 11321 | return HL_INVALID_QUEUE; |
| 11322 | } |
| 11323 | |
| 11324 | static u32 gaudi2_gen_signal_cb(struct hl_device *hdev, void *data, u16 sob_id, u32 size, bool eb) |
| 11325 | { |
| 11326 | struct hl_cb *cb = data; |
| 11327 | struct packet_msg_short *pkt; |
| 11328 | u32 value, ctl, pkt_size = sizeof(*pkt); |
| 11329 | |
| 11330 | pkt = (struct packet_msg_short *) (uintptr_t) (cb->kernel_address + size); |
| 11331 | memset(pkt, 0, pkt_size); |
| 11332 | |
| 11333 | /* Inc by 1, Mode ADD */ |
| 11334 | value = FIELD_PREP(GAUDI2_PKT_SHORT_VAL_SOB_SYNC_VAL_MASK, 1); |
| 11335 | value |= FIELD_PREP(GAUDI2_PKT_SHORT_VAL_SOB_MOD_MASK, 1); |
| 11336 | |
| 11337 | ctl = FIELD_PREP(GAUDI2_PKT_SHORT_CTL_ADDR_MASK, sob_id * 4); |
| 11338 | ctl |= FIELD_PREP(GAUDI2_PKT_SHORT_CTL_BASE_MASK, 1); /* SOB base */ |
| 11339 | ctl |= FIELD_PREP(GAUDI2_PKT_CTL_OPCODE_MASK, PACKET_MSG_SHORT); |
| 11340 | ctl |= FIELD_PREP(GAUDI2_PKT_CTL_EB_MASK, eb); |
| 11341 | ctl |= FIELD_PREP(GAUDI2_PKT_CTL_MB_MASK, 1); |
| 11342 | |
| 11343 | pkt->value = cpu_to_le32(value); |
| 11344 | pkt->ctl = cpu_to_le32(ctl); |
| 11345 | |
| 11346 | return size + pkt_size; |
| 11347 | } |
| 11348 | |
| 11349 | static u32 gaudi2_add_mon_msg_short(struct packet_msg_short *pkt, u32 value, u16 addr) |
| 11350 | { |
| 11351 | u32 ctl, pkt_size = sizeof(*pkt); |
| 11352 | |
| 11353 | memset(pkt, 0, pkt_size); |
| 11354 | |
| 11355 | ctl = FIELD_PREP(GAUDI2_PKT_SHORT_CTL_ADDR_MASK, addr); |
| 11356 | ctl |= FIELD_PREP(GAUDI2_PKT_SHORT_CTL_BASE_MASK, 0); /* MON base */ |
| 11357 | ctl |= FIELD_PREP(GAUDI2_PKT_CTL_OPCODE_MASK, PACKET_MSG_SHORT); |
| 11358 | ctl |= FIELD_PREP(GAUDI2_PKT_CTL_EB_MASK, 0); |
| 11359 | ctl |= FIELD_PREP(GAUDI2_PKT_CTL_MB_MASK, 0); |
| 11360 | |
| 11361 | pkt->value = cpu_to_le32(value); |
| 11362 | pkt->ctl = cpu_to_le32(ctl); |
| 11363 | |
| 11364 | return pkt_size; |
| 11365 | } |
| 11366 | |
| 11367 | static u32 gaudi2_add_arm_monitor_pkt(struct hl_device *hdev, struct packet_msg_short *pkt, |
| 11368 | u16 sob_base, u8 sob_mask, u16 sob_val, u16 addr) |
| 11369 | { |
| 11370 | u32 ctl, value, pkt_size = sizeof(*pkt); |
| 11371 | u8 mask; |
| 11372 | |
if (hl_gen_sob_mask(sob_base, sob_mask, &mask)) {
dev_err(hdev->dev, "sob_base %u (mask %#x) is not valid\n", sob_base, sob_mask);
| 11375 | return 0; |
| 11376 | } |
| 11377 | |
| 11378 | memset(pkt, 0, pkt_size); |
| 11379 | |
| 11380 | value = FIELD_PREP(GAUDI2_PKT_SHORT_VAL_MON_SYNC_GID_MASK, sob_base / 8); |
| 11381 | value |= FIELD_PREP(GAUDI2_PKT_SHORT_VAL_MON_SYNC_VAL_MASK, sob_val); |
| 11382 | value |= FIELD_PREP(GAUDI2_PKT_SHORT_VAL_MON_MODE_MASK, 0); /* GREATER OR EQUAL*/ |
| 11383 | value |= FIELD_PREP(GAUDI2_PKT_SHORT_VAL_MON_MASK_MASK, mask); |
| 11384 | |
| 11385 | ctl = FIELD_PREP(GAUDI2_PKT_SHORT_CTL_ADDR_MASK, addr); |
| 11386 | ctl |= FIELD_PREP(GAUDI2_PKT_SHORT_CTL_BASE_MASK, 0); /* MON base */ |
| 11387 | ctl |= FIELD_PREP(GAUDI2_PKT_CTL_OPCODE_MASK, PACKET_MSG_SHORT); |
| 11388 | ctl |= FIELD_PREP(GAUDI2_PKT_CTL_EB_MASK, 0); |
| 11389 | ctl |= FIELD_PREP(GAUDI2_PKT_CTL_MB_MASK, 1); |
| 11390 | |
| 11391 | pkt->value = cpu_to_le32(value); |
| 11392 | pkt->ctl = cpu_to_le32(ctl); |
| 11393 | |
| 11394 | return pkt_size; |
| 11395 | } |
| 11396 | |
| 11397 | static u32 gaudi2_add_fence_pkt(struct packet_fence *pkt) |
| 11398 | { |
| 11399 | u32 ctl, cfg, pkt_size = sizeof(*pkt); |
| 11400 | |
| 11401 | memset(pkt, 0, pkt_size); |
| 11402 | |
| 11403 | cfg = FIELD_PREP(GAUDI2_PKT_FENCE_CFG_DEC_VAL_MASK, 1); |
| 11404 | cfg |= FIELD_PREP(GAUDI2_PKT_FENCE_CFG_TARGET_VAL_MASK, 1); |
| 11405 | cfg |= FIELD_PREP(GAUDI2_PKT_FENCE_CFG_ID_MASK, 2); |
| 11406 | |
| 11407 | ctl = FIELD_PREP(GAUDI2_PKT_CTL_OPCODE_MASK, PACKET_FENCE); |
| 11408 | ctl |= FIELD_PREP(GAUDI2_PKT_CTL_EB_MASK, 0); |
| 11409 | ctl |= FIELD_PREP(GAUDI2_PKT_CTL_MB_MASK, 1); |
| 11410 | |
| 11411 | pkt->cfg = cpu_to_le32(cfg); |
| 11412 | pkt->ctl = cpu_to_le32(ctl); |
| 11413 | |
| 11414 | return pkt_size; |
| 11415 | } |
| 11416 | |
| 11417 | static u32 gaudi2_gen_wait_cb(struct hl_device *hdev, struct hl_gen_wait_properties *prop) |
| 11418 | { |
| 11419 | struct hl_cb *cb = prop->data; |
| 11420 | void *buf = (void *) (uintptr_t) (cb->kernel_address); |
| 11421 | |
| 11422 | u64 monitor_base, fence_addr = 0; |
| 11423 | u32 stream_index, size = prop->size; |
| 11424 | u16 msg_addr_offset; |
| 11425 | |
| 11426 | stream_index = prop->q_idx % 4; |
| 11427 | fence_addr = CFG_BASE + gaudi2_qm_blocks_bases[prop->q_idx] + |
| 11428 | QM_FENCE2_OFFSET + stream_index * 4; |
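/* prop->q_idx selects the QMAN; this stream's FENCE2 register is the wait target. */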
| 11429 | |
| 11430 | /* |
| 11431 | * monitor_base should be the content of the base0 address registers, |
| 11432 | * so it will be added to the msg short offsets |
| 11433 | */ |
| 11434 | monitor_base = mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0; |
| 11435 | |
| 11436 | /* First monitor config packet: low address of the sync */ |
| 11437 | msg_addr_offset = (mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRL_0 + prop->mon_id * 4) - |
| 11438 | monitor_base; |
| 11439 | |
size += gaudi2_add_mon_msg_short(buf + size, (u32) fence_addr, msg_addr_offset);
| 11441 | |
| 11442 | /* Second monitor config packet: high address of the sync */ |
| 11443 | msg_addr_offset = (mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_ADDRH_0 + prop->mon_id * 4) - |
| 11444 | monitor_base; |
| 11445 | |
size += gaudi2_add_mon_msg_short(buf + size, (u32) (fence_addr >> 32), msg_addr_offset);
| 11447 | |
| 11448 | /* |
| 11449 | * Third monitor config packet: the payload, i.e. what to write when the |
| 11450 | * sync triggers |
| 11451 | */ |
| 11452 | msg_addr_offset = (mmDCORE0_SYNC_MNGR_OBJS_MON_PAY_DATA_0 + prop->mon_id * 4) - |
| 11453 | monitor_base; |
| 11454 | |
	size += gaudi2_add_mon_msg_short(buf + size, 1, msg_addr_offset);
| 11456 | |
| 11457 | /* Fourth monitor config packet: bind the monitor to a sync object */ |
| 11458 | msg_addr_offset = (mmDCORE0_SYNC_MNGR_OBJS_MON_ARM_0 + prop->mon_id * 4) - monitor_base; |
| 11459 | |
	size += gaudi2_add_arm_monitor_pkt(hdev, buf + size, prop->sob_base, prop->sob_mask,
						prop->sob_val, msg_addr_offset);
| 11462 | |
| 11463 | /* Fence packet */ |
	size += gaudi2_add_fence_pkt(buf + size);
| 11465 | |
| 11466 | return size; |
| 11467 | } |
| 11468 | |
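/* Zero the SOB value register in the sync manager and rearm its refcount */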
| 11469 | static void gaudi2_reset_sob(struct hl_device *hdev, void *data) |
| 11470 | { |
| 11471 | struct hl_hw_sob *hw_sob = data; |
| 11472 | |
	dev_dbg(hdev->dev, "reset SOB, q_idx: %d, sob_id: %d\n", hw_sob->q_idx, hw_sob->sob_id);
| 11474 | |
| 11475 | WREG32(mmDCORE0_SYNC_MNGR_OBJS_SOB_OBJ_0 + hw_sob->sob_id * 4, 0); |
| 11476 | |
	kref_init(&hw_sob->kref);
| 11478 | } |
| 11479 | |
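/*
 * Intentionally empty: Gaudi2 does not use SOB groups, as the collective
 * wait flow is unsupported (see gaudi2_collective_wait_create_jobs()).
 */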
| 11480 | static void gaudi2_reset_sob_group(struct hl_device *hdev, u16 sob_group) |
| 11481 | { |
| 11482 | } |
| 11483 | |
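/* Compose the 64-bit PSOC timestamp from its two 32-bit counter halves */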
| 11484 | static u64 gaudi2_get_device_time(struct hl_device *hdev) |
| 11485 | { |
| 11486 | u64 device_time = ((u64) RREG32(mmPSOC_TIMESTAMP_CNTCVU)) << 32; |
| 11487 | |
| 11488 | return device_time | RREG32(mmPSOC_TIMESTAMP_CNTCVL); |
| 11489 | } |
| 11490 | |
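/*
 * Collective wait is unsupported on Gaudi2: the init stub succeeds so the
 * generic CS flow can proceed, while job creation is rejected with -EINVAL.
 */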
| 11491 | static int gaudi2_collective_wait_init_cs(struct hl_cs *cs) |
| 11492 | { |
| 11493 | return 0; |
| 11494 | } |
| 11495 | |
| 11496 | static int gaudi2_collective_wait_create_jobs(struct hl_device *hdev, struct hl_ctx *ctx, |
| 11497 | struct hl_cs *cs, u32 wait_queue_id, |
| 11498 | u32 collective_engine_id, u32 encaps_signal_offset) |
| 11499 | { |
| 11500 | return -EINVAL; |
| 11501 | } |
| 11502 | |
| 11503 | /* |
| 11504 | * hl_mmu_scramble - converts a dram (non power of 2) page-size aligned address |
| 11505 | * to DMMU page-size address (64MB) before mapping it in |
| 11506 | * the MMU. |
| 11507 | * The operation is performed on both the virtual and physical addresses. |
| 11508 | * for device with 6 HBMs the scramble is: |
| 11509 | * (addr[47:0] / 48M) * 64M + addr % 48M + addr[63:48] |
| 11510 | * |
| 11511 | * Example: |
| 11512 | * ============================================================================= |
| 11513 | * Allocated DRAM Reserved VA scrambled VA for MMU mapping Scrambled PA |
| 11514 | * Phys address in MMU last |
| 11515 | * HOP |
| 11516 | * ============================================================================= |
| 11517 | * PA1 0x3000000 VA1 0x9C000000 SVA1= (VA1/48M)*64M 0xD0000000 <- PA1/48M 0x1 |
| 11518 | * PA2 0x9000000 VA2 0x9F000000 SVA2= (VA2/48M)*64M 0xD4000000 <- PA2/48M 0x3 |
| 11519 | * ============================================================================= |
| 11520 | */ |
| 11521 | static u64 gaudi2_mmu_scramble_addr(struct hl_device *hdev, u64 raw_addr) |
| 11522 | { |
| 11523 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 11524 | u32 divisor, mod_va; |
| 11525 | u64 div_va; |
| 11526 | |
| 11527 | /* accept any address in the DRAM address space */ |
	if (hl_mem_area_inside_range(raw_addr, sizeof(raw_addr), DRAM_PHYS_BASE,
| 11529 | VA_HBM_SPACE_END)) { |
| 11530 | |
| 11531 | divisor = prop->num_functional_hbms * GAUDI2_HBM_MMU_SCRM_MEM_SIZE; |
		div_va = div_u64_rem(raw_addr & GAUDI2_HBM_MMU_SCRM_ADDRESS_MASK, divisor, &mod_va);
| 11533 | return (raw_addr & ~GAUDI2_HBM_MMU_SCRM_ADDRESS_MASK) | |
| 11534 | (div_va << GAUDI2_HBM_MMU_SCRM_DIV_SHIFT) | |
| 11535 | (mod_va << GAUDI2_HBM_MMU_SCRM_MOD_SHIFT); |
| 11536 | } |
| 11537 | |
| 11538 | return raw_addr; |
| 11539 | } |
| 11540 | |
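/*
 * Inverse of gaudi2_mmu_scramble_addr(): for a device with 6 HBMs,
 * the descramble is (addr[47:0] / 64M) * 48M + addr % 64M + addr[63:48].
 */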
| 11541 | static u64 gaudi2_mmu_descramble_addr(struct hl_device *hdev, u64 scrambled_addr) |
| 11542 | { |
| 11543 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 11544 | u32 divisor, mod_va; |
| 11545 | u64 div_va; |
| 11546 | |
| 11547 | /* accept any address in the DRAM address space */ |
	if (hl_mem_area_inside_range(scrambled_addr, sizeof(scrambled_addr), DRAM_PHYS_BASE,
| 11549 | VA_HBM_SPACE_END)) { |
| 11550 | |
| 11551 | divisor = prop->num_functional_hbms * GAUDI2_HBM_MMU_SCRM_MEM_SIZE; |
		div_va = div_u64_rem(scrambled_addr & GAUDI2_HBM_MMU_SCRM_ADDRESS_MASK,
					PAGE_SIZE_64MB, &mod_va);
| 11554 | |
| 11555 | return ((scrambled_addr & ~GAUDI2_HBM_MMU_SCRM_ADDRESS_MASK) + |
| 11556 | (div_va * divisor + mod_va)); |
| 11557 | } |
| 11558 | |
| 11559 | return scrambled_addr; |
| 11560 | } |
| 11561 | |
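/*
 * Decoders 0-7 reside in the DCOREs (NUM_OF_DEC_PER_DCORE per DCORE);
 * higher IDs map to the PCIe shared decoders.
 */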
| 11562 | static u32 gaudi2_get_dec_base_addr(struct hl_device *hdev, u32 core_id) |
| 11563 | { |
| 11564 | u32 base = 0, dcore_id, dec_id; |
| 11565 | |
| 11566 | if (core_id >= NUMBER_OF_DEC) { |
| 11567 | dev_err(hdev->dev, "Unexpected core number %d for DEC\n" , core_id); |
| 11568 | goto out; |
| 11569 | } |
| 11570 | |
| 11571 | if (core_id < 8) { |
| 11572 | dcore_id = core_id / NUM_OF_DEC_PER_DCORE; |
| 11573 | dec_id = core_id % NUM_OF_DEC_PER_DCORE; |
| 11574 | |
| 11575 | base = mmDCORE0_DEC0_CMD_BASE + dcore_id * DCORE_OFFSET + |
| 11576 | dec_id * DCORE_VDEC_OFFSET; |
| 11577 | } else { |
| 11578 | /* PCIe Shared Decoder */ |
| 11579 | base = mmPCIE_DEC0_CMD_BASE + ((core_id % 8) * PCIE_VDEC_OFFSET); |
| 11580 | } |
| 11581 | out: |
| 11582 | return base; |
| 11583 | } |
| 11584 | |
| 11585 | static int gaudi2_get_hw_block_id(struct hl_device *hdev, u64 block_addr, |
| 11586 | u32 *block_size, u32 *block_id) |
| 11587 | { |
| 11588 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 11589 | int i; |
| 11590 | |
| 11591 | for (i = 0 ; i < NUM_USER_MAPPED_BLOCKS ; i++) { |
| 11592 | if (block_addr == CFG_BASE + gaudi2->mapped_blocks[i].address) { |
| 11593 | *block_id = i; |
| 11594 | if (block_size) |
| 11595 | *block_size = gaudi2->mapped_blocks[i].size; |
| 11596 | return 0; |
| 11597 | } |
| 11598 | } |
| 11599 | |
| 11600 | dev_err(hdev->dev, "Invalid block address %#llx" , block_addr); |
| 11601 | |
| 11602 | return -EINVAL; |
| 11603 | } |
| 11604 | |
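/*
 * Map a single user-mappable HW block to userspace: the block's device
 * address is converted to an offset within the SRAM/CFG BAR, whose
 * device-side base is STM_FLASH_BASE_ADDR.
 */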
| 11605 | static int gaudi2_block_mmap(struct hl_device *hdev, struct vm_area_struct *vma, |
| 11606 | u32 block_id, u32 block_size) |
| 11607 | { |
| 11608 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 11609 | u64 offset_in_bar; |
| 11610 | u64 address; |
| 11611 | int rc; |
| 11612 | |
| 11613 | if (block_id >= NUM_USER_MAPPED_BLOCKS) { |
| 11614 | dev_err(hdev->dev, "Invalid block id %u" , block_id); |
| 11615 | return -EINVAL; |
| 11616 | } |
| 11617 | |
| 11618 | /* we allow mapping only an entire block */ |
| 11619 | if (block_size != gaudi2->mapped_blocks[block_id].size) { |
| 11620 | dev_err(hdev->dev, "Invalid block size %u" , block_size); |
| 11621 | return -EINVAL; |
| 11622 | } |
| 11623 | |
| 11624 | offset_in_bar = CFG_BASE + gaudi2->mapped_blocks[block_id].address - STM_FLASH_BASE_ADDR; |
| 11625 | |
| 11626 | address = pci_resource_start(hdev->pdev, SRAM_CFG_BAR_ID) + offset_in_bar; |
| 11627 | |
| 11628 | vm_flags_set(vma, VM_IO | VM_PFNMAP | VM_DONTEXPAND | VM_DONTDUMP | |
| 11629 | VM_DONTCOPY | VM_NORESERVE); |
| 11630 | |
	rc = remap_pfn_range(vma, vma->vm_start, address >> PAGE_SHIFT,
				block_size, vma->vm_page_prot);
	if (rc)
		dev_err(hdev->dev, "remap_pfn_range error %d", rc);
| 11635 | |
| 11636 | return rc; |
| 11637 | } |
| 11638 | |
| 11639 | static void gaudi2_enable_events_from_fw(struct hl_device *hdev) |
| 11640 | { |
| 11641 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 11642 | |
| 11643 | struct cpu_dyn_regs *dyn_regs = &hdev->fw_loader.dynamic_loader.comm_desc.cpu_dyn_regs; |
| 11644 | u32 irq_handler_offset = le32_to_cpu(dyn_regs->gic_host_ints_irq); |
| 11645 | |
| 11646 | if (gaudi2->hw_cap_initialized & HW_CAP_CPU_Q) |
| 11647 | WREG32(irq_handler_offset, |
| 11648 | gaudi2_irq_map_table[GAUDI2_EVENT_CPU_INTS_REGISTER].cpu_id); |
| 11649 | } |
| 11650 | |
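/* Translate an HW_CAP_* MMU capability bit to the matching MMU register base */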
| 11651 | static int gaudi2_get_mmu_base(struct hl_device *hdev, u64 mmu_id, u32 *mmu_base) |
| 11652 | { |
| 11653 | switch (mmu_id) { |
| 11654 | case HW_CAP_DCORE0_DMMU0: |
| 11655 | *mmu_base = mmDCORE0_HMMU0_MMU_BASE; |
| 11656 | break; |
| 11657 | case HW_CAP_DCORE0_DMMU1: |
| 11658 | *mmu_base = mmDCORE0_HMMU1_MMU_BASE; |
| 11659 | break; |
| 11660 | case HW_CAP_DCORE0_DMMU2: |
| 11661 | *mmu_base = mmDCORE0_HMMU2_MMU_BASE; |
| 11662 | break; |
| 11663 | case HW_CAP_DCORE0_DMMU3: |
| 11664 | *mmu_base = mmDCORE0_HMMU3_MMU_BASE; |
| 11665 | break; |
| 11666 | case HW_CAP_DCORE1_DMMU0: |
| 11667 | *mmu_base = mmDCORE1_HMMU0_MMU_BASE; |
| 11668 | break; |
| 11669 | case HW_CAP_DCORE1_DMMU1: |
| 11670 | *mmu_base = mmDCORE1_HMMU1_MMU_BASE; |
| 11671 | break; |
| 11672 | case HW_CAP_DCORE1_DMMU2: |
| 11673 | *mmu_base = mmDCORE1_HMMU2_MMU_BASE; |
| 11674 | break; |
| 11675 | case HW_CAP_DCORE1_DMMU3: |
| 11676 | *mmu_base = mmDCORE1_HMMU3_MMU_BASE; |
| 11677 | break; |
| 11678 | case HW_CAP_DCORE2_DMMU0: |
| 11679 | *mmu_base = mmDCORE2_HMMU0_MMU_BASE; |
| 11680 | break; |
| 11681 | case HW_CAP_DCORE2_DMMU1: |
| 11682 | *mmu_base = mmDCORE2_HMMU1_MMU_BASE; |
| 11683 | break; |
| 11684 | case HW_CAP_DCORE2_DMMU2: |
| 11685 | *mmu_base = mmDCORE2_HMMU2_MMU_BASE; |
| 11686 | break; |
| 11687 | case HW_CAP_DCORE2_DMMU3: |
| 11688 | *mmu_base = mmDCORE2_HMMU3_MMU_BASE; |
| 11689 | break; |
| 11690 | case HW_CAP_DCORE3_DMMU0: |
| 11691 | *mmu_base = mmDCORE3_HMMU0_MMU_BASE; |
| 11692 | break; |
| 11693 | case HW_CAP_DCORE3_DMMU1: |
| 11694 | *mmu_base = mmDCORE3_HMMU1_MMU_BASE; |
| 11695 | break; |
| 11696 | case HW_CAP_DCORE3_DMMU2: |
| 11697 | *mmu_base = mmDCORE3_HMMU2_MMU_BASE; |
| 11698 | break; |
| 11699 | case HW_CAP_DCORE3_DMMU3: |
| 11700 | *mmu_base = mmDCORE3_HMMU3_MMU_BASE; |
| 11701 | break; |
| 11702 | case HW_CAP_PMMU: |
| 11703 | *mmu_base = mmPMMU_HBW_MMU_BASE; |
| 11704 | break; |
| 11705 | default: |
| 11706 | return -EINVAL; |
| 11707 | } |
| 11708 | |
| 11709 | return 0; |
| 11710 | } |
| 11711 | |
| 11712 | static void gaudi2_ack_mmu_error(struct hl_device *hdev, u64 mmu_id) |
| 11713 | { |
| 11714 | bool is_pmmu = (mmu_id == HW_CAP_PMMU); |
| 11715 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 11716 | u32 mmu_base; |
| 11717 | |
| 11718 | if (!(gaudi2->hw_cap_initialized & mmu_id)) |
| 11719 | return; |
| 11720 | |
	if (gaudi2_get_mmu_base(hdev, mmu_id, &mmu_base))
| 11722 | return; |
| 11723 | |
| 11724 | gaudi2_handle_page_error(hdev, mmu_base, is_pmmu, NULL); |
| 11725 | gaudi2_handle_access_error(hdev, mmu_base, is_pmmu); |
| 11726 | } |
| 11727 | |
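/*
 * The loop below relies on the HW_CAP_DCOREx_DMMUy bits being consecutive,
 * starting at HW_CAP_DCORE0_DMMU0, so shifting by the HMMU index yields
 * the capability bit of each HMMU.
 */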
| 11728 | static int gaudi2_ack_mmu_page_fault_or_access_error(struct hl_device *hdev, u64 mmu_cap_mask) |
| 11729 | { |
| 11730 | u32 i, mmu_id, num_of_hmmus = NUM_OF_HMMU_PER_DCORE * NUM_OF_DCORES; |
| 11731 | |
| 11732 | /* check all HMMUs */ |
| 11733 | for (i = 0 ; i < num_of_hmmus ; i++) { |
| 11734 | mmu_id = HW_CAP_DCORE0_DMMU0 << i; |
| 11735 | |
| 11736 | if (mmu_cap_mask & mmu_id) |
| 11737 | gaudi2_ack_mmu_error(hdev, mmu_id); |
| 11738 | } |
| 11739 | |
| 11740 | /* check PMMU */ |
| 11741 | if (mmu_cap_mask & HW_CAP_PMMU) |
| 11742 | gaudi2_ack_mmu_error(hdev, HW_CAP_PMMU); |
| 11743 | |
| 11744 | return 0; |
| 11745 | } |
| 11746 | |
| 11747 | static void gaudi2_get_msi_info(__le32 *table) |
| 11748 | { |
| 11749 | table[CPUCP_EVENT_QUEUE_MSI_TYPE] = cpu_to_le32(GAUDI2_EVENT_QUEUE_MSIX_IDX); |
| 11750 | table[CPUCP_EVENT_QUEUE_ERR_MSI_TYPE] = cpu_to_le32(GAUDI2_IRQ_NUM_EQ_ERROR); |
| 11751 | } |
| 11752 | |
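/* Translate a uapi PLL index (HL_GAUDI2_*) to the firmware's PLL numbering */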
| 11753 | static int gaudi2_map_pll_idx_to_fw_idx(u32 pll_idx) |
| 11754 | { |
| 11755 | switch (pll_idx) { |
| 11756 | case HL_GAUDI2_CPU_PLL: return CPU_PLL; |
| 11757 | case HL_GAUDI2_PCI_PLL: return PCI_PLL; |
| 11758 | case HL_GAUDI2_NIC_PLL: return NIC_PLL; |
| 11759 | case HL_GAUDI2_DMA_PLL: return DMA_PLL; |
| 11760 | case HL_GAUDI2_MESH_PLL: return MESH_PLL; |
| 11761 | case HL_GAUDI2_MME_PLL: return MME_PLL; |
| 11762 | case HL_GAUDI2_TPC_PLL: return TPC_PLL; |
| 11763 | case HL_GAUDI2_IF_PLL: return IF_PLL; |
| 11764 | case HL_GAUDI2_SRAM_PLL: return SRAM_PLL; |
| 11765 | case HL_GAUDI2_HBM_PLL: return HBM_PLL; |
| 11766 | case HL_GAUDI2_VID_PLL: return VID_PLL; |
| 11767 | case HL_GAUDI2_MSS_PLL: return MSS_PLL; |
| 11768 | default: return -EINVAL; |
| 11769 | } |
| 11770 | } |
| 11771 | |
| 11772 | static int gaudi2_gen_sync_to_engine_map(struct hl_device *hdev, struct hl_sync_to_engine_map *map) |
| 11773 | { |
| 11774 | /* Not implemented */ |
| 11775 | return 0; |
| 11776 | } |
| 11777 | |
| 11778 | static int gaudi2_monitor_valid(struct hl_mon_state_dump *mon) |
| 11779 | { |
| 11780 | /* Not implemented */ |
| 11781 | return 0; |
| 11782 | } |
| 11783 | |
| 11784 | static int gaudi2_print_single_monitor(char **buf, size_t *size, size_t *offset, |
| 11785 | struct hl_device *hdev, struct hl_mon_state_dump *mon) |
| 11786 | { |
| 11787 | /* Not implemented */ |
| 11788 | return 0; |
| 11789 | } |
| 11790 | |
| 11792 | static int gaudi2_print_fences_single_engine(struct hl_device *hdev, u64 base_offset, |
| 11793 | u64 status_base_offset, enum hl_sync_engine_type engine_type, |
| 11794 | u32 engine_id, char **buf, size_t *size, size_t *offset) |
| 11795 | { |
| 11796 | /* Not implemented */ |
| 11797 | return 0; |
| 11798 | } |
| 11799 | |
| 11801 | static struct hl_state_dump_specs_funcs gaudi2_state_dump_funcs = { |
| 11802 | .monitor_valid = gaudi2_monitor_valid, |
| 11803 | .print_single_monitor = gaudi2_print_single_monitor, |
| 11804 | .gen_sync_to_engine_map = gaudi2_gen_sync_to_engine_map, |
| 11805 | .print_fences_single_engine = gaudi2_print_fences_single_engine, |
| 11806 | }; |
| 11807 | |
| 11808 | static void gaudi2_state_dump_init(struct hl_device *hdev) |
| 11809 | { |
| 11810 | /* Not implemented */ |
| 11811 | hdev->state_dump_specs.props = gaudi2_state_dump_specs_props; |
| 11812 | hdev->state_dump_specs.funcs = gaudi2_state_dump_funcs; |
| 11813 | } |
| 11814 | |
| 11815 | static u32 gaudi2_get_sob_addr(struct hl_device *hdev, u32 sob_id) |
| 11816 | { |
| 11817 | return 0; |
| 11818 | } |
| 11819 | |
| 11820 | static u32 *gaudi2_get_stream_master_qid_arr(void) |
| 11821 | { |
| 11822 | return NULL; |
| 11823 | } |
| 11824 | |
| 11825 | static void gaudi2_add_device_attr(struct hl_device *hdev, struct attribute_group *dev_clk_attr_grp, |
| 11826 | struct attribute_group *dev_vrm_attr_grp) |
| 11827 | { |
| 11828 | hl_sysfs_add_dev_clk_attr(hdev, dev_clk_attr_grp); |
| 11829 | hl_sysfs_add_dev_vrm_attr(hdev, dev_vrm_attr_grp); |
| 11830 | } |
| 11831 | |
| 11832 | static int gaudi2_mmu_get_real_page_size(struct hl_device *hdev, struct hl_mmu_properties *mmu_prop, |
| 11833 | u32 page_size, u32 *real_page_size, bool is_dram_addr) |
| 11834 | { |
| 11835 | struct asic_fixed_properties *prop = &hdev->asic_prop; |
| 11836 | |
	/* for host pages the page size must be a multiple of the MMU page size */
| 11838 | if (!is_dram_addr) { |
| 11839 | if (page_size % mmu_prop->page_size) |
| 11840 | goto page_size_err; |
| 11841 | |
| 11842 | *real_page_size = mmu_prop->page_size; |
| 11843 | return 0; |
| 11844 | } |
| 11845 | |
| 11846 | if ((page_size % prop->dram_page_size) || (prop->dram_page_size > mmu_prop->page_size)) |
| 11847 | goto page_size_err; |
| 11848 | |
| 11849 | /* |
| 11850 | * MMU page size is different from DRAM page size (more precisely, DMMU page is greater |
| 11851 | * than DRAM page size). |
| 11852 | * for this reason work with the DRAM page size and let the MMU scrambling routine handle |
| 11853 | * this mismatch when calculating the address to place in the MMU page table. |
| 11854 | * (in that case also make sure that the dram_page_size is not greater than the |
| 11855 | * mmu page size) |
| 11856 | */ |
| 11857 | *real_page_size = prop->dram_page_size; |
| 11858 | |
| 11859 | return 0; |
| 11860 | |
| 11861 | page_size_err: |
| 11862 | dev_err(hdev->dev, "page size of 0x%X is not 0x%X aligned, can't map\n" , |
| 11863 | page_size, mmu_prop->page_size >> 10); |
| 11864 | return -EFAULT; |
| 11865 | } |
| 11866 | |
| 11867 | static int gaudi2_get_monitor_dump(struct hl_device *hdev, void *data) |
| 11868 | { |
| 11869 | return -EOPNOTSUPP; |
| 11870 | } |
| 11871 | |
| 11872 | int gaudi2_send_device_activity(struct hl_device *hdev, bool open) |
| 11873 | { |
| 11874 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 11875 | |
| 11876 | if (!(gaudi2->hw_cap_initialized & HW_CAP_CPU_Q)) |
| 11877 | return 0; |
| 11878 | |
| 11879 | return hl_fw_send_device_activity(hdev, open); |
| 11880 | } |
| 11881 | |
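/*
 * PTEs reside in DRAM and are accessed through the DRAM BAR; the given
 * address must fall inside the window currently mapped at dram_bar_cur_addr.
 */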
| 11882 | static u64 gaudi2_read_pte(struct hl_device *hdev, u64 addr) |
| 11883 | { |
| 11884 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 11885 | u64 val; |
| 11886 | |
| 11887 | if (hdev->reset_info.hard_reset_pending) |
| 11888 | return U64_MAX; |
| 11889 | |
	val = readq(hdev->pcie_bar[DRAM_BAR_ID] + (addr - gaudi2->dram_bar_cur_addr));
| 11891 | |
| 11892 | return val; |
| 11893 | } |
| 11894 | |
| 11895 | static void gaudi2_write_pte(struct hl_device *hdev, u64 addr, u64 val) |
| 11896 | { |
| 11897 | struct gaudi2_device *gaudi2 = hdev->asic_specific; |
| 11898 | |
| 11899 | if (hdev->reset_info.hard_reset_pending) |
| 11900 | return; |
| 11901 | |
	writeq(val, hdev->pcie_bar[DRAM_BAR_ID] + (addr - gaudi2->dram_bar_cur_addr));
| 11903 | } |
| 11904 | |
| 11905 | static const struct hl_asic_funcs gaudi2_funcs = { |
| 11906 | .early_init = gaudi2_early_init, |
| 11907 | .early_fini = gaudi2_early_fini, |
| 11908 | .late_init = gaudi2_late_init, |
| 11909 | .late_fini = gaudi2_late_fini, |
| 11910 | .sw_init = gaudi2_sw_init, |
| 11911 | .sw_fini = gaudi2_sw_fini, |
| 11912 | .hw_init = gaudi2_hw_init, |
| 11913 | .hw_fini = gaudi2_hw_fini, |
| 11914 | .halt_engines = gaudi2_halt_engines, |
| 11915 | .suspend = gaudi2_suspend, |
| 11916 | .resume = gaudi2_resume, |
| 11917 | .mmap = gaudi2_mmap, |
| 11918 | .ring_doorbell = gaudi2_ring_doorbell, |
| 11919 | .pqe_write = gaudi2_pqe_write, |
| 11920 | .asic_dma_alloc_coherent = gaudi2_dma_alloc_coherent, |
| 11921 | .asic_dma_free_coherent = gaudi2_dma_free_coherent, |
| 11922 | .scrub_device_mem = gaudi2_scrub_device_mem, |
| 11923 | .scrub_device_dram = gaudi2_scrub_device_dram, |
| 11924 | .get_int_queue_base = NULL, |
| 11925 | .test_queues = gaudi2_test_queues, |
| 11926 | .asic_dma_pool_zalloc = gaudi2_dma_pool_zalloc, |
| 11927 | .asic_dma_pool_free = gaudi2_dma_pool_free, |
| 11928 | .cpu_accessible_dma_pool_alloc = gaudi2_cpu_accessible_dma_pool_alloc, |
| 11929 | .cpu_accessible_dma_pool_free = gaudi2_cpu_accessible_dma_pool_free, |
| 11930 | .dma_unmap_sgtable = hl_asic_dma_unmap_sgtable, |
| 11931 | .cs_parser = gaudi2_cs_parser, |
| 11932 | .dma_map_sgtable = hl_asic_dma_map_sgtable, |
| 11933 | .add_end_of_cb_packets = NULL, |
| 11934 | .update_eq_ci = gaudi2_update_eq_ci, |
| 11935 | .context_switch = gaudi2_context_switch, |
| 11936 | .restore_phase_topology = gaudi2_restore_phase_topology, |
| 11937 | .debugfs_read_dma = gaudi2_debugfs_read_dma, |
| 11938 | .add_device_attr = gaudi2_add_device_attr, |
| 11939 | .handle_eqe = gaudi2_handle_eqe, |
| 11940 | .get_events_stat = gaudi2_get_events_stat, |
| 11941 | .read_pte = gaudi2_read_pte, |
| 11942 | .write_pte = gaudi2_write_pte, |
| 11943 | .mmu_invalidate_cache = gaudi2_mmu_invalidate_cache, |
| 11944 | .mmu_invalidate_cache_range = gaudi2_mmu_invalidate_cache_range, |
| 11945 | .mmu_prefetch_cache_range = NULL, |
| 11946 | .send_heartbeat = gaudi2_send_heartbeat, |
| 11947 | .debug_coresight = gaudi2_debug_coresight, |
| 11948 | .is_device_idle = gaudi2_is_device_idle, |
| 11949 | .compute_reset_late_init = gaudi2_compute_reset_late_init, |
| 11950 | .hw_queues_lock = gaudi2_hw_queues_lock, |
| 11951 | .hw_queues_unlock = gaudi2_hw_queues_unlock, |
| 11952 | .get_pci_id = gaudi2_get_pci_id, |
| 11953 | .get_eeprom_data = gaudi2_get_eeprom_data, |
| 11954 | .get_monitor_dump = gaudi2_get_monitor_dump, |
| 11955 | .send_cpu_message = gaudi2_send_cpu_message, |
| 11956 | .pci_bars_map = gaudi2_pci_bars_map, |
| 11957 | .init_iatu = gaudi2_init_iatu, |
| 11958 | .rreg = hl_rreg, |
| 11959 | .wreg = hl_wreg, |
| 11960 | .halt_coresight = gaudi2_halt_coresight, |
| 11961 | .ctx_init = gaudi2_ctx_init, |
| 11962 | .ctx_fini = gaudi2_ctx_fini, |
| 11963 | .pre_schedule_cs = gaudi2_pre_schedule_cs, |
| 11964 | .get_queue_id_for_cq = gaudi2_get_queue_id_for_cq, |
| 11965 | .load_firmware_to_device = NULL, |
| 11966 | .load_boot_fit_to_device = NULL, |
| 11967 | .get_signal_cb_size = gaudi2_get_signal_cb_size, |
| 11968 | .get_wait_cb_size = gaudi2_get_wait_cb_size, |
| 11969 | .gen_signal_cb = gaudi2_gen_signal_cb, |
| 11970 | .gen_wait_cb = gaudi2_gen_wait_cb, |
| 11971 | .reset_sob = gaudi2_reset_sob, |
| 11972 | .reset_sob_group = gaudi2_reset_sob_group, |
| 11973 | .get_device_time = gaudi2_get_device_time, |
| 11974 | .pb_print_security_errors = gaudi2_pb_print_security_errors, |
| 11975 | .collective_wait_init_cs = gaudi2_collective_wait_init_cs, |
| 11976 | .collective_wait_create_jobs = gaudi2_collective_wait_create_jobs, |
| 11977 | .get_dec_base_addr = gaudi2_get_dec_base_addr, |
| 11978 | .scramble_addr = gaudi2_mmu_scramble_addr, |
| 11979 | .descramble_addr = gaudi2_mmu_descramble_addr, |
| 11980 | .ack_protection_bits_errors = gaudi2_ack_protection_bits_errors, |
| 11981 | .get_hw_block_id = gaudi2_get_hw_block_id, |
| 11982 | .hw_block_mmap = gaudi2_block_mmap, |
| 11983 | .enable_events_from_fw = gaudi2_enable_events_from_fw, |
| 11984 | .ack_mmu_errors = gaudi2_ack_mmu_page_fault_or_access_error, |
| 11985 | .get_msi_info = gaudi2_get_msi_info, |
| 11986 | .map_pll_idx_to_fw_idx = gaudi2_map_pll_idx_to_fw_idx, |
| 11987 | .init_firmware_preload_params = gaudi2_init_firmware_preload_params, |
| 11988 | .init_firmware_loader = gaudi2_init_firmware_loader, |
| 11989 | .init_cpu_scrambler_dram = gaudi2_init_scrambler_hbm, |
| 11990 | .state_dump_init = gaudi2_state_dump_init, |
	.get_sob_addr = gaudi2_get_sob_addr,
| 11992 | .set_pci_memory_regions = gaudi2_set_pci_memory_regions, |
| 11993 | .get_stream_master_qid_arr = gaudi2_get_stream_master_qid_arr, |
| 11994 | .check_if_razwi_happened = gaudi2_check_if_razwi_happened, |
| 11995 | .mmu_get_real_page_size = gaudi2_mmu_get_real_page_size, |
| 11996 | .access_dev_mem = hl_access_dev_mem, |
| 11997 | .set_dram_bar_base = gaudi2_set_hbm_bar_base, |
| 11998 | .set_engine_cores = gaudi2_set_engine_cores, |
| 11999 | .set_engines = gaudi2_set_engines, |
| 12000 | .send_device_activity = gaudi2_send_device_activity, |
| 12001 | .set_dram_properties = gaudi2_set_dram_properties, |
| 12002 | .set_binning_masks = gaudi2_set_binning_masks, |
| 12003 | }; |
| 12004 | |
| 12005 | void gaudi2_set_asic_funcs(struct hl_device *hdev) |
| 12006 | { |
| 12007 | hdev->asic_funcs = &gaudi2_funcs; |
| 12008 | } |
| 12009 | |