// SPDX-License-Identifier: GPL-2.0

/*
 * Copyright 2016-2022 HabanaLabs, Ltd.
 * All Rights Reserved.
 */

#include <linux/slab.h>
#include <linux/pci.h>

#include "../habanalabs.h"

#include <trace/events/habanalabs.h>

/**
 * hl_mmu_get_funcs() - get MMU functions structure
 * @hdev: habanalabs device structure.
 * @pgt_residency: page table residency.
 * @is_dram_addr: true if we need HMMU functions
 *
 * @return appropriate MMU functions structure
 */
static struct hl_mmu_funcs *hl_mmu_get_funcs(struct hl_device *hdev, int pgt_residency,
						bool is_dram_addr)
{
	return &hdev->mmu_func[pgt_residency];
}

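/**
 * hl_is_dram_va() - check if a virtual address falls in the DRAM MMU range.
 * @hdev: habanalabs device structure.
 * @virt_addr: the virtual address to check.
 *
 * Return: true if the address (with DMMU page size) lies inside the DRAM
 *         VA range, false otherwise.
 */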
bool hl_is_dram_va(struct hl_device *hdev, u64 virt_addr)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	return hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
					prop->dmmu.start_addr,
					prop->dmmu.end_addr);
}

/**
 * hl_mmu_init() - initialize the MMU module.
 * @hdev: habanalabs device structure.
 *
 * Return: 0 for success, non-zero for failure.
 */
int hl_mmu_init(struct hl_device *hdev)
{
	int rc = -EOPNOTSUPP;

	if (hdev->mmu_disable)
		return 0;

	mutex_init(&hdev->mmu_lock);

	if (hdev->mmu_func[MMU_DR_PGT].init != NULL) {
		rc = hdev->mmu_func[MMU_DR_PGT].init(hdev);
		if (rc)
			return rc;
	}

	if (hdev->mmu_func[MMU_HR_PGT].init != NULL) {
		rc = hdev->mmu_func[MMU_HR_PGT].init(hdev);
		if (rc)
			goto fini_dr_mmu;
	}

	return 0;

fini_dr_mmu:
	if (hdev->mmu_func[MMU_DR_PGT].fini != NULL)
		hdev->mmu_func[MMU_DR_PGT].fini(hdev);

	return rc;
}

/**
 * hl_mmu_fini() - release the MMU module.
 * @hdev: habanalabs device structure.
 *
 * This function does the following:
 * - Disable MMU in H/W.
 * - Free the pgt_infos pool.
 *
 * All contexts should be freed before calling this function.
 */
void hl_mmu_fini(struct hl_device *hdev)
{
	if (hdev->mmu_disable)
		return;

	if (hdev->mmu_func[MMU_DR_PGT].fini != NULL)
		hdev->mmu_func[MMU_DR_PGT].fini(hdev);

	if (hdev->mmu_func[MMU_HR_PGT].fini != NULL)
		hdev->mmu_func[MMU_HR_PGT].fini(hdev);

	mutex_destroy(&hdev->mmu_lock);
}

/**
 * hl_mmu_ctx_init() - initialize a context for using the MMU module.
 * @ctx: pointer to the context structure to initialize.
 *
 * Initialize a mutex to protect the concurrent mapping flow, and a hash
 * to hold all page-table hops related to this context.
 * Return: 0 on success, non-zero otherwise.
 */
int hl_mmu_ctx_init(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	int rc = -EOPNOTSUPP;

	if (hdev->mmu_disable)
		return 0;

	if (hdev->mmu_func[MMU_DR_PGT].ctx_init != NULL) {
		rc = hdev->mmu_func[MMU_DR_PGT].ctx_init(ctx);
		if (rc)
			return rc;
	}

	if (hdev->mmu_func[MMU_HR_PGT].ctx_init != NULL) {
		rc = hdev->mmu_func[MMU_HR_PGT].ctx_init(ctx);
		if (rc)
			goto fini_dr_ctx;
	}

	return 0;

fini_dr_ctx:
	if (hdev->mmu_func[MMU_DR_PGT].ctx_fini != NULL)
		hdev->mmu_func[MMU_DR_PGT].ctx_fini(ctx);

	return rc;
}

/*
 * hl_mmu_ctx_fini - disable a ctx from using the mmu module
 *
 * @ctx: pointer to the context structure
 *
 * This function does the following:
 * - Free any pgts which were not freed yet
 * - Free the mutex
 * - Free DRAM default page mapping hops
 */
void hl_mmu_ctx_fini(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;

	if (hdev->mmu_disable)
		return;

	if (hdev->mmu_func[MMU_DR_PGT].ctx_fini != NULL)
		hdev->mmu_func[MMU_DR_PGT].ctx_fini(ctx);

	if (hdev->mmu_func[MMU_HR_PGT].ctx_fini != NULL)
		hdev->mmu_func[MMU_HR_PGT].ctx_fini(ctx);
}

/*
 * hl_mmu_get_real_page_size - get real page size to use in map/unmap operation
 *
 * @hdev: pointer to device data.
 * @mmu_prop: MMU properties.
 * @page_size: page size
 * @real_page_size: set here the actual page size to use for the operation
 * @is_dram_addr: true if DRAM address, otherwise false.
 *
 * @return 0 on success, otherwise non 0 error code
 *
 * Note that this is a general implementation that can fit most MMU
 * architectures. However, as this is used as an MMU function:
 * 1. it shall not be called directly - only via a mmu_func structure instance
 * 2. each MMU may modify the implementation internally
 */
int hl_mmu_get_real_page_size(struct hl_device *hdev, struct hl_mmu_properties *mmu_prop,
				u32 page_size, u32 *real_page_size, bool is_dram_addr)
{
	/*
	 * The H/W handles mapping of specific page sizes. Hence if the page
	 * size is bigger, we break it to sub-pages and map them separately.
	 */
	if ((page_size % mmu_prop->page_size) == 0) {
		*real_page_size = mmu_prop->page_size;
		return 0;
	}

	dev_err(hdev->dev, "page size of %u is not %uKB aligned, can't map\n",
		page_size, mmu_prop->page_size >> 10);

	return -EFAULT;
}

static struct hl_mmu_properties *hl_mmu_get_prop(struct hl_device *hdev, u32 page_size,
							bool is_dram_addr)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	if (is_dram_addr)
		return &prop->dmmu;
	else if ((page_size % prop->pmmu_huge.page_size) == 0)
		return &prop->pmmu_huge;

	return &prop->pmmu;
}

/*
 * hl_mmu_unmap_page - unmaps a virtual addr
 *
 * @ctx: pointer to the context structure
 * @virt_addr: virt addr to unmap
 * @page_size: size of the page to unmap
 * @flush_pte: whether to do a PCI flush
 *
 * This function does the following:
 * - Check that the virt addr is mapped
 * - Unmap the virt addr and frees pgts if possible
 * - Returns 0 on success, -EINVAL if the given addr is not mapped
 *
 * Because this function changes the page tables in the device and because it
 * changes the MMU hash, it must be protected by a lock.
 * However, because it unmaps only a single page, the lock should be
 * implemented in a higher level in order to protect the entire unmapping of
 * the memory area.
 *
 * For optimization reasons PCI flush may be requested once after unmapping of
 * large area.
 */
int hl_mmu_unmap_page(struct hl_ctx *ctx, u64 virt_addr, u32 page_size, bool flush_pte)
{
	struct hl_device *hdev = ctx->hdev;
	struct hl_mmu_properties *mmu_prop;
	struct hl_mmu_funcs *mmu_funcs;
	int i, pgt_residency, rc = 0;
	u32 real_page_size, npages;
	u64 real_virt_addr;
	bool is_dram_addr;

	if (hdev->mmu_disable)
		return 0;

	is_dram_addr = hl_is_dram_va(hdev, virt_addr);
	mmu_prop = hl_mmu_get_prop(hdev, page_size, is_dram_addr);

	pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;
	mmu_funcs = hl_mmu_get_funcs(hdev, pgt_residency, is_dram_addr);

	rc = hdev->asic_funcs->mmu_get_real_page_size(hdev, mmu_prop, page_size, &real_page_size,
							is_dram_addr);
	if (rc)
		return rc;

	npages = page_size / real_page_size;
	real_virt_addr = virt_addr;

	for (i = 0 ; i < npages ; i++) {
		rc = mmu_funcs->unmap(ctx, real_virt_addr, is_dram_addr);
		if (rc)
			break;

		real_virt_addr += real_page_size;
	}

	if (flush_pte)
		mmu_funcs->flush(ctx);

	if (trace_habanalabs_mmu_unmap_enabled() && !rc)
		trace_habanalabs_mmu_unmap(&hdev->pdev->dev, virt_addr, 0, page_size, flush_pte);

	return rc;
}

/*
 * hl_mmu_map_page - maps a virtual addr to physical addr
 *
 * @ctx: pointer to the context structure
 * @virt_addr: virt addr to map from
 * @phys_addr: phys addr to map to
 * @page_size: physical page size
 * @flush_pte: whether to do a PCI flush
 *
 * This function does the following:
 * - Check that the virt addr is not mapped
 * - Allocate pgts as necessary in order to map the virt addr to the phys
 * - Returns 0 on success, -EINVAL if addr is already mapped, or -ENOMEM.
 *
 * Because this function changes the page tables in the device and because it
 * changes the MMU hash, it must be protected by a lock.
 * However, because it maps only a single page, the lock should be implemented
 * in a higher level in order to protect the entire mapping of the memory area
 *
 * For optimization reasons PCI flush may be requested once after mapping of
 * large area.
 */
int hl_mmu_map_page(struct hl_ctx *ctx, u64 virt_addr, u64 phys_addr, u32 page_size,
			bool flush_pte)
{
	int i, rc, pgt_residency, mapped_cnt = 0;
	struct hl_device *hdev = ctx->hdev;
	struct hl_mmu_properties *mmu_prop;
	u64 real_virt_addr, real_phys_addr;
	struct hl_mmu_funcs *mmu_funcs;
	u32 real_page_size, npages;
	bool is_dram_addr;

	if (hdev->mmu_disable)
		return 0;

	is_dram_addr = hl_is_dram_va(hdev, virt_addr);
	mmu_prop = hl_mmu_get_prop(hdev, page_size, is_dram_addr);

	pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;
	mmu_funcs = hl_mmu_get_funcs(hdev, pgt_residency, is_dram_addr);

	rc = hdev->asic_funcs->mmu_get_real_page_size(hdev, mmu_prop, page_size, &real_page_size,
							is_dram_addr);
	if (rc)
		return rc;

	/*
	 * Verify that the phys and virt addresses are aligned with the
	 * MMU page size (in dram this means checking the address and MMU
	 * after scrambling)
	 */
	if ((is_dram_addr &&
			((hdev->asic_funcs->scramble_addr(hdev, phys_addr) &
				(mmu_prop->page_size - 1)) ||
			(hdev->asic_funcs->scramble_addr(hdev, virt_addr) &
				(mmu_prop->page_size - 1)))) ||
		(!is_dram_addr && ((phys_addr & (real_page_size - 1)) ||
				(virt_addr & (real_page_size - 1)))))
		dev_crit(hdev->dev,
			"Mapping address 0x%llx with virtual address 0x%llx and page size of 0x%x is erroneous! Addresses must be divisible by page size",
			phys_addr, virt_addr, real_page_size);

	npages = page_size / real_page_size;
	real_virt_addr = virt_addr;
	real_phys_addr = phys_addr;

	for (i = 0 ; i < npages ; i++) {
		rc = mmu_funcs->map(ctx, real_virt_addr, real_phys_addr, real_page_size,
					is_dram_addr);
		if (rc)
			goto err;

		real_virt_addr += real_page_size;
		real_phys_addr += real_page_size;
		mapped_cnt++;
	}

	if (flush_pte)
		mmu_funcs->flush(ctx);

	trace_habanalabs_mmu_map(&hdev->pdev->dev, virt_addr, phys_addr, page_size, flush_pte);

	return 0;

err:
	real_virt_addr = virt_addr;
	for (i = 0 ; i < mapped_cnt ; i++) {
		if (mmu_funcs->unmap(ctx, real_virt_addr, is_dram_addr))
			dev_warn_ratelimited(hdev->dev,
				"failed to unmap va: 0x%llx\n", real_virt_addr);

		real_virt_addr += real_page_size;
	}

	mmu_funcs->flush(ctx);

	return rc;
}

/*
 * hl_mmu_map_contiguous - implements a wrapper for hl_mmu_map_page
 *                         for mapping contiguous physical memory
 *
 * @ctx: pointer to the context structure
 * @virt_addr: virt addr to map from
 * @phys_addr: phys addr to map to
 * @size: size to map
 *
 */
int hl_mmu_map_contiguous(struct hl_ctx *ctx, u64 virt_addr,
				u64 phys_addr, u32 size)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 curr_va, curr_pa;
	u32 page_size;
	bool flush_pte;
	int rc = 0, off;

	if (hl_mem_area_inside_range(virt_addr, size,
			prop->dmmu.start_addr, prop->dmmu.end_addr))
		page_size = prop->dmmu.page_size;
	else if (hl_mem_area_inside_range(virt_addr, size,
			prop->pmmu.start_addr, prop->pmmu.end_addr))
		page_size = prop->pmmu.page_size;
	else if (hl_mem_area_inside_range(virt_addr, size,
			prop->pmmu_huge.start_addr, prop->pmmu_huge.end_addr))
		page_size = prop->pmmu_huge.page_size;
	else
		return -EINVAL;

	for (off = 0 ; off < size ; off += page_size) {
		curr_va = virt_addr + off;
		curr_pa = phys_addr + off;
		flush_pte = (off + page_size) >= size;
		rc = hl_mmu_map_page(ctx, curr_va, curr_pa, page_size,
					flush_pte);
		if (rc) {
			dev_err(hdev->dev,
				"Map failed for va 0x%llx to pa 0x%llx\n",
				curr_va, curr_pa);
			/* last mapping failed so don't try to unmap it - reduce off by page_size */
			off -= page_size;
			goto unmap;
		}
	}

	return rc;

unmap:
	for (; off >= 0 ; off -= page_size) {
		curr_va = virt_addr + off;
		flush_pte = (off - (s32) page_size) < 0;
		if (hl_mmu_unmap_page(ctx, curr_va, page_size, flush_pte))
			dev_warn_ratelimited(hdev->dev,
				"failed to unmap va 0x%llx\n", curr_va);
	}

	return rc;
}

/*
 * hl_mmu_unmap_contiguous - implements a wrapper for hl_mmu_unmap_page
 *                           for unmapping contiguous physical memory
 *
 * @ctx: pointer to the context structure
 * @virt_addr: virt addr to unmap
 * @size: size to unmap
 *
 */
int hl_mmu_unmap_contiguous(struct hl_ctx *ctx, u64 virt_addr, u32 size)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	u64 curr_va;
	u32 page_size;
	bool flush_pte;
	int rc = 0, off;

	if (hl_mem_area_inside_range(virt_addr, size,
			prop->dmmu.start_addr, prop->dmmu.end_addr))
		page_size = prop->dmmu.page_size;
	else if (hl_mem_area_inside_range(virt_addr, size,
			prop->pmmu.start_addr, prop->pmmu.end_addr))
		page_size = prop->pmmu.page_size;
	else if (hl_mem_area_inside_range(virt_addr, size,
			prop->pmmu_huge.start_addr, prop->pmmu_huge.end_addr))
		page_size = prop->pmmu_huge.page_size;
	else
		return -EINVAL;

	for (off = 0 ; off < size ; off += page_size) {
		curr_va = virt_addr + off;
		flush_pte = (off + page_size) >= size;
		rc = hl_mmu_unmap_page(ctx, curr_va, page_size, flush_pte);
		if (rc)
			dev_warn_ratelimited(hdev->dev,
				"Unmap failed for va 0x%llx\n", curr_va);
	}

	return rc;
}

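/*
 * hl_mmu_pa_page_with_offset - compute the full physical address of a mapping
 *
 * @ctx: pointer to the context structure
 * @virt_addr: the virtual address the translation was done for
 * @hops: HOPs info as returned by the page-table walk
 * @phys_addr: set here the physical address of the page, including the
 *             offset of @virt_addr within the page
 *
 * Non-power-of-2 DRAM page sizes are handled arithmetically; all other
 * ranges use bit masks derived from the last used HOP shift.
 */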
static void hl_mmu_pa_page_with_offset(struct hl_ctx *ctx, u64 virt_addr,
					struct hl_mmu_hop_info *hops,
					u64 *phys_addr)
{
	struct asic_fixed_properties *prop = &ctx->hdev->asic_prop;
	u64 offset_mask, addr_mask, hop_shift, tmp_phys_addr;
	struct hl_mmu_properties *mmu_prop;

	/* last hop holds the phys address and flags */
	if (hops->unscrambled_paddr)
		tmp_phys_addr = hops->unscrambled_paddr;
	else
		tmp_phys_addr = hops->hop_info[hops->used_hops - 1].hop_pte_val;

	if (hops->range_type == HL_VA_RANGE_TYPE_HOST_HUGE)
		mmu_prop = &prop->pmmu_huge;
	else if (hops->range_type == HL_VA_RANGE_TYPE_HOST)
		mmu_prop = &prop->pmmu;
	else /* HL_VA_RANGE_TYPE_DRAM */
		mmu_prop = &prop->dmmu;

	if ((hops->range_type == HL_VA_RANGE_TYPE_DRAM) &&
			!is_power_of_2(prop->dram_page_size)) {
		u64 dram_page_size, dram_base, abs_phys_addr, abs_virt_addr,
			page_id, page_start;
		u32 page_off;

		/*
		 * Bit arithmetic cannot be used for non power of two page
		 * sizes. In addition, since bit arithmetic is not used,
		 * we cannot ignore dram base. All that shall be considered.
		 */

		dram_page_size = prop->dram_page_size;
		dram_base = prop->dram_base_address;
		abs_phys_addr = tmp_phys_addr - dram_base;
		abs_virt_addr = virt_addr - dram_base;
		page_id = DIV_ROUND_DOWN_ULL(abs_phys_addr, dram_page_size);
		page_start = page_id * dram_page_size;
		div_u64_rem(abs_virt_addr, dram_page_size, &page_off);

		*phys_addr = page_start + page_off + dram_base;
	} else {
		/*
		 * find the correct hop shift field in hl_mmu_properties
		 * structure in order to determine the right masks
		 * for the page offset.
		 */
		hop_shift = mmu_prop->hop_shifts[hops->used_hops - 1];
		offset_mask = (1ull << hop_shift) - 1;
		addr_mask = ~(offset_mask);
		*phys_addr = (tmp_phys_addr & addr_mask) |
				(virt_addr & offset_mask);
	}
}

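/**
 * hl_mmu_va_to_pa() - translate a device virtual address to a physical address
 * @ctx: pointer to the context structure.
 * @virt_addr: the virtual address to translate.
 * @phys_addr: set here the resulting physical address.
 *
 * Return: 0 on success, otherwise a non-zero error code from the TLB lookup.
 */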
int hl_mmu_va_to_pa(struct hl_ctx *ctx, u64 virt_addr, u64 *phys_addr)
{
	struct hl_mmu_hop_info hops;
	int rc;

	memset(&hops, 0, sizeof(hops));

	rc = hl_mmu_get_tlb_info(ctx, virt_addr, &hops);
	if (rc)
		return rc;

	hl_mmu_pa_page_with_offset(ctx, virt_addr, &hops, phys_addr);

	return 0;
}

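/**
 * hl_mmu_get_tlb_info() - walk the page tables for a given virtual address
 * @ctx: pointer to the context structure.
 * @virt_addr: the virtual address to look up.
 * @hops: set here the HOPs info of the walk (hop addresses and PTE values).
 *
 * The walk is performed under the device MMU lock, using the resident-side
 * get_tlb_info() implementation selected for the address range.
 *
 * Return: 0 on success, otherwise non-zero error code.
 */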
int hl_mmu_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr,
			struct hl_mmu_hop_info *hops)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop;
	struct hl_mmu_properties *mmu_prop;
	struct hl_mmu_funcs *mmu_funcs;
	int pgt_residency, rc;
	bool is_dram_addr;

	if (hdev->mmu_disable)
		return -EOPNOTSUPP;

	prop = &hdev->asic_prop;
	hops->scrambled_vaddr = virt_addr;	/* assume no scrambling */

	is_dram_addr = hl_mem_area_inside_range(virt_addr, prop->dmmu.page_size,
						prop->dmmu.start_addr,
						prop->dmmu.end_addr);

	/* host-residency is the same in PMMU and PMMU huge, no need to distinguish here */
	mmu_prop = is_dram_addr ? &prop->dmmu : &prop->pmmu;
	pgt_residency = mmu_prop->host_resident ? MMU_HR_PGT : MMU_DR_PGT;
	mmu_funcs = hl_mmu_get_funcs(hdev, pgt_residency, is_dram_addr);

	mutex_lock(&hdev->mmu_lock);
	rc = mmu_funcs->get_tlb_info(ctx, virt_addr, hops);
	mutex_unlock(&hdev->mmu_lock);

	if (rc)
		return rc;

	/* add page offset to physical address */
	if (hops->unscrambled_paddr)
		hl_mmu_pa_page_with_offset(ctx, virt_addr, hops, &hops->unscrambled_paddr);

	return 0;
}

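/**
 * hl_mmu_if_set_funcs() - set the MMU functions according to the ASIC type
 * @hdev: habanalabs device structure.
 *
 * Assign the device-resident (and, where relevant, host-resident) MMU
 * implementations for the detected ASIC.
 *
 * Return: 0 on success, -EOPNOTSUPP for an unrecognized ASIC type.
 */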
int hl_mmu_if_set_funcs(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;

	if (hdev->mmu_disable)
		return 0;

	switch (hdev->asic_type) {
	case ASIC_GOYA:
	case ASIC_GAUDI:
	case ASIC_GAUDI_SEC:
		hl_mmu_v1_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]);
		break;
	case ASIC_GAUDI2:
	case ASIC_GAUDI2B:
	case ASIC_GAUDI2C:
	case ASIC_GAUDI2D:
		hl_mmu_v2_set_funcs(hdev, &hdev->mmu_func[MMU_DR_PGT]);
		if (prop->pmmu.host_resident)
			hl_mmu_v2_hr_set_funcs(hdev, &hdev->mmu_func[MMU_HR_PGT]);
		break;
	default:
		dev_err(hdev->dev, "Unrecognized ASIC type %d\n",
			hdev->asic_type);
		return -EOPNOTSUPP;
	}

	return 0;
}

/**
 * hl_mmu_scramble_addr() - The generic mmu address scrambling routine.
 * @hdev: pointer to device data.
 * @addr: The address to scramble.
 *
 * Return: The scrambled address.
 */
u64 hl_mmu_scramble_addr(struct hl_device *hdev, u64 addr)
{
	return addr;
}

/**
 * hl_mmu_descramble_addr() - The generic mmu address descrambling
 * routine.
 * @hdev: pointer to device data.
 * @addr: The address to descramble.
 *
 * Return: The un-scrambled address.
 */
u64 hl_mmu_descramble_addr(struct hl_device *hdev, u64 addr)
{
	return addr;
}

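/**
 * hl_mmu_invalidate_cache() - invalidate the entire MMU cache of the device
 * @hdev: habanalabs device structure.
 * @is_hard: whether a hard invalidation is required.
 * @flags: indicates which cache to invalidate (VM_TYPE_USERPTR targets the
 *         PMMU cache, otherwise the HMMU cache).
 *
 * Return: 0 on success, otherwise the ASIC-specific error code.
 */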
int hl_mmu_invalidate_cache(struct hl_device *hdev, bool is_hard, u32 flags)
{
	int rc;

	rc = hdev->asic_funcs->mmu_invalidate_cache(hdev, is_hard, flags);
	if (rc)
		dev_err_ratelimited(hdev->dev,
				"%s: %s cache invalidation failed, rc=%d\n",
				dev_name(&hdev->pdev->dev),
				flags == VM_TYPE_USERPTR ? "PMMU" : "HMMU", rc);

	return rc;
}

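/**
 * hl_mmu_invalidate_cache_range() - invalidate an MMU cache range
 * @hdev: habanalabs device structure.
 * @is_hard: whether a hard invalidation is required.
 * @flags: indicates which cache to invalidate (VM_TYPE_USERPTR targets the
 *         PMMU cache, otherwise the HMMU cache).
 * @asid: the ASID the range belongs to.
 * @va: start of the virtual address range.
 * @size: size of the range to invalidate.
 *
 * Return: 0 on success, otherwise the ASIC-specific error code.
 */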
int hl_mmu_invalidate_cache_range(struct hl_device *hdev, bool is_hard,
					u32 flags, u32 asid, u64 va, u64 size)
{
	int rc;

	rc = hdev->asic_funcs->mmu_invalidate_cache_range(hdev, is_hard, flags,
								asid, va, size);
	if (rc)
		dev_err_ratelimited(hdev->dev,
				"%s: %s cache range invalidation failed: va=%#llx, size=%llu, rc=%d",
				dev_name(&hdev->pdev->dev), flags == VM_TYPE_USERPTR ? "PMMU" : "HMMU",
				va, size, rc);

	return rc;
}

static void hl_mmu_prefetch_work_function(struct work_struct *work)
{
	struct hl_prefetch_work *pfw = container_of(work, struct hl_prefetch_work, prefetch_work);
	struct hl_ctx *ctx = pfw->ctx;
	struct hl_device *hdev = ctx->hdev;

	if (!hl_device_operational(hdev, NULL))
		goto put_ctx;

	mutex_lock(&hdev->mmu_lock);

	hdev->asic_funcs->mmu_prefetch_cache_range(ctx, pfw->flags, pfw->asid, pfw->va, pfw->size);

	mutex_unlock(&hdev->mmu_lock);

put_ctx:
	/*
	 * The context was taken in the common mmu prefetch function - see
	 * the comment there about context handling.
	 */
	hl_ctx_put(ctx);
	kfree(pfw);
}

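/**
 * hl_mmu_prefetch_cache_range() - queue an asynchronous MMU cache prefetch
 * @ctx: pointer to the context structure.
 * @flags: cache type flags, passed through to the ASIC prefetch handler.
 * @asid: the ASID the range belongs to.
 * @va: start of the virtual address range.
 * @size: size of the range to prefetch.
 *
 * The actual prefetch is performed from the dedicated prefetch workqueue.
 *
 * Return: 0 on success, -ENOMEM if the work item allocation failed.
 */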
int hl_mmu_prefetch_cache_range(struct hl_ctx *ctx, u32 flags, u32 asid, u64 va, u64 size)
{
	struct hl_prefetch_work *handle_prefetch_work;

	handle_prefetch_work = kmalloc(sizeof(*handle_prefetch_work), GFP_KERNEL);
	if (!handle_prefetch_work)
		return -ENOMEM;

	INIT_WORK(&handle_prefetch_work->prefetch_work, hl_mmu_prefetch_work_function);
	handle_prefetch_work->ctx = ctx;
	handle_prefetch_work->va = va;
	handle_prefetch_work->size = size;
	handle_prefetch_work->flags = flags;
	handle_prefetch_work->asid = asid;

	/*
	 * as actual prefetch is done in a WQ we must get the context (and put it
	 * at the end of the work function)
	 */
	hl_ctx_get(ctx);
	queue_work(ctx->hdev->prefetch_wq, &handle_prefetch_work->prefetch_work);

	return 0;
}

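/**
 * hl_mmu_get_next_hop_addr() - get the address of the next HOP from a PTE
 * @ctx: pointer to the context structure.
 * @curr_pte: the PTE to extract the next HOP address from.
 *
 * Return: the next HOP physical address if the PTE is present,
 *         otherwise ULLONG_MAX.
 */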
u64 hl_mmu_get_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte)
{
	return (curr_pte & PAGE_PRESENT_MASK) ? (curr_pte & HOP_PHYS_ADDR_MASK) : ULLONG_MAX;
}

/**
 * hl_mmu_get_hop_pte_phys_addr() - extract PTE address from HOP
 * @ctx: pointer to the context structure.
 * @mmu_prop: MMU properties.
 * @hop_idx: HOP index.
 * @hop_addr: HOP address.
 * @virt_addr: virtual address for the translation.
 *
 * @return the matching PTE value on success, otherwise U64_MAX.
 */
u64 hl_mmu_get_hop_pte_phys_addr(struct hl_ctx *ctx, struct hl_mmu_properties *mmu_prop,
					u8 hop_idx, u64 hop_addr, u64 virt_addr)
{
	u64 mask, shift;

	if (hop_idx >= mmu_prop->num_hops) {
		dev_err_ratelimited(ctx->hdev->dev, "Invalid hop index %d\n", hop_idx);
		return U64_MAX;
	}

	shift = mmu_prop->hop_shifts[hop_idx];
	mask = mmu_prop->hop_masks[hop_idx];

	return hop_addr + ctx->hdev->asic_prop.mmu_pte_size * ((virt_addr & mask) >> shift);
}

static void mmu_dma_mem_free_from_chunk(struct gen_pool *pool,
					struct gen_pool_chunk *chunk,
					void *data)
{
	struct hl_device *hdev = data;

	hl_asic_dma_free_coherent(hdev, (chunk->end_addr - chunk->start_addr) + 1,
					(void *)chunk->start_addr, chunk->phys_addr);
}

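/**
 * hl_mmu_hr_flush() - flush the host-resident page tables
 * @ctx: pointer to the context structure.
 *
 * As the page tables reside in host memory, a memory barrier is all that is
 * needed to order the PTE writes before the device accesses them.
 */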
void hl_mmu_hr_flush(struct hl_ctx *ctx)
{
	/* a flush operation requires memory barrier */
	mb();
}

/**
 * hl_mmu_hr_pool_destroy() - destroy genpool
 * @hdev: habanalabs device structure.
 * @hr_priv: MMU HR private data.
 * @hop_table_size: HOP table size.
 *
 * This function does the following:
 * - free entries allocated for shadow HOP0
 * - free pool chunks
 * - free pool
 */
static void hl_mmu_hr_pool_destroy(struct hl_device *hdev, struct hl_mmu_hr_priv *hr_priv,
					u32 hop_table_size)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct gen_pool **pool = &hr_priv->mmu_pgt_pool;
	struct pgt_info *hop0_pgt;
	int asid;

	if (ZERO_OR_NULL_PTR(*pool))
		return;

	/* Free the Fixed allocation of HOPs0 */
	if (hr_priv->mmu_asid_hop0) {
		for (asid = 0 ; asid < prop->max_asid ; asid++) {
			hop0_pgt = &hr_priv->mmu_asid_hop0[asid];
			if (ZERO_OR_NULL_PTR(hop0_pgt->virt_addr))
				continue;

			gen_pool_free(*pool, (uintptr_t) hop0_pgt->virt_addr, hop_table_size);
		}
	}

	gen_pool_for_each_chunk(*pool, mmu_dma_mem_free_from_chunk, hdev);
	gen_pool_destroy(*pool);

	/* Make sure that if we arrive here again without init was called we
	 * won't cause kernel panic. This can happen for example if we fail
	 * during hard reset code at certain points
	 */
	*pool = NULL;
}

/**
 * hl_mmu_hr_init() - initialize the MMU module.
 * @hdev: habanalabs device structure.
 * @hr_priv: MMU HR private data.
 * @hop_table_size: HOP table size.
 * @pgt_size: memory size allocated for the page table
 *
 * @return 0 on success otherwise non-zero error code
 *
 * This function does the following:
 * - Create a pool of pages for pgt_infos.
 * - Create a shadow table for pgt
 */
int hl_mmu_hr_init(struct hl_device *hdev, struct hl_mmu_hr_priv *hr_priv, u32 hop_table_size,
			u64 pgt_size)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	size_t pool_chunk_size = SZ_4M;
	struct pgt_info *hop0_pgt;
	dma_addr_t dma_addr;
	u64 virt_addr;
	int i, rc;

	/*
	 * we set alloc size as PAGE_SIZE (since dma_alloc_coherent allocation order/size is
	 * PAGE_SHIFT/PAGE_SIZE) in order to be able to control the allocations alignment.
	 * This way we can call "DMA alloc align" according to dma_alloc granularity and supply
	 * allocations with higher-order alignment restrictions
	 */
	hr_priv->mmu_pgt_pool = gen_pool_create(PAGE_SHIFT, -1);
	if (ZERO_OR_NULL_PTR(hr_priv->mmu_pgt_pool)) {
		dev_err(hdev->dev, "Failed to create hr page pool\n");
		return -ENOMEM;
	}

	hr_priv->mmu_asid_hop0 = kvcalloc(prop->max_asid, sizeof(struct pgt_info), GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(hr_priv->mmu_asid_hop0)) {
		dev_err(hdev->dev, "Failed to allocate hr-mmu hop0 table\n");
		rc = -ENOMEM;
		goto destroy_mmu_pgt_pool;
	}

	for (i = 0 ; i < pgt_size ; i += pool_chunk_size) {
		virt_addr = (uintptr_t) hl_asic_dma_alloc_coherent(hdev, pool_chunk_size,
									&dma_addr,
									GFP_KERNEL | __GFP_ZERO);
		if (ZERO_OR_NULL_PTR(virt_addr)) {
			dev_err(hdev->dev,
				"Failed to allocate memory for host-resident page pool\n");
			rc = -ENOMEM;
			goto destroy_mmu_pgt_pool;
		}

		rc = gen_pool_add_virt(hr_priv->mmu_pgt_pool, virt_addr, (phys_addr_t) dma_addr,
					pool_chunk_size, -1);
		if (rc) {
			dev_err(hdev->dev, "Failed to fill host-resident page pool\n");
			goto destroy_mmu_pgt_pool;
		}
	}

	for (i = 0 ; i < prop->max_asid ; i++) {
		hop0_pgt = &hr_priv->mmu_asid_hop0[i];
		hop0_pgt->virt_addr = (uintptr_t)
					gen_pool_dma_zalloc_align(hr_priv->mmu_pgt_pool,
								hop_table_size,
								(dma_addr_t *) &hop0_pgt->phys_addr,
								hop_table_size);
		if (!hop0_pgt->virt_addr) {
			dev_err(hdev->dev, "Failed to allocate HOP from pgt pool\n");
			rc = -ENOMEM;
			goto destroy_mmu_pgt_pool;
		}
	}

	/* MMU H/W init will be done in device hw_init() */

	return 0;

destroy_mmu_pgt_pool:
	hl_mmu_hr_pool_destroy(hdev, hr_priv, hop_table_size);
	if (!ZERO_OR_NULL_PTR(hr_priv->mmu_asid_hop0))
		kvfree(hr_priv->mmu_asid_hop0);

	return rc;
}

/**
 * hl_mmu_hr_fini() - release the MMU module.
 * @hdev: habanalabs device structure.
 * @hr_priv: MMU host resident private info.
 * @hop_table_size: HOP table size
 *
 * This function does the following:
 * - Disable MMU in H/W.
 * - Free the pgt_infos pool.
 *
 * All contexts should be freed before calling this function.
 */
void hl_mmu_hr_fini(struct hl_device *hdev, struct hl_mmu_hr_priv *hr_priv, u32 hop_table_size)
{
	/* MMU H/W fini was already done in device hw_fini() */

	hl_mmu_hr_pool_destroy(hdev, hr_priv, hop_table_size);

	if (!ZERO_OR_NULL_PTR(hr_priv->mmu_asid_hop0)) {
		kvfree(hr_priv->mmu_asid_hop0);

		/* Make sure that if we arrive here again without init was
		 * called we won't cause kernel panic. This can happen for
		 * example if we fail during hard reset code at certain points
		 */
		hr_priv->mmu_asid_hop0 = NULL;
	}
}

/**
 * hl_mmu_hr_free_hop_remove_pgt() - free HOP and remove PGT from hash
 * @pgt_info: page table info structure.
 * @hr_priv: MMU HR private data.
 * @hop_table_size: HOP table size.
 */
void hl_mmu_hr_free_hop_remove_pgt(struct pgt_info *pgt_info, struct hl_mmu_hr_priv *hr_priv,
					u32 hop_table_size)
{
	gen_pool_free(hr_priv->mmu_pgt_pool, pgt_info->virt_addr, hop_table_size);
	hash_del(&pgt_info->node);
	kfree(pgt_info);
}

/**
 * hl_mmu_hr_pte_phys_to_virt() - translate PTE phys addr to virt addr
 * @ctx: pointer to the context structure
 * @pgt: pgt_info for the HOP hosting the PTE
 * @phys_pte_addr: phys address of the PTE
 * @hop_table_size: HOP table size
 *
 * @return PTE virtual address
 *
 * The function uses the pgt_info to get the HOP base virt addr and obtains
 * the PTE's virt addr by adding the PTE offset.
 */
u64 hl_mmu_hr_pte_phys_to_virt(struct hl_ctx *ctx, struct pgt_info *pgt,
				u64 phys_pte_addr, u32 hop_table_size)
{
	u64 page_mask = (hop_table_size - 1);
	u64 pte_offset = phys_pte_addr & page_mask;

	return pgt->virt_addr + pte_offset;
}

/**
 * hl_mmu_hr_write_pte() - write HR PTE
 * @ctx: pointer to the context structure
 * @pgt_info: HOP's page table info structure
 * @phys_pte_addr: phys PTE address
 * @val: raw PTE data
 * @hop_table_size: HOP table size
 */
void hl_mmu_hr_write_pte(struct hl_ctx *ctx, struct pgt_info *pgt_info, u64 phys_pte_addr,
				u64 val, u32 hop_table_size)
{
	/*
	 * The value to write is the phys address of the next hop +
	 * flags at the 12 LSBs.
	 */
	u64 virt_addr = hl_mmu_hr_pte_phys_to_virt(ctx, pgt_info, phys_pte_addr, hop_table_size);

	*((u64 *) (uintptr_t) virt_addr) = val;
}

/**
 * hl_mmu_hr_clear_pte() - clear HR PTE
 * @ctx: pointer to the context structure
 * @pgt_info: HOP's page table info structure
 * @phys_pte_addr: phys PTE address
 * @hop_table_size: HOP table size
 */
void hl_mmu_hr_clear_pte(struct hl_ctx *ctx, struct pgt_info *pgt_info, u64 phys_pte_addr,
				u32 hop_table_size)
{
	/* no need to transform the value to physical address */
	hl_mmu_hr_write_pte(ctx, pgt_info, phys_pte_addr, 0, hop_table_size);
}

/**
 * hl_mmu_hr_put_pte() - put HR PTE and remove it if necessary (no more PTEs)
 * @ctx: pointer to the context structure
 * @pgt_info: HOP's page table info structure
 * @hr_priv: HR MMU private info
 * @hop_table_size: HOP table size
 *
 * @return number of PTEs still in the HOP
 */
int hl_mmu_hr_put_pte(struct hl_ctx *ctx, struct pgt_info *pgt_info,
			struct hl_mmu_hr_priv *hr_priv,
			u32 hop_table_size)
{
	int num_of_ptes_left;

	pgt_info->num_of_ptes--;

	/*
	 * Need to save the number of ptes left because free_hop might free
	 * the pgt_info
	 */
	num_of_ptes_left = pgt_info->num_of_ptes;
	if (!num_of_ptes_left)
		hl_mmu_hr_free_hop_remove_pgt(pgt_info, hr_priv, hop_table_size);

	return num_of_ptes_left;
}

/**
 * hl_mmu_hr_get_pte() - increase PGT PTE count
 * @ctx: pointer to the context structure
 * @hr_func: host resident functions
 * @phys_hop_addr: HOP phys address
 */
void hl_mmu_hr_get_pte(struct hl_ctx *ctx, struct hl_hr_mmu_funcs *hr_func, u64 phys_hop_addr)
{
	hr_func->get_pgt_info(ctx, phys_hop_addr)->num_of_ptes++;
}

/**
 * hl_mmu_hr_get_next_hop_pgt_info() - get pgt_info structure for the next HOP
 * @ctx: pointer to the context structure.
 * @hr_func: host resident functions.
 * @curr_pte: current PTE value.
 *
 * @return pgt_info structure on success, otherwise NULL.
 */
struct pgt_info *hl_mmu_hr_get_next_hop_pgt_info(struct hl_ctx *ctx,
							struct hl_hr_mmu_funcs *hr_func,
							u64 curr_pte)
{
	u64 next_hop_phys_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);

	if (next_hop_phys_addr == ULLONG_MAX)
		return NULL;

	return hr_func->get_pgt_info(ctx, next_hop_phys_addr);
}

/**
 * hl_mmu_hr_alloc_hop() - allocate HOP
 * @ctx: pointer to the context structure.
 * @hr_priv: host resident private info structure.
 * @hr_func: host resident functions.
 * @mmu_prop: MMU properties.
 *
 * @return pgt_info structure associated with the allocated HOP on success, otherwise NULL.
 */
struct pgt_info *hl_mmu_hr_alloc_hop(struct hl_ctx *ctx, struct hl_mmu_hr_priv *hr_priv,
					struct hl_hr_mmu_funcs *hr_func,
					struct hl_mmu_properties *mmu_prop)
{
	struct hl_device *hdev = ctx->hdev;
	struct pgt_info *pgt_info;
	dma_addr_t phys_addr;
	void *virt_addr;
	int i, retry = 1;

	pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
	if (!pgt_info)
		return NULL;

	for (i = 0; i <= retry; i++) {
		virt_addr = gen_pool_dma_zalloc_align(hr_priv->mmu_pgt_pool,
							mmu_prop->hop_table_size,
							&phys_addr,
							mmu_prop->hop_table_size);
		if (virt_addr)
			break;

		/* No memory in pool - get some and try again */
		virt_addr = hl_asic_dma_alloc_coherent(hdev, SZ_2M, &phys_addr,
							GFP_KERNEL | __GFP_ZERO);
		if (ZERO_OR_NULL_PTR(virt_addr))
			break;

		if (gen_pool_add_virt(hr_priv->mmu_pgt_pool, (unsigned long)virt_addr,
					phys_addr, SZ_2M, -1)) {
			hl_asic_dma_free_coherent(hdev, SZ_2M, virt_addr, phys_addr);
			virt_addr = NULL;
			break;
		}
	}

	if (ZERO_OR_NULL_PTR(virt_addr)) {
		dev_err(hdev->dev, "failed to allocate page\n");
		goto pool_alloc_err;
	}

	pgt_info->phys_addr = phys_addr;
	pgt_info->shadow_addr = (unsigned long) NULL;
	pgt_info->virt_addr = (unsigned long)virt_addr;
	pgt_info->ctx = ctx;
	pgt_info->num_of_ptes = 0;
	hr_func->add_pgt_info(ctx, pgt_info, phys_addr);

	return pgt_info;

pool_alloc_err:
	kfree(pgt_info);

	return NULL;
}

/**
 * hl_mmu_hr_get_alloc_next_hop() - get the next HOP, allocate it if it does not exist
 * @ctx: pointer to the context structure.
 * @hr_priv: host resident private info structure.
 * @hr_func: host resident functions.
 * @mmu_prop: MMU properties.
 * @curr_pte: current PTE value.
 * @is_new_hop: set to true if HOP is new (caller responsibility to set it to false).
 *
 * @return pgt_info structure associated with the allocated HOP on success, otherwise NULL.
 */
struct pgt_info *hl_mmu_hr_get_alloc_next_hop(struct hl_ctx *ctx,
						struct hl_mmu_hr_priv *hr_priv,
						struct hl_hr_mmu_funcs *hr_func,
						struct hl_mmu_properties *mmu_prop,
						u64 curr_pte, bool *is_new_hop)
{
	u64 hop_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);

	if (hop_addr != ULLONG_MAX)
		return hr_func->get_pgt_info(ctx, hop_addr);

	*is_new_hop = true;
	return hl_mmu_hr_alloc_hop(ctx, hr_priv, hr_func, mmu_prop);
}

/**
 * hl_mmu_hr_get_tlb_info() - get the TLB info (info for a specific mapping)
 * @ctx: pointer to the context structure.
 * @virt_addr: the virt address for which to get info.
 * @hops: HOPs info structure.
 * @hr_func: host resident functions.
 *
 * @return 0 on success, otherwise non 0 error code.
 */
int hl_mmu_hr_get_tlb_info(struct hl_ctx *ctx, u64 virt_addr, struct hl_mmu_hop_info *hops,
				struct hl_hr_mmu_funcs *hr_func)
{
	/* using 6 HOPs as this is the maximum number of HOPs */
	struct pgt_info *hops_pgt_info[MMU_ARCH_6_HOPS] = { NULL };
	struct hl_device *hdev = ctx->hdev;
	struct hl_mmu_properties *mmu_prop;
	int rc, i, used_hops;
	bool is_huge;

	rc = hr_func->get_tlb_mapping_params(hdev, &mmu_prop, hops, virt_addr, &is_huge);
	if (rc)
		return rc;

	used_hops = mmu_prop->num_hops;

	/* huge pages use one less hop */
	if (is_huge)
		used_hops--;

	hops->scrambled_vaddr = hdev->asic_funcs->scramble_addr(hdev, virt_addr);

	for (i = 0 ; i < used_hops ; i++) {
		if (i == 0)
			hops_pgt_info[i] = hr_func->get_hop0_pgt_info(ctx);
		else
			hops_pgt_info[i] = hl_mmu_hr_get_next_hop_pgt_info(ctx, hr_func,
									hops->hop_info[i - 1].hop_pte_val);

		if (!hops_pgt_info[i])
			return -EFAULT;

		hops->hop_info[i].hop_addr = hops_pgt_info[i]->phys_addr;
		hops->hop_info[i].hop_pte_addr =
				hl_mmu_get_hop_pte_phys_addr(ctx, mmu_prop, i,
								hops->hop_info[i].hop_addr,
								hops->scrambled_vaddr);
		hops->hop_info[i].hop_pte_val = *(u64 *) (uintptr_t)
				hl_mmu_hr_pte_phys_to_virt(ctx, hops_pgt_info[i],
								hops->hop_info[i].hop_pte_addr,
								mmu_prop->hop_table_size);

		if (!(hops->hop_info[i].hop_pte_val & PAGE_PRESENT_MASK))
			return -EFAULT;

		if (hops->hop_info[i].hop_pte_val & mmu_prop->last_mask)
			break;
	}

	/* if passed over all hops then no last hop was found */
	if (i == mmu_prop->num_hops)
		return -EFAULT;

	if (hops->scrambled_vaddr != virt_addr)
		hops->unscrambled_paddr = hdev->asic_funcs->descramble_addr
				(hdev, hops->hop_info[i].hop_pte_val);
	else
		hops->unscrambled_paddr = hops->hop_info[i].hop_pte_val;

	hops->used_hops = i + 1;

	return 0;
}

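/**
 * hl_mmu_dr_get_pgt_info() - get pgt_info of a device-resident HOP
 * @ctx: pointer to the context structure.
 * @hop_addr: shadow address of the HOP.
 *
 * Return: the pgt_info node from the context's shadow hash whose shadow
 *         address matches @hop_addr, or NULL if none was found.
 */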
struct pgt_info *hl_mmu_dr_get_pgt_info(struct hl_ctx *ctx, u64 hop_addr)
{
	struct pgt_info *pgt_info = NULL;

	hash_for_each_possible(ctx->mmu_shadow_hash, pgt_info, node,
				(unsigned long) hop_addr)
		if (hop_addr == pgt_info->shadow_addr)
			break;

	return pgt_info;
}

void hl_mmu_dr_free_hop(struct hl_ctx *ctx, u64 hop_addr)
{
	struct pgt_info *pgt_info = hl_mmu_dr_get_pgt_info(ctx, hop_addr);

	hl_mmu_dr_free_pgt_node(ctx, pgt_info);
}

void hl_mmu_dr_free_pgt_node(struct hl_ctx *ctx, struct pgt_info *pgt_info)
{
	struct hl_device *hdev = ctx->hdev;

	gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool, pgt_info->phys_addr,
			hdev->asic_prop.dmmu.hop_table_size);
	hash_del(&pgt_info->node);
	kfree((u64 *) (uintptr_t) pgt_info->shadow_addr);
	kfree(pgt_info);
}

u64 hl_mmu_dr_get_phys_hop0_addr(struct hl_ctx *ctx)
{
	return ctx->hdev->asic_prop.mmu_pgt_addr +
			(ctx->asid * ctx->hdev->asic_prop.dmmu.hop_table_size);
}

u64 hl_mmu_dr_get_hop0_addr(struct hl_ctx *ctx)
{
	return (u64) (uintptr_t) ctx->hdev->mmu_priv.dr.mmu_shadow_hop0 +
			(ctx->asid * ctx->hdev->asic_prop.dmmu.hop_table_size);
}

u64 hl_mmu_dr_get_phys_addr(struct hl_ctx *ctx, u64 shadow_addr)
{
	u64 page_mask = ctx->hdev->asic_prop.dmmu.hop_table_size - 1;
	u64 shadow_hop_addr = shadow_addr & (~page_mask);
	u64 pte_offset = shadow_addr & page_mask;
	u64 phys_hop_addr;

	if (shadow_hop_addr != hl_mmu_dr_get_hop0_addr(ctx))
		phys_hop_addr = hl_mmu_dr_get_pgt_info(ctx, shadow_hop_addr)->phys_addr;
	else
		phys_hop_addr = hl_mmu_dr_get_phys_hop0_addr(ctx);

	return phys_hop_addr + pte_offset;
}

void hl_mmu_dr_write_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
{
	u64 phys_val = hl_mmu_dr_get_phys_addr(ctx, val);

	ctx->hdev->asic_funcs->write_pte(ctx->hdev, hl_mmu_dr_get_phys_addr(ctx, shadow_pte_addr),
					phys_val);

	*(u64 *) (uintptr_t) shadow_pte_addr = val;
}

void hl_mmu_dr_write_final_pte(struct hl_ctx *ctx, u64 shadow_pte_addr, u64 val)
{
	ctx->hdev->asic_funcs->write_pte(ctx->hdev,
					hl_mmu_dr_get_phys_addr(ctx, shadow_pte_addr), val);
	*(u64 *) (uintptr_t) shadow_pte_addr = val;
}

void hl_mmu_dr_clear_pte(struct hl_ctx *ctx, u64 pte_addr)
{
	hl_mmu_dr_write_final_pte(ctx, pte_addr, 0);
}

void hl_mmu_dr_get_pte(struct hl_ctx *ctx, u64 hop_addr)
{
	hl_mmu_dr_get_pgt_info(ctx, hop_addr)->num_of_ptes++;
}

int hl_mmu_dr_put_pte(struct hl_ctx *ctx, u64 hop_addr)
{
	struct pgt_info *pgt_info = hl_mmu_dr_get_pgt_info(ctx, hop_addr);
	int num_of_ptes_left;

	pgt_info->num_of_ptes--;

	/*
	 * Need to save the number of ptes left because hl_mmu_free_hop might free
	 * the pgt_info
	 */
	num_of_ptes_left = pgt_info->num_of_ptes;
	if (!num_of_ptes_left)
		hl_mmu_dr_free_pgt_node(ctx, pgt_info);

	return num_of_ptes_left;
}

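/**
 * hl_mmu_dr_alloc_hop() - allocate a device-resident HOP
 * @ctx: pointer to the context structure.
 *
 * Allocate a HOP table from the device page-table pool together with a host
 * shadow copy, and add the new pgt_info node to the shadow hash.
 *
 * Return: the shadow address of the new HOP on success, otherwise ULLONG_MAX.
 */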
u64 hl_mmu_dr_alloc_hop(struct hl_ctx *ctx)
{
	struct hl_device *hdev = ctx->hdev;
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	struct pgt_info *pgt_info;
	u64 phys_addr, shadow_addr;

	pgt_info = kmalloc(sizeof(*pgt_info), GFP_KERNEL);
	if (!pgt_info)
		return ULLONG_MAX;

	phys_addr = (u64) gen_pool_alloc(hdev->mmu_priv.dr.mmu_pgt_pool,
					prop->dmmu.hop_table_size);
	if (!phys_addr) {
		dev_err(hdev->dev, "failed to allocate page\n");
		goto pool_add_err;
	}

	shadow_addr = (u64) (uintptr_t) kzalloc(prop->dmmu.hop_table_size,
						GFP_KERNEL);
	if (!shadow_addr)
		goto shadow_err;

	pgt_info->phys_addr = phys_addr;
	pgt_info->shadow_addr = shadow_addr;
	pgt_info->ctx = ctx;
	pgt_info->num_of_ptes = 0;
	hash_add(ctx->mmu_shadow_hash, &pgt_info->node, shadow_addr);

	return shadow_addr;

shadow_err:
	gen_pool_free(hdev->mmu_priv.dr.mmu_pgt_pool,
			phys_addr, prop->dmmu.hop_table_size);
pool_add_err:
	kfree(pgt_info);

	return ULLONG_MAX;
}

u64 hl_mmu_dr_get_alloc_next_hop_addr(struct hl_ctx *ctx, u64 curr_pte, bool *is_new_hop)
{
	u64 hop_addr = hl_mmu_get_next_hop_addr(ctx, curr_pte);

	if (hop_addr == ULLONG_MAX) {
		hop_addr = hl_mmu_dr_alloc_hop(ctx);
		*is_new_hop = (hop_addr != ULLONG_MAX);
	}

	return hop_addr;
}

void hl_mmu_dr_flush(struct hl_ctx *ctx)
{
	/* flush all writes from all cores to reach PCI */
	mb();
	ctx->hdev->asic_funcs->read_pte(ctx->hdev, hl_mmu_dr_get_phys_hop0_addr(ctx));
}

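/**
 * hl_mmu_dr_init() - initialize the device-resident MMU module
 * @hdev: habanalabs device structure.
 *
 * Create the page-table gen pool (excluding the fixed HOP0 tables at the
 * start of the page-table area) and allocate the host shadow copies of HOP0
 * for all ASIDs.
 *
 * Return: 0 on success, -ENOMEM otherwise.
 */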
int hl_mmu_dr_init(struct hl_device *hdev)
{
	struct asic_fixed_properties *prop = &hdev->asic_prop;
	int rc;

	hdev->mmu_priv.dr.mmu_pgt_pool =
			gen_pool_create(__ffs(prop->dmmu.hop_table_size), -1);

	if (!hdev->mmu_priv.dr.mmu_pgt_pool) {
		dev_err(hdev->dev, "Failed to create page gen pool\n");
		return -ENOMEM;
	}

	rc = gen_pool_add(hdev->mmu_priv.dr.mmu_pgt_pool, prop->mmu_pgt_addr +
			prop->dmmu.hop0_tables_total_size,
			prop->dmmu.pgt_size - prop->dmmu.hop0_tables_total_size,
			-1);
	if (rc) {
		dev_err(hdev->dev, "Failed to add memory to page gen pool\n");
		goto err_pool_add;
	}

	hdev->mmu_priv.dr.mmu_shadow_hop0 = kvcalloc(prop->max_asid,
						prop->dmmu.hop_table_size, GFP_KERNEL);
	if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0)) {
		rc = -ENOMEM;
		goto err_pool_add;
	}

	/* MMU H/W init will be done in device hw_init() */

	return 0;

err_pool_add:
	gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);

	return rc;
}

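/**
 * hl_mmu_dr_fini() - release the device-resident MMU module
 * @hdev: habanalabs device structure.
 *
 * Free the HOP0 shadow tables and destroy the page-table gen pool.
 */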
void hl_mmu_dr_fini(struct hl_device *hdev)
{
	/* MMU H/W fini was already done in device hw_fini() */

	if (ZERO_OR_NULL_PTR(hdev->mmu_priv.dr.mmu_shadow_hop0))
		return;

	kvfree(hdev->mmu_priv.dr.mmu_shadow_hop0);
	gen_pool_destroy(hdev->mmu_priv.dr.mmu_pgt_pool);

	/* Make sure that if we arrive here again without init was
	 * called we won't cause kernel panic. This can happen for
	 * example if we fail during hard reset code at certain points
	 */
	hdev->mmu_priv.dr.mmu_shadow_hop0 = NULL;
}