| 1 | /* SPDX-License-Identifier: GPL-2.0-only */ |
| 2 | /* |
| 3 | * Copyright (C) 2020-2023 Intel Corporation |
| 4 | */ |
| 5 | |
| 6 | #ifndef __IVPU_MMU_H__ |
| 7 | #define __IVPU_MMU_H__ |
| 8 | |
| 9 | struct ivpu_device; |
| 10 | |
/* DMA-coherent table shared with the MMU; presumably the SMMU context
 * descriptor table — TODO confirm against ivpu_mmu.c usage.
 */
struct ivpu_mmu_cdtab {
	void *base;	/* CPU virtual address of the table */
	dma_addr_t dma;	/* DMA (device-visible) address of the table */
};
| 15 | |
/* DMA-coherent table shared with the MMU; presumably the SMMU stream
 * table — TODO confirm against ivpu_mmu.c usage.
 */
struct ivpu_mmu_strtab {
	void *base;	/* CPU virtual address of the table */
	dma_addr_t dma;	/* DMA (device-visible) address of the table */
	u64 dma_q;	/* NOTE(review): looks like the precomputed base-register value — confirm */
	u32 base_cfg;	/* cached base-configuration register value (set where initialized) */
};
| 22 | |
/* Circular queue shared with the MMU, used for both the command queue
 * (cmdq) and the event queue (evtq) in struct ivpu_mmu_info.
 */
struct ivpu_mmu_queue {
	void *base;	/* CPU virtual address of the queue ring */
	dma_addr_t dma;	/* DMA (device-visible) address of the queue ring */
	u64 dma_q;	/* NOTE(review): looks like the precomputed base-register value — confirm */
	u32 prod;	/* producer index */
	u32 cons;	/* consumer index */
};
| 30 | |
/* Top-level MMU state for one VPU device. */
struct ivpu_mmu_info {
	struct mutex lock; /* Protects cdtab, strtab, cmdq, on */
	struct ivpu_mmu_cdtab cdtab;	/* context descriptor table */
	struct ivpu_mmu_strtab strtab;	/* stream table */
	struct ivpu_mmu_queue cmdq;	/* command queue */
	struct ivpu_mmu_queue evtq;	/* event queue */
	bool on;			/* true while the MMU is enabled */
};
| 39 | |
| 40 | int ivpu_mmu_init(struct ivpu_device *vdev); |
| 41 | void ivpu_mmu_disable(struct ivpu_device *vdev); |
| 42 | int ivpu_mmu_enable(struct ivpu_device *vdev); |
| 43 | int ivpu_mmu_cd_set(struct ivpu_device *vdev, int ssid, struct ivpu_mmu_pgtable *pgtable); |
| 44 | void ivpu_mmu_cd_clear(struct ivpu_device *vdev, int ssid); |
| 45 | int ivpu_mmu_invalidate_tlb(struct ivpu_device *vdev, u16 ssid); |
| 46 | |
/* Interrupt handlers: event queue and global error interrupts. */
void ivpu_mmu_irq_evtq_handler(struct ivpu_device *vdev);
void ivpu_mmu_irq_gerr_handler(struct ivpu_device *vdev);
/* Event queue maintenance: dump pending events, drop them, or mask
 * event reporting for one SSID.
 */
void ivpu_mmu_evtq_dump(struct ivpu_device *vdev);
void ivpu_mmu_discard_events(struct ivpu_device *vdev);
int ivpu_mmu_disable_ssid_events(struct ivpu_device *vdev, u32 ssid);
| 52 | |
| 53 | #endif /* __IVPU_MMU_H__ */ |
| 54 | |