#ifndef __NVKM_VMM_H__
#define __NVKM_VMM_H__
#include "priv.h"
#include <core/memory.h>
enum nvkm_memory_target;

struct nvkm_vmm_pt {
	/* Some GPUs have a mapping level with dual page tables to
	 * support large and small pages in the same address-range.
	 *
	 * We track the state of both page tables in one place, which
	 * is why there are multiple PT pointers/refcounts here.
	 */
	struct nvkm_mmu_pt *pt[2];
	u32 refs[2];

	/* Page size handled by this PT.
	 *
	 * The Tesla backend needs to know this when writing PDEs,
	 * otherwise unnecessary.
	 */
	u8 page;

	/* The entire page table is sparse.
	 *
	 * Used to propagate sparseness to child page tables.
	 */
	bool sparse:1;

	/* Tracking for page directories.
	 *
	 * The array is indexed by PDE, and will either point to the
	 * child page table, or indicate the PDE is marked as sparse.
	 */
#define NVKM_VMM_PDE_INVALID(pde) IS_ERR_OR_NULL(pde)
#define NVKM_VMM_PDE_SPARSED(pde) IS_ERR(pde)
#define NVKM_VMM_PDE_SPARSE ERR_PTR(-EBUSY)
	struct nvkm_vmm_pt **pde;

	/* Tracking for dual page tables.
	 *
	 * There's one entry for each LPTE, keeping track of whether
	 * there are valid SPTEs in the same address-range.
	 *
	 * This information is used to manage LPTE state transitions.
	 */
#define NVKM_VMM_PTE_SPARSE 0x80
#define NVKM_VMM_PTE_VALID 0x40
#define NVKM_VMM_PTE_SPTES 0x3f
	u8 pte[];
};
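
/* Illustrative sketch (not part of the driver): how the pte[] state byte for
 * one LPTE might be decoded, assuming the NVKM_VMM_PTE_* bits above.  The
 * helper name is hypothetical.
 *
 *	static bool
 *	lpte_has_sptes(const struct nvkm_vmm_pt *pgt, u32 lptei)
 *	{
 *		const u8 state = pgt->pte[lptei];
 *		return (state & NVKM_VMM_PTE_SPTES) != 0;
 *	}
 */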

typedef void (*nvkm_vmm_pxe_func)(struct nvkm_vmm *,
				  struct nvkm_mmu_pt *, u32 ptei, u32 ptes);
typedef void (*nvkm_vmm_pde_func)(struct nvkm_vmm *,
				  struct nvkm_vmm_pt *, u32 pdei);
typedef void (*nvkm_vmm_pte_func)(struct nvkm_vmm *, struct nvkm_mmu_pt *,
				  u32 ptei, u32 ptes, struct nvkm_vmm_map *);

struct nvkm_vmm_desc_func {
	nvkm_vmm_pxe_func invalid;
	nvkm_vmm_pxe_func unmap;
	nvkm_vmm_pxe_func sparse;

	nvkm_vmm_pde_func pde;

	nvkm_vmm_pte_func mem;
	nvkm_vmm_pte_func dma;
	nvkm_vmm_pte_func sgl;

	nvkm_vmm_pte_func pfn;
	bool (*pfn_clear)(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32 ptei, u32 ptes);
	nvkm_vmm_pxe_func pfn_unmap;
};
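
/* Illustrative sketch: how a backend might populate a desc_func for a page
 * table, reusing the gf100 helpers declared below.  The table name and the
 * exact set of hooks are assumptions for illustration only.
 *
 *	static const struct nvkm_vmm_desc_func
 *	xx00_vmm_pgt = {
 *		.unmap = gf100_vmm_pgt_unmap,
 *		.mem = gf100_vmm_pgt_mem,
 *		.dma = gf100_vmm_pgt_dma,
 *		.sgl = gf100_vmm_pgt_sgl,
 *	};
 */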

extern const struct nvkm_vmm_desc_func gf100_vmm_pgd;
void gf100_vmm_pgd_pde(struct nvkm_vmm *, struct nvkm_vmm_pt *, u32);
extern const struct nvkm_vmm_desc_func gf100_vmm_pgt;
void gf100_vmm_pgt_unmap(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32);
void gf100_vmm_pgt_mem(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32,
		       struct nvkm_vmm_map *);
void gf100_vmm_pgt_dma(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32,
		       struct nvkm_vmm_map *);
void gf100_vmm_pgt_sgl(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32,
		       struct nvkm_vmm_map *);

void gk104_vmm_lpt_invalid(struct nvkm_vmm *, struct nvkm_mmu_pt *, u32, u32);

struct nvkm_vmm_desc {
	enum {
		PGD,
		PGT,
		SPT,
		LPT,
	} type;
	u8 bits; /* VMA bits covered by PT. */
	u8 size; /* Bytes-per-PTE. */
	u32 align; /* PT address alignment. */
	const struct nvkm_vmm_desc_func *func;
};
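
/* Illustrative sketch: a descriptor array describes one walk from the leaf
 * page table up to the root page directory, leaf level first, terminated by
 * an empty entry.  The levels, bit counts and table names below are
 * hypothetical.
 *
 *	static const struct nvkm_vmm_desc
 *	xx00_vmm_desc_12[] = {
 *		{ SPT, 10, 8, 0x1000, &xx00_vmm_pgt },
 *		{ PGD, 13, 8, 0x1000, &xx00_vmm_pgd },
 *		{}
 *	};
 */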

extern const struct nvkm_vmm_desc nv50_vmm_desc_12[];
extern const struct nvkm_vmm_desc nv50_vmm_desc_16[];

extern const struct nvkm_vmm_desc gk104_vmm_desc_16_12[];
extern const struct nvkm_vmm_desc gk104_vmm_desc_16_16[];
extern const struct nvkm_vmm_desc gk104_vmm_desc_17_12[];
extern const struct nvkm_vmm_desc gk104_vmm_desc_17_17[];

extern const struct nvkm_vmm_desc gm200_vmm_desc_16_12[];
extern const struct nvkm_vmm_desc gm200_vmm_desc_16_16[];
extern const struct nvkm_vmm_desc gm200_vmm_desc_17_12[];
extern const struct nvkm_vmm_desc gm200_vmm_desc_17_17[];

extern const struct nvkm_vmm_desc gp100_vmm_desc_12[];
extern const struct nvkm_vmm_desc gp100_vmm_desc_16[];

struct nvkm_vmm_page {
	u8 shift;
	const struct nvkm_vmm_desc *desc;
#define NVKM_VMM_PAGE_SPARSE 0x01
#define NVKM_VMM_PAGE_VRAM 0x02
#define NVKM_VMM_PAGE_HOST 0x04
#define NVKM_VMM_PAGE_COMP 0x08
#define NVKM_VMM_PAGE_Sxxx (NVKM_VMM_PAGE_SPARSE)
#define NVKM_VMM_PAGE_xVxx (NVKM_VMM_PAGE_VRAM)
#define NVKM_VMM_PAGE_SVxx (NVKM_VMM_PAGE_Sxxx | NVKM_VMM_PAGE_VRAM)
#define NVKM_VMM_PAGE_xxHx (NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_SxHx (NVKM_VMM_PAGE_Sxxx | NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_xVHx (NVKM_VMM_PAGE_xVxx | NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_SVHx (NVKM_VMM_PAGE_SVxx | NVKM_VMM_PAGE_HOST)
#define NVKM_VMM_PAGE_xVxC (NVKM_VMM_PAGE_xVxx | NVKM_VMM_PAGE_COMP)
#define NVKM_VMM_PAGE_SVxC (NVKM_VMM_PAGE_SVxx | NVKM_VMM_PAGE_COMP)
#define NVKM_VMM_PAGE_xxHC (NVKM_VMM_PAGE_xxHx | NVKM_VMM_PAGE_COMP)
#define NVKM_VMM_PAGE_SxHC (NVKM_VMM_PAGE_SxHx | NVKM_VMM_PAGE_COMP)
	u8 type;
};
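
/* Illustrative sketch: the nvkm_vmm_func::page[] array below lists the page
 * sizes a VMM supports, largest shift first, terminated by an empty entry.
 * The shifts, descriptor names and type flags here are assumptions for
 * illustration.
 *
 *	.page = {
 *		{ 17, &xx00_vmm_desc_16[0], NVKM_VMM_PAGE_xVxC },
 *		{ 12, &xx00_vmm_desc_12[0], NVKM_VMM_PAGE_xVHx },
 *		{}
 *	},
 */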

struct nvkm_vmm_func {
	int (*join)(struct nvkm_vmm *, struct nvkm_memory *inst);
	void (*part)(struct nvkm_vmm *, struct nvkm_memory *inst);

	int (*aper)(enum nvkm_memory_target);
	int (*valid)(struct nvkm_vmm *, void *argv, u32 argc,
		     struct nvkm_vmm_map *);
	int (*valid2)(struct nvkm_vmm *, bool ro, bool priv, u8 kind, u8 comp,
		      struct nvkm_vmm_map *);
	void (*flush)(struct nvkm_vmm *, int depth);

	int (*mthd)(struct nvkm_vmm *, struct nvkm_client *,
		    u32 mthd, void *argv, u32 argc);

	void (*invalidate_pdb)(struct nvkm_vmm *, u64 addr);

	u64 page_block;
	const struct nvkm_vmm_page page[];
};

struct nvkm_vmm_join {
	struct nvkm_memory *inst;
	struct list_head head;
};

int nvkm_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *,
		  u32, bool managed, u64 addr, u64 size,
		  struct lock_class_key *, const char *name,
		  struct nvkm_vmm **);
struct nvkm_vma *nvkm_vma_new(u64 addr, u64 size);
struct nvkm_vma *nvkm_vmm_node_search(struct nvkm_vmm *, u64 addr);
struct nvkm_vma *nvkm_vmm_node_split(struct nvkm_vmm *, struct nvkm_vma *,
				     u64 addr, u64 size);
int nvkm_vmm_get_locked(struct nvkm_vmm *, bool getref, bool mapref,
			bool sparse, u8 page, u8 align, u64 size,
			struct nvkm_vma **pvma);
void nvkm_vmm_put_locked(struct nvkm_vmm *, struct nvkm_vma *);
void nvkm_vmm_unmap_locked(struct nvkm_vmm *, struct nvkm_vma *, bool pfn);
void nvkm_vmm_unmap_region(struct nvkm_vmm *, struct nvkm_vma *);

int nvkm_vmm_raw_get(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd);
void nvkm_vmm_raw_put(struct nvkm_vmm *vmm, u64 addr, u64 size, u8 refd);
void nvkm_vmm_raw_unmap(struct nvkm_vmm *vmm, u64 addr, u64 size,
			bool sparse, u8 refd);
int nvkm_vmm_raw_sparse(struct nvkm_vmm *, u64 addr, u64 size, bool ref);

static inline bool
nvkm_vmm_in_managed_range(struct nvkm_vmm *vmm, u64 start, u64 size)
{
	u64 p_start = vmm->managed.p.addr;
	u64 p_end = p_start + vmm->managed.p.size;
	u64 n_start = vmm->managed.n.addr;
	u64 n_end = n_start + vmm->managed.n.size;
	u64 end = start + size;

	if (start >= p_start && end <= p_end)
		return true;

	if (start >= n_start && end <= n_end)
		return true;

	return false;
}
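
/* Illustrative caller sketch: raw-mode paths can use the helper above to
 * reject requests that fall outside the client-managed regions.  The
 * surrounding variables are hypothetical.
 *
 *	if (!nvkm_vmm_in_managed_range(vmm, addr, size))
 *		return -EINVAL;
 */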

#define NVKM_VMM_PFN_ADDR 0xfffffffffffff000ULL
#define NVKM_VMM_PFN_ADDR_SHIFT 12
#define NVKM_VMM_PFN_APER 0x00000000000000f0ULL
#define NVKM_VMM_PFN_HOST 0x0000000000000000ULL
#define NVKM_VMM_PFN_VRAM 0x0000000000000010ULL
#define NVKM_VMM_PFN_A 0x0000000000000004ULL
#define NVKM_VMM_PFN_W 0x0000000000000002ULL
#define NVKM_VMM_PFN_V 0x0000000000000001ULL
#define NVKM_VMM_PFN_NONE 0x0000000000000000ULL
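
/* Illustrative sketch: each entry passed to nvkm_vmm_pfn_map() below packs a
 * page-aligned address (in the NVKM_VMM_PFN_ADDR bits) together with aperture
 * and permission flags.  The flag choices here are hypothetical.
 *
 *	u64 entry = (phys & NVKM_VMM_PFN_ADDR) |
 *		    NVKM_VMM_PFN_VRAM | NVKM_VMM_PFN_W | NVKM_VMM_PFN_V;
 */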

int nvkm_vmm_pfn_map(struct nvkm_vmm *, u8 page, u64 addr, u64 size, u64 *pfn);
int nvkm_vmm_pfn_unmap(struct nvkm_vmm *, u64 addr, u64 size);

struct nvkm_vma *nvkm_vma_tail(struct nvkm_vma *, u64 tail);

int nv04_vmm_new_(const struct nvkm_vmm_func *, struct nvkm_mmu *, u32,
		  bool, u64, u64, void *, u32, struct lock_class_key *,
		  const char *, struct nvkm_vmm **);
int nv04_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);

int nv50_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
void nv50_vmm_part(struct nvkm_vmm *, struct nvkm_memory *);
int nv50_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
void nv50_vmm_flush(struct nvkm_vmm *, int);

int gf100_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
		   struct nvkm_mmu *, bool, u64, u64, void *, u32,
		   struct lock_class_key *, const char *, struct nvkm_vmm **);
int gf100_vmm_join_(struct nvkm_vmm *, struct nvkm_memory *, u64 base);
int gf100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
void gf100_vmm_part(struct nvkm_vmm *, struct nvkm_memory *);
int gf100_vmm_aper(enum nvkm_memory_target);
int gf100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
void gf100_vmm_flush(struct nvkm_vmm *, int);
void gf100_vmm_invalidate(struct nvkm_vmm *, u32 type);
void gf100_vmm_invalidate_pdb(struct nvkm_vmm *, u64 addr);

int gk20a_vmm_aper(enum nvkm_memory_target);

int gm200_vmm_new_(const struct nvkm_vmm_func *, const struct nvkm_vmm_func *,
		   struct nvkm_mmu *, bool, u64, u64, void *, u32,
		   struct lock_class_key *, const char *, struct nvkm_vmm **);
int gm200_vmm_join_(struct nvkm_vmm *, struct nvkm_memory *, u64 base);
int gm200_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);

int gp100_vmm_new_(const struct nvkm_vmm_func *,
		   struct nvkm_mmu *, bool, u64, u64, void *, u32,
		   struct lock_class_key *, const char *, struct nvkm_vmm **);
int gp100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);
int gp100_vmm_valid(struct nvkm_vmm *, void *, u32, struct nvkm_vmm_map *);
void gp100_vmm_flush(struct nvkm_vmm *, int);
int gp100_vmm_mthd(struct nvkm_vmm *, struct nvkm_client *, u32, void *, u32);
void gp100_vmm_invalidate_pdb(struct nvkm_vmm *, u64 addr);

int gv100_vmm_join(struct nvkm_vmm *, struct nvkm_memory *);

void tu102_vmm_flush(struct nvkm_vmm *, int depth);

int nv04_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		 struct lock_class_key *, const char *, struct nvkm_vmm **);
int nv41_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		 struct lock_class_key *, const char *, struct nvkm_vmm **);
int nv44_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		 struct lock_class_key *, const char *, struct nvkm_vmm **);
int nv50_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		 struct lock_class_key *, const char *, struct nvkm_vmm **);
int mcp77_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *, struct nvkm_vmm **);
int g84_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		struct lock_class_key *, const char *, struct nvkm_vmm **);
int gf100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *, struct nvkm_vmm **);
int gk104_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *, struct nvkm_vmm **);
int gk20a_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *, struct nvkm_vmm **);
int gm200_vmm_new_fixed(struct nvkm_mmu *, bool, u64, u64, void *, u32,
			struct lock_class_key *, const char *,
			struct nvkm_vmm **);
int gm200_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *,
		  struct nvkm_vmm **);
int gm20b_vmm_new_fixed(struct nvkm_mmu *, bool, u64, u64, void *, u32,
			struct lock_class_key *, const char *,
			struct nvkm_vmm **);
int gm20b_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *,
		  struct nvkm_vmm **);
int gp100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *,
		  struct nvkm_vmm **);
int gp10b_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *,
		  struct nvkm_vmm **);
int gv100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *,
		  struct nvkm_vmm **);
int tu102_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *,
		  struct nvkm_vmm **);
int gh100_vmm_new(struct nvkm_mmu *, bool, u64, u64, void *, u32,
		  struct lock_class_key *, const char *,
		  struct nvkm_vmm **);

#define VMM_PRINT(l,v,p,f,a...) do { \
	struct nvkm_vmm *_vmm = (v); \
	if (CONFIG_NOUVEAU_DEBUG >= (l) && _vmm->debug >= (l)) { \
		nvkm_printk_(&_vmm->mmu->subdev, 0, p, "%s: "f"\n", \
			     _vmm->name, ##a); \
	} \
} while(0)
#define VMM_DEBUG(v,f,a...) VMM_PRINT(NV_DBG_DEBUG, (v), info, f, ##a)
#define VMM_TRACE(v,f,a...) VMM_PRINT(NV_DBG_TRACE, (v), info, f, ##a)
#define VMM_SPAM(v,f,a...) VMM_PRINT(NV_DBG_SPAM , (v), dbg, f, ##a)

#define VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL,BASE,SIZE,NEXT) do { \
	nvkm_kmap((PT)->memory); \
	while (PTEN) { \
		u64 _ptes = ((SIZE) - MAP->off) >> MAP->page->shift; \
		u64 _addr = ((BASE) + MAP->off); \
		\
		if (_ptes > PTEN) { \
			MAP->off += PTEN << MAP->page->shift; \
			_ptes = PTEN; \
		} else { \
			MAP->off = 0; \
			NEXT; \
		} \
		\
		VMM_SPAM(VMM, "ITER %08x %08x PTE(s)", PTEI, (u32)_ptes); \
		\
		FILL(VMM, PT, PTEI, _ptes, MAP, _addr); \
		PTEI += _ptes; \
		PTEN -= _ptes; \
	} \
	nvkm_done((PT)->memory); \
} while(0)

#define VMM_MAP_ITER_MEM(VMM,PT,PTEI,PTEN,MAP,FILL) \
	VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL, \
		     ((u64)MAP->mem->offset << NVKM_RAM_MM_SHIFT), \
		     ((u64)MAP->mem->length << NVKM_RAM_MM_SHIFT), \
		     (MAP->mem = MAP->mem->next))
#define VMM_MAP_ITER_DMA(VMM,PT,PTEI,PTEN,MAP,FILL) \
	VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL, \
		     *MAP->dma, PAGE_SIZE, MAP->dma++)
#define VMM_MAP_ITER_SGL(VMM,PT,PTEI,PTEN,MAP,FILL) \
	VMM_MAP_ITER(VMM,PT,PTEI,PTEN,MAP,FILL, \
		     sg_dma_address(MAP->sgl), sg_dma_len(MAP->sgl), \
		     (MAP->sgl = sg_next(MAP->sgl)))
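
/* Illustrative sketch: a backend's nvkm_vmm_pte_func usually just hands a
 * per-PTE FILL helper to one of the iterators above; the function names here
 * are hypothetical.
 *
 *	static void
 *	xx00_vmm_pgt_sgl(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
 *			 u32 ptei, u32 ptes, struct nvkm_vmm_map *map)
 *	{
 *		VMM_MAP_ITER_SGL(vmm, pt, ptei, ptes, map, xx00_vmm_pgt_pte);
 *	}
 */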

#define VMM_FO(m,o,d,c,b) nvkm_fo##b((m)->memory, (o), (d), (c))
#define VMM_WO(m,o,d,c,b) nvkm_wo##b((m)->memory, (o), (d))
#define VMM_XO(m,v,o,d,c,b,fn,f,a...) do { \
	const u32 _pteo = (o); u##b _data = (d); \
	VMM_SPAM((v), " %010llx "f, (m)->addr + _pteo, _data, ##a); \
	VMM_##fn((m), (m)->base + _pteo, _data, (c), b); \
} while(0)

#define VMM_WO032(m,v,o,d) VMM_XO((m),(v),(o),(d), 1, 32, WO, "%08x")
#define VMM_FO032(m,v,o,d,c) \
	VMM_XO((m),(v),(o),(d),(c), 32, FO, "%08x %08x", (c))

#define VMM_WO064(m,v,o,d) VMM_XO((m),(v),(o),(d), 1, 64, WO, "%016llx")
#define VMM_FO064(m,v,o,d,c) \
	VMM_XO((m),(v),(o),(d),(c), 64, FO, "%016llx %08x", (c))
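
/* Illustrative sketch: a FILL helper for 8-byte PTEs, as passed to the
 * VMM_MAP_ITER_* macros above; the name and PTE encoding are hypothetical.
 *
 *	static inline void
 *	xx00_vmm_pgt_pte(struct nvkm_vmm *vmm, struct nvkm_mmu_pt *pt,
 *			 u32 ptei, u32 ptes, struct nvkm_vmm_map *map, u64 addr)
 *	{
 *		u64 data = (addr >> 8) | map->type;
 *		while (ptes--) {
 *			VMM_WO064(pt, vmm, ptei++ * 8, data);
 *			data += map->next;
 *		}
 *	}
 */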

#define VMM_XO128(m,v,o,lo,hi,c,f,a...) do { \
	u32 _pteo = (o), _ptes = (c); \
	const u64 _addr = (m)->addr + _pteo; \
	VMM_SPAM((v), " %010llx %016llx%016llx"f, _addr, (hi), (lo), ##a); \
	while (_ptes--) { \
		nvkm_wo64((m)->memory, (m)->base + _pteo + 0, (lo)); \
		nvkm_wo64((m)->memory, (m)->base + _pteo + 8, (hi)); \
		_pteo += 0x10; \
	} \
} while(0)

#define VMM_WO128(m,v,o,lo,hi) VMM_XO128((m),(v),(o),(lo),(hi), 1, "")
#define VMM_FO128(m,v,o,lo,hi,c) do { \
	nvkm_kmap((m)->memory); \
	VMM_XO128((m),(v),(o),(lo),(hi),(c), " %08x", (c)); \
	nvkm_done((m)->memory); \
} while(0)
#endif