1 | /* |
2 | * Copyright 2008 Advanced Micro Devices, Inc. |
3 | * Copyright 2008 Red Hat Inc. |
4 | * Copyright 2009 Jerome Glisse. |
5 | * |
6 | * Permission is hereby granted, free of charge, to any person obtaining a |
7 | * copy of this software and associated documentation files (the "Software"), |
8 | * to deal in the Software without restriction, including without limitation |
9 | * the rights to use, copy, modify, merge, publish, distribute, sublicense, |
10 | * and/or sell copies of the Software, and to permit persons to whom the |
11 | * Software is furnished to do so, subject to the following conditions: |
12 | * |
13 | * The above copyright notice and this permission notice shall be included in |
14 | * all copies or substantial portions of the Software. |
15 | * |
16 | * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR |
17 | * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, |
18 | * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL |
19 | * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR |
20 | * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, |
21 | * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR |
22 | * OTHER DEALINGS IN THE SOFTWARE. |
23 | * |
24 | * Authors: Dave Airlie |
25 | * Alex Deucher |
26 | * Jerome Glisse |
27 | */ |
28 | |
29 | #include <linux/debugfs.h> |
30 | #include <linux/pci.h> |
31 | #include <linux/seq_file.h> |
32 | #include <linux/slab.h> |
33 | |
34 | #include <drm/drm.h> |
35 | #include <drm/drm_device.h> |
36 | #include <drm/drm_file.h> |
37 | #include <drm/radeon_drm.h> |
38 | |
39 | #include "r100_track.h" |
40 | #include "r300_reg_safe.h" |
41 | #include "r300d.h" |
42 | #include "radeon.h" |
43 | #include "radeon_asic.h" |
44 | #include "radeon_reg.h" |
45 | #include "rv350d.h" |
46 | |
/* This file gathers functions specific to: r300, r350, rv350, rv370, rv380
 *
 * GPU Errata:
 * - HOST_PATH_CNTL: the r300 family seems to dislike writes to HOST_PATH_CNTL
 *   via MMIO to flush the host path read cache; this leads to a HARDLOCKUP.
 *   However, scheduling such a write on the ring seems harmless; I suspect
 *   the CP read collides with the flush somehow, or maybe the MC, hard to
 *   tell. (Jerome Glisse)
 */
56 | |
57 | /* |
 * Indirect register accessors
59 | */ |
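/* Read a register in the PCIE indirect space: the offset is written to
 * RADEON_PCIE_INDEX and the value read back through RADEON_PCIE_DATA.
 * pcie_idx_lock keeps concurrent users from interleaving the index/data
 * pair.
 */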
60 | uint32_t rv370_pcie_rreg(struct radeon_device *rdev, uint32_t reg) |
61 | { |
62 | unsigned long flags; |
63 | uint32_t r; |
64 | |
65 | spin_lock_irqsave(&rdev->pcie_idx_lock, flags); |
66 | WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); |
67 | r = RREG32(RADEON_PCIE_DATA); |
	spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
69 | return r; |
70 | } |
71 | |
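/* Write a register in the PCIE indirect space through the same
 * RADEON_PCIE_INDEX/RADEON_PCIE_DATA pair, under pcie_idx_lock.
 */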
72 | void rv370_pcie_wreg(struct radeon_device *rdev, uint32_t reg, uint32_t v) |
73 | { |
74 | unsigned long flags; |
75 | |
76 | spin_lock_irqsave(&rdev->pcie_idx_lock, flags); |
77 | WREG32(RADEON_PCIE_INDEX, ((reg) & rdev->pcie_reg_mask)); |
78 | WREG32(RADEON_PCIE_DATA, (v)); |
	spin_unlock_irqrestore(&rdev->pcie_idx_lock, flags);
80 | } |
81 | |
82 | /* |
83 | * rv370,rv380 PCIE GART |
84 | */ |
85 | static void rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev); |
86 | |
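/* Invalidate the PCIE GART TLB by pulsing the INVALIDATE_TLB bit in
 * RADEON_PCIE_TX_GART_CNTL; the read back in between posts the write
 * before the bit is cleared again.
 */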
87 | void rv370_pcie_gart_tlb_flush(struct radeon_device *rdev) |
88 | { |
89 | uint32_t tmp; |
90 | int i; |
91 | |
	/* Work around a HW bug by flushing twice */
93 | for (i = 0; i < 2; i++) { |
94 | tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); |
95 | WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp | RADEON_PCIE_TX_GART_INVALIDATE_TLB); |
96 | (void)RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); |
97 | WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); |
98 | } |
99 | mb(); |
100 | } |
101 | |
102 | #define R300_PTE_UNSNOOPED (1 << 0) |
103 | #define R300_PTE_WRITEABLE (1 << 2) |
104 | #define R300_PTE_READABLE (1 << 3) |
105 | |
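/* Build a GART page table entry. The 40-bit page address is packed into
 * 32 bits: bits 23:0 hold addr[31:8] and bits 31:24 hold addr[39:32].
 * Since pages are at least 4KB aligned, the low four bits of the packed
 * value are zero, which leaves room for the R300_PTE_* flags above.
 */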
106 | uint64_t rv370_pcie_gart_get_page_entry(uint64_t addr, uint32_t flags) |
107 | { |
108 | addr = (lower_32_bits(addr) >> 8) | |
109 | ((upper_32_bits(addr) & 0xff) << 24); |
110 | if (flags & RADEON_GART_PAGE_READ) |
111 | addr |= R300_PTE_READABLE; |
112 | if (flags & RADEON_GART_PAGE_WRITE) |
113 | addr |= R300_PTE_WRITEABLE; |
114 | if (!(flags & RADEON_GART_PAGE_SNOOP)) |
115 | addr |= R300_PTE_UNSNOOPED; |
116 | return addr; |
117 | } |
118 | |
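/* Write a single page table entry; the table lives in VRAM, hence the
 * __iomem pointer, and each entry is one 32-bit word.
 */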
119 | void rv370_pcie_gart_set_page(struct radeon_device *rdev, unsigned i, |
120 | uint64_t entry) |
121 | { |
122 | void __iomem *ptr = rdev->gart.ptr; |
123 | |
	/* On x86 we want this to be CPU endian; on powerpc without HW
	 * swappers it will get swapped on the way into VRAM, so there is
	 * no need for cpu_to_le32 on VRAM tables */
	writel(entry, ((void __iomem *)ptr) + (i * 4));
128 | } |
129 | |
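/* One time PCIE GART setup: initialize the common GART structure, size
 * the table at 4 bytes per GPU page, install the rv370 tlb_flush /
 * get_page_entry / set_page hooks and allocate the table in VRAM.
 */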
130 | int rv370_pcie_gart_init(struct radeon_device *rdev) |
131 | { |
132 | int r; |
133 | |
134 | if (rdev->gart.robj) { |
		WARN(1, "RV370 PCIE GART already initialized\n");
136 | return 0; |
137 | } |
138 | /* Initialize common gart structure */ |
139 | r = radeon_gart_init(rdev); |
140 | if (r) |
141 | return r; |
142 | rv370_debugfs_pcie_gart_info_init(rdev); |
143 | |
144 | rdev->gart.table_size = rdev->gart.num_gpu_pages * 4; |
145 | rdev->asic->gart.tlb_flush = &rv370_pcie_gart_tlb_flush; |
146 | rdev->asic->gart.get_page_entry = &rv370_pcie_gart_get_page_entry; |
147 | rdev->asic->gart.set_page = &rv370_pcie_gart_set_page; |
148 | return radeon_gart_table_vram_alloc(rdev); |
149 | } |
150 | |
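/* Program and enable the PCIE GART: map the GTT aperture, point the unit
 * at the table in VRAM, route discarded (unmapped) reads to the start of
 * VRAM, clear any stale error, then enable the unit and flush the TLB.
 */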
151 | int rv370_pcie_gart_enable(struct radeon_device *rdev) |
152 | { |
153 | uint32_t table_addr; |
154 | uint32_t tmp; |
155 | int r; |
156 | |
157 | if (rdev->gart.robj == NULL) { |
		dev_err(rdev->dev, "No VRAM object for PCIE GART.\n");
159 | return -EINVAL; |
160 | } |
161 | r = radeon_gart_table_vram_pin(rdev); |
162 | if (r) |
163 | return r; |
	/* discard memory requests outside of the configured range */
165 | tmp = RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; |
166 | WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); |
167 | WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, rdev->mc.gtt_start); |
168 | tmp = rdev->mc.gtt_end & ~RADEON_GPU_PAGE_MASK; |
169 | WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, tmp); |
170 | WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0); |
171 | WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0); |
172 | table_addr = rdev->gart.table_addr; |
173 | WREG32_PCIE(RADEON_PCIE_TX_GART_BASE, table_addr); |
174 | /* FIXME: setup default page */ |
175 | WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_LO, rdev->mc.vram_start); |
176 | WREG32_PCIE(RADEON_PCIE_TX_DISCARD_RD_ADDR_HI, 0); |
177 | /* Clear error */ |
178 | WREG32_PCIE(RADEON_PCIE_TX_GART_ERROR, 0); |
179 | tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); |
180 | tmp |= RADEON_PCIE_TX_GART_EN; |
181 | tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; |
182 | WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp); |
183 | rv370_pcie_gart_tlb_flush(rdev); |
	DRM_INFO("PCIE GART of %uM enabled (table at 0x%016llX).\n",
185 | (unsigned)(rdev->mc.gtt_size >> 20), |
186 | (unsigned long long)table_addr); |
187 | rdev->gart.ready = true; |
188 | return 0; |
189 | } |
190 | |
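/* Disable the PCIE GART: zero the aperture, drop the enable bit while
 * keeping unmapped accesses discarded, then unpin the table.
 */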
191 | void rv370_pcie_gart_disable(struct radeon_device *rdev) |
192 | { |
193 | u32 tmp; |
194 | |
195 | WREG32_PCIE(RADEON_PCIE_TX_GART_START_LO, 0); |
196 | WREG32_PCIE(RADEON_PCIE_TX_GART_END_LO, 0); |
197 | WREG32_PCIE(RADEON_PCIE_TX_GART_START_HI, 0); |
198 | WREG32_PCIE(RADEON_PCIE_TX_GART_END_HI, 0); |
199 | tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL); |
200 | tmp |= RADEON_PCIE_TX_GART_UNMAPPED_ACCESS_DISCARD; |
201 | WREG32_PCIE(RADEON_PCIE_TX_GART_CNTL, tmp & ~RADEON_PCIE_TX_GART_EN); |
202 | radeon_gart_table_vram_unpin(rdev); |
203 | } |
204 | |
205 | void rv370_pcie_gart_fini(struct radeon_device *rdev) |
206 | { |
207 | radeon_gart_fini(rdev); |
208 | rv370_pcie_gart_disable(rdev); |
209 | radeon_gart_table_vram_free(rdev); |
210 | } |
211 | |
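/* Emit a fence on the CP ring: idle the SC/US blocks, flush the 3D
 * destination and Z caches, wait for the engines to go idle and clean,
 * invalidate the HDP read cache via the ring (see the HOST_PATH_CNTL
 * erratum above), then write the fence sequence to the scratch register
 * and fire the SW interrupt.
 */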
212 | void r300_fence_ring_emit(struct radeon_device *rdev, |
213 | struct radeon_fence *fence) |
214 | { |
215 | struct radeon_ring *ring = &rdev->ring[fence->ring]; |
216 | |
	/* Whoever calls radeon_fence_emit should call ring_lock and ask
	 * for enough space (today the callers are IB scheduling and buffer moves) */
219 | /* Write SC register so SC & US assert idle */ |
220 | radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_TL, 0)); |
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R300_RE_SCISSORS_BR, 0));
	radeon_ring_write(ring, 0);
224 | /* Flush 3D cache */ |
225 | radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); |
226 | radeon_ring_write(ring, R300_RB3D_DC_FLUSH); |
227 | radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); |
228 | radeon_ring_write(ring, R300_ZC_FLUSH); |
229 | /* Wait until IDLE & CLEAN */ |
230 | radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); |
	radeon_ring_write(ring, (RADEON_WAIT_3D_IDLECLEAN |
232 | RADEON_WAIT_2D_IDLECLEAN | |
233 | RADEON_WAIT_DMA_GUI_IDLE)); |
234 | radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0)); |
	radeon_ring_write(ring, rdev->config.r300.hdp_cntl |
236 | RADEON_HDP_READ_BUFFER_INVALIDATE); |
237 | radeon_ring_write(ring, PACKET0(RADEON_HOST_PATH_CNTL, 0)); |
	radeon_ring_write(ring, rdev->config.r300.hdp_cntl);
239 | /* Emit fence sequence & fire IRQ */ |
240 | radeon_ring_write(ring, PACKET0(rdev->fence_drv[fence->ring].scratch_reg, 0)); |
	radeon_ring_write(ring, fence->seq);
242 | radeon_ring_write(ring, PACKET0(RADEON_GEN_INT_STATUS, 0)); |
243 | radeon_ring_write(ring, RADEON_SW_INT_FIRE); |
244 | } |
245 | |
246 | void r300_ring_start(struct radeon_device *rdev, struct radeon_ring *ring) |
247 | { |
248 | unsigned gb_tile_config; |
249 | int r; |
250 | |
	/* Sub-pixel 1/12 so we can have 4K rendering according to the docs */
252 | gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16); |
253 | switch (rdev->num_gb_pipes) { |
254 | case 2: |
255 | gb_tile_config |= R300_PIPE_COUNT_R300; |
256 | break; |
257 | case 3: |
258 | gb_tile_config |= R300_PIPE_COUNT_R420_3P; |
259 | break; |
260 | case 4: |
261 | gb_tile_config |= R300_PIPE_COUNT_R420; |
262 | break; |
263 | case 1: |
264 | default: |
265 | gb_tile_config |= R300_PIPE_COUNT_RV350; |
266 | break; |
267 | } |
268 | |
	r = radeon_ring_lock(rdev, ring, 64);
270 | if (r) { |
271 | return; |
272 | } |
273 | radeon_ring_write(ring, PACKET0(RADEON_ISYNC_CNTL, 0)); |
274 | radeon_ring_write(ring, |
275 | RADEON_ISYNC_ANY2D_IDLE3D | |
276 | RADEON_ISYNC_ANY3D_IDLE2D | |
277 | RADEON_ISYNC_WAIT_IDLEGUI | |
278 | RADEON_ISYNC_CPSCRATCH_IDLEGUI); |
279 | radeon_ring_write(ring, PACKET0(R300_GB_TILE_CONFIG, 0)); |
	radeon_ring_write(ring, gb_tile_config);
281 | radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); |
282 | radeon_ring_write(ring, |
283 | RADEON_WAIT_2D_IDLECLEAN | |
284 | RADEON_WAIT_3D_IDLECLEAN); |
285 | radeon_ring_write(ring, PACKET0(R300_DST_PIPE_CONFIG, 0)); |
286 | radeon_ring_write(ring, R300_PIPE_AUTO_CONFIG); |
287 | radeon_ring_write(ring, PACKET0(R300_GB_SELECT, 0)); |
	radeon_ring_write(ring, 0);
	radeon_ring_write(ring, PACKET0(R300_GB_ENABLE, 0));
	radeon_ring_write(ring, 0);
291 | radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); |
292 | radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); |
293 | radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); |
294 | radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE); |
295 | radeon_ring_write(ring, PACKET0(RADEON_WAIT_UNTIL, 0)); |
296 | radeon_ring_write(ring, |
297 | RADEON_WAIT_2D_IDLECLEAN | |
298 | RADEON_WAIT_3D_IDLECLEAN); |
299 | radeon_ring_write(ring, PACKET0(R300_GB_AA_CONFIG, 0)); |
	radeon_ring_write(ring, 0);
301 | radeon_ring_write(ring, PACKET0(R300_RB3D_DSTCACHE_CTLSTAT, 0)); |
302 | radeon_ring_write(ring, R300_RB3D_DC_FLUSH | R300_RB3D_DC_FREE); |
303 | radeon_ring_write(ring, PACKET0(R300_RB3D_ZCACHE_CTLSTAT, 0)); |
304 | radeon_ring_write(ring, R300_ZC_FLUSH | R300_ZC_FREE); |
305 | radeon_ring_write(ring, PACKET0(R300_GB_MSPOS0, 0)); |
306 | radeon_ring_write(ring, |
			  ((6 << R300_MS_X0_SHIFT) |
308 | (6 << R300_MS_Y0_SHIFT) | |
309 | (6 << R300_MS_X1_SHIFT) | |
310 | (6 << R300_MS_Y1_SHIFT) | |
311 | (6 << R300_MS_X2_SHIFT) | |
312 | (6 << R300_MS_Y2_SHIFT) | |
313 | (6 << R300_MSBD0_Y_SHIFT) | |
314 | (6 << R300_MSBD0_X_SHIFT))); |
315 | radeon_ring_write(ring, PACKET0(R300_GB_MSPOS1, 0)); |
316 | radeon_ring_write(ring, |
			  ((6 << R300_MS_X3_SHIFT) |
318 | (6 << R300_MS_Y3_SHIFT) | |
319 | (6 << R300_MS_X4_SHIFT) | |
320 | (6 << R300_MS_Y4_SHIFT) | |
321 | (6 << R300_MS_X5_SHIFT) | |
322 | (6 << R300_MS_Y5_SHIFT) | |
323 | (6 << R300_MSBD1_SHIFT))); |
324 | radeon_ring_write(ring, PACKET0(R300_GA_ENHANCE, 0)); |
325 | radeon_ring_write(ring, R300_GA_DEADLOCK_CNTL | R300_GA_FASTSYNC_CNTL); |
326 | radeon_ring_write(ring, PACKET0(R300_GA_POLY_MODE, 0)); |
327 | radeon_ring_write(ring, |
328 | R300_FRONT_PTYPE_TRIANGE | R300_BACK_PTYPE_TRIANGE); |
329 | radeon_ring_write(ring, PACKET0(R300_GA_ROUND_MODE, 0)); |
330 | radeon_ring_write(ring, |
331 | R300_GEOMETRY_ROUND_NEAREST | |
332 | R300_COLOR_ROUND_NEAREST); |
	radeon_ring_unlock_commit(rdev, ring, false);
334 | } |
335 | |
336 | static void r300_errata(struct radeon_device *rdev) |
337 | { |
338 | rdev->pll_errata = 0; |
339 | |
340 | if (rdev->family == CHIP_R300 && |
341 | (RREG32(RADEON_CONFIG_CNTL) & RADEON_CFG_ATI_REV_ID_MASK) == RADEON_CFG_ATI_REV_A11) { |
342 | rdev->pll_errata |= CHIP_ERRATA_R300_CG; |
343 | } |
344 | } |
345 | |
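/* Poll RADEON_MC_STATUS until the memory controller reports idle.
 * Returns 0 on success, -1 if the MC is still busy after
 * rdev->usec_timeout microseconds.
 */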
346 | int r300_mc_wait_for_idle(struct radeon_device *rdev) |
347 | { |
348 | unsigned i; |
349 | uint32_t tmp; |
350 | |
351 | for (i = 0; i < rdev->usec_timeout; i++) { |
352 | /* read MC_STATUS */ |
353 | tmp = RREG32(RADEON_MC_STATUS); |
354 | if (tmp & R300_MC_IDLE) { |
355 | return 0; |
356 | } |
		udelay(1);
358 | } |
359 | return -1; |
360 | } |
361 | |
362 | /* rs400_gpu_init also calls this! */ |
363 | void r300_gpu_init(struct radeon_device *rdev) |
364 | { |
365 | uint32_t gb_tile_config, tmp; |
366 | |
367 | if ((rdev->family == CHIP_R300 && rdev->pdev->device != 0x4144) || |
368 | (rdev->family == CHIP_R350 && rdev->pdev->device != 0x4148)) { |
369 | /* r300,r350 */ |
370 | rdev->num_gb_pipes = 2; |
371 | } else { |
372 | /* rv350,rv370,rv380,r300 AD, r350 AH */ |
373 | rdev->num_gb_pipes = 1; |
374 | } |
375 | rdev->num_z_pipes = 1; |
376 | gb_tile_config = (R300_ENABLE_TILING | R300_TILE_SIZE_16); |
377 | switch (rdev->num_gb_pipes) { |
378 | case 2: |
379 | gb_tile_config |= R300_PIPE_COUNT_R300; |
380 | break; |
381 | case 3: |
382 | gb_tile_config |= R300_PIPE_COUNT_R420_3P; |
383 | break; |
384 | case 4: |
385 | gb_tile_config |= R300_PIPE_COUNT_R420; |
386 | break; |
387 | default: |
388 | case 1: |
389 | gb_tile_config |= R300_PIPE_COUNT_RV350; |
390 | break; |
391 | } |
392 | WREG32(R300_GB_TILE_CONFIG, gb_tile_config); |
393 | |
394 | if (r100_gui_wait_for_idle(rdev)) { |
		pr_warn("Failed to wait GUI idle while programming pipes. Bad things might happen.\n");
396 | } |
397 | |
398 | tmp = RREG32(R300_DST_PIPE_CONFIG); |
399 | WREG32(R300_DST_PIPE_CONFIG, tmp | R300_PIPE_AUTO_CONFIG); |
400 | |
401 | WREG32(R300_RB2D_DSTCACHE_MODE, |
402 | R300_DC_AUTOFLUSH_ENABLE | |
403 | R300_DC_DC_DISABLE_IGNORE_PE); |
404 | |
	if (r100_gui_wait_for_idle(rdev)) {
		pr_warn("Failed to wait GUI idle while programming pipes. Bad things might happen.\n");
	}
	if (r300_mc_wait_for_idle(rdev)) {
		pr_warn("Failed to wait MC idle while programming pipes. Bad things might happen.\n");
	}
	DRM_INFO("radeon: %d quad pipes, %d Z pipes initialized\n",
412 | rdev->num_gb_pipes, rdev->num_z_pipes); |
413 | } |
414 | |
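/* Soft reset the GPU: stop the CP and bus mastering, reset the VAP and GA
 * blocks, then reset the CP separately (see the comment below on how
 * fragile that is), and use RBBM_STATUS to decide whether the reset took.
 */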
415 | int r300_asic_reset(struct radeon_device *rdev, bool hard) |
416 | { |
417 | struct r100_mc_save save; |
418 | u32 status, tmp; |
419 | int ret = 0; |
420 | |
421 | status = RREG32(R_000E40_RBBM_STATUS); |
422 | if (!G_000E40_GUI_ACTIVE(status)) { |
423 | return 0; |
424 | } |
	r100_mc_stop(rdev, &save);
426 | status = RREG32(R_000E40_RBBM_STATUS); |
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
428 | /* stop CP */ |
429 | WREG32(RADEON_CP_CSQ_CNTL, 0); |
430 | tmp = RREG32(RADEON_CP_RB_CNTL); |
431 | WREG32(RADEON_CP_RB_CNTL, tmp | RADEON_RB_RPTR_WR_ENA); |
432 | WREG32(RADEON_CP_RB_RPTR_WR, 0); |
433 | WREG32(RADEON_CP_RB_WPTR, 0); |
434 | WREG32(RADEON_CP_RB_CNTL, tmp); |
435 | /* save PCI state */ |
	pci_save_state(rdev->pdev);
437 | /* disable bus mastering */ |
438 | r100_bm_disable(rdev); |
439 | WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_VAP(1) | |
440 | S_0000F0_SOFT_RESET_GA(1)); |
441 | RREG32(R_0000F0_RBBM_SOFT_RESET); |
442 | mdelay(500); |
443 | WREG32(R_0000F0_RBBM_SOFT_RESET, 0); |
444 | mdelay(1); |
445 | status = RREG32(R_000E40_RBBM_STATUS); |
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
	/* Resetting the CP seems to be problematic: sometimes it ends up
	 * hard locking the computer, but it is necessary for a successful
	 * reset. More testing and experimenting is needed on R3XX/R4XX to
	 * find a reliable solution (if any).
	 */
452 | WREG32(R_0000F0_RBBM_SOFT_RESET, S_0000F0_SOFT_RESET_CP(1)); |
453 | RREG32(R_0000F0_RBBM_SOFT_RESET); |
454 | mdelay(500); |
455 | WREG32(R_0000F0_RBBM_SOFT_RESET, 0); |
456 | mdelay(1); |
457 | status = RREG32(R_000E40_RBBM_STATUS); |
	dev_info(rdev->dev, "(%s:%d) RBBM_STATUS=0x%08X\n", __func__, __LINE__, status);
459 | /* restore PCI & busmastering */ |
	pci_restore_state(rdev->pdev);
	r100_enable_bm(rdev);
	/* Check if GPU is idle */
	if (G_000E40_GA_BUSY(status) || G_000E40_VAP_BUSY(status)) {
		dev_err(rdev->dev, "failed to reset GPU\n");
		ret = -1;
	} else
		dev_info(rdev->dev, "GPU reset succeeded\n");
	r100_mc_resume(rdev, &save);
469 | return ret; |
470 | } |
471 | |
472 | /* |
473 | * r300,r350,rv350,rv380 VRAM info |
474 | */ |
475 | void r300_mc_init(struct radeon_device *rdev) |
476 | { |
477 | u64 base; |
478 | u32 tmp; |
479 | |
	/* DDR for all cards after R300 & IGP */
481 | rdev->mc.vram_is_ddr = true; |
482 | tmp = RREG32(RADEON_MEM_CNTL); |
483 | tmp &= R300_MEM_NUM_CHANNELS_MASK; |
484 | switch (tmp) { |
485 | case 0: rdev->mc.vram_width = 64; break; |
486 | case 1: rdev->mc.vram_width = 128; break; |
487 | case 2: rdev->mc.vram_width = 256; break; |
488 | default: rdev->mc.vram_width = 128; break; |
489 | } |
490 | r100_vram_init_sizes(rdev); |
491 | base = rdev->mc.aper_base; |
492 | if (rdev->flags & RADEON_IS_IGP) |
493 | base = (RREG32(RADEON_NB_TOM) & 0xffff) << 16; |
	radeon_vram_location(rdev, &rdev->mc, base);
	rdev->mc.gtt_base_align = 0;
	if (!(rdev->flags & RADEON_IS_AGP))
		radeon_gtt_location(rdev, &rdev->mc);
498 | radeon_update_bandwidth_info(rdev); |
499 | } |
500 | |
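/* Reconfigure the PCIE link width (x0/x1/x2/x4/x8/x12/x16, anything else
 * falls back to x16). This is a no-op on IGP and non-PCIE parts, or when
 * the link already runs at the requested width; otherwise the new width
 * is latched with RADEON_PCIE_LC_RECONFIG_NOW and we spin until the
 * register reads back a sane value.
 */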
501 | void rv370_set_pcie_lanes(struct radeon_device *rdev, int lanes) |
502 | { |
503 | uint32_t link_width_cntl, mask; |
504 | |
505 | if (rdev->flags & RADEON_IS_IGP) |
506 | return; |
507 | |
508 | if (!(rdev->flags & RADEON_IS_PCIE)) |
509 | return; |
510 | |
511 | /* FIXME wait for idle */ |
512 | |
513 | switch (lanes) { |
514 | case 0: |
515 | mask = RADEON_PCIE_LC_LINK_WIDTH_X0; |
516 | break; |
517 | case 1: |
518 | mask = RADEON_PCIE_LC_LINK_WIDTH_X1; |
519 | break; |
520 | case 2: |
521 | mask = RADEON_PCIE_LC_LINK_WIDTH_X2; |
522 | break; |
523 | case 4: |
524 | mask = RADEON_PCIE_LC_LINK_WIDTH_X4; |
525 | break; |
526 | case 8: |
527 | mask = RADEON_PCIE_LC_LINK_WIDTH_X8; |
528 | break; |
529 | case 12: |
530 | mask = RADEON_PCIE_LC_LINK_WIDTH_X12; |
531 | break; |
532 | case 16: |
533 | default: |
534 | mask = RADEON_PCIE_LC_LINK_WIDTH_X16; |
535 | break; |
536 | } |
537 | |
538 | link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL); |
539 | |
540 | if ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) == |
541 | (mask << RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT)) |
542 | return; |
543 | |
544 | link_width_cntl &= ~(RADEON_PCIE_LC_LINK_WIDTH_MASK | |
545 | RADEON_PCIE_LC_RECONFIG_NOW | |
546 | RADEON_PCIE_LC_RECONFIG_LATER | |
547 | RADEON_PCIE_LC_SHORT_RECONFIG_EN); |
548 | link_width_cntl |= mask; |
549 | WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, link_width_cntl); |
550 | WREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL, (link_width_cntl | |
551 | RADEON_PCIE_LC_RECONFIG_NOW)); |
552 | |
553 | /* wait for lane set to complete */ |
554 | link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL); |
555 | while (link_width_cntl == 0xffffffff) |
556 | link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL); |
557 | |
558 | } |
559 | |
560 | int rv370_get_pcie_lanes(struct radeon_device *rdev) |
561 | { |
562 | u32 link_width_cntl; |
563 | |
564 | if (rdev->flags & RADEON_IS_IGP) |
565 | return 0; |
566 | |
567 | if (!(rdev->flags & RADEON_IS_PCIE)) |
568 | return 0; |
569 | |
570 | /* FIXME wait for idle */ |
571 | |
572 | link_width_cntl = RREG32_PCIE(RADEON_PCIE_LC_LINK_WIDTH_CNTL); |
573 | |
574 | switch ((link_width_cntl & RADEON_PCIE_LC_LINK_WIDTH_RD_MASK) >> RADEON_PCIE_LC_LINK_WIDTH_RD_SHIFT) { |
575 | case RADEON_PCIE_LC_LINK_WIDTH_X0: |
576 | return 0; |
577 | case RADEON_PCIE_LC_LINK_WIDTH_X1: |
578 | return 1; |
579 | case RADEON_PCIE_LC_LINK_WIDTH_X2: |
580 | return 2; |
581 | case RADEON_PCIE_LC_LINK_WIDTH_X4: |
582 | return 4; |
583 | case RADEON_PCIE_LC_LINK_WIDTH_X8: |
584 | return 8; |
585 | case RADEON_PCIE_LC_LINK_WIDTH_X16: |
586 | default: |
587 | return 16; |
588 | } |
589 | } |
590 | |
591 | #if defined(CONFIG_DEBUG_FS) |
592 | static int rv370_debugfs_pcie_gart_info_show(struct seq_file *m, void *unused) |
593 | { |
594 | struct radeon_device *rdev = m->private; |
595 | uint32_t tmp; |
596 | |
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_CNTL);
	seq_printf(m, "PCIE_TX_GART_CNTL 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_BASE);
	seq_printf(m, "PCIE_TX_GART_BASE 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_LO);
	seq_printf(m, "PCIE_TX_GART_START_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_START_HI);
	seq_printf(m, "PCIE_TX_GART_START_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_LO);
	seq_printf(m, "PCIE_TX_GART_END_LO 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_END_HI);
	seq_printf(m, "PCIE_TX_GART_END_HI 0x%08x\n", tmp);
	tmp = RREG32_PCIE(RADEON_PCIE_TX_GART_ERROR);
	seq_printf(m, "PCIE_TX_GART_ERROR 0x%08x\n", tmp);
611 | return 0; |
612 | } |
613 | |
614 | DEFINE_SHOW_ATTRIBUTE(rv370_debugfs_pcie_gart_info); |
615 | #endif |
616 | |
617 | static void rv370_debugfs_pcie_gart_info_init(struct radeon_device *rdev) |
618 | { |
619 | #if defined(CONFIG_DEBUG_FS) |
620 | struct dentry *root = rdev_to_drm(rdev)->primary->debugfs_root; |
621 | |
	debugfs_create_file("rv370_pcie_gart_info", 0444, root, rdev,
623 | &rv370_debugfs_pcie_gart_info_fops); |
624 | #endif |
625 | } |
626 | |
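/* Validate a single type-0 register write from a userspace command
 * stream: patch relocations into the IB, reject forbidden registers, and
 * record state in the r100_cs_track so the draw time checker can verify
 * that color/Z buffers and textures are large enough.
 */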
627 | static int r300_packet0_check(struct radeon_cs_parser *p, |
628 | struct radeon_cs_packet *pkt, |
629 | unsigned idx, unsigned reg) |
630 | { |
631 | struct radeon_bo_list *reloc; |
632 | struct r100_cs_track *track; |
633 | volatile uint32_t *ib; |
634 | uint32_t tmp, tile_flags = 0; |
635 | unsigned i; |
636 | int r; |
637 | u32 idx_value; |
638 | |
639 | ib = p->ib.ptr; |
640 | track = (struct r100_cs_track *)p->track; |
641 | idx_value = radeon_get_ib_value(p, idx); |
642 | |
643 | switch (reg) { |
644 | case AVIVO_D1MODE_VLINE_START_END: |
645 | case RADEON_CRTC_GUI_TRIG_VLINE: |
646 | r = r100_cs_packet_parse_vline(p); |
647 | if (r) { |
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
649 | idx, reg); |
650 | radeon_cs_dump_packet(p, pkt); |
651 | return r; |
652 | } |
653 | break; |
654 | case RADEON_DST_PITCH_OFFSET: |
655 | case RADEON_SRC_PITCH_OFFSET: |
656 | r = r100_reloc_pitch_offset(p, pkt, idx, reg); |
657 | if (r) |
658 | return r; |
659 | break; |
660 | case R300_RB3D_COLOROFFSET0: |
661 | case R300_RB3D_COLOROFFSET1: |
662 | case R300_RB3D_COLOROFFSET2: |
663 | case R300_RB3D_COLOROFFSET3: |
664 | i = (reg - R300_RB3D_COLOROFFSET0) >> 2; |
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
668 | idx, reg); |
669 | radeon_cs_dump_packet(p, pkt); |
670 | return r; |
671 | } |
672 | track->cb[i].robj = reloc->robj; |
673 | track->cb[i].offset = idx_value; |
674 | track->cb_dirty = true; |
675 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
676 | break; |
677 | case R300_ZB_DEPTHOFFSET: |
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
681 | idx, reg); |
682 | radeon_cs_dump_packet(p, pkt); |
683 | return r; |
684 | } |
685 | track->zb.robj = reloc->robj; |
686 | track->zb.offset = idx_value; |
687 | track->zb_dirty = true; |
688 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
689 | break; |
690 | case R300_TX_OFFSET_0: |
691 | case R300_TX_OFFSET_0+4: |
692 | case R300_TX_OFFSET_0+8: |
693 | case R300_TX_OFFSET_0+12: |
694 | case R300_TX_OFFSET_0+16: |
695 | case R300_TX_OFFSET_0+20: |
696 | case R300_TX_OFFSET_0+24: |
697 | case R300_TX_OFFSET_0+28: |
698 | case R300_TX_OFFSET_0+32: |
699 | case R300_TX_OFFSET_0+36: |
700 | case R300_TX_OFFSET_0+40: |
701 | case R300_TX_OFFSET_0+44: |
702 | case R300_TX_OFFSET_0+48: |
703 | case R300_TX_OFFSET_0+52: |
704 | case R300_TX_OFFSET_0+56: |
705 | case R300_TX_OFFSET_0+60: |
706 | i = (reg - R300_TX_OFFSET_0) >> 2; |
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
710 | idx, reg); |
711 | radeon_cs_dump_packet(p, pkt); |
712 | return r; |
713 | } |
714 | |
715 | if (p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS) { |
716 | ib[idx] = (idx_value & 31) | /* keep the 1st 5 bits */ |
717 | ((idx_value & ~31) + (u32)reloc->gpu_offset); |
718 | } else { |
719 | if (reloc->tiling_flags & RADEON_TILING_MACRO) |
720 | tile_flags |= R300_TXO_MACRO_TILE; |
721 | if (reloc->tiling_flags & RADEON_TILING_MICRO) |
722 | tile_flags |= R300_TXO_MICRO_TILE; |
723 | else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE) |
724 | tile_flags |= R300_TXO_MICRO_TILE_SQUARE; |
725 | |
726 | tmp = idx_value + ((u32)reloc->gpu_offset); |
727 | tmp |= tile_flags; |
728 | ib[idx] = tmp; |
729 | } |
730 | track->textures[i].robj = reloc->robj; |
731 | track->tex_dirty = true; |
732 | break; |
733 | /* Tracked registers */ |
734 | case 0x2084: |
735 | /* VAP_VF_CNTL */ |
736 | track->vap_vf_cntl = idx_value; |
737 | break; |
738 | case 0x20B4: |
739 | /* VAP_VTX_SIZE */ |
740 | track->vtx_size = idx_value & 0x7F; |
741 | break; |
742 | case 0x2134: |
743 | /* VAP_VF_MAX_VTX_INDX */ |
744 | track->max_indx = idx_value & 0x00FFFFFFUL; |
745 | break; |
746 | case 0x2088: |
747 | /* VAP_ALT_NUM_VERTICES - only valid on r500 */ |
748 | if (p->rdev->family < CHIP_RV515) |
749 | goto fail; |
750 | track->vap_alt_nverts = idx_value & 0xFFFFFF; |
751 | break; |
752 | case 0x43E4: |
753 | /* SC_SCISSOR1 */ |
754 | track->maxy = ((idx_value >> 13) & 0x1FFF) + 1; |
755 | if (p->rdev->family < CHIP_RV515) { |
756 | track->maxy -= 1440; |
757 | } |
758 | track->cb_dirty = true; |
759 | track->zb_dirty = true; |
760 | break; |
761 | case 0x4E00: |
762 | /* RB3D_CCTL */ |
763 | if ((idx_value & (1 << 10)) && /* CMASK_ENABLE */ |
764 | p->rdev->cmask_filp != p->filp) { |
			DRM_ERROR("Invalid RB3D_CCTL: Cannot enable CMASK.\n");
766 | return -EINVAL; |
767 | } |
768 | track->num_cb = ((idx_value >> 5) & 0x3) + 1; |
769 | track->cb_dirty = true; |
770 | break; |
771 | case 0x4E38: |
772 | case 0x4E3C: |
773 | case 0x4E40: |
774 | case 0x4E44: |
775 | /* RB3D_COLORPITCH0 */ |
776 | /* RB3D_COLORPITCH1 */ |
777 | /* RB3D_COLORPITCH2 */ |
778 | /* RB3D_COLORPITCH3 */ |
779 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { |
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
783 | idx, reg); |
784 | radeon_cs_dump_packet(p, pkt); |
785 | return r; |
786 | } |
787 | |
788 | if (reloc->tiling_flags & RADEON_TILING_MACRO) |
789 | tile_flags |= R300_COLOR_TILE_ENABLE; |
790 | if (reloc->tiling_flags & RADEON_TILING_MICRO) |
791 | tile_flags |= R300_COLOR_MICROTILE_ENABLE; |
792 | else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE) |
793 | tile_flags |= R300_COLOR_MICROTILE_SQUARE_ENABLE; |
794 | |
795 | tmp = idx_value & ~(0x7 << 16); |
796 | tmp |= tile_flags; |
797 | ib[idx] = tmp; |
798 | } |
799 | i = (reg - 0x4E38) >> 2; |
800 | track->cb[i].pitch = idx_value & 0x3FFE; |
801 | switch (((idx_value >> 21) & 0xF)) { |
802 | case 9: |
803 | case 11: |
804 | case 12: |
805 | track->cb[i].cpp = 1; |
806 | break; |
807 | case 3: |
808 | case 4: |
809 | case 13: |
810 | case 15: |
811 | track->cb[i].cpp = 2; |
812 | break; |
813 | case 5: |
814 | if (p->rdev->family < CHIP_RV515) { |
				DRM_ERROR("Invalid color buffer format (%d)!\n",
816 | ((idx_value >> 21) & 0xF)); |
817 | return -EINVAL; |
818 | } |
819 | fallthrough; |
820 | case 6: |
821 | track->cb[i].cpp = 4; |
822 | break; |
823 | case 10: |
824 | track->cb[i].cpp = 8; |
825 | break; |
826 | case 7: |
827 | track->cb[i].cpp = 16; |
828 | break; |
829 | default: |
			DRM_ERROR("Invalid color buffer format (%d)!\n",
831 | ((idx_value >> 21) & 0xF)); |
832 | return -EINVAL; |
833 | } |
834 | track->cb_dirty = true; |
835 | break; |
836 | case 0x4F00: |
837 | /* ZB_CNTL */ |
838 | if (idx_value & 2) { |
839 | track->z_enabled = true; |
840 | } else { |
841 | track->z_enabled = false; |
842 | } |
843 | track->zb_dirty = true; |
844 | break; |
845 | case 0x4F10: |
846 | /* ZB_FORMAT */ |
847 | switch ((idx_value & 0xF)) { |
848 | case 0: |
849 | case 1: |
850 | track->zb.cpp = 2; |
851 | break; |
852 | case 2: |
853 | track->zb.cpp = 4; |
854 | break; |
855 | default: |
			DRM_ERROR("Invalid z buffer format (%d)!\n",
857 | (idx_value & 0xF)); |
858 | return -EINVAL; |
859 | } |
860 | track->zb_dirty = true; |
861 | break; |
862 | case 0x4F24: |
863 | /* ZB_DEPTHPITCH */ |
864 | if (!(p->cs_flags & RADEON_CS_KEEP_TILING_FLAGS)) { |
			r = radeon_cs_packet_next_reloc(p, &reloc, 0);
			if (r) {
				DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
868 | idx, reg); |
869 | radeon_cs_dump_packet(p, pkt); |
870 | return r; |
871 | } |
872 | |
873 | if (reloc->tiling_flags & RADEON_TILING_MACRO) |
874 | tile_flags |= R300_DEPTHMACROTILE_ENABLE; |
875 | if (reloc->tiling_flags & RADEON_TILING_MICRO) |
876 | tile_flags |= R300_DEPTHMICROTILE_TILED; |
877 | else if (reloc->tiling_flags & RADEON_TILING_MICRO_SQUARE) |
878 | tile_flags |= R300_DEPTHMICROTILE_TILED_SQUARE; |
879 | |
880 | tmp = idx_value & ~(0x7 << 16); |
881 | tmp |= tile_flags; |
882 | ib[idx] = tmp; |
883 | } |
884 | track->zb.pitch = idx_value & 0x3FFC; |
885 | track->zb_dirty = true; |
886 | break; |
887 | case 0x4104: |
888 | /* TX_ENABLE */ |
889 | for (i = 0; i < 16; i++) { |
890 | bool enabled; |
891 | |
892 | enabled = !!(idx_value & (1 << i)); |
893 | track->textures[i].enabled = enabled; |
894 | } |
895 | track->tex_dirty = true; |
896 | break; |
897 | case 0x44C0: |
898 | case 0x44C4: |
899 | case 0x44C8: |
900 | case 0x44CC: |
901 | case 0x44D0: |
902 | case 0x44D4: |
903 | case 0x44D8: |
904 | case 0x44DC: |
905 | case 0x44E0: |
906 | case 0x44E4: |
907 | case 0x44E8: |
908 | case 0x44EC: |
909 | case 0x44F0: |
910 | case 0x44F4: |
911 | case 0x44F8: |
912 | case 0x44FC: |
913 | /* TX_FORMAT1_[0-15] */ |
914 | i = (reg - 0x44C0) >> 2; |
915 | tmp = (idx_value >> 25) & 0x3; |
916 | track->textures[i].tex_coord_type = tmp; |
917 | switch ((idx_value & 0x1F)) { |
918 | case R300_TX_FORMAT_X8: |
919 | case R300_TX_FORMAT_Y4X4: |
920 | case R300_TX_FORMAT_Z3Y3X2: |
921 | track->textures[i].cpp = 1; |
922 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
923 | break; |
924 | case R300_TX_FORMAT_X16: |
925 | case R300_TX_FORMAT_FL_I16: |
926 | case R300_TX_FORMAT_Y8X8: |
927 | case R300_TX_FORMAT_Z5Y6X5: |
928 | case R300_TX_FORMAT_Z6Y5X5: |
929 | case R300_TX_FORMAT_W4Z4Y4X4: |
930 | case R300_TX_FORMAT_W1Z5Y5X5: |
931 | case R300_TX_FORMAT_D3DMFT_CxV8U8: |
932 | case R300_TX_FORMAT_B8G8_B8G8: |
933 | case R300_TX_FORMAT_G8R8_G8B8: |
934 | track->textures[i].cpp = 2; |
935 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
936 | break; |
937 | case R300_TX_FORMAT_Y16X16: |
938 | case R300_TX_FORMAT_FL_I16A16: |
939 | case R300_TX_FORMAT_Z11Y11X10: |
940 | case R300_TX_FORMAT_Z10Y11X11: |
941 | case R300_TX_FORMAT_W8Z8Y8X8: |
942 | case R300_TX_FORMAT_W2Z10Y10X10: |
943 | case 0x17: |
944 | case R300_TX_FORMAT_FL_I32: |
945 | case 0x1e: |
946 | track->textures[i].cpp = 4; |
947 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
948 | break; |
949 | case R300_TX_FORMAT_W16Z16Y16X16: |
950 | case R300_TX_FORMAT_FL_R16G16B16A16: |
951 | case R300_TX_FORMAT_FL_I32A32: |
952 | track->textures[i].cpp = 8; |
953 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
954 | break; |
955 | case R300_TX_FORMAT_FL_R32G32B32A32: |
956 | track->textures[i].cpp = 16; |
957 | track->textures[i].compress_format = R100_TRACK_COMP_NONE; |
958 | break; |
959 | case R300_TX_FORMAT_DXT1: |
960 | track->textures[i].cpp = 1; |
961 | track->textures[i].compress_format = R100_TRACK_COMP_DXT1; |
962 | break; |
963 | case R300_TX_FORMAT_ATI2N: |
964 | if (p->rdev->family < CHIP_R420) { |
				DRM_ERROR("Invalid texture format %u\n",
966 | (idx_value & 0x1F)); |
967 | return -EINVAL; |
968 | } |
969 | /* The same rules apply as for DXT3/5. */ |
970 | fallthrough; |
971 | case R300_TX_FORMAT_DXT3: |
972 | case R300_TX_FORMAT_DXT5: |
973 | track->textures[i].cpp = 1; |
974 | track->textures[i].compress_format = R100_TRACK_COMP_DXT35; |
975 | break; |
976 | default: |
			DRM_ERROR("Invalid texture format %u\n",
978 | (idx_value & 0x1F)); |
979 | return -EINVAL; |
980 | } |
981 | track->tex_dirty = true; |
982 | break; |
983 | case 0x4400: |
984 | case 0x4404: |
985 | case 0x4408: |
986 | case 0x440C: |
987 | case 0x4410: |
988 | case 0x4414: |
989 | case 0x4418: |
990 | case 0x441C: |
991 | case 0x4420: |
992 | case 0x4424: |
993 | case 0x4428: |
994 | case 0x442C: |
995 | case 0x4430: |
996 | case 0x4434: |
997 | case 0x4438: |
998 | case 0x443C: |
999 | /* TX_FILTER0_[0-15] */ |
1000 | i = (reg - 0x4400) >> 2; |
1001 | tmp = idx_value & 0x7; |
1002 | if (tmp == 2 || tmp == 4 || tmp == 6) { |
1003 | track->textures[i].roundup_w = false; |
1004 | } |
1005 | tmp = (idx_value >> 3) & 0x7; |
1006 | if (tmp == 2 || tmp == 4 || tmp == 6) { |
1007 | track->textures[i].roundup_h = false; |
1008 | } |
1009 | track->tex_dirty = true; |
1010 | break; |
1011 | case 0x4500: |
1012 | case 0x4504: |
1013 | case 0x4508: |
1014 | case 0x450C: |
1015 | case 0x4510: |
1016 | case 0x4514: |
1017 | case 0x4518: |
1018 | case 0x451C: |
1019 | case 0x4520: |
1020 | case 0x4524: |
1021 | case 0x4528: |
1022 | case 0x452C: |
1023 | case 0x4530: |
1024 | case 0x4534: |
1025 | case 0x4538: |
1026 | case 0x453C: |
1027 | /* TX_FORMAT2_[0-15] */ |
1028 | i = (reg - 0x4500) >> 2; |
1029 | tmp = idx_value & 0x3FFF; |
1030 | track->textures[i].pitch = tmp + 1; |
1031 | if (p->rdev->family >= CHIP_RV515) { |
1032 | tmp = ((idx_value >> 15) & 1) << 11; |
1033 | track->textures[i].width_11 = tmp; |
1034 | tmp = ((idx_value >> 16) & 1) << 11; |
1035 | track->textures[i].height_11 = tmp; |
1036 | |
1037 | /* ATI1N */ |
1038 | if (idx_value & (1 << 14)) { |
1039 | /* The same rules apply as for DXT1. */ |
1040 | track->textures[i].compress_format = |
1041 | R100_TRACK_COMP_DXT1; |
1042 | } |
1043 | } else if (idx_value & (1 << 14)) { |
			DRM_ERROR("Forbidden bit TXFORMAT_MSB\n");
1045 | return -EINVAL; |
1046 | } |
1047 | track->tex_dirty = true; |
1048 | break; |
1049 | case 0x4480: |
1050 | case 0x4484: |
1051 | case 0x4488: |
1052 | case 0x448C: |
1053 | case 0x4490: |
1054 | case 0x4494: |
1055 | case 0x4498: |
1056 | case 0x449C: |
1057 | case 0x44A0: |
1058 | case 0x44A4: |
1059 | case 0x44A8: |
1060 | case 0x44AC: |
1061 | case 0x44B0: |
1062 | case 0x44B4: |
1063 | case 0x44B8: |
1064 | case 0x44BC: |
1065 | /* TX_FORMAT0_[0-15] */ |
1066 | i = (reg - 0x4480) >> 2; |
1067 | tmp = idx_value & 0x7FF; |
1068 | track->textures[i].width = tmp + 1; |
1069 | tmp = (idx_value >> 11) & 0x7FF; |
1070 | track->textures[i].height = tmp + 1; |
1071 | tmp = (idx_value >> 26) & 0xF; |
1072 | track->textures[i].num_levels = tmp; |
1073 | tmp = idx_value & (1 << 31); |
1074 | track->textures[i].use_pitch = !!tmp; |
1075 | tmp = (idx_value >> 22) & 0xF; |
1076 | track->textures[i].txdepth = tmp; |
1077 | track->tex_dirty = true; |
1078 | break; |
1079 | case R300_ZB_ZPASS_ADDR: |
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1083 | idx, reg); |
1084 | radeon_cs_dump_packet(p, pkt); |
1085 | return r; |
1086 | } |
1087 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
1088 | break; |
1089 | case 0x4e0c: |
1090 | /* RB3D_COLOR_CHANNEL_MASK */ |
1091 | track->color_channel_mask = idx_value; |
1092 | track->cb_dirty = true; |
1093 | break; |
1094 | case 0x43a4: |
1095 | /* SC_HYPERZ_EN */ |
1096 | /* r300c emits this register - we need to disable hyperz for it |
1097 | * without complaining */ |
1098 | if (p->rdev->hyperz_filp != p->filp) { |
1099 | if (idx_value & 0x1) |
1100 | ib[idx] = idx_value & ~1; |
1101 | } |
1102 | break; |
1103 | case 0x4f1c: |
1104 | /* ZB_BW_CNTL */ |
1105 | track->zb_cb_clear = !!(idx_value & (1 << 5)); |
1106 | track->cb_dirty = true; |
1107 | track->zb_dirty = true; |
1108 | if (p->rdev->hyperz_filp != p->filp) { |
1109 | if (idx_value & (R300_HIZ_ENABLE | |
1110 | R300_RD_COMP_ENABLE | |
1111 | R300_WR_COMP_ENABLE | |
1112 | R300_FAST_FILL_ENABLE)) |
1113 | goto fail; |
1114 | } |
1115 | break; |
1116 | case 0x4e04: |
1117 | /* RB3D_BLENDCNTL */ |
1118 | track->blend_read_enable = !!(idx_value & (1 << 2)); |
1119 | track->cb_dirty = true; |
1120 | break; |
1121 | case R300_RB3D_AARESOLVE_OFFSET: |
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for ib[%d]=0x%04X\n",
1125 | idx, reg); |
1126 | radeon_cs_dump_packet(p, pkt); |
1127 | return r; |
1128 | } |
1129 | track->aa.robj = reloc->robj; |
1130 | track->aa.offset = idx_value; |
1131 | track->aa_dirty = true; |
1132 | ib[idx] = idx_value + ((u32)reloc->gpu_offset); |
1133 | break; |
1134 | case R300_RB3D_AARESOLVE_PITCH: |
1135 | track->aa.pitch = idx_value & 0x3FFE; |
1136 | track->aa_dirty = true; |
1137 | break; |
1138 | case R300_RB3D_AARESOLVE_CTL: |
1139 | track->aaresolve = idx_value & 0x1; |
1140 | track->aa_dirty = true; |
1141 | break; |
1142 | case 0x4f30: /* ZB_MASK_OFFSET */ |
1143 | case 0x4f34: /* ZB_ZMASK_PITCH */ |
1144 | case 0x4f44: /* ZB_HIZ_OFFSET */ |
1145 | case 0x4f54: /* ZB_HIZ_PITCH */ |
1146 | if (idx_value && (p->rdev->hyperz_filp != p->filp)) |
1147 | goto fail; |
1148 | break; |
1149 | case 0x4028: |
1150 | if (idx_value && (p->rdev->hyperz_filp != p->filp)) |
1151 | goto fail; |
1152 | /* GB_Z_PEQ_CONFIG */ |
1153 | if (p->rdev->family >= CHIP_RV350) |
1154 | break; |
1155 | goto fail; |
1157 | case 0x4be8: |
1158 | /* valid register only on RV530 */ |
1159 | if (p->rdev->family == CHIP_RV530) |
1160 | break; |
1161 | fallthrough; |
1162 | /* fallthrough do not move */ |
1163 | default: |
1164 | goto fail; |
1165 | } |
1166 | return 0; |
1167 | fail: |
	pr_err("Forbidden register 0x%04X in cs at %d (val=%08x)\n",
1169 | reg, idx, idx_value); |
1170 | return -EINVAL; |
1171 | } |
1172 | |
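/* Validate a type-3 packet: patch relocations for vertex and index
 * buffers, and run the track checker on every draw so a command stream
 * cannot address memory outside the buffer objects it supplied.
 */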
1173 | static int r300_packet3_check(struct radeon_cs_parser *p, |
1174 | struct radeon_cs_packet *pkt) |
1175 | { |
1176 | struct radeon_bo_list *reloc; |
1177 | struct r100_cs_track *track; |
1178 | volatile uint32_t *ib; |
1179 | unsigned idx; |
1180 | int r; |
1181 | |
1182 | ib = p->ib.ptr; |
1183 | idx = pkt->idx + 1; |
1184 | track = (struct r100_cs_track *)p->track; |
1185 | switch (pkt->opcode) { |
1186 | case PACKET3_3D_LOAD_VBPNTR: |
1187 | r = r100_packet3_load_vbpntr(p, pkt, idx); |
1188 | if (r) |
1189 | return r; |
1190 | break; |
1191 | case PACKET3_INDX_BUFFER: |
		r = radeon_cs_packet_next_reloc(p, &reloc, 0);
		if (r) {
			DRM_ERROR("No reloc for packet3 %d\n", pkt->opcode);
1195 | radeon_cs_dump_packet(p, pkt); |
1196 | return r; |
1197 | } |
		ib[idx+1] = radeon_get_ib_value(p, idx + 1) + ((u32)reloc->gpu_offset);
		r = r100_cs_track_check_pkt3_indx_buffer(p, pkt, reloc->robj);
1200 | if (r) { |
1201 | return r; |
1202 | } |
1203 | break; |
1204 | /* Draw packet */ |
1205 | case PACKET3_3D_DRAW_IMMD: |
		/* Number of dwords is vtx_size * (num_vertices - 1);
		 * PRIM_WALK must be equal to 3: vertex data is embedded
		 * in the cmd stream */
		if (((radeon_get_ib_value(p, idx + 1) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		track->immd_dwords = pkt->count - 1;
		r = r100_cs_track_check(p->rdev, track);
1216 | if (r) { |
1217 | return r; |
1218 | } |
1219 | break; |
1220 | case PACKET3_3D_DRAW_IMMD_2: |
		/* Number of dwords is vtx_size * (num_vertices - 1);
		 * PRIM_WALK must be equal to 3: vertex data is embedded
		 * in the cmd stream */
		if (((radeon_get_ib_value(p, idx) >> 4) & 0x3) != 3) {
			DRM_ERROR("PRIM_WALK must be 3 for IMMD draw\n");
			return -EINVAL;
		}
		track->vap_vf_cntl = radeon_get_ib_value(p, idx);
		track->immd_dwords = pkt->count;
		r = r100_cs_track_check(p->rdev, track);
1231 | if (r) { |
1232 | return r; |
1233 | } |
1234 | break; |
1235 | case PACKET3_3D_DRAW_VBUF: |
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
1238 | if (r) { |
1239 | return r; |
1240 | } |
1241 | break; |
1242 | case PACKET3_3D_DRAW_VBUF_2: |
1243 | track->vap_vf_cntl = radeon_get_ib_value(p, idx); |
		r = r100_cs_track_check(p->rdev, track);
1245 | if (r) { |
1246 | return r; |
1247 | } |
1248 | break; |
1249 | case PACKET3_3D_DRAW_INDX: |
		track->vap_vf_cntl = radeon_get_ib_value(p, idx + 1);
		r = r100_cs_track_check(p->rdev, track);
1252 | if (r) { |
1253 | return r; |
1254 | } |
1255 | break; |
1256 | case PACKET3_3D_DRAW_INDX_2: |
1257 | track->vap_vf_cntl = radeon_get_ib_value(p, idx); |
		r = r100_cs_track_check(p->rdev, track);
1259 | if (r) { |
1260 | return r; |
1261 | } |
1262 | break; |
1263 | case PACKET3_3D_CLEAR_HIZ: |
1264 | case PACKET3_3D_CLEAR_ZMASK: |
1265 | if (p->rdev->hyperz_filp != p->filp) |
1266 | return -EINVAL; |
1267 | break; |
1268 | case PACKET3_3D_CLEAR_CMASK: |
1269 | if (p->rdev->cmask_filp != p->filp) |
1270 | return -EINVAL; |
1271 | break; |
1272 | case PACKET3_NOP: |
1273 | break; |
1274 | default: |
		DRM_ERROR("Packet3 opcode %x not supported\n", pkt->opcode);
1276 | return -EINVAL; |
1277 | } |
1278 | return 0; |
1279 | } |
1280 | |
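/* Top level CS parser for r300: walk the IB packet by packet, sending
 * type-0 writes through the r300 safe register bitmap and type-3 packets
 * through r300_packet3_check. The track structure allocated here is
 * attached to p->track and freed when the parser is torn down.
 */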
1281 | int r300_cs_parse(struct radeon_cs_parser *p) |
1282 | { |
1283 | struct radeon_cs_packet pkt; |
1284 | struct r100_cs_track *track; |
1285 | int r; |
1286 | |
1287 | track = kzalloc(sizeof(*track), GFP_KERNEL); |
1288 | if (track == NULL) |
1289 | return -ENOMEM; |
	r100_cs_track_clear(p->rdev, track);
1291 | p->track = track; |
1292 | do { |
		r = radeon_cs_packet_parse(p, &pkt, p->idx);
1294 | if (r) { |
1295 | return r; |
1296 | } |
1297 | p->idx += pkt.count + 2; |
1298 | switch (pkt.type) { |
1299 | case RADEON_PACKET_TYPE0: |
			r = r100_cs_parse_packet0(p, &pkt,
						  p->rdev->config.r300.reg_safe_bm,
						  p->rdev->config.r300.reg_safe_bm_size,
						  &r300_packet0_check);
1304 | break; |
1305 | case RADEON_PACKET_TYPE2: |
1306 | break; |
1307 | case RADEON_PACKET_TYPE3: |
			r = r300_packet3_check(p, &pkt);
1309 | break; |
1310 | default: |
			DRM_ERROR("Unknown packet type %d!\n", pkt.type);
1312 | return -EINVAL; |
1313 | } |
1314 | if (r) { |
1315 | return r; |
1316 | } |
1317 | } while (p->idx < p->chunk_ib->length_dw); |
1318 | return 0; |
1319 | } |
1320 | |
1321 | void r300_set_reg_safe(struct radeon_device *rdev) |
1322 | { |
1323 | rdev->config.r300.reg_safe_bm = r300_reg_safe_bm; |
1324 | rdev->config.r300.reg_safe_bm_size = ARRAY_SIZE(r300_reg_safe_bm); |
1325 | } |
1326 | |
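/* Program the MC FB and AGP apertures with all MC clients stopped; on
 * non-AGP parts the AGP aperture is parked at a harmless location.
 */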
1327 | void r300_mc_program(struct radeon_device *rdev) |
1328 | { |
1329 | struct r100_mc_save save; |
1330 | |
1331 | r100_debugfs_mc_info_init(rdev); |
1332 | |
	/* Stop all MC clients */
	r100_mc_stop(rdev, &save);
1335 | if (rdev->flags & RADEON_IS_AGP) { |
1336 | WREG32(R_00014C_MC_AGP_LOCATION, |
1337 | S_00014C_MC_AGP_START(rdev->mc.gtt_start >> 16) | |
1338 | S_00014C_MC_AGP_TOP(rdev->mc.gtt_end >> 16)); |
1339 | WREG32(R_000170_AGP_BASE, lower_32_bits(rdev->mc.agp_base)); |
1340 | WREG32(R_00015C_AGP_BASE_2, |
1341 | upper_32_bits(rdev->mc.agp_base) & 0xff); |
1342 | } else { |
1343 | WREG32(R_00014C_MC_AGP_LOCATION, 0x0FFFFFFF); |
1344 | WREG32(R_000170_AGP_BASE, 0); |
1345 | WREG32(R_00015C_AGP_BASE_2, 0); |
1346 | } |
1347 | /* Wait for mc idle */ |
1348 | if (r300_mc_wait_for_idle(rdev)) |
		DRM_INFO("Failed to wait MC idle before programming MC.\n");
	/* Program the MC; this should be a 32-bit limited address space */
1351 | WREG32(R_000148_MC_FB_LOCATION, |
1352 | S_000148_MC_FB_START(rdev->mc.vram_start >> 16) | |
1353 | S_000148_MC_FB_TOP(rdev->mc.vram_end >> 16)); |
	r100_mc_resume(rdev, &save);
1355 | } |
1356 | |
1357 | void r300_clock_startup(struct radeon_device *rdev) |
1358 | { |
1359 | u32 tmp; |
1360 | |
1361 | if (radeon_dynclks != -1 && radeon_dynclks) |
		radeon_legacy_set_clock_gating(rdev, 1);
	/* We need to force on some of the blocks */
1364 | tmp = RREG32_PLL(R_00000D_SCLK_CNTL); |
1365 | tmp |= S_00000D_FORCE_CP(1) | S_00000D_FORCE_VIP(1); |
1366 | if ((rdev->family == CHIP_RV350) || (rdev->family == CHIP_RV380)) |
1367 | tmp |= S_00000D_FORCE_VAP(1); |
1368 | WREG32_PLL(R_00000D_SCLK_CNTL, tmp); |
1369 | } |
1370 | |
1371 | static int r300_startup(struct radeon_device *rdev) |
1372 | { |
1373 | int r; |
1374 | |
1375 | /* set common regs */ |
1376 | r100_set_common_regs(rdev); |
1377 | /* program mc */ |
1378 | r300_mc_program(rdev); |
1379 | /* Resume clock */ |
1380 | r300_clock_startup(rdev); |
1381 | /* Initialize GPU configuration (# pipes, ...) */ |
1382 | r300_gpu_init(rdev); |
1383 | /* Initialize GART (initialize after TTM so we can allocate |
1384 | * memory through TTM but finalize after TTM) */ |
1385 | if (rdev->flags & RADEON_IS_PCIE) { |
1386 | r = rv370_pcie_gart_enable(rdev); |
1387 | if (r) |
1388 | return r; |
1389 | } |
1390 | |
1391 | if (rdev->family == CHIP_R300 || |
1392 | rdev->family == CHIP_R350 || |
1393 | rdev->family == CHIP_RV350) |
1394 | r100_enable_bm(rdev); |
1395 | |
1396 | if (rdev->flags & RADEON_IS_PCI) { |
1397 | r = r100_pci_gart_enable(rdev); |
1398 | if (r) |
1399 | return r; |
1400 | } |
1401 | |
1402 | /* allocate wb buffer */ |
1403 | r = radeon_wb_init(rdev); |
1404 | if (r) |
1405 | return r; |
1406 | |
1407 | r = radeon_fence_driver_start_ring(rdev, RADEON_RING_TYPE_GFX_INDEX); |
1408 | if (r) { |
		dev_err(rdev->dev, "failed initializing CP fences (%d).\n", r);
1410 | return r; |
1411 | } |
1412 | |
1413 | /* Enable IRQ */ |
1414 | if (!rdev->irq.installed) { |
1415 | r = radeon_irq_kms_init(rdev); |
1416 | if (r) |
1417 | return r; |
1418 | } |
1419 | |
1420 | r100_irq_set(rdev); |
1421 | rdev->config.r300.hdp_cntl = RREG32(RADEON_HOST_PATH_CNTL); |
1422 | /* 1M ring buffer */ |
	r = r100_cp_init(rdev, 1024 * 1024);
	if (r) {
		dev_err(rdev->dev, "failed initializing CP (%d).\n", r);
1426 | return r; |
1427 | } |
1428 | |
1429 | r = radeon_ib_pool_init(rdev); |
1430 | if (r) { |
		dev_err(rdev->dev, "IB initialization failed (%d).\n", r);
1432 | return r; |
1433 | } |
1434 | |
1435 | return 0; |
1436 | } |
1437 | |
1438 | int r300_resume(struct radeon_device *rdev) |
1439 | { |
1440 | int r; |
1441 | |
	/* Make sure the GART is not active */
1443 | if (rdev->flags & RADEON_IS_PCIE) |
1444 | rv370_pcie_gart_disable(rdev); |
1445 | if (rdev->flags & RADEON_IS_PCI) |
1446 | r100_pci_gart_disable(rdev); |
1447 | /* Resume clock before doing reset */ |
1448 | r300_clock_startup(rdev); |
	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev, "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
1452 | RREG32(R_000E40_RBBM_STATUS), |
1453 | RREG32(R_0007C0_CP_STAT)); |
1454 | } |
1455 | /* post */ |
	radeon_combios_asic_init(rdev_to_drm(rdev));
1457 | /* Resume clock after posting */ |
1458 | r300_clock_startup(rdev); |
1459 | /* Initialize surface registers */ |
1460 | radeon_surface_init(rdev); |
1461 | |
1462 | rdev->accel_working = true; |
1463 | r = r300_startup(rdev); |
1464 | if (r) { |
1465 | rdev->accel_working = false; |
1466 | } |
1467 | return r; |
1468 | } |
1469 | |
1470 | int r300_suspend(struct radeon_device *rdev) |
1471 | { |
1472 | radeon_pm_suspend(rdev); |
1473 | r100_cp_disable(rdev); |
1474 | radeon_wb_disable(rdev); |
1475 | r100_irq_disable(rdev); |
1476 | if (rdev->flags & RADEON_IS_PCIE) |
1477 | rv370_pcie_gart_disable(rdev); |
1478 | if (rdev->flags & RADEON_IS_PCI) |
1479 | r100_pci_gart_disable(rdev); |
1480 | return 0; |
1481 | } |
1482 | |
1483 | void r300_fini(struct radeon_device *rdev) |
1484 | { |
1485 | radeon_pm_fini(rdev); |
1486 | r100_cp_fini(rdev); |
1487 | radeon_wb_fini(rdev); |
1488 | radeon_ib_pool_fini(rdev); |
1489 | radeon_gem_fini(rdev); |
1490 | if (rdev->flags & RADEON_IS_PCIE) |
1491 | rv370_pcie_gart_fini(rdev); |
1492 | if (rdev->flags & RADEON_IS_PCI) |
1493 | r100_pci_gart_fini(rdev); |
1494 | radeon_agp_fini(rdev); |
1495 | radeon_irq_kms_fini(rdev); |
1496 | radeon_fence_driver_fini(rdev); |
1497 | radeon_bo_fini(rdev); |
1498 | radeon_atombios_fini(rdev); |
	kfree(rdev->bios);
1500 | rdev->bios = NULL; |
1501 | } |
1502 | |
1503 | int r300_init(struct radeon_device *rdev) |
1504 | { |
1505 | int r; |
1506 | |
1507 | /* Disable VGA */ |
1508 | r100_vga_render_disable(rdev); |
1509 | /* Initialize scratch registers */ |
1510 | radeon_scratch_init(rdev); |
1511 | /* Initialize surface registers */ |
1512 | radeon_surface_init(rdev); |
1513 | /* TODO: disable VGA need to use VGA request */ |
1514 | /* restore some register to sane defaults */ |
1515 | r100_restore_sanity(rdev); |
	/* BIOS */
1517 | if (!radeon_get_bios(rdev)) { |
1518 | if (ASIC_IS_AVIVO(rdev)) |
1519 | return -EINVAL; |
1520 | } |
1521 | if (rdev->is_atom_bios) { |
		dev_err(rdev->dev, "Expecting combios for RS400/RS480 GPU\n");
1523 | return -EINVAL; |
1524 | } else { |
1525 | r = radeon_combios_init(rdev); |
1526 | if (r) |
1527 | return r; |
1528 | } |
	/* Reset the GPU before posting, otherwise ATOM will enter an infinite loop */
	if (radeon_asic_reset(rdev)) {
		dev_warn(rdev->dev,
			 "GPU reset failed! (0xE40=0x%08X, 0x7C0=0x%08X)\n",
1533 | RREG32(R_000E40_RBBM_STATUS), |
1534 | RREG32(R_0007C0_CP_STAT)); |
1535 | } |
	/* check if the card is posted or not */
1537 | if (radeon_boot_test_post_card(rdev) == false) |
1538 | return -EINVAL; |
1539 | /* Set asic errata */ |
1540 | r300_errata(rdev); |
1541 | /* Initialize clocks */ |
	radeon_get_clock_info(rdev_to_drm(rdev));
1543 | /* initialize AGP */ |
1544 | if (rdev->flags & RADEON_IS_AGP) { |
1545 | r = radeon_agp_init(rdev); |
1546 | if (r) { |
1547 | radeon_agp_disable(rdev); |
1548 | } |
1549 | } |
1550 | /* initialize memory controller */ |
1551 | r300_mc_init(rdev); |
1552 | /* Fence driver */ |
1553 | radeon_fence_driver_init(rdev); |
1554 | /* Memory manager */ |
1555 | r = radeon_bo_init(rdev); |
1556 | if (r) |
1557 | return r; |
1558 | if (rdev->flags & RADEON_IS_PCIE) { |
1559 | r = rv370_pcie_gart_init(rdev); |
1560 | if (r) |
1561 | return r; |
1562 | } |
1563 | if (rdev->flags & RADEON_IS_PCI) { |
1564 | r = r100_pci_gart_init(rdev); |
1565 | if (r) |
1566 | return r; |
1567 | } |
1568 | r300_set_reg_safe(rdev); |
1569 | |
1570 | /* Initialize power management */ |
1571 | radeon_pm_init(rdev); |
1572 | |
1573 | rdev->accel_working = true; |
1574 | r = r300_startup(rdev); |
1575 | if (r) { |
1576 | /* Something went wrong with the accel init, so stop accel */ |
		dev_err(rdev->dev, "Disabling GPU acceleration\n");
1578 | r100_cp_fini(rdev); |
1579 | radeon_wb_fini(rdev); |
1580 | radeon_ib_pool_fini(rdev); |
1581 | radeon_irq_kms_fini(rdev); |
1582 | if (rdev->flags & RADEON_IS_PCIE) |
1583 | rv370_pcie_gart_fini(rdev); |
1584 | if (rdev->flags & RADEON_IS_PCI) |
1585 | r100_pci_gart_fini(rdev); |
1586 | radeon_agp_fini(rdev); |
1587 | rdev->accel_working = false; |
1588 | } |
1589 | return 0; |
1590 | } |
1591 | |