1 | // SPDX-License-Identifier: MIT |
2 | /* |
3 | * Copyright © 2019 Intel Corporation |
4 | */ |
5 | |
6 | #include "i915_drv.h" |
7 | #include "i915_pci.h" |
8 | #include "i915_reg.h" |
9 | #include "intel_memory_region.h" |
10 | #include "intel_pci_config.h" |
11 | #include "intel_region_lmem.h" |
12 | #include "intel_region_ttm.h" |
13 | #include "gem/i915_gem_lmem.h" |
14 | #include "gem/i915_gem_region.h" |
15 | #include "gem/i915_gem_ttm.h" |
16 | #include "gt/intel_gt.h" |
17 | #include "gt/intel_gt_mcr.h" |
18 | #include "gt/intel_gt_regs.h" |
19 | |
20 | #ifdef CONFIG_64BIT |
21 | static void _release_bars(struct pci_dev *pdev) |
22 | { |
23 | int resno; |
24 | |
25 | for (resno = PCI_STD_RESOURCES; resno < PCI_STD_RESOURCE_END; resno++) { |
26 | if (pci_resource_len(pdev, resno)) |
27 | pci_release_resource(dev: pdev, resno); |
28 | } |
29 | } |
30 | |
31 | static void |
32 | _resize_bar(struct drm_i915_private *i915, int resno, resource_size_t size) |
33 | { |
34 | struct pci_dev *pdev = to_pci_dev(i915->drm.dev); |
35 | int bar_size = pci_rebar_bytes_to_size(bytes: size); |
36 | int ret; |
37 | |
38 | _release_bars(pdev); |
39 | |
40 | ret = pci_resize_resource(dev: pdev, i: resno, size: bar_size); |
41 | if (ret) { |
42 | drm_info(&i915->drm, "Failed to resize BAR%d to %dM (%pe)\n" , |
43 | resno, 1 << bar_size, ERR_PTR(ret)); |
44 | return; |
45 | } |
46 | |
47 | drm_info(&i915->drm, "BAR%d resized to %dM\n" , resno, 1 << bar_size); |
48 | } |
49 | |
50 | static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t lmem_size) |
51 | { |
52 | struct pci_dev *pdev = to_pci_dev(i915->drm.dev); |
53 | struct pci_bus *root = pdev->bus; |
54 | struct resource *root_res; |
55 | resource_size_t rebar_size; |
56 | resource_size_t current_size; |
57 | intel_wakeref_t wakeref; |
58 | u32 pci_cmd; |
59 | int i; |
60 | |
61 | current_size = roundup_pow_of_two(pci_resource_len(pdev, GEN12_LMEM_BAR)); |
62 | |
63 | if (i915->params.lmem_bar_size) { |
64 | u32 bar_sizes; |
65 | |
66 | rebar_size = i915->params.lmem_bar_size * |
67 | (resource_size_t)SZ_1M; |
68 | bar_sizes = pci_rebar_get_possible_sizes(pdev, GEN12_LMEM_BAR); |
69 | |
70 | if (rebar_size == current_size) |
71 | return; |
72 | |
73 | if (!(bar_sizes & BIT(pci_rebar_bytes_to_size(rebar_size))) || |
74 | rebar_size >= roundup_pow_of_two(lmem_size)) { |
75 | rebar_size = lmem_size; |
76 | |
77 | drm_info(&i915->drm, |
78 | "Given bar size is not within supported size, setting it to default: %llu\n" , |
79 | (u64)lmem_size >> 20); |
80 | } |
81 | } else { |
82 | rebar_size = current_size; |
83 | |
84 | if (rebar_size != roundup_pow_of_two(lmem_size)) |
85 | rebar_size = lmem_size; |
86 | else |
87 | return; |
88 | } |
89 | |
90 | /* Find out if root bus contains 64bit memory addressing */ |
91 | while (root->parent) |
92 | root = root->parent; |
93 | |
94 | pci_bus_for_each_resource(root, root_res, i) { |
95 | if (root_res && root_res->flags & (IORESOURCE_MEM | IORESOURCE_MEM_64) && |
96 | root_res->start > 0x100000000ull) |
97 | break; |
98 | } |
99 | |
100 | /* pci_resize_resource will fail anyways */ |
101 | if (!root_res) { |
102 | drm_info(&i915->drm, "Can't resize LMEM BAR - platform support is missing\n" ); |
103 | return; |
104 | } |
105 | |
106 | /* |
107 | * Releasing forcewake during BAR resizing results in later forcewake |
108 | * ack timeouts and former can happen any time - it is asynchronous. |
109 | * Grabbing all forcewakes prevents it. |
110 | */ |
111 | with_intel_runtime_pm(i915->uncore.rpm, wakeref) { |
112 | intel_uncore_forcewake_get(uncore: &i915->uncore, domains: FORCEWAKE_ALL); |
113 | |
114 | /* First disable PCI memory decoding references */ |
115 | pci_read_config_dword(dev: pdev, PCI_COMMAND, val: &pci_cmd); |
116 | pci_write_config_dword(dev: pdev, PCI_COMMAND, |
117 | val: pci_cmd & ~PCI_COMMAND_MEMORY); |
118 | |
119 | _resize_bar(i915, GEN12_LMEM_BAR, size: rebar_size); |
120 | |
121 | pci_assign_unassigned_bus_resources(bus: pdev->bus); |
122 | pci_write_config_dword(dev: pdev, PCI_COMMAND, val: pci_cmd); |
123 | intel_uncore_forcewake_put(uncore: &i915->uncore, domains: FORCEWAKE_ALL); |
124 | } |
125 | } |
126 | #else |
/* Without CONFIG_64BIT the LMEM BAR cannot be resized — intentional no-op. */
static void i915_resize_lmem_bar(struct drm_i915_private *i915, resource_size_t lmem_size) {}
128 | #endif |
129 | |
130 | static int |
131 | region_lmem_release(struct intel_memory_region *mem) |
132 | { |
133 | int ret; |
134 | |
135 | ret = intel_region_ttm_fini(mem); |
136 | io_mapping_fini(mapping: &mem->iomap); |
137 | |
138 | return ret; |
139 | } |
140 | |
141 | static int |
142 | region_lmem_init(struct intel_memory_region *mem) |
143 | { |
144 | int ret; |
145 | |
146 | if (!io_mapping_init_wc(iomap: &mem->iomap, |
147 | base: mem->io.start, |
148 | size: resource_size(res: &mem->io))) |
149 | return -EIO; |
150 | |
151 | ret = intel_region_ttm_init(mem); |
152 | if (ret) |
153 | goto out_no_buddy; |
154 | |
155 | return 0; |
156 | |
157 | out_no_buddy: |
158 | io_mapping_fini(mapping: &mem->iomap); |
159 | |
160 | return ret; |
161 | } |
162 | |
/* Memory-region hooks for device-local memory (LMEM), backed by TTM. */
static const struct intel_memory_region_ops intel_region_lmem_ops = {
	.init = region_lmem_init,
	.release = region_lmem_release,
	.init_object = __i915_gem_ttm_object_init,
};
168 | |
169 | static bool get_legacy_lowmem_region(struct intel_uncore *uncore, |
170 | u64 *start, u32 *size) |
171 | { |
172 | if (!IS_DG1(uncore->i915)) |
173 | return false; |
174 | |
175 | *start = 0; |
176 | *size = SZ_1M; |
177 | |
178 | drm_dbg(&uncore->i915->drm, "LMEM: reserved legacy low-memory [0x%llx-0x%llx]\n" , |
179 | *start, *start + *size); |
180 | |
181 | return true; |
182 | } |
183 | |
184 | static int reserve_lowmem_region(struct intel_uncore *uncore, |
185 | struct intel_memory_region *mem) |
186 | { |
187 | u64 reserve_start; |
188 | u32 reserve_size; |
189 | int ret; |
190 | |
191 | if (!get_legacy_lowmem_region(uncore, start: &reserve_start, size: &reserve_size)) |
192 | return 0; |
193 | |
194 | ret = intel_memory_region_reserve(mem, offset: reserve_start, size: reserve_size); |
195 | if (ret) |
196 | drm_err(&uncore->i915->drm, "LMEM: reserving low memory region failed\n" ); |
197 | |
198 | return ret; |
199 | } |
200 | |
201 | static struct intel_memory_region *setup_lmem(struct intel_gt *gt) |
202 | { |
203 | struct drm_i915_private *i915 = gt->i915; |
204 | struct intel_uncore *uncore = gt->uncore; |
205 | struct pci_dev *pdev = to_pci_dev(i915->drm.dev); |
206 | struct intel_memory_region *mem; |
207 | resource_size_t min_page_size; |
208 | resource_size_t io_start; |
209 | resource_size_t io_size; |
210 | resource_size_t lmem_size; |
211 | int err; |
212 | |
213 | if (!IS_DGFX(i915)) |
214 | return ERR_PTR(error: -ENODEV); |
215 | |
216 | if (!i915_pci_resource_valid(pdev, GEN12_LMEM_BAR)) |
217 | return ERR_PTR(error: -ENXIO); |
218 | |
219 | if (HAS_FLAT_CCS(i915)) { |
220 | resource_size_t lmem_range; |
221 | u64 tile_stolen, flat_ccs_base; |
222 | |
223 | lmem_range = intel_gt_mcr_read_any(gt: to_gt(i915), XEHP_TILE0_ADDR_RANGE) & 0xFFFF; |
224 | lmem_size = lmem_range >> XEHP_TILE_LMEM_RANGE_SHIFT; |
225 | lmem_size *= SZ_1G; |
226 | |
227 | flat_ccs_base = intel_gt_mcr_read_any(gt, XEHP_FLAT_CCS_BASE_ADDR); |
228 | flat_ccs_base = (flat_ccs_base >> XEHP_CCS_BASE_SHIFT) * SZ_64K; |
229 | |
230 | if (GEM_WARN_ON(lmem_size < flat_ccs_base)) |
231 | return ERR_PTR(error: -EIO); |
232 | |
233 | tile_stolen = lmem_size - flat_ccs_base; |
234 | |
235 | /* If the FLAT_CCS_BASE_ADDR register is not populated, flag an error */ |
236 | if (tile_stolen == lmem_size) |
237 | drm_err(&i915->drm, |
238 | "CCS_BASE_ADDR register did not have expected value\n" ); |
239 | |
240 | lmem_size -= tile_stolen; |
241 | } else { |
242 | /* Stolen starts from GSMBASE without CCS */ |
243 | lmem_size = intel_uncore_read64(uncore: &i915->uncore, GEN6_GSMBASE); |
244 | } |
245 | |
246 | i915_resize_lmem_bar(i915, lmem_size); |
247 | |
248 | if (i915->params.lmem_size > 0) { |
249 | lmem_size = min_t(resource_size_t, lmem_size, |
250 | mul_u32_u32(i915->params.lmem_size, SZ_1M)); |
251 | } |
252 | |
253 | io_start = pci_resource_start(pdev, GEN12_LMEM_BAR); |
254 | io_size = min(pci_resource_len(pdev, GEN12_LMEM_BAR), lmem_size); |
255 | if (!io_size) |
256 | return ERR_PTR(error: -EIO); |
257 | |
258 | min_page_size = HAS_64K_PAGES(i915) ? I915_GTT_PAGE_SIZE_64K : |
259 | I915_GTT_PAGE_SIZE_4K; |
260 | mem = intel_memory_region_create(i915, |
261 | start: 0, |
262 | size: lmem_size, |
263 | min_page_size, |
264 | io_start, |
265 | io_size, |
266 | type: INTEL_MEMORY_LOCAL, |
267 | instance: 0, |
268 | ops: &intel_region_lmem_ops); |
269 | if (IS_ERR(ptr: mem)) |
270 | return mem; |
271 | |
272 | err = reserve_lowmem_region(uncore, mem); |
273 | if (err) |
274 | goto err_region_put; |
275 | |
276 | if (io_size < lmem_size) |
277 | drm_info(&i915->drm, "Using a reduced BAR size of %lluMiB. Consider enabling 'Resizable BAR' or similar, if available in the BIOS.\n" , |
278 | (u64)io_size >> 20); |
279 | |
280 | return mem; |
281 | |
282 | err_region_put: |
283 | intel_memory_region_destroy(mem); |
284 | return ERR_PTR(error: err); |
285 | } |
286 | |
/**
 * intel_gt_setup_lmem - create the local-memory (LMEM) region for a GT
 * @gt: the GT whose local memory should be probed and set up
 *
 * Return: the new memory region on success, ERR_PTR on failure.
 */
struct intel_memory_region *intel_gt_setup_lmem(struct intel_gt *gt)
{
	return setup_lmem(gt);
}
291 | |