// SPDX-License-Identifier: GPL-2.0
/*
 * arch-independent dma-mapping routines
 *
 * Copyright (c) 2006  SUSE Linux Products GmbH
 * Copyright (c) 2006  Tejun Heo <teheo@suse.de>
 */
#include <linux/memblock.h> /* for max_pfn */
#include <linux/acpi.h>
#include <linux/dma-map-ops.h>
#include <linux/export.h>
#include <linux/gfp.h>
#include <linux/iommu-dma.h>
#include <linux/kmsan.h>
#include <linux/of_device.h>
#include <linux/slab.h>
#include <linux/vmalloc.h>
#include "debug.h"
#include "direct.h"

#define CREATE_TRACE_POINTS
#include <trace/events/dma.h>

#if defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_DEVICE) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU) || \
	defined(CONFIG_ARCH_HAS_SYNC_DMA_FOR_CPU_ALL)
bool dma_default_coherent = IS_ENABLED(CONFIG_ARCH_DMA_DEFAULT_COHERENT);
#endif

/*
 * Managed DMA API
 */
struct dma_devres {
	size_t		size;
	void		*vaddr;
	dma_addr_t	dma_handle;
	unsigned long	attrs;
};

static void dmam_release(struct device *dev, void *res)
{
	struct dma_devres *this = res;

	dma_free_attrs(dev, this->size, this->vaddr, this->dma_handle,
			this->attrs);
}

static int dmam_match(struct device *dev, void *res, void *match_data)
{
	struct dma_devres *this = res, *match = match_data;

	if (this->vaddr == match->vaddr) {
		WARN_ON(this->size != match->size ||
			this->dma_handle != match->dma_handle);
		return 1;
	}
	return 0;
}

/**
 * dmam_free_coherent - Managed dma_free_coherent()
 * @dev: Device to free coherent memory for
 * @size: Size of allocation
 * @vaddr: Virtual address of the memory to free
 * @dma_handle: DMA handle of the memory to free
 *
 * Managed dma_free_coherent().
 */
void dmam_free_coherent(struct device *dev, size_t size, void *vaddr,
			dma_addr_t dma_handle)
{
	struct dma_devres match_data = { size, vaddr, dma_handle };

	WARN_ON(devres_destroy(dev, dmam_release, dmam_match, &match_data));
	dma_free_coherent(dev, size, vaddr, dma_handle);
}
EXPORT_SYMBOL(dmam_free_coherent);

/**
 * dmam_alloc_attrs - Managed dma_alloc_attrs()
 * @dev: Device to allocate non-coherent memory for
 * @size: Size of allocation
 * @dma_handle: Out argument for allocated DMA handle
 * @gfp: Allocation flags
 * @attrs: Flags in the DMA_ATTR_* namespace.
 *
 * Managed dma_alloc_attrs().  Memory allocated using this function will be
 * automatically released on driver detach.
 *
 * RETURNS:
 * Pointer to allocated memory on success, NULL on failure.
 */
void *dmam_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t gfp, unsigned long attrs)
{
	struct dma_devres *dr;
	void *vaddr;

	dr = devres_alloc(dmam_release, sizeof(*dr), gfp);
	if (!dr)
		return NULL;

	vaddr = dma_alloc_attrs(dev, size, dma_handle, gfp, attrs);
	if (!vaddr) {
		devres_free(dr);
		return NULL;
	}

	dr->vaddr = vaddr;
	dr->dma_handle = *dma_handle;
	dr->size = size;
	dr->attrs = attrs;

	devres_add(dev, dr);

	return vaddr;
}
EXPORT_SYMBOL(dmam_alloc_attrs);
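
/*
 * Illustrative driver-side use of the managed allocator from a probe path.
 * This is a sketch only (not part of this file); the foo_* names are
 * hypothetical:
 *
 *	static int foo_probe(struct platform_device *pdev)
 *	{
 *		struct device *dev = &pdev->dev;
 *		dma_addr_t ring_dma;
 *		void *ring;
 *
 *		ring = dmam_alloc_attrs(dev, SZ_4K, &ring_dma, GFP_KERNEL, 0);
 *		if (!ring)
 *			return -ENOMEM;
 *
 *		// No explicit free is needed: devres invokes dmam_release()
 *		// and thus dma_free_attrs() when the driver detaches.
 *		return 0;
 *	}
 */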

static bool dma_go_direct(struct device *dev, dma_addr_t mask,
		const struct dma_map_ops *ops)
{
	if (use_dma_iommu(dev))
		return false;

	if (likely(!ops))
		return true;

#ifdef CONFIG_DMA_OPS_BYPASS
	if (dev->dma_ops_bypass)
		return min_not_zero(mask, dev->bus_dma_limit) >=
			    dma_direct_get_required_mask(dev);
#endif
	return false;
}

/*
 * Check if the device uses a direct mapping for streaming DMA operations.
 * This allows IOMMU drivers to set a bypass mode if the DMA mask is large
 * enough.
 */
static inline bool dma_alloc_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, dev->coherent_dma_mask, ops);
}

static inline bool dma_map_direct(struct device *dev,
		const struct dma_map_ops *ops)
{
	return dma_go_direct(dev, *dev->dma_mask, ops);
}

dma_addr_t dma_map_phys(struct device *dev, phys_addr_t phys, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	bool is_mmio = attrs & DMA_ATTR_MMIO;
	dma_addr_t addr = DMA_MAPPING_ERROR;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return DMA_MAPPING_ERROR;

	if (dma_map_direct(dev, ops) ||
	    (!is_mmio && arch_dma_map_phys_direct(dev, phys + size)))
		addr = dma_direct_map_phys(dev, phys, size, dir, attrs);
	else if (use_dma_iommu(dev))
		addr = iommu_dma_map_phys(dev, phys, size, dir, attrs);
	else if (ops->map_phys)
		addr = ops->map_phys(dev, phys, size, dir, attrs);

	if (!is_mmio)
		kmsan_handle_dma(phys, size, dir);
	trace_dma_map_phys(dev, phys, addr, size, dir, attrs);
	debug_dma_map_phys(dev, phys, size, dir, addr, attrs);

	return addr;
}
EXPORT_SYMBOL_GPL(dma_map_phys);

dma_addr_t dma_map_page_attrs(struct device *dev, struct page *page,
		size_t offset, size_t size, enum dma_data_direction dir,
		unsigned long attrs)
{
	phys_addr_t phys = page_to_phys(page) + offset;

	if (unlikely(attrs & DMA_ATTR_MMIO))
		return DMA_MAPPING_ERROR;

	if (IS_ENABLED(CONFIG_DMA_API_DEBUG) &&
	    WARN_ON_ONCE(is_zone_device_page(page)))
		return DMA_MAPPING_ERROR;

	return dma_map_phys(dev, phys, size, dir, attrs);
}
EXPORT_SYMBOL(dma_map_page_attrs);

void dma_unmap_phys(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	bool is_mmio = attrs & DMA_ATTR_MMIO;

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops) ||
	    (!is_mmio && arch_dma_unmap_phys_direct(dev, addr + size)))
		dma_direct_unmap_phys(dev, addr, size, dir, attrs);
	else if (use_dma_iommu(dev))
		iommu_dma_unmap_phys(dev, addr, size, dir, attrs);
	else if (ops->unmap_phys)
		ops->unmap_phys(dev, addr, size, dir, attrs);
	trace_dma_unmap_phys(dev, addr, size, dir, attrs);
	debug_dma_unmap_phys(dev, addr, size, dir);
}
EXPORT_SYMBOL_GPL(dma_unmap_phys);

void dma_unmap_page_attrs(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	if (unlikely(attrs & DMA_ATTR_MMIO))
		return;

	dma_unmap_phys(dev, addr, size, dir, attrs);
}
EXPORT_SYMBOL(dma_unmap_page_attrs);
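
/*
 * Typical streaming use of the single-buffer API built on top of
 * dma_map_phys()/dma_unmap_phys().  A sketch (not part of this file); buf,
 * len and foo_hw_start_tx() are hypothetical:
 *
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_TO_DEVICE);
 *
 *	if (dma_mapping_error(dev, dma))
 *		return -ENOMEM;
 *	foo_hw_start_tx(dev, dma, len);		// device reads the buffer
 *	...
 *	dma_unmap_single(dev, dma, len, DMA_TO_DEVICE);
 */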

static int __dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
	 int nents, enum dma_data_direction dir, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	int ents;

	BUG_ON(!valid_dma_direction(dir));

	if (WARN_ON_ONCE(!dev->dma_mask))
		return 0;

	if (dma_map_direct(dev, ops) ||
	    arch_dma_map_sg_direct(dev, sg, nents))
		ents = dma_direct_map_sg(dev, sg, nents, dir, attrs);
	else if (use_dma_iommu(dev))
		ents = iommu_dma_map_sg(dev, sg, nents, dir, attrs);
	else
		ents = ops->map_sg(dev, sg, nents, dir, attrs);

	if (ents > 0) {
		kmsan_handle_dma_sg(sg, nents, dir);
		trace_dma_map_sg(dev, sg, nents, ents, dir, attrs);
		debug_dma_map_sg(dev, sg, nents, ents, dir, attrs);
	} else if (WARN_ON_ONCE(ents != -EINVAL && ents != -ENOMEM &&
				ents != -EIO && ents != -EREMOTEIO)) {
		trace_dma_map_sg_err(dev, sg, nents, ents, dir, attrs);
		return -EIO;
	}

	return ents;
}

/**
 * dma_map_sg_attrs - Map the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sg:		The scatterlist describing the buffer
 * @nents:	Number of entries to map
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist passed in the sg argument with
 * nents segments for the @dir DMA operation by the @dev device.
 *
 * Returns the number of mapped entries (which can be less than nents)
 * on success.  Zero is returned for any error.
 *
 * dma_unmap_sg_attrs() should be used to unmap the buffer with the
 * original sg and original nents (not the value returned by this function).
 */
unsigned int dma_map_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir, unsigned long attrs)
{
	int ret;

	ret = __dma_map_sg_attrs(dev, sg, nents, dir, attrs);
	if (ret < 0)
		return 0;
	return ret;
}
EXPORT_SYMBOL(dma_map_sg_attrs);

/**
 * dma_map_sgtable - Map the given buffer for DMA
 * @dev:	The device for which to perform the DMA operation
 * @sgt:	The sg_table object describing the buffer
 * @dir:	DMA direction
 * @attrs:	Optional DMA attributes for the map operation
 *
 * Maps a buffer described by a scatterlist stored in the given sg_table
 * object for the @dir DMA operation by the @dev device.  After success, the
 * ownership for the buffer is transferred to the DMA domain.  One has to
 * call dma_sync_sgtable_for_cpu() or dma_unmap_sgtable() to move the
 * ownership of the buffer back to the CPU domain before touching the
 * buffer by the CPU.
 *
 * Returns 0 on success or a negative error code on error.  The following
 * error codes are supported with the given meaning:
 *
 *   -EINVAL		An invalid argument, unaligned access or other error
 *			in usage.  Will not succeed if retried.
 *   -ENOMEM		Insufficient resources (like memory or IOVA space) to
 *			complete the mapping.  Should succeed if retried later.
 *   -EIO		Legacy error code with an unknown meaning, e.g. this is
 *			returned if a lower level call returned
 *			DMA_MAPPING_ERROR.
 *   -EREMOTEIO		The DMA device cannot access P2PDMA memory specified
 *			in the sg_table.  This will not succeed if retried.
 */
int dma_map_sgtable(struct device *dev, struct sg_table *sgt,
		enum dma_data_direction dir, unsigned long attrs)
{
	int nents;

	nents = __dma_map_sg_attrs(dev, sgt->sgl, sgt->orig_nents, dir, attrs);
	if (nents < 0)
		return nents;
	sgt->nents = nents;
	return 0;
}
EXPORT_SYMBOL_GPL(dma_map_sgtable);
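
/*
 * Typical dma_map_sgtable() usage, as a sketch (not part of this file).  The
 * sg_table is assumed to have been built with sg_alloc_table() or a similar
 * helper; foo_program_hw() is hypothetical:
 *
 *	struct scatterlist *sg;
 *	int i, ret;
 *
 *	ret = dma_map_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
 *	if (ret)
 *		return ret;
 *	for_each_sgtable_dma_sg(sgt, sg, i)
 *		foo_program_hw(sg_dma_address(sg), sg_dma_len(sg));
 *	...
 *	dma_unmap_sgtable(dev, sgt, DMA_TO_DEVICE, 0);
 */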

void dma_unmap_sg_attrs(struct device *dev, struct scatterlist *sg,
		int nents, enum dma_data_direction dir,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	trace_dma_unmap_sg(dev, sg, nents, dir, attrs);
	debug_dma_unmap_sg(dev, sg, nents, dir);
	if (dma_map_direct(dev, ops) ||
	    arch_dma_unmap_sg_direct(dev, sg, nents))
		dma_direct_unmap_sg(dev, sg, nents, dir, attrs);
	else if (use_dma_iommu(dev))
		iommu_dma_unmap_sg(dev, sg, nents, dir, attrs);
	else if (ops->unmap_sg)
		ops->unmap_sg(dev, sg, nents, dir, attrs);
}
EXPORT_SYMBOL(dma_unmap_sg_attrs);

dma_addr_t dma_map_resource(struct device *dev, phys_addr_t phys_addr,
		size_t size, enum dma_data_direction dir, unsigned long attrs)
{
	if (IS_ENABLED(CONFIG_DMA_API_DEBUG) &&
	    WARN_ON_ONCE(pfn_valid(PHYS_PFN(phys_addr))))
		return DMA_MAPPING_ERROR;

	return dma_map_phys(dev, phys_addr, size, dir, attrs | DMA_ATTR_MMIO);
}
EXPORT_SYMBOL(dma_map_resource);

void dma_unmap_resource(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir, unsigned long attrs)
{
	dma_unmap_phys(dev, addr, size, dir, attrs | DMA_ATTR_MMIO);
}
EXPORT_SYMBOL(dma_unmap_resource);

#ifdef CONFIG_DMA_NEED_SYNC
void __dma_sync_single_for_cpu(struct device *dev, dma_addr_t addr, size_t size,
		enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_cpu(dev, addr, size, dir);
	else if (use_dma_iommu(dev))
		iommu_dma_sync_single_for_cpu(dev, addr, size, dir);
	else if (ops->sync_single_for_cpu)
		ops->sync_single_for_cpu(dev, addr, size, dir);
	trace_dma_sync_single_for_cpu(dev, addr, size, dir);
	debug_dma_sync_single_for_cpu(dev, addr, size, dir);
}
EXPORT_SYMBOL(__dma_sync_single_for_cpu);

void __dma_sync_single_for_device(struct device *dev, dma_addr_t addr,
		size_t size, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_single_for_device(dev, addr, size, dir);
	else if (use_dma_iommu(dev))
		iommu_dma_sync_single_for_device(dev, addr, size, dir);
	else if (ops->sync_single_for_device)
		ops->sync_single_for_device(dev, addr, size, dir);
	trace_dma_sync_single_for_device(dev, addr, size, dir);
	debug_dma_sync_single_for_device(dev, addr, size, dir);
}
EXPORT_SYMBOL(__dma_sync_single_for_device);

void __dma_sync_sg_for_cpu(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (use_dma_iommu(dev))
		iommu_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_cpu)
		ops->sync_sg_for_cpu(dev, sg, nelems, dir);
	trace_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_cpu(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(__dma_sync_sg_for_cpu);

void __dma_sync_sg_for_device(struct device *dev, struct scatterlist *sg,
		int nelems, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	BUG_ON(!valid_dma_direction(dir));
	if (dma_map_direct(dev, ops))
		dma_direct_sync_sg_for_device(dev, sg, nelems, dir);
	else if (use_dma_iommu(dev))
		iommu_dma_sync_sg_for_device(dev, sg, nelems, dir);
	else if (ops->sync_sg_for_device)
		ops->sync_sg_for_device(dev, sg, nelems, dir);
	trace_dma_sync_sg_for_device(dev, sg, nelems, dir);
	debug_dma_sync_sg_for_device(dev, sg, nelems, dir);
}
EXPORT_SYMBOL(__dma_sync_sg_for_device);
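
/*
 * The sync calls above implement the CPU <-> device ownership handover for
 * streaming mappings.  A sketch of the usual pattern for a long-lived
 * DMA_FROM_DEVICE buffer (not part of this file; foo_process() is
 * hypothetical):
 *
 *	// the device owns the buffer after mapping
 *	dma_addr_t dma = dma_map_single(dev, buf, len, DMA_FROM_DEVICE);
 *
 *	// on each completion: hand the buffer to the CPU, read it, then
 *	// give it back to the device for the next transfer
 *	dma_sync_single_for_cpu(dev, dma, len, DMA_FROM_DEVICE);
 *	foo_process(buf, len);
 *	dma_sync_single_for_device(dev, dma, len, DMA_FROM_DEVICE);
 */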

bool __dma_need_sync(struct device *dev, dma_addr_t dma_addr)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops))
		/*
		 * dma_skip_sync could've been reset on first SWIOTLB buffer
		 * mapping, but @dma_addr is not necessarily an SWIOTLB buffer.
		 * In this case, fall back to more granular check.
		 */
		return dma_direct_need_sync(dev, dma_addr);
	return true;
}
EXPORT_SYMBOL_GPL(__dma_need_sync);

/**
 * dma_need_unmap - does this device need dma_unmap_* operations
 * @dev: device to check
 *
 * If this function returns %false, drivers can skip calling dma_unmap_* after
 * finishing an I/O.  This function must be called after all mappings that might
 * need to be unmapped have been performed.
 */
bool dma_need_unmap(struct device *dev)
{
	if (!dma_map_direct(dev, get_dma_ops(dev)))
		return true;
	if (!dev->dma_skip_sync)
		return true;
	return IS_ENABLED(CONFIG_DMA_API_DEBUG);
}
EXPORT_SYMBOL_GPL(dma_need_unmap);

static void dma_setup_need_sync(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_map_direct(dev, ops) || use_dma_iommu(dev))
		/*
		 * dma_skip_sync will be reset to %false on first SWIOTLB buffer
		 * mapping, if any.  During the device initialization, it's
		 * enough to check only for the DMA coherence.
		 */
		dev->dma_skip_sync = dev_is_dma_coherent(dev);
	else if (!ops->sync_single_for_device && !ops->sync_single_for_cpu &&
		 !ops->sync_sg_for_device && !ops->sync_sg_for_cpu)
		/*
		 * Synchronization is not possible when none of DMA sync ops
		 * is set.
		 */
		dev->dma_skip_sync = true;
	else
		dev->dma_skip_sync = false;
}
#else /* !CONFIG_DMA_NEED_SYNC */
static inline void dma_setup_need_sync(struct device *dev) { }
#endif /* !CONFIG_DMA_NEED_SYNC */

/*
 * The whole dma_get_sgtable() idea is fundamentally unsafe - it seems
 * that the intention is to allow exporting memory allocated via the
 * coherent DMA APIs through the dma_buf API, which only accepts a
 * scattertable.  This presents a couple of problems:
 * 1. Not all memory allocated via the coherent DMA APIs is backed by
 *    a struct page
 * 2. Passing coherent DMA memory into the streaming APIs is not allowed
 *    as we will try to flush the memory through a different alias to that
 *    actually being used (and the flushes are redundant.)
 */
int dma_get_sgtable_attrs(struct device *dev, struct sg_table *sgt,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (use_dma_iommu(dev))
		return iommu_dma_get_sgtable(dev, sgt, cpu_addr, dma_addr,
				size, attrs);
	if (!ops->get_sgtable)
		return -ENXIO;
	return ops->get_sgtable(dev, sgt, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_get_sgtable_attrs);

#ifdef CONFIG_MMU
/*
 * Return the page attributes used for mapping dma_alloc_* memory, either in
 * kernel space if remapping is needed, or to userspace through dma_mmap_*.
 */
pgprot_t dma_pgprot(struct device *dev, pgprot_t prot, unsigned long attrs)
{
	if (dev_is_dma_coherent(dev))
		return prot;
#ifdef CONFIG_ARCH_HAS_DMA_WRITE_COMBINE
	if (attrs & DMA_ATTR_WRITE_COMBINE)
		return pgprot_writecombine(prot);
#endif
	return pgprot_dmacoherent(prot);
}
#endif /* CONFIG_MMU */

/**
 * dma_can_mmap - check if a given device supports dma_mmap_*
 * @dev: device to check
 *
 * Returns %true if @dev supports dma_mmap_coherent() and dma_mmap_attrs() to
 * map DMA allocations to userspace.
 */
bool dma_can_mmap(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_can_mmap(dev);
	if (use_dma_iommu(dev))
		return true;
	return ops->mmap != NULL;
}
EXPORT_SYMBOL_GPL(dma_can_mmap);

/**
 * dma_mmap_attrs - map a coherent DMA allocation into user space
 * @dev: valid struct device pointer, or NULL for ISA and EISA-like devices
 * @vma: vm_area_struct describing requested user mapping
 * @cpu_addr: kernel CPU-view address returned from dma_alloc_attrs
 * @dma_addr: device-view address returned from dma_alloc_attrs
 * @size: size of memory originally requested in dma_alloc_attrs
 * @attrs: attributes of mapping properties requested in dma_alloc_attrs
 *
 * Map a coherent DMA buffer previously allocated by dma_alloc_attrs into user
 * space.  The coherent DMA buffer must not be freed by the driver until the
 * user space mapping has been released.
 */
int dma_mmap_attrs(struct device *dev, struct vm_area_struct *vma,
		void *cpu_addr, dma_addr_t dma_addr, size_t size,
		unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_mmap(dev, vma, cpu_addr, dma_addr, size,
				attrs);
	if (use_dma_iommu(dev))
		return iommu_dma_mmap(dev, vma, cpu_addr, dma_addr, size,
				attrs);
	if (!ops->mmap)
		return -ENXIO;
	return ops->mmap(dev, vma, cpu_addr, dma_addr, size, attrs);
}
EXPORT_SYMBOL(dma_mmap_attrs);
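
/*
 * dma_mmap_attrs() is normally called from a driver's ->mmap() file
 * operation to expose a coherent buffer to user space.  A sketch (not part
 * of this file); struct foo_dev with dev/vaddr/dma/size members is
 * hypothetical:
 *
 *	static int foo_mmap(struct file *file, struct vm_area_struct *vma)
 *	{
 *		struct foo_dev *foo = file->private_data;
 *
 *		return dma_mmap_attrs(foo->dev, vma, foo->vaddr, foo->dma,
 *				      foo->size, 0);
 *	}
 */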

u64 dma_get_required_mask(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_alloc_direct(dev, ops))
		return dma_direct_get_required_mask(dev);

	if (use_dma_iommu(dev))
		return DMA_BIT_MASK(32);

	if (ops->get_required_mask)
		return ops->get_required_mask(dev);

	/*
	 * We require every DMA ops implementation to at least support a 32-bit
	 * DMA mask (and use bounce buffering if that isn't supported in
	 * hardware).  As the direct mapping code has its own routine to
	 * actually report an optimal mask we default to 32-bit here as that
	 * is the right thing for most IOMMUs, and at least not actively
	 * harmful in general.
	 */
	return DMA_BIT_MASK(32);
}
EXPORT_SYMBOL_GPL(dma_get_required_mask);

void *dma_alloc_attrs(struct device *dev, size_t size, dma_addr_t *dma_handle,
		gfp_t flag, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	void *cpu_addr;

	WARN_ON_ONCE(!dev->coherent_dma_mask);

	/*
	 * DMA allocations can never be turned back into a page pointer, so
	 * requesting compound pages doesn't make sense (and can't even be
	 * supported at all by various backends).
	 */
	if (WARN_ON_ONCE(flag & __GFP_COMP))
		return NULL;

	if (dma_alloc_from_dev_coherent(dev, size, dma_handle, &cpu_addr)) {
		trace_dma_alloc(dev, cpu_addr, *dma_handle, size,
				DMA_BIDIRECTIONAL, flag, attrs);
		return cpu_addr;
	}

	/* let the implementation decide on the zone to allocate from: */
	flag &= ~(__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM);

	if (dma_alloc_direct(dev, ops)) {
		cpu_addr = dma_direct_alloc(dev, size, dma_handle, flag, attrs);
	} else if (use_dma_iommu(dev)) {
		cpu_addr = iommu_dma_alloc(dev, size, dma_handle, flag, attrs);
	} else if (ops->alloc) {
		cpu_addr = ops->alloc(dev, size, dma_handle, flag, attrs);
	} else {
		trace_dma_alloc(dev, NULL, 0, size, DMA_BIDIRECTIONAL, flag,
				attrs);
		return NULL;
	}

	trace_dma_alloc(dev, cpu_addr, *dma_handle, size, DMA_BIDIRECTIONAL,
			flag, attrs);
	debug_dma_alloc_coherent(dev, size, *dma_handle, cpu_addr, attrs);
	return cpu_addr;
}
EXPORT_SYMBOL(dma_alloc_attrs);

void dma_free_attrs(struct device *dev, size_t size, void *cpu_addr,
		dma_addr_t dma_handle, unsigned long attrs)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (dma_release_from_dev_coherent(dev, get_order(size), cpu_addr))
		return;
	/*
	 * On non-coherent platforms which implement DMA-coherent buffers via
	 * non-cacheable remaps, ops->free() may call vunmap().  Thus getting
	 * this far in IRQ context is a) at risk of a BUG_ON() or trying to
	 * sleep on some machines, and b) an indication that the driver is
	 * probably misusing the coherent API anyway.
	 */
	WARN_ON(irqs_disabled());

	trace_dma_free(dev, cpu_addr, dma_handle, size, DMA_BIDIRECTIONAL,
			attrs);
	if (!cpu_addr)
		return;

	debug_dma_free_coherent(dev, size, cpu_addr, dma_handle);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (use_dma_iommu(dev))
		iommu_dma_free(dev, size, cpu_addr, dma_handle, attrs);
	else if (ops->free)
		ops->free(dev, size, cpu_addr, dma_handle, attrs);
}
EXPORT_SYMBOL(dma_free_attrs);
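
/*
 * The unmanaged coherent API is used in matched pairs.  A sketch (not part
 * of this file), using the dma_alloc_coherent()/dma_free_coherent() wrappers
 * around the two functions above:
 *
 *	dma_addr_t desc_dma;
 *	void *desc;
 *
 *	desc = dma_alloc_coherent(dev, SZ_4K, &desc_dma, GFP_KERNEL);
 *	if (!desc)
 *		return -ENOMEM;
 *	...
 *	dma_free_coherent(dev, SZ_4K, desc, desc_dma);
 */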

static struct page *__dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (WARN_ON_ONCE(!dev->coherent_dma_mask))
		return NULL;
	if (WARN_ON_ONCE(gfp & (__GFP_DMA | __GFP_DMA32 | __GFP_HIGHMEM)))
		return NULL;
	if (WARN_ON_ONCE(gfp & __GFP_COMP))
		return NULL;

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		return dma_direct_alloc_pages(dev, size, dma_handle, dir, gfp);
	if (use_dma_iommu(dev))
		return dma_common_alloc_pages(dev, size, dma_handle, dir, gfp);
	if (!ops->alloc_pages_op)
		return NULL;
	return ops->alloc_pages_op(dev, size, dma_handle, dir, gfp);
}

struct page *dma_alloc_pages(struct device *dev, size_t size,
		dma_addr_t *dma_handle, enum dma_data_direction dir, gfp_t gfp)
{
	struct page *page = __dma_alloc_pages(dev, size, dma_handle, dir, gfp);

	if (page) {
		trace_dma_alloc_pages(dev, page_to_virt(page), *dma_handle,
				      size, dir, gfp, 0);
		debug_dma_alloc_pages(dev, page, size, dir, *dma_handle, 0);
	} else {
		trace_dma_alloc_pages(dev, NULL, 0, size, dir, gfp, 0);
	}
	return page;
}
EXPORT_SYMBOL_GPL(dma_alloc_pages);

static void __dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	size = PAGE_ALIGN(size);
	if (dma_alloc_direct(dev, ops))
		dma_direct_free_pages(dev, size, page, dma_handle, dir);
	else if (use_dma_iommu(dev))
		dma_common_free_pages(dev, size, page, dma_handle, dir);
	else if (ops->free_pages)
		ops->free_pages(dev, size, page, dma_handle, dir);
}

void dma_free_pages(struct device *dev, size_t size, struct page *page,
		dma_addr_t dma_handle, enum dma_data_direction dir)
{
	trace_dma_free_pages(dev, page_to_virt(page), dma_handle, size, dir, 0);
	debug_dma_free_pages(dev, page, size, dir, dma_handle);
	__dma_free_pages(dev, size, page, dma_handle, dir);
}
EXPORT_SYMBOL_GPL(dma_free_pages);
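
/*
 * dma_alloc_pages() returns page-backed, possibly non-coherent memory that
 * is already mapped for the given direction, so device accesses are bracketed
 * with the dma_sync_single_*() calls.  A sketch (not part of this file):
 *
 *	dma_addr_t dma;
 *	struct page *page;
 *
 *	page = dma_alloc_pages(dev, PAGE_SIZE, &dma, DMA_FROM_DEVICE,
 *			       GFP_KERNEL);
 *	if (!page)
 *		return -ENOMEM;
 *	...
 *	dma_sync_single_for_cpu(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);
 *	// the CPU reads page_address(page) here
 *	dma_sync_single_for_device(dev, dma, PAGE_SIZE, DMA_FROM_DEVICE);
 *	...
 *	dma_free_pages(dev, PAGE_SIZE, page, dma, DMA_FROM_DEVICE);
 */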

int dma_mmap_pages(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct page *page)
{
	unsigned long count = PAGE_ALIGN(size) >> PAGE_SHIFT;

	if (vma->vm_pgoff >= count || vma_pages(vma) > count - vma->vm_pgoff)
		return -ENXIO;
	return remap_pfn_range(vma, vma->vm_start,
			page_to_pfn(page) + vma->vm_pgoff,
			vma_pages(vma) << PAGE_SHIFT, vma->vm_page_prot);
}
EXPORT_SYMBOL_GPL(dma_mmap_pages);

static struct sg_table *alloc_single_sgt(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp)
{
	struct sg_table *sgt;
	struct page *page;

	sgt = kmalloc(sizeof(*sgt), gfp);
	if (!sgt)
		return NULL;
	if (sg_alloc_table(sgt, 1, gfp))
		goto out_free_sgt;
	page = __dma_alloc_pages(dev, size, &sgt->sgl->dma_address, dir, gfp);
	if (!page)
		goto out_free_table;
	sg_set_page(sgt->sgl, page, PAGE_ALIGN(size), 0);
	sg_dma_len(sgt->sgl) = sgt->sgl->length;
	return sgt;
out_free_table:
	sg_free_table(sgt);
out_free_sgt:
	kfree(sgt);
	return NULL;
}

struct sg_table *dma_alloc_noncontiguous(struct device *dev, size_t size,
		enum dma_data_direction dir, gfp_t gfp, unsigned long attrs)
{
	struct sg_table *sgt;

	if (WARN_ON_ONCE(attrs & ~DMA_ATTR_ALLOC_SINGLE_PAGES))
		return NULL;
	if (WARN_ON_ONCE(gfp & __GFP_COMP))
		return NULL;

	if (use_dma_iommu(dev))
		sgt = iommu_dma_alloc_noncontiguous(dev, size, dir, gfp, attrs);
	else
		sgt = alloc_single_sgt(dev, size, dir, gfp);

	if (sgt) {
		sgt->nents = 1;
		trace_dma_alloc_sgt(dev, sgt, size, dir, gfp, attrs);
		debug_dma_map_sg(dev, sgt->sgl, sgt->orig_nents, 1, dir, attrs);
	} else {
		trace_dma_alloc_sgt_err(dev, NULL, 0, size, dir, gfp, attrs);
	}
	return sgt;
}
EXPORT_SYMBOL_GPL(dma_alloc_noncontiguous);

static void free_single_sgt(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	__dma_free_pages(dev, size, sg_page(sgt->sgl), sgt->sgl->dma_address,
			dir);
	sg_free_table(sgt);
	kfree(sgt);
}

void dma_free_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt, enum dma_data_direction dir)
{
	trace_dma_free_sgt(dev, sgt, size, dir);
	debug_dma_unmap_sg(dev, sgt->sgl, sgt->orig_nents, dir);

	if (use_dma_iommu(dev))
		iommu_dma_free_noncontiguous(dev, size, sgt, dir);
	else
		free_single_sgt(dev, size, sgt, dir);
}
EXPORT_SYMBOL_GPL(dma_free_noncontiguous);

void *dma_vmap_noncontiguous(struct device *dev, size_t size,
		struct sg_table *sgt)
{
	if (use_dma_iommu(dev))
		return iommu_dma_vmap_noncontiguous(dev, size, sgt);

	return page_address(sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_vmap_noncontiguous);

void dma_vunmap_noncontiguous(struct device *dev, void *vaddr)
{
	if (use_dma_iommu(dev))
		iommu_dma_vunmap_noncontiguous(dev, vaddr);
}
EXPORT_SYMBOL_GPL(dma_vunmap_noncontiguous);

int dma_mmap_noncontiguous(struct device *dev, struct vm_area_struct *vma,
		size_t size, struct sg_table *sgt)
{
	if (use_dma_iommu(dev))
		return iommu_dma_mmap_noncontiguous(dev, vma, size, sgt);
	return dma_mmap_pages(dev, vma, size, sg_page(sgt->sgl));
}
EXPORT_SYMBOL_GPL(dma_mmap_noncontiguous);
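
/*
 * Typical use of the noncontiguous allocator: allocate, optionally vmap for
 * a contiguous kernel view, and tear down in reverse order.  A sketch (not
 * part of this file):
 *
 *	struct sg_table *sgt;
 *	void *vaddr;
 *
 *	sgt = dma_alloc_noncontiguous(dev, size, DMA_BIDIRECTIONAL,
 *				      GFP_KERNEL, 0);
 *	if (!sgt)
 *		return -ENOMEM;
 *	vaddr = dma_vmap_noncontiguous(dev, size, sgt);
 *	if (!vaddr) {
 *		dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
 *		return -ENOMEM;
 *	}
 *	...
 *	dma_vunmap_noncontiguous(dev, vaddr);
 *	dma_free_noncontiguous(dev, size, sgt, DMA_BIDIRECTIONAL);
 */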

static int dma_supported(struct device *dev, u64 mask)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (use_dma_iommu(dev)) {
		if (WARN_ON(ops))
			return false;
		return true;
	}

	/*
	 * ->dma_supported sets and clears the bypass flag, so ignore it here
	 * and always call into the method if there is one.
	 */
	if (ops) {
		if (!ops->dma_supported)
			return true;
		return ops->dma_supported(dev, mask);
	}

	return dma_direct_supported(dev, mask);
}

bool dma_pci_p2pdma_supported(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	/*
	 * Note: dma_ops_bypass is not checked here because P2PDMA should
	 * not be used with dma mapping ops that do not have support even
	 * if the specific device is bypassing them.
	 */

	/* if ops is not set, dma direct and default IOMMU support P2PDMA */
	return !ops;
}
EXPORT_SYMBOL_GPL(dma_pci_p2pdma_supported);

int dma_set_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dev->dma_mask || !dma_supported(dev, mask))
		return -EIO;

	arch_dma_set_mask(dev, mask);
	*dev->dma_mask = mask;
	dma_setup_need_sync(dev);

	return 0;
}
EXPORT_SYMBOL(dma_set_mask);

int dma_set_coherent_mask(struct device *dev, u64 mask)
{
	/*
	 * Truncate the mask to the actually supported dma_addr_t width to
	 * avoid generating unsupportable addresses.
	 */
	mask = (dma_addr_t)mask;

	if (!dma_supported(dev, mask))
		return -EIO;

	dev->coherent_dma_mask = mask;
	return 0;
}
EXPORT_SYMBOL(dma_set_coherent_mask);
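
/*
 * Drivers normally configure both masks in one go early in probe; a sketch
 * (not part of this file), using the dma_set_mask_and_coherent() helper that
 * wraps the two functions above:
 *
 *	ret = dma_set_mask_and_coherent(&pdev->dev, DMA_BIT_MASK(64));
 *	if (ret)
 *		return ret;
 */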

static bool __dma_addressing_limited(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (min_not_zero(dma_get_mask(dev), dev->bus_dma_limit) <
			dma_get_required_mask(dev))
		return true;

	if (unlikely(ops) || use_dma_iommu(dev))
		return false;
	return !dma_direct_all_ram_mapped(dev);
}

/**
 * dma_addressing_limited - return if the device is addressing limited
 * @dev:	device to check
 *
 * Return %true if the device's DMA mask is too small to address all memory in
 * the system, else %false.  Lack of addressing bits is the prime reason for
 * bounce buffering, but might not be the only one.
 */
bool dma_addressing_limited(struct device *dev)
{
	if (!__dma_addressing_limited(dev))
		return false;

	dev_dbg(dev, "device is DMA addressing limited\n");
	return true;
}
EXPORT_SYMBOL_GPL(dma_addressing_limited);

size_t dma_max_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (dma_map_direct(dev, ops))
		size = dma_direct_max_mapping_size(dev);
	else if (use_dma_iommu(dev))
		size = iommu_dma_max_mapping_size(dev);
	else if (ops && ops->max_mapping_size)
		size = ops->max_mapping_size(dev);

	return size;
}
EXPORT_SYMBOL_GPL(dma_max_mapping_size);

size_t dma_opt_mapping_size(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);
	size_t size = SIZE_MAX;

	if (use_dma_iommu(dev))
		size = iommu_dma_opt_mapping_size();
	else if (ops && ops->opt_mapping_size)
		size = ops->opt_mapping_size();

	return min(dma_max_mapping_size(dev), size);
}
EXPORT_SYMBOL_GPL(dma_opt_mapping_size);

unsigned long dma_get_merge_boundary(struct device *dev)
{
	const struct dma_map_ops *ops = get_dma_ops(dev);

	if (use_dma_iommu(dev))
		return iommu_dma_get_merge_boundary(dev);

	if (!ops || !ops->get_merge_boundary)
		return 0;	/* can't merge */

	return ops->get_merge_boundary(dev);
}
EXPORT_SYMBOL_GPL(dma_get_merge_boundary);