aboutsummaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorLeon Romanovsky <leonro@nvidia.com>2025-09-09 16:27:40 +0300
committerMarek Szyprowski <m.szyprowski@samsung.com>2025-09-12 00:18:21 +0200
commite1d69da24fb8ee02e13dcbc281f510f01332a7f8 (patch)
treeb4981e48a90b6f6af2e4791e3be0e79c8a2b2a98
parentdma-mapping: export new dma_*map_phys() interface (diff)
downloadlinux-e1d69da24fb8ee02e13dcbc281f510f01332a7f8.tar.gz
linux-e1d69da24fb8ee02e13dcbc281f510f01332a7f8.zip
mm/hmm: migrate to physical address-based DMA mapping API
Convert HMM DMA operations from the legacy page-based API to the new physical address-based dma_map_phys() and dma_unmap_phys() functions. This demonstrates the preferred approach for new code that should use physical addresses directly rather than page+offset parameters.

The change replaces dma_map_page() and dma_unmap_page() calls with dma_map_phys() and dma_unmap_phys() respectively, using the physical address that was already available in the code. This eliminates the redundant page-to-physical address conversion and aligns with the DMA subsystem's move toward physical address-centric interfaces.

This serves as an example of how new code should be written to leverage the more efficient physical address API, which provides cleaner interfaces for drivers that already have access to physical addresses.

Reviewed-by: Jason Gunthorpe <jgg@nvidia.com>
Signed-off-by: Leon Romanovsky <leonro@nvidia.com>
Signed-off-by: Marek Szyprowski <m.szyprowski@samsung.com>
Link: https://lore.kernel.org/r/d45207f195b8f77d23cc2d571c83197328a86b04.1757423202.git.leonro@nvidia.com
-rw-r--r--mm/hmm.c8
1 file changed, 4 insertions, 4 deletions
diff --git a/mm/hmm.c b/mm/hmm.c
index d545e2494994..015ab243f081 100644
--- a/mm/hmm.c
+++ b/mm/hmm.c
@@ -775,8 +775,8 @@ dma_addr_t hmm_dma_map_pfn(struct device *dev, struct hmm_dma_map *map,
if (WARN_ON_ONCE(dma_need_unmap(dev) && !dma_addrs))
goto error;
- dma_addr = dma_map_page(dev, page, 0, map->dma_entry_size,
- DMA_BIDIRECTIONAL);
+ dma_addr = dma_map_phys(dev, paddr, map->dma_entry_size,
+ DMA_BIDIRECTIONAL, 0);
if (dma_mapping_error(dev, dma_addr))
goto error;
@@ -819,8 +819,8 @@ bool hmm_dma_unmap_pfn(struct device *dev, struct hmm_dma_map *map, size_t idx)
dma_iova_unlink(dev, state, idx * map->dma_entry_size,
map->dma_entry_size, DMA_BIDIRECTIONAL, attrs);
} else if (dma_need_unmap(dev))
- dma_unmap_page(dev, dma_addrs[idx], map->dma_entry_size,
- DMA_BIDIRECTIONAL);
+ dma_unmap_phys(dev, dma_addrs[idx], map->dma_entry_size,
+ DMA_BIDIRECTIONAL, 0);
pfns[idx] &=
~(HMM_PFN_DMA_MAPPED | HMM_PFN_P2PDMA | HMM_PFN_P2PDMA_BUS);