From 057e55f337c5bb2aecc8967dc045c266a4e6c49d Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Sun, 29 Jun 2025 13:12:49 -0700 Subject: drm/msm: Rename msm_gem_address_space -> msm_gem_vm Re-aligning naming to better match drm_gpuvm terminology will make things less confusing at the end of the drm_gpuvm conversion. This is just rename churn, no functional change. Signed-off-by: Rob Clark Reviewed-by: Dmitry Baryshkov Signed-off-by: Rob Clark Tested-by: Antonino Maniscalco Reviewed-by: Antonino Maniscalco Patchwork: https://patchwork.freedesktop.org/patch/661466/ --- drivers/gpu/drm/msm/msm_gem.h | 34 +++++++++++++++++----------------- 1 file changed, 17 insertions(+), 17 deletions(-) (limited to 'drivers/gpu/drm/msm/msm_gem.h') diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index ba5c4ff76292..64ea3ed213c1 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -22,7 +22,7 @@ #define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */ #define MSM_BO_MAP_PRIV 0x20000000 /* use IOMMU_PRIV when mapping */ -struct msm_gem_address_space { +struct msm_gem_vm { const char *name; /* NOTE: mm managed at the page level, size is in # of pages * and position mm_node->start is in # of pages: @@ -47,13 +47,13 @@ struct msm_gem_address_space { uint64_t va_size; }; -struct msm_gem_address_space * -msm_gem_address_space_get(struct msm_gem_address_space *aspace); +struct msm_gem_vm * +msm_gem_vm_get(struct msm_gem_vm *vm); -void msm_gem_address_space_put(struct msm_gem_address_space *aspace); +void msm_gem_vm_put(struct msm_gem_vm *vm); -struct msm_gem_address_space * -msm_gem_address_space_create(struct msm_mmu *mmu, const char *name, +struct msm_gem_vm * +msm_gem_vm_create(struct msm_mmu *mmu, const char *name, u64 va_start, u64 size); struct msm_fence_context; @@ -61,12 +61,12 @@ struct msm_fence_context; struct msm_gem_vma { struct drm_mm_node node; uint64_t iova; - struct msm_gem_address_space *aspace; + struct msm_gem_vm *vm; struct list_head list; /* node in msm_gem_object::vmas */ bool mapped; }; -struct msm_gem_vma *msm_gem_vma_new(struct msm_gem_address_space *aspace); +struct msm_gem_vma *msm_gem_vma_new(struct msm_gem_vm *vm); int msm_gem_vma_init(struct msm_gem_vma *vma, int size, u64 range_start, u64 range_end); void msm_gem_vma_purge(struct msm_gem_vma *vma); @@ -127,18 +127,18 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma); void msm_gem_unpin_locked(struct drm_gem_object *obj); void msm_gem_unpin_active(struct drm_gem_object *obj); struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj, - struct msm_gem_address_space *aspace); + struct msm_gem_vm *vm); int msm_gem_get_iova(struct drm_gem_object *obj, - struct msm_gem_address_space *aspace, uint64_t *iova); + struct msm_gem_vm *vm, uint64_t *iova); int msm_gem_set_iova(struct drm_gem_object *obj, - struct msm_gem_address_space *aspace, uint64_t iova); + struct msm_gem_vm *vm, uint64_t iova); int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj, - struct msm_gem_address_space *aspace, uint64_t *iova, + struct msm_gem_vm *vm, uint64_t *iova, u64 range_start, u64 range_end); int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, - struct msm_gem_address_space *aspace, uint64_t *iova); + struct msm_gem_vm *vm, uint64_t *iova); void msm_gem_unpin_iova(struct drm_gem_object *obj, - struct msm_gem_address_space *aspace); + struct msm_gem_vm *vm); void msm_gem_pin_obj_locked(struct drm_gem_object *obj); struct page 
**msm_gem_pin_pages_locked(struct drm_gem_object *obj); void msm_gem_unpin_pages_locked(struct drm_gem_object *obj); @@ -160,10 +160,10 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags); void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, - uint32_t flags, struct msm_gem_address_space *aspace, + uint32_t flags, struct msm_gem_vm *vm, struct drm_gem_object **bo, uint64_t *iova); void msm_gem_kernel_put(struct drm_gem_object *bo, - struct msm_gem_address_space *aspace); + struct msm_gem_vm *vm); struct drm_gem_object *msm_gem_import(struct drm_device *dev, struct dma_buf *dmabuf, struct sg_table *sgt); __printf(2, 3) @@ -257,7 +257,7 @@ struct msm_gem_submit { struct kref ref; struct drm_device *dev; struct msm_gpu *gpu; - struct msm_gem_address_space *aspace; + struct msm_gem_vm *vm; struct list_head node; /* node in ring submit list */ struct drm_exec exec; uint32_t seqno; /* Sequence number of the submit on the ring */ -- cgit v1.2.3 From eab7766c79fd472d33a3a73a0d301cfe8a7c7246 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Sun, 29 Jun 2025 13:12:50 -0700 Subject: drm/msm: Remove vram carveout support It is standing in the way of drm_gpuvm / VM_BIND support. Not to mention frequently broken and rarely tested. And I think only needed for a 10yr old not quite upstream SoC (msm8974). Maybe we can add support back in later, but I'm doubtful. Signed-off-by: Rob Clark Signed-off-by: Rob Clark Tested-by: Antonino Maniscalco Reviewed-by: Antonino Maniscalco Patchwork: https://patchwork.freedesktop.org/patch/661467/ --- drivers/gpu/drm/msm/adreno/a2xx_gpu.c | 8 -- drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 15 ---- drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 15 ---- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 3 +- drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 3 +- drivers/gpu/drm/msm/adreno/adreno_device.c | 4 - drivers/gpu/drm/msm/adreno/adreno_gpu.c | 4 +- drivers/gpu/drm/msm/adreno/adreno_gpu.h | 1 - drivers/gpu/drm/msm/msm_drv.c | 117 +------------------------- drivers/gpu/drm/msm/msm_drv.h | 11 --- drivers/gpu/drm/msm/msm_gem.c | 131 +++-------------------------- drivers/gpu/drm/msm/msm_gem.h | 5 -- drivers/gpu/drm/msm/msm_gem_submit.c | 5 -- drivers/gpu/drm/msm/msm_gpu.c | 6 +- 14 files changed, 19 insertions(+), 309 deletions(-) (limited to 'drivers/gpu/drm/msm/msm_gem.h') diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c index 5eb063ed0b46..095bae92e3e8 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c @@ -551,14 +551,6 @@ struct msm_gpu *a2xx_gpu_init(struct drm_device *dev) else adreno_gpu->registers = a220_registers; - if (!gpu->vm) { - dev_err(dev->dev, "No memory protection without MMU\n"); - if (!allow_vram_carveout) { - ret = -ENXIO; - goto fail; - } - } - return gpu; fail: diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c index 434e6ededf83..a956cd79195e 100644 --- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c @@ -581,21 +581,6 @@ struct msm_gpu *a3xx_gpu_init(struct drm_device *dev) goto fail; } - if (!gpu->vm) { - /* TODO we think it is possible to configure the GPU to - * restrict access to VRAM carveout. But the required - * registers are unknown. For now just bail out and - * limp along with just modesetting. 
If it turns out - * to not be possible to restrict access, then we must - * implement a cmdstream validator. - */ - DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n"); - if (!allow_vram_carveout) { - ret = -ENXIO; - goto fail; - } - } - icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem"); if (IS_ERR(icc_path)) { ret = PTR_ERR(icc_path); diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c index 2c75debcfd84..83f6329accba 100644 --- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c @@ -695,21 +695,6 @@ struct msm_gpu *a4xx_gpu_init(struct drm_device *dev) adreno_gpu->uche_trap_base = 0xffff0000ffff0000ull; - if (!gpu->vm) { - /* TODO we think it is possible to configure the GPU to - * restrict access to VRAM carveout. But the required - * registers are unknown. For now just bail out and - * limp along with just modesetting. If it turns out - * to not be possible to restrict access, then we must - * implement a cmdstream validator. - */ - DRM_DEV_ERROR(dev->dev, "No memory protection without IOMMU\n"); - if (!allow_vram_carveout) { - ret = -ENXIO; - goto fail; - } - } - icc_path = devm_of_icc_get(&pdev->dev, "gfx-mem"); if (IS_ERR(icc_path)) { ret = PTR_ERR(icc_path); diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index dc31bc0afca4..04138a06724b 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -1786,8 +1786,7 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) return ERR_PTR(ret); } - if (gpu->vm) - msm_mmu_set_fault_handler(gpu->vm->mmu, gpu, a5xx_fault_handler); + msm_mmu_set_fault_handler(gpu->vm->mmu, gpu, a5xx_fault_handler); /* Set up the preemption specific bits and pieces for each ringbuffer */ a5xx_preempt_init(gpu); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 5078152eb8d3..7b3be2b46cc4 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -2560,8 +2560,7 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) adreno_gpu->uche_trap_base = 0x1fffffffff000ull; - if (gpu->vm) - msm_mmu_set_fault_handler(gpu->vm->mmu, gpu, a6xx_fault_handler); + msm_mmu_set_fault_handler(gpu->vm->mmu, gpu, a6xx_fault_handler); a6xx_calc_ubwc_config(adreno_gpu); /* Set up the preemption specific bits and pieces for each ringbuffer */ diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c index 16e7ac444efd..27dbbb302081 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_device.c +++ b/drivers/gpu/drm/msm/adreno/adreno_device.c @@ -16,10 +16,6 @@ bool snapshot_debugbus = false; MODULE_PARM_DESC(snapshot_debugbus, "Include debugbus sections in GPU devcoredump (if not fused off)"); module_param_named(snapshot_debugbus, snapshot_debugbus, bool, 0600); -bool allow_vram_carveout = false; -MODULE_PARM_DESC(allow_vram_carveout, "Allow using VRAM Carveout, in place of IOMMU"); -module_param_named(allow_vram_carveout, allow_vram_carveout, bool, 0600); - int enable_preemption = -1; MODULE_PARM_DESC(enable_preemption, "Enable preemption (A7xx only) (1=on , 0=disable, -1=auto (default))"); module_param(enable_preemption, int, 0600); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index be723fe4de2b..0f71c39696a5 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -209,7 +209,9 @@ 
adreno_iommu_create_vm(struct msm_gpu *gpu, u64 start, size; mmu = msm_iommu_gpu_new(&pdev->dev, gpu, quirks); - if (IS_ERR_OR_NULL(mmu)) + if (!mmu) + return ERR_PTR(-ENODEV); + else if (IS_ERR_OR_NULL(mmu)) return ERR_CAST(mmu); geometry = msm_iommu_get_geometry(mmu); diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index 4fa4b11442ba..b1761f990aa1 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -18,7 +18,6 @@ #include "adreno_pm4.xml.h" extern bool snapshot_debugbus; -extern bool allow_vram_carveout; enum { ADRENO_FW_PM4 = 0, diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index a80a96d5b92f..1af93cc86901 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -46,12 +46,6 @@ #define MSM_VERSION_MINOR 12 #define MSM_VERSION_PATCHLEVEL 0 -static void msm_deinit_vram(struct drm_device *ddev); - -static char *vram = "16m"; -MODULE_PARM_DESC(vram, "Configure VRAM size (for devices without IOMMU/GPUMMU)"); -module_param(vram, charp, 0); - bool dumpstate; MODULE_PARM_DESC(dumpstate, "Dump KMS state on errors"); module_param(dumpstate, bool, 0600); @@ -97,8 +91,6 @@ static int msm_drm_uninit(struct device *dev) if (priv->kms) msm_drm_kms_uninit(dev); - msm_deinit_vram(ddev); - component_unbind_all(dev, ddev); ddev->dev_private = NULL; @@ -109,107 +101,6 @@ static int msm_drm_uninit(struct device *dev) return 0; } -bool msm_use_mmu(struct drm_device *dev) -{ - struct msm_drm_private *priv = dev->dev_private; - - /* - * a2xx comes with its own MMU - * On other platforms IOMMU can be declared specified either for the - * MDP/DPU device or for its parent, MDSS device. - */ - return priv->is_a2xx || - device_iommu_mapped(dev->dev) || - device_iommu_mapped(dev->dev->parent); -} - -static int msm_init_vram(struct drm_device *dev) -{ - struct msm_drm_private *priv = dev->dev_private; - struct device_node *node; - unsigned long size = 0; - int ret = 0; - - /* In the device-tree world, we could have a 'memory-region' - * phandle, which gives us a link to our "vram". Allocating - * is all nicely abstracted behind the dma api, but we need - * to know the entire size to allocate it all in one go. There - * are two cases: - * 1) device with no IOMMU, in which case we need exclusive - * access to a VRAM carveout big enough for all gpu - * buffers - * 2) device with IOMMU, but where the bootloader puts up - * a splash screen. In this case, the VRAM carveout - * need only be large enough for fbdev fb. But we need - * exclusive access to the buffer to avoid the kernel - * using those pages for other purposes (which appears - * as corruption on screen before we have a chance to - * load and do initial modeset) - */ - - node = of_parse_phandle(dev->dev->of_node, "memory-region", 0); - if (node) { - struct resource r; - ret = of_address_to_resource(node, 0, &r); - of_node_put(node); - if (ret) - return ret; - size = r.end - r.start + 1; - DRM_INFO("using VRAM carveout: %lx@%pa\n", size, &r.start); - - /* if we have no IOMMU, then we need to use carveout allocator. 
- * Grab the entire DMA chunk carved out in early startup in - * mach-msm: - */ - } else if (!msm_use_mmu(dev)) { - DRM_INFO("using %s VRAM carveout\n", vram); - size = memparse(vram, NULL); - } - - if (size) { - unsigned long attrs = 0; - void *p; - - priv->vram.size = size; - - drm_mm_init(&priv->vram.mm, 0, (size >> PAGE_SHIFT) - 1); - spin_lock_init(&priv->vram.lock); - - attrs |= DMA_ATTR_NO_KERNEL_MAPPING; - attrs |= DMA_ATTR_WRITE_COMBINE; - - /* note that for no-kernel-mapping, the vaddr returned - * is bogus, but non-null if allocation succeeded: - */ - p = dma_alloc_attrs(dev->dev, size, - &priv->vram.paddr, GFP_KERNEL, attrs); - if (!p) { - DRM_DEV_ERROR(dev->dev, "failed to allocate VRAM\n"); - priv->vram.paddr = 0; - return -ENOMEM; - } - - DRM_DEV_INFO(dev->dev, "VRAM: %08x->%08x\n", - (uint32_t)priv->vram.paddr, - (uint32_t)(priv->vram.paddr + size)); - } - - return ret; -} - -static void msm_deinit_vram(struct drm_device *ddev) -{ - struct msm_drm_private *priv = ddev->dev_private; - unsigned long attrs = DMA_ATTR_NO_KERNEL_MAPPING; - - if (!priv->vram.paddr) - return; - - drm_mm_takedown(&priv->vram.mm); - dma_free_attrs(ddev->dev, priv->vram.size, NULL, priv->vram.paddr, - attrs); -} - static int msm_drm_init(struct device *dev, const struct drm_driver *drv) { struct msm_drm_private *priv = dev_get_drvdata(dev); @@ -260,16 +151,12 @@ static int msm_drm_init(struct device *dev, const struct drm_driver *drv) goto err_destroy_wq; } - ret = msm_init_vram(ddev); - if (ret) - goto err_destroy_wq; - dma_set_max_seg_size(dev, UINT_MAX); /* Bind all our sub-components: */ ret = component_bind_all(dev, ddev); if (ret) - goto err_deinit_vram; + goto err_destroy_wq; ret = msm_gem_shrinker_init(ddev); if (ret) @@ -306,8 +193,6 @@ err_msm_uninit: return ret; -err_deinit_vram: - msm_deinit_vram(ddev); err_destroy_wq: destroy_workqueue(priv->wq); err_put_dev: diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 8aa3412c6e36..761e7e221ad9 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -183,17 +183,6 @@ struct msm_drm_private { struct msm_drm_thread event_thread[MAX_CRTCS]; - /* VRAM carveout, used when no IOMMU: */ - struct { - unsigned long size; - dma_addr_t paddr; - /* NOTE: mm managed at the page level, size is in # of pages - * and position mm_node->start is in # of pages: - */ - struct drm_mm mm; - spinlock_t lock; /* Protects drm_mm node allocation/removal */ - } vram; - struct notifier_block vmap_notifier; struct shrinker *shrinker; diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 5e6c88b85fd3..b83790cc08df 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -17,24 +17,8 @@ #include #include "msm_drv.h" -#include "msm_fence.h" #include "msm_gem.h" #include "msm_gpu.h" -#include "msm_mmu.h" - -static dma_addr_t physaddr(struct drm_gem_object *obj) -{ - struct msm_gem_object *msm_obj = to_msm_bo(obj); - struct msm_drm_private *priv = obj->dev->dev_private; - return (((dma_addr_t)msm_obj->vram_node->start) << PAGE_SHIFT) + - priv->vram.paddr; -} - -static bool use_pages(struct drm_gem_object *obj) -{ - struct msm_gem_object *msm_obj = to_msm_bo(obj); - return !msm_obj->vram_node; -} static void update_device_mem(struct msm_drm_private *priv, ssize_t size) { @@ -135,36 +119,6 @@ static void update_lru(struct drm_gem_object *obj) mutex_unlock(&priv->lru.lock); } -/* allocate pages from VRAM carveout, used when no IOMMU: */ -static struct page 
**get_pages_vram(struct drm_gem_object *obj, int npages) -{ - struct msm_gem_object *msm_obj = to_msm_bo(obj); - struct msm_drm_private *priv = obj->dev->dev_private; - dma_addr_t paddr; - struct page **p; - int ret, i; - - p = kvmalloc_array(npages, sizeof(struct page *), GFP_KERNEL); - if (!p) - return ERR_PTR(-ENOMEM); - - spin_lock(&priv->vram.lock); - ret = drm_mm_insert_node(&priv->vram.mm, msm_obj->vram_node, npages); - spin_unlock(&priv->vram.lock); - if (ret) { - kvfree(p); - return ERR_PTR(ret); - } - - paddr = physaddr(obj); - for (i = 0; i < npages; i++) { - p[i] = pfn_to_page(__phys_to_pfn(paddr)); - paddr += PAGE_SIZE; - } - - return p; -} - static struct page **get_pages(struct drm_gem_object *obj) { struct msm_gem_object *msm_obj = to_msm_bo(obj); @@ -176,10 +130,7 @@ static struct page **get_pages(struct drm_gem_object *obj) struct page **p; int npages = obj->size >> PAGE_SHIFT; - if (use_pages(obj)) - p = drm_gem_get_pages(obj); - else - p = get_pages_vram(obj, npages); + p = drm_gem_get_pages(obj); if (IS_ERR(p)) { DRM_DEV_ERROR(dev->dev, "could not get pages: %ld\n", @@ -212,18 +163,6 @@ static struct page **get_pages(struct drm_gem_object *obj) return msm_obj->pages; } -static void put_pages_vram(struct drm_gem_object *obj) -{ - struct msm_gem_object *msm_obj = to_msm_bo(obj); - struct msm_drm_private *priv = obj->dev->dev_private; - - spin_lock(&priv->vram.lock); - drm_mm_remove_node(msm_obj->vram_node); - spin_unlock(&priv->vram.lock); - - kvfree(msm_obj->pages); -} - static void put_pages(struct drm_gem_object *obj) { struct msm_gem_object *msm_obj = to_msm_bo(obj); @@ -244,10 +183,7 @@ static void put_pages(struct drm_gem_object *obj) update_device_mem(obj->dev->dev_private, -obj->size); - if (use_pages(obj)) - drm_gem_put_pages(obj, msm_obj->pages, true, false); - else - put_pages_vram(obj); + drm_gem_put_pages(obj, msm_obj->pages, true, false); msm_obj->pages = NULL; update_lru(obj); @@ -1207,19 +1143,10 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32 struct msm_drm_private *priv = dev->dev_private; struct msm_gem_object *msm_obj; struct drm_gem_object *obj = NULL; - bool use_vram = false; int ret; size = PAGE_ALIGN(size); - if (!msm_use_mmu(dev)) - use_vram = true; - else if ((flags & (MSM_BO_STOLEN | MSM_BO_SCANOUT)) && priv->vram.size) - use_vram = true; - - if (GEM_WARN_ON(use_vram && !priv->vram.size)) - return ERR_PTR(-EINVAL); - /* Disallow zero sized objects as they make the underlying * infrastructure grumpy */ @@ -1232,44 +1159,16 @@ struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32 msm_obj = to_msm_bo(obj); - if (use_vram) { - struct msm_gem_vma *vma; - struct page **pages; - - drm_gem_private_object_init(dev, obj, size); - - msm_gem_lock(obj); - - vma = add_vma(obj, NULL); - msm_gem_unlock(obj); - if (IS_ERR(vma)) { - ret = PTR_ERR(vma); - goto fail; - } - - to_msm_bo(obj)->vram_node = &vma->node; - - msm_gem_lock(obj); - pages = get_pages(obj); - msm_gem_unlock(obj); - if (IS_ERR(pages)) { - ret = PTR_ERR(pages); - goto fail; - } - - vma->iova = physaddr(obj); - } else { - ret = drm_gem_object_init(dev, obj, size); - if (ret) - goto fail; - /* - * Our buffers are kept pinned, so allocating them from the - * MOVABLE zone is a really bad idea, and conflicts with CMA. - * See comments above new_inode() why this is required _and_ - * expected if you're going to pin these pages. 
- */ - mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER); - } + ret = drm_gem_object_init(dev, obj, size); + if (ret) + goto fail; + /* + * Our buffers are kept pinned, so allocating them from the + * MOVABLE zone is a really bad idea, and conflicts with CMA. + * See comments above new_inode() why this is required _and_ + * expected if you're going to pin these pages. + */ + mapping_set_gfp_mask(obj->filp->f_mapping, GFP_HIGHUSER); drm_gem_lru_move_tail(&priv->lru.unbacked, obj); @@ -1297,12 +1196,6 @@ struct drm_gem_object *msm_gem_import(struct drm_device *dev, uint32_t size; int ret, npages; - /* if we don't have IOMMU, don't bother pretending we can import: */ - if (!msm_use_mmu(dev)) { - DRM_DEV_ERROR(dev->dev, "cannot import without IOMMU\n"); - return ERR_PTR(-EINVAL); - } - size = PAGE_ALIGN(dmabuf->size); ret = msm_gem_new_impl(dev, size, MSM_BO_WC, &obj); diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 64ea3ed213c1..e47e187ecd00 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -102,11 +102,6 @@ struct msm_gem_object { struct list_head vmas; /* list of msm_gem_vma */ - /* For physically contiguous buffers. Used when we don't have - * an IOMMU. Also used for stolen/splashscreen buffer. - */ - struct drm_mm_node *vram_node; - char name[32]; /* Identifier to print for the debugfs files */ /* userspace metadata backchannel */ diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index b0733f634791..c299c0c72c96 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -670,11 +670,6 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (args->pad) return -EINVAL; - if (unlikely(!ctx->vm) && !capable(CAP_SYS_RAWIO)) { - DRM_ERROR_RATELIMITED("IOMMU support or CAP_SYS_RAWIO required!\n"); - return -EPERM; - } - /* for now, we just have 3d pipe.. eventually this would need to * be more clever to dispatch to appropriate gpu module: */ diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 3400a6ca8fd8..47268aae7d54 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -942,12 +942,8 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, msm_devfreq_init(gpu); - gpu->vm = gpu->funcs->create_vm(gpu, pdev); - - if (gpu->vm == NULL) - DRM_DEV_INFO(drm->dev, "%s: no IOMMU, fallback to VRAM carveout!\n", name); - else if (IS_ERR(gpu->vm)) { + if (IS_ERR(gpu->vm)) { ret = PTR_ERR(gpu->vm); goto fail; } -- cgit v1.2.3 From da0e1407beb3a40e1b78418b257b849befb24bc6 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Sun, 29 Jun 2025 13:12:51 -0700 Subject: drm/msm: Collapse vma allocation and initialization Now that we've dropped vram carveout support, we can collapse vma allocation and initialization. This better matches how things work with drm_gpuvm. 
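To illustrate (a simplified sketch of the before/after calling convention in get_vma_locked(), mirroring the hunks below rather than adding any new API):

/* Before: two-step allocation + initialization, with manual unwind: */
vma = add_vma(obj, vm);
if (IS_ERR(vma))
	return vma;

ret = msm_gem_vma_init(vma, obj->size, range_start, range_end);
if (ret) {
	del_vma(vma);
	return ERR_PTR(ret);
}

/* After: one call returns a fully initialized VMA, or an ERR_PTR: */
vma = msm_gem_vma_new(vm, obj, range_start, range_end);
if (IS_ERR(vma))
	return vma;

list_add_tail(&vma->list, &msm_obj->vmas);
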
Signed-off-by: Rob Clark Signed-off-by: Rob Clark Tested-by: Antonino Maniscalco Reviewed-by: Antonino Maniscalco Patchwork: https://patchwork.freedesktop.org/patch/661471/ --- drivers/gpu/drm/msm/msm_gem.c | 30 +++--------------------------- drivers/gpu/drm/msm/msm_gem.h | 4 ++-- drivers/gpu/drm/msm/msm_gem_vma.c | 36 +++++++++++++++--------------------- 3 files changed, 20 insertions(+), 50 deletions(-) (limited to 'drivers/gpu/drm/msm/msm_gem.h') diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index b83790cc08df..9fa830209b1e 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -333,23 +333,6 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj) return offset; } -static struct msm_gem_vma *add_vma(struct drm_gem_object *obj, - struct msm_gem_vm *vm) -{ - struct msm_gem_object *msm_obj = to_msm_bo(obj); - struct msm_gem_vma *vma; - - msm_gem_assert_locked(obj); - - vma = msm_gem_vma_new(vm); - if (!vma) - return ERR_PTR(-ENOMEM); - - list_add_tail(&vma->list, &msm_obj->vmas); - - return vma; -} - static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj, struct msm_gem_vm *vm) { @@ -416,6 +399,7 @@ static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj, struct msm_gem_vm *vm, u64 range_start, u64 range_end) { + struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_vma *vma; msm_gem_assert_locked(obj); @@ -423,18 +407,10 @@ static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj, vma = lookup_vma(obj, vm); if (!vma) { - int ret; - - vma = add_vma(obj, vm); + vma = msm_gem_vma_new(vm, obj, range_start, range_end); if (IS_ERR(vma)) return vma; - - ret = msm_gem_vma_init(vma, obj->size, - range_start, range_end); - if (ret) { - del_vma(vma); - return ERR_PTR(ret); - } + list_add_tail(&vma->list, &msm_obj->vmas); } else { GEM_WARN_ON(vma->iova < range_start); GEM_WARN_ON((vma->iova + obj->size) > range_end); diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index e47e187ecd00..cf1e86252219 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -66,8 +66,8 @@ struct msm_gem_vma { bool mapped; }; -struct msm_gem_vma *msm_gem_vma_new(struct msm_gem_vm *vm); -int msm_gem_vma_init(struct msm_gem_vma *vma, int size, +struct msm_gem_vma * +msm_gem_vma_new(struct msm_gem_vm *vm, struct drm_gem_object *obj, u64 range_start, u64 range_end); void msm_gem_vma_purge(struct msm_gem_vma *vma); int msm_gem_vma_map(struct msm_gem_vma *vma, int prot, struct sg_table *sgt, int size); diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index 9419692f0cc8..6d18364f321c 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -106,47 +106,41 @@ void msm_gem_vma_close(struct msm_gem_vma *vma) msm_gem_vm_put(vm); } -struct msm_gem_vma *msm_gem_vma_new(struct msm_gem_vm *vm) +/* Create a new vma and allocate an iova for it */ +struct msm_gem_vma * +msm_gem_vma_new(struct msm_gem_vm *vm, struct drm_gem_object *obj, + u64 range_start, u64 range_end) { struct msm_gem_vma *vma; + int ret; vma = kzalloc(sizeof(*vma), GFP_KERNEL); if (!vma) - return NULL; + return ERR_PTR(-ENOMEM); vma->vm = vm; - return vma; -} - -/* Initialize a new vma and allocate an iova for it */ -int msm_gem_vma_init(struct msm_gem_vma *vma, int size, - u64 range_start, u64 range_end) -{ - struct msm_gem_vm *vm = vma->vm; - int ret; - - if (GEM_WARN_ON(!vm)) - return -EINVAL; - - if (GEM_WARN_ON(vma->iova)) - return -EBUSY; - 
spin_lock(&vm->lock); ret = drm_mm_insert_node_in_range(&vm->mm, &vma->node, - size, PAGE_SIZE, 0, + obj->size, PAGE_SIZE, 0, range_start, range_end, 0); spin_unlock(&vm->lock); if (ret) - return ret; + goto err_free_vma; vma->iova = vma->node.start; vma->mapped = false; + INIT_LIST_HEAD(&vma->list); + kref_get(&vm->kref); - return 0; + return vma; + +err_free_vma: + kfree(vma); + return ERR_PTR(ret); } struct msm_gem_vm * -- cgit v1.2.3 From 111fdd2198e63b1e19ce015d95692d27bf6ce3fa Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Sun, 29 Jun 2025 13:12:56 -0700 Subject: drm/msm: drm_gpuvm conversion Now that we've realigned deletion and allocation, switch over to using drm_gpuvm/drm_gpuva. This allows us to support multiple VMAs per BO per VM, to allow mapping different parts of a single BO at different virtual addresses, which is a key requirement for sparse/VM_BIND. This prepares us for using drm_gpuvm to translate a batch of MAP/ MAP_NULL/UNMAP operations from userspace into a sequence of map/remap/ unmap steps for updating the page tables. Unlike our prior vm/vma setup, with drm_gpuvm the vm_bo holds a reference to the GEM object. So, to prevent reference loops causing us to leak all GEM objects, we implicitly tear down the mapping when the GEM handle is closed or when the obj is unpinned. This means the submit also needs to hold a reference to the vm_bo, to prevent the VMA from being torn down while the submit is in-flight. Signed-off-by: Rob Clark Signed-off-by: Rob Clark Tested-by: Antonino Maniscalco Reviewed-by: Antonino Maniscalco Patchwork: https://patchwork.freedesktop.org/patch/661479/ --- drivers/gpu/drm/msm/Kconfig | 1 + drivers/gpu/drm/msm/adreno/a2xx_gpu.c | 3 +- drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 6 +- drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 5 +- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 7 +- drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c | 5 +- drivers/gpu/drm/msm/msm_drv.c | 1 + drivers/gpu/drm/msm/msm_gem.c | 166 ++++++++++++++++++++----------- drivers/gpu/drm/msm/msm_gem.h | 90 +++++++++++++---- drivers/gpu/drm/msm/msm_gem_submit.c | 13 ++- drivers/gpu/drm/msm/msm_gem_vma.c | 140 ++++++++++++++++++-------- drivers/gpu/drm/msm/msm_kms.c | 4 +- 12 files changed, 303 insertions(+), 138 deletions(-) (limited to 'drivers/gpu/drm/msm/msm_gem.h') diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig index 7f127e2ae442..1523bc3e9540 100644 --- a/drivers/gpu/drm/msm/Kconfig +++ b/drivers/gpu/drm/msm/Kconfig @@ -21,6 +21,7 @@ config DRM_MSM select DRM_DISPLAY_HELPER select DRM_BRIDGE_CONNECTOR select DRM_EXEC + select DRM_GPUVM select DRM_KMS_HELPER select DRM_PANEL select DRM_BRIDGE diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c index 095bae92e3e8..889480aa13ba 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c @@ -472,8 +472,7 @@ a2xx_create_vm(struct msm_gpu *gpu, struct platform_device *pdev) struct msm_mmu *mmu = a2xx_gpummu_new(&pdev->dev, gpu); struct msm_gem_vm *vm; - vm = msm_gem_vm_create(mmu, "gpu", SZ_16M, - 0xfff * SZ_64K); + vm = msm_gem_vm_create(gpu->dev, mmu, "gpu", SZ_16M, 0xfff * SZ_64K, true); if (IS_ERR(vm) && !IS_ERR(mmu)) mmu->funcs->destroy(mmu); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 848acc382b7d..77d9ff9632d1 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -1311,7 +1311,7 @@ static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct
a6xx_gmu_bo *bo, return 0; } -static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu) +static int a6xx_gmu_memory_probe(struct drm_device *drm, struct a6xx_gmu *gmu) { struct msm_mmu *mmu; @@ -1321,7 +1321,7 @@ static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu) if (IS_ERR(mmu)) return PTR_ERR(mmu); - gmu->vm = msm_gem_vm_create(mmu, "gmu", 0x0, 0x80000000); + gmu->vm = msm_gem_vm_create(drm, mmu, "gmu", 0x0, 0x80000000, true); if (IS_ERR(gmu->vm)) return PTR_ERR(gmu->vm); @@ -1940,7 +1940,7 @@ int a6xx_gmu_init(struct a6xx_gpu *a6xx_gpu, struct device_node *node) if (ret) goto err_put_device; - ret = a6xx_gmu_memory_probe(gmu); + ret = a6xx_gmu_memory_probe(adreno_gpu->base.dev, gmu); if (ret) goto err_put_device; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 7b3be2b46cc4..262129cb4415 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -2284,9 +2284,8 @@ a6xx_create_private_vm(struct msm_gpu *gpu) if (IS_ERR(mmu)) return ERR_CAST(mmu); - return msm_gem_vm_create(mmu, - "gpu", ADRENO_VM_START, - adreno_private_vm_size(gpu)); + return msm_gem_vm_create(gpu->dev, mmu, "gpu", ADRENO_VM_START, + adreno_private_vm_size(gpu), true); } static uint32_t a6xx_get_rptr(struct msm_gpu *gpu, struct msm_ringbuffer *ring) diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 0f71c39696a5..46199a6d0e41 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -226,7 +226,8 @@ adreno_iommu_create_vm(struct msm_gpu *gpu, start = max_t(u64, SZ_16M, geometry->aperture_start); size = geometry->aperture_end - start + 1; - vm = msm_gem_vm_create(mmu, "gpu", start & GENMASK_ULL(48, 0), size); + vm = msm_gem_vm_create(gpu->dev, mmu, "gpu", start & GENMASK_ULL(48, 0), + size, true); if (IS_ERR(vm) && !IS_ERR(mmu)) mmu->funcs->destroy(mmu); @@ -414,12 +415,12 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx, case MSM_PARAM_VA_START: if (ctx->vm == gpu->vm) return UERR(EINVAL, drm, "requires per-process pgtables"); - *value = ctx->vm->va_start; + *value = ctx->vm->base.mm_start; return 0; case MSM_PARAM_VA_SIZE: if (ctx->vm == gpu->vm) return UERR(EINVAL, drm, "requires per-process pgtables"); - *value = ctx->vm->va_size; + *value = ctx->vm->base.mm_range; return 0; case MSM_PARAM_HIGHEST_BANK_BIT: *value = adreno_gpu->ubwc_config.highest_bank_bit; diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c index 5cb4a4bae2a6..a867c684c6d6 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c @@ -469,8 +469,9 @@ static int mdp4_kms_init(struct drm_device *dev) "contig buffers for scanout\n"); vm = NULL; } else { - vm = msm_gem_vm_create(mmu, - "mdp4", 0x1000, 0x100000000 - 0x1000); + vm = msm_gem_vm_create(dev, mmu, "mdp4", + 0x1000, 0x100000000 - 0x1000, + true); if (IS_ERR(vm)) { if (!IS_ERR(mmu)) diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index 1af93cc86901..728f7d86c31c 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -787,6 +787,7 @@ static const struct file_operations fops = { static const struct drm_driver msm_driver = { .driver_features = DRIVER_GEM | + DRIVER_GEM_GPUVA | DRIVER_RENDER | DRIVER_ATOMIC | DRIVER_MODESET | diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index a20ae783f244..664fb801c221 100644 --- 
a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -43,9 +43,53 @@ static int msm_gem_open(struct drm_gem_object *obj, struct drm_file *file) return 0; } +static void put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm, bool close); + +static void detach_vm(struct drm_gem_object *obj, struct msm_gem_vm *vm) +{ + msm_gem_assert_locked(obj); + + struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_find(&vm->base, obj); + if (vm_bo) { + struct drm_gpuva *vma; + + drm_gpuvm_bo_for_each_va (vma, vm_bo) { + if (vma->vm != &vm->base) + continue; + msm_gem_vma_purge(to_msm_vma(vma)); + msm_gem_vma_close(to_msm_vma(vma)); + break; + } + + drm_gpuvm_bo_put(vm_bo); + } +} + static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file) { + struct msm_context *ctx = file->driver_priv; + update_ctx_mem(file, -obj->size); + + /* + * If VM isn't created yet, nothing to cleanup. And in fact calling + * put_iova_spaces() with vm=NULL would be bad, in that it will tear- + * down the mappings of shared buffers in other contexts. + */ + if (!ctx->vm) + return; + + /* + * TODO we might need to kick this to a queue to avoid blocking + * in CLOSE ioctl + */ + dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ, false, + msecs_to_jiffies(1000)); + + msm_gem_lock(obj); + put_iova_spaces(obj, &ctx->vm->base, true); + detach_vm(obj, ctx->vm); + msm_gem_unlock(obj); } /* @@ -167,6 +211,13 @@ static void put_pages(struct drm_gem_object *obj) { struct msm_gem_object *msm_obj = to_msm_bo(obj); + /* + * Skip gpuvm in the object free path to avoid a WARN_ON() splat. + * See explanation in msm_gem_assert_locked() + */ + if (kref_read(&obj->refcount)) + drm_gpuvm_bo_gem_evict(obj, true); + if (msm_obj->pages) { if (msm_obj->sgt) { /* For non-cached buffers, ensure the new @@ -334,16 +385,25 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj) } static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj, - struct msm_gem_vm *vm) + struct msm_gem_vm *vm) { - struct msm_gem_object *msm_obj = to_msm_bo(obj); - struct msm_gem_vma *vma; + struct drm_gpuvm_bo *vm_bo; msm_gem_assert_locked(obj); - list_for_each_entry(vma, &msm_obj->vmas, list) { - if (vma->vm == vm) - return vma; + drm_gem_for_each_gpuvm_bo (vm_bo, obj) { + struct drm_gpuva *vma; + + drm_gpuvm_bo_for_each_va (vma, vm_bo) { + if (vma->vm == &vm->base) { + /* lookup_vma() should only be used in paths + * with at most one vma per vm + */ + GEM_WARN_ON(!list_is_singular(&vm_bo->list.gpuva)); + + return to_msm_vma(vma); + } + } } return NULL; @@ -356,33 +416,29 @@ static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj, * mapping.
*/ static void -put_iova_spaces(struct drm_gem_object *obj, bool close) +put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm, bool close) { - struct msm_gem_object *msm_obj = to_msm_bo(obj); - struct msm_gem_vma *vma, *tmp; + struct drm_gpuvm_bo *vm_bo, *tmp; msm_gem_assert_locked(obj); - list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) { - if (vma->vm) { - msm_gem_vma_purge(vma); - if (close) - msm_gem_vma_close(vma); - } - } -} + drm_gem_for_each_gpuvm_bo_safe (vm_bo, tmp, obj) { + struct drm_gpuva *vma, *vmatmp; -/* Called with msm_obj locked */ -static void -put_iova_vmas(struct drm_gem_object *obj) -{ - struct msm_gem_object *msm_obj = to_msm_bo(obj); - struct msm_gem_vma *vma, *tmp; + if (vm && vm_bo->vm != vm) + continue; - msm_gem_assert_locked(obj); + drm_gpuvm_bo_get(vm_bo); + + drm_gpuvm_bo_for_each_va_safe (vma, vmatmp, vm_bo) { + struct msm_gem_vma *msm_vma = to_msm_vma(vma); + + msm_gem_vma_purge(msm_vma); + if (close) + msm_gem_vma_close(msm_vma); + } - list_for_each_entry_safe(vma, tmp, &msm_obj->vmas, list) { - msm_gem_vma_close(vma); + drm_gpuvm_bo_put(vm_bo); } } @@ -390,7 +446,6 @@ static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj, struct msm_gem_vm *vm, u64 range_start, u64 range_end) { - struct msm_gem_object *msm_obj = to_msm_bo(obj); struct msm_gem_vma *vma; msm_gem_assert_locked(obj); @@ -399,12 +454,9 @@ static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj, if (!vma) { vma = msm_gem_vma_new(vm, obj, range_start, range_end); - if (IS_ERR(vma)) - return vma; - list_add_tail(&vma->list, &msm_obj->vmas); } else { - GEM_WARN_ON(vma->iova < range_start); - GEM_WARN_ON((vma->iova + obj->size) > range_end); + GEM_WARN_ON(vma->base.va.addr < range_start); + GEM_WARN_ON((vma->base.va.addr + obj->size) > range_end); } return vma; @@ -484,7 +536,7 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj, ret = msm_gem_pin_vma_locked(obj, vma); if (!ret) { - *iova = vma->iova; + *iova = vma->base.va.addr; pin_obj_locked(obj); } @@ -530,7 +582,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj, if (IS_ERR(vma)) { ret = PTR_ERR(vma); } else { - *iova = vma->iova; + *iova = vma->base.va.addr; } msm_gem_unlock(obj); @@ -571,7 +623,7 @@ int msm_gem_set_iova(struct drm_gem_object *obj, vma = get_vma_locked(obj, vm, iova, iova + obj->size); if (IS_ERR(vma)) { ret = PTR_ERR(vma); - } else if (GEM_WARN_ON(vma->iova != iova)) { + } else if (GEM_WARN_ON(vma->base.va.addr != iova)) { clear_iova(obj, vm); ret = -EBUSY; } @@ -593,9 +645,10 @@ void msm_gem_unpin_iova(struct drm_gem_object *obj, msm_gem_lock(obj); vma = lookup_vma(obj, vm); - if (!GEM_WARN_ON(!vma)) { + if (vma) { msm_gem_unpin_locked(obj); } + detach_vm(obj, vm); msm_gem_unlock(obj); } @@ -755,7 +808,7 @@ void msm_gem_purge(struct drm_gem_object *obj) GEM_WARN_ON(!is_purgeable(msm_obj)); /* Get rid of any iommu mapping(s): */ - put_iova_spaces(obj, false); + put_iova_spaces(obj, NULL, false); msm_gem_vunmap(obj); @@ -763,8 +816,6 @@ void msm_gem_purge(struct drm_gem_object *obj) put_pages(obj); - put_iova_vmas(obj); - mutex_lock(&priv->lru.lock); /* A one-way transition: */ msm_obj->madv = __MSM_MADV_PURGED; @@ -795,7 +846,7 @@ void msm_gem_evict(struct drm_gem_object *obj) GEM_WARN_ON(is_unevictable(msm_obj)); /* Get rid of any iommu mapping(s): */ - put_iova_spaces(obj, false); + put_iova_spaces(obj, NULL, false); drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping); @@ -861,7 +912,6 @@ void msm_gem_describe(struct drm_gem_object *obj, struct 
seq_file *m, { struct msm_gem_object *msm_obj = to_msm_bo(obj); struct dma_resv *robj = obj->resv; - struct msm_gem_vma *vma; uint64_t off = drm_vma_node_start(&obj->vma_node); const char *madv; @@ -904,14 +954,17 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, seq_printf(m, " %08zu %9s %-32s\n", obj->size, madv, msm_obj->name); - if (!list_empty(&msm_obj->vmas)) { + if (!list_empty(&obj->gpuva.list)) { + struct drm_gpuvm_bo *vm_bo; seq_puts(m, " vmas:"); - list_for_each_entry(vma, &msm_obj->vmas, list) { - const char *name, *comm; - if (vma->vm) { - struct msm_gem_vm *vm = vma->vm; + drm_gem_for_each_gpuvm_bo (vm_bo, obj) { + struct drm_gpuva *vma; + + drm_gpuvm_bo_for_each_va (vma, vm_bo) { + const char *name, *comm; + struct msm_gem_vm *vm = to_msm_vm(vma->vm); struct task_struct *task = get_pid_task(vm->pid, PIDTYPE_PID); if (task) { @@ -920,15 +973,14 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, } else { comm = NULL; } - name = vm->name; - } else { - name = comm = NULL; + name = vm->base.name; + + seq_printf(m, " [%s%s%s: vm=%p, %08llx, %smapped]", + name, comm ? ":" : "", comm ? comm : "", + vma->vm, vma->va.addr, + to_msm_vma(vma)->mapped ? "" : "un"); + kfree(comm); } - seq_printf(m, " [%s%s%s: vm=%p, %08llx,%s]", - name, comm ? ":" : "", comm ? comm : "", - vma->vm, vma->iova, - vma->mapped ? "mapped" : "unmapped"); - kfree(comm); } seq_puts(m, "\n"); @@ -974,7 +1026,7 @@ static void msm_gem_free_object(struct drm_gem_object *obj) list_del(&msm_obj->node); mutex_unlock(&priv->obj_lock); - put_iova_spaces(obj, true); + put_iova_spaces(obj, NULL, true); if (drm_gem_is_imported(obj)) { GEM_WARN_ON(msm_obj->vaddr); @@ -984,13 +1036,10 @@ static void msm_gem_free_object(struct drm_gem_object *obj) */ kvfree(msm_obj->pages); - put_iova_vmas(obj); - drm_prime_gem_destroy(obj, msm_obj->sgt); } else { msm_gem_vunmap(obj); put_pages(obj); - put_iova_vmas(obj); } drm_gem_object_release(obj); @@ -1096,7 +1145,6 @@ static int msm_gem_new_impl(struct drm_device *dev, msm_obj->madv = MSM_MADV_WILLNEED; INIT_LIST_HEAD(&msm_obj->node); - INIT_LIST_HEAD(&msm_obj->vmas); *obj = &msm_obj->base; (*obj)->funcs = &msm_gem_object_funcs; diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index cf1e86252219..4112370baf34 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -10,6 +10,7 @@ #include #include #include "drm/drm_exec.h" +#include "drm/drm_gpuvm.h" #include "drm/gpu_scheduler.h" #include "msm_drv.h" @@ -22,30 +23,67 @@ #define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */ #define MSM_BO_MAP_PRIV 0x20000000 /* use IOMMU_PRIV when mapping */ +/** + * struct msm_gem_vm - VM object + * + * A VM object representing a GPU (or display or GMU or ...) virtual address + * space. + * + * In the case of GPU, if per-process address spaces are supported, the address + * space is split into two VMs, which map to TTBR0 and TTBR1 in the SMMU. TTBR0 + * is used for userspace objects, and is unique per msm_context/drm_file, while + * TTBR1 is the same for all processes. (The kernel controlled ringbuffer and + * a few other kernel controlled buffers live in TTBR1.) + * + * The GPU TTBR0 vm can be managed by userspace or by the kernel, depending on + * whether userspace supports VM_BIND. All other vm's are managed by the kernel. + * (Managed by kernel means the kernel is responsible for VA allocation.) 
+ * + * Note that because VM_BIND allows a given BO to be mapped multiple times in + * a VM, and therefore have multiple VMA's in a VM, there is an extra object + * provided by drm_gpuvm infrastructure.. the drm_gpuvm_bo, which is not + * embedded in any larger driver structure. The GEM object holds a list of + * drm_gpuvm_bo, which in turn holds a list of msm_gem_vma. A linked vma + * holds a reference to the vm_bo, and drops it when the vma is unlinked. + * So we just need to call drm_gpuvm_bo_obtain() to return a ref to an + * existing vm_bo, or create a new one. Once the vma is linked, the ref + * to the vm_bo can be dropped (since the vma is holding one). + */ struct msm_gem_vm { - const char *name; - /* NOTE: mm managed at the page level, size is in # of pages - * and position mm_node->start is in # of pages: + /** @base: Inherit from drm_gpuvm. */ + struct drm_gpuvm base; + + /** + * @mm: Memory management for kernel managed VA allocations + * + * Only used for kernel managed VMs, unused for user managed VMs. + * + * Protected by @mm_lock. */ struct drm_mm mm; - spinlock_t lock; /* Protects drm_mm node allocation/removal */ + + /** @mm_lock: protects @mm node allocation/removal */ + struct spinlock mm_lock; + + /** @vm_lock: protects gpuvm insert/remove/traverse */ + struct mutex vm_lock; + + /** @mmu: The mmu object which manages the pgtables */ struct msm_mmu *mmu; - struct kref kref; - /* For address spaces associated with a specific process, this + /** + * @pid: For address spaces associated with a specific process, this * will be non-NULL: */ struct pid *pid; - /* @faults: the number of GPU hangs associated with this address space */ + /** @faults: the number of GPU hangs associated with this address space */ int faults; - /** @va_start: lowest possible address to allocate */ - uint64_t va_start; - - /** @va_size: the size of the address space (in bytes) */ - uint64_t va_size; + /** @managed: is this a kernel managed VM? */ + bool managed; }; +#define to_msm_vm(x) container_of(x, struct msm_gem_vm, base) struct msm_gem_vm * msm_gem_vm_get(struct msm_gem_vm *vm); @@ -53,18 +91,33 @@ msm_gem_vm_get(struct msm_gem_vm *vm); void msm_gem_vm_put(struct msm_gem_vm *vm); struct msm_gem_vm * -msm_gem_vm_create(struct msm_mmu *mmu, const char *name, - u64 va_start, u64 size); +msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name, + u64 va_start, u64 va_size, bool managed); struct msm_fence_context; +#define MSM_VMA_DUMP (DRM_GPUVA_USERBITS << 0) + +/** + * struct msm_gem_vma - a VMA mapping + * + * Represents a combination of a GEM object plus a VM. + */ struct msm_gem_vma { + /** @base: inherit from drm_gpuva */ + struct drm_gpuva base; + + /** + * @node: mm node for VA allocation + * + * Only used by kernel managed VMs + */ struct drm_mm_node node; - uint64_t iova; - struct msm_gem_vm *vm; - struct list_head list; /* node in msm_gem_object::vmas */ + + /** @mapped: Is this VMA mapped? 
*/ bool mapped; }; +#define to_msm_vma(x) container_of(x, struct msm_gem_vma, base) struct msm_gem_vma * msm_gem_vma_new(struct msm_gem_vm *vm, struct drm_gem_object *obj, @@ -100,8 +153,6 @@ struct msm_gem_object { struct sg_table *sgt; void *vaddr; - struct list_head vmas; /* list of msm_gem_vma */ - char name[32]; /* Identifier to print for the debugfs files */ /* userspace metadata backchannel */ @@ -292,6 +343,7 @@ struct msm_gem_submit { struct drm_gem_object *obj; uint32_t handle; }; + struct drm_gpuvm_bo *vm_bo; uint64_t iova; } bos[]; }; diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index c299c0c72c96..0cc30660c9c5 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -322,7 +322,8 @@ static int submit_pin_objects(struct msm_gem_submit *submit) if (ret) break; - submit->bos[i].iova = vma->iova; + submit->bos[i].vm_bo = drm_gpuvm_bo_get(vma->base.vm_bo); + submit->bos[i].iova = vma->base.va.addr; } /* @@ -459,14 +460,14 @@ out: */ static void submit_cleanup(struct msm_gem_submit *submit, bool error) { + if (submit->exec.objects) + drm_exec_fini(&submit->exec); + if (error) { submit_unpin_objects(submit); /* job wasn't enqueued to scheduler, so early retirement: */ msm_submit_retire(submit); } - - if (submit->exec.objects) - drm_exec_fini(&submit->exec); } void msm_submit_retire(struct msm_gem_submit *submit) @@ -475,7 +476,11 @@ void msm_submit_retire(struct msm_gem_submit *submit) for (i = 0; i < submit->nr_bos; i++) { struct drm_gem_object *obj = submit->bos[i].obj; + struct drm_gpuvm_bo *vm_bo = submit->bos[i].vm_bo; + msm_gem_lock(obj); + drm_gpuvm_bo_put(vm_bo); + msm_gem_unlock(obj); drm_gem_object_put(obj); } } diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index ca29e81d79d2..1f4c9b5c2e8f 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -5,14 +5,13 @@ */ #include "msm_drv.h" -#include "msm_fence.h" #include "msm_gem.h" #include "msm_mmu.h" static void -msm_gem_vm_destroy(struct kref *kref) +msm_gem_vm_free(struct drm_gpuvm *gpuvm) { - struct msm_gem_vm *vm = container_of(kref, struct msm_gem_vm, kref); + struct msm_gem_vm *vm = container_of(gpuvm, struct msm_gem_vm, base); drm_mm_takedown(&vm->mm); if (vm->mmu) @@ -25,14 +24,14 @@ msm_gem_vm_destroy(struct kref *kref) void msm_gem_vm_put(struct msm_gem_vm *vm) { if (vm) - kref_put(&vm->kref, msm_gem_vm_destroy); + drm_gpuvm_put(&vm->base); } struct msm_gem_vm * msm_gem_vm_get(struct msm_gem_vm *vm) { if (!IS_ERR_OR_NULL(vm)) - kref_get(&vm->kref); + drm_gpuvm_get(&vm->base); return vm; } @@ -40,14 +39,14 @@ msm_gem_vm_get(struct msm_gem_vm *vm) /* Actually unmap memory for the vma */ void msm_gem_vma_purge(struct msm_gem_vma *vma) { - struct msm_gem_vm *vm = vma->vm; - unsigned size = vma->node.size; + struct msm_gem_vm *vm = to_msm_vm(vma->base.vm); + unsigned size = vma->base.va.range; /* Don't do anything if the memory isn't mapped */ if (!vma->mapped) return; - vm->mmu->funcs->unmap(vm->mmu, vma->iova, size); + vm->mmu->funcs->unmap(vm->mmu, vma->base.va.addr, size); vma->mapped = false; } @@ -57,10 +56,10 @@ int msm_gem_vma_map(struct msm_gem_vma *vma, int prot, struct sg_table *sgt, int size) { - struct msm_gem_vm *vm = vma->vm; + struct msm_gem_vm *vm = to_msm_vm(vma->base.vm); int ret; - if (GEM_WARN_ON(!vma->iova)) + if (GEM_WARN_ON(!vma->base.va.addr)) return -EINVAL; if (vma->mapped) @@ -68,9 +67,6 @@ msm_gem_vma_map(struct msm_gem_vma *vma, int prot, 
vma->mapped = true; - if (!vm) - return 0; - /* * NOTE: iommu/io-pgtable can allocate pages, so we cannot hold * a lock across map/unmap which is also used in the job_run() @@ -80,7 +76,7 @@ msm_gem_vma_map(struct msm_gem_vma *vma, int prot, * Revisit this if we can come up with a scheme to pre-alloc pages * for the pgtable in map/unmap ops. */ - ret = vm->mmu->funcs->map(vm->mmu, vma->iova, sgt, size, prot); + ret = vm->mmu->funcs->map(vm->mmu, vma->base.va.addr, sgt, size, prot); if (ret) { vma->mapped = false; @@ -92,19 +88,20 @@ msm_gem_vma_map(struct msm_gem_vma *vma, int prot, /* Close an iova. Warn if it is still in use */ void msm_gem_vma_close(struct msm_gem_vma *vma) { - struct msm_gem_vm *vm = vma->vm; + struct msm_gem_vm *vm = to_msm_vm(vma->base.vm); GEM_WARN_ON(vma->mapped); - spin_lock(&vm->lock); - if (vma->iova) + spin_lock(&vm->mm_lock); + if (vma->base.va.addr) drm_mm_remove_node(&vma->node); - spin_unlock(&vm->lock); + spin_unlock(&vm->mm_lock); - vma->iova = 0; - list_del(&vma->list); + mutex_lock(&vm->vm_lock); + drm_gpuva_remove(&vma->base); + drm_gpuva_unlink(&vma->base); + mutex_unlock(&vm->vm_lock); - msm_gem_vm_put(vm); kfree(vma); } @@ -113,6 +110,7 @@ struct msm_gem_vma * msm_gem_vma_new(struct msm_gem_vm *vm, struct drm_gem_object *obj, u64 range_start, u64 range_end) { + struct drm_gpuvm_bo *vm_bo; struct msm_gem_vma *vma; int ret; @@ -120,36 +118,83 @@ msm_gem_vma_new(struct msm_gem_vm *vm, struct drm_gem_object *obj, if (!vma) return ERR_PTR(-ENOMEM); - vma->vm = vm; + if (vm->managed) { + spin_lock(&vm->mm_lock); + ret = drm_mm_insert_node_in_range(&vm->mm, &vma->node, + obj->size, PAGE_SIZE, 0, + range_start, range_end, 0); + spin_unlock(&vm->mm_lock); - spin_lock(&vm->lock); - ret = drm_mm_insert_node_in_range(&vm->mm, &vma->node, - obj->size, PAGE_SIZE, 0, - range_start, range_end, 0); - spin_unlock(&vm->lock); + if (ret) + goto err_free_vma; - if (ret) - goto err_free_vma; + range_start = vma->node.start; + range_end = range_start + obj->size; + } - vma->iova = vma->node.start; + GEM_WARN_ON((range_end - range_start) > obj->size); + + drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, 0); vma->mapped = false; - INIT_LIST_HEAD(&vma->list); + mutex_lock(&vm->vm_lock); + ret = drm_gpuva_insert(&vm->base, &vma->base); + mutex_unlock(&vm->vm_lock); + if (ret) + goto err_free_range; - kref_get(&vm->kref); + vm_bo = drm_gpuvm_bo_obtain(&vm->base, obj); + if (IS_ERR(vm_bo)) { + ret = PTR_ERR(vm_bo); + goto err_va_remove; + } + + mutex_lock(&vm->vm_lock); + drm_gpuvm_bo_extobj_add(vm_bo); + drm_gpuva_link(&vma->base, vm_bo); + mutex_unlock(&vm->vm_lock); + GEM_WARN_ON(drm_gpuvm_bo_put(vm_bo)); return vma; +err_va_remove: + mutex_lock(&vm->vm_lock); + drm_gpuva_remove(&vma->base); + mutex_unlock(&vm->vm_lock); +err_free_range: + if (vm->managed) + drm_mm_remove_node(&vma->node); err_free_vma: kfree(vma); return ERR_PTR(ret); } +static const struct drm_gpuvm_ops msm_gpuvm_ops = { + .vm_free = msm_gem_vm_free, +}; + +/** + * msm_gem_vm_create() - Create and initialize a &msm_gem_vm + * @drm: the drm device + * @mmu: the backing MMU objects handling mapping/unmapping + * @name: the name of the VM + * @va_start: the start offset of the VA space + * @va_size: the size of the VA space + * @managed: is it a kernel managed VM? + * + * In a kernel managed VM, the kernel handles address allocation, and only + * synchronous operations are supported. 
In a user managed VM, userspace + * handles virtual address allocation, and both async and sync operations + * are supported. + */ struct msm_gem_vm * -msm_gem_vm_create(struct msm_mmu *mmu, const char *name, - u64 va_start, u64 size) +msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name, + u64 va_start, u64 va_size, bool managed) { + enum drm_gpuvm_flags flags = 0; struct msm_gem_vm *vm; + struct drm_gem_object *dummy_gem; + int ret = 0; if (IS_ERR(mmu)) return ERR_CAST(mmu); @@ -158,15 +203,28 @@ msm_gem_vm_create(struct msm_mmu *mmu, const char *name, if (!vm) return ERR_PTR(-ENOMEM); - spin_lock_init(&vm->lock); - vm->name = name; - vm->mmu = mmu; - vm->va_start = va_start; - vm->va_size = size; + dummy_gem = drm_gpuvm_resv_object_alloc(drm); + if (!dummy_gem) { + ret = -ENOMEM; + goto err_free_vm; + } + + drm_gpuvm_init(&vm->base, name, flags, drm, dummy_gem, + va_start, va_size, 0, 0, &msm_gpuvm_ops); + drm_gem_object_put(dummy_gem); + + spin_lock_init(&vm->mm_lock); + mutex_init(&vm->vm_lock); - drm_mm_init(&vm->mm, va_start, size); + vm->mmu = mmu; + vm->managed = managed; - kref_init(&vm->kref); + drm_mm_init(&vm->mm, va_start, va_size); return vm; + +err_free_vm: + kfree(vm); + return ERR_PTR(ret); + } diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c index 88504c4b842f..6458bd82a0cd 100644 --- a/drivers/gpu/drm/msm/msm_kms.c +++ b/drivers/gpu/drm/msm/msm_kms.c @@ -204,8 +204,8 @@ struct msm_gem_vm *msm_kms_init_vm(struct drm_device *dev) return NULL; } - vm = msm_gem_vm_create(mmu, "mdp_kms", - 0x1000, 0x100000000 - 0x1000); + vm = msm_gem_vm_create(dev, mmu, "mdp_kms", + 0x1000, 0x100000000 - 0x1000, true); if (IS_ERR(vm)) { dev_err(mdp_dev, "vm create, error %pe\n", vm); mmu->funcs->destroy(mmu); -- cgit v1.2.3 From fe4952b5f27cca5d143d3de249562d4cc984c1d6 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Sun, 29 Jun 2025 13:12:57 -0700 Subject: drm/msm: Convert vm locking Convert to using the gpuvm's r_obj for serializing access to the VM. This way we can use the drm_exec helper for dealing with deadlock detection and backoff. This will let us deal with upcoming locking order conflicts with the VM_BIND implementation (ie. in some scenarios we need to acquire the obj lock first, for ex. to iterate all the VMs an obj is bound in, and in other scenarios we need to acquire the VM lock first).
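In sketch form, the locking pattern that results is roughly the following (cf. the msm_gem_lock_vm_and_obj() helper added below; error handling elided):

struct drm_exec exec;
int ret;

drm_exec_init(&exec, 0, 2);
drm_exec_until_all_locked (&exec) {
	/* Lock the VM's resv first, then the obj (unless they share a resv): */
	ret = drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(&vm->base));
	if (!ret && (obj->resv != drm_gpuvm_resv(&vm->base)))
		ret = drm_exec_lock_obj(&exec, obj);
	drm_exec_retry_on_contention(&exec);
	if (ret)
		break;
}

/* ... modify the VM and/or the obj's mappings ... */

drm_exec_fini(&exec); /* drop locks */
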
Signed-off-by: Rob Clark Signed-off-by: Rob Clark Tested-by: Antonino Maniscalco Reviewed-by: Antonino Maniscalco Patchwork: https://patchwork.freedesktop.org/patch/661478/ --- drivers/gpu/drm/msm/msm_gem.c | 41 ++++++++++++----- drivers/gpu/drm/msm/msm_gem.h | 37 +++++++++++++--- drivers/gpu/drm/msm/msm_gem_shrinker.c | 80 +++++++++++++++++++++++++++++++--- drivers/gpu/drm/msm/msm_gem_submit.c | 8 +++- drivers/gpu/drm/msm/msm_gem_vma.c | 24 ++++------ 5 files changed, 151 insertions(+), 39 deletions(-) (limited to 'drivers/gpu/drm/msm/msm_gem.h') diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 664fb801c221..82293806219a 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -48,6 +48,7 @@ static void put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm, bo static void detach_vm(struct drm_gem_object *obj, struct msm_gem_vm *vm) { msm_gem_assert_locked(obj); + drm_gpuvm_resv_assert_held(&vm->base); struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_find(&vm->base, obj); if (vm_bo) { @@ -68,6 +69,7 @@ static void detach_vm(struct drm_gem_object *obj, struct msm_gem_vm *vm) static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file) { struct msm_context *ctx = file->driver_priv; + struct drm_exec exec; update_ctx_mem(file, -obj->size); @@ -86,10 +88,10 @@ static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file) dma_resv_wait_timeout(obj->resv, DMA_RESV_USAGE_READ, false, msecs_to_jiffies(1000)); - msm_gem_lock(obj); + msm_gem_lock_vm_and_obj(&exec, obj, ctx->vm); put_iova_spaces(obj, &ctx->vm->base, true); detach_vm(obj, ctx->vm); - msm_gem_unlock(obj); + drm_exec_fini(&exec); /* drop locks */ } /* @@ -551,11 +553,12 @@ int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj, struct msm_gem_vm *vm, uint64_t *iova, u64 range_start, u64 range_end) { + struct drm_exec exec; int ret; - msm_gem_lock(obj); + msm_gem_lock_vm_and_obj(&exec, obj, vm); ret = get_and_pin_iova_range_locked(obj, vm, iova, range_start, range_end); - msm_gem_unlock(obj); + drm_exec_fini(&exec); /* drop locks */ return ret; } @@ -575,16 +578,17 @@ int msm_gem_get_iova(struct drm_gem_object *obj, struct msm_gem_vm *vm, uint64_t *iova) { struct msm_gem_vma *vma; + struct drm_exec exec; int ret = 0; - msm_gem_lock(obj); + msm_gem_lock_vm_and_obj(&exec, obj, vm); vma = get_vma_locked(obj, vm, 0, U64_MAX); if (IS_ERR(vma)) { ret = PTR_ERR(vma); } else { *iova = vma->base.va.addr; } - msm_gem_unlock(obj); + drm_exec_fini(&exec); /* drop locks */ return ret; } @@ -613,9 +617,10 @@ static int clear_iova(struct drm_gem_object *obj, int msm_gem_set_iova(struct drm_gem_object *obj, struct msm_gem_vm *vm, uint64_t iova) { + struct drm_exec exec; int ret = 0; - msm_gem_lock(obj); + msm_gem_lock_vm_and_obj(&exec, obj, vm); if (!iova) { ret = clear_iova(obj, vm); } else { @@ -628,7 +633,7 @@ int msm_gem_set_iova(struct drm_gem_object *obj, ret = -EBUSY; } } - msm_gem_unlock(obj); + drm_exec_fini(&exec); /* drop locks */ return ret; } @@ -642,14 +647,15 @@ void msm_gem_unpin_iova(struct drm_gem_object *obj, struct msm_gem_vm *vm) { struct msm_gem_vma *vma; + struct drm_exec exec; - msm_gem_lock(obj); + msm_gem_lock_vm_and_obj(&exec, obj, vm); vma = lookup_vma(obj, vm); if (vma) { msm_gem_unpin_locked(obj); } detach_vm(obj, vm); - msm_gem_unlock(obj); + drm_exec_fini(&exec); /* drop locks */ } int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, @@ -1021,12 +1027,27 @@ static void msm_gem_free_object(struct 
drm_gem_object *obj) struct msm_gem_object *msm_obj = to_msm_bo(obj); struct drm_device *dev = obj->dev; struct msm_drm_private *priv = dev->dev_private; + struct drm_exec exec; mutex_lock(&priv->obj_lock); list_del(&msm_obj->node); mutex_unlock(&priv->obj_lock); + /* + * We need to lock any VMs the object is still attached to, but not + * the object itself (see explanation in msm_gem_assert_locked()), + * so just open-code this special case: + */ + drm_exec_init(&exec, 0, 0); + drm_exec_until_all_locked (&exec) { + struct drm_gpuvm_bo *vm_bo; + drm_gem_for_each_gpuvm_bo (vm_bo, obj) { + drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(vm_bo->vm)); + drm_exec_retry_on_contention(&exec); + } + } put_iova_spaces(obj, NULL, true); + drm_exec_fini(&exec); /* drop locks */ if (drm_gem_is_imported(obj)) { GEM_WARN_ON(msm_obj->vaddr); diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 4112370baf34..33885a08cdd7 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -62,12 +62,6 @@ struct msm_gem_vm { */ struct drm_mm mm; - /** @mm_lock: protects @mm node allocation/removal */ - struct spinlock mm_lock; - - /** @vm_lock: protects gpuvm insert/remove/traverse */ - struct mutex vm_lock; - /** @mmu: The mmu object which manages the pgtables */ struct msm_mmu *mmu; @@ -246,6 +240,37 @@ msm_gem_unlock(struct drm_gem_object *obj) dma_resv_unlock(obj->resv); } +/** + * msm_gem_lock_vm_and_obj() - Helper to lock an obj + VM + * @exec: the exec context helper which will be initialized + * @obj: the GEM object to lock + * @vm: the VM to lock + * + * Operations which modify a VM frequently need to lock both the VM and + * the object being mapped/unmapped/etc. This helper uses drm_exec to + * acquire both locks, dealing with potential deadlock/backoff scenarios + * which arise when multiple locks are involved. + */ +static inline int +msm_gem_lock_vm_and_obj(struct drm_exec *exec, + struct drm_gem_object *obj, + struct msm_gem_vm *vm) +{ + int ret = 0; + + drm_exec_init(exec, 0, 2); + drm_exec_until_all_locked (exec) { + ret = drm_exec_lock_obj(exec, drm_gpuvm_resv_obj(&vm->base)); + if (!ret && (obj->resv != drm_gpuvm_resv(&vm->base))) + ret = drm_exec_lock_obj(exec, obj); + drm_exec_retry_on_contention(exec); + if (GEM_WARN_ON(ret)) + break; + } + + return ret; +} + static inline void msm_gem_assert_locked(struct drm_gem_object *obj) { diff --git a/drivers/gpu/drm/msm/msm_gem_shrinker.c b/drivers/gpu/drm/msm/msm_gem_shrinker.c index de185fc34084..5faf6227584a 100644 --- a/drivers/gpu/drm/msm/msm_gem_shrinker.c +++ b/drivers/gpu/drm/msm/msm_gem_shrinker.c @@ -43,6 +43,75 @@ msm_gem_shrinker_count(struct shrinker *shrinker, struct shrink_control *sc) return count; } +static bool +with_vm_locks(struct ww_acquire_ctx *ticket, + void (*fn)(struct drm_gem_object *obj), + struct drm_gem_object *obj) +{ + /* + * Track last locked entry for unwinding locks in error and + * success paths + */ + struct drm_gpuvm_bo *vm_bo, *last_locked = NULL; + int ret = 0; + + drm_gem_for_each_gpuvm_bo (vm_bo, obj) { + struct dma_resv *resv = drm_gpuvm_resv(vm_bo->vm); + + if (resv == obj->resv) + continue; + + ret = dma_resv_lock(resv, ticket); + + /* + * Since we already skip the case when the VM and obj + * share a resv (ie. _NO_SHARE objs), we don't expect + * to hit a double-locking scenario... which the lock + * unwinding cannot really cope with.
+ */ + WARN_ON(ret == -EALREADY); + + /* + * Don't bother with slow-lock / backoff / retry sequence, + * if we can't get the lock just give up and move on to + * the next object. + */ + if (ret) + goto out_unlock; + + /* + * Hold a ref to prevent the vm_bo from being freed + * and removed from the obj's gpuva list, as that would + * result in missing the unlock below + */ + drm_gpuvm_bo_get(vm_bo); + + last_locked = vm_bo; + } + + fn(obj); + +out_unlock: + if (last_locked) { + drm_gem_for_each_gpuvm_bo (vm_bo, obj) { + struct dma_resv *resv = drm_gpuvm_resv(vm_bo->vm); + + if (resv == obj->resv) + continue; + + dma_resv_unlock(resv); + + /* Drop the ref taken while locking: */ + drm_gpuvm_bo_put(vm_bo); + + if (last_locked == vm_bo) + break; + } + } + + return ret == 0; +} + static bool purge(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket) { @@ -52,9 +121,7 @@ purge(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket) if (msm_gem_active(obj)) return false; - msm_gem_purge(obj); - - return true; + return with_vm_locks(ticket, msm_gem_purge, obj); } static bool @@ -66,9 +133,7 @@ evict(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket) if (msm_gem_active(obj)) return false; - msm_gem_evict(obj); - - return true; + return with_vm_locks(ticket, msm_gem_evict, obj); } static bool @@ -100,6 +165,7 @@ static unsigned long msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) { struct msm_drm_private *priv = shrinker->private_data; + struct ww_acquire_ctx ticket; struct { struct drm_gem_lru *lru; bool (*shrink)(struct drm_gem_object *obj, struct ww_acquire_ctx *ticket); @@ -124,7 +190,7 @@ msm_gem_shrinker_scan(struct shrinker *shrinker, struct shrink_control *sc) drm_gem_lru_scan(stages[i].lru, nr, &stages[i].remaining, stages[i].shrink, - NULL); + &ticket); nr -= stages[i].freed; freed += stages[i].freed; remaining += stages[i].remaining; diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 0cc30660c9c5..50060d934469 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -257,11 +257,17 @@ out: /* This is where we make sure all the bo's are reserved and pin'd: */ static int submit_lock_objects(struct msm_gem_submit *submit) { + unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT; int ret; - drm_exec_init(&submit->exec, DRM_EXEC_INTERRUPTIBLE_WAIT, submit->nr_bos); + drm_exec_init(&submit->exec, flags, submit->nr_bos); drm_exec_until_all_locked (&submit->exec) { + ret = drm_exec_lock_obj(&submit->exec, + drm_gpuvm_resv_obj(&submit->vm->base)); + drm_exec_retry_on_contention(&submit->exec); + if (ret) + goto error; for (unsigned i = 0; i < submit->nr_bos; i++) { struct drm_gem_object *obj = submit->bos[i].obj; ret = drm_exec_prepare_obj(&submit->exec, obj, 1); diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index 1f4c9b5c2e8f..ccb20897a2b0 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -92,15 +92,13 @@ void msm_gem_vma_close(struct msm_gem_vma *vma) GEM_WARN_ON(vma->mapped); - spin_lock(&vm->mm_lock); + drm_gpuvm_resv_assert_held(&vm->base); + if (vma->base.va.addr) drm_mm_remove_node(&vma->node); - spin_unlock(&vm->mm_lock); - mutex_lock(&vm->vm_lock); drm_gpuva_remove(&vma->base); drm_gpuva_unlink(&vma->base); - mutex_unlock(&vm->vm_lock); kfree(vma); } @@ -114,16 +112,16 @@ msm_gem_vma_new(struct msm_gem_vm *vm, struct drm_gem_object *obj, struct msm_gem_vma *vma; int ret; +
drm_gpuvm_resv_assert_held(&vm->base); + vma = kzalloc(sizeof(*vma), GFP_KERNEL); if (!vma) return ERR_PTR(-ENOMEM); if (vm->managed) { - spin_lock(&vm->mm_lock); ret = drm_mm_insert_node_in_range(&vm->mm, &vma->node, obj->size, PAGE_SIZE, 0, range_start, range_end, 0); - spin_unlock(&vm->mm_lock); if (ret) goto err_free_vma; @@ -137,9 +135,7 @@ msm_gem_vma_new(struct msm_gem_vm *vm, struct drm_gem_object *obj, drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, 0); vma->mapped = false; - mutex_lock(&vm->vm_lock); ret = drm_gpuva_insert(&vm->base, &vma->base); - mutex_unlock(&vm->vm_lock); if (ret) goto err_free_range; @@ -149,18 +145,14 @@ msm_gem_vma_new(struct msm_gem_vm *vm, struct drm_gem_object *obj, goto err_va_remove; } - mutex_lock(&vm->vm_lock); drm_gpuvm_bo_extobj_add(vm_bo); drm_gpuva_link(&vma->base, vm_bo); - mutex_unlock(&vm->vm_lock); GEM_WARN_ON(drm_gpuvm_bo_put(vm_bo)); return vma; err_va_remove: - mutex_lock(&vm->vm_lock); drm_gpuva_remove(&vma->base); - mutex_unlock(&vm->vm_lock); err_free_range: if (vm->managed) drm_mm_remove_node(&vma->node); @@ -191,6 +183,11 @@ struct msm_gem_vm * msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name, u64 va_start, u64 va_size, bool managed) { + /* + * We mostly want to use DRM_GPUVM_RESV_PROTECTED, except that + * makes drm_gpuvm_bo_evict() a no-op for extobjs (ie. we lose + * tracking that an extobj is evicted) :facepalm: + */ enum drm_gpuvm_flags flags = 0; struct msm_gem_vm *vm; struct drm_gem_object *dummy_gem; @@ -213,9 +210,6 @@ msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name, va_start, va_size, 0, 0, &msm_gpuvm_ops); drm_gem_object_put(dummy_gem); - spin_lock_init(&vm->mm_lock); - mutex_init(&vm->vm_lock); - vm->mmu = mmu; vm->managed = managed; -- cgit v1.2.3 From 37889600f58ea554943d48a86e30f635005f6712 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Sun, 29 Jun 2025 13:12:58 -0700 Subject: drm/msm: Use drm_gpuvm types more Most of the driver code doesn't need to reach into msm-specific fields, so just use the drm_gpuvm/drm_gpuva types directly. This should hopefully improve commonality with other drivers and make the code easier to understand.
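As an illustrative sketch of the resulting idiom (drawn from the cleanup paths converted below), code holds the generic struct drm_gpuvm pointer and downcasts with to_msm_vm() only where an msm-specific field is actually needed:

	/* msm-specific field, reached via container_of() downcast: */
	struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu;

	mmu->funcs->detach(mmu);
	/* common drm_gpuvm refcounting, replacing msm_gem_vm_put(): */
	drm_gpuvm_put(gpu->vm);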
Signed-off-by: Rob Clark Signed-off-by: Rob Clark Tested-by: Antonino Maniscalco Reviewed-by: Antonino Maniscalco Patchwork: https://patchwork.freedesktop.org/patch/661483/ --- drivers/gpu/drm/msm/adreno/a2xx_gpu.c | 6 +- drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 3 +- drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 6 +- drivers/gpu/drm/msm/adreno/a6xx_gmu.h | 2 +- drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 11 ++-- drivers/gpu/drm/msm/adreno/a6xx_preempt.c | 2 +- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 19 ++++--- drivers/gpu/drm/msm/adreno/adreno_gpu.h | 4 +- drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 6 +- drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c | 11 ++-- drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 11 ++-- drivers/gpu/drm/msm/dsi/dsi_host.c | 6 +- drivers/gpu/drm/msm/msm_drv.h | 4 +- drivers/gpu/drm/msm/msm_fb.c | 4 +- drivers/gpu/drm/msm/msm_gem.c | 94 +++++++++++++++---------------- drivers/gpu/drm/msm/msm_gem.h | 59 +++++++++---------- drivers/gpu/drm/msm/msm_gem_submit.c | 8 +-- drivers/gpu/drm/msm/msm_gem_vma.c | 70 ++++++++++------------- drivers/gpu/drm/msm/msm_gpu.c | 18 +++--- drivers/gpu/drm/msm/msm_gpu.h | 10 ++-- drivers/gpu/drm/msm/msm_kms.c | 6 +- drivers/gpu/drm/msm/msm_kms.h | 2 +- drivers/gpu/drm/msm/msm_submitqueue.c | 2 +- 23 files changed, 175 insertions(+), 189 deletions(-) (limited to 'drivers/gpu/drm/msm/msm_gem.h') diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c index 889480aa13ba..ec38db45d8a3 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a2xx_gpu.c @@ -113,7 +113,7 @@ static int a2xx_hw_init(struct msm_gpu *gpu) uint32_t *ptr, len; int i, ret; - a2xx_gpummu_params(gpu->vm->mmu, &pt_base, &tran_error); + a2xx_gpummu_params(to_msm_vm(gpu->vm)->mmu, &pt_base, &tran_error); DBG("%s", gpu->name); @@ -466,11 +466,11 @@ static struct msm_gpu_state *a2xx_gpu_state_get(struct msm_gpu *gpu) return state; } -static struct msm_gem_vm * +static struct drm_gpuvm * a2xx_create_vm(struct msm_gpu *gpu, struct platform_device *pdev) { struct msm_mmu *mmu = a2xx_gpummu_new(&pdev->dev, gpu); - struct msm_gem_vm *vm; + struct drm_gpuvm *vm; vm = msm_gem_vm_create(gpu->dev, mmu, "gpu", SZ_16M, 0xfff * SZ_64K, true); diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c index 04138a06724b..ee927d8cc0dc 100644 --- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c @@ -1786,7 +1786,8 @@ struct msm_gpu *a5xx_gpu_init(struct drm_device *dev) return ERR_PTR(ret); } - msm_mmu_set_fault_handler(gpu->vm->mmu, gpu, a5xx_fault_handler); + msm_mmu_set_fault_handler(to_msm_vm(gpu->vm)->mmu, gpu, + a5xx_fault_handler); /* Set up the preemption specific bits and pieces for each ringbuffer */ a5xx_preempt_init(gpu); diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c index 77d9ff9632d1..28e6705c6da6 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c @@ -1259,6 +1259,8 @@ int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu) static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu) { + struct msm_mmu *mmu = to_msm_vm(gmu->vm)->mmu; + msm_gem_kernel_put(gmu->hfi.obj, gmu->vm); msm_gem_kernel_put(gmu->debug.obj, gmu->vm); msm_gem_kernel_put(gmu->icache.obj, gmu->vm); @@ -1266,8 +1268,8 @@ static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu) msm_gem_kernel_put(gmu->dummy.obj, gmu->vm); msm_gem_kernel_put(gmu->log.obj, gmu->vm); - gmu->vm->mmu->funcs->detach(gmu->vm->mmu); - msm_gem_vm_put(gmu->vm); 
+ mmu->funcs->detach(mmu); + drm_gpuvm_put(gmu->vm); } static int a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo, diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h index fc288dfe889f..d1ce11131ba6 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h +++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h @@ -62,7 +62,7 @@ struct a6xx_gmu { /* For serializing communication with the GMU: */ struct mutex lock; - struct msm_gem_vm *vm; + struct drm_gpuvm *vm; void __iomem *mmio; void __iomem *rscc; diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c index 262129cb4415..0b78888c58af 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c @@ -120,7 +120,7 @@ static void a6xx_set_pagetable(struct a6xx_gpu *a6xx_gpu, if (ctx->seqno == ring->cur_ctx_seqno) return; - if (msm_iommu_pagetable_params(ctx->vm->mmu, &ttbr, &asid)) + if (msm_iommu_pagetable_params(to_msm_vm(ctx->vm)->mmu, &ttbr, &asid)) return; if (adreno_gpu->info->family >= ADRENO_7XX_GEN1) { @@ -2256,7 +2256,7 @@ static void a6xx_gpu_set_freq(struct msm_gpu *gpu, struct dev_pm_opp *opp, mutex_unlock(&a6xx_gpu->gmu.lock); } -static struct msm_gem_vm * +static struct drm_gpuvm * a6xx_create_vm(struct msm_gpu *gpu, struct platform_device *pdev) { struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu); @@ -2274,12 +2274,12 @@ a6xx_create_vm(struct msm_gpu *gpu, struct platform_device *pdev) return adreno_iommu_create_vm(gpu, pdev, quirks); } -static struct msm_gem_vm * +static struct drm_gpuvm * a6xx_create_private_vm(struct msm_gpu *gpu) { struct msm_mmu *mmu; - mmu = msm_iommu_pagetable_create(gpu->vm->mmu); + mmu = msm_iommu_pagetable_create(to_msm_vm(gpu->vm)->mmu); if (IS_ERR(mmu)) return ERR_CAST(mmu); @@ -2559,7 +2559,8 @@ struct msm_gpu *a6xx_gpu_init(struct drm_device *dev) adreno_gpu->uche_trap_base = 0x1fffffffff000ull; - msm_mmu_set_fault_handler(gpu->vm->mmu, gpu, a6xx_fault_handler); + msm_mmu_set_fault_handler(to_msm_vm(gpu->vm)->mmu, gpu, + a6xx_fault_handler); a6xx_calc_ubwc_config(adreno_gpu); /* Set up the preemption specific bits and pieces for each ringbuffer */ diff --git a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c index f6194a57f794..9e7f2e5fb2b9 100644 --- a/drivers/gpu/drm/msm/adreno/a6xx_preempt.c +++ b/drivers/gpu/drm/msm/adreno/a6xx_preempt.c @@ -377,7 +377,7 @@ static int preempt_init_ring(struct a6xx_gpu *a6xx_gpu, struct a7xx_cp_smmu_info *smmu_info_ptr = ptr; - msm_iommu_pagetable_params(gpu->vm->mmu, &ttbr, &asid); + msm_iommu_pagetable_params(to_msm_vm(gpu->vm)->mmu, &ttbr, &asid); smmu_info_ptr->magic = GEN7_CP_SMMU_INFO_MAGIC; smmu_info_ptr->ttbr0 = ttbr; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index 46199a6d0e41..676fc078d545 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -191,21 +191,21 @@ int adreno_zap_shader_load(struct msm_gpu *gpu, u32 pasid) return zap_shader_load_mdt(gpu, adreno_gpu->info->zapfw, pasid); } -struct msm_gem_vm * +struct drm_gpuvm * adreno_create_vm(struct msm_gpu *gpu, struct platform_device *pdev) { return adreno_iommu_create_vm(gpu, pdev, 0); } -struct msm_gem_vm * +struct drm_gpuvm * adreno_iommu_create_vm(struct msm_gpu *gpu, struct platform_device *pdev, unsigned long quirks) { struct iommu_domain_geometry *geometry; struct msm_mmu *mmu; - struct msm_gem_vm *vm; + struct drm_gpuvm *vm; u64 start, size; mmu 
= msm_iommu_gpu_new(&pdev->dev, gpu, quirks); @@ -275,9 +275,11 @@ void adreno_check_and_reenable_stall(struct adreno_gpu *adreno_gpu) if (!priv->stall_enabled && ktime_after(ktime_get(), priv->stall_reenable_time) && !READ_ONCE(gpu->crashstate)) { + struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu; + priv->stall_enabled = true; - gpu->vm->mmu->funcs->set_stall(gpu->vm->mmu, true); + mmu->funcs->set_stall(mmu, true); } spin_unlock_irqrestore(&priv->fault_stall_lock, flags); } @@ -292,6 +294,7 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags, u32 scratch[4]) { struct msm_drm_private *priv = gpu->dev->dev_private; + struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu; const char *type = "UNKNOWN"; bool do_devcoredump = info && (info->fsr & ARM_SMMU_FSR_SS) && !READ_ONCE(gpu->crashstate); @@ -305,7 +308,7 @@ int adreno_fault_handler(struct msm_gpu *gpu, unsigned long iova, int flags, if (priv->stall_enabled) { priv->stall_enabled = false; - gpu->vm->mmu->funcs->set_stall(gpu->vm->mmu, false); + mmu->funcs->set_stall(mmu, false); } priv->stall_reenable_time = ktime_add_ms(ktime_get(), 500); @@ -405,7 +408,7 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx, return 0; case MSM_PARAM_FAULTS: if (ctx->vm) - *value = gpu->global_faults + ctx->vm->faults; + *value = gpu->global_faults + to_msm_vm(ctx->vm)->faults; else *value = gpu->global_faults; return 0; @@ -415,12 +418,12 @@ int adreno_get_param(struct msm_gpu *gpu, struct msm_context *ctx, case MSM_PARAM_VA_START: if (ctx->vm == gpu->vm) return UERR(EINVAL, drm, "requires per-process pgtables"); - *value = ctx->vm->base.mm_start; + *value = ctx->vm->mm_start; return 0; case MSM_PARAM_VA_SIZE: if (ctx->vm == gpu->vm) return UERR(EINVAL, drm, "requires per-process pgtables"); - *value = ctx->vm->base.mm_range; + *value = ctx->vm->mm_range; return 0; case MSM_PARAM_HIGHEST_BANK_BIT: *value = adreno_gpu->ubwc_config.highest_bank_bit; diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h index b1761f990aa1..8650bbd8698e 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h @@ -622,11 +622,11 @@ void adreno_show_object(struct drm_printer *p, void **ptr, int len, * Common helper function to initialize the default address space for arm-smmu * attached targets */ -struct msm_gem_vm * +struct drm_gpuvm * adreno_create_vm(struct msm_gpu *gpu, struct platform_device *pdev); -struct msm_gem_vm * +struct drm_gpuvm * adreno_iommu_create_vm(struct msm_gpu *gpu, struct platform_device *pdev, unsigned long quirks); diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c index a7782763ab90..fa463ccc135d 100644 --- a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c +++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c @@ -1098,17 +1098,17 @@ static void _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms) if (!dpu_kms->base.vm) return; - mmu = dpu_kms->base.vm->mmu; + mmu = to_msm_vm(dpu_kms->base.vm)->mmu; mmu->funcs->detach(mmu); - msm_gem_vm_put(dpu_kms->base.vm); + drm_gpuvm_put(dpu_kms->base.vm); dpu_kms->base.vm = NULL; } static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms) { - struct msm_gem_vm *vm; + struct drm_gpuvm *vm; vm = msm_kms_init_vm(dpu_kms->dev); if (IS_ERR(vm)) diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c index a867c684c6d6..9acde91ad6c3 100644 --- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c @@ -122,15 
+122,16 @@ static void mdp4_destroy(struct msm_kms *kms) { struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(kms)); struct device *dev = mdp4_kms->dev->dev; - struct msm_gem_vm *vm = kms->vm; if (mdp4_kms->blank_cursor_iova) msm_gem_unpin_iova(mdp4_kms->blank_cursor_bo, kms->vm); drm_gem_object_put(mdp4_kms->blank_cursor_bo); - if (vm) { - vm->mmu->funcs->detach(vm->mmu); - msm_gem_vm_put(vm); + if (kms->vm) { + struct msm_mmu *mmu = to_msm_vm(kms->vm)->mmu; + + mmu->funcs->detach(mmu); + drm_gpuvm_put(kms->vm); } if (mdp4_kms->rpm_enabled) @@ -398,7 +399,7 @@ static int mdp4_kms_init(struct drm_device *dev) struct mdp4_kms *mdp4_kms = to_mdp4_kms(to_mdp_kms(priv->kms)); struct msm_kms *kms = NULL; struct msm_mmu *mmu; - struct msm_gem_vm *vm; + struct drm_gpuvm *vm; int ret; u32 major, minor; unsigned long max_clk; diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c index 9dca0385a42d..b6e6bd1f95ee 100644 --- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c +++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c @@ -198,11 +198,12 @@ static void mdp5_destroy(struct mdp5_kms *mdp5_kms); static void mdp5_kms_destroy(struct msm_kms *kms) { struct mdp5_kms *mdp5_kms = to_mdp5_kms(to_mdp_kms(kms)); - struct msm_gem_vm *vm = kms->vm; - if (vm) { - vm->mmu->funcs->detach(vm->mmu); - msm_gem_vm_put(vm); + if (kms->vm) { + struct msm_mmu *mmu = to_msm_vm(kms->vm)->mmu; + + mmu->funcs->detach(mmu); + drm_gpuvm_put(kms->vm); } mdp_kms_destroy(&mdp5_kms->base); @@ -500,7 +501,7 @@ static int mdp5_kms_init(struct drm_device *dev) struct mdp5_kms *mdp5_kms; struct mdp5_cfg *config; struct msm_kms *kms = priv->kms; - struct msm_gem_vm *vm; + struct drm_gpuvm *vm; int i, ret; ret = mdp5_init(to_platform_device(dev->dev), dev); diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c index f95cde8b039b..e0de545d4077 100644 --- a/drivers/gpu/drm/msm/dsi/dsi_host.c +++ b/drivers/gpu/drm/msm/dsi/dsi_host.c @@ -152,7 +152,7 @@ struct msm_dsi_host { /* DSI 6G TX buffer*/ struct drm_gem_object *tx_gem_obj; - struct msm_gem_vm *vm; + struct drm_gpuvm *vm; /* DSI v2 TX buffer */ void *tx_buf; @@ -1207,7 +1207,7 @@ int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size) uint64_t iova; u8 *data; - msm_host->vm = msm_gem_vm_get(priv->kms->vm); + msm_host->vm = drm_gpuvm_get(priv->kms->vm); data = msm_gem_kernel_new(dev, size, MSM_BO_WC, msm_host->vm, @@ -1255,7 +1255,7 @@ void msm_dsi_tx_buf_free(struct mipi_dsi_host *host) if (msm_host->tx_gem_obj) { msm_gem_kernel_put(msm_host->tx_gem_obj, msm_host->vm); - msm_gem_vm_put(msm_host->vm); + drm_gpuvm_put(msm_host->vm); msm_host->tx_gem_obj = NULL; msm_host->vm = NULL; } diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index eb009bd193e3..0fe3c9a24baa 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -48,8 +48,6 @@ struct msm_rd_state; struct msm_perf_state; struct msm_gem_submit; struct msm_fence_context; -struct msm_gem_vm; -struct msm_gem_vma; struct msm_disp_state; #define MAX_CRTCS 8 @@ -253,7 +251,7 @@ void msm_crtc_disable_vblank(struct drm_crtc *crtc); int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu); void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu); -struct msm_gem_vm *msm_kms_init_vm(struct drm_device *dev); +struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev); bool msm_use_mmu(struct drm_device *dev); int msm_ioctl_gem_submit(struct drm_device *dev, void *data, diff --git 
a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index 3b17d83f6673..8ae2f326ec54 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -78,7 +78,7 @@ void msm_framebuffer_describe(struct drm_framebuffer *fb, struct seq_file *m) int msm_framebuffer_prepare(struct drm_framebuffer *fb, bool needs_dirtyfb) { struct msm_drm_private *priv = fb->dev->dev_private; - struct msm_gem_vm *vm = priv->kms->vm; + struct drm_gpuvm *vm = priv->kms->vm; struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); int ret, i, n = fb->format->num_planes; @@ -102,7 +102,7 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb, bool needs_dirtyfb) void msm_framebuffer_cleanup(struct drm_framebuffer *fb, bool needed_dirtyfb) { struct msm_drm_private *priv = fb->dev->dev_private; - struct msm_gem_vm *vm = priv->kms->vm; + struct drm_gpuvm *vm = priv->kms->vm; struct msm_framebuffer *msm_fb = to_msm_framebuffer(fb); int i, n = fb->format->num_planes; diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 82293806219a..763bafcff4cc 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -45,20 +45,20 @@ static int msm_gem_open(struct drm_gem_object *obj, struct drm_file *file) static void put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm, bool close); -static void detach_vm(struct drm_gem_object *obj, struct msm_gem_vm *vm) +static void detach_vm(struct drm_gem_object *obj, struct drm_gpuvm *vm) { msm_gem_assert_locked(obj); - drm_gpuvm_resv_assert_held(&vm->base); + drm_gpuvm_resv_assert_held(vm); - struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_find(&vm->base, obj); + struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_find(vm, obj); if (vm_bo) { struct drm_gpuva *vma; drm_gpuvm_bo_for_each_va (vma, vm_bo) { - if (vma->vm != &vm->base) + if (vma->vm != vm) continue; - msm_gem_vma_purge(to_msm_vma(vma)); - msm_gem_vma_close(to_msm_vma(vma)); + msm_gem_vma_purge(vma); + msm_gem_vma_close(vma); break; } @@ -89,7 +89,7 @@ static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file) msecs_to_jiffies(1000)); msm_gem_lock_vm_and_obj(&exec, obj, ctx->vm); - put_iova_spaces(obj, &ctx->vm->base, true); + put_iova_spaces(obj, ctx->vm, true); detach_vm(obj, ctx->vm); drm_exec_fini(&exec); /* drop locks */ } @@ -386,8 +386,8 @@ uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj) return offset; } -static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj, - struct msm_gem_vm *vm) +static struct drm_gpuva *lookup_vma(struct drm_gem_object *obj, + struct drm_gpuvm *vm) { struct drm_gpuvm_bo *vm_bo; @@ -397,13 +397,13 @@ static struct msm_gem_vma *lookup_vma(struct drm_gem_object *obj, struct drm_gpuva *vma; drm_gpuvm_bo_for_each_va (vma, vm_bo) { - if (vma->vm == &vm->base) { + if (vma->vm == vm) { /* lookup_vma() should only be used in paths * with at most one vma per vm */ GEM_WARN_ON(!list_is_singular(&vm_bo->list.gpuva)); - return to_msm_vma(vma); + return vma; } } } @@ -433,22 +433,20 @@ put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm, bool close) drm_gpuvm_bo_get(vm_bo); drm_gpuvm_bo_for_each_va_safe (vma, vmatmp, vm_bo) { - struct msm_gem_vma *msm_vma = to_msm_vma(vma); - - msm_gem_vma_purge(msm_vma); + msm_gem_vma_purge(vma); if (close) - msm_gem_vma_close(msm_vma); + msm_gem_vma_close(vma); } drm_gpuvm_bo_put(vm_bo); } } -static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj, - struct msm_gem_vm *vm, - u64 range_start, u64 range_end) +static struct drm_gpuva 
*get_vma_locked(struct drm_gem_object *obj, + struct drm_gpuvm *vm, u64 range_start, + u64 range_end) { - struct msm_gem_vma *vma; + struct drm_gpuva *vma; msm_gem_assert_locked(obj); @@ -457,14 +455,14 @@ static struct msm_gem_vma *get_vma_locked(struct drm_gem_object *obj, if (!vma) { vma = msm_gem_vma_new(vm, obj, range_start, range_end); } else { - GEM_WARN_ON(vma->base.va.addr < range_start); - GEM_WARN_ON((vma->base.va.addr + obj->size) > range_end); + GEM_WARN_ON(vma->va.addr < range_start); + GEM_WARN_ON((vma->va.addr + obj->size) > range_end); } return vma; } -int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma) +int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma) { struct msm_gem_object *msm_obj = to_msm_bo(obj); struct page **pages; @@ -517,17 +515,17 @@ void msm_gem_unpin_active(struct drm_gem_object *obj) update_lru_active(obj); } -struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj, - struct msm_gem_vm *vm) +struct drm_gpuva *msm_gem_get_vma_locked(struct drm_gem_object *obj, + struct drm_gpuvm *vm) { return get_vma_locked(obj, vm, 0, U64_MAX); } static int get_and_pin_iova_range_locked(struct drm_gem_object *obj, - struct msm_gem_vm *vm, uint64_t *iova, - u64 range_start, u64 range_end) + struct drm_gpuvm *vm, uint64_t *iova, + u64 range_start, u64 range_end) { - struct msm_gem_vma *vma; + struct drm_gpuva *vma; int ret; msm_gem_assert_locked(obj); @@ -538,7 +536,7 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj, ret = msm_gem_pin_vma_locked(obj, vma); if (!ret) { - *iova = vma->base.va.addr; + *iova = vma->va.addr; pin_obj_locked(obj); } @@ -550,8 +548,8 @@ static int get_and_pin_iova_range_locked(struct drm_gem_object *obj, * limits iova to specified range (in pages) */ int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj, - struct msm_gem_vm *vm, uint64_t *iova, - u64 range_start, u64 range_end) + struct drm_gpuvm *vm, uint64_t *iova, + u64 range_start, u64 range_end) { struct drm_exec exec; int ret; @@ -564,8 +562,8 @@ int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj, } /* get iova and pin it. Should have a matching put */ -int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, - struct msm_gem_vm *vm, uint64_t *iova) +int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm, + uint64_t *iova) { return msm_gem_get_and_pin_iova_range(obj, vm, iova, 0, U64_MAX); } @@ -574,10 +572,10 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, * Get an iova but don't pin it. Doesn't need a put because iovas are currently * valid for the life of the object */ -int msm_gem_get_iova(struct drm_gem_object *obj, - struct msm_gem_vm *vm, uint64_t *iova) +int msm_gem_get_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm, + uint64_t *iova) { - struct msm_gem_vma *vma; + struct drm_gpuva *vma; struct drm_exec exec; int ret = 0; @@ -586,7 +584,7 @@ int msm_gem_get_iova(struct drm_gem_object *obj, if (IS_ERR(vma)) { ret = PTR_ERR(vma); } else { - *iova = vma->base.va.addr; + *iova = vma->va.addr; } drm_exec_fini(&exec); /* drop locks */ @@ -594,9 +592,9 @@ int msm_gem_get_iova(struct drm_gem_object *obj, } static int clear_iova(struct drm_gem_object *obj, - struct msm_gem_vm *vm) + struct drm_gpuvm *vm) { - struct msm_gem_vma *vma = lookup_vma(obj, vm); + struct drm_gpuva *vma = lookup_vma(obj, vm); if (!vma) return 0; @@ -615,7 +613,7 @@ static int clear_iova(struct drm_gem_object *obj, * Setting an iova of zero will clear the vma. 
*/ int msm_gem_set_iova(struct drm_gem_object *obj, - struct msm_gem_vm *vm, uint64_t iova) + struct drm_gpuvm *vm, uint64_t iova) { struct drm_exec exec; int ret = 0; @@ -624,11 +622,11 @@ int msm_gem_set_iova(struct drm_gem_object *obj, if (!iova) { ret = clear_iova(obj, vm); } else { - struct msm_gem_vma *vma; + struct drm_gpuva *vma; vma = get_vma_locked(obj, vm, iova, iova + obj->size); if (IS_ERR(vma)) { ret = PTR_ERR(vma); - } else if (GEM_WARN_ON(vma->base.va.addr != iova)) { + } else if (GEM_WARN_ON(vma->va.addr != iova)) { clear_iova(obj, vm); ret = -EBUSY; } @@ -643,10 +641,9 @@ int msm_gem_set_iova(struct drm_gem_object *obj, * purged until something else (shrinker, mm_notifier, destroy, etc) decides * to get rid of it */ -void msm_gem_unpin_iova(struct drm_gem_object *obj, - struct msm_gem_vm *vm) +void msm_gem_unpin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm) { - struct msm_gem_vma *vma; + struct drm_gpuva *vma; struct drm_exec exec; msm_gem_lock_vm_and_obj(&exec, obj, vm); @@ -1276,9 +1273,9 @@ fail: return ERR_PTR(ret); } -void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, - uint32_t flags, struct msm_gem_vm *vm, - struct drm_gem_object **bo, uint64_t *iova) +void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, uint32_t flags, + struct drm_gpuvm *vm, struct drm_gem_object **bo, + uint64_t *iova) { void *vaddr; struct drm_gem_object *obj = msm_gem_new(dev, size, flags); @@ -1311,8 +1308,7 @@ err: } -void msm_gem_kernel_put(struct drm_gem_object *bo, - struct msm_gem_vm *vm) +void msm_gem_kernel_put(struct drm_gem_object *bo, struct drm_gpuvm *vm) { if (IS_ERR_OR_NULL(bo)) return; diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 33885a08cdd7..892e4132fa72 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -79,12 +79,7 @@ struct msm_gem_vm { }; #define to_msm_vm(x) container_of(x, struct msm_gem_vm, base) -struct msm_gem_vm * -msm_gem_vm_get(struct msm_gem_vm *vm); - -void msm_gem_vm_put(struct msm_gem_vm *vm); - -struct msm_gem_vm * +struct drm_gpuvm * msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name, u64 va_start, u64 va_size, bool managed); @@ -113,12 +108,12 @@ struct msm_gem_vma { }; #define to_msm_vma(x) container_of(x, struct msm_gem_vma, base) -struct msm_gem_vma * -msm_gem_vma_new(struct msm_gem_vm *vm, struct drm_gem_object *obj, +struct drm_gpuva * +msm_gem_vma_new(struct drm_gpuvm *vm, struct drm_gem_object *obj, u64 range_start, u64 range_end); -void msm_gem_vma_purge(struct msm_gem_vma *vma); -int msm_gem_vma_map(struct msm_gem_vma *vma, int prot, struct sg_table *sgt, int size); -void msm_gem_vma_close(struct msm_gem_vma *vma); +void msm_gem_vma_purge(struct drm_gpuva *vma); +int msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt, int size); +void msm_gem_vma_close(struct drm_gpuva *vma); struct msm_gem_object { struct drm_gem_object base; @@ -163,22 +158,21 @@ struct msm_gem_object { #define to_msm_bo(x) container_of(x, struct msm_gem_object, base) uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); -int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct msm_gem_vma *vma); +int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma); void msm_gem_unpin_locked(struct drm_gem_object *obj); void msm_gem_unpin_active(struct drm_gem_object *obj); -struct msm_gem_vma *msm_gem_get_vma_locked(struct drm_gem_object *obj, - struct msm_gem_vm *vm); -int msm_gem_get_iova(struct drm_gem_object 
*obj, - struct msm_gem_vm *vm, uint64_t *iova); -int msm_gem_set_iova(struct drm_gem_object *obj, - struct msm_gem_vm *vm, uint64_t iova); +struct drm_gpuva *msm_gem_get_vma_locked(struct drm_gem_object *obj, + struct drm_gpuvm *vm); +int msm_gem_get_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm, + uint64_t *iova); +int msm_gem_set_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm, + uint64_t iova); int msm_gem_get_and_pin_iova_range(struct drm_gem_object *obj, - struct msm_gem_vm *vm, uint64_t *iova, - u64 range_start, u64 range_end); -int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, - struct msm_gem_vm *vm, uint64_t *iova); -void msm_gem_unpin_iova(struct drm_gem_object *obj, - struct msm_gem_vm *vm); + struct drm_gpuvm *vm, uint64_t *iova, + u64 range_start, u64 range_end); +int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm, + uint64_t *iova); +void msm_gem_unpin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm); void msm_gem_pin_obj_locked(struct drm_gem_object *obj); struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj); void msm_gem_unpin_pages_locked(struct drm_gem_object *obj); @@ -199,11 +193,10 @@ int msm_gem_new_handle(struct drm_device *dev, struct drm_file *file, uint32_t size, uint32_t flags, uint32_t *handle, char *name); struct drm_gem_object *msm_gem_new(struct drm_device *dev, uint32_t size, uint32_t flags); -void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, - uint32_t flags, struct msm_gem_vm *vm, - struct drm_gem_object **bo, uint64_t *iova); -void msm_gem_kernel_put(struct drm_gem_object *bo, - struct msm_gem_vm *vm); +void *msm_gem_kernel_new(struct drm_device *dev, uint32_t size, uint32_t flags, + struct drm_gpuvm *vm, struct drm_gem_object **bo, + uint64_t *iova); +void msm_gem_kernel_put(struct drm_gem_object *bo, struct drm_gpuvm *vm); struct drm_gem_object *msm_gem_import(struct drm_device *dev, struct dma_buf *dmabuf, struct sg_table *sgt); __printf(2, 3) @@ -254,14 +247,14 @@ msm_gem_unlock(struct drm_gem_object *obj) static inline int msm_gem_lock_vm_and_obj(struct drm_exec *exec, struct drm_gem_object *obj, - struct msm_gem_vm *vm) + struct drm_gpuvm *vm) { int ret = 0; drm_exec_init(exec, 0, 2); drm_exec_until_all_locked (exec) { - ret = drm_exec_lock_obj(exec, drm_gpuvm_resv_obj(&vm->base)); - if (!ret && (obj->resv != drm_gpuvm_resv(&vm->base))) + ret = drm_exec_lock_obj(exec, drm_gpuvm_resv_obj(vm)); + if (!ret && (obj->resv != drm_gpuvm_resv(vm))) ret = drm_exec_lock_obj(exec, obj); drm_exec_retry_on_contention(exec); if (GEM_WARN_ON(ret)) @@ -328,7 +321,7 @@ struct msm_gem_submit { struct kref ref; struct drm_device *dev; struct msm_gpu *gpu; - struct msm_gem_vm *vm; + struct drm_gpuvm *vm; struct list_head node; /* node in ring submit list */ struct drm_exec exec; uint32_t seqno; /* Sequence number of the submit on the ring */ diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 50060d934469..939dbc9ec10a 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -264,7 +264,7 @@ static int submit_lock_objects(struct msm_gem_submit *submit) drm_exec_until_all_locked (&submit->exec) { ret = drm_exec_lock_obj(&submit->exec, - drm_gpuvm_resv_obj(&submit->vm->base)); + drm_gpuvm_resv_obj(submit->vm)); drm_exec_retry_on_contention(&submit->exec); if (ret) goto error; @@ -315,7 +315,7 @@ static int submit_pin_objects(struct msm_gem_submit *submit) for (i = 0; i < submit->nr_bos; i++) { struct 
drm_gem_object *obj = submit->bos[i].obj; - struct msm_gem_vma *vma; + struct drm_gpuva *vma; /* if locking succeeded, pin bo: */ vma = msm_gem_get_vma_locked(obj, submit->vm); @@ -328,8 +328,8 @@ static int submit_pin_objects(struct msm_gem_submit *submit) if (ret) break; - submit->bos[i].vm_bo = drm_gpuvm_bo_get(vma->base.vm_bo); - submit->bos[i].iova = vma->base.va.addr; + submit->bos[i].vm_bo = drm_gpuvm_bo_get(vma->vm_bo); + submit->bos[i].iova = vma->va.addr; } /* diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index ccb20897a2b0..df8eb910ca31 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -20,52 +20,38 @@ msm_gem_vm_free(struct drm_gpuvm *gpuvm) kfree(vm); } - -void msm_gem_vm_put(struct msm_gem_vm *vm) -{ - if (vm) - drm_gpuvm_put(&vm->base); -} - -struct msm_gem_vm * -msm_gem_vm_get(struct msm_gem_vm *vm) -{ - if (!IS_ERR_OR_NULL(vm)) - drm_gpuvm_get(&vm->base); - - return vm; -} - /* Actually unmap memory for the vma */ -void msm_gem_vma_purge(struct msm_gem_vma *vma) +void msm_gem_vma_purge(struct drm_gpuva *vma) { - struct msm_gem_vm *vm = to_msm_vm(vma->base.vm); - unsigned size = vma->base.va.range; + struct msm_gem_vma *msm_vma = to_msm_vma(vma); + struct msm_gem_vm *vm = to_msm_vm(vma->vm); + unsigned size = vma->va.range; /* Don't do anything if the memory isn't mapped */ - if (!vma->mapped) + if (!msm_vma->mapped) return; - vm->mmu->funcs->unmap(vm->mmu, vma->base.va.addr, size); + vm->mmu->funcs->unmap(vm->mmu, vma->va.addr, size); - vma->mapped = false; + msm_vma->mapped = false; } /* Map and pin vma: */ int -msm_gem_vma_map(struct msm_gem_vma *vma, int prot, +msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt, int size) { - struct msm_gem_vm *vm = to_msm_vm(vma->base.vm); + struct msm_gem_vma *msm_vma = to_msm_vma(vma); + struct msm_gem_vm *vm = to_msm_vm(vma->vm); int ret; - if (GEM_WARN_ON(!vma->base.va.addr)) + if (GEM_WARN_ON(!vma->va.addr)) return -EINVAL; - if (vma->mapped) + if (msm_vma->mapped) return 0; - vma->mapped = true; + msm_vma->mapped = true; /* * NOTE: iommu/io-pgtable can allocate pages, so we cannot hold @@ -76,38 +62,40 @@ msm_gem_vma_map(struct msm_gem_vma *vma, int prot, * Revisit this if we can come up with a scheme to pre-alloc pages * for the pgtable in map/unmap ops. */ - ret = vm->mmu->funcs->map(vm->mmu, vma->base.va.addr, sgt, size, prot); + ret = vm->mmu->funcs->map(vm->mmu, vma->va.addr, sgt, size, prot); if (ret) { - vma->mapped = false; + msm_vma->mapped = false; } return ret; } /* Close an iova. 
Warn if it is still in use */ -void msm_gem_vma_close(struct msm_gem_vma *vma) +void msm_gem_vma_close(struct drm_gpuva *vma) { - struct msm_gem_vm *vm = to_msm_vm(vma->base.vm); + struct msm_gem_vm *vm = to_msm_vm(vma->vm); + struct msm_gem_vma *msm_vma = to_msm_vma(vma); - GEM_WARN_ON(vma->mapped); + GEM_WARN_ON(msm_vma->mapped); drm_gpuvm_resv_assert_held(&vm->base); - if (vma->base.va.addr) - drm_mm_remove_node(&vma->node); + if (vma->va.addr && vm->managed) + drm_mm_remove_node(&msm_vma->node); - drm_gpuva_remove(&vma->base); - drm_gpuva_unlink(&vma->base); + drm_gpuva_remove(vma); + drm_gpuva_unlink(vma); kfree(vma); } /* Create a new vma and allocate an iova for it */ -struct msm_gem_vma * -msm_gem_vma_new(struct msm_gem_vm *vm, struct drm_gem_object *obj, +struct drm_gpuva * +msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj, u64 range_start, u64 range_end) { + struct msm_gem_vm *vm = to_msm_vm(gpuvm); struct drm_gpuvm_bo *vm_bo; struct msm_gem_vma *vma; int ret; @@ -149,7 +137,7 @@ msm_gem_vma_new(struct msm_gem_vm *vm, struct drm_gem_object *obj, drm_gpuva_link(&vma->base, vm_bo); GEM_WARN_ON(drm_gpuvm_bo_put(vm_bo)); - return vma; + return &vma->base; err_va_remove: drm_gpuva_remove(&vma->base); @@ -179,7 +167,7 @@ static const struct drm_gpuvm_ops msm_gpuvm_ops = { * handles virtual address allocation, and both async and sync operations * are supported. */ -struct msm_gem_vm * +struct drm_gpuvm * msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name, u64 va_start, u64 va_size, bool managed) { @@ -215,7 +203,7 @@ msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name, drm_mm_init(&vm->mm, va_start, va_size); - return vm; + return &vm->base; err_free_vm: kfree(vm); diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index 47268aae7d54..fc4d6c9049b0 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -285,7 +285,7 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, if (state->fault_info.ttbr0) { struct msm_gpu_fault_info *info = &state->fault_info; - struct msm_mmu *mmu = submit->vm->mmu; + struct msm_mmu *mmu = to_msm_vm(submit->vm)->mmu; msm_iommu_pagetable_params(mmu, &info->pgtbl_ttbr0, &info->asid); @@ -390,7 +390,7 @@ static void recover_worker(struct kthread_work *work) /* Increment the fault counts */ submit->queue->faults++; if (submit->vm) - submit->vm->faults++; + to_msm_vm(submit->vm)->faults++; get_comm_cmdline(submit, &comm, &cmd); @@ -828,10 +828,11 @@ static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu) } /* Return a new address space for a msm_drm_private instance */ -struct msm_gem_vm * +struct drm_gpuvm * msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task) { - struct msm_gem_vm *vm = NULL; + struct drm_gpuvm *vm = NULL; + if (!gpu) return NULL; @@ -842,11 +843,11 @@ msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task) if (gpu->funcs->create_private_vm) { vm = gpu->funcs->create_private_vm(gpu); if (!IS_ERR(vm)) - vm->pid = get_pid(task_pid(task)); + to_msm_vm(vm)->pid = get_pid(task_pid(task)); } if (IS_ERR_OR_NULL(vm)) - vm = msm_gem_vm_get(gpu->vm); + vm = drm_gpuvm_get(gpu->vm); return vm; } @@ -1014,8 +1015,9 @@ void msm_gpu_cleanup(struct msm_gpu *gpu) msm_gem_kernel_put(gpu->memptrs_bo, gpu->vm); if (!IS_ERR_OR_NULL(gpu->vm)) { - gpu->vm->mmu->funcs->detach(gpu->vm->mmu); - msm_gem_vm_put(gpu->vm); + struct msm_mmu *mmu = to_msm_vm(gpu->vm)->mmu; + 
mmu->funcs->detach(mmu); + drm_gpuvm_put(gpu->vm); } if (gpu->worker) { diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 9d69dcad6612..231577656fae 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -78,8 +78,8 @@ struct msm_gpu_funcs { /* note: gpu_set_freq() can assume that we have been pm_resumed */ void (*gpu_set_freq)(struct msm_gpu *gpu, struct dev_pm_opp *opp, bool suspended); - struct msm_gem_vm *(*create_vm)(struct msm_gpu *gpu, struct platform_device *pdev); - struct msm_gem_vm *(*create_private_vm)(struct msm_gpu *gpu); + struct drm_gpuvm *(*create_vm)(struct msm_gpu *gpu, struct platform_device *pdev); + struct drm_gpuvm *(*create_private_vm)(struct msm_gpu *gpu); uint32_t (*get_rptr)(struct msm_gpu *gpu, struct msm_ringbuffer *ring); /** @@ -234,7 +234,7 @@ struct msm_gpu { void __iomem *mmio; int irq; - struct msm_gem_vm *vm; + struct drm_gpuvm *vm; /* Power Control: */ struct regulator *gpu_reg, *gpu_cx; @@ -357,7 +357,7 @@ struct msm_context { int queueid; /** @vm: the per-process GPU address-space */ - struct msm_gem_vm *vm; + struct drm_gpuvm *vm; /** @kref: the reference count */ struct kref ref; @@ -667,7 +667,7 @@ int msm_gpu_init(struct drm_device *drm, struct platform_device *pdev, struct msm_gpu *gpu, const struct msm_gpu_funcs *funcs, const char *name, struct msm_gpu_config *config); -struct msm_gem_vm * +struct drm_gpuvm * msm_gpu_create_private_vm(struct msm_gpu *gpu, struct task_struct *task); void msm_gpu_cleanup(struct msm_gpu *gpu); diff --git a/drivers/gpu/drm/msm/msm_kms.c b/drivers/gpu/drm/msm/msm_kms.c index 6458bd82a0cd..e82b8569a468 100644 --- a/drivers/gpu/drm/msm/msm_kms.c +++ b/drivers/gpu/drm/msm/msm_kms.c @@ -176,9 +176,9 @@ static int msm_kms_fault_handler(void *arg, unsigned long iova, int flags, void return -ENOSYS; } -struct msm_gem_vm *msm_kms_init_vm(struct drm_device *dev) +struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev) { - struct msm_gem_vm *vm; + struct drm_gpuvm *vm; struct msm_mmu *mmu; struct device *mdp_dev = dev->dev; struct device *mdss_dev = mdp_dev->parent; @@ -212,7 +212,7 @@ struct msm_gem_vm *msm_kms_init_vm(struct drm_device *dev) return vm; } - msm_mmu_set_fault_handler(vm->mmu, kms, msm_kms_fault_handler); + msm_mmu_set_fault_handler(to_msm_vm(vm)->mmu, kms, msm_kms_fault_handler); return vm; } diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h index f45996a03e15..7cdb2eb67700 100644 --- a/drivers/gpu/drm/msm/msm_kms.h +++ b/drivers/gpu/drm/msm/msm_kms.h @@ -139,7 +139,7 @@ struct msm_kms { atomic_t fault_snapshot_capture; /* mapper-id used to request GEM buffer mapped for scanout: */ - struct msm_gem_vm *vm; + struct drm_gpuvm *vm; /* disp snapshot support */ struct kthread_worker *dump_worker; diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c index 6298233c3568..8ced49c7557b 100644 --- a/drivers/gpu/drm/msm/msm_submitqueue.c +++ b/drivers/gpu/drm/msm/msm_submitqueue.c @@ -59,7 +59,7 @@ void __msm_context_destroy(struct kref *kref) kfree(ctx->entities[i]); } - msm_gem_vm_put(ctx->vm); + drm_gpuvm_put(ctx->vm); kfree(ctx->comm); kfree(ctx->cmdline); kfree(ctx); -- cgit v1.2.3 From 62a28e272b87aee1b225942cf80505da1e220def Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Sun, 29 Jun 2025 13:12:59 -0700 Subject: drm/msm: Split out helper to get iommu prot flags We'll re-use this in the vm_bind path. 
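As a rough usage sketch (names follow the existing pin path this is split out of), a caller can now derive the IOMMU_* prot flags from the BO flags independently of pinning:

	/* IOMMU_READ, plus IOMMU_WRITE/IOMMU_PRIV/IOMMU_CACHE per BO flags: */
	int prot = msm_gem_prot(obj);

	ret = msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size);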
Signed-off-by: Rob Clark Signed-off-by: Rob Clark Tested-by: Antonino Maniscalco Reviewed-by: Antonino Maniscalco Patchwork: https://patchwork.freedesktop.org/patch/661484/ --- drivers/gpu/drm/msm/msm_gem.c | 12 ++++++++++-- drivers/gpu/drm/msm/msm_gem.h | 1 + 2 files changed, 11 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/msm/msm_gem.h') diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 763bafcff4cc..20d5e4b4d057 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -462,10 +462,9 @@ static struct drm_gpuva *get_vma_locked(struct drm_gem_object *obj, return vma; } -int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma) +int msm_gem_prot(struct drm_gem_object *obj) { struct msm_gem_object *msm_obj = to_msm_bo(obj); - struct page **pages; int prot = IOMMU_READ; if (!(msm_obj->flags & MSM_BO_GPU_READONLY)) @@ -477,6 +476,15 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma) if (msm_obj->flags & MSM_BO_CACHED_COHERENT) prot |= IOMMU_CACHE; + return prot; +} + +int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma) +{ + struct msm_gem_object *msm_obj = to_msm_bo(obj); + struct page **pages; + int prot = msm_gem_prot(obj); + msm_gem_assert_locked(obj); pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED); diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 892e4132fa72..a18872ab1393 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -158,6 +158,7 @@ struct msm_gem_object { #define to_msm_bo(x) container_of(x, struct msm_gem_object, base) uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); +int msm_gem_prot(struct drm_gem_object *obj); int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma); void msm_gem_unpin_locked(struct drm_gem_object *obj); void msm_gem_unpin_active(struct drm_gem_object *obj); -- cgit v1.2.3 From 2c7ad9925523f644fe808b243921ebd5c01590e5 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Sun, 29 Jun 2025 13:13:00 -0700 Subject: drm/msm: Add mmu support for non-zero offset Only needs to be supported for iopgtables mmu, the other cases are either only used for kernel managed mappings (where offset is always zero) or devices which do not support sparse bindings. 
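To sketch the resulting interface (per the signatures changed below), the map() op gains an offset into the BO's backing sg_table, so a mapping no longer has to start at the first page of the buffer:

	/* map va.range bytes starting gem.offset bytes into the BO: */
	ret = vm->mmu->funcs->map(vm->mmu, vma->va.addr, sgt,
				  vma->gem.offset, vma->va.range, prot);

Backends that cannot honor a non-zero offset (a2xx gpummu, plain iommu domains) simply WARN_ON(off != 0), since only the iopgtable path is used for sparse bindings.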
Signed-off-by: Rob Clark Signed-off-by: Rob Clark Tested-by: Antonino Maniscalco Reviewed-by: Antonino Maniscalco Patchwork: https://patchwork.freedesktop.org/patch/661501/ --- drivers/gpu/drm/msm/adreno/a2xx_gpummu.c | 5 ++++- drivers/gpu/drm/msm/msm_gem.c | 4 ++-- drivers/gpu/drm/msm/msm_gem.h | 4 ++-- drivers/gpu/drm/msm/msm_gem_vma.c | 13 +++++++------ drivers/gpu/drm/msm/msm_iommu.c | 22 ++++++++++++++++++++-- drivers/gpu/drm/msm/msm_mmu.h | 2 +- 6 files changed, 36 insertions(+), 14 deletions(-) (limited to 'drivers/gpu/drm/msm/msm_gem.h') diff --git a/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c index 4280f71e472a..0407c9bc8c1b 100644 --- a/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c +++ b/drivers/gpu/drm/msm/adreno/a2xx_gpummu.c @@ -29,13 +29,16 @@ static void a2xx_gpummu_detach(struct msm_mmu *mmu) } static int a2xx_gpummu_map(struct msm_mmu *mmu, uint64_t iova, - struct sg_table *sgt, size_t len, int prot) + struct sg_table *sgt, size_t off, size_t len, + int prot) { struct a2xx_gpummu *gpummu = to_a2xx_gpummu(mmu); unsigned idx = (iova - GPUMMU_VA_START) / GPUMMU_PAGE_SIZE; struct sg_dma_page_iter dma_iter; unsigned prot_bits = 0; + WARN_ON(off != 0); + if (prot & IOMMU_WRITE) prot_bits |= 1; if (prot & IOMMU_READ) diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 20d5e4b4d057..5c71a4be0dfa 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -453,7 +453,7 @@ static struct drm_gpuva *get_vma_locked(struct drm_gem_object *obj, vma = lookup_vma(obj, vm); if (!vma) { - vma = msm_gem_vma_new(vm, obj, range_start, range_end); + vma = msm_gem_vma_new(vm, obj, 0, range_start, range_end); } else { GEM_WARN_ON(vma->va.addr < range_start); GEM_WARN_ON((vma->va.addr + obj->size) > range_end); @@ -491,7 +491,7 @@ int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma) if (IS_ERR(pages)) return PTR_ERR(pages); - return msm_gem_vma_map(vma, prot, msm_obj->sgt, obj->size); + return msm_gem_vma_map(vma, prot, msm_obj->sgt); } void msm_gem_unpin_locked(struct drm_gem_object *obj) diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index a18872ab1393..0e7b17b2093b 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -110,9 +110,9 @@ struct msm_gem_vma { struct drm_gpuva * msm_gem_vma_new(struct drm_gpuvm *vm, struct drm_gem_object *obj, - u64 range_start, u64 range_end); + u64 offset, u64 range_start, u64 range_end); void msm_gem_vma_purge(struct drm_gpuva *vma); -int msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt, int size); +int msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt); void msm_gem_vma_close(struct drm_gpuva *vma); struct msm_gem_object { diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index df8eb910ca31..ef0efd87e4a6 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -38,8 +38,7 @@ void msm_gem_vma_purge(struct drm_gpuva *vma) /* Map and pin vma: */ int -msm_gem_vma_map(struct drm_gpuva *vma, int prot, - struct sg_table *sgt, int size) +msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt) { struct msm_gem_vma *msm_vma = to_msm_vma(vma); struct msm_gem_vm *vm = to_msm_vm(vma->vm); @@ -62,8 +61,9 @@ msm_gem_vma_map(struct drm_gpuva *vma, int prot, * Revisit this if we can come up with a scheme to pre-alloc pages * for the pgtable in map/unmap ops. 
*/ - ret = vm->mmu->funcs->map(vm->mmu, vma->va.addr, sgt, size, prot); - + ret = vm->mmu->funcs->map(vm->mmu, vma->va.addr, sgt, + vma->gem.offset, vma->va.range, + prot); if (ret) { msm_vma->mapped = false; } @@ -93,7 +93,7 @@ void msm_gem_vma_close(struct drm_gpuva *vma) /* Create a new vma and allocate an iova for it */ struct drm_gpuva * msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj, - u64 range_start, u64 range_end) + u64 offset, u64 range_start, u64 range_end) { struct msm_gem_vm *vm = to_msm_vm(gpuvm); struct drm_gpuvm_bo *vm_bo; @@ -107,6 +107,7 @@ msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj, return ERR_PTR(-ENOMEM); if (vm->managed) { + BUG_ON(offset != 0); ret = drm_mm_insert_node_in_range(&vm->mm, &vma->node, obj->size, PAGE_SIZE, 0, range_start, range_end, 0); @@ -120,7 +121,7 @@ msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj, GEM_WARN_ON((range_end - range_start) > obj->size); - drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, 0); + drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, offset); vma->mapped = false; ret = drm_gpuva_insert(&vm->base, &vma->base); diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index 739ce2c283a4..3c2eb59bfd49 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -113,7 +113,8 @@ static int msm_iommu_pagetable_unmap(struct msm_mmu *mmu, u64 iova, } static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova, - struct sg_table *sgt, size_t len, int prot) + struct sg_table *sgt, size_t off, size_t len, + int prot) { struct msm_iommu_pagetable *pagetable = to_pagetable(mmu); struct io_pgtable_ops *ops = pagetable->pgtbl_ops; @@ -125,6 +126,19 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova, size_t size = sg->length; phys_addr_t phys = sg_phys(sg); + if (!len) + break; + + if (size <= off) { + off -= size; + continue; + } + + phys += off; + size -= off; + size = min_t(size_t, size, len); + off = 0; + while (size) { size_t pgsize, count, mapped = 0; int ret; @@ -140,6 +154,7 @@ static int msm_iommu_pagetable_map(struct msm_mmu *mmu, u64 iova, phys += mapped; addr += mapped; size -= mapped; + len -= mapped; if (ret) { msm_iommu_pagetable_unmap(mmu, iova, addr - iova); @@ -388,11 +403,14 @@ static void msm_iommu_detach(struct msm_mmu *mmu) } static int msm_iommu_map(struct msm_mmu *mmu, uint64_t iova, - struct sg_table *sgt, size_t len, int prot) + struct sg_table *sgt, size_t off, size_t len, + int prot) { struct msm_iommu *iommu = to_msm_iommu(mmu); size_t ret; + WARN_ON(off != 0); + /* The arm-smmu driver expects the addresses to be sign extended */ if (iova & BIT_ULL(48)) iova |= GENMASK_ULL(63, 49); diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h index 0c694907140d..9d61999f4d42 100644 --- a/drivers/gpu/drm/msm/msm_mmu.h +++ b/drivers/gpu/drm/msm/msm_mmu.h @@ -12,7 +12,7 @@ struct msm_mmu_funcs { void (*detach)(struct msm_mmu *mmu); int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt, - size_t len, int prot); + size_t off, size_t len, int prot); int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len); void (*destroy)(struct msm_mmu *mmu); void (*set_stall)(struct msm_mmu *mmu, bool enable); -- cgit v1.2.3 From 7e34b8f6ed1e2d705668959cebd5aef3d5ba1b8b Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Sun, 29 Jun 2025 13:13:02 -0700 Subject: drm/msm: Rename msm_gem_vma_purge() -> _unmap() This is a more descriptive 
name. Signed-off-by: Rob Clark Signed-off-by: Rob Clark Tested-by: Antonino Maniscalco Reviewed-by: Antonino Maniscalco Patchwork: https://patchwork.freedesktop.org/patch/661487/ --- drivers/gpu/drm/msm/msm_gem.c | 6 +++--- drivers/gpu/drm/msm/msm_gem.h | 2 +- drivers/gpu/drm/msm/msm_gem_vma.c | 2 +- 3 files changed, 5 insertions(+), 5 deletions(-) (limited to 'drivers/gpu/drm/msm/msm_gem.h') diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 5c71a4be0dfa..186d160b74de 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -57,7 +57,7 @@ static void detach_vm(struct drm_gem_object *obj, struct drm_gpuvm *vm) drm_gpuvm_bo_for_each_va (vma, vm_bo) { if (vma->vm != vm) continue; - msm_gem_vma_purge(vma); + msm_gem_vma_unmap(vma); msm_gem_vma_close(vma); break; } @@ -433,7 +433,7 @@ put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm, bool close) drm_gpuvm_bo_get(vm_bo); drm_gpuvm_bo_for_each_va_safe (vma, vmatmp, vm_bo) { - msm_gem_vma_purge(vma); + msm_gem_vma_unmap(vma); if (close) msm_gem_vma_close(vma); } @@ -607,7 +607,7 @@ static int clear_iova(struct drm_gem_object *obj, if (!vma) return 0; - msm_gem_vma_purge(vma); + msm_gem_vma_unmap(vma); msm_gem_vma_close(vma); return 0; diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 0e7b17b2093b..b5bf21f62f9d 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -111,7 +111,7 @@ struct msm_gem_vma { struct drm_gpuva * msm_gem_vma_new(struct drm_gpuvm *vm, struct drm_gem_object *obj, u64 offset, u64 range_start, u64 range_end); -void msm_gem_vma_purge(struct drm_gpuva *vma); +void msm_gem_vma_unmap(struct drm_gpuva *vma); int msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt); void msm_gem_vma_close(struct drm_gpuva *vma); diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index ef0efd87e4a6..e16a8cafd8be 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -21,7 +21,7 @@ msm_gem_vm_free(struct drm_gpuvm *gpuvm) } /* Actually unmap memory for the vma */ -void msm_gem_vma_purge(struct drm_gpuva *vma) +void msm_gem_vma_unmap(struct drm_gpuva *vma) { struct msm_gem_vma *msm_vma = to_msm_vma(vma); struct msm_gem_vm *vm = to_msm_vm(vma->vm); -- cgit v1.2.3 From 6a4d287a1ae6e49f8ef57fcb2a512c2b0bbef966 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Sun, 29 Jun 2025 13:13:06 -0700 Subject: drm/msm: Mark VM as unusable on GPU hangs If userspace has opted-in to VM_BIND, then GPU hangs and VM_BIND errors will mark the VM as unusable. Signed-off-by: Rob Clark Signed-off-by: Rob Clark Tested-by: Antonino Maniscalco Reviewed-by: Antonino Maniscalco Patchwork: https://patchwork.freedesktop.org/patch/661499/ --- drivers/gpu/drm/msm/msm_gem.h | 17 +++++++++++++++++ drivers/gpu/drm/msm/msm_gem_submit.c | 3 +++ drivers/gpu/drm/msm/msm_gpu.c | 16 ++++++++++++++-- 3 files changed, 34 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/msm/msm_gem.h') diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index b5bf21f62f9d..f2631a8c62b9 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -76,6 +76,23 @@ struct msm_gem_vm { /** @managed: is this a kernel managed VM? */ bool managed; + + /** + * @unusable: True if the VM has turned unusable because something + * bad happened during an asynchronous request. 
+ * + * We don't try to recover from such failures, because this implies + * informing userspace about the specific operation that failed, and + * hoping the userspace driver can replay things from there. This all + * sounds very complicated for little gain. + * + * Instead, we should just flag the VM as unusable, and fail any + * further request targeting this VM. + * + * As an analogy, this would be mapped to a VK_ERROR_DEVICE_LOST + * situation, where the logical device needs to be re-created. + */ + bool unusable; }; #define to_msm_vm(x) container_of(x, struct msm_gem_vm, base) diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index daf05c380e84..50fafcacf035 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -681,6 +681,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (args->pad) return -EINVAL; + if (to_msm_vm(ctx->vm)->unusable) + return UERR(EPIPE, dev, "context is unusable"); + /* for now, we just have 3d pipe.. eventually this would need to * be more clever to dispatch to appropriate gpu module: */ diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index c08c942d85a0..0846f6c5169f 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -389,8 +389,20 @@ static void recover_worker(struct kthread_work *work) /* Increment the fault counts */ submit->queue->faults++; - if (submit->vm) - to_msm_vm(submit->vm)->faults++; + if (submit->vm) { + struct msm_gem_vm *vm = to_msm_vm(submit->vm); + + vm->faults++; + + /* + * If userspace has opted-in to VM_BIND (and therefore userspace + * management of the VM), faults mark the VM as unusable. This + * matches vulkan expectations (vulkan is the main target for + * VM_BIND). + */ + if (!vm->managed) + vm->unusable = true; + } get_comm_cmdline(submit, &comm, &cmd); -- cgit v1.2.3 From 4570dbb8a62410862528fe80cebad4ad4fdd9f5e Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Sun, 29 Jun 2025 13:13:09 -0700 Subject: drm/msm: rd dumping prep for sparse mappings Similar to the previous commit, add support for dumping partial mappings.
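As an illustration of the reworked interface (a sketch built only from the msm_rd.c hunk below, no new API): snapshot_buf() no longer derives the offset and size from the submit's bo table, the caller passes them explicitly. For a cmdstream bo that is not dumped in full, the caller computes the offset into the mapping itself: size_t offset = submit->cmd[i].iova - submit->bos[idx].iova; snapshot_buf(rd, obj, submit->cmd[i].iova, true, offset, szd * 4); where szd is the cmdstream size in dwords.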
Signed-off-by: Rob Clark Signed-off-by: Rob Clark Tested-by: Antonino Maniscalco Reviewed-by: Antonino Maniscalco Patchwork: https://patchwork.freedesktop.org/patch/661514/ --- drivers/gpu/drm/msm/msm_gem.h | 10 ---------- drivers/gpu/drm/msm/msm_rd.c | 38 +++++++++++++++++--------------------- 2 files changed, 17 insertions(+), 31 deletions(-) (limited to 'drivers/gpu/drm/msm/msm_gem.h') diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index f2631a8c62b9..3a5f81437b5d 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -403,14 +403,4 @@ static inline void msm_gem_submit_put(struct msm_gem_submit *submit) void msm_submit_retire(struct msm_gem_submit *submit); -/* helper to determine of a buffer in submit should be dumped, used for both - * devcoredump and debugfs cmdstream dumping: - */ -static inline bool -should_dump(struct msm_gem_submit *submit, int idx) -{ - extern bool rd_full; - return rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP); -} - #endif /* __MSM_GEM_H__ */ diff --git a/drivers/gpu/drm/msm/msm_rd.c b/drivers/gpu/drm/msm/msm_rd.c index 39138e190cb9..edbcb93410a9 100644 --- a/drivers/gpu/drm/msm/msm_rd.c +++ b/drivers/gpu/drm/msm/msm_rd.c @@ -308,21 +308,11 @@ void msm_rd_debugfs_cleanup(struct msm_drm_private *priv) priv->hangrd = NULL; } -static void snapshot_buf(struct msm_rd_state *rd, - struct msm_gem_submit *submit, int idx, - uint64_t iova, uint32_t size, bool full) +static void snapshot_buf(struct msm_rd_state *rd, struct drm_gem_object *obj, + uint64_t iova, bool full, size_t offset, size_t size) { - struct drm_gem_object *obj = submit->bos[idx].obj; - unsigned offset = 0; const char *buf; - if (iova) { - offset = iova - submit->bos[idx].iova; - } else { - iova = submit->bos[idx].iova; - size = obj->size; - } - /* * Always write the GPUADDR header so can get a complete list of all the * buffers in the cmd @@ -333,10 +323,6 @@ static void snapshot_buf(struct msm_rd_state *rd, if (!full) return; - /* But only dump the contents of buffers marked READ */ - if (!(submit->bos[idx].flags & MSM_SUBMIT_BO_READ)) - return; - buf = msm_gem_get_vaddr_active(obj); if (IS_ERR(buf)) return; @@ -352,6 +338,7 @@ static void snapshot_buf(struct msm_rd_state *rd, void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, const char *fmt, ...) 
{ + extern bool rd_full; struct task_struct *task; char msg[256]; int i, n; @@ -385,16 +372,25 @@ void msm_rd_dump_submit(struct msm_rd_state *rd, struct msm_gem_submit *submit, rd_write_section(rd, RD_CMD, msg, ALIGN(n, 4)); - for (i = 0; i < submit->nr_bos; i++) - snapshot_buf(rd, submit, i, 0, 0, should_dump(submit, i)); + for (i = 0; i < submit->nr_bos; i++) { + struct drm_gem_object *obj = submit->bos[i].obj; + bool dump = rd_full || (submit->bos[i].flags & MSM_SUBMIT_BO_DUMP); + + snapshot_buf(rd, obj, submit->bos[i].iova, dump, 0, obj->size); + } for (i = 0; i < submit->nr_cmds; i++) { uint32_t szd = submit->cmd[i].size; /* in dwords */ + int idx = submit->cmd[i].idx; + bool dump = rd_full || (submit->bos[idx].flags & MSM_SUBMIT_BO_DUMP); /* snapshot cmdstream bo's (if we haven't already): */ - if (!should_dump(submit, i)) { - snapshot_buf(rd, submit, submit->cmd[i].idx, - submit->cmd[i].iova, szd * 4, true); + if (!dump) { + struct drm_gem_object *obj = submit->bos[idx].obj; + size_t offset = submit->cmd[i].iova - submit->bos[idx].iova; + + snapshot_buf(rd, obj, submit->cmd[i].iova, true, + offset, szd * 4); } } -- cgit v1.2.3 From 92395af63a9958615edfa9d4ef1ea72c92a00410 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Sun, 29 Jun 2025 13:13:14 -0700 Subject: drm/msm: Add VM_BIND submitqueue This submitqueue type isn't tied to a hw ringbuffer, but instead executes on the CPU for performing async VM_BIND ops. Signed-off-by: Rob Clark Signed-off-by: Rob Clark Tested-by: Antonino Maniscalco Reviewed-by: Antonino Maniscalco Patchwork: https://patchwork.freedesktop.org/patch/661517/ --- drivers/gpu/drm/msm/msm_gem.h | 12 ++++++ drivers/gpu/drm/msm/msm_gem_submit.c | 63 +++++++++++++++++++++++++++---- drivers/gpu/drm/msm/msm_gem_vma.c | 71 +++++++++++++++++++++++++++++++++++ drivers/gpu/drm/msm/msm_gpu.h | 3 ++ drivers/gpu/drm/msm/msm_submitqueue.c | 67 +++++++++++++++++++++++++-------- include/uapi/drm/msm_drm.h | 9 ++++- 6 files changed, 201 insertions(+), 24 deletions(-) (limited to 'drivers/gpu/drm/msm/msm_gem.h') diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 3a5f81437b5d..af637409be39 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -53,6 +53,13 @@ struct msm_gem_vm { /** @base: Inherit from drm_gpuvm. */ struct drm_gpuvm base; + /** + * @sched: Scheduler used for asynchronous VM_BIND request. + * + * Unused for kernel managed VMs (where all operations are synchronous). 
+ */ + struct drm_gpu_scheduler sched; + /** * @mm: Memory management for kernel managed VA allocations * @@ -71,6 +78,9 @@ struct msm_gem_vm { */ struct pid *pid; + /** @last_fence: Fence for last pending work scheduled on the VM */ + struct dma_fence *last_fence; + /** @faults: the number of GPU hangs associated with this address space */ int faults; @@ -100,6 +110,8 @@ struct drm_gpuvm * msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name, u64 va_start, u64 va_size, bool managed); +void msm_gem_vm_close(struct drm_gpuvm *gpuvm); + struct msm_fence_context; #define MSM_VMA_DUMP (DRM_GPUVA_USERBITS << 0) diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 00ba893c24f7..108f3372ba93 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -4,6 +4,7 @@ * Author: Rob Clark */ +#include #include #include #include @@ -259,8 +260,29 @@ out: static int submit_lock_objects(struct msm_gem_submit *submit) { unsigned flags = DRM_EXEC_INTERRUPTIBLE_WAIT; + struct drm_exec *exec = &submit->exec; int ret; + if (msm_context_is_vmbind(submit->queue->ctx)) { + flags |= DRM_EXEC_IGNORE_DUPLICATES; + + drm_exec_init(&submit->exec, flags, submit->nr_bos); + + drm_exec_until_all_locked (&submit->exec) { + ret = drm_gpuvm_prepare_vm(submit->vm, exec, 1); + drm_exec_retry_on_contention(exec); + if (ret) + return ret; + + ret = drm_gpuvm_prepare_objects(submit->vm, exec, 1); + drm_exec_retry_on_contention(exec); + if (ret) + return ret; + } + + return 0; + } + drm_exec_init(&submit->exec, flags, submit->nr_bos); drm_exec_until_all_locked (&submit->exec) { @@ -268,20 +290,17 @@ static int submit_lock_objects(struct msm_gem_submit *submit) drm_gpuvm_resv_obj(submit->vm)); drm_exec_retry_on_contention(&submit->exec); if (ret) - goto error; + return ret; for (unsigned i = 0; i < submit->nr_bos; i++) { struct drm_gem_object *obj = submit->bos[i].obj; ret = drm_exec_prepare_obj(&submit->exec, obj, 1); drm_exec_retry_on_contention(&submit->exec); if (ret) - goto error; + return ret; } } return 0; - -error: - return ret; } static int submit_fence_sync(struct msm_gem_submit *submit) @@ -367,9 +386,18 @@ static void submit_unpin_objects(struct msm_gem_submit *submit) static void submit_attach_object_fences(struct msm_gem_submit *submit) { - int i; + struct msm_gem_vm *vm = to_msm_vm(submit->vm); + struct dma_fence *last_fence; + + if (msm_context_is_vmbind(submit->queue->ctx)) { + drm_gpuvm_resv_add_fence(submit->vm, &submit->exec, + submit->user_fence, + DMA_RESV_USAGE_BOOKKEEP, + DMA_RESV_USAGE_BOOKKEEP); + return; + } - for (i = 0; i < submit->nr_bos; i++) { + for (unsigned i = 0; i < submit->nr_bos; i++) { struct drm_gem_object *obj = submit->bos[i].obj; if (submit->bos[i].flags & MSM_SUBMIT_BO_WRITE) @@ -379,6 +407,10 @@ static void submit_attach_object_fences(struct msm_gem_submit *submit) dma_resv_add_fence(obj->resv, submit->user_fence, DMA_RESV_USAGE_READ); } + + last_fence = vm->last_fence; + vm->last_fence = dma_fence_unwrap_merge(submit->user_fence, last_fence); + dma_fence_put(last_fence); } static int submit_bo(struct msm_gem_submit *submit, uint32_t idx, @@ -537,6 +569,11 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (!queue) return -ENOENT; + if (queue->flags & MSM_SUBMITQUEUE_VM_BIND) { + ret = UERR(EINVAL, dev, "Invalid queue type"); + goto out_post_unlock; + } + ring = gpu->rb[queue->ring_nr]; if (args->flags & MSM_SUBMIT_FENCE_FD_OUT) { @@ -727,6 +764,18 @@ int 
msm_ioctl_gem_submit(struct drm_device *dev, void *data, submit_attach_object_fences(submit); + if (msm_context_is_vmbind(ctx)) { + /* + * If we are not using VM_BIND, submit_pin_vmas() will validate + * just the BOs attached to the submit. In that case we don't + * need to validate the _entire_ vm, because userspace tracked + * what BOs are associated with the submit. + */ + ret = drm_gpuvm_validate(submit->vm, &submit->exec); + if (ret) + goto out; + } + /* The scheduler owns a ref now: */ msm_gem_submit_get(submit); diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index e16a8cafd8be..cf37abb98235 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -16,6 +16,7 @@ msm_gem_vm_free(struct drm_gpuvm *gpuvm) drm_mm_takedown(&vm->mm); if (vm->mmu) vm->mmu->funcs->destroy(vm->mmu); + dma_fence_put(vm->last_fence); put_pid(vm->pid); kfree(vm); } @@ -154,6 +155,9 @@ static const struct drm_gpuvm_ops msm_gpuvm_ops = { .vm_free = msm_gem_vm_free, }; +static const struct drm_sched_backend_ops msm_vm_bind_ops = { +}; + /** * msm_gem_vm_create() - Create and initialize a &msm_gem_vm * @drm: the drm device @@ -195,6 +199,21 @@ msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name, goto err_free_vm; } + if (!managed) { + struct drm_sched_init_args args = { + .ops = &msm_vm_bind_ops, + .num_rqs = 1, + .credit_limit = 1, + .timeout = MAX_SCHEDULE_TIMEOUT, + .name = "msm-vm-bind", + .dev = drm->dev, + }; + + ret = drm_sched_init(&vm->sched, &args); + if (ret) + goto err_free_dummy; + } + drm_gpuvm_init(&vm->base, name, flags, drm, dummy_gem, va_start, va_size, 0, 0, &msm_gpuvm_ops); drm_gem_object_put(dummy_gem); @@ -206,8 +225,60 @@ msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name, return &vm->base; +err_free_dummy: + drm_gem_object_put(dummy_gem); + err_free_vm: kfree(vm); return ERR_PTR(ret); } + +/** + * msm_gem_vm_close() - Close a VM + * @gpuvm: The VM to close + * + * Called when the drm device file is closed, to tear down VM related resources + * (which will drop refcounts to GEM objects that were still mapped into the + * VM at the time). + */ +void +msm_gem_vm_close(struct drm_gpuvm *gpuvm) +{ + struct msm_gem_vm *vm = to_msm_vm(gpuvm); + struct drm_gpuva *vma, *tmp; + + /* + * For kernel managed VMs, the VMAs are torn down when the handle is + * closed, so nothing more to do. 
+ */ + if (vm->managed) + return; + + if (vm->last_fence) + dma_fence_wait(vm->last_fence, false); + + /* Kill the scheduler now, so we aren't racing with it for cleanup: */ + drm_sched_stop(&vm->sched, NULL); + drm_sched_fini(&vm->sched); + + /* Tear down any remaining mappings: */ + dma_resv_lock(drm_gpuvm_resv(gpuvm), NULL); + drm_gpuvm_for_each_va_safe (vma, tmp, gpuvm) { + struct drm_gem_object *obj = vma->gem.obj; + + if (obj && obj->resv != drm_gpuvm_resv(gpuvm)) { + drm_gem_object_get(obj); + msm_gem_lock(obj); + } + + msm_gem_vma_unmap(vma); + msm_gem_vma_close(vma); + + if (obj && obj->resv != drm_gpuvm_resv(gpuvm)) { + msm_gem_unlock(obj); + drm_gem_object_put(obj); + } + } + dma_resv_unlock(drm_gpuvm_resv(gpuvm)); +} diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index b38a33a67ee9..5705e8d4e6b9 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -564,6 +564,9 @@ struct msm_gpu_submitqueue { struct mutex lock; struct kref ref; struct drm_sched_entity *entity; + + /** @_vm_bind_entity: used for @entity pointer for VM_BIND queues */ + struct drm_sched_entity _vm_bind_entity[0]; }; struct msm_gpu_state_bo { diff --git a/drivers/gpu/drm/msm/msm_submitqueue.c b/drivers/gpu/drm/msm/msm_submitqueue.c index 8ced49c7557b..8617a82cd6b3 100644 --- a/drivers/gpu/drm/msm/msm_submitqueue.c +++ b/drivers/gpu/drm/msm/msm_submitqueue.c @@ -72,6 +72,9 @@ void msm_submitqueue_destroy(struct kref *kref) idr_destroy(&queue->fence_idr); + if (queue->entity == &queue->_vm_bind_entity[0]) + drm_sched_entity_destroy(queue->entity); + msm_context_put(queue->ctx); kfree(queue); @@ -102,7 +105,7 @@ struct msm_gpu_submitqueue *msm_submitqueue_get(struct msm_context *ctx, void msm_submitqueue_close(struct msm_context *ctx) { - struct msm_gpu_submitqueue *entry, *tmp; + struct msm_gpu_submitqueue *queue, *tmp; if (!ctx) return; @@ -111,10 +114,17 @@ void msm_submitqueue_close(struct msm_context *ctx) * No lock needed in close and there won't * be any more user ioctls coming our way */ - list_for_each_entry_safe(entry, tmp, &ctx->submitqueues, node) { - list_del(&entry->node); - msm_submitqueue_put(entry); + list_for_each_entry_safe(queue, tmp, &ctx->submitqueues, node) { + if (queue->entity == &queue->_vm_bind_entity[0]) + drm_sched_entity_flush(queue->entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY); + list_del(&queue->node); + msm_submitqueue_put(queue); } + + if (!ctx->vm) + return; + + msm_gem_vm_close(ctx->vm); } static struct drm_sched_entity * @@ -160,8 +170,6 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_context *ctx, struct msm_drm_private *priv = drm->dev_private; struct msm_gpu_submitqueue *queue; enum drm_sched_priority sched_prio; - extern int enable_preemption; - bool preemption_supported; unsigned ring_nr; int ret; @@ -171,26 +179,53 @@ int msm_submitqueue_create(struct drm_device *drm, struct msm_context *ctx, if (!priv->gpu) return -ENODEV; - preemption_supported = priv->gpu->nr_rings == 1 && enable_preemption != 0; + if (flags & MSM_SUBMITQUEUE_VM_BIND) { + unsigned sz; - if (flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT && preemption_supported) - return -EINVAL; + /* Not allowed for kernel managed VMs (ie. 
kernel allocs VA) */ + if (!msm_context_is_vmbind(ctx)) + return -EINVAL; - ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio); - if (ret) - return ret; + if (prio) + return -EINVAL; + + sz = struct_size(queue, _vm_bind_entity, 1); + queue = kzalloc(sz, GFP_KERNEL); + } else { + extern int enable_preemption; + bool preemption_supported = + priv->gpu->nr_rings == 1 && enable_preemption != 0; + + if (flags & MSM_SUBMITQUEUE_ALLOW_PREEMPT && preemption_supported) + return -EINVAL; - queue = kzalloc(sizeof(*queue), GFP_KERNEL); + ret = msm_gpu_convert_priority(priv->gpu, prio, &ring_nr, &sched_prio); + if (ret) + return ret; + + queue = kzalloc(sizeof(*queue), GFP_KERNEL); + } if (!queue) return -ENOMEM; kref_init(&queue->ref); queue->flags = flags; - queue->ring_nr = ring_nr; - queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr], - ring_nr, sched_prio); + if (flags & MSM_SUBMITQUEUE_VM_BIND) { + struct drm_gpu_scheduler *sched = &to_msm_vm(msm_context_vm(drm, ctx))->sched; + + queue->entity = &queue->_vm_bind_entity[0]; + + drm_sched_entity_init(queue->entity, DRM_SCHED_PRIORITY_KERNEL, + &sched, 1, NULL); + } else { + queue->ring_nr = ring_nr; + + queue->entity = get_sched_entity(ctx, priv->gpu->rb[ring_nr], + ring_nr, sched_prio); + } + if (IS_ERR(queue->entity)) { ret = PTR_ERR(queue->entity); kfree(queue); diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index 2c2fc4b284d0..6d6cd1219926 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -385,12 +385,19 @@ struct drm_msm_gem_madvise { /* * Draw queues allow the user to set specific submission parameter. Command * submissions specify a specific submitqueue to use. ID 0 is reserved for - * backwards compatibility as a "default" submitqueue + * backwards compatibility as a "default" submitqueue. + * + * Because VM_BIND async updates happen on the CPU, they must run on a + * virtual queue created with the flag MSM_SUBMITQUEUE_VM_BIND. If we had + * a way to do pgtable updates on the GPU, we could drop this restriction. */ #define MSM_SUBMITQUEUE_ALLOW_PREEMPT 0x00000001 +#define MSM_SUBMITQUEUE_VM_BIND 0x00000002 /* virtual queue for VM_BIND ops */ + #define MSM_SUBMITQUEUE_FLAGS ( \ MSM_SUBMITQUEUE_ALLOW_PREEMPT | \ + MSM_SUBMITQUEUE_VM_BIND | \ 0) /* -- cgit v1.2.3 From e601ea31d66ba83d565cae9cfa45cbbcdd8286dd Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Sun, 29 Jun 2025 13:13:16 -0700 Subject: drm/msm: Support pgtable preallocation Introduce a mechanism to count the worst case # of pages required in a VM_BIND op. Note that previously we would have had to somehow account for allocations in unmap, when splitting a block. 
This behavior was removed in commit 33729a5fc0ca ("iommu/io-pgtable-arm: Remove split on unmap behavior"). Signed-off-by: Rob Clark Signed-off-by: Rob Clark Tested-by: Antonino Maniscalco Reviewed-by: Antonino Maniscalco Patchwork: https://patchwork.freedesktop.org/patch/661515/ --- drivers/gpu/drm/msm/msm_gem.h | 1 + drivers/gpu/drm/msm/msm_iommu.c | 191 +++++++++++++++++++++++++++++++++++++++- drivers/gpu/drm/msm/msm_mmu.h | 34 +++++++ 3 files changed, 225 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/msm/msm_gem.h') diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index af637409be39..f369a30a247c 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -7,6 +7,7 @@ #ifndef __MSM_GEM_H__ #define __MSM_GEM_H__ +#include "msm_mmu.h" #include #include #include "drm/drm_exec.h" diff --git a/drivers/gpu/drm/msm/msm_iommu.c b/drivers/gpu/drm/msm/msm_iommu.c index bd67431cb25f..887c9023f8a2 100644 --- a/drivers/gpu/drm/msm/msm_iommu.c +++ b/drivers/gpu/drm/msm/msm_iommu.c @@ -6,6 +6,7 @@ #include #include +#include #include "msm_drv.h" #include "msm_mmu.h" @@ -14,6 +15,8 @@ struct msm_iommu { struct iommu_domain *domain; atomic_t pagetables; struct page *prr_page; + + struct kmem_cache *pt_cache; }; #define to_msm_iommu(x) container_of(x, struct msm_iommu, base) @@ -27,6 +30,9 @@ struct msm_iommu_pagetable { unsigned long pgsize_bitmap; /* Bitmap of page sizes in use */ phys_addr_t ttbr; u32 asid; + + /** @root_page_table: Stores the root page table pointer. */ + void *root_page_table; }; static struct msm_iommu_pagetable *to_pagetable(struct msm_mmu *mmu) { @@ -282,7 +288,145 @@ msm_iommu_pagetable_walk(struct msm_mmu *mmu, unsigned long iova, uint64_t ptes[ return 0; } +static void +msm_iommu_pagetable_prealloc_count(struct msm_mmu *mmu, struct msm_mmu_prealloc *p, + uint64_t iova, size_t len) +{ + u64 pt_count; + + /* + * L1, L2 and L3 page tables. + * + * We could optimize L3 allocation by iterating over the sgt and merging + * 2M contiguous blocks, but it's simpler to over-provision and return + * the pages if they're not used. + * + * The first level descriptor (v8 / v7-lpae page table format) encodes + * 30 bits of address. The second level encodes 29. For the 3rd it is + * 39.
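+ * + * For example (worked from the formula below): a 32MB mapping at a + * 2MB aligned iova that crosses no 1GB or 512GB boundary needs at + * most 1 + 1 + 16 = 18 pages (one L1, one L2, and sixteen L3 tables).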
+ * + * https://developer.arm.com/documentation/ddi0406/c/System-Level-Architecture/Virtual-Memory-System-Architecture--VMSA-/Long-descriptor-translation-table-format/Long-descriptor-translation-table-format-descriptors?lang=en#BEIHEFFB + */ + pt_count = ((ALIGN(iova + len, 1ull << 39) - ALIGN_DOWN(iova, 1ull << 39)) >> 39) + + ((ALIGN(iova + len, 1ull << 30) - ALIGN_DOWN(iova, 1ull << 30)) >> 30) + + ((ALIGN(iova + len, 1ull << 21) - ALIGN_DOWN(iova, 1ull << 21)) >> 21); + + p->count += pt_count; +} + +static struct kmem_cache * +get_pt_cache(struct msm_mmu *mmu) +{ + struct msm_iommu_pagetable *pagetable = to_pagetable(mmu); + return to_msm_iommu(pagetable->parent)->pt_cache; +} + +static int +msm_iommu_pagetable_prealloc_allocate(struct msm_mmu *mmu, struct msm_mmu_prealloc *p) +{ + struct kmem_cache *pt_cache = get_pt_cache(mmu); + int ret; + + p->pages = kvmalloc_array(p->count, sizeof(p->pages), GFP_KERNEL); + if (!p->pages) + return -ENOMEM; + + ret = kmem_cache_alloc_bulk(pt_cache, GFP_KERNEL, p->count, p->pages); + if (ret != p->count) { + p->count = ret; + return -ENOMEM; + } + + return 0; +} + +static void +msm_iommu_pagetable_prealloc_cleanup(struct msm_mmu *mmu, struct msm_mmu_prealloc *p) +{ + struct kmem_cache *pt_cache = get_pt_cache(mmu); + uint32_t remaining_pt_count = p->count - p->ptr; + + kmem_cache_free_bulk(pt_cache, remaining_pt_count, &p->pages[p->ptr]); + kvfree(p->pages); +} + +/** + * alloc_pt() - Custom page table allocator + * @cookie: Cookie passed at page table allocation time. + * @size: Size of the page table. This size should be fixed, + * and determined at creation time based on the granule size. + * @gfp: GFP flags. + * + * We want a custom allocator so we can use a cache for page table + * allocations and amortize the cost of the over-reservation that's + * done to allow asynchronous VM operations. + * + * Return: non-NULL on success, NULL if the allocation failed for any + * reason. + */ +static void * +msm_iommu_pagetable_alloc_pt(void *cookie, size_t size, gfp_t gfp) +{ + struct msm_iommu_pagetable *pagetable = cookie; + struct msm_mmu_prealloc *p = pagetable->base.prealloc; + void *page; + + /* Allocation of the root page table happening during init. */ + if (unlikely(!pagetable->root_page_table)) { + struct page *p; + + p = alloc_pages_node(dev_to_node(pagetable->iommu_dev), + gfp | __GFP_ZERO, get_order(size)); + page = p ? page_address(p) : NULL; + pagetable->root_page_table = page; + return page; + } + + if (WARN_ON(!p) || WARN_ON(p->ptr >= p->count)) + return NULL; + + page = p->pages[p->ptr++]; + memset(page, 0, size); + + /* + * Page table entries don't use virtual addresses, which trips out + * kmemleak. kmemleak_alloc_phys() might work, but physical addresses + * are mixed with other fields, and I fear kmemleak won't detect that + * either. + * + * Let's just ignore memory passed to the page-table driver for now. + */ + kmemleak_ignore(page); + + return page; +} + + +/** + * free_pt() - Custom page table free function + * @cookie: Cookie passed at page table allocation time. + * @data: Page table to free. + * @size: Size of the page table. This size should be fixed, + * and determined at creation time based on the granule size. 
+ */ +static void +msm_iommu_pagetable_free_pt(void *cookie, void *data, size_t size) +{ + struct msm_iommu_pagetable *pagetable = cookie; + + if (unlikely(pagetable->root_page_table == data)) { + free_pages((unsigned long)data, get_order(size)); + pagetable->root_page_table = NULL; + return; + } + + kmem_cache_free(get_pt_cache(&pagetable->base), data); +} + static const struct msm_mmu_funcs pagetable_funcs = { + .prealloc_count = msm_iommu_pagetable_prealloc_count, + .prealloc_allocate = msm_iommu_pagetable_prealloc_allocate, + .prealloc_cleanup = msm_iommu_pagetable_prealloc_cleanup, .map = msm_iommu_pagetable_map, .unmap = msm_iommu_pagetable_unmap, .destroy = msm_iommu_pagetable_destroy, @@ -333,6 +477,17 @@ static const struct iommu_flush_ops tlb_ops = { static int msm_gpu_fault_handler(struct iommu_domain *domain, struct device *dev, unsigned long iova, int flags, void *arg); +static size_t get_tblsz(const struct io_pgtable_cfg *cfg) +{ + int pg_shift, bits_per_level; + + pg_shift = __ffs(cfg->pgsize_bitmap); + /* arm_lpae_iopte is u64: */ + bits_per_level = pg_shift - ilog2(sizeof(u64)); + + return sizeof(u64) << bits_per_level; +} + struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_managed) { struct adreno_smmu_priv *adreno_smmu = dev_get_drvdata(parent->dev); @@ -369,8 +524,34 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_m if (!kernel_managed) { ttbr0_cfg.quirks |= IO_PGTABLE_QUIRK_NO_WARN; + + /* + * With userspace managed VM (aka VM_BIND), we need to pre- + * allocate pages ahead of time for map/unmap operations, + * handing them to io-pgtable via custom alloc/free ops as + * needed: + */ + ttbr0_cfg.alloc = msm_iommu_pagetable_alloc_pt; + ttbr0_cfg.free = msm_iommu_pagetable_free_pt; + + /* + * Restrict to single page granules. Otherwise we may run + * into a situation where userspace wants to unmap/remap + * only a part of a larger block mapping, which is not + * possible without unmapping the entire block, which in + * turn could cause faults if the GPU is accessing other + * parts of the block mapping. + * + * Note that prior to commit 33729a5fc0ca ("iommu/io-pgtable-arm: + * Remove split on unmap behavior") this was handled in + * io-pgtable-arm. But this apparently does not work + * correctly on SMMUv3.
+ */ + WARN_ON(!(ttbr0_cfg.pgsize_bitmap & PAGE_SIZE)); + ttbr0_cfg.pgsize_bitmap = PAGE_SIZE; } + pagetable->iommu_dev = ttbr1_cfg->iommu_dev; pagetable->pgtbl_ops = alloc_io_pgtable_ops(ARM_64_LPAE_S1, &ttbr0_cfg, pagetable); @@ -414,7 +595,6 @@ struct msm_mmu *msm_iommu_pagetable_create(struct msm_mmu *parent, bool kernel_m /* Needed later for TLB flush */ pagetable->parent = parent; pagetable->tlb = ttbr1_cfg->tlb; - pagetable->iommu_dev = ttbr1_cfg->iommu_dev; pagetable->pgsize_bitmap = ttbr0_cfg.pgsize_bitmap; pagetable->ttbr = ttbr0_cfg.arm_lpae_s1_cfg.ttbr; @@ -510,6 +690,7 @@ static void msm_iommu_destroy(struct msm_mmu *mmu) { struct msm_iommu *iommu = to_msm_iommu(mmu); iommu_domain_free(iommu->domain); + kmem_cache_destroy(iommu->pt_cache); kfree(iommu); } @@ -583,6 +764,14 @@ struct msm_mmu *msm_iommu_gpu_new(struct device *dev, struct msm_gpu *gpu, unsig return mmu; iommu = to_msm_iommu(mmu); + if (adreno_smmu && adreno_smmu->cookie) { + const struct io_pgtable_cfg *cfg = + adreno_smmu->get_ttbr1_cfg(adreno_smmu->cookie); + size_t tblsz = get_tblsz(cfg); + + iommu->pt_cache = + kmem_cache_create("msm-mmu-pt", tblsz, tblsz, 0, NULL); + } iommu_set_fault_handler(iommu->domain, msm_gpu_fault_handler, iommu); /* Enable stall on iommu fault: */ diff --git a/drivers/gpu/drm/msm/msm_mmu.h b/drivers/gpu/drm/msm/msm_mmu.h index 04dce0faaa3a..8915662fbd4d 100644 --- a/drivers/gpu/drm/msm/msm_mmu.h +++ b/drivers/gpu/drm/msm/msm_mmu.h @@ -9,8 +9,16 @@ #include +struct msm_mmu_prealloc; +struct msm_mmu; +struct msm_gpu; + struct msm_mmu_funcs { void (*detach)(struct msm_mmu *mmu); + void (*prealloc_count)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p, + uint64_t iova, size_t len); + int (*prealloc_allocate)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p); + void (*prealloc_cleanup)(struct msm_mmu *mmu, struct msm_mmu_prealloc *p); int (*map)(struct msm_mmu *mmu, uint64_t iova, struct sg_table *sgt, size_t off, size_t len, int prot); int (*unmap)(struct msm_mmu *mmu, uint64_t iova, size_t len); @@ -24,12 +32,38 @@ enum msm_mmu_type { MSM_MMU_IOMMU_PAGETABLE, }; +/** + * struct msm_mmu_prealloc - Tracking for pre-allocated pages for MMU updates. + */ +struct msm_mmu_prealloc { + /** @count: Number of pages reserved. */ + uint32_t count; + /** @ptr: Index of first unused page in @pages */ + uint32_t ptr; + /** + * @pages: Array of pages preallocated for MMU table updates. + * + * After a VM operation, there might be free pages remaining in this + * array (since the amount allocated is a worst-case). These are + * returned to the pt_cache at mmu->prealloc_cleanup(). + */ + void **pages; +}; + struct msm_mmu { const struct msm_mmu_funcs *funcs; struct device *dev; int (*handler)(void *arg, unsigned long iova, int flags, void *data); void *arg; enum msm_mmu_type type; + + /** + * @prealloc: pre-allocated pages for pgtable + * + * Set while a VM_BIND job is running, serialized under + * msm_gem_vm::mmu_lock. + */ + struct msm_mmu_prealloc *prealloc; }; static inline void msm_mmu_init(struct msm_mmu *mmu, struct device *dev, -- cgit v1.2.3 From 2e6a8a1fe2b262a6dfd0a65041fcd830ee1e7143 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Sun, 29 Jun 2025 13:13:18 -0700 Subject: drm/msm: Add VM_BIND ioctl Add a VM_BIND ioctl for binding/unbinding buffers into a VM. This is only supported if userspace has opted in to MSM_PARAM_EN_VM_BIND. 
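For context, the expected userspace flow is: opt in via MSM_PARAM_EN_VM_BIND, create a submitqueue with MSM_SUBMITQUEUE_VM_BIND, then issue bind ops on that queue. A minimal single-op sketch (assuming the uapi added by this patch: nr_ops, op, ops and the MSM_VM_BIND_OP_* values appear in the handler below, while the queue_id field and the DRM_IOCTL_MSM_VM_BIND macro are assumed from the usual msm uapi conventions and may differ in detail): struct drm_msm_vm_bind bind = { .nr_ops = 1, .queue_id = vm_bind_queue_id, .op = { .op = MSM_VM_BIND_OP_MAP, .handle = bo_handle, .obj_offset = 0, .iova = gpu_va, .range = bo_size, }, }; drmIoctl(fd, DRM_IOCTL_MSM_VM_BIND, &bind); With nr_ops > 1 the ops are instead passed as a user-pointer array of struct drm_msm_vm_bind_op via the ops field, as parsed in vm_bind_job_lookup_ops() below.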
Signed-off-by: Rob Clark Signed-off-by: Rob Clark Tested-by: Antonino Maniscalco Reviewed-by: Antonino Maniscalco Patchwork: https://patchwork.freedesktop.org/patch/661524/ --- drivers/gpu/drm/msm/msm_drv.c | 1 + drivers/gpu/drm/msm/msm_drv.h | 4 +- drivers/gpu/drm/msm/msm_gem.c | 40 +- drivers/gpu/drm/msm/msm_gem.h | 4 + drivers/gpu/drm/msm/msm_gem_submit.c | 22 +- drivers/gpu/drm/msm/msm_gem_vma.c | 1093 +++++++++++++++++++++++++++++++++- include/uapi/drm/msm_drm.h | 74 ++- 7 files changed, 1205 insertions(+), 33 deletions(-) (limited to 'drivers/gpu/drm/msm/msm_gem.h') diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index d75753745f12..639f71f5e06e 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -802,6 +802,7 @@ static const struct drm_ioctl_desc msm_ioctls[] = { DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_NEW, msm_ioctl_submitqueue_new, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_CLOSE, msm_ioctl_submitqueue_close, DRM_RENDER_ALLOW), DRM_IOCTL_DEF_DRV(MSM_SUBMITQUEUE_QUERY, msm_ioctl_submitqueue_query, DRM_RENDER_ALLOW), + DRM_IOCTL_DEF_DRV(MSM_VM_BIND, msm_ioctl_vm_bind, DRM_RENDER_ALLOW), }; static void msm_show_fdinfo(struct drm_printer *p, struct drm_file *file) diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 9b1ccb2b18f6..200c3135bbf9 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -255,7 +255,9 @@ struct drm_gpuvm *msm_kms_init_vm(struct drm_device *dev); bool msm_use_mmu(struct drm_device *dev); int msm_ioctl_gem_submit(struct drm_device *dev, void *data, - struct drm_file *file); + struct drm_file *file); +int msm_ioctl_vm_bind(struct drm_device *dev, void *data, + struct drm_file *file); #ifdef CONFIG_DEBUG_FS unsigned long msm_gem_shrinker_shrink(struct drm_device *dev, unsigned long nr_to_scan); diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index b688d397cc47..77fdf53d3e33 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -251,8 +251,7 @@ static void put_pages(struct drm_gem_object *obj) } } -static struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj, - unsigned madv) +struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj, unsigned madv) { struct msm_gem_object *msm_obj = to_msm_bo(obj); @@ -1052,18 +1051,37 @@ static void msm_gem_free_object(struct drm_gem_object *obj) /* * We need to lock any VMs the object is still attached to, but not * the object itself (see explanation in msm_gem_assert_locked()), - * so just open-code this special case: + * so just open-code this special case. + * + * Note that we skip the dance if we aren't attached to any VM. This + * is load bearing. The driver needs to support two usage models: + * + * 1. Legacy kernel managed VM: Userspace expects the VMAs to be + * implicitly torn down when the object is freed; the VMAs do + * not hold a hard reference to the BO. + * + * 2. VM_BIND, userspace managed VM: The VMA holds a reference to the + * BO. This can be dropped when the VM is closed and its associated + * VMAs are torn down. (See msm_gem_vm_close()). + * + * In the latter case the last reference to a BO can be dropped while + * we already have the VM locked. It would have already been removed + * from the gpuva list, but lockdep doesn't know that, or understand + * the differences between the two usage models.
*/ - drm_exec_init(&exec, 0, 0); - drm_exec_until_all_locked (&exec) { - struct drm_gpuvm_bo *vm_bo; - drm_gem_for_each_gpuvm_bo (vm_bo, obj) { - drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(vm_bo->vm)); - drm_exec_retry_on_contention(&exec); + if (!list_empty(&obj->gpuva.list)) { + drm_exec_init(&exec, 0, 0); + drm_exec_until_all_locked (&exec) { + struct drm_gpuvm_bo *vm_bo; + drm_gem_for_each_gpuvm_bo (vm_bo, obj) { + drm_exec_lock_obj(&exec, + drm_gpuvm_resv_obj(vm_bo->vm)); + drm_exec_retry_on_contention(&exec); + } } + put_iova_spaces(obj, NULL, true); + drm_exec_fini(&exec); /* drop locks */ } - put_iova_spaces(obj, NULL, true); - drm_exec_fini(&exec); /* drop locks */ if (drm_gem_is_imported(obj)) { GEM_WARN_ON(msm_obj->vaddr); diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index f369a30a247c..ee464e315643 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -73,6 +73,9 @@ struct msm_gem_vm { /** @mmu: The mmu object which manages the pgtables */ struct msm_mmu *mmu; + /** @mmu_lock: Protects access to the mmu */ + struct mutex mmu_lock; + /** * @pid: For address spaces associated with a specific process, this * will be non-NULL: @@ -205,6 +208,7 @@ int msm_gem_get_and_pin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm, uint64_t *iova); void msm_gem_unpin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm); void msm_gem_pin_obj_locked(struct drm_gem_object *obj); +struct page **msm_gem_get_pages_locked(struct drm_gem_object *obj, unsigned madv); struct page **msm_gem_pin_pages_locked(struct drm_gem_object *obj); void msm_gem_unpin_pages_locked(struct drm_gem_object *obj); int msm_gem_dumb_create(struct drm_file *file, struct drm_device *dev, diff --git a/drivers/gpu/drm/msm/msm_gem_submit.c b/drivers/gpu/drm/msm/msm_gem_submit.c index 108f3372ba93..5f8e939a5906 100644 --- a/drivers/gpu/drm/msm/msm_gem_submit.c +++ b/drivers/gpu/drm/msm/msm_gem_submit.c @@ -194,6 +194,7 @@ out: static int submit_lookup_cmds(struct msm_gem_submit *submit, struct drm_msm_gem_submit *args, struct drm_file *file) { + struct msm_context *ctx = file->driver_priv; unsigned i; size_t sz; int ret = 0; @@ -225,6 +226,20 @@ static int submit_lookup_cmds(struct msm_gem_submit *submit, goto out; } + if (msm_context_is_vmbind(ctx)) { + if (submit_cmd.nr_relocs) { + ret = SUBMIT_ERROR(EINVAL, submit, "nr_relocs must be zero"); + goto out; + } + + if (submit_cmd.submit_idx || submit_cmd.submit_offset) { + ret = SUBMIT_ERROR(EINVAL, submit, "submit_idx/offset must be zero"); + goto out; + } + + submit->cmd[i].iova = submit_cmd.iova; + } + submit->cmd[i].type = submit_cmd.type; submit->cmd[i].size = submit_cmd.size / 4; submit->cmd[i].offset = submit_cmd.submit_offset / 4; @@ -537,6 +552,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, struct msm_syncobj_post_dep *post_deps = NULL; struct drm_syncobj **syncobjs_to_reset = NULL; struct sync_file *sync_file = NULL; + unsigned cmds_to_parse; int out_fence_fd = -1; unsigned i; int ret; @@ -661,7 +677,9 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, if (ret) goto out; - for (i = 0; i < args->nr_cmds; i++) { + cmds_to_parse = msm_context_is_vmbind(ctx) ? 
0 : args->nr_cmds; + + for (i = 0; i < cmds_to_parse; i++) { struct drm_gem_object *obj; uint64_t iova; @@ -692,7 +710,7 @@ int msm_ioctl_gem_submit(struct drm_device *dev, void *data, goto out; } - submit->nr_cmds = i; + submit->nr_cmds = args->nr_cmds; idr_preload(GFP_KERNEL); diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index 76b79c122182..69e33d4b278b 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -4,9 +4,16 @@ * Author: Rob Clark */ +#include "drm/drm_file.h" +#include "drm/msm_drm.h" +#include "linux/file.h" +#include "linux/sync_file.h" + #include "msm_drv.h" #include "msm_gem.h" +#include "msm_gpu.h" #include "msm_mmu.h" +#include "msm_syncobj.h" #define vm_dbg(fmt, ...) pr_debug("%s:%d: "fmt"\n", __func__, __LINE__, ##__VA_ARGS__) @@ -36,6 +43,97 @@ struct msm_vm_unmap_op { uint64_t range; }; +/** + * struct msm_vm_op - A MAP or UNMAP operation + */ +struct msm_vm_op { + /** @op: The operation type */ + enum { + MSM_VM_OP_MAP = 1, + MSM_VM_OP_UNMAP, + } op; + union { + /** @map: Parameters used if op == MSM_VM_OP_MAP */ + struct msm_vm_map_op map; + /** @unmap: Parameters used if op == MSM_VM_OP_UNMAP */ + struct msm_vm_unmap_op unmap; + }; + /** @node: list head in msm_vm_bind_job::vm_ops */ + struct list_head node; + + /** + * @obj: backing object for pages to be mapped/unmapped + * + * Async unmap ops, in particular, must hold a reference to the + * original GEM object backing the mapping that will be unmapped. + * But the same can be required in the map path, for example if + * there is not a corresponding unmap op, such as process exit. + * + * This ensures that the pages backing the mapping are not freed + * before the mapping is torn down. + */ + struct drm_gem_object *obj; +}; + +/** + * struct msm_vm_bind_job - Tracking for a VM_BIND ioctl + * + * A table of userspace requested VM updates (MSM_VM_BIND_OP_UNMAP/MAP/MAP_NULL) + * gets applied to the vm, generating a list of VM ops (MSM_VM_OP_MAP/UNMAP) + * which are applied to the pgtables asynchronously. For example a userspace + * requested MSM_VM_BIND_OP_MAP could end up generating both an MSM_VM_OP_UNMAP + * to unmap an existing mapping, and a MSM_VM_OP_MAP to apply the new mapping. + */ +struct msm_vm_bind_job { + /** @base: base class for drm_sched jobs */ + struct drm_sched_job base; + /** @vm: The VM being operated on */ + struct drm_gpuvm *vm; + /** @fence: The fence that is signaled when job completes */ + struct dma_fence *fence; + /** @queue: The queue that the job runs on */ + struct msm_gpu_submitqueue *queue; + /** @prealloc: Tracking for pre-allocated MMU pgtable pages */ + struct msm_mmu_prealloc prealloc; + /** @vm_ops: a list of struct msm_vm_op */ + struct list_head vm_ops; + /** @bos_pinned: are the GEM objects being bound pinned? */ + bool bos_pinned; + /** @nr_ops: the number of userspace requested ops */ + unsigned int nr_ops; + /** + * @ops: the userspace requested ops + * + * The userspace requested ops are copied/parsed and validated + * before we start applying the updates to try to do as much up- + * front error checking as possible, to avoid the VM being in an + * undefined state due to partially executed VM_BIND. + * + * This table also serves to hold a reference to the backing GEM + * objects.
+ */ + struct msm_vm_bind_op { + uint32_t op; + uint32_t flags; + union { + struct drm_gem_object *obj; + uint32_t handle; + }; + uint64_t obj_offset; + uint64_t iova; + uint64_t range; + } ops[]; +}; + +#define job_foreach_bo(obj, _job) \ + for (unsigned i = 0; i < (_job)->nr_ops; i++) \ + if ((obj = (_job)->ops[i].obj)) + +static inline struct msm_vm_bind_job *to_msm_vm_bind_job(struct drm_sched_job *job) +{ + return container_of(job, struct msm_vm_bind_job, base); +} + static void msm_gem_vm_free(struct drm_gpuvm *gpuvm) { @@ -52,6 +150,9 @@ msm_gem_vm_free(struct drm_gpuvm *gpuvm) static void vm_unmap_op(struct msm_gem_vm *vm, const struct msm_vm_unmap_op *op) { + if (!vm->managed) + lockdep_assert_held(&vm->mmu_lock); + vm_dbg("%p: %016llx %016llx", vm, op->iova, op->iova + op->range); vm->mmu->funcs->unmap(vm->mmu, op->iova, op->range); @@ -60,6 +161,9 @@ vm_unmap_op(struct msm_gem_vm *vm, const struct msm_vm_unmap_op *op) static int vm_map_op(struct msm_gem_vm *vm, const struct msm_vm_map_op *op) { + if (!vm->managed) + lockdep_assert_held(&vm->mmu_lock); + vm_dbg("%p: %016llx %016llx", vm, op->iova, op->iova + op->range); return vm->mmu->funcs->map(vm->mmu, op->iova, op->sgt, op->offset, @@ -69,17 +173,29 @@ vm_map_op(struct msm_gem_vm *vm, const struct msm_vm_map_op *op) /* Actually unmap memory for the vma */ void msm_gem_vma_unmap(struct drm_gpuva *vma) { + struct msm_gem_vm *vm = to_msm_vm(vma->vm); struct msm_gem_vma *msm_vma = to_msm_vma(vma); /* Don't do anything if the memory isn't mapped */ if (!msm_vma->mapped) return; - vm_unmap_op(to_msm_vm(vma->vm), &(struct msm_vm_unmap_op){ + /* + * The mmu_lock is only needed when preallocation is used. But + * in that case we don't need to worry about recursion into + * shrinker + */ + if (!vm->managed) + mutex_lock(&vm->mmu_lock); + + vm_unmap_op(vm, &(struct msm_vm_unmap_op){ .iova = vma->va.addr, .range = vma->va.range, }); + if (!vm->managed) + mutex_unlock(&vm->mmu_lock); + msm_vma->mapped = false; } @@ -87,6 +203,7 @@ void msm_gem_vma_unmap(struct drm_gpuva *vma) int msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt) { + struct msm_gem_vm *vm = to_msm_vm(vma->vm); struct msm_gem_vma *msm_vma = to_msm_vma(vma); int ret; @@ -98,6 +215,14 @@ msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt) msm_vma->mapped = true; + /* + * The mmu_lock is only needed when preallocation is used. But + * in that case we don't need to worry about recursion into + * shrinker + */ + if (!vm->managed) + mutex_lock(&vm->mmu_lock); + /* * NOTE: iommu/io-pgtable can allocate pages, so we cannot hold * a lock across map/unmap which is also used in the job_run() @@ -107,16 +232,19 @@ msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt) * Revisit this if we can come up with a scheme to pre-alloc pages * for the pgtable in map/unmap ops. 
*/ - ret = vm_map_op(to_msm_vm(vma->vm), &(struct msm_vm_map_op){ + ret = vm_map_op(vm, &(struct msm_vm_map_op){ .iova = vma->va.addr, .range = vma->va.range, .offset = vma->gem.offset, .sgt = sgt, .prot = prot, }); - if (ret) { + + if (!vm->managed) + mutex_unlock(&vm->mmu_lock); + + if (ret) msm_vma->mapped = false; - } return ret; } @@ -131,6 +259,9 @@ void msm_gem_vma_close(struct drm_gpuva *vma) drm_gpuvm_resv_assert_held(&vm->base); + if (vma->gem.obj) + msm_gem_assert_locked(vma->gem.obj); + if (vma->va.addr && vm->managed) drm_mm_remove_node(&msm_vma->node); @@ -158,6 +289,7 @@ msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj, if (vm->managed) { BUG_ON(offset != 0); + BUG_ON(!obj); /* NULL mappings not valid for kernel managed VM */ ret = drm_mm_insert_node_in_range(&vm->mm, &vma->node, obj->size, PAGE_SIZE, 0, range_start, range_end, 0); @@ -169,7 +301,8 @@ msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj, range_end = range_start + obj->size; } - GEM_WARN_ON((range_end - range_start) > obj->size); + if (obj) + GEM_WARN_ON((range_end - range_start) > obj->size); drm_gpuva_init(&vma->base, range_start, range_end - range_start, obj, offset); vma->mapped = false; @@ -178,6 +311,9 @@ msm_gem_vma_new(struct drm_gpuvm *gpuvm, struct drm_gem_object *obj, if (ret) goto err_free_range; + if (!obj) + return &vma->base; + vm_bo = drm_gpuvm_bo_obtain(&vm->base, obj); if (IS_ERR(vm_bo)) { ret = PTR_ERR(vm_bo); @@ -200,11 +336,297 @@ err_free_vma: return ERR_PTR(ret); } +static int +msm_gem_vm_bo_validate(struct drm_gpuvm_bo *vm_bo, struct drm_exec *exec) +{ + struct drm_gem_object *obj = vm_bo->obj; + struct drm_gpuva *vma; + int ret; + + vm_dbg("validate: %p", obj); + + msm_gem_assert_locked(obj); + + drm_gpuvm_bo_for_each_va (vma, vm_bo) { + ret = msm_gem_pin_vma_locked(obj, vma); + if (ret) + return ret; + } + + return 0; +} + +struct op_arg { + unsigned flags; + struct msm_vm_bind_job *job; +}; + +static void +vm_op_enqueue(struct op_arg *arg, struct msm_vm_op _op) +{ + struct msm_vm_op *op = kmalloc(sizeof(*op), GFP_KERNEL); + *op = _op; + list_add_tail(&op->node, &arg->job->vm_ops); + + if (op->obj) + drm_gem_object_get(op->obj); +} + +static struct drm_gpuva * +vma_from_op(struct op_arg *arg, struct drm_gpuva_op_map *op) +{ + return msm_gem_vma_new(arg->job->vm, op->gem.obj, op->gem.offset, + op->va.addr, op->va.addr + op->va.range); +} + +static int +msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *arg) +{ + struct drm_gem_object *obj = op->map.gem.obj; + struct drm_gpuva *vma; + struct sg_table *sgt; + unsigned prot; + + vma = vma_from_op(arg, &op->map); + if (WARN_ON(IS_ERR(vma))) + return PTR_ERR(vma); + + vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj, + vma->va.addr, vma->va.range); + + vma->flags = ((struct op_arg *)arg)->flags; + + if (obj) { + sgt = to_msm_bo(obj)->sgt; + prot = msm_gem_prot(obj); + } else { + sgt = NULL; + prot = IOMMU_READ | IOMMU_WRITE; + } + + vm_op_enqueue(arg, (struct msm_vm_op){ + .op = MSM_VM_OP_MAP, + .map = { + .sgt = sgt, + .iova = vma->va.addr, + .range = vma->va.range, + .offset = vma->gem.offset, + .prot = prot, + }, + .obj = vma->gem.obj, + }); + + to_msm_vma(vma)->mapped = true; + + return 0; +} + +static int +msm_gem_vm_sm_step_remap(struct drm_gpuva_op *op, void *arg) +{ + struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job; + struct drm_gpuvm *vm = job->vm; + struct drm_gpuva *orig_vma = op->remap.unmap->va; + struct drm_gpuva *prev_vma = NULL, *next_vma = NULL; + struct 
drm_gpuvm_bo *vm_bo = orig_vma->vm_bo; + bool mapped = to_msm_vma(orig_vma)->mapped; + unsigned flags; + + vm_dbg("orig_vma: %p:%p:%p: %016llx %016llx", vm, orig_vma, + orig_vma->gem.obj, orig_vma->va.addr, orig_vma->va.range); + + if (mapped) { + uint64_t unmap_start, unmap_range; + + drm_gpuva_op_remap_to_unmap_range(&op->remap, &unmap_start, &unmap_range); + + vm_op_enqueue(arg, (struct msm_vm_op){ + .op = MSM_VM_OP_UNMAP, + .unmap = { + .iova = unmap_start, + .range = unmap_range, + }, + .obj = orig_vma->gem.obj, + }); + + /* + * Part of this GEM obj is still mapped, but we're going to kill the + * existing VMA and replace it with one or two new ones (ie. two if + * the unmapped range is in the middle of the existing (unmap) VMA). + * So just set the state to unmapped: + */ + to_msm_vma(orig_vma)->mapped = false; + } + + /* + * Hold a ref to the vm_bo between the msm_gem_vma_close() and the + * creation of the new prev/next vma's, in case the vm_bo is tracked + * in the VM's evict list: + */ + if (vm_bo) + drm_gpuvm_bo_get(vm_bo); + + /* + * The prev_vma and/or next_vma are replacing the unmapped vma, and + * therefore should preserve its flags: + */ + flags = orig_vma->flags; + + msm_gem_vma_close(orig_vma); + + if (op->remap.prev) { + prev_vma = vma_from_op(arg, op->remap.prev); + if (WARN_ON(IS_ERR(prev_vma))) + return PTR_ERR(prev_vma); + + vm_dbg("prev_vma: %p:%p: %016llx %016llx", vm, prev_vma, prev_vma->va.addr, prev_vma->va.range); + to_msm_vma(prev_vma)->mapped = mapped; + prev_vma->flags = flags; + } + + if (op->remap.next) { + next_vma = vma_from_op(arg, op->remap.next); + if (WARN_ON(IS_ERR(next_vma))) + return PTR_ERR(next_vma); + + vm_dbg("next_vma: %p:%p: %016llx %016llx", vm, next_vma, next_vma->va.addr, next_vma->va.range); + to_msm_vma(next_vma)->mapped = mapped; + next_vma->flags = flags; + } + + if (!mapped) + drm_gpuvm_bo_evict(vm_bo, true); + + /* Drop the previous ref: */ + drm_gpuvm_bo_put(vm_bo); + + return 0; +} + +static int +msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *arg) +{ + struct drm_gpuva *vma = op->unmap.va; + struct msm_gem_vma *msm_vma = to_msm_vma(vma); + + vm_dbg("%p:%p:%p: %016llx %016llx", vma->vm, vma, vma->gem.obj, + vma->va.addr, vma->va.range); + + if (!msm_vma->mapped) + goto out_close; + + vm_op_enqueue(arg, (struct msm_vm_op){ + .op = MSM_VM_OP_UNMAP, + .unmap = { + .iova = vma->va.addr, + .range = vma->va.range, + }, + .obj = vma->gem.obj, + }); + + msm_vma->mapped = false; + +out_close: + msm_gem_vma_close(vma); + + return 0; +} + static const struct drm_gpuvm_ops msm_gpuvm_ops = { .vm_free = msm_gem_vm_free, + .vm_bo_validate = msm_gem_vm_bo_validate, + .sm_step_map = msm_gem_vm_sm_step_map, + .sm_step_remap = msm_gem_vm_sm_step_remap, + .sm_step_unmap = msm_gem_vm_sm_step_unmap, }; +static struct dma_fence * +msm_vma_job_run(struct drm_sched_job *_job) +{ + struct msm_vm_bind_job *job = to_msm_vm_bind_job(_job); + struct msm_gem_vm *vm = to_msm_vm(job->vm); + struct drm_gem_object *obj; + int ret = vm->unusable ? -EINVAL : 0; + + vm_dbg(""); + + mutex_lock(&vm->mmu_lock); + vm->mmu->prealloc = &job->prealloc; + + while (!list_empty(&job->vm_ops)) { + struct msm_vm_op *op = + list_first_entry(&job->vm_ops, struct msm_vm_op, node); + + switch (op->op) { + case MSM_VM_OP_MAP: + /* + * On error, stop trying to map new things..
but we + * still want to process the unmaps (or in particular, + * the drm_gem_object_put()s) + */ + if (!ret) + ret = vm_map_op(vm, &op->map); + break; + case MSM_VM_OP_UNMAP: + vm_unmap_op(vm, &op->unmap); + break; + } + drm_gem_object_put(op->obj); + list_del(&op->node); + kfree(op); + } + + vm->mmu->prealloc = NULL; + mutex_unlock(&vm->mmu_lock); + + /* + * We failed to perform at least _some_ of the pgtable updates, so + * now the VM is in an undefined state. Game over! + */ + if (ret) + vm->unusable = true; + + job_foreach_bo (obj, job) { + msm_gem_lock(obj); + msm_gem_unpin_locked(obj); + msm_gem_unlock(obj); + } + + /* VM_BIND ops are synchronous, so no fence to wait on: */ + return NULL; +} + +static void +msm_vma_job_free(struct drm_sched_job *_job) +{ + struct msm_vm_bind_job *job = to_msm_vm_bind_job(_job); + struct msm_gem_vm *vm = to_msm_vm(job->vm); + struct drm_gem_object *obj; + + vm->mmu->funcs->prealloc_cleanup(vm->mmu, &job->prealloc); + + drm_sched_job_cleanup(_job); + + job_foreach_bo (obj, job) + drm_gem_object_put(obj); + + msm_submitqueue_put(job->queue); + dma_fence_put(job->fence); + + /* In error paths, we could have unexecuted ops: */ + while (!list_empty(&job->vm_ops)) { + struct msm_vm_op *op = + list_first_entry(&job->vm_ops, struct msm_vm_op, node); + list_del(&op->node); + kfree(op); + } + + kfree(job); +} + static const struct drm_sched_backend_ops msm_vm_bind_ops = { + .run_job = msm_vma_job_run, + .free_job = msm_vma_job_free }; /** @@ -268,6 +690,7 @@ msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name, drm_gem_object_put(dummy_gem); vm->mmu = mmu; + mutex_init(&vm->mmu_lock); vm->managed = managed; drm_mm_init(&vm->mm, va_start, va_size); @@ -280,7 +703,6 @@ err_free_dummy: err_free_vm: kfree(vm); return ERR_PTR(ret); - } /** @@ -296,6 +718,7 @@ msm_gem_vm_close(struct drm_gpuvm *gpuvm) { struct msm_gem_vm *vm = to_msm_vm(gpuvm); struct drm_gpuva *vma, *tmp; + struct drm_exec exec; /* * For kernel managed VMs, the VMAs are torn down when the handle is @@ -312,22 +735,656 @@ msm_gem_vm_close(struct drm_gpuvm *gpuvm) drm_sched_fini(&vm->sched); /* Tear down any remaining mappings: */ - dma_resv_lock(drm_gpuvm_resv(gpuvm), NULL); - drm_gpuvm_for_each_va_safe (vma, tmp, gpuvm) { - struct drm_gem_object *obj = vma->gem.obj; + drm_exec_init(&exec, 0, 2); + drm_exec_until_all_locked (&exec) { + drm_exec_lock_obj(&exec, drm_gpuvm_resv_obj(gpuvm)); + drm_exec_retry_on_contention(&exec); - if (obj && obj->resv != drm_gpuvm_resv(gpuvm)) { - drm_gem_object_get(obj); - msm_gem_lock(obj); + drm_gpuvm_for_each_va_safe (vma, tmp, gpuvm) { + struct drm_gem_object *obj = vma->gem.obj; + + /* + * MSM_BO_NO_SHARE objects share the same resv as the + * VM, in which case the obj is already locked: + */ + if (obj && (obj->resv == drm_gpuvm_resv(gpuvm))) + obj = NULL; + + if (obj) { + drm_exec_lock_obj(&exec, obj); + drm_exec_retry_on_contention(&exec); + } + + msm_gem_vma_unmap(vma); + msm_gem_vma_close(vma); + + if (obj) { + drm_exec_unlock_obj(&exec, obj); + } } + } + drm_exec_fini(&exec); +} + + +static struct msm_vm_bind_job * +vm_bind_job_create(struct drm_device *dev, struct drm_file *file, + struct msm_gpu_submitqueue *queue, uint32_t nr_ops) +{ + struct msm_vm_bind_job *job; + uint64_t sz; + int ret; + + sz = struct_size(job, ops, nr_ops); + + if (sz > SIZE_MAX) + return ERR_PTR(-ENOMEM); + + job = kzalloc(sz, GFP_KERNEL | __GFP_NOWARN); + if (!job) + return ERR_PTR(-ENOMEM); + + ret = drm_sched_job_init(&job->base, queue->entity, 1, 
queue, + file->client_id); + if (ret) { + kfree(job); + return ERR_PTR(ret); + } - msm_gem_vma_unmap(vma); - msm_gem_vma_close(vma); + job->vm = msm_context_vm(dev, queue->ctx); + job->queue = queue; + INIT_LIST_HEAD(&job->vm_ops); - if (obj && obj->resv != drm_gpuvm_resv(gpuvm)) { - msm_gem_unlock(obj); - drm_gem_object_put(obj); + return job; +} + +static bool invalid_alignment(uint64_t addr) +{ + /* + * Technically this is about GPU alignment, not CPU alignment. But + * I've not seen any qcom SoC where the SMMU does not support the + * CPU's smallest page size. + */ + return !PAGE_ALIGNED(addr); +} + +static int +lookup_op(struct msm_vm_bind_job *job, const struct drm_msm_vm_bind_op *op) +{ + struct drm_device *dev = job->vm->drm; + int i = job->nr_ops++; + int ret = 0; + + job->ops[i].op = op->op; + job->ops[i].handle = op->handle; + job->ops[i].obj_offset = op->obj_offset; + job->ops[i].iova = op->iova; + job->ops[i].range = op->range; + job->ops[i].flags = op->flags; + + if (op->flags & ~MSM_VM_BIND_OP_FLAGS) + ret = UERR(EINVAL, dev, "invalid flags: %x\n", op->flags); + + if (invalid_alignment(op->iova)) + ret = UERR(EINVAL, dev, "invalid address: %016llx\n", op->iova); + + if (invalid_alignment(op->obj_offset)) + ret = UERR(EINVAL, dev, "invalid bo_offset: %016llx\n", op->obj_offset); + + if (invalid_alignment(op->range)) + ret = UERR(EINVAL, dev, "invalid range: %016llx\n", op->range); + + if (!drm_gpuvm_range_valid(job->vm, op->iova, op->range)) + ret = UERR(EINVAL, dev, "invalid range: %016llx, %016llx\n", op->iova, op->range); + + /* + * MAP must specify a valid handle. But the handle MBZ for + * UNMAP or MAP_NULL. + */ + if (op->op == MSM_VM_BIND_OP_MAP) { + if (!op->handle) + ret = UERR(EINVAL, dev, "invalid handle\n"); + } else if (op->handle) { + ret = UERR(EINVAL, dev, "handle must be zero\n"); + } + + switch (op->op) { + case MSM_VM_BIND_OP_MAP: + case MSM_VM_BIND_OP_MAP_NULL: + case MSM_VM_BIND_OP_UNMAP: + break; + default: + ret = UERR(EINVAL, dev, "invalid op: %u\n", op->op); + break; + } + + return ret; +} + +/* + * ioctl parsing, parameter validation, and GEM handle lookup + */ +static int +vm_bind_job_lookup_ops(struct msm_vm_bind_job *job, struct drm_msm_vm_bind *args, + struct drm_file *file, int *nr_bos) +{ + struct drm_device *dev = job->vm->drm; + int ret = 0; + int cnt = 0; + + if (args->nr_ops == 1) { + /* Single op case, the op is inlined: */ + ret = lookup_op(job, &args->op); + } else { + for (unsigned i = 0; i < args->nr_ops; i++) { + struct drm_msm_vm_bind_op op; + void __user *userptr = + u64_to_user_ptr(args->ops + (i * sizeof(op))); + + /* make sure we don't have garbage flags, in case we hit + * error path before flags is initialized: + */ + job->ops[i].flags = 0; + + if (copy_from_user(&op, userptr, sizeof(op))) { + ret = -EFAULT; + break; + } + + ret = lookup_op(job, &op); + if (ret) + break; + } + } + + if (ret) { + job->nr_ops = 0; + goto out; + } + + spin_lock(&file->table_lock); + + for (unsigned i = 0; i < args->nr_ops; i++) { + struct drm_gem_object *obj; + + if (!job->ops[i].handle) { + job->ops[i].obj = NULL; + continue; + } + + /* + * normally use drm_gem_object_lookup(), but for bulk lookup + * all under single table_lock just hit object_idr directly: + */ + obj = idr_find(&file->object_idr, job->ops[i].handle); + if (!obj) { + ret = UERR(EINVAL, dev, "invalid handle %u at index %u\n", job->ops[i].handle, i); + goto out_unlock; + } + + drm_gem_object_get(obj); + + job->ops[i].obj = obj; + cnt++; + } + + *nr_bos = cnt; + +out_unlock: + 
spin_unlock(&file->table_lock); + +out: + return ret; +} + +static void +prealloc_count(struct msm_vm_bind_job *job, + struct msm_vm_bind_op *first, + struct msm_vm_bind_op *last) +{ + struct msm_mmu *mmu = to_msm_vm(job->vm)->mmu; + + if (!first) + return; + + uint64_t start_iova = first->iova; + uint64_t end_iova = last->iova + last->range; + + mmu->funcs->prealloc_count(mmu, &job->prealloc, start_iova, end_iova - start_iova); +} + +static bool +ops_are_same_pte(struct msm_vm_bind_op *first, struct msm_vm_bind_op *next) +{ + /* + * A last-level pagetable covers a 2MB range.. so we should merge + * two ops, from the PoV of figuring out how many pgtable pages to + * pre-allocate, if they land in the same 2MB range: + */ + uint64_t pte_mask = ~(SZ_2M - 1); + return ((first->iova + first->range) & pte_mask) == (next->iova & pte_mask); +} + +/* + * Determine the amount of memory to prealloc for pgtables. For sparse images, + * in particular, userspace plays some tricks with the order of page mappings + * to get the desired swizzle pattern, resulting in a large # of tiny MAP ops. + * So detect when multiple MAP operations are physically contiguous, and count + * them as a single mapping. Otherwise the prealloc_count() will not realize + * they can share pagetable pages and vastly overcount. + */ +static void +vm_bind_prealloc_count(struct msm_vm_bind_job *job) +{ + struct msm_vm_bind_op *first = NULL, *last = NULL; + + for (int i = 0; i < job->nr_ops; i++) { + struct msm_vm_bind_op *op = &job->ops[i]; + + /* We only care about MAP/MAP_NULL: */ + if (op->op == MSM_VM_BIND_OP_UNMAP) + continue; + + /* + * If op is contiguous with last in the current range, then + * it becomes the new last in the range and we continue + * looping: + */ + if (last && ops_are_same_pte(last, op)) { + last = op; + continue; + } + + /* + * If op is not contiguous with the current range, flush + * the current range and start anew: + */ + prealloc_count(job, first, last); + first = last = op; + } + + /* Flush the remaining range: */ + prealloc_count(job, first, last); +} + +/* + * Lock VM and GEM objects + */ +static int +vm_bind_job_lock_objects(struct msm_vm_bind_job *job, struct drm_exec *exec) +{ + int ret; + + /* Lock VM and objects: */ + drm_exec_until_all_locked (exec) { + ret = drm_exec_lock_obj(exec, drm_gpuvm_resv_obj(job->vm)); + drm_exec_retry_on_contention(exec); + if (ret) + return ret; + + for (unsigned i = 0; i < job->nr_ops; i++) { + const struct msm_vm_bind_op *op = &job->ops[i]; + + switch (op->op) { + case MSM_VM_BIND_OP_UNMAP: + ret = drm_gpuvm_sm_unmap_exec_lock(job->vm, exec, + op->iova, + op->range); + break; + case MSM_VM_BIND_OP_MAP: + case MSM_VM_BIND_OP_MAP_NULL: + ret = drm_gpuvm_sm_map_exec_lock(job->vm, exec, 1, + op->iova, op->range, + op->obj, op->obj_offset); + break; + default: + /* + * lookup_op() should have already thrown an error for + * invalid ops + */ + WARN_ON("unreachable"); + } + + drm_exec_retry_on_contention(exec); + if (ret) + return ret; + } + } + + return 0; +} + +/* + * Pin GEM objects, ensuring that we have backing pages. Pinning will move + * the object to the pinned LRU so that the shrinker knows to first consider + * other objects for evicting.
+ */ +static int +vm_bind_job_pin_objects(struct msm_vm_bind_job *job) +{ + struct drm_gem_object *obj; + + /* + * First loop, before taking the LRU lock, triggers any needed + * get_pages() calls up front, so that they do not happen while + * the LRU lock is held in the second loop below + */ + job_foreach_bo (obj, job) { + struct page **pages; + + pages = msm_gem_get_pages_locked(obj, MSM_MADV_WILLNEED); + if (IS_ERR(pages)) + return PTR_ERR(pages); + } + + struct msm_drm_private *priv = job->vm->drm->dev_private; + + /* + * A second loop while holding the LRU lock (a) avoids acquiring/dropping + * the LRU lock for each individual bo, and (b) avoids holding the + * LRU lock while calling msm_gem_pin_vma_locked() (which could trigger + * get_pages() which could trigger reclaim.. and if we held the LRU lock + * could trigger deadlock with the shrinker). + */ + mutex_lock(&priv->lru.lock); + job_foreach_bo (obj, job) + msm_gem_pin_obj_locked(obj); + mutex_unlock(&priv->lru.lock); + + job->bos_pinned = true; + + return 0; +} + +/* + * Unpin GEM objects. Normally this is done after the bind job is run. + */ +static void +vm_bind_job_unpin_objects(struct msm_vm_bind_job *job) +{ + struct drm_gem_object *obj; + + if (!job->bos_pinned) + return; + + job_foreach_bo (obj, job) + msm_gem_unpin_locked(obj); + + job->bos_pinned = false; +} + +/* + * Pre-allocate pgtable memory, and translate the VM bind requests into a + * sequence of pgtable updates to be applied asynchronously. + */ +static int +vm_bind_job_prepare(struct msm_vm_bind_job *job) +{ + struct msm_gem_vm *vm = to_msm_vm(job->vm); + struct msm_mmu *mmu = vm->mmu; + int ret; + + ret = mmu->funcs->prealloc_allocate(mmu, &job->prealloc); + if (ret) + return ret; + + for (unsigned i = 0; i < job->nr_ops; i++) { + const struct msm_vm_bind_op *op = &job->ops[i]; + struct op_arg arg = { + .job = job, + }; + + switch (op->op) { + case MSM_VM_BIND_OP_UNMAP: + ret = drm_gpuvm_sm_unmap(job->vm, &arg, op->iova, + op->range); + break; + case MSM_VM_BIND_OP_MAP: + if (op->flags & MSM_VM_BIND_OP_DUMP) + arg.flags |= MSM_VMA_DUMP; + fallthrough; + case MSM_VM_BIND_OP_MAP_NULL: + ret = drm_gpuvm_sm_map(job->vm, &arg, op->iova, + op->range, op->obj, op->obj_offset); + break; + default: + /* + * lookup_op() should have already thrown an error for + * invalid ops + */ + BUG_ON("unreachable"); + } + + if (ret) { + /* + * If we've already started modifying the vm, we can't + * adequately describe to userspace the intermediate + * state the vm is in. So throw up our hands! + */ + if (i > 0) + vm->unusable = true; + return ret; + } + } + + return 0; +} + +/* + * Attach fences to the GEM objects being bound. This will signify to + * the shrinker that they are busy even after dropping the locks (ie.
+ * drm_exec_fini()) + */ +static void +vm_bind_job_attach_fences(struct msm_vm_bind_job *job) +{ + for (unsigned i = 0; i < job->nr_ops; i++) { + struct drm_gem_object *obj = job->ops[i].obj; + + if (!obj) + continue; + + dma_resv_add_fence(obj->resv, job->fence, + DMA_RESV_USAGE_KERNEL); + } +} + +int +msm_ioctl_vm_bind(struct drm_device *dev, void *data, struct drm_file *file) +{ + struct msm_drm_private *priv = dev->dev_private; + struct drm_msm_vm_bind *args = data; + struct msm_context *ctx = file->driver_priv; + struct msm_vm_bind_job *job = NULL; + struct msm_gpu *gpu = priv->gpu; + struct msm_gpu_submitqueue *queue; + struct msm_syncobj_post_dep *post_deps = NULL; + struct drm_syncobj **syncobjs_to_reset = NULL; + struct sync_file *sync_file = NULL; + struct dma_fence *fence; + int out_fence_fd = -1; + int ret, nr_bos = 0; + unsigned i; + + if (!gpu) + return -ENXIO; + + /* + * Maybe we could allow just UNMAP ops? OTOH userspace should just + * immediately close the device file and all will be torn down. + */ + if (to_msm_vm(ctx->vm)->unusable) + return UERR(EPIPE, dev, "context is unusable"); + + /* + * Technically, you cannot create a VM_BIND submitqueue in the first + * place, if you haven't opted in to VM_BIND context. But it is + * cleaner / less confusing, to check this case directly. + */ + if (!msm_context_is_vmbind(ctx)) + return UERR(EINVAL, dev, "context does not support vmbind"); + + if (args->flags & ~MSM_VM_BIND_FLAGS) + return UERR(EINVAL, dev, "invalid flags"); + + queue = msm_submitqueue_get(ctx, args->queue_id); + if (!queue) + return -ENOENT; + + if (!(queue->flags & MSM_SUBMITQUEUE_VM_BIND)) { + ret = UERR(EINVAL, dev, "Invalid queue type"); + goto out_post_unlock; + } + + if (args->flags & MSM_VM_BIND_FENCE_FD_OUT) { + out_fence_fd = get_unused_fd_flags(O_CLOEXEC); + if (out_fence_fd < 0) { + ret = out_fence_fd; + goto out_post_unlock; } } - dma_resv_unlock(drm_gpuvm_resv(gpuvm)); + + job = vm_bind_job_create(dev, file, queue, args->nr_ops); + if (IS_ERR(job)) { + ret = PTR_ERR(job); + goto out_post_unlock; + } + + ret = mutex_lock_interruptible(&queue->lock); + if (ret) + goto out_post_unlock; + + if (args->flags & MSM_VM_BIND_FENCE_FD_IN) { + struct dma_fence *in_fence; + + in_fence = sync_file_get_fence(args->fence_fd); + + if (!in_fence) { + ret = UERR(EINVAL, dev, "invalid in-fence"); + goto out_unlock; + } + + ret = drm_sched_job_add_dependency(&job->base, in_fence); + if (ret) + goto out_unlock; + } + + if (args->in_syncobjs > 0) { + syncobjs_to_reset = msm_syncobj_parse_deps(dev, &job->base, + file, args->in_syncobjs, + args->nr_in_syncobjs, + args->syncobj_stride); + if (IS_ERR(syncobjs_to_reset)) { + ret = PTR_ERR(syncobjs_to_reset); + goto out_unlock; + } + } + + if (args->out_syncobjs > 0) { + post_deps = msm_syncobj_parse_post_deps(dev, file, + args->out_syncobjs, + args->nr_out_syncobjs, + args->syncobj_stride); + if (IS_ERR(post_deps)) { + ret = PTR_ERR(post_deps); + goto out_unlock; + } + } + + ret = vm_bind_job_lookup_ops(job, args, file, &nr_bos); + if (ret) + goto out_unlock; + + vm_bind_prealloc_count(job); + + struct drm_exec exec; + unsigned flags = DRM_EXEC_IGNORE_DUPLICATES | DRM_EXEC_INTERRUPTIBLE_WAIT; + drm_exec_init(&exec, flags, nr_bos + 1); + + ret = vm_bind_job_lock_objects(job, &exec); + if (ret) + goto out; + + ret = vm_bind_job_pin_objects(job); + if (ret) + goto out; + + ret = vm_bind_job_prepare(job); + if (ret) + goto out; + + drm_sched_job_arm(&job->base); + + job->fence = dma_fence_get(&job->base.s_fence->finished); + + 
if (args->flags & MSM_VM_BIND_FENCE_FD_OUT) { + sync_file = sync_file_create(job->fence); + if (!sync_file) { + ret = -ENOMEM; + } else { + fd_install(out_fence_fd, sync_file->file); + args->fence_fd = out_fence_fd; + } + } + + if (ret) + goto out; + + vm_bind_job_attach_fences(job); + + /* + * The job can be free'd (and fence unref'd) at any point after + * drm_sched_entity_push_job(), so we need to hold our own ref + */ + fence = dma_fence_get(job->fence); + + drm_sched_entity_push_job(&job->base); + + msm_syncobj_reset(syncobjs_to_reset, args->nr_in_syncobjs); + msm_syncobj_process_post_deps(post_deps, args->nr_out_syncobjs, fence); + + dma_fence_put(fence); + +out: + if (ret) + vm_bind_job_unpin_objects(job); + + drm_exec_fini(&exec); +out_unlock: + mutex_unlock(&queue->lock); +out_post_unlock: + if (ret && (out_fence_fd >= 0)) { + put_unused_fd(out_fence_fd); + if (sync_file) + fput(sync_file->file); + } + + if (!IS_ERR_OR_NULL(job)) { + if (ret) + msm_vma_job_free(&job->base); + } else { + /* + * If the submit hasn't yet taken ownership of the queue + * then we need to drop the reference ourself: + */ + msm_submitqueue_put(queue); + } + + if (!IS_ERR_OR_NULL(post_deps)) { + for (i = 0; i < args->nr_out_syncobjs; ++i) { + kfree(post_deps[i].chain); + drm_syncobj_put(post_deps[i].syncobj); + } + kfree(post_deps); + } + + if (!IS_ERR_OR_NULL(syncobjs_to_reset)) { + for (i = 0; i < args->nr_in_syncobjs; ++i) { + if (syncobjs_to_reset[i]) + drm_syncobj_put(syncobjs_to_reset[i]); + } + kfree(syncobjs_to_reset); + } + + return ret; } diff --git a/include/uapi/drm/msm_drm.h b/include/uapi/drm/msm_drm.h index 6d6cd1219926..5c67294edc95 100644 --- a/include/uapi/drm/msm_drm.h +++ b/include/uapi/drm/msm_drm.h @@ -272,7 +272,10 @@ struct drm_msm_gem_submit_cmd { __u32 size; /* in, cmdstream size */ __u32 pad; __u32 nr_relocs; /* in, number of submit_reloc's */ - __u64 relocs; /* in, ptr to array of submit_reloc's */ + union { + __u64 relocs; /* in, ptr to array of submit_reloc's */ + __u64 iova; /* cmdstream address (for VM_BIND contexts) */ + }; }; /* Each buffer referenced elsewhere in the cmdstream submit (ie. the @@ -339,7 +342,74 @@ struct drm_msm_gem_submit { __u32 nr_out_syncobjs; /* in, number of entries in out_syncobj. */ __u32 syncobj_stride; /* in, stride of syncobj arrays. */ __u32 pad; /*in, reserved for future use, always 0. 
*/ +}; + +#define MSM_VM_BIND_OP_UNMAP 0 +#define MSM_VM_BIND_OP_MAP 1 +#define MSM_VM_BIND_OP_MAP_NULL 2 + +#define MSM_VM_BIND_OP_DUMP 1 +#define MSM_VM_BIND_OP_FLAGS ( \ + MSM_VM_BIND_OP_DUMP | \ + 0) +/** + * struct drm_msm_vm_bind_op - bind/unbind op to run + */ +struct drm_msm_vm_bind_op { + /** @op: one of MSM_VM_BIND_OP_x */ + __u32 op; + /** @handle: GEM object handle, MBZ for UNMAP or MAP_NULL */ + __u32 handle; + /** @obj_offset: Offset into GEM object, MBZ for UNMAP or MAP_NULL */ + __u64 obj_offset; + /** @iova: Address to operate on */ + __u64 iova; + /** @range: Number of bytes to map/unmap */ + __u64 range; + /** @flags: Bitmask of MSM_VM_BIND_OP_FLAG_x */ + __u32 flags; + /** @pad: MBZ */ + __u32 pad; +}; + +#define MSM_VM_BIND_FENCE_FD_IN 0x00000001 +#define MSM_VM_BIND_FENCE_FD_OUT 0x00000002 +#define MSM_VM_BIND_FLAGS ( \ + MSM_VM_BIND_FENCE_FD_IN | \ + MSM_VM_BIND_FENCE_FD_OUT | \ + 0) + +/** + * struct drm_msm_vm_bind - Input of &DRM_IOCTL_MSM_VM_BIND + */ +struct drm_msm_vm_bind { + /** @flags: in, bitmask of MSM_VM_BIND_x */ + __u32 flags; + /** @nr_ops: the number of bind ops in this ioctl */ + __u32 nr_ops; + /** @fence_fd: in/out fence fd (see MSM_VM_BIND_FENCE_FD_IN/OUT) */ + __s32 fence_fd; + /** @queue_id: in, submitqueue id */ + __u32 queue_id; + /** @in_syncobjs: in, ptr to array of drm_msm_gem_syncobj */ + __u64 in_syncobjs; + /** @out_syncobjs: in, ptr to array of drm_msm_gem_syncobj */ + __u64 out_syncobjs; + /** @nr_in_syncobjs: in, number of entries in in_syncobj */ + __u32 nr_in_syncobjs; + /** @nr_out_syncobjs: in, number of entries in out_syncobj */ + __u32 nr_out_syncobjs; + /** @syncobj_stride: in, stride of syncobj arrays */ + __u32 syncobj_stride; + /** @op_stride: sizeof each struct drm_msm_vm_bind_op in @ops */ + __u32 op_stride; + union { + /** @op: used if nr_ops == 1 */ + struct drm_msm_vm_bind_op op; + /** @ops: userptr to array of drm_msm_vm_bind_op if nr_ops > 1 */ + __u64 ops; + }; +}; #define MSM_WAIT_FENCE_BOOST 0x00000001 @@ -435,6 +505,7 @@ struct drm_msm_submitqueue_query { #define DRM_MSM_SUBMITQUEUE_NEW 0x0A #define DRM_MSM_SUBMITQUEUE_CLOSE 0x0B #define DRM_MSM_SUBMITQUEUE_QUERY 0x0C +#define DRM_MSM_VM_BIND 0x0D #define DRM_IOCTL_MSM_GET_PARAM DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_GET_PARAM, struct drm_msm_param) #define DRM_IOCTL_MSM_SET_PARAM DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SET_PARAM, struct drm_msm_param) @@ -448,6 +519,7 @@ struct drm_msm_submitqueue_query { #define DRM_IOCTL_MSM_SUBMITQUEUE_NEW DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_NEW, struct drm_msm_submitqueue) #define DRM_IOCTL_MSM_SUBMITQUEUE_CLOSE DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_CLOSE, __u32) #define DRM_IOCTL_MSM_SUBMITQUEUE_QUERY DRM_IOW (DRM_COMMAND_BASE + DRM_MSM_SUBMITQUEUE_QUERY, struct drm_msm_submitqueue_query) +#define DRM_IOCTL_MSM_VM_BIND DRM_IOWR(DRM_COMMAND_BASE + DRM_MSM_VM_BIND, struct drm_msm_vm_bind) #if defined(__cplusplus) } -- cgit v1.2.3 From 9edc52967cc7fcd430749cdd8866aa68f25422bf Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Sun, 29 Jun 2025 13:13:19 -0700 Subject: drm/msm: Add VM logging for VM_BIND updates When userspace opts in to VM_BIND, the submit no longer holds references keeping the VMA alive. This makes it difficult to distinguish between UMD/KMD/app bugs. So add a debug option for logging the most recent VM updates and capturing these in GPU devcoredumps. The submitqueue id is also captured; a value of zero means the operation did not go via a submitqueue (ie.
comes from msm_gem_vm_close() tearing down the remaining mappings when the device file is closed. Signed-off-by: Rob Clark Signed-off-by: Rob Clark Tested-by: Antonino Maniscalco Reviewed-by: Antonino Maniscalco Patchwork: https://patchwork.freedesktop.org/patch/661518/ --- drivers/gpu/drm/msm/adreno/adreno_gpu.c | 11 +++ drivers/gpu/drm/msm/msm_gem.h | 24 +++++++ drivers/gpu/drm/msm/msm_gem_vma.c | 124 +++++++++++++++++++++++++++++--- drivers/gpu/drm/msm/msm_gpu.c | 52 ++++++++++++-- drivers/gpu/drm/msm/msm_gpu.h | 4 ++ 5 files changed, 202 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/msm/msm_gem.h') diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c index ff25e3dada04..53cbfa5a507b 100644 --- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c +++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c @@ -833,6 +833,7 @@ void adreno_gpu_state_destroy(struct msm_gpu_state *state) for (i = 0; state->bos && i < state->nr_bos; i++) kvfree(state->bos[i].data); + kfree(state->vm_logs); kfree(state->bos); kfree(state->comm); kfree(state->cmd); @@ -973,6 +974,16 @@ void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state, info->ptes[0], info->ptes[1], info->ptes[2], info->ptes[3]); } + if (state->vm_logs) { + drm_puts(p, "vm-log:\n"); + for (i = 0; i < state->nr_vm_logs; i++) { + struct msm_gem_vm_log_entry *e = &state->vm_logs[i]; + drm_printf(p, " - %s:%d: 0x%016llx-0x%016llx\n", + e->op, e->queue_id, e->iova, + e->iova + e->range); + } + } + drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status); drm_puts(p, "ringbuffer:\n"); diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index ee464e315643..062d1b5477d6 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -24,6 +24,20 @@ #define MSM_BO_STOLEN 0x10000000 /* try to use stolen/splash memory */ #define MSM_BO_MAP_PRIV 0x20000000 /* use IOMMU_PRIV when mapping */ +/** + * struct msm_gem_vm_log_entry - An entry in the VM log + * + * For userspace managed VMs, a log of recent VM updates is tracked and + * captured in GPU devcore dumps, to aid debugging issues caused by (for + * example) incorrectly synchronized VM updates + */ +struct msm_gem_vm_log_entry { + const char *op; + uint64_t iova; + uint64_t range; + int queue_id; +}; + /** * struct msm_gem_vm - VM object * @@ -85,6 +99,15 @@ struct msm_gem_vm { /** @last_fence: Fence for last pending work scheduled on the VM */ struct dma_fence *last_fence; + /** @log: A log of recent VM updates */ + struct msm_gem_vm_log_entry *log; + + /** @log_shift: length of @log is (1 << @log_shift) */ + uint32_t log_shift; + + /** @log_idx: index of next @log entry to write */ + uint32_t log_idx; + /** @faults: the number of GPU hangs associated with this address space */ int faults; @@ -115,6 +138,7 @@ msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name, u64 va_start, u64 va_size, bool managed); void msm_gem_vm_close(struct drm_gpuvm *gpuvm); +void msm_gem_vm_unusable(struct drm_gpuvm *gpuvm); struct msm_fence_context; diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index 69e33d4b278b..eccefdb9ebc3 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -17,6 +17,10 @@ #define vm_dbg(fmt, ...) 
pr_debug("%s:%d: "fmt"\n", __func__, __LINE__, ##__VA_ARGS__) +static uint vm_log_shift = 0; +MODULE_PARM_DESC(vm_log_shift, "Log2 of the length of the VM op log"); +module_param_named(vm_log_shift, vm_log_shift, uint, 0600); + /** * struct msm_vm_map_op - create new pgtable mapping */ @@ -31,6 +35,13 @@ struct msm_vm_map_op { struct sg_table *sgt; /** @prot: the mapping protection flags */ int prot; + + /** + * @queue_id: The id of the submitqueue the operation is performed + * on, or zero for (in particular) UNMAP ops triggered outside of + * a submitqueue (ie. process cleanup) + */ + int queue_id; }; /** @@ -41,6 +52,13 @@ struct msm_vm_unmap_op { uint64_t iova; /** @range: size of region to unmap */ uint64_t range; + + /** + * @queue_id: The id of the submitqueue the operation is performed + * on, or zero for (in particular) UNMAP ops triggered outside of + * a submitqueue (ie. process cleanup) + */ + int queue_id; }; /** @@ -144,16 +162,87 @@ msm_gem_vm_free(struct drm_gpuvm *gpuvm) vm->mmu->funcs->destroy(vm->mmu); dma_fence_put(vm->last_fence); put_pid(vm->pid); + kfree(vm->log); kfree(vm); } +/** + * msm_gem_vm_unusable() - Mark a VM as unusable + * @gpuvm: the VM to mark unusable + */ +void +msm_gem_vm_unusable(struct drm_gpuvm *gpuvm) +{ + struct msm_gem_vm *vm = to_msm_vm(gpuvm); + uint32_t vm_log_len = (1 << vm->log_shift); + uint32_t vm_log_mask = vm_log_len - 1; + uint32_t nr_vm_logs; + int first; + + vm->unusable = true; + + /* Bail if no log, or empty log: */ + if (!vm->log || !vm->log[0].op) + return; + + mutex_lock(&vm->mmu_lock); + + /* + * log_idx is the next entry to overwrite, meaning it is the oldest, or + * first, entry (other than the special case handled below where the + * log hasn't wrapped around yet) + */ + first = vm->log_idx; + + if (!vm->log[first].op) { + /* + * If the next log entry has not been written yet, then only + * entries 0 to idx-1 are valid (ie.
we haven't wrapped around + * yet) + */ + nr_vm_logs = first; + first = 0; + } else { + nr_vm_logs = vm_log_len; + } + + pr_err("vm-log:\n"); + for (int i = 0; i < nr_vm_logs; i++) { + int idx = (i + first) & vm_log_mask; + struct msm_gem_vm_log_entry *e = &vm->log[idx]; + pr_err(" - %s:%d: 0x%016llx-0x%016llx\n", + e->op, e->queue_id, e->iova, + e->iova + e->range); + } + + mutex_unlock(&vm->mmu_lock); +} + static void -vm_unmap_op(struct msm_gem_vm *vm, const struct msm_vm_unmap_op *op) +vm_log(struct msm_gem_vm *vm, const char *op, uint64_t iova, uint64_t range, int queue_id) { + int idx; + if (!vm->managed) lockdep_assert_held(&vm->mmu_lock); - vm_dbg("%p: %016llx %016llx", vm, op->iova, op->iova + op->range); + vm_dbg("%s:%p:%d: %016llx %016llx", op, vm, queue_id, iova, iova + range); + + if (!vm->log) + return; + + idx = vm->log_idx; + vm->log[idx].op = op; + vm->log[idx].iova = iova; + vm->log[idx].range = range; + vm->log[idx].queue_id = queue_id; + vm->log_idx = (vm->log_idx + 1) & ((1 << vm->log_shift) - 1); +} + +static void +vm_unmap_op(struct msm_gem_vm *vm, const struct msm_vm_unmap_op *op) +{ + vm_log(vm, "unmap", op->iova, op->range, op->queue_id); vm->mmu->funcs->unmap(vm->mmu, op->iova, op->range); } @@ -161,10 +250,7 @@ vm_unmap_op(struct msm_gem_vm *vm, const struct msm_vm_unmap_op *op) static int vm_map_op(struct msm_gem_vm *vm, const struct msm_vm_map_op *op) { - if (!vm->managed) - lockdep_assert_held(&vm->mmu_lock); - - vm_dbg("%p: %016llx %016llx", vm, op->iova, op->iova + op->range); + vm_log(vm, "map", op->iova, op->range, op->queue_id); return vm->mmu->funcs->map(vm->mmu, op->iova, op->sgt, op->offset, op->range, op->prot); @@ -382,6 +468,7 @@ vma_from_op(struct op_arg *arg, struct drm_gpuva_op_map *op) static int msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *arg) { + struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job; struct drm_gem_object *obj = op->map.gem.obj; struct drm_gpuva *vma; struct sg_table *sgt; @@ -412,6 +499,7 @@ msm_gem_vm_sm_step_map(struct drm_gpuva_op *op, void *arg) .range = vma->va.range, .offset = vma->gem.offset, .prot = prot, + .queue_id = job->queue->id, }, .obj = vma->gem.obj, }); @@ -445,6 +533,7 @@ msm_gem_vm_sm_step_remap(struct drm_gpuva_op *op, void *arg) .unmap = { .iova = unmap_start, .range = unmap_range, + .queue_id = job->queue->id, }, .obj = orig_vma->gem.obj, }); @@ -506,6 +595,7 @@ msm_gem_vm_sm_step_remap(struct drm_gpuva_op *op, void *arg) static int msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *arg) { + struct msm_vm_bind_job *job = ((struct op_arg *)arg)->job; struct drm_gpuva *vma = op->unmap.va; struct msm_gem_vma *msm_vma = to_msm_vma(vma); @@ -520,6 +610,7 @@ msm_gem_vm_sm_step_unmap(struct drm_gpuva_op *op, void *arg) .unmap = { .iova = vma->va.addr, .range = vma->va.range, + .queue_id = job->queue->id, }, .obj = vma->gem.obj, }); @@ -584,7 +675,7 @@ msm_vma_job_run(struct drm_sched_job *_job) * now the VM is in an undefined state. Game over! */ if (ret) - vm->unusable = true; + msm_gem_vm_unusable(job->vm); job_foreach_bo (obj, job) { msm_gem_lock(obj); @@ -695,6 +786,23 @@ msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name, drm_mm_init(&vm->mm, va_start, va_size); + /* + * We don't really need vm log for kernel managed VMs, as the kernel + * is responsible for ensuring that GEM objs are mapped if they are + * used by a submit. Furthermore we piggyback on mmu_lock to serialize + * access to the log.
+ * + * Limit the max log_shift to 8 to prevent userspace from asking us + * for an unreasonable log size. + */ + if (!managed) + vm->log_shift = MIN(vm_log_shift, 8); + + if (vm->log_shift) { + vm->log = kmalloc_array(1 << vm->log_shift, sizeof(vm->log[0]), + GFP_KERNEL | __GFP_ZERO); + } + return &vm->base; err_free_dummy: @@ -1162,7 +1270,7 @@ vm_bind_job_prepare(struct msm_vm_bind_job *job) * state the vm is in. So throw up our hands! */ if (i > 0) - vm->unusable = true; + msm_gem_vm_unusable(job->vm); return ret; } } diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c index ccd9ebfc5c7c..c317b25a8162 100644 --- a/drivers/gpu/drm/msm/msm_gpu.c +++ b/drivers/gpu/drm/msm/msm_gpu.c @@ -259,9 +259,6 @@ static void crashstate_get_bos(struct msm_gpu_state *state, struct msm_gem_submi { extern bool rd_full; - if (!submit) - return; - if (msm_context_is_vmbind(submit->queue->ctx)) { struct drm_exec exec; struct drm_gpuva *vma; @@ -318,6 +315,48 @@ static void crashstate_get_bos(struct msm_gpu_state *state, struct msm_gem_submi } } +static void crashstate_get_vm_logs(struct msm_gpu_state *state, struct msm_gem_vm *vm) +{ + uint32_t vm_log_len = (1 << vm->log_shift); + uint32_t vm_log_mask = vm_log_len - 1; + int first; + + /* Bail if no log, or empty log: */ + if (!vm->log || !vm->log[0].op) + return; + + mutex_lock(&vm->mmu_lock); + + /* + * log_idx is the next entry to overwrite, meaning it is the oldest, or + * first, entry (other than the special case handled below where the + * log hasn't wrapped around yet) + */ + first = vm->log_idx; + + if (!vm->log[first].op) { + /* + * If the next log entry has not been written yet, then only + * entries 0 to idx-1 are valid (ie. we haven't wrapped around + * yet) + */ + state->nr_vm_logs = first; + first = 0; + } else { + state->nr_vm_logs = vm_log_len; + } + + state->vm_logs = kmalloc_array( + state->nr_vm_logs, sizeof(vm->log[0]), GFP_KERNEL); + if (!state->vm_logs) { + state->nr_vm_logs = 0; + mutex_unlock(&vm->mmu_lock); + return; + } + for (int i = 0; i < state->nr_vm_logs; i++) { + int idx = (i + first) & vm_log_mask; + + state->vm_logs[i] = vm->log[idx]; + } + + mutex_unlock(&vm->mmu_lock); +} + static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, struct msm_gem_submit *submit, struct msm_gpu_fault_info *fault_info, char *comm, char *cmd) @@ -351,7 +390,10 @@ static void msm_gpu_crashstate_capture(struct msm_gpu *gpu, msm_iommu_pagetable_walk(mmu, info->iova, info->ptes); } - crashstate_get_bos(state, submit); + if (submit) { + crashstate_get_vm_logs(state, to_msm_vm(submit->vm)); + crashstate_get_bos(state, submit); + } /* Set the active crash state to be dumped on failure */ gpu->crashstate = state; @@ -452,7 +494,7 @@ static void recover_worker(struct kthread_work *work) * VM_BIND) */ if (!vm->managed) - vm->unusable = true; + msm_gem_vm_unusable(submit->vm); } get_comm_cmdline(submit, &comm, &cmd); diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h index 5705e8d4e6b9..b2a96544f92a 100644 --- a/drivers/gpu/drm/msm/msm_gpu.h +++ b/drivers/gpu/drm/msm/msm_gpu.h @@ -20,6 +20,7 @@ #include "msm_gem.h" struct msm_gem_submit; +struct msm_gem_vm_log_entry; struct msm_gpu_perfcntr; struct msm_gpu_state; struct msm_context; @@ -603,6 +604,9 @@ struct msm_gpu_state { struct msm_gpu_fault_info fault_info; + int nr_vm_logs; + struct msm_gem_vm_log_entry *vm_logs; + int nr_bos; struct msm_gpu_state_bo *bos; }; -- cgit v1.2.3 From 0b4339c55ef59ff753c8d505596961edd7b887da Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Sun, 29 Jun 2025 13:13:20 -0700 Subject: drm/msm: Add VMA unmap
reason Make the VM log a bit more useful by providing a reason for the unmap (ie. closing VM vs evict/purge, etc) Signed-off-by: Rob Clark Signed-off-by: Rob Clark Tested-by: Antonino Maniscalco Reviewed-by: Antonino Maniscalco Patchwork: https://patchwork.freedesktop.org/patch/661527/ --- drivers/gpu/drm/msm/msm_gem.c | 20 +++++++++++--------- drivers/gpu/drm/msm/msm_gem.h | 2 +- drivers/gpu/drm/msm/msm_gem_vma.c | 15 ++++++++++++--- 3 files changed, 24 insertions(+), 13 deletions(-) (limited to 'drivers/gpu/drm/msm/msm_gem.h') diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 77fdf53d3e33..e3ccda777ef3 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -43,7 +43,8 @@ static int msm_gem_open(struct drm_gem_object *obj, struct drm_file *file) return 0; } -static void put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm, bool close); +static void put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm, + bool close, const char *reason); static void detach_vm(struct drm_gem_object *obj, struct drm_gpuvm *vm) { @@ -57,7 +58,7 @@ static void detach_vm(struct drm_gem_object *obj, struct drm_gpuvm *vm) drm_gpuvm_bo_for_each_va (vma, vm_bo) { if (vma->vm != vm) continue; - msm_gem_vma_unmap(vma); + msm_gem_vma_unmap(vma, "detach"); msm_gem_vma_close(vma); break; } @@ -97,7 +98,7 @@ static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file) MAX_SCHEDULE_TIMEOUT); msm_gem_lock_vm_and_obj(&exec, obj, ctx->vm); - put_iova_spaces(obj, ctx->vm, true); + put_iova_spaces(obj, ctx->vm, true, "close"); detach_vm(obj, ctx->vm); drm_exec_fini(&exec); /* drop locks */ } @@ -425,7 +426,8 @@ static struct drm_gpuva *lookup_vma(struct drm_gem_object *obj, * mapping. */ static void -put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm, bool close) +put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm, + bool close, const char *reason) { struct drm_gpuvm_bo *vm_bo, *tmp; @@ -440,7 +442,7 @@ put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm, bool close) drm_gpuvm_bo_get(vm_bo); drm_gpuvm_bo_for_each_va_safe (vma, vmatmp, vm_bo) { - msm_gem_vma_unmap(vma); + msm_gem_vma_unmap(vma, reason); if (close) msm_gem_vma_close(vma); } @@ -617,7 +619,7 @@ static int clear_iova(struct drm_gem_object *obj, if (!vma) return 0; - msm_gem_vma_unmap(vma); + msm_gem_vma_unmap(vma, NULL); msm_gem_vma_close(vma); return 0; @@ -829,7 +831,7 @@ void msm_gem_purge(struct drm_gem_object *obj) GEM_WARN_ON(!is_purgeable(msm_obj)); /* Get rid of any iommu mapping(s): */ - put_iova_spaces(obj, NULL, false); + put_iova_spaces(obj, NULL, false, "purge"); msm_gem_vunmap(obj); @@ -867,7 +869,7 @@ void msm_gem_evict(struct drm_gem_object *obj) GEM_WARN_ON(is_unevictable(msm_obj)); /* Get rid of any iommu mapping(s): */ - put_iova_spaces(obj, NULL, false); + put_iova_spaces(obj, NULL, false, "evict"); drm_vma_node_unmap(&obj->vma_node, dev->anon_inode->i_mapping); @@ -1079,7 +1081,7 @@ static void msm_gem_free_object(struct drm_gem_object *obj) drm_exec_retry_on_contention(&exec); } } - put_iova_spaces(obj, NULL, true); + put_iova_spaces(obj, NULL, true, "free"); drm_exec_fini(&exec); /* drop locks */ } diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 062d1b5477d6..ce5e90ba935b 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -168,7 +168,7 @@ struct msm_gem_vma { struct drm_gpuva * msm_gem_vma_new(struct drm_gpuvm *vm, struct drm_gem_object *obj, 
u64 offset, u64 range_start, u64 range_end); -void msm_gem_vma_unmap(struct drm_gpuva *vma); +void msm_gem_vma_unmap(struct drm_gpuva *vma, const char *reason); int msm_gem_vma_map(struct drm_gpuva *vma, int prot, struct sg_table *sgt); void msm_gem_vma_close(struct drm_gpuva *vma); diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index eccefdb9ebc3..02491e7dd816 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -53,6 +53,9 @@ struct msm_vm_unmap_op { /** @range: size of region to unmap */ uint64_t range; + /** @reason: The reason for the unmap */ + const char *reason; + /** * @queue_id: The id of the submitqueue the operation is performed * on, or zero for (in particular) UNMAP ops triggered outside of @@ -242,7 +245,12 @@ vm_log(struct msm_gem_vm *vm, const char *op, uint64_t iova, uint64_t range, int static void vm_unmap_op(struct msm_gem_vm *vm, const struct msm_vm_unmap_op *op) { - vm_log(vm, "unmap", op->iova, op->range, op->queue_id); + const char *reason = op->reason; + + if (!reason) + reason = "unmap"; + + vm_log(vm, reason, op->iova, op->range, op->queue_id); vm->mmu->funcs->unmap(vm->mmu, op->iova, op->range); } @@ -257,7 +265,7 @@ vm_map_op(struct msm_gem_vm *vm, const struct msm_vm_map_op *op) } /* Actually unmap memory for the vma */ -void msm_gem_vma_unmap(struct drm_gpuva *vma) +void msm_gem_vma_unmap(struct drm_gpuva *vma, const char *reason) { struct msm_gem_vm *vm = to_msm_vm(vma->vm); struct msm_gem_vma *msm_vma = to_msm_vma(vma); @@ -277,6 +285,7 @@ void msm_gem_vma_unmap(struct drm_gpuva *vma) vm_unmap_op(vm, &(struct msm_vm_unmap_op){ .iova = vma->va.addr, .range = vma->va.range, + .reason = reason, }); if (!vm->managed) @@ -863,7 +872,7 @@ msm_gem_vm_close(struct drm_gpuvm *gpuvm) drm_exec_retry_on_contention(&exec); } - msm_gem_vma_unmap(vma); + msm_gem_vma_unmap(vma, "close"); msm_gem_vma_close(vma); if (obj) { -- cgit v1.2.3 From 0a1ff88ec5b60b41ba830c5bf08b6cd8f45ab411 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Sun, 29 Jun 2025 13:13:22 -0700 Subject: drm/msm: use trylock for debugfs This resolves a potential deadlock vs msm_gem_vm_close(). Otherwise for _NO_SHARE buffers msm_gem_describe() could be trying to acquire the shared vm resv, while already holding priv->obj_lock. But _vm_close() might drop the last reference to a GEM obj while already holding the vm resv, and msm_gem_free_object() needs to grab priv->obj_lock, a locking inversion. OTOH this is only for debugfs and it isn't critical if we undercount by skipping a locked obj. So just use trylock() and move along if we can't get the lock. 
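The resulting pattern in msm_gem_describe() reduces to the following sketch (names as in the diff below; the per-object stats accounting between lock and unlock is unchanged and elided here):

	if (!msm_gem_trylock(obj))
		return;		/* skip this obj; undercounting is fine for debugfs */

	/* ... accumulate per-object stats as before ... */

	msm_gem_unlock(obj);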
Signed-off-by: Rob Clark Signed-off-by: Rob Clark Tested-by: Antonino Maniscalco Reviewed-by: Antonino Maniscalco Patchwork: https://patchwork.freedesktop.org/patch/661525/ --- drivers/gpu/drm/msm/msm_gem.c | 3 ++- drivers/gpu/drm/msm/msm_gem.h | 6 ++++++ 2 files changed, 8 insertions(+), 1 deletion(-) (limited to 'drivers/gpu/drm/msm/msm_gem.h') diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index e3ccda777ef3..3e87d27dfcb6 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -938,7 +938,8 @@ void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m, uint64_t off = drm_vma_node_start(&obj->vma_node); const char *madv; - msm_gem_lock(obj); + if (!msm_gem_trylock(obj)) + return; stats->all.count++; stats->all.size += obj->size; diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index ce5e90ba935b..1ce97f8a30bb 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -280,6 +280,12 @@ msm_gem_lock(struct drm_gem_object *obj) dma_resv_lock(obj->resv, NULL); } +static inline bool __must_check +msm_gem_trylock(struct drm_gem_object *obj) +{ + return dma_resv_trylock(obj->resv); +} + static inline int msm_gem_lock_interruptible(struct drm_gem_object *obj) { -- cgit v1.2.3 From 3bebfd53af0f7c8ea55094ba7b8b8b907024bb7b Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Sun, 29 Jun 2025 13:13:24 -0700 Subject: drm/msm: Defer VMA unmap for fb unpins With the conversion to drm_gpuvm, we lost the lazy VMA cleanup, which means that fb cleanup/unpin when pageflipping to new scanout buffers immediately unmaps the scanout buffer. This is costly (with tlbinv, it can be 4-6ms for a 1080p scanout buffer, and more for higher resolutions)! To avoid this, introduce a vma_ref, which is incremented whenever userspace has a GEM handle or dma-buf fd. When unpinning, if the vm is the kms->vm, we defer tearing down the VMA until the vma_ref drops to zero. If the buffer is still part of a flip-chain, then userspace will be holding some sort of reference to the BO, either via a GEM handle and/or dma-buf fd. So this avoids unmapping the VMA when there is a strong possibility that it will be needed again.
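The core of the scheme reduces to the sketch below (simplified from the diff that follows; locking, the !priv->kms case, and the drm_exec setup are elided):

	/* Taken whenever userspace gains a handle or dma-buf fd for the BO: */
	void msm_gem_vma_get(struct drm_gem_object *obj)
	{
		atomic_inc(&to_msm_bo(obj)->vma_ref);
	}

	/* On handle close / dma-buf release; teardown only on the last put: */
	void msm_gem_vma_put(struct drm_gem_object *obj)
	{
		if (atomic_dec_return(&to_msm_bo(obj)->vma_ref))
			return;		/* still referenced, keep the VMA mapped */

		/* last user gone: now safe to tear down the kms->vm VMA
		 * (priv = obj->dev->dev_private)
		 */
		put_iova_spaces(obj, priv->kms->vm, true, "vma_put");
	}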
Signed-off-by: Rob Clark Tested-by: Antonino Maniscalco Reviewed-by: Antonino Maniscalco Patchwork: https://patchwork.freedesktop.org/patch/661538/ --- drivers/gpu/drm/msm/msm_drv.c | 1 + drivers/gpu/drm/msm/msm_drv.h | 1 + drivers/gpu/drm/msm/msm_fb.c | 5 +++- drivers/gpu/drm/msm/msm_gem.c | 60 +++++++++++++++++++++++-------------- drivers/gpu/drm/msm/msm_gem.h | 28 +++++++++++++++++ drivers/gpu/drm/msm/msm_gem_prime.c | 54 +++++++++++++++++++++++++++++++-- 6 files changed, 123 insertions(+), 26 deletions(-) (limited to 'drivers/gpu/drm/msm/msm_gem.h') diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c index d0f80267f4ba..2283a377cda1 100644 --- a/drivers/gpu/drm/msm/msm_drv.c +++ b/drivers/gpu/drm/msm/msm_drv.c @@ -837,6 +837,7 @@ static const struct drm_driver msm_driver = { .postclose = msm_postclose, .dumb_create = msm_gem_dumb_create, .dumb_map_offset = msm_gem_dumb_map_offset, + .gem_prime_import = msm_gem_prime_import, .gem_prime_import_sg_table = msm_gem_prime_import_sg_table, #ifdef CONFIG_DEBUG_FS .debugfs_init = msm_debugfs_init, diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h index 200c3135bbf9..2b49c4b800ee 100644 --- a/drivers/gpu/drm/msm/msm_drv.h +++ b/drivers/gpu/drm/msm/msm_drv.h @@ -269,6 +269,7 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev); struct sg_table *msm_gem_prime_get_sg_table(struct drm_gem_object *obj); int msm_gem_prime_vmap(struct drm_gem_object *obj, struct iosys_map *map); void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map); +struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev, struct dma_buf *buf); struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg); struct dma_buf *msm_gem_prime_export(struct drm_gem_object *obj, int flags); diff --git a/drivers/gpu/drm/msm/msm_fb.c b/drivers/gpu/drm/msm/msm_fb.c index 8ae2f326ec54..bc7c2bb8f01e 100644 --- a/drivers/gpu/drm/msm/msm_fb.c +++ b/drivers/gpu/drm/msm/msm_fb.c @@ -89,6 +89,7 @@ int msm_framebuffer_prepare(struct drm_framebuffer *fb, bool needs_dirtyfb) return 0; for (i = 0; i < n; i++) { + msm_gem_vma_get(fb->obj[i]); ret = msm_gem_get_and_pin_iova(fb->obj[i], vm, &msm_fb->iova[i]); drm_dbg_state(fb->dev, "FB[%u]: iova[%d]: %08llx (%d)\n", fb->base.id, i, msm_fb->iova[i], ret); @@ -114,8 +115,10 @@ void msm_framebuffer_cleanup(struct drm_framebuffer *fb, bool needed_dirtyfb) memset(msm_fb->iova, 0, sizeof(msm_fb->iova)); - for (i = 0; i < n; i++) + for (i = 0; i < n; i++) { msm_gem_unpin_iova(fb->obj[i], vm); + msm_gem_vma_put(fb->obj[i]); + } } uint32_t msm_framebuffer_iova(struct drm_framebuffer *fb, int plane) diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c index 3e87d27dfcb6..33d3354c6102 100644 --- a/drivers/gpu/drm/msm/msm_gem.c +++ b/drivers/gpu/drm/msm/msm_gem.c @@ -19,6 +19,7 @@ #include "msm_drv.h" #include "msm_gem.h" #include "msm_gpu.h" +#include "msm_kms.h" static void update_device_mem(struct msm_drm_private *priv, ssize_t size) { @@ -39,6 +40,7 @@ static void update_ctx_mem(struct drm_file *file, ssize_t size) static int msm_gem_open(struct drm_gem_object *obj, struct drm_file *file) { + msm_gem_vma_get(obj); update_ctx_mem(file, obj->size); return 0; } @@ -46,33 +48,13 @@ static int msm_gem_open(struct drm_gem_object *obj, struct drm_file *file) static void put_iova_spaces(struct drm_gem_object *obj, struct drm_gpuvm *vm, bool close, const char *reason); -static void detach_vm(struct 
drm_gem_object *obj, struct drm_gpuvm *vm) -{ - msm_gem_assert_locked(obj); - drm_gpuvm_resv_assert_held(vm); - - struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_find(vm, obj); - if (vm_bo) { - struct drm_gpuva *vma; - - drm_gpuvm_bo_for_each_va (vma, vm_bo) { - if (vma->vm != vm) - continue; - msm_gem_vma_unmap(vma, "detach"); - msm_gem_vma_close(vma); - break; - } - - drm_gpuvm_bo_put(vm_bo); - } -} - static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file) { struct msm_context *ctx = file->driver_priv; struct drm_exec exec; update_ctx_mem(file, -obj->size); + msm_gem_vma_put(obj); /* * If VM isn't created yet, nothing to cleanup. And in fact calling @@ -99,7 +81,31 @@ static void msm_gem_close(struct drm_gem_object *obj, struct drm_file *file) msm_gem_lock_vm_and_obj(&exec, obj, ctx->vm); put_iova_spaces(obj, ctx->vm, true, "close"); - detach_vm(obj, ctx->vm); + drm_exec_fini(&exec); /* drop locks */ +} + +/* + * Get/put for kms->vm VMA + */ + +void msm_gem_vma_get(struct drm_gem_object *obj) +{ + atomic_inc(&to_msm_bo(obj)->vma_ref); +} + +void msm_gem_vma_put(struct drm_gem_object *obj) +{ + struct msm_drm_private *priv = obj->dev->dev_private; + struct drm_exec exec; + + if (atomic_dec_return(&to_msm_bo(obj)->vma_ref)) + return; + + if (!priv->kms) + return; + + msm_gem_lock_vm_and_obj(&exec, obj, priv->kms->vm); + put_iova_spaces(obj, priv->kms->vm, true, "vma_put"); drm_exec_fini(&exec); /* drop locks */ } @@ -656,6 +662,13 @@ int msm_gem_set_iova(struct drm_gem_object *obj, return ret; } +static bool is_kms_vm(struct drm_gpuvm *vm) +{ + struct msm_drm_private *priv = vm->drm->dev_private; + + return priv->kms && (priv->kms->vm == vm); +} + /* * Unpin a iova by updating the reference counts. The memory isn't actually * purged until something else (shrinker, mm_notifier, destroy, etc) decides @@ -671,7 +684,8 @@ void msm_gem_unpin_iova(struct drm_gem_object *obj, struct drm_gpuvm *vm) if (vma) { msm_gem_unpin_locked(obj); } - detach_vm(obj, vm); + if (!is_kms_vm(vm)) + put_iova_spaces(obj, vm, true, "close"); drm_exec_fini(&exec); /* drop locks */ } diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 1ce97f8a30bb..5c0c59e4835c 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -211,9 +211,37 @@ struct msm_gem_object { * Protected by LRU lock. */ int pin_count; + + /** + * @vma_ref: Reference count of VMA users. + * + * With the vm_bo/vma holding a reference to the GEM object, we'd + * otherwise have to actively tear down a VMA when, for example, + * a buffer is unpinned for scanout, vs. the pre-drm_gpuvm approach + * where a VMA did not hold a reference to the BO, but instead was + * implicitly torn down when the BO was freed. + * + * To regain the lazy VMA teardown, we use the @vma_ref. It is + * incremented for any of the following: + * + * 1) the BO is exported as a dma_buf + * 2) the BO has an open userspace handle + * + * All of those conditions will hold a reference to the BO, + * preventing it from being freed. So lazily keeping around the + * VMA will not prevent the BO from being freed. (Or rather, the + * reference loop is harmless in this case.) + * + * When the @vma_ref drops to zero, then kms->vm VMA will be + * torn down.
+ */ + atomic_t vma_ref; }; #define to_msm_bo(x) container_of(x, struct msm_gem_object, base) +void msm_gem_vma_get(struct drm_gem_object *obj); +void msm_gem_vma_put(struct drm_gem_object *obj); + uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj); int msm_gem_prot(struct drm_gem_object *obj); int msm_gem_pin_vma_locked(struct drm_gem_object *obj, struct drm_gpuva *vma); diff --git a/drivers/gpu/drm/msm/msm_gem_prime.c b/drivers/gpu/drm/msm/msm_gem_prime.c index 4d93f2daeeaa..c0a33ac839cb 100644 --- a/drivers/gpu/drm/msm/msm_gem_prime.c +++ b/drivers/gpu/drm/msm/msm_gem_prime.c @@ -6,6 +6,7 @@ #include +#include #include #include "msm_drv.h" @@ -42,19 +43,68 @@ void msm_gem_prime_vunmap(struct drm_gem_object *obj, struct iosys_map *map) msm_gem_put_vaddr_locked(obj); } +static void msm_gem_dmabuf_release(struct dma_buf *dma_buf) +{ + struct drm_gem_object *obj = dma_buf->priv; + + msm_gem_vma_put(obj); + drm_gem_dmabuf_release(dma_buf); +} + +static const struct dma_buf_ops msm_gem_prime_dmabuf_ops = { + .attach = drm_gem_map_attach, + .detach = drm_gem_map_detach, + .map_dma_buf = drm_gem_map_dma_buf, + .unmap_dma_buf = drm_gem_unmap_dma_buf, + .release = msm_gem_dmabuf_release, + .mmap = drm_gem_dmabuf_mmap, + .vmap = drm_gem_dmabuf_vmap, + .vunmap = drm_gem_dmabuf_vunmap, +}; + +struct drm_gem_object *msm_gem_prime_import(struct drm_device *dev, + struct dma_buf *buf) +{ + if (buf->ops == &msm_gem_prime_dmabuf_ops) { + struct drm_gem_object *obj = buf->priv; + if (obj->dev == dev) { + /* + * Importing dmabuf exported from our own gem increases + * refcount on gem itself instead of f_count of dmabuf. + */ + drm_gem_object_get(obj); + return obj; + } + } + + return drm_gem_prime_import(dev, buf); } + struct drm_gem_object *msm_gem_prime_import_sg_table(struct drm_device *dev, struct dma_buf_attachment *attach, struct sg_table *sg) { return msm_gem_import(dev, attach->dmabuf, sg); } - struct dma_buf *msm_gem_prime_export(struct drm_gem_object *obj, int flags) { if (to_msm_bo(obj)->flags & MSM_BO_NO_SHARE) return ERR_PTR(-EPERM); - return drm_gem_prime_export(obj, flags); + msm_gem_vma_get(obj); + + struct drm_device *dev = obj->dev; + struct dma_buf_export_info exp_info = { + .exp_name = KBUILD_MODNAME, /* white lie for debug */ + .owner = dev->driver->fops->owner, + .ops = &msm_gem_prime_dmabuf_ops, + .size = obj->size, + .flags = flags, + .priv = obj, + .resv = obj->resv, + }; + + return drm_gem_dmabuf_export(dev, &exp_info); } int msm_gem_prime_pin(struct drm_gem_object *obj) -- cgit v1.2.3 From b74fae5492d1429c03b57a423d0837a3bdc4b397 Mon Sep 17 00:00:00 2001 From: Rob Clark Date: Sun, 29 Jun 2025 13:13:25 -0700 Subject: drm/msm: Add VM_BIND throttling A large number of (unsorted or separate) small (<2MB) mappings can cause a lot of, probably unnecessary, prealloc pages. Ie. a single 4k page size mapping will pre-allocate 3 pages (for levels 2-4) for the pagetable, which can chew up a large amount of unneeded memory. So add a mechanism to put an upper bound on the # of pre-alloc pages.
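The throttle itself is just a counter plus a waitqueue, used as in this sketch (names as in the diff below; the 1024-page cap matches the value used there):

	/* Submit path: wait while too much prealloc'd memory is in flight: */
	ret = wait_event_interruptible(vm->prealloc_throttle.wait,
			atomic_read(&vm->prealloc_throttle.in_flight) <= 1024);
	if (ret)
		return ret;
	atomic_add(job->prealloc.count, &vm->prealloc_throttle.in_flight);

	/* Job-free path: release our share and wake any waiting submitter: */
	atomic_sub(job->prealloc.count, &vm->prealloc_throttle.in_flight);
	wake_up(&vm->prealloc_throttle.wait);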
Signed-off-by: Rob Clark Tested-by: Antonino Maniscalco Reviewed-by: Antonino Maniscalco Patchwork: https://patchwork.freedesktop.org/patch/661529/ --- drivers/gpu/drm/msm/msm_gem.h | 20 ++++++++++++++++++++ drivers/gpu/drm/msm/msm_gem_vma.c | 28 ++++++++++++++++++++++++++-- 2 files changed, 46 insertions(+), 2 deletions(-) (limited to 'drivers/gpu/drm/msm/msm_gem.h') diff --git a/drivers/gpu/drm/msm/msm_gem.h b/drivers/gpu/drm/msm/msm_gem.h index 5c0c59e4835c..88239da1cd72 100644 --- a/drivers/gpu/drm/msm/msm_gem.h +++ b/drivers/gpu/drm/msm/msm_gem.h @@ -75,6 +75,26 @@ struct msm_gem_vm { */ struct drm_gpu_scheduler sched; + /** + * @prealloc_throttle: Used to throttle VM_BIND ops if too much pre- + * allocated memory is in flight. + * + * Because we have to pre-allocate pgtable pages for the worst case + * (ie. new mappings do not share any PTEs with existing mappings) + * we could end up consuming a lot of resources transiently. The + * prealloc_throttle puts an upper bound on that. + */ + struct { + /** @wait: Notified when preallocated resources are released */ + wait_queue_head_t wait; + + /** + * @in_flight: The # of preallocated pgtable pages in-flight + * for queued VM_BIND jobs. + */ + atomic_t in_flight; + } prealloc_throttle; + /** * @mm: Memory management for kernel managed VA allocations * diff --git a/drivers/gpu/drm/msm/msm_gem_vma.c b/drivers/gpu/drm/msm/msm_gem_vma.c index 02491e7dd816..1ad67dfa94e3 100644 --- a/drivers/gpu/drm/msm/msm_gem_vma.c +++ b/drivers/gpu/drm/msm/msm_gem_vma.c @@ -705,6 +705,8 @@ msm_vma_job_free(struct drm_sched_job *_job) vm->mmu->funcs->prealloc_cleanup(vm->mmu, &job->prealloc); + atomic_sub(job->prealloc.count, &vm->prealloc_throttle.in_flight); + drm_sched_job_cleanup(_job); job_foreach_bo (obj, job) @@ -721,6 +723,8 @@ msm_vma_job_free(struct drm_sched_job *_job) kfree(op); } + wake_up(&vm->prealloc_throttle.wait); + kfree(job); } @@ -783,6 +787,8 @@ msm_gem_vm_create(struct drm_device *drm, struct msm_mmu *mmu, const char *name, ret = drm_sched_init(&vm->sched, &args); if (ret) goto err_free_dummy; + + init_waitqueue_head(&vm->prealloc_throttle.wait); } drm_gpuvm_init(&vm->base, name, flags, drm, dummy_gem, @@ -1090,10 +1096,12 @@ ops_are_same_pte(struct msm_vm_bind_op *first, struct msm_vm_bind_op *next) * them as a single mapping. Otherwise the prealloc_count() will not realize * they can share pagetable pages and vastly overcount. 
*/ -static void +static int vm_bind_prealloc_count(struct msm_vm_bind_job *job) { struct msm_vm_bind_op *first = NULL, *last = NULL; + struct msm_gem_vm *vm = to_msm_vm(job->vm); + int ret; for (int i = 0; i < job->nr_ops; i++) { struct msm_vm_bind_op *op = &job->ops[i]; @@ -1122,6 +1130,20 @@ vm_bind_prealloc_count(struct msm_vm_bind_job *job) /* Flush the remaining range: */ prealloc_count(job, first, last); + + /* + * Now that we know the needed amount to pre-alloc, throttle on pending + * VM_BIND jobs if we already have too much pre-alloc memory in flight + */ + ret = wait_event_interruptible( + vm->prealloc_throttle.wait, + atomic_read(&vm->prealloc_throttle.in_flight) <= 1024); + if (ret) + return ret; + + atomic_add(job->prealloc.count, &vm->prealloc_throttle.in_flight); + + return 0; } /* @@ -1412,7 +1434,9 @@ msm_ioctl_vm_bind(struct drm_device *dev, void *data, struct drm_file *file) if (ret) goto out_unlock; - vm_bind_prealloc_count(job); + ret = vm_bind_prealloc_count(job); + if (ret) + goto out_unlock; struct drm_exec exec; unsigned flags = DRM_EXEC_IGNORE_DUPLICATES | DRM_EXEC_INTERRUPTIBLE_WAIT; -- cgit v1.2.3
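For context, a minimal (hypothetical) userspace invocation of the DRM_IOCTL_MSM_VM_BIND ioctl added earlier in this series could look like the sketch below. It assumes a uapi header new enough to carry these defines (include paths vary by distro), a VM_BIND-enabled context, and a submitqueue created with MSM_SUBMITQUEUE_VM_BIND; the fd, BO handle, GPU VA, and size are placeholder parameters, and fence/syncobj plumbing is omitted:

	#include <err.h>
	#include <stdint.h>
	#include <xf86drm.h>
	#include <drm/msm_drm.h>

	/* Map one BO into the GPU VM using the single inlined bind op: */
	static void bind_bo(int fd, uint32_t queue_id, uint32_t bo_handle,
			    uint64_t gpu_va, uint64_t size)
	{
		struct drm_msm_vm_bind req = {
			.nr_ops = 1,
			.queue_id = queue_id,	/* a MSM_SUBMITQUEUE_VM_BIND queue */
			.op = {
				.op = MSM_VM_BIND_OP_MAP,
				.handle = bo_handle,
				.iova = gpu_va,	/* page aligned */
				.range = size,	/* bytes, page aligned */
			},
		};

		/*
		 * The bind job executes asynchronously on the VM's scheduler;
		 * set MSM_VM_BIND_FENCE_FD_OUT in .flags to get a completion
		 * fence fd back in .fence_fd if ordering matters.
		 */
		if (drmIoctl(fd, DRM_IOCTL_MSM_VM_BIND, &req))
			err(1, "DRM_IOCTL_MSM_VM_BIND");
	}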