Diffstat (limited to 'drivers/gpu/drm/amd/amdgpu/amdgpu_device.c')
-rw-r--r--	drivers/gpu/drm/amd/amdgpu/amdgpu_device.c	392
1 file changed, 228 insertions(+), 164 deletions(-)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 25e1f5ed7ead..cfa411c12072 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -37,6 +37,7 @@
 #include <linux/pci-p2pdma.h>
 
 #include <drm/drm_atomic_helper.h>
+#include <drm/drm_fb_helper.h>
 #include <drm/drm_probe_helper.h>
 #include <drm/amdgpu_drm.h>
 #include <linux/vgaarb.h>
@@ -1568,7 +1569,7 @@ static int amdgpu_device_check_arguments(struct amdgpu_device *adev)
  * @pdev: pci dev pointer
  * @state: vga_switcheroo state
  *
- * Callback for the switcheroo driver. Suspends or resumes the
+ * Callback for the switcheroo driver. Suspends or resumes
  * the asics before or after it is powered up using ACPI methods.
  */
 static void amdgpu_switcheroo_set_state(struct pci_dev *pdev,
@@ -1915,6 +1916,16 @@ static void amdgpu_device_enable_virtual_display(struct amdgpu_device *adev)
 	}
 }
 
+void amdgpu_device_set_sriov_virtual_display(struct amdgpu_device *adev)
+{
+	if (amdgpu_sriov_vf(adev) && !adev->enable_virtual_display) {
+		adev->mode_info.num_crtc = 1;
+		adev->enable_virtual_display = true;
+		DRM_INFO("virtual_display:%d, num_crtc:%d\n",
+			 adev->enable_virtual_display, adev->mode_info.num_crtc);
+	}
+}
+
 /**
  * amdgpu_device_parse_gpu_info_fw - parse gpu info firmware
  *
@@ -2397,7 +2408,7 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 			adev->ip_blocks[i].status.hw = true;
 
 			/* right after GMC hw init, we create CSA */
-			if (amdgpu_mcbp || amdgpu_sriov_vf(adev)) {
+			if (amdgpu_mcbp) {
 				r = amdgpu_allocate_static_csa(adev, &adev->virt.csa_obj,
 							       AMDGPU_GEM_DOMAIN_VRAM,
 							       AMDGPU_CSA_SIZE);
@@ -2459,19 +2470,26 @@ static int amdgpu_device_ip_init(struct amdgpu_device *adev)
 	 */
 	if (adev->gmc.xgmi.num_physical_nodes > 1) {
 		if (amdgpu_xgmi_add_device(adev) == 0) {
-			struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+			if (!amdgpu_sriov_vf(adev)) {
+				struct amdgpu_hive_info *hive = amdgpu_get_xgmi_hive(adev);
+
+				if (WARN_ON(!hive)) {
+					r = -ENOENT;
+					goto init_failed;
+				}
+
+				if (!hive->reset_domain ||
+				    !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
+					r = -ENOENT;
+					amdgpu_put_xgmi_hive(hive);
+					goto init_failed;
+				}
 
-			if (!hive->reset_domain ||
-			    !amdgpu_reset_get_reset_domain(hive->reset_domain)) {
-				r = -ENOENT;
+				/* Drop the early temporary reset domain we created for device */
+				amdgpu_reset_put_reset_domain(adev->reset_domain);
+				adev->reset_domain = hive->reset_domain;
 				amdgpu_put_xgmi_hive(hive);
-				goto init_failed;
 			}
-
-			/* Drop the early temporary reset domain we created for device */
-			amdgpu_reset_put_reset_domain(adev->reset_domain);
-			adev->reset_domain = hive->reset_domain;
-			amdgpu_put_xgmi_hive(hive);
 		}
 	}
 
@@ -2926,6 +2944,14 @@ static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
 	amdgpu_device_set_pg_state(adev, AMD_PG_STATE_UNGATE);
 	amdgpu_device_set_cg_state(adev, AMD_CG_STATE_UNGATE);
 
+	/*
+	 * Per PMFW team's suggestion, driver needs to handle gfxoff
+	 * and df cstate features disablement for gpu reset(e.g. Mode1Reset)
+	 * scenario. Add the missing df cstate disablement here.
+	 */
+	if (amdgpu_dpm_set_df_cstate(adev, DF_CSTATE_DISALLOW))
+		dev_warn(adev->dev, "Failed to disallow df cstate");
+
 	for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
 		if (!adev->ip_blocks[i].status.valid)
 			continue;
@@ -3200,6 +3226,15 @@ static int amdgpu_device_ip_resume_phase2(struct amdgpu_device *adev)
 			return r;
 		}
 		adev->ip_blocks[i].status.hw = true;
+
+		if (adev->in_s0ix && adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_SMC) {
+			/* disable gfxoff for IP resume. The gfxoff will be re-enabled in
+			 * amdgpu_device_resume() after IP resume.
+			 */
+			amdgpu_gfx_off_ctrl(adev, false);
+			DRM_DEBUG("will disable gfxoff for re-initializing other blocks\n");
+		}
+
 	}
 
 	return 0;
 }
@@ -3328,8 +3363,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
  */
 bool amdgpu_device_has_dc_support(struct amdgpu_device *adev)
 {
-	if (amdgpu_sriov_vf(adev) ||
-	    adev->enable_virtual_display ||
+	if (adev->enable_virtual_display ||
 	    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
 		return false;
 
@@ -3510,6 +3544,7 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	adev->gmc.gart_size = 512 * 1024 * 1024;
 	adev->accel_working = false;
 	adev->num_rings = 0;
+	RCU_INIT_POINTER(adev->gang_submit, dma_fence_get_stub());
 	adev->mman.buffer_funcs = NULL;
 	adev->mman.buffer_funcs_ring = NULL;
 	adev->vm_manager.vm_pte_funcs = NULL;
@@ -3588,6 +3623,8 @@ int amdgpu_device_init(struct amdgpu_device *adev,
 	INIT_WORK(&adev->xgmi_reset_work, amdgpu_device_xgmi_reset_func);
 
 	adev->gfx.gfx_off_req_count = 1;
+	adev->gfx.gfx_off_residency = 0;
+	adev->gfx.gfx_off_entrycount = 0;
 	adev->pm.ac_power = power_supply_is_system_supplied() > 0;
 
 	atomic_set(&adev->throttling_logging_enabled, 1);
@@ -3976,8 +4013,7 @@ void amdgpu_device_fini_hw(struct amdgpu_device *adev)
 
 	amdgpu_gart_dummy_page_fini(adev);
 
-	if (drm_dev_is_unplugged(adev_to_drm(adev)))
-		amdgpu_device_unmap_mmio(adev);
+	amdgpu_device_unmap_mmio(adev);
 
 }
 
@@ -3990,6 +4026,7 @@ void amdgpu_device_fini_sw(struct amdgpu_device *adev)
 	release_firmware(adev->firmware.gpu_info_fw);
 	adev->firmware.gpu_info_fw = NULL;
 	adev->accel_working = false;
+	dma_fence_put(rcu_dereference_protected(adev->gang_submit, true));
 
 	amdgpu_reset_fini(adev);
 
@@ -4038,15 +4075,18 @@
  * at suspend time.
 *
 */
-static void amdgpu_device_evict_resources(struct amdgpu_device *adev)
+static int amdgpu_device_evict_resources(struct amdgpu_device *adev)
 {
+	int ret;
+
 	/* No need to evict vram on APUs for suspend to ram or s2idle */
 	if ((adev->in_s3 || adev->in_s0ix) && (adev->flags & AMD_IS_APU))
-		return;
+		return 0;
 
-	if (amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM))
+	ret = amdgpu_ttm_evict_resources(adev, TTM_PL_VRAM);
+	if (ret)
 		DRM_WARN("evicting device resources failed\n");
-
+	return ret;
 }
 
 /*
@@ -4096,7 +4136,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool fbcon)
 	if (!adev->in_s0ix)
 		amdgpu_amdkfd_suspend(adev, adev->in_runpm);
 
-	amdgpu_device_evict_resources(adev);
+	r = amdgpu_device_evict_resources(adev);
+	if (r)
+		return r;
 
 	amdgpu_fence_driver_hw_fini(adev);
 
@@ -4144,21 +4186,15 @@ int amdgpu_device_resume(struct drm_device *dev, bool fbcon)
 	r = amdgpu_device_ip_resume(adev);
 
-	/* no matter what r is, always need to properly release full GPU */
-	if (amdgpu_sriov_vf(adev)) {
-		amdgpu_virt_init_data_exchange(adev);
-		amdgpu_virt_release_full_gpu(adev, true);
-	}
-
 	if (r) {
 		dev_err(adev->dev, "amdgpu_device_ip_resume failed (%d).\n", r);
-		return r;
+		goto exit;
 	}
 	amdgpu_fence_driver_hw_init(adev);
 
 	r = amdgpu_device_ip_late_init(adev);
 	if (r)
-		return r;
+		goto exit;
 
 	queue_delayed_work(system_wq, &adev->delayed_init_work,
 			   msecs_to_jiffies(AMDGPU_RESUME_MS));
 
@@ -4166,12 +4202,28 @@
 	if (!adev->in_s0ix) {
 		r = amdgpu_amdkfd_resume(adev, adev->in_runpm);
 		if (r)
-			return r;
+			goto exit;
+	}
+
+exit:
+	if (amdgpu_sriov_vf(adev)) {
+		amdgpu_virt_init_data_exchange(adev);
+		amdgpu_virt_release_full_gpu(adev, true);
 	}
 
+	if (r)
+		return r;
+
 	/* Make sure IB tests flushed */
 	flush_delayed_work(&adev->delayed_init_work);
 
+	if (adev->in_s0ix) {
+		/* re-enable gfxoff after IP resume. This re-enables gfxoff after
+		 * it was disabled for IP resume in amdgpu_device_ip_resume_phase2().
+		 */
+		amdgpu_gfx_off_ctrl(adev, true);
+		DRM_DEBUG("will enable gfxoff for the mission mode\n");
+	}
 	if (fbcon)
 		drm_fb_helper_set_suspend_unlocked(adev_to_drm(adev)->fb_helper, false);
 
@@ -4179,25 +4231,27 @@
 	amdgpu_ras_resume(adev);
 
-	/*
-	 * Most of the connector probing functions try to acquire runtime pm
-	 * refs to ensure that the GPU is powered on when connector polling is
-	 * performed. Since we're calling this from a runtime PM callback,
-	 * trying to acquire rpm refs will cause us to deadlock.
-	 *
-	 * Since we're guaranteed to be holding the rpm lock, it's safe to
-	 * temporarily disable the rpm helpers so this doesn't deadlock us.
-	 */
+	if (adev->mode_info.num_crtc) {
+		/*
+		 * Most of the connector probing functions try to acquire runtime pm
+		 * refs to ensure that the GPU is powered on when connector polling is
+		 * performed. Since we're calling this from a runtime PM callback,
+		 * trying to acquire rpm refs will cause us to deadlock.
+		 *
+		 * Since we're guaranteed to be holding the rpm lock, it's safe to
+		 * temporarily disable the rpm helpers so this doesn't deadlock us.
+		 */
 #ifdef CONFIG_PM
-	dev->dev->power.disable_depth++;
+		dev->dev->power.disable_depth++;
 #endif
-	if (!amdgpu_device_has_dc_support(adev))
-		drm_helper_hpd_irq_event(dev);
-	else
-		drm_kms_helper_hotplug_event(dev);
+		if (!adev->dc_enabled)
+			drm_helper_hpd_irq_event(dev);
+		else
+			drm_kms_helper_hotplug_event(dev);
 #ifdef CONFIG_PM
-	dev->dev->power.disable_depth--;
+		dev->dev->power.disable_depth--;
 #endif
+	}
 
 	adev->in_suspend = false;
 	if (amdgpu_acpi_smart_shift_update(dev, AMDGPU_SS_DEV_D0))
@@ -4542,14 +4596,19 @@ bool amdgpu_device_has_job_running(struct amdgpu_device *adev)
  */
 bool amdgpu_device_should_recover_gpu(struct amdgpu_device *adev)
 {
-	if (!amdgpu_device_ip_check_soft_reset(adev)) {
-		dev_info(adev->dev, "Timeout, but no hardware hang detected.\n");
-		return false;
-	}
-
 	if (amdgpu_gpu_recovery == 0)
 		goto disabled;
 
+	/* Skip soft reset check in fatal error mode */
+	if (!amdgpu_ras_is_poison_mode_supported(adev))
+		return true;
+
+	if (!amdgpu_device_ip_check_soft_reset(adev)) {
+		dev_info(adev->dev,"Timeout, but no hardware hang detected.\n");
+		return false;
+	}
+
 	if (amdgpu_sriov_vf(adev))
 		return true;
 
@@ -4674,7 +4733,7 @@ int amdgpu_device_pre_asic_reset(struct amdgpu_device *adev,
 	if (!need_full_reset)
 		need_full_reset = amdgpu_device_ip_need_full_reset(adev);
 
-	if (!need_full_reset) {
+	if (!need_full_reset && amdgpu_gpu_recovery) {
 		amdgpu_device_ip_pre_soft_reset(adev);
 		r = amdgpu_device_ip_soft_reset(adev);
 		amdgpu_device_ip_post_soft_reset(adev);
@@ -4770,6 +4829,7 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 	struct amdgpu_device *tmp_adev = NULL;
 	bool need_full_reset, skip_hw_reset, vram_lost = false;
 	int r = 0;
+	bool gpu_reset_for_dev_remove = 0;
 
 	/* Try reset handler method first */
 	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
@@ -4789,6 +4849,10 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
 	skip_hw_reset = test_bit(AMDGPU_SKIP_HW_RESET, &reset_context->flags);
 
+	gpu_reset_for_dev_remove =
+		test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
+		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
+
 	/*
 	 * ASIC reset has to be done on all XGMI hive nodes ASAP
 	 * to allow proper links negotiation in FW (within 1 sec)
@@ -4833,6 +4897,18 @@ int amdgpu_do_asic_reset(struct list_head *device_list_handle,
 			amdgpu_ras_intr_cleared();
 	}
 
+	/* Since the mode1 reset affects base ip blocks, the
+	 * phase1 ip blocks need to be resumed. Otherwise there
+	 * will be a BIOS signature error and the psp bootloader
+	 * can't load kdb on the next amdgpu install.
+	 */
+	if (gpu_reset_for_dev_remove) {
+		list_for_each_entry(tmp_adev, device_list_handle, reset_list)
+			amdgpu_device_ip_resume_phase1(tmp_adev);
+
+		goto end;
+	}
+
 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
 		if (need_full_reset) {
 			/* post card */
@@ -4975,6 +5051,8 @@ static void amdgpu_device_resume_display_audio(struct amdgpu_device *adev)
 		pm_runtime_enable(&(p->dev));
 		pm_runtime_resume(&(p->dev));
 	}
+
+	pci_dev_put(p);
 }
 
 static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
@@ -5013,6 +5091,7 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
 
 	if (expires < ktime_get_mono_fast_ns()) {
 		dev_warn(adev->dev, "failed to suspend display audio\n");
+		pci_dev_put(p);
 		/* TODO: abort the succeeding gpu reset? */
 		return -ETIMEDOUT;
 	}
@@ -5020,96 +5099,10 @@ static int amdgpu_device_suspend_display_audio(struct amdgpu_device *adev)
 	pm_runtime_disable(&(p->dev));
 
+	pci_dev_put(p);
 	return 0;
 }
 
-static void amdgpu_device_recheck_guilty_jobs(
-	struct amdgpu_device *adev, struct list_head *device_list_handle,
-	struct amdgpu_reset_context *reset_context)
-{
-	int i, r = 0;
-
-	for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
-		struct amdgpu_ring *ring = adev->rings[i];
-		int ret = 0;
-		struct drm_sched_job *s_job;
-
-		if (!ring || !ring->sched.thread)
-			continue;
-
-		s_job = list_first_entry_or_null(&ring->sched.pending_list,
-				struct drm_sched_job, list);
-		if (s_job == NULL)
-			continue;
-
-		/* clear job's guilty and depend the folowing step to decide the real one */
-		drm_sched_reset_karma(s_job);
-		drm_sched_resubmit_jobs_ext(&ring->sched, 1);
-
-		if (!s_job->s_fence->parent) {
-			DRM_WARN("Failed to get a HW fence for job!");
-			continue;
-		}
-
-		ret = dma_fence_wait_timeout(s_job->s_fence->parent, false, ring->sched.timeout);
-		if (ret == 0) { /* timeout */
-			DRM_ERROR("Found the real bad job! ring:%s, job_id:%llx\n",
-				  ring->sched.name, s_job->id);
-
-
-			amdgpu_fence_driver_isr_toggle(adev, true);
-
-			/* Clear this failed job from fence array */
-			amdgpu_fence_driver_clear_job_fences(ring);
-
-			amdgpu_fence_driver_isr_toggle(adev, false);
-
-			/* Since the job won't signal and we go for
-			 * another resubmit drop this parent pointer
-			 */
-			dma_fence_put(s_job->s_fence->parent);
-			s_job->s_fence->parent = NULL;
-
-			/* set guilty */
-			drm_sched_increase_karma(s_job);
-retry:
-			/* do hw reset */
-			if (amdgpu_sriov_vf(adev)) {
-				amdgpu_virt_fini_data_exchange(adev);
-				r = amdgpu_device_reset_sriov(adev, false);
-				if (r)
-					adev->asic_reset_res = r;
-			} else {
-				clear_bit(AMDGPU_SKIP_HW_RESET,
-					  &reset_context->flags);
-				r = amdgpu_do_asic_reset(device_list_handle,
-							 reset_context);
-				if (r && r == -EAGAIN)
-					goto retry;
-			}
-
-			/*
-			 * add reset counter so that the following
-			 * resubmitted job could flush vmid
-			 */
-			atomic_inc(&adev->gpu_reset_counter);
-			continue;
-		}
-
-		/* got the hw fence, signal finished fence */
-		atomic_dec(ring->sched.score);
-		dma_fence_get(&s_job->s_fence->finished);
-		dma_fence_signal(&s_job->s_fence->finished);
-		dma_fence_put(&s_job->s_fence->finished);
-
-		/* remove node from list and free the job */
-		spin_lock(&ring->sched.job_list_lock);
-		list_del_init(&s_job->list);
-		spin_unlock(&ring->sched.job_list_lock);
-		ring->sched.ops->free_job(s_job);
-	}
-}
-
 static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
 {
 	struct amdgpu_ras *con = amdgpu_ras_get_context(adev);
@@ -5130,7 +5123,6 @@ static inline void amdgpu_device_stop_pending_resets(struct amdgpu_device *adev)
 
 }
 
-
 /**
  * amdgpu_device_gpu_recover - reset the asic and recover scheduler
  *
@@ -5153,7 +5145,11 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 	int i, r = 0;
 	bool need_emergency_restart = false;
 	bool audio_suspended = false;
-	int tmp_vram_lost_counter;
+	bool gpu_reset_for_dev_remove = false;
+
+	gpu_reset_for_dev_remove =
+		test_bit(AMDGPU_RESET_FOR_DEVICE_REMOVE, &reset_context->flags) &&
+		test_bit(AMDGPU_NEED_FULL_RESET, &reset_context->flags);
 
 	/*
 	 * Special case: RAS triggered and full reset isn't supported
@@ -5188,8 +5184,11 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
 	 */
 	INIT_LIST_HEAD(&device_list);
 	if (!amdgpu_sriov_vf(adev) && (adev->gmc.xgmi.num_physical_nodes > 1)) {
-		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head)
+		list_for_each_entry(tmp_adev, &hive->device_list, gmc.xgmi.head) {
 			list_add_tail(&tmp_adev->reset_list, &device_list);
+			if (gpu_reset_for_dev_remove && adev->shutdown)
+				tmp_adev->shutdown = true;
+		}
 		if (!list_is_first(&adev->reset_list, &device_list))
 			list_rotate_to_front(&adev->reset_list, &device_list);
 		device_list_handle = &device_list;
@@ -5272,6 +5271,10 @@ retry:	/* Rest of adevs pre asic reset from XGMI hive. */
 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
+		if (gpu_reset_for_dev_remove) {
+			/* Workaroud for ASICs need to disable SMC first */
+			amdgpu_device_smu_fini_early(tmp_adev);
+		}
 		r = amdgpu_device_pre_asic_reset(tmp_adev, reset_context);
 		/*TODO Should we stop ?*/
 		if (r) {
@@ -5287,7 +5290,6 @@ retry:	/* Rest of adevs pre asic reset from XGMI hive. */
 			amdgpu_device_stop_pending_resets(tmp_adev);
 	}
 
-	tmp_vram_lost_counter = atomic_read(&((adev)->vram_lost_counter));
 	/* Actual ASIC resets if needed.*/
 	/* Host driver will handle XGMI hive reset for SRIOV */
 	if (amdgpu_sriov_vf(adev)) {
@@ -5302,6 +5304,9 @@ retry:	/* Rest of adevs pre asic reset from XGMI hive. */
 		r = amdgpu_do_asic_reset(device_list_handle, reset_context);
 		if (r && r == -EAGAIN)
 			goto retry;
+
+		if (!r && gpu_reset_for_dev_remove)
+			goto recover_end;
 	}
 
 skip_hw_reset:
@@ -5309,32 +5314,16 @@ skip_hw_reset:
 
 	/* Post ASIC reset for all devs .*/
 	list_for_each_entry(tmp_adev, device_list_handle, reset_list) {
 
-		/*
-		 * Sometimes a later bad compute job can block a good gfx job as gfx
-		 * and compute ring share internal GC HW mutually. We add an additional
-		 * guilty jobs recheck step to find the real guilty job, it synchronously
-		 * submits and pends for the first job being signaled. If it gets timeout,
-		 * we identify it as a real guilty job.
-		 */
-		if (amdgpu_gpu_recovery == 2 &&
-			!(tmp_vram_lost_counter < atomic_read(&adev->vram_lost_counter)))
-			amdgpu_device_recheck_guilty_jobs(
-				tmp_adev, device_list_handle, reset_context);
-
 		for (i = 0; i < AMDGPU_MAX_RINGS; ++i) {
 			struct amdgpu_ring *ring = tmp_adev->rings[i];
 
 			if (!ring || !ring->sched.thread)
 				continue;
 
-			/* No point to resubmit jobs if we didn't HW reset*/
-			if (!tmp_adev->asic_reset_res && !job_signaled)
-				drm_sched_resubmit_jobs(&ring->sched);
-
-			drm_sched_start(&ring->sched, !tmp_adev->asic_reset_res);
+			drm_sched_start(&ring->sched, true);
 		}
 
-		if (adev->enable_mes)
+		if (adev->enable_mes && adev->ip_versions[GC_HWIP][0] != IP_VERSION(11, 0, 3))
 			amdgpu_mes_self_test(tmp_adev);
 
 		if (!drm_drv_uses_atomic_modeset(adev_to_drm(tmp_adev)) && !job_signaled) {
@@ -5373,8 +5362,11 @@ skip_sched_resume:
 			amdgpu_device_resume_display_audio(tmp_adev);
 
 		amdgpu_device_unset_mp1_state(tmp_adev);
+
+		amdgpu_ras_set_error_query_ready(tmp_adev, true);
 	}
 
+recover_end:
 	tmp_adev = list_first_entry(device_list_handle, struct amdgpu_device,
 					    reset_list);
 	amdgpu_device_unlock_reset_domain(tmp_adev->reset_domain);
 
@@ -5557,9 +5549,9 @@ bool amdgpu_device_is_peer_accessible(struct amdgpu_device *adev,
 		~*peer_adev->dev->dma_mask : ~((1ULL << 32) - 1);
 	resource_size_t aper_limit =
 		adev->gmc.aper_base + adev->gmc.aper_size - 1;
-	bool p2p_access = !adev->gmc.xgmi.connected_to_cpu &&
-			  !(pci_p2pdma_distance_many(adev->pdev,
-					&peer_adev->dev, 1, true) < 0);
+	bool p2p_access =
+		!adev->gmc.xgmi.connected_to_cpu &&
+		!(pci_p2pdma_distance(adev->pdev, peer_adev->dev, false) < 0);
 
 	return pcie_p2p && p2p_access && (adev->gmc.visible_vram_size &&
 		adev->gmc.real_vram_size == adev->gmc.visible_vram_size &&
@@ -5783,8 +5775,6 @@ void amdgpu_pci_resume(struct pci_dev *pdev)
 		if (!ring || !ring->sched.thread)
 			continue;
 
-
-		drm_sched_resubmit_jobs(&ring->sched);
 		drm_sched_start(&ring->sched, true);
 	}
 
@@ -5942,3 +5932,77 @@ void amdgpu_device_pcie_port_wreg(struct amdgpu_device *adev,
 	(void)RREG32(data);
 	spin_unlock_irqrestore(&adev->pcie_idx_lock, flags);
 }
+
+/**
+ * amdgpu_device_switch_gang - switch to a new gang
+ * @adev: amdgpu_device pointer
+ * @gang: the gang to switch to
+ *
+ * Try to switch to a new gang.
+ * Returns: NULL if we switched to the new gang or a reference to the current
+ * gang leader.
+ */
+struct dma_fence *amdgpu_device_switch_gang(struct amdgpu_device *adev,
+					    struct dma_fence *gang)
+{
+	struct dma_fence *old = NULL;
+
+	do {
+		dma_fence_put(old);
+		rcu_read_lock();
+		old = dma_fence_get_rcu_safe(&adev->gang_submit);
+		rcu_read_unlock();
+
+		if (old == gang)
+			break;
+
+		if (!dma_fence_is_signaled(old))
+			return old;
+
+	} while (cmpxchg((struct dma_fence __force **)&adev->gang_submit,
+			 old, gang) != old);
+
+	dma_fence_put(old);
+	return NULL;
+}
+
+bool amdgpu_device_has_display_hardware(struct amdgpu_device *adev)
+{
+	switch (adev->asic_type) {
+#ifdef CONFIG_DRM_AMDGPU_SI
+	case CHIP_HAINAN:
+#endif
+	case CHIP_TOPAZ:
+		/* chips with no display hardware */
+		return false;
+#ifdef CONFIG_DRM_AMDGPU_SI
+	case CHIP_TAHITI:
+	case CHIP_PITCAIRN:
+	case CHIP_VERDE:
+	case CHIP_OLAND:
+#endif
+#ifdef CONFIG_DRM_AMDGPU_CIK
+	case CHIP_BONAIRE:
+	case CHIP_HAWAII:
+	case CHIP_KAVERI:
+	case CHIP_KABINI:
+	case CHIP_MULLINS:
+#endif
+	case CHIP_TONGA:
+	case CHIP_FIJI:
+	case CHIP_POLARIS10:
+	case CHIP_POLARIS11:
+	case CHIP_POLARIS12:
+	case CHIP_VEGAM:
+	case CHIP_CARRIZO:
+	case CHIP_STONEY:
+		/* chips with display hardware */
+		return true;
+	default:
+		/* IP discovery */
+		if (!adev->ip_versions[DCE_HWIP][0] ||
+		    (adev->harvest_ip_mask & AMD_HARVEST_IP_DMU_MASK))
+			return false;
+		return true;
+	}
+}
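
The new amdgpu_device_switch_gang() at the end of the diff installs a new gang-leader fence only when the fence currently stored in adev->gang_submit has already signaled, retrying with cmpxchg when another submitter races with it. Below is a minimal, single-threaded userspace sketch of that replace-only-if-signaled pattern using C11 atomics. The fence type, helpers, and reference-counting rules are simplified stand-ins and not the kernel's dma_fence API; in particular, the kernel performs the lookup under RCU (dma_fence_get_rcu_safe()), which the toy version elides.

#include <stdatomic.h>
#include <stdbool.h>
#include <stdio.h>
#include <stdlib.h>

/* Toy stand-in for struct dma_fence: a refcount and a "signaled" flag. */
struct fence {
	atomic_int refcount;
	atomic_bool signaled;
};

static struct fence *fence_get(struct fence *f)
{
	if (f)
		atomic_fetch_add(&f->refcount, 1);
	return f;
}

static void fence_put(struct fence *f)
{
	if (f && atomic_fetch_sub(&f->refcount, 1) == 1)
		free(f);
}

static struct fence *fence_new(bool signaled)
{
	struct fence *f = calloc(1, sizeof(*f));

	atomic_init(&f->refcount, 1);
	atomic_init(&f->signaled, signaled);
	return f;
}

/* Shared "gang leader" slot, loosely analogous to adev->gang_submit. */
static _Atomic(struct fence *) gang_submit;

/*
 * Install @gang as the new leader only if the current leader has signaled.
 * Returns NULL on success (or if @gang is already installed), or a reference
 * to the still-running leader that the caller must wait on and then put.
 */
static struct fence *switch_gang(struct fence *gang)
{
	struct fence *old = NULL;
	struct fence *expected;

	fence_get(gang);	/* reference the slot will own if we win */

	do {
		fence_put(old);
		/* The kernel does this lookup under RCU; the toy just loads. */
		old = fence_get(atomic_load(&gang_submit));

		if (old == gang) {		/* already the leader */
			fence_put(gang);
			fence_put(old);
			return NULL;
		}

		if (old && !atomic_load(&old->signaled)) {
			fence_put(gang);
			return old;		/* caller waits on it, then puts it */
		}

		expected = old;
	} while (!atomic_compare_exchange_strong(&gang_submit, &expected, gang));

	fence_put(old);		/* reference the slot held on the old leader */
	fence_put(old);		/* our lookup reference from above */
	return NULL;
}

int main(void)
{
	struct fence *first = fence_new(true);		/* already signaled */
	struct fence *second = fence_new(false);	/* still "running" */
	struct fence *third = fence_new(false);
	struct fence *busy;

	atomic_store(&gang_submit, fence_get(first));

	printf("install second: %s\n", switch_gang(second) ? "busy" : "ok");

	busy = switch_gang(third);	/* fails: second has not signaled yet */
	printf("install third:  %s\n", busy ? "busy" : "ok");
	fence_put(busy);

	atomic_store(&second->signaled, true);
	printf("retry third:    %s\n", switch_gang(third) ? "busy" : "ok");

	fence_put(first);
	fence_put(second);
	fence_put(third);
	fence_put(atomic_exchange(&gang_submit, NULL));
	return 0;
}

The retry loop is the key design point: rather than taking a lock, the writer re-reads the current leader, bails out while it is still running, and only commits the swap when the compare-and-swap confirms nobody else changed the slot in the meantime.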
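
The S0ix hunks disable GFXOFF while the SMC block is re-initialized in amdgpu_device_ip_resume_phase2() and re-enable it at the end of amdgpu_device_resume(), alongside the newly initialized gfx_off_residency and gfx_off_entrycount fields. The sketch below models only the counted-request idea behind amdgpu_gfx_off_ctrl() — a feature stays disallowed while any disable request is outstanding, so disable and enable calls must be paired. All names here are illustrative; the real helper also handles delayed work, locking, and per-ASIC quirks.

#include <assert.h>
#include <stdbool.h>
#include <stdio.h>

/* Request-counted feature gate, in the spirit of gfx_off_req_count. */
struct gfxoff_ctrl {
	unsigned int req_count;		/* outstanding disable requests */
	bool allowed;			/* what we last told the firmware */
};

static void gfxoff_set_allowed(struct gfxoff_ctrl *c, bool allow)
{
	if (c->allowed != allow) {
		c->allowed = allow;
		printf("firmware: gfxoff %s\n", allow ? "allowed" : "disallowed");
	}
}

/* enable=false adds a disable request, enable=true drops one. */
static void gfxoff_ctrl(struct gfxoff_ctrl *c, bool enable)
{
	if (!enable) {
		c->req_count++;
		gfxoff_set_allowed(c, false);
	} else {
		assert(c->req_count > 0);
		if (--c->req_count == 0)
			gfxoff_set_allowed(c, true);
	}
}

int main(void)
{
	/* One request outstanding at init, like gfx_off_req_count = 1. */
	struct gfxoff_ctrl c = { .req_count = 1, .allowed = false };

	gfxoff_ctrl(&c, false);	/* resume phase2: keep gfxoff off while blocks re-init */
	gfxoff_ctrl(&c, true);	/* late init drops the initial request */
	gfxoff_ctrl(&c, true);	/* end of device resume: re-enable after S0ix */
	return 0;
}

Because the gate is a count rather than a flag, the disable taken during IP resume cannot accidentally re-enable GFXOFF early, and forgetting the matching enable in amdgpu_device_resume() would leave it off permanently — which is exactly why the diff adds both halves together.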
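
The display-audio hunks add the pci_dev_put() calls that balance the reference taken by the PCI device lookup, including on the early -ETIMEDOUT return. A small illustrative C program with a toy refcounted lookup shows the same rule: every exit path must drop the lookup reference. The sketch uses the goto-cleanup idiom for a single release point, whereas the actual change adds the put on each path explicitly; none of the names below come from the kernel.

#include <errno.h>
#include <stdbool.h>
#include <stdio.h>
#include <string.h>

/* Toy stand-in for a refcounted device returned by a pci_get_*()-style lookup. */
struct device {
	int refcount;
	char name[16];
	bool can_suspend;
};

static struct device audio_dev = {
	.refcount = 1, .name = "hdmi-audio", .can_suspend = false
};

/* Like pci_get_domain_bus_and_slot(): returns a referenced device or NULL. */
static struct device *device_lookup(const char *name)
{
	if (strcmp(name, audio_dev.name) != 0)
		return NULL;
	audio_dev.refcount++;
	return &audio_dev;
}

/* Like pci_dev_put(): drop the reference taken by the lookup. */
static void device_put(struct device *dev)
{
	if (dev)
		dev->refcount--;
}

/* Same shape as the fixed suspend path: no exit leaks the lookup reference. */
static int suspend_display_audio(void)
{
	struct device *p;
	int ret = 0;

	p = device_lookup("hdmi-audio");
	if (!p)
		return -ENODEV;

	if (!p->can_suspend) {
		ret = -ETIMEDOUT;	/* early error exit */
		goto out_put;
	}

	printf("suspended %s\n", p->name);

out_put:
	device_put(p);			/* single cleanup point for every path */
	return ret;
}

int main(void)
{
	printf("suspend returned %d, refcount now %d\n",
	       suspend_display_audio(), audio_dev.refcount);
	return 0;
}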
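
amdgpu_device_evict_resources() now returns the eviction result instead of only warning, and amdgpu_device_suspend() aborts when it fails rather than continuing with buffers still resident in VRAM. A compact stand-alone sketch of that error-propagation change, using simplified stand-in functions rather than the driver's real call chain:

#include <errno.h>
#include <stdio.h>

/* Was "void" before the change: the error used to be swallowed here. */
static int evict_resources(int fail)
{
	int ret = fail ? -ENOMEM : 0;

	if (ret)
		fprintf(stderr, "evicting device resources failed\n");
	return ret;	/* now reported to the caller */
}

static int device_suspend(int fail_evict)
{
	int r = evict_resources(fail_evict);

	if (r)
		return r;	/* abort the suspend sequence on failure */

	printf("suspend continues: fences, IP blocks, ...\n");
	return 0;
}

int main(void)
{
	printf("good path    -> %d\n", device_suspend(0));
	printf("failing path -> %d\n", device_suspend(1));
	return 0;
}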
