Diffstat (limited to 'drivers/gpu/drm/amd/display')
58 files changed, 537 insertions, 311 deletions
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 1c9c6096e28f..6d2f60c61dec 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -148,6 +148,9 @@ MODULE_FIRMWARE(FIRMWARE_NAVI12_DMCU);
 #define FIRMWARE_DCN_35_DMUB "amdgpu/dcn_3_5_dmcub.bin"
 MODULE_FIRMWARE(FIRMWARE_DCN_35_DMUB);
 
+#define FIRMWARE_DCN_351_DMUB "amdgpu/dcn_3_5_1_dmcub.bin"
+MODULE_FIRMWARE(FIRMWARE_DCN_351_DMUB);
+
 /* Number of bytes in PSP header for firmware. */
 #define PSP_HEADER_BYTES 0x100
 
@@ -1767,6 +1770,9 @@ static int amdgpu_dm_init(struct amdgpu_device *adev)
     if (amdgpu_dc_debug_mask & DC_FORCE_SUBVP_MCLK_SWITCH)
         adev->dm.dc->debug.force_subvp_mclk_switch = true;
 
+    if (amdgpu_dc_debug_mask & DC_ENABLE_DML2)
+        adev->dm.dc->debug.using_dml2 = true;
+
     adev->dm.dc->debug.visual_confirm = amdgpu_dc_visual_confirm;
 
     /* TODO: Remove after DP2 receiver gets proper support of Cable ID feature */
@@ -3041,6 +3047,10 @@ static int dm_resume(void *handle)
     /* Do mst topology probing after resuming cached state*/
     drm_connector_list_iter_begin(ddev, &iter);
     drm_for_each_connector_iter(connector, &iter) {
+
+        if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+            continue;
+
         aconnector = to_amdgpu_dm_connector(connector);
         if (aconnector->dc_link->type != dc_connection_mst_branch ||
             aconnector->mst_root)
@@ -4817,9 +4827,11 @@ static int dm_init_microcode(struct amdgpu_device *adev)
         fw_name_dmub = FIRMWARE_DCN_V3_2_1_DMCUB;
         break;
     case IP_VERSION(3, 5, 0):
-    case IP_VERSION(3, 5, 1):
         fw_name_dmub = FIRMWARE_DCN_35_DMUB;
         break;
+    case IP_VERSION(3, 5, 1):
+        fw_name_dmub = FIRMWARE_DCN_351_DMUB;
+        break;
     default:
         /* ASIC doesn't support DMUB. */
         return 0;
@@ -5918,6 +5930,9 @@ get_highest_refresh_rate_mode(struct amdgpu_dm_connector *aconnector,
         &aconnector->base.probed_modes :
         &aconnector->base.modes;
 
+    if (aconnector->base.connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
+        return NULL;
+
     if (aconnector->freesync_vid_base.clock != 0)
         return &aconnector->freesync_vid_base;
 
@@ -6302,27 +6317,22 @@ create_stream_for_sink(struct drm_connector *connector,
     if (stream->signal == SIGNAL_TYPE_HDMI_TYPE_A)
         mod_build_hf_vsif_infopacket(stream, &stream->vsp_infopacket);
-    else if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
-            stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
-            stream->signal == SIGNAL_TYPE_EDP) {
+
+    if (stream->signal == SIGNAL_TYPE_DISPLAY_PORT ||
+        stream->signal == SIGNAL_TYPE_DISPLAY_PORT_MST ||
+        stream->signal == SIGNAL_TYPE_EDP) {
         //
         // should decide stream support vsc sdp colorimetry capability
         // before building vsc info packet
         //
-        stream->use_vsc_sdp_for_colorimetry = false;
-        if (aconnector->dc_sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT_MST) {
-            stream->use_vsc_sdp_for_colorimetry =
-                aconnector->dc_sink->is_vsc_sdp_colorimetry_supported;
-        } else {
-            if (stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED)
-                stream->use_vsc_sdp_for_colorimetry = true;
-        }
+        stream->use_vsc_sdp_for_colorimetry = stream->link->dpcd_caps.dpcd_rev.raw >= 0x14 &&
+            stream->link->dpcd_caps.dprx_feature.bits.VSC_SDP_COLORIMETRY_SUPPORTED;
+
         if (stream->out_transfer_func->tf == TRANSFER_FUNCTION_GAMMA22)
             tf = TRANSFER_FUNC_GAMMA_22;
         mod_build_vsc_infopacket(stream, &stream->vsc_infopacket, stream->output_color_space, tf);
+        aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
 
-        if (stream->link->psr_settings.psr_feature_enabled)
-            aconnector->psr_skip_count = AMDGPU_DM_PSR_ENTRY_DELAY;
     }
 finish:
     dc_sink_release(sink);
@@ -8761,10 +8771,10 @@ static void amdgpu_dm_commit_audio(struct drm_device *dev,
         if (!drm_atomic_crtc_needs_modeset(new_crtc_state))
             continue;
 
+notify:
         if (connector->connector_type == DRM_MODE_CONNECTOR_WRITEBACK)
             continue;
 
-notify:
         aconnector = to_amdgpu_dm_connector(connector);
 
         mutex_lock(&adev->dm.audio_lock);
@@ -11271,18 +11281,24 @@ void amdgpu_dm_update_freesync_caps(struct drm_connector *connector,
     if (!adev->dm.freesync_module)
         goto update;
 
-    if (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT
-        || sink->sink_signal == SIGNAL_TYPE_EDP) {
+    if (edid && (sink->sink_signal == SIGNAL_TYPE_DISPLAY_PORT ||
+             sink->sink_signal == SIGNAL_TYPE_EDP)) {
         bool edid_check_required = false;
 
-        if (edid) {
-            edid_check_required = is_dp_capable_without_timing_msa(
-                        adev->dm.dc,
-                        amdgpu_dm_connector);
+        if (is_dp_capable_without_timing_msa(adev->dm.dc,
+                             amdgpu_dm_connector)) {
+            if (edid->features & DRM_EDID_FEATURE_CONTINUOUS_FREQ) {
+                freesync_capable = true;
+                amdgpu_dm_connector->min_vfreq = connector->display_info.monitor_range.min_vfreq;
+                amdgpu_dm_connector->max_vfreq = connector->display_info.monitor_range.max_vfreq;
+            } else {
+                edid_check_required = edid->version > 1 ||
+                              (edid->version == 1 &&
+                               edid->revision > 1);
+            }
         }
 
-        if (edid_check_required == true && (edid->version > 1 ||
-           (edid->version == 1 && edid->revision > 1))) {
+        if (edid_check_required) {
             for (i = 0; i < 4; i++) {
 
                 timing = &edid->detailed_timings[i];
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
index 1f08c6564c3b..286ecd28cc6e 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.c
@@ -141,9 +141,8 @@ bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream)
  * amdgpu_dm_psr_enable() - enable psr f/w
  * @stream: stream state
  *
- * Return: true if success
  */
-bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
+void amdgpu_dm_psr_enable(struct dc_stream_state *stream)
 {
     struct dc_link *link = stream->link;
     unsigned int vsync_rate_hz = 0;
@@ -190,7 +189,10 @@ bool amdgpu_dm_psr_enable(struct dc_stream_state *stream)
     if (link->psr_settings.psr_version < DC_PSR_VERSION_SU_1)
         power_opt |= psr_power_opt_z10_static_screen;
 
-    return dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
+    dc_link_set_psr_allow_active(link, &psr_enable, false, false, &power_opt);
+
+    if (link->ctx->dc->caps.ips_support)
+        dc_allow_idle_optimizations(link->ctx->dc, true);
 }
 
 /*
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
index 6806b3c9c84b..1fdfd183c0d9 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_psr.h
@@ -32,7 +32,7 @@
 #define AMDGPU_DM_PSR_ENTRY_DELAY 5
 
 void amdgpu_dm_set_psr_caps(struct dc_link *link);
-bool amdgpu_dm_psr_enable(struct dc_stream_state *stream);
+void amdgpu_dm_psr_enable(struct dc_stream_state *stream);
 bool amdgpu_dm_link_setup_psr(struct dc_stream_state *stream);
 bool amdgpu_dm_psr_disable(struct dc_stream_state *stream);
 bool amdgpu_dm_psr_disable_all(struct amdgpu_display_manager *dm);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
index 16e72d623630..08c494a7a21b 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_wb.c
@@ -76,10 +76,8 @@ static int amdgpu_dm_wb_encoder_atomic_check(struct drm_encoder *encoder,
 
 static int amdgpu_dm_wb_connector_get_modes(struct drm_connector *connector)
 {
-    struct drm_device *dev = connector->dev;
-
-    return drm_add_modes_noedid(connector, dev->mode_config.max_width,
-                    dev->mode_config.max_height);
+    /* Maximum resolution supported by DWB */
+    return drm_add_modes_noedid(connector, 3840, 2160);
 }
 
 static int amdgpu_dm_wb_prepare_job(struct drm_writeback_connector *wb_connector,
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
index 12f3e8aa46d8..6ad4f4efec5d 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn316/dcn316_clk_mgr.c
@@ -99,20 +99,25 @@ static int dcn316_get_active_display_cnt_wa(
     return display_count;
 }
 
-static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context, bool disable)
+static void dcn316_disable_otg_wa(struct clk_mgr *clk_mgr_base, struct dc_state *context,
+        bool safe_to_lower, bool disable)
 {
     struct dc *dc = clk_mgr_base->ctx->dc;
     int i;
 
     for (i = 0; i < dc->res_pool->pipe_count; ++i) {
-        struct pipe_ctx *pipe = &dc->current_state->res_ctx.pipe_ctx[i];
+        struct pipe_ctx *pipe = safe_to_lower
+            ? &context->res_ctx.pipe_ctx[i]
+            : &dc->current_state->res_ctx.pipe_ctx[i];
 
         if (pipe->top_pipe || pipe->prev_odm_pipe)
             continue;
-        if (pipe->stream && (pipe->stream->dpms_off || pipe->plane_state == NULL ||
-                     dc_is_virtual_signal(pipe->stream->signal))) {
+        if (pipe->stream && (pipe->stream->dpms_off || dc_is_virtual_signal(pipe->stream->signal) ||
+                     !pipe->stream->link_enc)) {
             if (disable) {
-                pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+                if (pipe->stream_res.tg && pipe->stream_res.tg->funcs->immediate_disable_crtc)
+                    pipe->stream_res.tg->funcs->immediate_disable_crtc(pipe->stream_res.tg);
+
                 reset_sync_context_for_pipe(dc, context, i);
             } else
                 pipe->stream_res.tg->funcs->enable_crtc(pipe->stream_res.tg);
@@ -207,11 +212,11 @@ static void dcn316_update_clocks(struct clk_mgr *clk_mgr_base,
     }
 
     if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, clk_mgr_base->clks.dispclk_khz)) {
-        dcn316_disable_otg_wa(clk_mgr_base, context, true);
+        dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, true);
 
         clk_mgr_base->clks.dispclk_khz = new_clocks->dispclk_khz;
         dcn316_smu_set_dispclk(clk_mgr, clk_mgr_base->clks.dispclk_khz);
-        dcn316_disable_otg_wa(clk_mgr_base, context, false);
+        dcn316_disable_otg_wa(clk_mgr_base, context, safe_to_lower, false);
 
         update_dispclk = true;
     }
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
index 668f05c8654e..bec252e1dd27 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn32/dcn32_clk_mgr.c
@@ -216,6 +216,16 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
     if (clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz > 1950)
         clk_mgr_base->bw_params->dc_mode_limit.dispclk_mhz = 1950;
 
+    /* DPPCLK */
+    dcn32_init_single_clock(clk_mgr, PPCLK_DPPCLK,
+            &clk_mgr_base->bw_params->clk_table.entries[0].dppclk_mhz,
+            &num_entries_per_clk->num_dppclk_levels);
+    num_levels = num_entries_per_clk->num_dppclk_levels;
+    clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz = dcn30_smu_get_dc_mode_max_dpm_freq(clk_mgr, PPCLK_DPPCLK);
+    //HW recommends limit of 1950 MHz in display clock for all DCN3.2.x
+    if (clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz > 1950)
+        clk_mgr_base->bw_params->dc_mode_limit.dppclk_mhz = 1950;
+
     if (num_entries_per_clk->num_dcfclk_levels &&
             num_entries_per_clk->num_dtbclk_levels &&
             num_entries_per_clk->num_dispclk_levels)
@@ -240,6 +250,10 @@ void dcn32_init_clocks(struct clk_mgr *clk_mgr_base)
             = khz_to_mhz_ceil(clk_mgr_base->ctx->dc->debug.min_dpp_clk_khz);
     }
 
+    for (i = 0; i < num_levels; i++)
+        if (clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz > 1950)
+            clk_mgr_base->bw_params->clk_table.entries[i].dppclk_mhz = 1950;
+
     /* Get UCLK, update bounding box */
     clk_mgr_base->funcs->get_memclk_states_from_smu(clk_mgr_base);
 
diff --git a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
index c378b879c76d..d9c5692c86c2 100644
--- a/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
+++ b/drivers/gpu/drm/amd/display/dc/clk_mgr/dcn35/dcn35_clk_mgr.c
@@ -73,6 +73,14 @@
 #define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_SEL_MASK 0x00000007L
 #define CLK1_CLK2_BYPASS_CNTL__CLK2_BYPASS_DIV_MASK 0x000F0000L
 
+#define regCLK5_0_CLK5_spll_field_8 0x464b
+#define regCLK5_0_CLK5_spll_field_8_BASE_IDX 0
+
+#define CLK5_0_CLK5_spll_field_8__spll_ssc_en__SHIFT 0xd
+#define CLK5_0_CLK5_spll_field_8__spll_ssc_en_MASK 0x00002000L
+
+#define SMU_VER_THRESHOLD 0x5D4A00 //93.74.0
+
 #define REG(reg_name) \
     (ctx->clk_reg_offsets[reg ## reg_name ## _BASE_IDX] + reg ## reg_name)
 
@@ -409,11 +417,25 @@ static void dcn35_dump_clk_registers(struct clk_state_registers_and_bypass *regs
 {
 }
 
+static bool dcn35_is_spll_ssc_enabled(struct clk_mgr *clk_mgr_base)
+{
+    struct clk_mgr_internal *clk_mgr = TO_CLK_MGR_INTERNAL(clk_mgr_base);
+    struct dc_context *ctx = clk_mgr->base.ctx;
+    uint32_t ssc_enable;
+
+    REG_GET(CLK5_0_CLK5_spll_field_8, spll_ssc_en, &ssc_enable);
+
+    return ssc_enable == 1;
+}
+
 static void init_clk_states(struct clk_mgr *clk_mgr)
 {
+    struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
     uint32_t ref_dtbclk = clk_mgr->clks.ref_dtbclk_khz;
 
     memset(&(clk_mgr->clks), 0, sizeof(struct dc_clocks));
 
+    if (clk_mgr_int->smu_ver >= SMU_VER_THRESHOLD)
+        clk_mgr->clks.dtbclk_en = true; // request DTBCLK disable on first commit
     clk_mgr->clks.ref_dtbclk_khz = ref_dtbclk;  // restore ref_dtbclk
     clk_mgr->clks.p_state_change_support = true;
     clk_mgr->clks.prev_p_state_change_support = true;
@@ -423,7 +445,16 @@ static void init_clk_states(struct clk_mgr *clk_mgr)
 
 void dcn35_init_clocks(struct clk_mgr *clk_mgr)
 {
+    struct clk_mgr_internal *clk_mgr_int = TO_CLK_MGR_INTERNAL(clk_mgr);
+
     init_clk_states(clk_mgr);
+
+    // to adjust dp_dto reference clock if ssc is enable otherwise to apply dprefclk
+    if (dcn35_is_spll_ssc_enabled(clk_mgr))
+        clk_mgr->dp_dto_source_clock_in_khz =
+            dce_adjust_dp_ref_freq_for_ss(clk_mgr_int, clk_mgr->dprefclk_khz);
+    else
+        clk_mgr->dp_dto_source_clock_in_khz = clk_mgr->dprefclk_khz;
+
 }
 static struct clk_bw_params dcn35_bw_params = {
     .vram_type = Ddr4MemType,
@@ -512,6 +543,28 @@ static DpmClocks_t_dcn35 dummy_clocks;
 
 static struct dcn35_watermarks dummy_wms = { 0 };
 
+static struct dcn35_ss_info_table ss_info_table = {
+    .ss_divider = 1000,
+    .ss_percentage = {0, 0, 375, 375, 375}
+};
+
+static void dcn35_read_ss_info_from_lut(struct clk_mgr_internal *clk_mgr)
+{
+    struct dc_context *ctx = clk_mgr->base.ctx;
+    uint32_t clock_source;
+
+    REG_GET(CLK1_CLK2_BYPASS_CNTL, CLK2_BYPASS_SEL, &clock_source);
+    // If it's DFS mode, clock_source is 0.
+    if (dcn35_is_spll_ssc_enabled(&clk_mgr->base) && (clock_source < ARRAY_SIZE(ss_info_table.ss_percentage))) {
+        clk_mgr->dprefclk_ss_percentage = ss_info_table.ss_percentage[clock_source];
+
+        if (clk_mgr->dprefclk_ss_percentage != 0) {
+            clk_mgr->ss_on_dprefclk = true;
+            clk_mgr->dprefclk_ss_divider = ss_info_table.ss_divider;
+        }
+    }
+}
+
 static void dcn35_build_watermark_ranges(struct clk_bw_params *bw_params, struct dcn35_watermarks *table)
 {
     int i, num_valid_sets;
@@ -709,7 +762,7 @@ static void dcn35_clk_mgr_helper_populate_bw_params(struct clk_mgr_internal *clk
         clock_table->NumFclkLevelsEnabled;
     max_fclk = find_max_clk_value(clock_table->FclkClocks_Freq, num_fclk);
 
-    num_dcfclk = (clock_table->NumFclkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS :
+    num_dcfclk = (clock_table->NumDcfClkLevelsEnabled > NUM_DCFCLK_DPM_LEVELS) ? NUM_DCFCLK_DPM_LEVELS :
         clock_table->NumDcfClkLevelsEnabled;
     for (i = 0; i < num_dcfclk; i++) {
         int j;
@@ -1056,6 +1109,8 @@ void dcn35_clk_mgr_construct(
     dce_clock_read_ss_info(&clk_mgr->base);
     /*when clk src is from FCH, it could have ss, same clock src as DPREF clk*/
 
+    dcn35_read_ss_info_from_lut(&clk_mgr->base);
+
     clk_mgr->base.base.bw_params = &dcn35_bw_params;
 
     if (clk_mgr->base.base.ctx->dc->debug.pstate_enabled) {
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 5211c1c0f3c0..03b554e912a2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -1302,6 +1302,54 @@ static void disable_vbios_mode_if_required(
     }
 }
 
+/**
+ * wait_for_blank_complete - wait for all active OPPs to finish pending blank
+ * pattern updates
+ *
+ * @dc: [in] dc reference
+ * @context: [in] hardware context in use
+ */
+static void wait_for_blank_complete(struct dc *dc,
+        struct dc_state *context)
+{
+    struct pipe_ctx *opp_head;
+    struct dce_hwseq *hws = dc->hwseq;
+    int i;
+
+    if (!hws->funcs.wait_for_blank_complete)
+        return;
+
+    for (i = 0; i < MAX_PIPES; i++) {
+        opp_head = &context->res_ctx.pipe_ctx[i];
+
+        if (!resource_is_pipe_type(opp_head, OPP_HEAD) ||
+                dc_state_get_pipe_subvp_type(context, opp_head) == SUBVP_PHANTOM)
+            continue;
+
+        hws->funcs.wait_for_blank_complete(opp_head->stream_res.opp);
+    }
+}
+
+static void wait_for_odm_update_pending_complete(struct dc *dc, struct dc_state *context)
+{
+    struct pipe_ctx *otg_master;
+    struct timing_generator *tg;
+    int i;
+
+    for (i = 0; i < MAX_PIPES; i++) {
+        otg_master = &context->res_ctx.pipe_ctx[i];
+        if (!resource_is_pipe_type(otg_master, OTG_MASTER) ||
+                dc_state_get_pipe_subvp_type(context, otg_master) == SUBVP_PHANTOM)
+            continue;
+        tg = otg_master->stream_res.tg;
+        if (tg->funcs->wait_odm_doublebuffer_pending_clear)
+            tg->funcs->wait_odm_doublebuffer_pending_clear(tg);
+    }
+
+    /* ODM update may require to reprogram blank pattern for each OPP */
+    wait_for_blank_complete(dc, context);
+}
+
 static void wait_for_no_pipes_pending(struct dc *dc, struct dc_state *context)
 {
     int i;
@@ -1993,6 +2041,11 @@ static enum dc_status dc_commit_state_no_check(struct dc *dc, struct dc_state *c
             context->stream_count == 0) {
         /* Must wait for no flips to be pending before doing optimize bw */
         wait_for_no_pipes_pending(dc, context);
+        /*
+         * optimized dispclk depends on ODM setup. Need to wait for ODM
+         * update pending complete before optimizing bandwidth.
+         */
+        wait_for_odm_update_pending_complete(dc, context);
         /* pplib is notified if disp_num changed */
         dc->hwss.optimize_bandwidth(dc, context);
         /* Need to do otg sync again as otg could be out of sync due to otg
@@ -2971,7 +3024,8 @@ static void backup_planes_and_stream_state(
         scratch->blend_tf[i] = *status->plane_states[i]->blend_tf;
     }
     scratch->stream_state = *stream;
-    scratch->out_transfer_func = *stream->out_transfer_func;
+    if (stream->out_transfer_func)
+        scratch->out_transfer_func = *stream->out_transfer_func;
 }
 
 static void restore_planes_and_stream_state(
@@ -2993,7 +3047,8 @@ static void restore_planes_and_stream_state(
         *status->plane_states[i]->blend_tf = scratch->blend_tf[i];
     }
     *stream = scratch->stream_state;
-    *stream->out_transfer_func = scratch->out_transfer_func;
+    if (stream->out_transfer_func)
+        *stream->out_transfer_func = scratch->out_transfer_func;
 }
 
 static bool update_planes_and_stream_state(struct dc *dc,
@@ -3270,6 +3325,9 @@ static bool dc_dmub_should_send_dirty_rect_cmd(struct dc *dc, struct dc_stream_s
     if (stream->link->replay_settings.config.replay_supported)
         return true;
 
+    if (stream->ctx->dce_version >= DCN_VERSION_3_5 && stream->abm_level)
+        return true;
+
     return false;
 }
 
@@ -3493,7 +3551,7 @@ static void commit_planes_for_stream_fast(struct dc *dc,
     top_pipe_to_program->stream->update_flags.raw = 0;
 }
 
-static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state *dc_context)
+static void wait_for_outstanding_hw_updates(struct dc *dc, struct dc_state *dc_context)
 {
 /*
  * This function calls HWSS to wait for any potentially double buffered
@@ -3531,6 +3589,7 @@ static void wait_for_outstanding_hw_updates(struct dc *dc, const struct dc_state
             }
         }
     }
+    wait_for_odm_update_pending_complete(dc, dc_context);
 }
 
 static void commit_planes_for_stream(struct dc *dc,
@@ -4844,22 +4903,16 @@ void dc_exit_ips_for_hw_access(struct dc *dc)
 
 bool dc_dmub_is_ips_idle_state(struct dc *dc)
 {
-    uint32_t idle_state = 0;
-
     if (dc->debug.disable_idle_power_optimizations)
         return false;
 
     if (!dc->caps.ips_support || (dc->config.disable_ips == DMUB_IPS_DISABLE_ALL))
        return false;
 
-    if (dc->hwss.get_idle_state)
-        idle_state = dc->hwss.get_idle_state(dc);
-
-    if (!(idle_state & DMUB_IPS1_ALLOW_MASK) ||
-        !(idle_state & DMUB_IPS2_ALLOW_MASK))
-        return true;
+    if (!dc->ctx->dmub_srv)
+        return false;
 
-    return false;
+    return dc->ctx->dmub_srv->idle_allowed;
 }
 
 /* set min and max memory clock to lowest and highest DPM level, respectively */
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_state.c b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
index 180ac47868c2..61986e5cb491 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_state.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_state.c
@@ -334,7 +334,8 @@ static void dc_state_free(struct kref *kref)
 
 void dc_state_release(struct dc_state *state)
 {
-    kref_put(&state->refcount, dc_state_free);
+    if (state != NULL)
+        kref_put(&state->refcount, dc_state_free);
 }
 /*
  * dc_state_add_stream() - Add a new dc_stream_state to a dc_state.
@@ -435,6 +436,15 @@ bool dc_state_add_plane(
         goto out;
     }
 
+    if (stream_status->plane_count == 0 && dc->config.enable_windowed_mpo_odm)
+        /* ODM combine could prevent us from supporting more planes
+         * we will reset ODM slice count back to 1 when all planes have
+         * been removed to maximize the amount of planes supported when
+         * new planes are added.
+         */
+        resource_update_pipes_for_stream_with_slice_count(
+                state, dc->current_state, dc->res_pool, stream, 1);
+
     otg_master_pipe = resource_get_otg_master_for_stream(
             &state->res_ctx, stream);
     if (otg_master_pipe)
diff --git a/drivers/gpu/drm/amd/display/dc/dc_types.h b/drivers/gpu/drm/amd/display/dc/dc_types.h
index 9900dda2eef5..be2ac5c442a4 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_types.h
@@ -1085,9 +1085,9 @@ struct replay_settings {
     /* SMU optimization is enabled */
     bool replay_smu_opt_enable;
     /* Current Coasting vtotal */
-    uint16_t coasting_vtotal;
+    uint32_t coasting_vtotal;
     /* Coasting vtotal table */
-    uint16_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
+    uint32_t coasting_vtotal_table[PR_COASTING_TYPE_NUM];
     /* Maximum link off frame count */
     enum replay_link_off_frame_count_level link_off_frame_count_level;
     /* Replay pseudo vtotal for abm + ips on full screen video which can improve ips residency */
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 970644b695cd..b5e0289d2fe8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -976,7 +976,10 @@ static bool dcn31_program_pix_clk(
     struct bp_pixel_clock_parameters bp_pc_params = {0};
     enum transmitter_color_depth bp_pc_colour_depth = TRANSMITTER_COLOR_DEPTH_24;
 
-    if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
+    // Apply ssed(spread spectrum) dpref clock for edp only.
+    if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0
+        && pix_clk_params->signal_type == SIGNAL_TYPE_EDP
+        && encoding == DP_8b_10b_ENCODING)
         dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
     // For these signal types Driver to program DP_DTO without calling VBIOS Command table
     if (dc_is_dp_signal(pix_clk_params->signal_type) || dc_is_virtual_signal(pix_clk_params->signal_type)) {
@@ -1093,9 +1096,6 @@ static bool get_pixel_clk_frequency_100hz(
     unsigned int modulo_hz = 0;
     unsigned int dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dprefclk_khz;
 
-    if (clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz != 0)
-        dp_dto_ref_khz = clock_source->ctx->dc->clk_mgr->dp_dto_source_clock_in_khz;
-
     if (clock_source->id == CLOCK_SOURCE_ID_DP_DTO) {
         clock_hz = REG_READ(PHASE[inst]);
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/Makefile b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
index f0777d61c2cb..c307f040e48f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce110/Makefile
@@ -23,7 +23,7 @@
 # Makefile for the 'controller' sub-component of DAL.
 # It provides the control and status of HW CRTC block.
 
-CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce110/dce110_resource.o = -Wno-override-init
 
 DCE110 = dce110_timing_generator.o \
 dce110_compressor.o dce110_opp_regamma_v.o \
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/Makefile b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
index 7e92effec894..683866797709 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce112/Makefile
@@ -23,7 +23,7 @@
 # Makefile for the 'controller' sub-component of DAL.
 # It provides the control and status of HW CRTC block.
 
-CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce112/dce112_resource.o = -Wno-override-init
 
 DCE112 = dce112_compressor.o
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/Makefile b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
index 1e3ef68a452a..8f508e662748 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce120/Makefile
@@ -24,7 +24,7 @@
 # It provides the control and status of HW CRTC block.
 
 
-CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce120/dce120_resource.o = -Wno-override-init
 
 DCE120 = dce120_timing_generator.o
 
diff --git a/drivers/gpu/drm/amd/display/dc/dce60/Makefile b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
index fee331accc0e..eede83ad91fa 100644
--- a/drivers/gpu/drm/amd/display/dc/dce60/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce60/Makefile
@@ -23,7 +23,7 @@
 # Makefile for the 'controller' sub-component of DAL.
 # It provides the control and status of HW CRTC block.
 
-CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce60/dce60_resource.o = -Wno-override-init
 
 DCE60 = dce60_timing_generator.o dce60_hw_sequencer.o \
 dce60_resource.o
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/Makefile b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
index 7eefffbdc925..fba189d26652 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce80/Makefile
@@ -23,7 +23,7 @@
 # Makefile for the 'controller' sub-component of DAL.
 # It provides the control and status of HW CRTC block.
 
-CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = $(call cc-disable-warning, override-init)
+CFLAGS_$(AMDDALPATH)/dc/dce80/dce80_resource.o = -Wno-override-init
 
 DCE80 = dce80_timing_generator.o
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
index 48a40dcc7050..5838a11efd00 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_opp.c
@@ -384,6 +384,7 @@ static const struct opp_funcs dcn10_opp_funcs = {
         .opp_set_disp_pattern_generator = NULL,
         .opp_program_dpg_dimensions = NULL,
         .dpg_is_blanked = NULL,
+        .dpg_is_pending = NULL,
         .opp_destroy = opp1_destroy
 };
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
index 0784d0198661..fbf1b6370eb2 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.c
@@ -337,6 +337,19 @@ bool opp2_dpg_is_blanked(struct output_pixel_processor *opp)
         (double_buffer_pending == 0);
 }
 
+bool opp2_dpg_is_pending(struct output_pixel_processor *opp)
+{
+    struct dcn20_opp *oppn20 = TO_DCN20_OPP(opp);
+    uint32_t double_buffer_pending;
+    uint32_t dpg_en;
+
+    REG_GET(DPG_CONTROL, DPG_EN, &dpg_en);
+
+    REG_GET(DPG_STATUS, DPG_DOUBLE_BUFFER_PENDING, &double_buffer_pending);
+
+    return (dpg_en == 1 && double_buffer_pending == 1);
+}
+
 void opp2_program_left_edge_extra_pixel (
         struct output_pixel_processor *opp,
         bool count)
@@ -363,6 +376,7 @@ static struct opp_funcs dcn20_opp_funcs = {
         .opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
         .opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
         .dpg_is_blanked = opp2_dpg_is_blanked,
+        .dpg_is_pending = opp2_dpg_is_pending,
         .opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
         .opp_destroy = opp1_destroy,
         .opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
index 3ab221bdd27d..8f186abd558d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn20/dcn20_opp.h
@@ -159,6 +159,8 @@ void opp2_program_dpg_dimensions(
 
 bool opp2_dpg_is_blanked(struct output_pixel_processor *opp);
 
+bool opp2_dpg_is_pending(struct output_pixel_processor *opp);
+
 void opp2_dpg_set_blank_color(
     struct output_pixel_processor *opp,
     const struct tg_color *color);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c
index 8e77db46a409..6a71ba3dfc63 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn201/dcn201_opp.c
@@ -50,6 +50,7 @@ static struct opp_funcs dcn201_opp_funcs = {
         .opp_set_disp_pattern_generator = opp2_set_disp_pattern_generator,
         .opp_program_dpg_dimensions = opp2_program_dpg_dimensions,
         .dpg_is_blanked = opp2_dpg_is_blanked,
+        .dpg_is_pending = opp2_dpg_is_pending,
         .opp_dpg_set_blank_color = opp2_dpg_set_blank_color,
         .opp_destroy = opp1_destroy,
         .opp_program_left_edge_extra_pixel = opp2_program_left_edge_extra_pixel,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
index bf3386cd444d..5ebb57303130 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.c
@@ -44,6 +44,36 @@
 #define NUM_ELEMENTS(a) (sizeof(a) / sizeof((a)[0]))
 
 
+void mpc3_mpc_init(struct mpc *mpc)
+{
+    struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+    int opp_id;
+
+    mpc1_mpc_init(mpc);
+
+    for (opp_id = 0; opp_id < MAX_OPP; opp_id++) {
+        if (REG(MUX[opp_id]))
+            /* disable mpc out rate and flow control */
+            REG_UPDATE_2(MUX[opp_id], MPC_OUT_RATE_CONTROL_DISABLE,
+                    1, MPC_OUT_FLOW_CONTROL_COUNT, 0);
+    }
+}
+
+void mpc3_mpc_init_single_inst(struct mpc *mpc, unsigned int mpcc_id)
+{
+    struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
+
+    mpc1_mpc_init_single_inst(mpc, mpcc_id);
+
+    /* assuming mpc out mux is connected to opp with the same index at this
+     * point in time (e.g. transitioning from vbios to driver)
+     */
+    if (mpcc_id < MAX_OPP && REG(MUX[mpcc_id]))
+        /* disable mpc out rate and flow control */
+        REG_UPDATE_2(MUX[mpcc_id], MPC_OUT_RATE_CONTROL_DISABLE,
+                1, MPC_OUT_FLOW_CONTROL_COUNT, 0);
+}
+
 bool mpc3_is_dwb_idle(
     struct mpc *mpc,
     int dwb_id)
@@ -80,25 +110,6 @@ void mpc3_disable_dwb_mux(
         MPC_DWB0_MUX, 0xf);
 }
 
-void mpc3_set_out_rate_control(
-    struct mpc *mpc,
-    int opp_id,
-    bool enable,
-    bool rate_2x_mode,
-    struct mpc_dwb_flow_control *flow_control)
-{
-    struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
-
-    REG_UPDATE_2(MUX[opp_id],
-            MPC_OUT_RATE_CONTROL_DISABLE, !enable,
-            MPC_OUT_RATE_CONTROL, rate_2x_mode);
-
-    if (flow_control)
-        REG_UPDATE_2(MUX[opp_id],
-            MPC_OUT_FLOW_CONTROL_MODE, flow_control->flow_ctrl_mode,
-            MPC_OUT_FLOW_CONTROL_COUNT, flow_control->flow_ctrl_cnt1);
-}
-
 enum dc_lut_mode mpc3_get_ogam_current(struct mpc *mpc, int mpcc_id)
 {
     /*Contrary to DCN2 and DCN1 wherein a single status register field holds this info;
@@ -1490,8 +1501,8 @@ static const struct mpc_funcs dcn30_mpc_funcs = {
     .read_mpcc_state = mpc3_read_mpcc_state,
     .insert_plane = mpc1_insert_plane,
     .remove_mpcc = mpc1_remove_mpcc,
-    .mpc_init = mpc1_mpc_init,
-    .mpc_init_single_inst = mpc1_mpc_init_single_inst,
+    .mpc_init = mpc3_mpc_init,
+    .mpc_init_single_inst = mpc3_mpc_init_single_inst,
    .update_blending = mpc2_update_blending,
     .cursor_lock = mpc1_cursor_lock,
     .get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
@@ -1508,7 +1519,6 @@ static const struct mpc_funcs dcn30_mpc_funcs = {
     .set_dwb_mux = mpc3_set_dwb_mux,
     .disable_dwb_mux = mpc3_disable_dwb_mux,
     .is_dwb_idle = mpc3_is_dwb_idle,
-    .set_out_rate_control = mpc3_set_out_rate_control,
     .set_gamut_remap = mpc3_set_gamut_remap,
     .program_shaper = mpc3_program_shaper,
     .acquire_rmu = mpcc3_acquire_rmu,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
index 9cb96ae95a2f..ce93003dae01 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
+++ b/drivers/gpu/drm/amd/display/dc/dcn30/dcn30_mpc.h
@@ -1007,6 +1007,13 @@ void dcn30_mpc_construct(struct dcn30_mpc *mpc30,
     int num_mpcc,
     int num_rmu);
 
+void mpc3_mpc_init(
+    struct mpc *mpc);
+
+void mpc3_mpc_init_single_inst(
+    struct mpc *mpc,
+    unsigned int mpcc_id);
+
 bool mpc3_program_shaper(
         struct mpc *mpc,
         const struct pwl_params *params,
@@ -1078,13 +1085,6 @@ bool mpc3_is_dwb_idle(
     struct mpc *mpc,
     int dwb_id);
 
-void mpc3_set_out_rate_control(
-    struct mpc *mpc,
-    int opp_id,
-    bool enable,
-    bool rate_2x_mode,
-    struct mpc_dwb_flow_control *flow_control);
-
 void mpc3_power_on_ogam_lut(
     struct mpc *mpc, int mpcc_id,
     bool power_on);
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
index e224a028d68a..8a0460e86309 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_dio_link_encoder.c
@@ -248,14 +248,12 @@ void dcn32_link_encoder_construct(
     enc10->base.hpd_source = init_data->hpd_source;
     enc10->base.connector = init_data->connector;
 
-    enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
-
-    enc10->base.features = *enc_features;
     if (enc10->base.connector.id == CONNECTOR_ID_USBC)
         enc10->base.features.flags.bits.DP_IS_USB_C = 1;
 
-    if (enc10->base.connector.id == CONNECTOR_ID_USBC)
-        enc10->base.features.flags.bits.DP_IS_USB_C = 1;
+    enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
+
+    enc10->base.features = *enc_features;
 
     enc10->base.transmitter = init_data->transmitter;
 
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
index e789e654c387..e408e859b355 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_mpc.c
@@ -47,7 +47,7 @@ void mpc32_mpc_init(struct mpc *mpc)
     struct dcn30_mpc *mpc30 = TO_DCN30_MPC(mpc);
     int mpcc_id;
 
-    mpc1_mpc_init(mpc);
+    mpc3_mpc_init(mpc);
 
     if (mpc->ctx->dc->debug.enable_mem_low_power.bits.mpc) {
         if (mpc30->mpc_mask->MPCC_MCM_SHAPER_MEM_LOW_PWR_MODE && mpc30->mpc_mask->MPCC_MCM_3DLUT_MEM_LOW_PWR_MODE) {
@@ -991,7 +991,7 @@ static const struct mpc_funcs dcn32_mpc_funcs = {
     .insert_plane = mpc1_insert_plane,
     .remove_mpcc = mpc1_remove_mpcc,
     .mpc_init = mpc32_mpc_init,
-    .mpc_init_single_inst = mpc1_mpc_init_single_inst,
+    .mpc_init_single_inst = mpc3_mpc_init_single_inst,
     .update_blending = mpc2_update_blending,
     .cursor_lock = mpc1_cursor_lock,
     .get_mpcc_for_dpp = mpc1_get_mpcc_for_dpp,
@@ -1008,7 +1008,6 @@ static const struct mpc_funcs dcn32_mpc_funcs = {
     .set_dwb_mux = mpc3_set_dwb_mux,
     .disable_dwb_mux = mpc3_disable_dwb_mux,
     .is_dwb_idle = mpc3_is_dwb_idle,
-    .set_out_rate_control = mpc3_set_out_rate_control,
     .set_gamut_remap = mpc3_set_gamut_remap,
     .program_shaper = mpc32_program_shaper,
     .program_3dlut = mpc32_program_3dlut,
diff --git a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
index 87760600e154..f98def6c8c2d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn32/dcn32_resource_helpers.c
@@ -782,3 +782,9 @@ void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc
         pipe_cnt++;
     }
 }
+
+void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context)
+{
+    if (dcn32_subvp_in_use(dc, context) && context->bw_ctx.bw.dcn.clk.dcfclk_khz <= MIN_SUBVP_DCFCLK_KHZ)
+        context->bw_ctx.bw.dcn.clk.dcfclk_khz = MIN_SUBVP_DCFCLK_KHZ;
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
index 81e349d5835b..da94e5309fba 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn35/dcn35_dio_link_encoder.c
@@ -184,6 +184,8 @@ void dcn35_link_encoder_construct(
     enc10->base.hpd_source = init_data->hpd_source;
     enc10->base.connector = init_data->connector;
 
+    if (enc10->base.connector.id == CONNECTOR_ID_USBC)
+        enc10->base.features.flags.bits.DP_IS_USB_C = 1;
 
     enc10->base.preferred_engine = ENGINE_ID_UNKNOWN;
 
@@ -238,8 +240,6 @@ void dcn35_link_encoder_construct(
     }
 
     enc10->base.features.flags.bits.HDMI_6GB_EN = 1;
-    if (enc10->base.connector.id == CONNECTOR_ID_USBC)
-        enc10->base.features.flags.bits.DP_IS_USB_C = 1;
 
     if (bp_funcs->get_connector_speed_cap_info)
         result = bp_funcs->get_connector_speed_cap_info(enc10->base.ctx->dc_bios,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
index b49e1dc9d8ba..a0a65e099104 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn32/dcn32_fpu.c
@@ -623,6 +623,7 @@ static bool dcn32_assign_subvp_pipe(struct dc *dc,
          *   - Not TMZ surface
          */
         if (pipe->plane_state && !pipe->top_pipe && !dcn32_is_center_timing(pipe) &&
+                !(pipe->stream->timing.pix_clk_100hz / 10000 > DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ) &&
                 (!dcn32_is_psr_capable(pipe) || (context->stream_count == 1 && dc->caps.dmub_caps.subvp_psr)) &&
                 dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_NONE &&
                 (refresh_rate < 120 || dcn32_allow_subvp_high_refresh_rate(dc, context, pipe)) &&
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
index 80bebfc268db..21e0eef3269b 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn35/dcn35_fpu.c
@@ -166,8 +166,8 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_5_soc = {
     .num_states = 5,
     .sr_exit_time_us = 28.0,
     .sr_enter_plus_exit_time_us = 30.0,
-    .sr_exit_z8_time_us = 210.0,
-    .sr_enter_plus_exit_z8_time_us = 320.0,
+    .sr_exit_z8_time_us = 250.0,
+    .sr_enter_plus_exit_z8_time_us = 350.0,
     .fclk_change_latency_us = 24.0,
     .usr_retraining_latency_us = 2,
     .writeback_latency_us = 12.0,
diff --git a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
index dc9e1b758ed6..b3ffab77cf88 100644
--- a/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
+++ b/drivers/gpu/drm/amd/display/dc/dml/dcn351/dcn351_fpu.c
@@ -98,55 +98,114 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = {
     .clock_limits = {
         {
             .state = 0,
-            .dispclk_mhz = 1200.0,
-            .dppclk_mhz = 1200.0,
+            .dcfclk_mhz = 400.0,
+            .fabricclk_mhz = 400.0,
+            .socclk_mhz = 600.0,
+            .dram_speed_mts = 3200.0,
+            .dispclk_mhz = 600.0,
+            .dppclk_mhz = 600.0,
             .phyclk_mhz = 600.0,
             .phyclk_d18_mhz = 667.0,
-            .dscclk_mhz = 186.0,
+            .dscclk_mhz = 200.0,
             .dtbclk_mhz = 600.0,
         },
         {
             .state = 1,
-            .dispclk_mhz = 1200.0,
-            .dppclk_mhz = 1200.0,
+            .dcfclk_mhz = 600.0,
+            .fabricclk_mhz = 1000.0,
+            .socclk_mhz = 733.0,
+            .dram_speed_mts = 6400.0,
+            .dispclk_mhz = 800.0,
+            .dppclk_mhz = 800.0,
             .phyclk_mhz = 810.0,
             .phyclk_d18_mhz = 667.0,
-            .dscclk_mhz = 209.0,
+            .dscclk_mhz = 266.7,
             .dtbclk_mhz = 600.0,
         },
         {
             .state = 2,
-            .dispclk_mhz = 1200.0,
-            .dppclk_mhz = 1200.0,
+            .dcfclk_mhz = 738.0,
+            .fabricclk_mhz = 1200.0,
+            .socclk_mhz = 880.0,
+            .dram_speed_mts = 7500.0,
+            .dispclk_mhz = 800.0,
+            .dppclk_mhz = 800.0,
             .phyclk_mhz = 810.0,
             .phyclk_d18_mhz = 667.0,
-            .dscclk_mhz = 209.0,
+            .dscclk_mhz = 266.7,
             .dtbclk_mhz = 600.0,
         },
         {
             .state = 3,
-            .dispclk_mhz = 1200.0,
-            .dppclk_mhz = 1200.0,
+            .dcfclk_mhz = 800.0,
+            .fabricclk_mhz = 1400.0,
+            .socclk_mhz = 978.0,
+            .dram_speed_mts = 7500.0,
+            .dispclk_mhz = 960.0,
+            .dppclk_mhz = 960.0,
             .phyclk_mhz = 810.0,
             .phyclk_d18_mhz = 667.0,
-            .dscclk_mhz = 371.0,
+            .dscclk_mhz = 320.0,
             .dtbclk_mhz = 600.0,
         },
         {
             .state = 4,
+            .dcfclk_mhz = 873.0,
+            .fabricclk_mhz = 1600.0,
+            .socclk_mhz = 1100.0,
+            .dram_speed_mts = 8533.0,
+            .dispclk_mhz = 1066.7,
+            .dppclk_mhz = 1066.7,
+            .phyclk_mhz = 810.0,
+            .phyclk_d18_mhz = 667.0,
+            .dscclk_mhz = 355.6,
+            .dtbclk_mhz = 600.0,
+        },
+        {
+            .state = 5,
+            .dcfclk_mhz = 960.0,
+            .fabricclk_mhz = 1700.0,
+            .socclk_mhz = 1257.0,
+            .dram_speed_mts = 8533.0,
             .dispclk_mhz = 1200.0,
             .dppclk_mhz = 1200.0,
             .phyclk_mhz = 810.0,
             .phyclk_d18_mhz = 667.0,
-            .dscclk_mhz = 417.0,
+            .dscclk_mhz = 400.0,
+            .dtbclk_mhz = 600.0,
+        },
+        {
+            .state = 6,
+            .dcfclk_mhz = 1067.0,
+            .fabricclk_mhz = 1850.0,
+            .socclk_mhz = 1257.0,
+            .dram_speed_mts = 8533.0,
+            .dispclk_mhz = 1371.4,
+            .dppclk_mhz = 1371.4,
+            .phyclk_mhz = 810.0,
+            .phyclk_d18_mhz = 667.0,
+            .dscclk_mhz = 457.1,
+            .dtbclk_mhz = 600.0,
+        },
+        {
+            .state = 7,
+            .dcfclk_mhz = 1200.0,
+            .fabricclk_mhz = 2000.0,
+            .socclk_mhz = 1467.0,
+            .dram_speed_mts = 8533.0,
+            .dispclk_mhz = 1600.0,
+            .dppclk_mhz = 1600.0,
+            .phyclk_mhz = 810.0,
+            .phyclk_d18_mhz = 667.0,
+            .dscclk_mhz = 533.3,
             .dtbclk_mhz = 600.0,
         },
     },
-    .num_states = 5,
+    .num_states = 8,
     .sr_exit_time_us = 28.0,
     .sr_enter_plus_exit_time_us = 30.0,
-    .sr_exit_z8_time_us = 210.0,
-    .sr_enter_plus_exit_z8_time_us = 320.0,
+    .sr_exit_z8_time_us = 250.0,
+    .sr_enter_plus_exit_z8_time_us = 350.0,
     .fclk_change_latency_us = 24.0,
     .usr_retraining_latency_us = 2,
     .writeback_latency_us = 12.0,
@@ -177,6 +236,9 @@ struct _vcs_dpi_soc_bounding_box_st dcn3_51_soc = {
     .do_urgent_latency_adjustment = 0,
     .urgent_latency_adjustment_fabric_clock_component_us = 0,
     .urgent_latency_adjustment_fabric_clock_reference_mhz = 0,
+    .num_chans = 4,
+    .dram_clock_change_latency_us = 11.72,
+    .dispclk_dppclk_vco_speed_mhz = 2400.0,
 };
 
 /*
@@ -340,6 +402,8 @@ void dcn351_update_bw_bounding_box_fpu(struct dc *dc,
                 clock_limits[i].socclk_mhz;
             dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].memclk_mhz =
                 clk_table->entries[i].memclk_mhz * clk_table->entries[i].wck_ratio;
+            dc->dml2_options.bbox_overrides.clks_table.clk_entries[i].dtbclk_mhz =
+                clock_limits[i].dtbclk_mhz;
             dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dcfclk_levels =
                 clk_table->num_entries;
             dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_fclk_levels =
@@ -352,6 +416,8 @@ void dcn351_update_bw_bounding_box_fpu(struct dc *dc,
                 clk_table->num_entries;
             dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_memclk_levels =
                 clk_table->num_entries;
+            dc->dml2_options.bbox_overrides.clks_table.num_entries_per_clk.num_dtbclk_levels =
+                clk_table->num_entries;
         }
     }
 
@@ -551,6 +617,7 @@ void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context)
         if (context->res_ctx.pipe_ctx[i].plane_state)
             plane_count++;
     }
+
     /*dcn351 does not support z9/z10*/
     if (context->stream_count == 0 || plane_count == 0) {
         support = DCN_ZSTATE_SUPPORT_ALLOW_Z8_ONLY;
@@ -564,11 +631,9 @@ void dcn351_decide_zstate_support(struct dc *dc, struct dc_state *context)
             dc->debug.minimum_z8_residency_time > 0 ? dc->debug.minimum_z8_residency_time : 1000;
         bool allow_z8 = context->bw_ctx.dml.vba.StutterPeriod > (double)minmum_z8_residency;
 
-        /*for psr1/psr-su, we allow z8 and z10 based on latency, for replay with IPS enabled, it will enter ips2*/
-        if (is_pwrseq0 && (is_psr || is_replay))
+    if (is_pwrseq0 && (is_psr || is_replay))
             support = allow_z8 ? allow_z8 : DCN_ZSTATE_SUPPORT_DISALLOW;
-    }
 
     context->bw_ctx.bw.dcn.clk.zstate_support = support;
 }
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
index 1ba6933d2b36..a20f28a5d2e7 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_translation_helper.c
@@ -228,17 +228,13 @@ void dml2_init_socbb_params(struct dml2_context *dml2, const struct dc *in_dc, s
         break;
 
     case dml_project_dcn35:
+    case dml_project_dcn351:
         out->num_chans = 4;
         out->round_trip_ping_latency_dcfclk_cycles = 106;
         out->smn_latency_us = 2;
         out->dispclk_dppclk_vco_speed_mhz = 3600;
         break;
 
-    case dml_project_dcn351:
-        out->num_chans = 16;
-        out->round_trip_ping_latency_dcfclk_cycles = 1100;
-        out->smn_latency_us = 2;
-        break;
     }
     /* ---Overrides if available--- */
     if (dml2->config.bbox_overrides.dram_num_chan)
@@ -824,13 +820,25 @@ static struct scaler_data get_scaler_data_for_plane(const struct dc_plane_state
 
 static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned int location, const struct dc_stream_state *in)
 {
+    dml_uint_t width, height;
+
+    if (in->timing.h_addressable > 3840)
+        width = 3840;
+    else
+        width = in->timing.h_addressable; // 4K max
+
+    if (in->timing.v_addressable > 2160)
+        height = 2160;
+    else
+        height = in->timing.v_addressable; // 4K max
+
     out->CursorBPP[location] = dml_cur_32bit;
     out->CursorWidth[location] = 256;
 
     out->GPUVMMinPageSizeKBytes[location] = 256;
 
-    out->ViewportWidth[location] = in->timing.h_addressable;
-    out->ViewportHeight[location] = in->timing.v_addressable;
+    out->ViewportWidth[location] = width;
+    out->ViewportHeight[location] = height;
     out->ViewportStationary[location] = false;
 
     out->ViewportWidthChroma[location] = 0;
     out->ViewportHeightChroma[location] = 0;
@@ -849,7 +857,7 @@ static void populate_dummy_dml_plane_cfg(struct dml_plane_cfg_st *out, unsigned
     out->HTapsChroma[location] = 0;
     out->VTapsChroma[location] = 0;
     out->SourceScan[location] = dml_rotation_0;
-    out->ScalerRecoutWidth[location] = in->timing.h_addressable;
+    out->ScalerRecoutWidth[location] = width;
 
     out->LBBitPerPixel[location] = 57;
 
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
index 2a58a7687bdb..72cca367062e 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.c
@@ -703,13 +703,8 @@ static inline struct dml2_context *dml2_allocate_memory(void)
     return (struct dml2_context *) kzalloc(sizeof(struct dml2_context), GFP_KERNEL);
 }
 
-bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
+static void dml2_init(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
 {
-    // Allocate Mode Lib Ctx
-    *dml2 = dml2_allocate_memory();
-
-    if (!(*dml2))
-        return false;
 
     // Store config options
     (*dml2)->config = *config;
 
@@ -737,9 +732,18 @@ bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options
     initialize_dml2_soc_bbox(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc);
 
     initialize_dml2_soc_states(*dml2, in_dc, &(*dml2)->v20.dml_core_ctx.soc, &(*dml2)->v20.dml_core_ctx.states);
+}
+
+bool dml2_create(const struct dc *in_dc, const struct dml2_configuration_options *config, struct dml2_context **dml2)
+{
+    // Allocate Mode Lib Ctx
+    *dml2 = dml2_allocate_memory();
+
+    if (!(*dml2))
+        return false;
+
+    dml2_init(in_dc, config, dml2);
 
-    /*Initialize DML20 instance which calls dml2_core_create, and core_dcn3_populate_informative*/
-    //dml2_initialize_instance(&(*dml_ctx)->v20.dml_init);
     return true;
 }
 
@@ -779,3 +783,11 @@ bool dml2_create_copy(struct dml2_context **dst_dml2,
 
     return true;
 }
+
+void dml2_reinit(const struct dc *in_dc,
+                 const struct dml2_configuration_options *config,
+                 struct dml2_context **dml2)
+{
+
+    dml2_init(in_dc, config, dml2);
+}
diff --git a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
index ee0eb184eb6d..cc662d682fd4 100644
--- a/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
+++ b/drivers/gpu/drm/amd/display/dc/dml2/dml2_wrapper.h
@@ -214,6 +214,9 @@ void dml2_copy(struct dml2_context *dst_dml2,
 bool dml2_create_copy(struct dml2_context **dst_dml2,
     struct dml2_context *src_dml2);
 
+void dml2_reinit(const struct dc *in_dc,
+                 const struct dml2_configuration_options *config,
+                 struct dml2_context **dml2);
 /*
  * dml2_validate - Determines if a display configuration is supported or not.
 
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
index 9d5df4c0da59..0ba1feaf96c0 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dce110/dce110_hwseq.c
@@ -1185,7 +1185,8 @@ void dce110_disable_stream(struct pipe_ctx *pipe_ctx)
         if (dccg) {
             dccg->funcs->disable_symclk32_se(dccg, dp_hpo_inst);
             dccg->funcs->set_dpstreamclk(dccg, REFCLK, tg->inst, dp_hpo_inst);
-            dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
+            if (dccg && dccg->funcs->set_dtbclk_dto)
+                dccg->funcs->set_dtbclk_dto(dccg, &dto_params);
         }
     } else if (dccg && dccg->funcs->disable_symclk_se) {
         dccg->funcs->disable_symclk_se(dccg, stream_enc->stream_enc_inst,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
index c55d5155ecb9..8b3536c380b8 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn20/dcn20_hwseq.c
@@ -1498,6 +1498,11 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state,
         return;
     }
 
+    if (resource_is_pipe_type(new_pipe, OTG_MASTER) &&
+            resource_is_odm_topology_changed(new_pipe, old_pipe))
+        /* Detect odm changes */
+        new_pipe->update_flags.bits.odm = 1;
+
     /* Exit on unchanged, unused pipe */
     if (!old_pipe->plane_state && !new_pipe->plane_state)
         return;
@@ -1551,10 +1556,6 @@ static void dcn20_detect_pipe_changes(struct dc_state *old_state,
 
     /* Detect top pipe only changes */
     if (resource_is_pipe_type(new_pipe, OTG_MASTER)) {
-        /* Detect odm changes */
-        if (resource_is_odm_topology_changed(new_pipe, old_pipe))
-            new_pipe->update_flags.bits.odm = 1;
-
         /* Detect global sync changes */
         if (old_pipe->pipe_dlg_param.vready_offset != new_pipe->pipe_dlg_param.vready_offset
                 || old_pipe->pipe_dlg_param.vstartup_start != new_pipe->pipe_dlg_param.vstartup_start
@@ -1999,19 +2000,20 @@ void dcn20_program_front_end_for_ctx(
     DC_LOGGER_INIT(dc->ctx->logger);
     unsigned int prev_hubp_count = 0;
     unsigned int hubp_count = 0;
+    struct pipe_ctx *pipe;
 
     if (resource_is_pipe_topology_changed(dc->current_state, context))
         resource_log_pipe_topology_update(dc, context);
 
     if (dc->hwss.program_triplebuffer != NULL && dc->debug.enable_tri_buf) {
         for (i = 0; i < dc->res_pool->pipe_count; i++) {
-            struct pipe_ctx *pipe_ctx = &context->res_ctx.pipe_ctx[i];
+            pipe = &context->res_ctx.pipe_ctx[i];
 
-            if (!pipe_ctx->top_pipe && !pipe_ctx->prev_odm_pipe && pipe_ctx->plane_state) {
-                ASSERT(!pipe_ctx->plane_state->triplebuffer_flips);
+            if (!pipe->top_pipe && !pipe->prev_odm_pipe && pipe->plane_state) {
+                ASSERT(!pipe->plane_state->triplebuffer_flips);
                 /*turn off triple buffer for full update*/
                 dc->hwss.program_triplebuffer(
-                    dc, pipe_ctx, pipe_ctx->plane_state->triplebuffer_flips);
+                    dc, pipe, pipe->plane_state->triplebuffer_flips);
             }
         }
     }
@@ -2085,12 +2087,22 @@ void dcn20_program_front_end_for_ctx(
                 DC_LOG_DC("Reset mpcc for pipe %d\n", dc->current_state->res_ctx.pipe_ctx[i].pipe_idx);
         }
 
+    /* update ODM for blanked OTG master pipes */
+    for (i = 0; i < dc->res_pool->pipe_count; i++) {
+        pipe = &context->res_ctx.pipe_ctx[i];
+        if (resource_is_pipe_type(pipe, OTG_MASTER) &&
+                !resource_is_pipe_type(pipe, DPP_PIPE) &&
+                pipe->update_flags.bits.odm &&
+                hws->funcs.update_odm)
+            hws->funcs.update_odm(dc, context, pipe);
+    }
+
     /*
      * Program all updated pipes, order matters for mpcc setup. Start with
      * top pipe and program all pipes that follow in order
      */
     for (i = 0; i < dc->res_pool->pipe_count; i++) {
-        struct pipe_ctx *pipe = &context->res_ctx.pipe_ctx[i];
+        pipe = &context->res_ctx.pipe_ctx[i];
 
         if (pipe->plane_state && !pipe->top_pipe) {
             while (pipe) {
@@ -2129,17 +2141,6 @@ void dcn20_program_front_end_for_ctx(
                 context->stream_status[0].plane_count > 1) {
                 pipe->plane_res.hubp->funcs->hubp_wait_pipe_read_start(pipe->plane_res.hubp);
             }
-
-            /* when dynamic ODM is active, pipes must be reconfigured when all planes are
-             * disabled, as some transitions will leave software and hardware state
-             * mismatched.
-             */
-            if (dc->debug.enable_single_display_2to1_odm_policy &&
-                    pipe->stream &&
-                    pipe->update_flags.bits.disable &&
-                    !pipe->prev_odm_pipe &&
-                    hws->funcs.update_odm)
-                hws->funcs.update_odm(dc, context, pipe);
         }
     }
 
@@ -2451,7 +2452,7 @@ bool dcn20_wait_for_blank_complete(
     int counter;
 
     for (counter = 0; counter < 1000; counter++) {
-        if (opp->funcs->dpg_is_blanked(opp))
+        if (!opp->funcs->dpg_is_pending(opp))
             break;
 
         udelay(100);
@@ -2462,7 +2463,7 @@ bool dcn20_wait_for_blank_complete(
         return false;
     }
 
-    return true;
+    return opp->funcs->dpg_is_blanked(opp);
 }
 
 bool dcn20_dmdata_status_done(struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
index 7e6b7f2a6dc9..8bc3d01537bb 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn30/dcn30_hwseq.c
@@ -812,10 +812,20 @@ void dcn30_set_avmute(struct pipe_ctx *pipe_ctx, bool enable)
     if (pipe_ctx == NULL)
         return;
 
-    if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL)
+    if (dc_is_hdmi_signal(pipe_ctx->stream->signal) && pipe_ctx->stream_res.stream_enc != NULL) {
         pipe_ctx->stream_res.stream_enc->funcs->set_avmute(
                 pipe_ctx->stream_res.stream_enc,
                 enable);
+
+        /* Wait for two frame to make sure AV mute is sent out */
+        if (enable) {
+            pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+            pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+            pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+            pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VBLANK);
+            pipe_ctx->stream_res.tg->funcs->wait_for_state(pipe_ctx->stream_res.tg, CRTC_STATE_VACTIVE);
+        }
+    }
 }
 
 void dcn30_update_info_frame(struct pipe_ctx *pipe_ctx)
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
index 3a9cc8ac0c07..093f4387553c 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn314/dcn314_hwseq.c
@@ -69,29 +69,6 @@
 #define FN(reg_name, field_name) \
     hws->shifts->field_name, hws->masks->field_name
 
-static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
-        int opp_cnt)
-{
-    bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
-    int flow_ctrl_cnt;
-
-    if (opp_cnt >= 2)
-        hblank_halved = true;
-
-    flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
-            stream->timing.h_border_left -
-            stream->timing.h_border_right;
-
-    if (hblank_halved)
-        flow_ctrl_cnt /= 2;
-
-    /* ODM combine 4:1 case */
-    if (opp_cnt == 4)
-        flow_ctrl_cnt /= 2;
-
-    return flow_ctrl_cnt;
-}
-
 static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
 {
     struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
@@ -183,10 +160,6 @@ void dcn314_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx
     struct pipe_ctx *odm_pipe;
     int opp_cnt = 0;
     int opp_inst[MAX_PIPES] = {0};
-    bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing));
-    struct mpc_dwb_flow_control flow_control;
-    struct mpc *mpc = dc->res_pool->mpc;
-    int i;
 
     opp_cnt = get_odm_config(pipe_ctx, opp_inst);
 
@@ -199,20 +172,6 @@ void dcn314_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx
         pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
                 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
 
-    rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
-    flow_control.flow_ctrl_mode = 0;
-    flow_control.flow_ctrl_cnt0 = 0x80;
-    flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt);
-    if (mpc->funcs->set_out_rate_control) {
-        for (i = 0; i < opp_cnt; ++i) {
-            mpc->funcs->set_out_rate_control(
-                    mpc, opp_inst[i],
-                    true,
-                    rate_control_2x_pclk,
-                    &flow_control);
-        }
-    }
-
     for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
         odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
                 odm_pipe->stream_res.opp,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
index aa36d7a56ca8..7668229438da 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.c
@@ -966,29 +966,6 @@ void dcn32_init_hw(struct dc *dc)
     }
 }
 
-static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
-        int opp_cnt)
-{
-    bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
-    int flow_ctrl_cnt;
-
-    if (opp_cnt >= 2)
-        hblank_halved = true;
-
-    flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
-            stream->timing.h_border_left -
-            stream->timing.h_border_right;
-
-    if (hblank_halved)
-        flow_ctrl_cnt /= 2;
-
-    /* ODM combine 4:1 case */
-    if (opp_cnt == 4)
-        flow_ctrl_cnt /= 2;
-
-    return flow_ctrl_cnt;
-}
-
 static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
 {
     struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
@@ -1103,10 +1080,6 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
     struct pipe_ctx *odm_pipe;
     int opp_cnt = 0;
     int opp_inst[MAX_PIPES] = {0};
-    bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing));
-    struct mpc_dwb_flow_control flow_control;
-    struct mpc *mpc = dc->res_pool->mpc;
-    int i;
 
     opp_cnt = get_odm_config(pipe_ctx, opp_inst);
 
@@ -1119,20 +1092,6 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
         pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
                 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
 
-    rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
-    flow_control.flow_ctrl_mode = 0;
-    flow_control.flow_ctrl_cnt0 = 0x80;
-    flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt);
-    if (mpc->funcs->set_out_rate_control) {
-        for (i = 0; i < opp_cnt; ++i) {
-            mpc->funcs->set_out_rate_control(
-                    mpc, opp_inst[i],
-                    true,
-                    rate_control_2x_pclk,
-                    &flow_control);
-        }
-    }
-
     for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
         odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
                 odm_pipe->stream_res.opp,
@@ -1156,6 +1115,13 @@ void dcn32_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
             dsc->funcs->dsc_disconnect(dsc);
         }
     }
+
+    if (!resource_is_pipe_type(pipe_ctx, DPP_PIPE))
+        /*
+         * blank pattern is generated by OPP, reprogram blank pattern
+         * due to OPP count change
+         */
+        dc->hwseq->funcs.blank_pixel_data(dc, pipe_ctx, true);
 }
 
 unsigned int dcn32_calculate_dccg_k1_k2_values(struct pipe_ctx *pipe_ctx, unsigned int *k1_div, unsigned int *k2_div)
@@ -1778,3 +1744,26 @@ void dcn32_prepare_bandwidth(struct dc *dc,
         context->bw_ctx.bw.dcn.clk.p_state_change_support = p_state_change_support;
     }
 }
+
+void dcn32_interdependent_update_lock(struct dc *dc,
+        struct dc_state *context, bool lock)
+{
+    unsigned int i;
+    struct pipe_ctx *pipe;
+    struct timing_generator *tg;
+
+    for (i = 0; i < dc->res_pool->pipe_count; i++) {
+        pipe = &context->res_ctx.pipe_ctx[i];
+        tg = pipe->stream_res.tg;
+
+        if (!resource_is_pipe_type(pipe, OTG_MASTER) ||
+                !tg->funcs->is_tg_enabled(tg) ||
+                dc_state_get_pipe_subvp_type(context, pipe) == SUBVP_PHANTOM)
+            continue;
+
+        if (lock)
+            dc->hwss.pipe_control_lock(dc, pipe, true);
+        else
+            dc->hwss.pipe_control_lock(dc, pipe, false);
+    }
+}
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
index 069e20bc87c0..f55c11fc56ec 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_hwseq.h
@@ -129,4 +129,6 @@ bool dcn32_is_pipe_topology_transition_seamless(struct dc *dc,
 void dcn32_prepare_bandwidth(struct dc *dc,
     struct dc_state *context);
 
+void dcn32_interdependent_update_lock(struct dc *dc,
+        struct dc_state *context, bool lock);
 #endif /* __DC_HWSS_DCN32_H__ */
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
index 2b073123d3ed..67d661dbd5b7 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn32/dcn32_init.c
@@ -58,7 +58,7 @@ static const struct hw_sequencer_funcs dcn32_funcs = {
     .disable_plane = dcn20_disable_plane,
     .disable_pixel_data = dcn20_disable_pixel_data,
     .pipe_control_lock = dcn20_pipe_control_lock,
-    .interdependent_update_lock = dcn10_lock_all_pipes,
+    .interdependent_update_lock = dcn32_interdependent_update_lock,
     .cursor_lock = dcn10_cursor_lock,
     .prepare_bandwidth = dcn32_prepare_bandwidth,
     .optimize_bandwidth = dcn20_optimize_bandwidth,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
index 4b92df23ff0d..a5560b3fc39b 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn35/dcn35_hwseq.c
@@ -358,29 +358,6 @@ void dcn35_init_hw(struct dc *dc)
     }
 }
 
-static int calc_mpc_flow_ctrl_cnt(const struct dc_stream_state *stream,
-        int opp_cnt)
-{
-    bool hblank_halved = optc2_is_two_pixels_per_containter(&stream->timing);
-    int flow_ctrl_cnt;
-
-    if (opp_cnt >= 2)
-        hblank_halved = true;
-
-    flow_ctrl_cnt = stream->timing.h_total - stream->timing.h_addressable -
-            stream->timing.h_border_left -
-            stream->timing.h_border_right;
-
-    if (hblank_halved)
-        flow_ctrl_cnt /= 2;
-
-    /* ODM combine 4:1 case */
-    if (opp_cnt == 4)
-        flow_ctrl_cnt /= 2;
-
-    return flow_ctrl_cnt;
-}
-
 static void update_dsc_on_stream(struct pipe_ctx *pipe_ctx, bool enable)
 {
     struct display_stream_compressor *dsc = pipe_ctx->stream_res.dsc;
@@ -474,10 +451,6 @@ void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
     struct pipe_ctx *odm_pipe;
     int opp_cnt = 0;
     int opp_inst[MAX_PIPES] = {0};
-    bool rate_control_2x_pclk = (pipe_ctx->stream->timing.flags.INTERLACE || optc2_is_two_pixels_per_containter(&pipe_ctx->stream->timing));
-    struct mpc_dwb_flow_control flow_control;
-    struct mpc *mpc = dc->res_pool->mpc;
-    int i;
 
     opp_cnt = get_odm_config(pipe_ctx, opp_inst);
 
@@ -490,20 +463,6 @@ void dcn35_update_odm(struct dc *dc, struct dc_state *context, struct pipe_ctx *
         pipe_ctx->stream_res.tg->funcs->set_odm_bypass(
                 pipe_ctx->stream_res.tg, &pipe_ctx->stream->timing);
 
-    rate_control_2x_pclk = rate_control_2x_pclk || opp_cnt > 1;
-    flow_control.flow_ctrl_mode = 0;
-    flow_control.flow_ctrl_cnt0 = 0x80;
-    flow_control.flow_ctrl_cnt1 = calc_mpc_flow_ctrl_cnt(pipe_ctx->stream, opp_cnt);
-    if (mpc->funcs->set_out_rate_control) {
-        for (i = 0; i < opp_cnt; ++i) {
-            mpc->funcs->set_out_rate_control(
-                    mpc, opp_inst[i],
-                    true,
-                    rate_control_2x_pclk,
-                    &flow_control);
-        }
-    }
-
     for (odm_pipe = pipe_ctx->next_odm_pipe; odm_pipe; odm_pipe = odm_pipe->next_odm_pipe) {
         odm_pipe->stream_res.opp->funcs->opp_pipe_clock_control(
                 odm_pipe->stream_res.opp,
diff --git a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
index ab17fa1c64e8..670255c9bc82 100644
--- a/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
+++ b/drivers/gpu/drm/amd/display/dc/hwss/dcn351/dcn351_init.c
@@ -67,7 +67,7 @@ static const struct hw_sequencer_funcs dcn351_funcs = {
     .prepare_bandwidth = dcn35_prepare_bandwidth,
     .optimize_bandwidth = dcn35_optimize_bandwidth,
     .update_bandwidth = dcn20_update_bandwidth,
-    .set_drr = dcn10_set_drr,
+    .set_drr = dcn35_set_drr,
     .get_position = dcn10_get_position,
     .set_static_screen_control = dcn35_set_static_screen_control,
     .setup_stereo = dcn10_setup_stereo,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
index aee5372e292c..d89c92370d5b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/opp.h
@@ -337,6 +337,9 @@ struct opp_funcs {
     bool (*dpg_is_blanked)(
             struct output_pixel_processor *opp);
 
+    bool (*dpg_is_pending)(struct output_pixel_processor *opp);
+
+
     void (*opp_dpg_set_blank_color)(
             struct output_pixel_processor *opp,
             const struct tg_color *color);
diff --git 
a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h index d98d72f35be5..ffad8fe16c54 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h +++ b/drivers/gpu/drm/amd/display/dc/inc/hw/timing_generator.h @@ -331,6 +331,7 @@ struct timing_generator_funcs { void (*init_odm)(struct timing_generator *tg); void (*wait_drr_doublebuffer_pending_clear)(struct timing_generator *tg); + void (*wait_odm_doublebuffer_pending_clear)(struct timing_generator *tg); }; #endif diff --git a/drivers/gpu/drm/amd/display/dc/inc/link.h b/drivers/gpu/drm/amd/display/dc/inc/link.h index 26fe81f213da..bf29fc58ea6a 100644 --- a/drivers/gpu/drm/amd/display/dc/inc/link.h +++ b/drivers/gpu/drm/amd/display/dc/inc/link.h @@ -285,12 +285,12 @@ struct link_service { enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_data); bool (*edp_set_coasting_vtotal)( - struct dc_link *link, uint16_t coasting_vtotal); + struct dc_link *link, uint32_t coasting_vtotal); bool (*edp_replay_residency)(const struct dc_link *link, unsigned int *residency, const bool is_start, const bool is_alpm); bool (*edp_set_replay_power_opt_and_coasting_vtotal)(struct dc_link *link, - const unsigned int *power_opts, uint16_t coasting_vtotal); + const unsigned int *power_opts, uint32_t coasting_vtotal); bool (*edp_wait_for_t12)(struct dc_link *link); bool (*edp_is_ilr_optimization_required)(struct dc_link *link, diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c index acfbbc638cc6..3baa2bdd6dd6 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c @@ -1034,7 +1034,7 @@ bool edp_send_replay_cmd(struct dc_link *link, return true; } -bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal) +bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal) { struct dc *dc = link->ctx->dc; struct dmub_replay *replay = dc->res_pool->replay; @@ -1073,7 +1073,7 @@ bool edp_replay_residency(const struct dc_link *link, } bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link, - const unsigned int *power_opts, uint16_t coasting_vtotal) + const unsigned int *power_opts, uint32_t coasting_vtotal) { struct dc *dc = link->ctx->dc; struct dmub_replay *replay = dc->res_pool->replay; diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h index 34e521af7bb4..a158c6234d42 100644 --- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h +++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.h @@ -59,12 +59,12 @@ bool edp_setup_replay(struct dc_link *link, bool edp_send_replay_cmd(struct dc_link *link, enum replay_FW_Message_type msg, union dmub_replay_cmd_set *cmd_data); -bool edp_set_coasting_vtotal(struct dc_link *link, uint16_t coasting_vtotal); +bool edp_set_coasting_vtotal(struct dc_link *link, uint32_t coasting_vtotal); bool edp_replay_residency(const struct dc_link *link, unsigned int *residency, const bool is_start, const bool is_alpm); bool edp_get_replay_state(const struct dc_link *link, uint64_t *state); bool edp_set_replay_power_opt_and_coasting_vtotal(struct dc_link *link, - const unsigned int *power_opts, uint16_t coasting_vtotal); + const unsigned int *power_opts, uint32_t 
coasting_vtotal); bool edp_wait_for_t12(struct dc_link *link); bool edp_is_ilr_optimization_required(struct dc_link *link, struct dc_crtc_timing *crtc_timing); diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h index ab81594a7fad..6c2e84d3967f 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn10/dcn10_optc.h @@ -557,7 +557,8 @@ struct dcn_optc_registers { type OTG_CRC_DATA_STREAM_SPLIT_MODE;\ type OTG_CRC_DATA_FORMAT;\ type OTG_V_TOTAL_LAST_USED_BY_DRR;\ - type OTG_DRR_TIMING_DBUF_UPDATE_PENDING; + type OTG_DRR_TIMING_DBUF_UPDATE_PENDING;\ + type OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING; #define TG_REG_FIELD_LIST_DCN3_2(type) \ type OTG_H_TIMING_DIV_MODE_MANUAL; diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c index 823493543325..52eab8fccb7f 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.c @@ -122,6 +122,13 @@ void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combi } } +void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg) +{ + struct optc *optc1 = DCN10TG_FROM_TG(tg); + + REG_WAIT(OTG_DOUBLE_BUFFER_CONTROL, OTG_H_TIMING_DIV_MODE_DB_UPDATE_PENDING, 0, 2, 50000); +} + void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool manual_mode) { struct optc *optc1 = DCN10TG_FROM_TG(optc); @@ -260,9 +267,6 @@ static void optc32_setup_manual_trigger(struct timing_generator *optc) OTG_V_TOTAL_MAX_SEL, 1, OTG_FORCE_LOCK_ON_EVENT, 0, OTG_SET_V_TOTAL_MIN_MASK, (1 << 1)); /* TRIGA */ - - // Setup manual flow control for EOF via TRIG_A - optc->funcs->setup_manual_trigger(optc); } } @@ -345,6 +349,7 @@ static struct timing_generator_funcs dcn32_tg_funcs = { .set_odm_bypass = optc32_set_odm_bypass, .set_odm_combine = optc32_set_odm_combine, .get_odm_combine_segments = optc32_get_odm_combine_segments, + .wait_odm_doublebuffer_pending_clear = optc32_wait_odm_doublebuffer_pending_clear, .set_h_timing_div_manual_mode = optc32_set_h_timing_div_manual_mode, .get_optc_source = optc2_get_optc_source, .set_out_mux = optc3_set_out_mux, diff --git a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h index 8ce3b178cab0..0c2c14695561 100644 --- a/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h +++ b/drivers/gpu/drm/amd/display/dc/optc/dcn32/dcn32_optc.h @@ -183,5 +183,6 @@ void optc32_set_h_timing_div_manual_mode(struct timing_generator *optc, bool man void optc32_get_odm_combine_segments(struct timing_generator *tg, int *odm_combine_segments); void optc32_set_odm_bypass(struct timing_generator *optc, const struct dc_crtc_timing *dc_crtc_timing); +void optc32_wait_odm_doublebuffer_pending_clear(struct timing_generator *tg); #endif /* __DC_OPTC_DCN32_H__ */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c index 3f3951f3ba98..ce1754cc1f46 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.c @@ -1771,6 +1771,7 @@ static bool dml1_validate(struct dc *dc, struct dc_state *context, bool fast_val dc->res_pool->funcs->calculate_wm_and_dlg(dc, context, pipes, pipe_cnt, vlevel); dcn32_override_min_req_memclk(dc, context); + 
dcn32_override_min_req_dcfclk(dc, context); BW_VAL_TRACE_END_WATERMARKS(); @@ -1930,6 +1931,8 @@ static void dcn32_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *bw { DC_FP_START(); dcn32_update_bw_bounding_box_fpu(dc, bw_params); + if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) + dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); DC_FP_END(); } diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h index 0c87b0fabba7..2258c5c7212d 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn32/dcn32_resource.h @@ -42,6 +42,7 @@ #define SUBVP_ACTIVE_MARGIN_LIST_LEN 2 #define DCN3_2_MAX_SUBVP_PIXEL_RATE_MHZ 1800 #define DCN3_2_VMIN_DISPCLK_HZ 717000000 +#define MIN_SUBVP_DCFCLK_KHZ 400000 #define TO_DCN32_RES_POOL(pool)\ container_of(pool, struct dcn32_resource_pool, base) @@ -181,6 +182,8 @@ bool dcn32_subvp_vblank_admissable(struct dc *dc, struct dc_state *context, int void dcn32_update_dml_pipes_odm_policy_based_on_context(struct dc *dc, struct dc_state *context, display_e2e_pipe_params_st *pipes); +void dcn32_override_min_req_dcfclk(struct dc *dc, struct dc_state *context); + /* definitions for run time init of reg offsets */ /* CLK SRC */ diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c index b356fed1726d..296a0a8e7145 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn321/dcn321_resource.c @@ -1581,6 +1581,8 @@ static void dcn321_update_bw_bounding_box(struct dc *dc, struct clk_bw_params *b { DC_FP_START(); dcn321_update_bw_bounding_box_fpu(dc, bw_params); + if (dc->debug.using_dml2 && dc->current_state && dc->current_state->bw_ctx.dml2) + dml2_reinit(dc, &dc->dml2_options, &dc->current_state->bw_ctx.dml2); DC_FP_END(); } diff --git a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c index 5b486400dfdb..909e14261f9b 100644 --- a/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c +++ b/drivers/gpu/drm/amd/display/dc/resource/dcn351/dcn351_resource.c @@ -700,6 +700,8 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_dcc = DCC_ENABLE, .disable_dpp_power_gate = true, .disable_hubp_power_gate = true, + .disable_optc_power_gate = true, /*should the same as above two*/ + .disable_hpo_power_gate = true, /*dmubfw force domain25 on*/ .disable_clock_gate = false, .disable_dsc_power_gate = true, .vsr_support = true, @@ -742,12 +744,13 @@ static const struct dc_debug_options debug_defaults_drv = { }, .seamless_boot_odm_combine = DML_FAIL_SOURCE_PIXEL_FORMAT, .enable_z9_disable_interface = true, /* Allow support for the PMFW interface for disable Z9*/ + .minimum_z8_residency_time = 2100, .using_dml2 = true, .support_eDP1_5 = true, .enable_hpo_pg_support = false, .enable_legacy_fast_update = true, .enable_single_display_2to1_odm_policy = true, - .disable_idle_power_optimizations = true, + .disable_idle_power_optimizations = false, .dmcub_emulation = false, .disable_boot_optimizations = false, .disable_unbounded_requesting = false, @@ -758,8 +761,10 @@ static const struct dc_debug_options debug_defaults_drv = { .disable_z10 = true, .ignore_pg = true, .psp_disabled_wa = true, - .ips2_eval_delay_us = 200, - 
.ips2_entry_delay_us = 400 + .ips2_eval_delay_us = 2000, + .ips2_entry_delay_us = 800, + .disable_dmub_reallow_idle = true, + .static_screen_wait_frames = 2, }; static const struct dc_panel_config panel_config_defaults = { diff --git a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h index a529e369b2ac..af3fe8bb0728 100644 --- a/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h +++ b/drivers/gpu/drm/amd/display/dmub/inc/dmub_cmd.h @@ -3238,6 +3238,14 @@ struct dmub_cmd_replay_set_coasting_vtotal_data { * Currently the support is only for 0 or 1 */ uint8_t panel_inst; + /** + * 16-bit value dicated by driver that indicates the coasting vtotal high byte part. + */ + uint16_t coasting_vtotal_high; + /** + * Explicit padding to 4 byte boundary. + */ + uint8_t pad[2]; }; /** diff --git a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c index 8c137d7c032e..7c9805705fd3 100644 --- a/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c +++ b/drivers/gpu/drm/amd/display/modules/hdcp/hdcp_psp.c @@ -513,6 +513,9 @@ enum mod_hdcp_status mod_hdcp_hdcp2_create_session(struct mod_hdcp *hdcp) hdcp_cmd = (struct ta_hdcp_shared_memory *)psp->hdcp_context.context.mem_context.shared_buf; memset(hdcp_cmd, 0, sizeof(struct ta_hdcp_shared_memory)); + if (!display) + return MOD_HDCP_STATUS_DISPLAY_NOT_FOUND; + hdcp_cmd->in_msg.hdcp2_create_session_v2.display_handle = display->index; if (hdcp->connection.link.adjust.hdcp2.force_type == MOD_HDCP_FORCE_TYPE_0) diff --git a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c index 738ee763f24a..84f9b412a4f1 100644 --- a/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c +++ b/drivers/gpu/drm/amd/display/modules/info_packet/info_packet.c @@ -147,15 +147,12 @@ void mod_build_vsc_infopacket(const struct dc_stream_state *stream, } /* VSC packet set to 4 for PSR-SU, or 2 for PSR1 */ - if (stream->link->psr_settings.psr_feature_enabled) { - if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) - vsc_packet_revision = vsc_packet_rev4; - else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) - vsc_packet_revision = vsc_packet_rev2; - } - - if (stream->link->replay_settings.config.replay_supported) + if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_SU_1) + vsc_packet_revision = vsc_packet_rev4; + else if (stream->link->replay_settings.config.replay_supported) vsc_packet_revision = vsc_packet_rev4; + else if (stream->link->psr_settings.psr_version == DC_PSR_VERSION_1) + vsc_packet_revision = vsc_packet_rev2; /* Update to revision 5 for extended colorimetry support */ if (stream->use_vsc_sdp_for_colorimetry) diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c index e304e8435fb8..2a3698fd2dc2 100644 --- a/drivers/gpu/drm/amd/display/modules/power/power_helpers.c +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.c @@ -975,7 +975,7 @@ bool psr_su_set_dsc_slice_height(struct dc *dc, struct dc_link *link, void set_replay_coasting_vtotal(struct dc_link *link, enum replay_coasting_vtotal_type type, - uint16_t vtotal) + uint32_t vtotal) { link->replay_settings.coasting_vtotal_table[type] = vtotal; } diff --git a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h index bef4815e1703..ff7e6f3cd6be 100644 --- 
a/drivers/gpu/drm/amd/display/modules/power/power_helpers.h +++ b/drivers/gpu/drm/amd/display/modules/power/power_helpers.h @@ -56,7 +56,7 @@ bool dmub_init_abm_config(struct resource_pool *res_pool, void init_replay_config(struct dc_link *link, struct replay_config *pr_config); void set_replay_coasting_vtotal(struct dc_link *link, enum replay_coasting_vtotal_type type, - uint16_t vtotal); + uint32_t vtotal); void set_replay_ips_full_screen_video_src_vtotal(struct dc_link *link, uint16_t vtotal); void calculate_replay_link_off_frame_count(struct dc_link *link, uint16_t vtotal, uint16_t htotal);
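
Note (editor's illustration, not part of the patch): the replay coasting vtotal plumbing above is widened from uint16_t to uint32_t, and dmub_cmd_replay_set_coasting_vtotal_data gains a 16-bit coasting_vtotal_high field. A minimal sketch of how the 32-bit value could be split across an existing low field and the new high field follows; the struct layout and the pack_coasting_vtotal() helper are assumptions for illustration, and only coasting_vtotal_high is taken from the patch.

#include <stdint.h>

/* Hypothetical stand-in for dmub_cmd_replay_set_coasting_vtotal_data; only
 * the coasting_vtotal_high member is taken from the patch above. */
struct coasting_vtotal_cmd_data {
	uint16_t coasting_vtotal;       /* assumed existing low 16 bits */
	uint8_t  cmd_version;
	uint8_t  panel_inst;
	uint16_t coasting_vtotal_high;  /* new: high 16 bits of the 32-bit value */
	uint8_t  pad[2];                /* explicit padding to a 4-byte boundary */
};

static void pack_coasting_vtotal(struct coasting_vtotal_cmd_data *data,
				 uint32_t coasting_vtotal)
{
	/* Split the widened 32-bit coasting vtotal into low/high halves. */
	data->coasting_vtotal      = (uint16_t)(coasting_vtotal & 0xffff);
	data->coasting_vtotal_high = (uint16_t)(coasting_vtotal >> 16);
}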

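Note (editor's sketch, not part of the patch): the dcn20_wait_for_blank_complete() hunks near the top change the poll condition from dpg_is_blanked() to !dpg_is_pending() and make the final result depend on dpg_is_blanked(). Assembled from those hunks, with the OPP interface reduced to a minimal stand-in so the fragment is self-contained, the resulting logic looks roughly like this:

#include <stdbool.h>

/* Minimal stand-in for struct output_pixel_processor and its funcs table. */
struct opp_stub {
	bool (*dpg_is_blanked)(struct opp_stub *opp);
	bool (*dpg_is_pending)(struct opp_stub *opp);
};

void udelay(unsigned long usecs); /* stand-in for the kernel delay primitive */

static bool wait_for_blank_complete(struct opp_stub *opp)
{
	int counter;

	/* Wait for a pending DPG double-buffer update to latch, not merely
	 * for the blanked status bit to read back as set. */
	for (counter = 0; counter < 1000; counter++) {
		if (!opp->dpg_is_pending(opp))
			break;
		udelay(100);
	}

	if (counter == 1000)
		return false; /* timed out waiting for the pending update */

	/* Report success only if the OPP is actually blanked. */
	return opp->dpg_is_blanked(opp);
}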