| author | Dave Airlie <airlied@redhat.com> | 2024-06-11 14:01:55 +1000 |
|---|---|---|
| committer | Dave Airlie <airlied@redhat.com> | 2024-06-11 14:01:55 +1000 |
| commit | 1ddaaa244021aba8496536a6627b4ad2bc0f936a (patch) | |
| tree | 2b37ec6170094757daaa0c7445670eebf3b996d9 /drivers/gpu/drm/amd/display/dc/link/protocols | |
| parent | Merge tag 'drm-xe-next-2024-06-06' of https://gitlab.freedesktop.org/drm/xe/k... (diff) | |
| parent | drm/amdgpu: add RAS is_rma flag (diff) | |
Merge tag 'amd-drm-next-6.11-2024-06-07' of https://gitlab.freedesktop.org/agd5f/linux into drm-next
amd-drm-next-6.11-2024-06-07:
amdgpu:
- DCN 4.0.x support
- DCN 3.5 updates
- GC 12.0 support
- DP MST fixes
- Cursor fixes
- MES11 updates
- MMHUB 4.1 support
- DML2 Updates
- DCN 3.1.5 fixes
- IPS fixes
- Various code cleanups
- GMC 12.0 support
- SDMA 7.0 support
- SMU 13 updates
- SR-IOV fixes
- VCN 5.x fixes
- MES12 support
- SMU 14.x updates
- Devcoredump improvements
- Fixes for HDP flush on platforms with >4k pages
- GC 9.4.3 fixes
- RAS ACA updates
- Silence UBSAN flex array warnings
- MMHUB 3.3 updates
amdkfd:
- Contiguous VRAM allocations
- GC 12.0 support
- SDMA 7.0 support
- SR-IOV fixes
radeon:
- Backlight workaround for iMac
- Silence UBSAN flex array warnings
UAPI:
- GFX12 modifier and DCC support
  Proposed Mesa changes:
  https://gitlab.freedesktop.org/mesa/mesa/-/merge_requests/29510
- KFD GFX ALU exceptions
  Proposed ROCdebugger changes:
  https://github.com/ROCm/ROCdbgapi/commit/08c760622b6601abf906f75abbc5e21d9fd425df
  https://github.com/ROCm/ROCgdb/commit/944fe1c1414a68700414e86e32273b6bfa62ba6f
- KFD Contiguous VRAM allocation flag
  Proposed ROCr/HIP changes:
  https://github.com/ROCm/ROCT-Thunk-Interface/commit/f7b4a269914a3ab4f1e2453c2879adb97b5cc9e5
  https://github.com/ROCm/ROCR-Runtime/pull/214/commits/26e8530d05a775872cb06dde6693db72be0c454a
  https://github.com/ROCm/clr/commit/1d48f2a1ab38b632919c4b7274899b3faf4279ff
Signed-off-by: Dave Airlie <airlied@redhat.com>
From: Alex Deucher <alexander.deucher@amd.com>
Link: https://patchwork.freedesktop.org/patch/msgid/20240607195900.902537-1-alexander.deucher@amd.com
Diffstat (limited to 'drivers/gpu/drm/amd/display/dc/link/protocols')
8 files changed, 95 insertions, 74 deletions
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
index c2d40979203e..d6d5bbf2108c 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_ddc.c
@@ -51,10 +51,6 @@ struct i2c_payloads {
     struct vector payloads;
 };
 
-struct aux_payloads {
-    struct vector payloads;
-};
-
 static bool i2c_payloads_create(
         struct dc_context *ctx,
         struct i2c_payloads *payloads,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
index a01d0842bf8e..964abccebdc6 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_capability.c
@@ -209,6 +209,9 @@ static enum dc_link_rate linkRateInKHzToLinkRateMultiplier(uint32_t link_rate_in
     case 8100000:
         link_rate = LINK_RATE_HIGH3; // Rate_9 (HBR3)- 8.10 Gbps/Lane
         break;
+    case 10000000:
+        link_rate = LINK_RATE_UHBR10; // UHBR10 - 10.0 Gbps/Lane
+        break;
     default:
         link_rate = LINK_RATE_UNKNOWN;
         break;
@@ -534,7 +537,7 @@ static bool decide_fallback_link_setting_max_bw_policy(
         struct dc_link_settings *cur,
         enum link_training_result training_result)
 {
-    uint8_t cur_idx = 0, next_idx;
+    uint32_t cur_idx = 0, next_idx;
     bool found = false;
 
     if (training_result == LINK_TRAINING_ABORT)
@@ -952,6 +955,9 @@ bool link_decide_link_settings(struct dc_stream_state *stream,
         } else {
             edp_decide_link_settings(link, link_setting, req_bw);
         }
+    } else if (stream->signal == SIGNAL_TYPE_VIRTUAL) {
+        link_setting->lane_count = LANE_COUNT_FOUR;
+        link_setting->link_rate = LINK_RATE_HIGH3;
     } else {
         decide_dp_link_settings(link, link_setting, req_bw);
     }
@@ -2059,7 +2065,7 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
     enum dc_link_rate lttpr_max_link_rate;
     enum dc_link_rate cable_max_link_rate;
     struct link_encoder *link_enc = NULL;
-
+    bool is_uhbr13_5_supported = true;
 
     link_enc = link_enc_cfg_get_link_enc(link);
     ASSERT(link_enc);
@@ -2080,6 +2086,9 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
 
     max_link_cap.link_spread = link->reported_link_cap.link_spread;
 
+    if (!link->dpcd_caps.dp_128b_132b_supported_link_rates.bits.UHBR13_5)
+        is_uhbr13_5_supported = false;
+
     /* Lower link settings based on cable attributes
      * Cable ID is a DP2 feature to identify max certified link rate that
      * a cable can carry. The cable identification method requires both
@@ -2098,9 +2107,13 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
         cable_max_link_rate = get_cable_max_link_rate(link);
 
         if (!link->dc->debug.ignore_cable_id &&
-                cable_max_link_rate != LINK_RATE_UNKNOWN &&
-                cable_max_link_rate < max_link_cap.link_rate)
-            max_link_cap.link_rate = cable_max_link_rate;
+                cable_max_link_rate != LINK_RATE_UNKNOWN) {
+            if (cable_max_link_rate < max_link_cap.link_rate)
+                max_link_cap.link_rate = cable_max_link_rate;
+
+            if (!link->dpcd_caps.cable_id.bits.UHBR13_5_CAPABILITY)
+                is_uhbr13_5_supported = false;
+        }
 
     /* account for lttpr repeaters cap
      * notes: repeaters do not snoop in the DPRX Capabilities addresses (3.6.3).
@@ -2113,12 +2126,19 @@ struct dc_link_settings dp_get_max_link_cap(struct dc_link *link)
         if (lttpr_max_link_rate < max_link_cap.link_rate)
             max_link_cap.link_rate = lttpr_max_link_rate;
 
+        if (!link->dpcd_caps.lttpr_caps.supported_128b_132b_rates.bits.UHBR13_5)
+            is_uhbr13_5_supported = false;
+
         DC_LOG_HW_LINK_TRAINING("%s\n Training with LTTPR, max_lane count %d max_link rate %d \n",
                 __func__,
                 max_link_cap.lane_count,
                 max_link_cap.link_rate);
     }
 
+    if (max_link_cap.link_rate == LINK_RATE_UHBR13_5 &&
+            !is_uhbr13_5_supported)
+        max_link_cap.link_rate = LINK_RATE_UHBR10;
+
     if (link_dp_get_encoding_format(&max_link_cap) == DP_128b_132b_ENCODING &&
             link->dc->debug.disable_uhbr)
         max_link_cap.link_rate = LINK_RATE_HIGH3;
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
index 0fcf0b8530ac..5f087e930cb6 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_irq_handler.c
@@ -189,16 +189,30 @@ static void handle_hpd_irq_replay_sink(struct dc_link *link)
     union dpcd_replay_configuration replay_configuration = {0};
     /*AMD Replay version reuse DP_PSR_ERROR_STATUS for REPLAY_ERROR status.*/
     union psr_error_status replay_error_status = {0};
+    bool ret = false;
+    int retries = 0;
 
     if (!link->replay_settings.replay_feature_enabled)
         return;
 
-    dm_helpers_dp_read_dpcd(
-        link->ctx,
-        link,
-        DP_SINK_PR_REPLAY_STATUS,
-        &replay_configuration.raw,
-        sizeof(replay_configuration.raw));
+    while (retries < 10) {
+        ret = dm_helpers_dp_read_dpcd(
+            link->ctx,
+            link,
+            DP_SINK_PR_REPLAY_STATUS,
+            &replay_configuration.raw,
+            sizeof(replay_configuration.raw));
+
+        if (ret)
+            break;
+
+        retries++;
+    }
+
+    if (!ret)
+        DC_LOG_WARNING("[%s][%d] DPCD read addr.0x%x failed with %d retries\n",
+                __func__, __LINE__,
+                DP_SINK_PR_REPLAY_STATUS, retries);
 
     dm_helpers_dp_read_dpcd(
         link->ctx,
@@ -373,6 +387,7 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
     union device_service_irq device_service_clear = {0};
     enum dc_status result;
     bool status = false;
+    bool allow_active = false;
 
     if (out_link_loss)
         *out_link_loss = false;
@@ -427,12 +442,6 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
         return false;
     }
 
-    if (handle_hpd_irq_psr_sink(link))
-        /* PSR-related error was detected and handled */
-        return true;
-
-    handle_hpd_irq_replay_sink(link);
-
     /* If PSR-related error handled, Main link may be off,
      * so do not handle as a normal sink status change interrupt.
      */
@@ -454,9 +463,8 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
      * If we got sink count changed it means
      * Downstream port status changed,
      * then DM should call DC to do the detection.
-     * NOTE: Do not handle link loss on eDP since it is internal link*/
-    if ((link->connector_signal != SIGNAL_TYPE_EDP) &&
-            dp_parse_link_loss_status(
+     */
+    if (dp_parse_link_loss_status(
                     link,
                     &hpd_irq_dpcd_data)) {
         /* Connectivity log: link loss */
@@ -465,6 +473,11 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
                 sizeof(hpd_irq_dpcd_data),
                 "Status: ");
 
+        if (link->psr_settings.psr_feature_enabled)
+            edp_set_psr_allow_active(link, &allow_active, true, false, NULL);
+        else if (link->replay_settings.replay_allow_active)
+            edp_set_replay_allow_active(link, &allow_active, true, false, NULL);
+
         if (defer_handling && has_left_work)
             *has_left_work = true;
         else
@@ -477,6 +490,14 @@ bool dp_handle_hpd_rx_irq(struct dc_link *link,
             dp_trace_link_loss_increment(link);
     }
 
+    if (*out_link_loss == false) {
+        if (handle_hpd_irq_psr_sink(link))
+            /* PSR-related error was detected and handled */
+            return true;
+
+        handle_hpd_irq_replay_sink(link);
+    }
+
     if (link->type == dc_connection_sst_branch &&
         hpd_irq_dpcd_data.bytes.sink_cnt.bits.SINK_COUNT
             != link->dpcd_sink_count)
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
index 2fa4e64e2430..bafa52a0165a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_phy.c
@@ -147,32 +147,25 @@ enum dc_status dp_set_fec_ready(struct dc_link *link, const struct link_resource
 
     link_enc = link_enc_cfg_get_link_enc(link);
     ASSERT(link_enc);
+    if (link_enc->funcs->fec_set_ready == NULL)
+        return DC_NOT_SUPPORTED;
 
-    if (!dp_should_enable_fec(link))
-        return status;
-
-    if (link_enc->funcs->fec_set_ready &&
-            link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
-        if (ready) {
-            fec_config = 1;
-            status = core_link_write_dpcd(link,
-                    DP_FEC_CONFIGURATION,
-                    &fec_config,
-                    sizeof(fec_config));
-            if (status == DC_OK) {
-                link_enc->funcs->fec_set_ready(link_enc, true);
-                link->fec_state = dc_link_fec_ready;
-            } else {
-                link_enc->funcs->fec_set_ready(link_enc, false);
-                link->fec_state = dc_link_fec_not_ready;
-                dm_error("dpcd write failed to set fec_ready");
-            }
-        } else if (link->fec_state == dc_link_fec_ready) {
+    if (ready && dp_should_enable_fec(link)) {
+        fec_config = 1;
+
+        status = core_link_write_dpcd(link, DP_FEC_CONFIGURATION,
+                &fec_config, sizeof(fec_config));
+
+        if (status == DC_OK) {
+            link_enc->funcs->fec_set_ready(link_enc, true);
+            link->fec_state = dc_link_fec_ready;
+        }
+    } else {
+        if (link->fec_state == dc_link_fec_ready) {
             fec_config = 0;
-            status = core_link_write_dpcd(link,
-                    DP_FEC_CONFIGURATION,
-                    &fec_config,
-                    sizeof(fec_config));
+            core_link_write_dpcd(link, DP_FEC_CONFIGURATION,
+                    &fec_config, sizeof(fec_config));
+
             link_enc->funcs->fec_set_ready(link_enc, false);
             link->fec_state = dc_link_fec_not_ready;
         }
@@ -187,14 +180,12 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
 
     link_enc = link_enc_cfg_get_link_enc(link);
     ASSERT(link_enc);
-
-    if (!dp_should_enable_fec(link))
+    if (link_enc->funcs->fec_set_enable == NULL)
         return;
 
-    if (link_enc->funcs->fec_set_enable &&
-            link->dpcd_caps.fec_cap.bits.FEC_CAPABLE) {
-        if (link->fec_state == dc_link_fec_ready && enable) {
-            /* Accord to DP spec, FEC enable sequence can first
+    if (enable && dp_should_enable_fec(link)) {
+        if (link->fec_state == dc_link_fec_ready) {
+            /* According to DP spec, FEC enable sequence can first
              * be transmitted anytime after 1000 LL codes have
              * been transmitted on the link after link training
             * completion. Using 1 lane RBR should have the maximum
@@ -204,7 +195,9 @@ void dp_set_fec_enable(struct dc_link *link, bool enable)
             udelay(7);
             link_enc->funcs->fec_set_enable(link_enc, true);
             link->fec_state = dc_link_fec_enabled;
-        } else if (link->fec_state == dc_link_fec_enabled && !enable) {
+        }
+    } else {
+        if (link->fec_state == dc_link_fec_enabled) {
             link_enc->funcs->fec_set_enable(link_enc, false);
             link->fec_state = dc_link_fec_ready;
         }
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
index 1818970b8eaf..a93dd83cd8c0 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training.c
@@ -914,10 +914,10 @@ static enum dc_status configure_lttpr_mode_non_transparent(
     /* Driver does not need to train the first hop. Skip DPCD read and clear
      * AUX_RD_INTERVAL for DPTX-to-DPIA hop.
      */
-    if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA)
+    if (link->ep_type == DISPLAY_ENDPOINT_USB4_DPIA && repeater_cnt > 0 && repeater_cnt < MAX_REPEATER_CNT)
         link->dpcd_caps.lttpr_caps.aux_rd_interval[--repeater_cnt] = 0;
 
-    for (repeater_id = repeater_cnt; repeater_id > 0; repeater_id--) {
+    for (repeater_id = repeater_cnt; repeater_id > 0 && repeater_id < MAX_REPEATER_CNT; repeater_id--) {
         aux_interval_address = DP_TRAINING_AUX_RD_INTERVAL_PHY_REPEATER1 +
                 ((DP_REPEATER_CONFIGURATION_AND_STATUS_SIZE) * (repeater_id - 1));
         core_link_read_dpcd(
@@ -1704,10 +1704,13 @@ bool perform_link_training_with_retries(
             is_link_bw_min = ((cur_link_settings.link_rate <= LINK_RATE_LOW) &&
                 (cur_link_settings.lane_count <= LANE_COUNT_ONE));
 
-            if (is_link_bw_low)
+            if (is_link_bw_low) {
                 DC_LOG_WARNING(
                     "%s: Link(%d) bandwidth too low after fallback req_bw(%d) > link_bw(%d)\n",
                     __func__, link->link_index, req_bw, link_bw);
+
+                return false;
+            }
         }
 
         msleep(delay_between_attempts);
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
index edb21d21952a..cd1975c03f38 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dp_training_dpia.c
@@ -430,10 +430,6 @@ static enum link_training_result dpia_training_cr_non_transparent(
         retry_count++;
     }
 
-    /* Abort link training if clock recovery failed due to HPD unplug. */
-    if (link->is_hpd_pending)
-        result = LINK_TRAINING_ABORT;
-
     DC_LOG_HW_LINK_TRAINING(
         "%s\n DPIA(%d) clock recovery\n -hop(%d)\n - result(%d)\n - retries(%d)\n - status(%d)\n",
         __func__,
@@ -537,10 +533,6 @@ static enum link_training_result dpia_training_cr_transparent(
         retry_count++;
     }
 
-    /* Abort link training if clock recovery failed due to HPD unplug. */
-    if (link->is_hpd_pending)
-        result = LINK_TRAINING_ABORT;
-
     DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) clock recovery\n -hop(%d)\n - result(%d)\n - retries(%d)\n",
         __func__,
         link->link_id.enum_id - ENUM_ID_1,
@@ -731,10 +723,6 @@ static enum link_training_result dpia_training_eq_non_transparent(
             lt_settings->hw_lane_settings,
             lt_settings->dpcd_lane_settings);
     }
-    /* Abort link training if equalization failed due to HPD unplug. */
-    if (link->is_hpd_pending)
-        result = LINK_TRAINING_ABORT;
-
     DC_LOG_HW_LINK_TRAINING(
         "%s\n DPIA(%d) equalization\n - hop(%d)\n - result(%d)\n - retries(%d)\n - status(%d)\n",
         __func__,
@@ -822,10 +810,6 @@ static enum link_training_result dpia_training_eq_transparent(
             lt_settings->hw_lane_settings,
             lt_settings->dpcd_lane_settings);
     }
-    /* Abort link training if equalization failed due to HPD unplug. */
-    if (link->is_hpd_pending)
-        result = LINK_TRAINING_ABORT;
-
     DC_LOG_HW_LINK_TRAINING("%s\n DPIA(%d) equalization\n - hop(%d)\n - result(%d)\n - retries(%d)\n",
         __func__,
         link->link_id.enum_id - ENUM_ID_1,
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
index a72c898b64fa..584b9295a12a 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_dpcd.c
@@ -165,6 +165,7 @@ static void dpcd_extend_address_range(
         *out_address = new_addr_range.start;
         *out_size = ADDRESS_RANGE_SIZE(new_addr_range.start, new_addr_range.end);
         *out_data = kcalloc(*out_size, sizeof(**out_data), GFP_KERNEL);
+        ASSERT(*out_data);
     }
 }
 
diff --git a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
index ad9aca790dd7..89f66d88c3b0 100644
--- a/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
+++ b/drivers/gpu/drm/amd/display/dc/link/protocols/link_edp_panel_control.c
@@ -248,6 +248,9 @@ bool edp_backlight_enable_aux(struct dc_link *link, bool enable)
             link->connector_signal != SIGNAL_TYPE_DISPLAY_PORT))
         return false;
 
+    if (link->dc->debug.edp_oled_no_backlight_enable && link->dpcd_sink_ext_caps.bits.oled)
+        return true;
+
     if (core_link_write_dpcd(link, DP_SOURCE_BACKLIGHT_ENABLE,
         &backlight_enable, 1) != DC_OK)
         return false;
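A note on the dp_get_max_link_cap() hunks above: UHBR13.5 is now kept only when the sink's 128b/132b supported-rate bits, the cable ID, and (when LTTPRs are in the path) the repeater caps all advertise it; otherwise the maximum link rate falls back to UHBR10. The sketch below is only a minimal, self-contained illustration of that gating, not driver code: the enum, struct, and function names here are hypothetical stand-ins, and the cable and LTTPR checks are collapsed into plain booleans rather than the conditional DPCD reads the driver performs.

```c
/* Hypothetical, simplified model of the UHBR13.5 gating added in this merge. */
#include <stdbool.h>
#include <stdio.h>

enum link_rate {            /* ordering only; not the driver's encodings */
	LINK_RATE_HIGH3,    /* HBR3, 8.1 Gbps per lane */
	LINK_RATE_UHBR10,   /* 10 Gbps per lane */
	LINK_RATE_UHBR13_5, /* 13.5 Gbps per lane */
};

struct path_caps {                /* hypothetical flattening of the DPCD bits */
	bool sink_uhbr13_5;       /* sink 128b/132b supported link rates: UHBR13.5 */
	bool cable_uhbr13_5;      /* cable ID UHBR13_5_CAPABILITY */
	bool lttpr_present;
	bool lttpr_uhbr13_5;      /* LTTPR 128b/132b supported rates: UHBR13.5 */
};

/* UHBR13.5 survives only if every element in the path advertises it;
 * otherwise the maximum rate steps down to UHBR10, mirroring the new
 * is_uhbr13_5_supported fallback in the patch. */
static enum link_rate clamp_max_link_rate(enum link_rate max_rate,
					  const struct path_caps *caps)
{
	bool uhbr13_5_ok = caps->sink_uhbr13_5 &&
			   caps->cable_uhbr13_5 &&
			   (!caps->lttpr_present || caps->lttpr_uhbr13_5);

	if (max_rate == LINK_RATE_UHBR13_5 && !uhbr13_5_ok)
		return LINK_RATE_UHBR10;

	return max_rate;
}

int main(void)
{
	/* Sink advertises UHBR13.5, but the cable is not certified for it. */
	struct path_caps caps = {
		.sink_uhbr13_5 = true,
		.cable_uhbr13_5 = false,
		.lttpr_present = false,
		.lttpr_uhbr13_5 = false,
	};

	enum link_rate rate = clamp_max_link_rate(LINK_RATE_UHBR13_5, &caps);

	printf("max link rate: %s\n",
	       rate == LINK_RATE_UHBR10 ? "UHBR10" : "UHBR13.5");
	return 0;
}
```

Treating the maximum rate as an intersection of per-hop capabilities is what lets a single fallback (UHBR13.5 to UHBR10) cover the sink, cable, and repeater cases uniformly.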
