Diffstat (limited to 'drivers/gpu/drm/display/drm_dp_mst_topology.c')
 drivers/gpu/drm/display/drm_dp_mst_topology.c | 207 ++++++++++------------
 1 file changed, 95 insertions(+), 112 deletions(-)
diff --git a/drivers/gpu/drm/display/drm_dp_mst_topology.c b/drivers/gpu/drm/display/drm_dp_mst_topology.c
index ac90118b9e7a..06c91c5b7f7c 100644
--- a/drivers/gpu/drm/display/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/display/drm_dp_mst_topology.c
@@ -29,7 +29,6 @@
#include <linux/random.h>
#include <linux/sched.h>
#include <linux/seq_file.h>
-#include <linux/iopoll.h>
#if IS_ENABLED(CONFIG_DRM_DEBUG_DP_MST_TOPOLOGY_REFS)
#include <linux/stacktrace.h>
@@ -68,9 +67,6 @@ static bool dump_dp_payload_table(struct drm_dp_mst_topology_mgr *mgr,
static void drm_dp_mst_topology_put_port(struct drm_dp_mst_port *port);
-static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
- int id, u8 start_slot, u8 num_slots);
-
static int drm_dp_send_dpcd_read(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_port *port,
int offset, int size, u8 *bytes);
@@ -320,6 +316,9 @@ static bool drm_dp_decode_sideband_msg_hdr(const struct drm_dp_mst_topology_mgr
hdr->broadcast = (buf[idx] >> 7) & 0x1;
hdr->path_msg = (buf[idx] >> 6) & 0x1;
hdr->msg_len = buf[idx] & 0x3f;
+ if (hdr->msg_len < 1) /* min space for body CRC */
+ return false;
+
idx++;
hdr->somt = (buf[idx] >> 7) & 0x1;
hdr->eomt = (buf[idx] >> 6) & 0x1;
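For readers unfamiliar with the sideband header layout, the check added above rejects a header whose declared body length cannot even hold the body CRC byte. A minimal stand-alone sketch of those two header bytes being decoded (hypothetical struct and function names, not the kernel's drm_dp_sideband_msg_hdr):

	#include <stdbool.h>
	#include <stdint.h>

	struct sb_hdr {                 /* hypothetical, mirrors only the fields above */
		bool broadcast, path_msg, somt, eomt;
		uint8_t msg_len;
	};

	static bool decode_sb_hdr_tail(const uint8_t *buf, struct sb_hdr *hdr)
	{
		int idx = 0;

		hdr->broadcast = (buf[idx] >> 7) & 0x1;
		hdr->path_msg  = (buf[idx] >> 6) & 0x1;
		hdr->msg_len   = buf[idx] & 0x3f;
		if (hdr->msg_len < 1)   /* body must at least hold its CRC byte */
			return false;

		idx++;
		hdr->somt = (buf[idx] >> 7) & 0x1;
		hdr->eomt = (buf[idx] >> 6) & 0x1;
		return true;
	}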
@@ -2282,7 +2281,7 @@ drm_dp_mst_port_add_connector(struct drm_dp_mst_branch *mstb,
port->cached_edid = drm_edid_read_ddc(port->connector,
&port->aux.ddc);
- drm_connector_register(port->connector);
+ drm_connector_dynamic_register(port->connector);
return;
error:
@@ -3264,7 +3263,7 @@ EXPORT_SYMBOL(drm_dp_send_query_stream_enc_status);
static int drm_dp_create_payload_at_dfp(struct drm_dp_mst_topology_mgr *mgr,
struct drm_dp_mst_atomic_payload *payload)
{
- return drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot,
+ return drm_dp_dpcd_write_payload(mgr->aux, payload->vcpi, payload->vc_start_slot,
payload->time_slots);
}
@@ -3295,7 +3294,7 @@ static void drm_dp_destroy_payload_at_remote_and_dfp(struct drm_dp_mst_topology_
}
if (payload->payload_allocation_status == DRM_DP_MST_PAYLOAD_ALLOCATION_DFP)
- drm_dp_dpcd_write_payload(mgr, payload->vcpi, payload->vc_start_slot, 0);
+ drm_dp_dpcd_write_payload(mgr->aux, payload->vcpi, payload->vc_start_slot, 0);
}
/**
@@ -3573,8 +3572,7 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
}
/**
- * drm_dp_get_vc_payload_bw - get the VC payload BW for an MST link
- * @mgr: The &drm_dp_mst_topology_mgr to use
+ * drm_dp_get_vc_payload_bw - get the VC payload BW for an MTP link
* @link_rate: link rate in 10kbits/s units
* @link_lane_count: lane count
*
@@ -3585,17 +3583,12 @@ static int drm_dp_send_up_ack_reply(struct drm_dp_mst_topology_mgr *mgr,
*
* Returns the BW / timeslot value in 20.12 fixed point format.
*/
-fixed20_12 drm_dp_get_vc_payload_bw(const struct drm_dp_mst_topology_mgr *mgr,
- int link_rate, int link_lane_count)
+fixed20_12 drm_dp_get_vc_payload_bw(int link_rate, int link_lane_count)
{
int ch_coding_efficiency =
drm_dp_bw_channel_coding_efficiency(drm_dp_is_uhbr_rate(link_rate));
fixed20_12 ret;
- if (link_rate == 0 || link_lane_count == 0)
- drm_dbg_kms(mgr->dev, "invalid link rate/lane count: (%d / %d)\n",
- link_rate, link_lane_count);
-
/* See DP v2.0 2.6.4.2, 2.7.6.3 VCPayload_Bandwidth_for_OneTimeSlotPer_MTP_Allocation */
ret.full = DIV_ROUND_DOWN_ULL(mul_u32_u32(link_rate * link_lane_count,
ch_coding_efficiency),
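With the @mgr parameter gone, callers pass just the link parameters and get the per-timeslot bandwidth back in 20.12 fixed point. A hedged illustration of the new calling convention (the HBR3 x4 figures are made-up example values, not taken from this patch):

	#include <drm/display/drm_dp_mst_helper.h>
	#include <drm/drm_fixed.h>

	/* Example only: per-timeslot BW for an 8.1 Gbps x4 link.
	 * 810000 is the link rate in the 10 kbit/s units the kernel-doc describes. */
	static fixed20_12 example_hbr3_x4_timeslot_bw(void)
	{
		return drm_dp_get_vc_payload_bw(810000, 4);
	}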
@@ -3683,7 +3676,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
goto out_unlock;
/* Write reset payload */
- drm_dp_dpcd_write_payload(mgr, 0, 0, 0x3f);
+ drm_dp_dpcd_clear_payload(mgr->aux);
drm_dp_mst_queue_probe_work(mgr);
@@ -3697,8 +3690,7 @@ int drm_dp_mst_topology_mgr_set_mst(struct drm_dp_mst_topology_mgr *mgr, bool ms
ret = 0;
mgr->payload_id_table_cleared = false;
- memset(&mgr->down_rep_recv, 0, sizeof(mgr->down_rep_recv));
- memset(&mgr->up_req_recv, 0, sizeof(mgr->up_req_recv));
+ mgr->reset_rx_state = true;
}
out_unlock:
@@ -3856,6 +3848,11 @@ out_fail:
}
EXPORT_SYMBOL(drm_dp_mst_topology_mgr_resume);
+static void reset_msg_rx_state(struct drm_dp_sideband_msg_rx *msg)
+{
+ memset(msg, 0, sizeof(*msg));
+}
+
static bool
drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
struct drm_dp_mst_branch **mstb)
@@ -3934,6 +3931,34 @@ drm_dp_get_one_sb_msg(struct drm_dp_mst_topology_mgr *mgr, bool up,
return true;
}
+static int get_msg_request_type(u8 data)
+{
+ return data & 0x7f;
+}
+
+static bool verify_rx_request_type(struct drm_dp_mst_topology_mgr *mgr,
+ const struct drm_dp_sideband_msg_tx *txmsg,
+ const struct drm_dp_sideband_msg_rx *rxmsg)
+{
+ const struct drm_dp_sideband_msg_hdr *hdr = &rxmsg->initial_hdr;
+ const struct drm_dp_mst_branch *mstb = txmsg->dst;
+ int tx_req_type = get_msg_request_type(txmsg->msg[0]);
+ int rx_req_type = get_msg_request_type(rxmsg->msg[0]);
+ char rad_str[64];
+
+ if (tx_req_type == rx_req_type)
+ return true;
+
+ drm_dp_mst_rad_to_str(mstb->rad, mstb->lct, rad_str, sizeof(rad_str));
+ drm_dbg_kms(mgr->dev,
+ "Got unexpected MST reply, mstb: %p seqno: %d lct: %d rad: %s rx_req_type: %s (%02x) != tx_req_type: %s (%02x)\n",
+ mstb, hdr->seqno, mstb->lct, rad_str,
+ drm_dp_mst_req_type_str(rx_req_type), rx_req_type,
+ drm_dp_mst_req_type_str(tx_req_type), tx_req_type);
+
+ return false;
+}
+
static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_sideband_msg_tx *txmsg;
@@ -3949,9 +3974,9 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
/* find the message */
mutex_lock(&mgr->qlock);
+
txmsg = list_first_entry_or_null(&mgr->tx_msg_downq,
struct drm_dp_sideband_msg_tx, next);
- mutex_unlock(&mgr->qlock);
/* Were we actually expecting a response, and from this mstb? */
if (!txmsg || txmsg->dst != mstb) {
@@ -3960,6 +3985,15 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
hdr = &msg->initial_hdr;
drm_dbg_kms(mgr->dev, "Got MST reply with no msg %p %d %d %02x %02x\n",
mstb, hdr->seqno, hdr->lct, hdr->rad[0], msg->msg[0]);
+
+ mutex_unlock(&mgr->qlock);
+
+ goto out_clear_reply;
+ }
+
+ if (!verify_rx_request_type(mgr, txmsg, msg)) {
+ mutex_unlock(&mgr->qlock);
+
goto out_clear_reply;
}
@@ -3975,20 +4009,15 @@ static int drm_dp_mst_handle_down_rep(struct drm_dp_mst_topology_mgr *mgr)
txmsg->reply.u.nak.nak_data);
}
- memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
- drm_dp_mst_topology_put_mstb(mstb);
-
- mutex_lock(&mgr->qlock);
txmsg->state = DRM_DP_SIDEBAND_TX_RX;
list_del(&txmsg->next);
+
mutex_unlock(&mgr->qlock);
wake_up_all(&mgr->tx_waitq);
- return 0;
-
out_clear_reply:
- memset(msg, 0, sizeof(struct drm_dp_sideband_msg_rx));
+ reset_msg_rx_state(msg);
out:
if (mstb)
drm_dp_mst_topology_put_mstb(mstb);
@@ -4070,16 +4099,20 @@ static void drm_dp_mst_up_req_work(struct work_struct *work)
static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
{
struct drm_dp_pending_up_req *up_req;
+ struct drm_dp_mst_branch *mst_primary;
+ int ret = 0;
if (!drm_dp_get_one_sb_msg(mgr, true, NULL))
- goto out;
+ goto out_clear_reply;
if (!mgr->up_req_recv.have_eomt)
return 0;
up_req = kzalloc(sizeof(*up_req), GFP_KERNEL);
- if (!up_req)
- return -ENOMEM;
+ if (!up_req) {
+ ret = -ENOMEM;
+ goto out_clear_reply;
+ }
INIT_LIST_HEAD(&up_req->next);
@@ -4090,10 +4123,19 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
drm_dbg_kms(mgr->dev, "Received unknown up req type, ignoring: %x\n",
up_req->msg.req_type);
kfree(up_req);
- goto out;
+ goto out_clear_reply;
+ }
+
+ mutex_lock(&mgr->lock);
+ mst_primary = mgr->mst_primary;
+ if (!mst_primary || !drm_dp_mst_topology_try_get_mstb(mst_primary)) {
+ mutex_unlock(&mgr->lock);
+ kfree(up_req);
+ goto out_clear_reply;
}
+ mutex_unlock(&mgr->lock);
- drm_dp_send_up_ack_reply(mgr, mgr->mst_primary, up_req->msg.req_type,
+ drm_dp_send_up_ack_reply(mgr, mst_primary, up_req->msg.req_type,
false);
if (up_req->msg.req_type == DP_CONNECTION_STATUS_NOTIFY) {
@@ -4110,13 +4152,13 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
conn_stat->peer_device_type);
mutex_lock(&mgr->probe_lock);
- handle_csn = mgr->mst_primary->link_address_sent;
+ handle_csn = mst_primary->link_address_sent;
mutex_unlock(&mgr->probe_lock);
if (!handle_csn) {
drm_dbg_kms(mgr->dev, "Got CSN before finish topology probing. Skip it.");
kfree(up_req);
- goto out;
+ goto out_put_primary;
}
} else if (up_req->msg.req_type == DP_RESOURCE_STATUS_NOTIFY) {
const struct drm_dp_resource_status_notify *res_stat =
@@ -4133,9 +4175,22 @@ static int drm_dp_mst_handle_up_req(struct drm_dp_mst_topology_mgr *mgr)
mutex_unlock(&mgr->up_req_lock);
queue_work(system_long_wq, &mgr->up_req_work);
-out:
- memset(&mgr->up_req_recv, 0, sizeof(struct drm_dp_sideband_msg_rx));
- return 0;
+out_put_primary:
+ drm_dp_mst_topology_put_mstb(mst_primary);
+out_clear_reply:
+ reset_msg_rx_state(&mgr->up_req_recv);
+ return ret;
+}
+
+static void update_msg_rx_state(struct drm_dp_mst_topology_mgr *mgr)
+{
+ mutex_lock(&mgr->lock);
+ if (mgr->reset_rx_state) {
+ mgr->reset_rx_state = false;
+ reset_msg_rx_state(&mgr->down_rep_recv);
+ reset_msg_rx_state(&mgr->up_req_recv);
+ }
+ mutex_unlock(&mgr->lock);
}
/**
@@ -4172,6 +4227,8 @@ int drm_dp_mst_hpd_irq_handle_event(struct drm_dp_mst_topology_mgr *mgr, const u
*handled = true;
}
+ update_msg_rx_state(mgr);
+
if (esi[1] & DP_DOWN_REP_MSG_RDY) {
ret = drm_dp_mst_handle_down_rep(mgr);
*handled = true;
@@ -4680,61 +4737,6 @@ void drm_dp_mst_update_slots(struct drm_dp_mst_topology_state *mst_state, uint8_
}
EXPORT_SYMBOL(drm_dp_mst_update_slots);
-static int drm_dp_dpcd_write_payload(struct drm_dp_mst_topology_mgr *mgr,
- int id, u8 start_slot, u8 num_slots)
-{
- u8 payload_alloc[3], status;
- int ret;
- int retries = 0;
-
- drm_dp_dpcd_writeb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS,
- DP_PAYLOAD_TABLE_UPDATED);
-
- payload_alloc[0] = id;
- payload_alloc[1] = start_slot;
- payload_alloc[2] = num_slots;
-
- ret = drm_dp_dpcd_write(mgr->aux, DP_PAYLOAD_ALLOCATE_SET, payload_alloc, 3);
- if (ret != 3) {
- drm_dbg_kms(mgr->dev, "failed to write payload allocation %d\n", ret);
- goto fail;
- }
-
-retry:
- ret = drm_dp_dpcd_readb(mgr->aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
- if (ret < 0) {
- drm_dbg_kms(mgr->dev, "failed to read payload table status %d\n", ret);
- goto fail;
- }
-
- if (!(status & DP_PAYLOAD_TABLE_UPDATED)) {
- retries++;
- if (retries < 20) {
- usleep_range(10000, 20000);
- goto retry;
- }
- drm_dbg_kms(mgr->dev, "status not set after read payload table status %d\n",
- status);
- ret = -EINVAL;
- goto fail;
- }
- ret = 0;
-fail:
- return ret;
-}
-
-static int do_get_act_status(struct drm_dp_aux *aux)
-{
- int ret;
- u8 status;
-
- ret = drm_dp_dpcd_readb(aux, DP_PAYLOAD_TABLE_UPDATE_STATUS, &status);
- if (ret < 0)
- return ret;
-
- return status;
-}
-
/**
* drm_dp_check_act_status() - Polls for ACT handled status.
* @mgr: manager to use
@@ -4752,28 +4754,9 @@ int drm_dp_check_act_status(struct drm_dp_mst_topology_mgr *mgr)
* There doesn't seem to be any recommended retry count or timeout in
* the MST specification. Since some hubs have been observed to take
* over 1 second to update their payload allocations under certain
- * conditions, we use a rather large timeout value.
+ * conditions, we use a rather large timeout value of 3 seconds.
*/
- const int timeout_ms = 3000;
- int ret, status;
-
- ret = readx_poll_timeout(do_get_act_status, mgr->aux, status,
- status & DP_PAYLOAD_ACT_HANDLED || status < 0,
- 200, timeout_ms * USEC_PER_MSEC);
- if (ret < 0 && status >= 0) {
- drm_err(mgr->dev, "Failed to get ACT after %dms, last status: %02x\n",
- timeout_ms, status);
- return -EINVAL;
- } else if (status < 0) {
- /*
- * Failure here isn't unexpected - the hub may have
- * just been unplugged
- */
- drm_dbg_kms(mgr->dev, "Failed to read payload table status: %d\n", status);
- return status;
- }
-
- return 0;
+ return drm_dp_dpcd_poll_act_handled(mgr->aux, 3000);
}
EXPORT_SYMBOL(drm_dp_check_act_status);
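From the caller's point of view the helper presumably still returns 0 on success and a negative error code otherwise, with the actual DPCD polling now living in the drm_dp_dpcd_* helpers. A hedged sketch of a driver-side caller (hypothetical function, error handling trimmed), assuming the payload table update and ACT trigger have already happened:

	#include <drm/display/drm_dp_mst_helper.h>
	#include <drm/drm_print.h>

	/* Hypothetical example: wait for the sink to ack the ACT sequence. */
	static int example_wait_for_act(struct drm_dp_mst_topology_mgr *mgr)
	{
		int ret = drm_dp_check_act_status(mgr);

		if (ret < 0)
			/* Not necessarily fatal: the hub may simply have been unplugged. */
			drm_dbg_kms(mgr->dev, "ACT not handled: %d\n", ret);

		return ret;
	}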