Diffstat (limited to 'drivers/gpu/drm')
226 files changed, 7525 insertions, 2399 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c index ff98c87b2e0b..878b13efd154 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_dma_buf.c @@ -313,11 +313,23 @@ struct dma_buf *amdgpu_gem_prime_export(struct drm_gem_object *gobj, { struct amdgpu_bo *bo = gem_to_amdgpu_bo(gobj); struct dma_buf *buf; + struct ttm_operation_ctx ctx = { + .interruptible = true, + .no_wait_gpu = true, + /* We opt to avoid OOM on system pages allocations */ + .gfp_retry_mayfail = true, + .allow_res_evict = false, + }; + int ret; if (amdgpu_ttm_tt_get_usermm(bo->tbo.ttm) || bo->flags & AMDGPU_GEM_CREATE_VM_ALWAYS_VALID) return ERR_PTR(-EPERM); + ret = ttm_bo_setup_export(&bo->tbo, &ctx); + if (ret) + return ERR_PTR(ret); + buf = drm_gem_prime_export(gobj, flags); if (!IS_ERR(buf)) buf->ops = &amdgpu_dmabuf_ops; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c index c80c8f543532..98aa99b314c9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gfx.c @@ -1474,7 +1474,8 @@ static int amdgpu_gfx_run_cleaner_shader_job(struct amdgpu_ring *ring) owner = (void *)(unsigned long)atomic_inc_return(&counter); r = amdgpu_job_alloc_with_ib(ring->adev, &entity, owner, - 64, 0, &job); + 64, 0, &job, + AMDGPU_KERNEL_JOB_ID_CLEANER_SHADER); if (r) goto err; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c index 97b562a79ea8..9dcf51991b5b 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.c @@ -690,7 +690,7 @@ void amdgpu_gmc_flush_gpu_tlb(struct amdgpu_device *adev, uint32_t vmid, r = amdgpu_job_alloc_with_ib(ring->adev, &adev->mman.high_pr, AMDGPU_FENCE_OWNER_UNDEFINED, 16 * 4, AMDGPU_IB_POOL_IMMEDIATE, - &job); + &job, AMDGPU_KERNEL_JOB_ID_FLUSH_GPU_TLB); if (r) goto error_alloc; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c index 9b1c55115921..d020a890a0ea 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c @@ -209,11 +209,12 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, struct drm_sched_entity *entity, void *owner, size_t size, enum amdgpu_ib_pool_type pool_type, - struct amdgpu_job **job) + struct amdgpu_job **job, u64 k_job_id) { int r; - r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job, 0); + r = amdgpu_job_alloc(adev, NULL, entity, owner, 1, job, + k_job_id); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h index 2f302266662b..4a6487eb6cb5 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.h @@ -44,6 +44,22 @@ struct amdgpu_fence; enum amdgpu_ib_pool_type; +/* Internal kernel job ids. (decreasing values, starting from U64_MAX). 
*/ +#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE (18446744073709551615ULL) +#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES (18446744073709551614ULL) +#define AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE (18446744073709551613ULL) +#define AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR (18446744073709551612ULL) +#define AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER (18446744073709551611ULL) +#define AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA (18446744073709551610ULL) +#define AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER (18446744073709551609ULL) +#define AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE (18446744073709551608ULL) +#define AMDGPU_KERNEL_JOB_ID_MOVE_BLIT (18446744073709551607ULL) +#define AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER (18446744073709551606ULL) +#define AMDGPU_KERNEL_JOB_ID_CLEANER_SHADER (18446744073709551605ULL) +#define AMDGPU_KERNEL_JOB_ID_FLUSH_GPU_TLB (18446744073709551604ULL) +#define AMDGPU_KERNEL_JOB_ID_KFD_GART_MAP (18446744073709551603ULL) +#define AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST (18446744073709551602ULL) + struct amdgpu_job { struct drm_sched_job base; struct amdgpu_vm *vm; @@ -96,7 +112,8 @@ int amdgpu_job_alloc(struct amdgpu_device *adev, struct amdgpu_vm *vm, int amdgpu_job_alloc_with_ib(struct amdgpu_device *adev, struct drm_sched_entity *entity, void *owner, size_t size, enum amdgpu_ib_pool_type pool_type, - struct amdgpu_job **job); + struct amdgpu_job **job, + u64 k_job_id); void amdgpu_job_set_resources(struct amdgpu_job *job, struct amdgpu_bo *gds, struct amdgpu_bo *gws, struct amdgpu_bo *oa); void amdgpu_job_free_resources(struct amdgpu_job *job); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c index f0d7e2487237..22da65f45226 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_jpeg.c @@ -196,7 +196,8 @@ static int amdgpu_jpeg_dec_set_reg(struct amdgpu_ring *ring, uint32_t handle, int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + AMDGPU_IB_POOL_DIRECT, &job, + AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c index 122a88294883..d18bade9c98f 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c @@ -1313,7 +1313,8 @@ void amdgpu_bo_release_notify(struct ttm_buffer_object *bo) if (r) goto out; - r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true); + r = amdgpu_fill_buffer(abo, 0, &bo->base._resv, &fence, true, + AMDGPU_KERNEL_JOB_ID_CLEAR_ON_RELEASE); if (WARN_ON(r)) goto out; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c index 27ab4e754b2a..428265046815 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c @@ -226,7 +226,8 @@ static int amdgpu_ttm_map_buffer(struct ttm_buffer_object *bo, r = amdgpu_job_alloc_with_ib(adev, &adev->mman.high_pr, AMDGPU_FENCE_OWNER_UNDEFINED, num_dw * 4 + num_bytes, - AMDGPU_IB_POOL_DELAYED, &job); + AMDGPU_IB_POOL_DELAYED, &job, + AMDGPU_KERNEL_JOB_ID_TTM_MAP_BUFFER); if (r) return r; @@ -406,7 +407,7 @@ static int amdgpu_move_blit(struct ttm_buffer_object *bo, struct dma_fence *wipe_fence = NULL; r = amdgpu_fill_buffer(abo, 0, NULL, &wipe_fence, - false); + false, AMDGPU_KERNEL_JOB_ID_MOVE_BLIT); if (r) { goto error; } else if (wipe_fence) { @@ -1510,7 +1511,8 @@ static int amdgpu_ttm_access_memory_sdma(struct ttm_buffer_object *bo, r = amdgpu_job_alloc_with_ib(adev, 
&adev->mman.high_pr, AMDGPU_FENCE_OWNER_UNDEFINED, num_dw * 4, AMDGPU_IB_POOL_DELAYED, - &job); + &job, + AMDGPU_KERNEL_JOB_ID_TTM_ACCESS_MEMORY_SDMA); if (r) goto out; @@ -2167,7 +2169,7 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev, struct dma_resv *resv, bool vm_needs_flush, struct amdgpu_job **job, - bool delayed) + bool delayed, u64 k_job_id) { enum amdgpu_ib_pool_type pool = direct_submit ? AMDGPU_IB_POOL_DIRECT : @@ -2177,7 +2179,7 @@ static int amdgpu_ttm_prepare_job(struct amdgpu_device *adev, &adev->mman.high_pr; r = amdgpu_job_alloc_with_ib(adev, entity, AMDGPU_FENCE_OWNER_UNDEFINED, - num_dw * 4, pool, job); + num_dw * 4, pool, job, k_job_id); if (r) return r; @@ -2217,7 +2219,8 @@ int amdgpu_copy_buffer(struct amdgpu_ring *ring, uint64_t src_offset, num_loops = DIV_ROUND_UP(byte_count, max_bytes); num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->copy_num_dw, 8); r = amdgpu_ttm_prepare_job(adev, direct_submit, num_dw, - resv, vm_needs_flush, &job, false); + resv, vm_needs_flush, &job, false, + AMDGPU_KERNEL_JOB_ID_TTM_COPY_BUFFER); if (r) return r; @@ -2252,7 +2255,8 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data, uint64_t dst_addr, uint32_t byte_count, struct dma_resv *resv, struct dma_fence **fence, - bool vm_needs_flush, bool delayed) + bool vm_needs_flush, bool delayed, + u64 k_job_id) { struct amdgpu_device *adev = ring->adev; unsigned int num_loops, num_dw; @@ -2265,7 +2269,7 @@ static int amdgpu_ttm_fill_mem(struct amdgpu_ring *ring, uint32_t src_data, num_loops = DIV_ROUND_UP_ULL(byte_count, max_bytes); num_dw = ALIGN(num_loops * adev->mman.buffer_funcs->fill_num_dw, 8); r = amdgpu_ttm_prepare_job(adev, false, num_dw, resv, vm_needs_flush, - &job, delayed); + &job, delayed, k_job_id); if (r) return r; @@ -2335,7 +2339,8 @@ int amdgpu_ttm_clear_buffer(struct amdgpu_bo *bo, goto err; r = amdgpu_ttm_fill_mem(ring, 0, addr, size, resv, - &next, true, true); + &next, true, true, + AMDGPU_KERNEL_JOB_ID_TTM_CLEAR_BUFFER); if (r) goto err; @@ -2354,7 +2359,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, uint32_t src_data, struct dma_resv *resv, struct dma_fence **f, - bool delayed) + bool delayed, + u64 k_job_id) { struct amdgpu_device *adev = amdgpu_ttm_adev(bo->tbo.bdev); struct amdgpu_ring *ring = adev->mman.buffer_funcs_ring; @@ -2384,7 +2390,7 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, goto error; r = amdgpu_ttm_fill_mem(ring, src_data, to, cur_size, resv, - &next, true, delayed); + &next, true, delayed, k_job_id); if (r) goto error; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h index 2309df3f68a9..d82d107fdcc6 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.h @@ -182,7 +182,8 @@ int amdgpu_fill_buffer(struct amdgpu_bo *bo, uint32_t src_data, struct dma_resv *resv, struct dma_fence **fence, - bool delayed); + bool delayed, + u64 k_job_id); int amdgpu_ttm_alloc_gart(struct ttm_buffer_object *bo); void amdgpu_ttm_recover_gart(struct ttm_buffer_object *tbo); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c index 74758b5ffc6c..5c38f0d30c87 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c @@ -1136,7 +1136,8 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo, r = amdgpu_job_alloc_with_ib(ring->adev, &adev->uvd.entity, AMDGPU_FENCE_OWNER_UNDEFINED, 64, direct ? 
AMDGPU_IB_POOL_DIRECT : - AMDGPU_IB_POOL_DELAYED, &job); + AMDGPU_IB_POOL_DELAYED, &job, + AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c index b9060bcd4806..ce318f5de047 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c @@ -449,7 +449,7 @@ static int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle, r = amdgpu_job_alloc_with_ib(ring->adev, &ring->adev->vce.entity, AMDGPU_FENCE_OWNER_UNDEFINED, ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT, - &job); + &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); if (r) return r; @@ -540,7 +540,8 @@ static int amdgpu_vce_get_destroy_msg(struct amdgpu_ring *ring, uint32_t handle, AMDGPU_FENCE_OWNER_UNDEFINED, ib_size_dw * 4, direct ? AMDGPU_IB_POOL_DIRECT : - AMDGPU_IB_POOL_DELAYED, &job); + AMDGPU_IB_POOL_DELAYED, &job, + AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c index ad415203d245..595f0df17bcc 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c @@ -628,7 +628,7 @@ static int amdgpu_vcn_dec_send_msg(struct amdgpu_ring *ring, r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, 64, AMDGPU_IB_POOL_DIRECT, - &job); + &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); if (r) goto err; @@ -808,7 +808,7 @@ static int amdgpu_vcn_dec_sw_send_msg(struct amdgpu_ring *ring, r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT, - &job); + &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); if (r) goto err; @@ -938,7 +938,7 @@ static int amdgpu_vcn_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t hand r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT, - &job); + &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); if (r) return r; @@ -1005,7 +1005,7 @@ static int amdgpu_vcn_enc_get_destroy_msg(struct amdgpu_ring *ring, uint32_t han r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, AMDGPU_IB_POOL_DIRECT, - &job); + &job, AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c index bd12d8ff15a4..cc74a9827ac9 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c @@ -977,7 +977,8 @@ int amdgpu_vm_update_pdes(struct amdgpu_device *adev, params.vm = vm; params.immediate = immediate; - r = vm->update_funcs->prepare(&params, NULL); + r = vm->update_funcs->prepare(&params, NULL, + AMDGPU_KERNEL_JOB_ID_VM_UPDATE_PDES); if (r) goto error; @@ -1146,7 +1147,8 @@ int amdgpu_vm_update_range(struct amdgpu_device *adev, struct amdgpu_vm *vm, dma_fence_put(tmp); } - r = vm->update_funcs->prepare(&params, sync); + r = vm->update_funcs->prepare(&params, sync, + AMDGPU_KERNEL_JOB_ID_VM_UPDATE_RANGE); if (r) goto error_free; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h index e045c1590d78..67eaf5402e7e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h @@ -308,7 +308,7 @@ struct amdgpu_vm_update_params { struct amdgpu_vm_update_funcs { int (*map_table)(struct amdgpu_bo_vm *bo); int (*prepare)(struct amdgpu_vm_update_params *p, - struct amdgpu_sync *sync); + struct amdgpu_sync *sync, u64 k_job_id); int (*update)(struct amdgpu_vm_update_params *p, struct amdgpu_bo_vm *bo, uint64_t pe, uint64_t addr, 
unsigned count, uint32_t incr, uint64_t flags); diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c index 0c1ef5850a5e..22e2e5b47341 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_cpu.c @@ -40,12 +40,14 @@ static int amdgpu_vm_cpu_map_table(struct amdgpu_bo_vm *table) * * @p: see amdgpu_vm_update_params definition * @sync: sync obj with fences to wait on + * @k_job_id: the id for tracing/debug purposes * * Returns: * Negativ errno, 0 for success. */ static int amdgpu_vm_cpu_prepare(struct amdgpu_vm_update_params *p, - struct amdgpu_sync *sync) + struct amdgpu_sync *sync, + u64 k_job_id) { if (!sync) return 0; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c index 30022123b0bf..f794fb1cc06e 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_pt.c @@ -26,6 +26,7 @@ #include "amdgpu.h" #include "amdgpu_trace.h" #include "amdgpu_vm.h" +#include "amdgpu_job.h" /* * amdgpu_vm_pt_cursor - state for for_each_amdgpu_vm_pt @@ -395,7 +396,8 @@ int amdgpu_vm_pt_clear(struct amdgpu_device *adev, struct amdgpu_vm *vm, params.vm = vm; params.immediate = immediate; - r = vm->update_funcs->prepare(&params, NULL); + r = vm->update_funcs->prepare(&params, NULL, + AMDGPU_KERNEL_JOB_ID_VM_PT_CLEAR); if (r) goto exit; diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c index 46d9fb433ab2..36805dcfa159 100644 --- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c +++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm_sdma.c @@ -40,7 +40,7 @@ static int amdgpu_vm_sdma_map_table(struct amdgpu_bo_vm *table) /* Allocate a new job for @count PTE updates */ static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p, - unsigned int count) + unsigned int count, u64 k_job_id) { enum amdgpu_ib_pool_type pool = p->immediate ? AMDGPU_IB_POOL_IMMEDIATE : AMDGPU_IB_POOL_DELAYED; @@ -56,7 +56,7 @@ static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p, ndw = min(ndw, AMDGPU_VM_SDMA_MAX_NUM_DW); r = amdgpu_job_alloc_with_ib(p->adev, entity, AMDGPU_FENCE_OWNER_VM, - ndw * 4, pool, &p->job); + ndw * 4, pool, &p->job, k_job_id); if (r) return r; @@ -69,16 +69,17 @@ static int amdgpu_vm_sdma_alloc_job(struct amdgpu_vm_update_params *p, * * @p: see amdgpu_vm_update_params definition * @sync: amdgpu_sync object with fences to wait for + * @k_job_id: identifier of the job, for tracing purpose * * Returns: * Negativ errno, 0 for success. 
*/ static int amdgpu_vm_sdma_prepare(struct amdgpu_vm_update_params *p, - struct amdgpu_sync *sync) + struct amdgpu_sync *sync, u64 k_job_id) { int r; - r = amdgpu_vm_sdma_alloc_job(p, 0); + r = amdgpu_vm_sdma_alloc_job(p, 0, k_job_id); if (r) return r; @@ -249,7 +250,8 @@ static int amdgpu_vm_sdma_update(struct amdgpu_vm_update_params *p, if (r) return r; - r = amdgpu_vm_sdma_alloc_job(p, count); + r = amdgpu_vm_sdma_alloc_job(p, count, + AMDGPU_KERNEL_JOB_ID_VM_UPDATE); if (r) return r; } diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c index 1c07b701d0e4..ceb94bbb03a4 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c @@ -217,7 +217,8 @@ static int uvd_v6_0_enc_get_create_msg(struct amdgpu_ring *ring, uint32_t handle int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + AMDGPU_IB_POOL_DIRECT, &job, + AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); if (r) return r; @@ -281,7 +282,8 @@ static int uvd_v6_0_enc_get_destroy_msg(struct amdgpu_ring *ring, int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + AMDGPU_IB_POOL_DIRECT, &job, + AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c index 9d237b5937fb..1f8866f3f63c 100644 --- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c +++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c @@ -225,7 +225,8 @@ static int uvd_v7_0_enc_get_create_msg(struct amdgpu_ring *ring, u32 handle, int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + AMDGPU_IB_POOL_DIRECT, &job, + AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); if (r) return r; @@ -288,7 +289,8 @@ static int uvd_v7_0_enc_get_destroy_msg(struct amdgpu_ring *ring, u32 handle, int i, r; r = amdgpu_job_alloc_with_ib(ring->adev, NULL, NULL, ib_size_dw * 4, - AMDGPU_IB_POOL_DIRECT, &job); + AMDGPU_IB_POOL_DIRECT, &job, + AMDGPU_KERNEL_JOB_ID_VCN_RING_TEST); if (r) return r; diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c index 5d7eb0234002..86315ecb6f1d 100644 --- a/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c +++ b/drivers/gpu/drm/amd/amdkfd/kfd_migrate.c @@ -68,7 +68,8 @@ svm_migrate_gart_map(struct amdgpu_ring *ring, uint64_t npages, AMDGPU_FENCE_OWNER_UNDEFINED, num_dw * 4 + num_bytes, AMDGPU_IB_POOL_DELAYED, - &job); + &job, + AMDGPU_KERNEL_JOB_ID_KFD_GART_MAP); if (r) return r; diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c index fadc6098eaee..62defeccbb5c 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c @@ -3646,18 +3646,20 @@ static struct drm_mode_config_helper_funcs amdgpu_dm_mode_config_helperfuncs = { static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) { + const struct drm_panel_backlight_quirk *panel_backlight_quirk; struct amdgpu_dm_backlight_caps *caps; struct drm_connector *conn_base; struct amdgpu_device *adev; struct drm_luminance_range_info *luminance_range; - int min_input_signal_override; + struct drm_device *drm; if (aconnector->bl_idx == -1 || aconnector->dc_link->connector_signal != SIGNAL_TYPE_EDP) return; conn_base = &aconnector->base; - adev = drm_to_adev(conn_base->dev); + drm = conn_base->dev; + adev = drm_to_adev(drm); caps = 
&adev->dm.backlight_caps[aconnector->bl_idx]; caps->ext_caps = &aconnector->dc_link->dpcd_sink_ext_caps; @@ -3690,9 +3692,24 @@ static void update_connector_ext_caps(struct amdgpu_dm_connector *aconnector) else caps->aux_min_input_signal = 1; - min_input_signal_override = drm_get_panel_min_brightness_quirk(aconnector->drm_edid); - if (min_input_signal_override >= 0) - caps->min_input_signal = min_input_signal_override; + panel_backlight_quirk = + drm_get_panel_backlight_quirk(aconnector->drm_edid); + if (!IS_ERR_OR_NULL(panel_backlight_quirk)) { + if (panel_backlight_quirk->min_brightness) { + caps->min_input_signal = + panel_backlight_quirk->min_brightness - 1; + drm_info(drm, + "Applying panel backlight quirk, min_brightness: %d\n", + caps->min_input_signal); + } + if (panel_backlight_quirk->brightness_mask) { + drm_info(drm, + "Applying panel backlight quirk, brightness_mask: 0x%X\n", + panel_backlight_quirk->brightness_mask); + caps->brightness_mask = + panel_backlight_quirk->brightness_mask; + } + } } DEFINE_FREE(sink_release, struct dc_sink *, if (_T) dc_sink_release(_T)) @@ -4904,6 +4921,10 @@ static void amdgpu_dm_backlight_set_level(struct amdgpu_display_manager *dm, brightness = convert_brightness_from_user(caps, dm->brightness[bl_idx]); link = (struct dc_link *)dm->backlight_link[bl_idx]; + /* Apply brightness quirk */ + if (caps->brightness_mask) + brightness |= caps->brightness_mask; + /* Change brightness based on AUX property */ mutex_lock(&dm->dc_lock); if (dm->dc->caps.ips_support && dm->dc->ctx->dmub_srv->idle_allowed) { diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h index ce74125c713e..159f8ded0439 100644 --- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h +++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h @@ -216,6 +216,11 @@ struct amdgpu_dm_backlight_caps { */ bool aux_support; /** + * @brightness_mask: After deriving brightness, OR it with this mask. + * Workaround for panels with issues with certain brightness values. 
+ */ + u32 brightness_mask; + /** * @ac_level: the default brightness if booted on AC */ u8 ac_level; diff --git a/drivers/gpu/drm/ast/ast_2100.c b/drivers/gpu/drm/ast/ast_2100.c index 477ee15eff5d..829e3b8b0d19 100644 --- a/drivers/gpu/drm/ast/ast_2100.c +++ b/drivers/gpu/drm/ast/ast_2100.c @@ -32,6 +32,39 @@ #include "ast_post.h" /* + * DRAM type + */ + +static enum ast_dram_layout ast_2100_get_dram_layout_p2a(struct ast_device *ast) +{ + u32 mcr_cfg; + enum ast_dram_layout dram_layout; + + ast_write32(ast, 0xf004, 0x1e6e0000); + ast_write32(ast, 0xf000, 0x1); + mcr_cfg = ast_read32(ast, 0x10004); + + switch (mcr_cfg & 0x0c) { + case 0: + case 4: + default: + dram_layout = AST_DRAM_512Mx16; + break; + case 8: + if (mcr_cfg & 0x40) + dram_layout = AST_DRAM_1Gx16; + else + dram_layout = AST_DRAM_512Mx32; + break; + case 0xc: + dram_layout = AST_DRAM_1Gx32; + break; + } + + return dram_layout; +} + +/* * POST */ @@ -266,6 +299,7 @@ static void ast_post_chip_2100(struct ast_device *ast) u8 j; u32 data, temp, i; const struct ast_dramstruct *dram_reg_info; + enum ast_dram_layout dram_layout = ast_2100_get_dram_layout_p2a(ast); j = ast_get_index_reg_mask(ast, AST_IO_VGACRI, 0xd0, 0xff); @@ -292,11 +326,17 @@ static void ast_post_chip_2100(struct ast_device *ast) for (i = 0; i < 15; i++) udelay(dram_reg_info->data); } else if (AST_DRAMSTRUCT_IS(dram_reg_info, DRAM_TYPE)) { - data = dram_reg_info->data; - if (ast->dram_type == AST_DRAM_1Gx16) + switch (dram_layout) { + case AST_DRAM_1Gx16: data = 0x00000d89; - else if (ast->dram_type == AST_DRAM_1Gx32) + break; + case AST_DRAM_1Gx32: data = 0x00000c8d; + break; + default: + data = dram_reg_info->data; + break; + } temp = ast_read32(ast, 0x12070); temp &= 0xc; diff --git a/drivers/gpu/drm/ast/ast_drv.h b/drivers/gpu/drm/ast/ast_drv.h index e37a55295ed7..c15aef014f69 100644 --- a/drivers/gpu/drm/ast/ast_drv.h +++ b/drivers/gpu/drm/ast/ast_drv.h @@ -98,13 +98,15 @@ enum ast_config_mode { ast_use_defaults }; -#define AST_DRAM_512Mx16 0 -#define AST_DRAM_1Gx16 1 -#define AST_DRAM_512Mx32 2 -#define AST_DRAM_1Gx32 3 -#define AST_DRAM_2Gx16 6 -#define AST_DRAM_4Gx16 7 -#define AST_DRAM_8Gx16 8 +enum ast_dram_layout { + AST_DRAM_512Mx16 = 0, + AST_DRAM_1Gx16 = 1, + AST_DRAM_512Mx32 = 2, + AST_DRAM_1Gx32 = 3, + AST_DRAM_2Gx16 = 6, + AST_DRAM_4Gx16 = 7, + AST_DRAM_8Gx16 = 8, +}; /* * Hardware cursor @@ -172,10 +174,6 @@ struct ast_device { enum ast_config_mode config_mode; enum ast_chip chip; - uint32_t dram_bus_width; - uint32_t dram_type; - uint32_t mclk; - void __iomem *vram; unsigned long vram_base; unsigned long vram_size; diff --git a/drivers/gpu/drm/ast/ast_main.c b/drivers/gpu/drm/ast/ast_main.c index 44b9b5f659fc..3eea6a6cdacd 100644 --- a/drivers/gpu/drm/ast/ast_main.c +++ b/drivers/gpu/drm/ast/ast_main.c @@ -210,126 +210,6 @@ static void ast_detect_tx_chip(struct ast_device *ast, bool need_post) drm_info(dev, "Using %s\n", info_str[ast->tx_chip]); } -static int ast_get_dram_info(struct ast_device *ast) -{ - struct drm_device *dev = &ast->base; - struct device_node *np = dev->dev->of_node; - uint32_t mcr_cfg, mcr_scu_mpll, mcr_scu_strap; - uint32_t denum, num, div, ref_pll, dsel; - - switch (ast->config_mode) { - case ast_use_dt: - /* - * If some properties are missing, use reasonable - * defaults for GEN5 - */ - if (of_property_read_u32(np, "aspeed,mcr-configuration", - &mcr_cfg)) - mcr_cfg = 0x00000577; - if (of_property_read_u32(np, "aspeed,mcr-scu-mpll", - &mcr_scu_mpll)) - mcr_scu_mpll = 0x000050C0; - if (of_property_read_u32(np, 
"aspeed,mcr-scu-strap", - &mcr_scu_strap)) - mcr_scu_strap = 0; - break; - case ast_use_p2a: - ast_write32(ast, 0xf004, 0x1e6e0000); - ast_write32(ast, 0xf000, 0x1); - mcr_cfg = ast_read32(ast, 0x10004); - mcr_scu_mpll = ast_read32(ast, 0x10120); - mcr_scu_strap = ast_read32(ast, 0x10170); - break; - case ast_use_defaults: - default: - ast->dram_bus_width = 16; - ast->dram_type = AST_DRAM_1Gx16; - if (IS_AST_GEN6(ast)) - ast->mclk = 800; - else - ast->mclk = 396; - return 0; - } - - if (mcr_cfg & 0x40) - ast->dram_bus_width = 16; - else - ast->dram_bus_width = 32; - - if (IS_AST_GEN6(ast)) { - switch (mcr_cfg & 0x03) { - case 0: - ast->dram_type = AST_DRAM_1Gx16; - break; - default: - case 1: - ast->dram_type = AST_DRAM_2Gx16; - break; - case 2: - ast->dram_type = AST_DRAM_4Gx16; - break; - case 3: - ast->dram_type = AST_DRAM_8Gx16; - break; - } - } else if (IS_AST_GEN4(ast) || IS_AST_GEN5(ast)) { - switch (mcr_cfg & 0x03) { - case 0: - ast->dram_type = AST_DRAM_512Mx16; - break; - default: - case 1: - ast->dram_type = AST_DRAM_1Gx16; - break; - case 2: - ast->dram_type = AST_DRAM_2Gx16; - break; - case 3: - ast->dram_type = AST_DRAM_4Gx16; - break; - } - } else { - switch (mcr_cfg & 0x0c) { - case 0: - case 4: - ast->dram_type = AST_DRAM_512Mx16; - break; - case 8: - if (mcr_cfg & 0x40) - ast->dram_type = AST_DRAM_1Gx16; - else - ast->dram_type = AST_DRAM_512Mx32; - break; - case 0xc: - ast->dram_type = AST_DRAM_1Gx32; - break; - } - } - - if (mcr_scu_strap & 0x2000) - ref_pll = 14318; - else - ref_pll = 12000; - - denum = mcr_scu_mpll & 0x1f; - num = (mcr_scu_mpll & 0x3fe0) >> 5; - dsel = (mcr_scu_mpll & 0xc000) >> 14; - switch (dsel) { - case 3: - div = 0x4; - break; - case 2: - case 1: - div = 0x2; - break; - default: - div = 0x1; - break; - } - ast->mclk = ref_pll * (num + 2) / ((denum + 2) * (div * 1000)); - return 0; -} - struct drm_device *ast_device_create(struct pci_dev *pdev, const struct drm_driver *drv, enum ast_chip chip, @@ -352,12 +232,6 @@ struct drm_device *ast_device_create(struct pci_dev *pdev, ast->regs = regs; ast->ioregs = ioregs; - ret = ast_get_dram_info(ast); - if (ret) - return ERR_PTR(ret); - drm_info(dev, "dram MCLK=%u Mhz type=%d bus_width=%d\n", - ast->mclk, ast->dram_type, ast->dram_bus_width); - ast_detect_tx_chip(ast, need_post); switch (ast->tx_chip) { case AST_TX_ASTDP: diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig index 6945029b3592..a250afd8d662 100644 --- a/drivers/gpu/drm/bridge/Kconfig +++ b/drivers/gpu/drm/bridge/Kconfig @@ -120,8 +120,7 @@ config DRM_ITE_IT6505 select DRM_DISPLAY_DP_AUX_BUS select DRM_KMS_HELPER select EXTCON - select CRYPTO - select CRYPTO_HASH + select CRYPTO_LIB_SHA1 select REGMAP_I2C help ITE IT6505 DisplayPort bridge chip driver. 
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511.h b/drivers/gpu/drm/bridge/adv7511/adv7511.h index 85ebead9809c..8be7266fd4f4 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511.h +++ b/drivers/gpu/drm/bridge/adv7511/adv7511.h @@ -195,13 +195,14 @@ #define ADV7511_I2S_IEC958_DIRECT 3 #define ADV7511_PACKET(p, x) ((p) * 0x20 + (x)) -#define ADV7511_PACKET_SDP(x) ADV7511_PACKET(0, x) +#define ADV7511_PACKET_SPD(x) ADV7511_PACKET(0, x) #define ADV7511_PACKET_MPEG(x) ADV7511_PACKET(1, x) #define ADV7511_PACKET_ACP(x) ADV7511_PACKET(2, x) #define ADV7511_PACKET_ISRC1(x) ADV7511_PACKET(3, x) #define ADV7511_PACKET_ISRC2(x) ADV7511_PACKET(4, x) #define ADV7511_PACKET_GM(x) ADV7511_PACKET(5, x) -#define ADV7511_PACKET_SPARE(x) ADV7511_PACKET(6, x) +#define ADV7511_PACKET_SPARE1(x) ADV7511_PACKET(6, x) +#define ADV7511_PACKET_SPARE2(x) ADV7511_PACKET(7, x) #define ADV7511_REG_CEC_TX_FRAME_HDR 0x00 #define ADV7511_REG_CEC_TX_FRAME_DATA0 0x01 @@ -348,6 +349,7 @@ struct adv7511 { struct i2c_client *i2c_cec; struct regmap *regmap; + struct regmap *regmap_packet; struct regmap *regmap_cec; enum drm_connector_status status; bool powered; diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c index 766b1c96bc88..87e7e820810a 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_audio.c @@ -12,6 +12,8 @@ #include <sound/soc.h> #include <linux/of_graph.h> +#include <drm/display/drm_hdmi_state_helper.h> + #include "adv7511.h" static void adv7511_calc_cts_n(unsigned int f_tmds, unsigned int fs, @@ -155,17 +157,8 @@ int adv7511_hdmi_audio_prepare(struct drm_bridge *bridge, regmap_update_bits(adv7511->regmap, ADV7511_REG_I2C_FREQ_ID_CFG, ADV7511_I2C_FREQ_ID_CFG_RATE_MASK, rate << 4); - /* send current Audio infoframe values while updating */ - regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE, - BIT(5), BIT(5)); - - regmap_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME(0), 0x1); - - /* use Audio infoframe updated info */ - regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE, - BIT(5), 0); - - return 0; + return drm_atomic_helper_connector_hdmi_update_audio_infoframe(connector, + &hparms->cea); } int adv7511_hdmi_audio_startup(struct drm_bridge *bridge, @@ -188,15 +181,9 @@ int adv7511_hdmi_audio_startup(struct drm_bridge *bridge, /* not copyrighted */ regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CFG1, BIT(5), BIT(5)); - /* enable audio infoframes */ - regmap_update_bits(adv7511->regmap, ADV7511_REG_PACKET_ENABLE1, - BIT(3), BIT(3)); /* AV mute disable */ regmap_update_bits(adv7511->regmap, ADV7511_REG_GC(0), BIT(7) | BIT(6), BIT(7)); - /* use Audio infoframe updated info */ - regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE, - BIT(5), 0); /* enable SPDIF receiver */ if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF) @@ -214,4 +201,6 @@ void adv7511_hdmi_audio_shutdown(struct drm_bridge *bridge, if (adv7511->audio_source == ADV7511_AUDIO_SOURCE_SPDIF) regmap_update_bits(adv7511->regmap, ADV7511_REG_AUDIO_CONFIG, BIT(7), 0); + + drm_atomic_helper_connector_hdmi_clear_audio_infoframe(connector); } diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c index 00d6417c177b..b9be86541307 100644 --- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c +++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c @@ -132,6 +132,13 @@ static const struct regmap_config adv7511_regmap_config = { .volatile_reg = 
adv7511_register_volatile, }; +static const struct regmap_config adv7511_packet_config = { + .reg_bits = 8, + .val_bits = 8, + + .max_register = 0xff, +}; + /* ----------------------------------------------------------------------------- * Hardware configuration */ @@ -886,9 +893,18 @@ static int adv7511_bridge_hdmi_clear_infoframe(struct drm_bridge *bridge, struct adv7511 *adv7511 = bridge_to_adv7511(bridge); switch (type) { + case HDMI_INFOFRAME_TYPE_AUDIO: + adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME); + break; case HDMI_INFOFRAME_TYPE_AVI: adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME); break; + case HDMI_INFOFRAME_TYPE_SPD: + adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_SPD); + break; + case HDMI_INFOFRAME_TYPE_VENDOR: + adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_SPARE1); + break; default: drm_dbg_driver(adv7511->bridge.dev, "Unsupported HDMI InfoFrame %x\n", type); break; @@ -903,16 +919,52 @@ static int adv7511_bridge_hdmi_write_infoframe(struct drm_bridge *bridge, { struct adv7511 *adv7511 = bridge_to_adv7511(bridge); - adv7511_bridge_hdmi_clear_infoframe(bridge, type); - switch (type) { + case HDMI_INFOFRAME_TYPE_AUDIO: + /* send current Audio infoframe values while updating */ + regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE, + BIT(5), BIT(5)); + + /* The Audio infoframe id is not configurable */ + regmap_bulk_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME_VERSION, + buffer + 1, len - 1); + + /* use Audio infoframe updated info */ + regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE, + BIT(5), 0); + + adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AUDIO_INFOFRAME); + break; case HDMI_INFOFRAME_TYPE_AVI: + /* send current AVI infoframe values while updating */ + regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE, + BIT(6), BIT(6)); + /* The AVI infoframe id is not configurable */ regmap_bulk_write(adv7511->regmap, ADV7511_REG_AVI_INFOFRAME_VERSION, buffer + 1, len - 1); + regmap_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME_LENGTH, 0x2); + regmap_write(adv7511->regmap, ADV7511_REG_AUDIO_INFOFRAME(1), 0x1); + + /* use AVI infoframe updated info */ + regmap_update_bits(adv7511->regmap, ADV7511_REG_INFOFRAME_UPDATE, + BIT(6), 0); + adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_AVI_INFOFRAME); break; + case HDMI_INFOFRAME_TYPE_SPD: + adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_SPD); + regmap_bulk_write(adv7511->regmap_packet, ADV7511_PACKET_SPD(0), + buffer, len); + adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_SPD); + break; + case HDMI_INFOFRAME_TYPE_VENDOR: + adv7511_packet_disable(adv7511, ADV7511_PACKET_ENABLE_SPARE1); + regmap_bulk_write(adv7511->regmap_packet, ADV7511_PACKET_SPARE1(0), + buffer, len); + adv7511_packet_enable(adv7511, ADV7511_PACKET_ENABLE_SPARE1); + break; default: drm_dbg_driver(adv7511->bridge.dev, "Unsupported HDMI InfoFrame %x\n", type); break; @@ -1242,6 +1294,13 @@ static int adv7511_probe(struct i2c_client *i2c) goto err_i2c_unregister_edid; } + adv7511->regmap_packet = devm_regmap_init_i2c(adv7511->i2c_packet, + &adv7511_packet_config); + if (IS_ERR(adv7511->regmap_packet)) { + ret = PTR_ERR(adv7511->regmap_packet); + goto err_i2c_unregister_packet; + } + regmap_write(adv7511->regmap, ADV7511_REG_PACKET_I2C_ADDR, adv7511->i2c_packet->addr << 1); diff --git a/drivers/gpu/drm/bridge/cadence/Kconfig b/drivers/gpu/drm/bridge/cadence/Kconfig index cced81633ddc..f1d8a8a151d8 100644 --- 
a/drivers/gpu/drm/bridge/cadence/Kconfig +++ b/drivers/gpu/drm/bridge/cadence/Kconfig @@ -6,6 +6,7 @@ config DRM_CDNS_DSI select DRM_PANEL_BRIDGE select GENERIC_PHY select GENERIC_PHY_MIPI_DPHY + select VIDEOMODE_HELPERS depends on OF help Support Cadence DPI to DSI bridge. This is an internal diff --git a/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c b/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c index bea8346515b8..8f7a0d46601a 100644 --- a/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c +++ b/drivers/gpu/drm/bridge/imx/imx93-mipi-dsi.c @@ -492,14 +492,12 @@ static int imx93_dsi_get_phy_configure_opts(struct imx93_dsi *dsi, static enum drm_mode_status imx93_dsi_validate_mode(struct imx93_dsi *dsi, const struct drm_display_mode *mode) { - struct drm_bridge *bridge = dw_mipi_dsi_get_bridge(dsi->dmd); + struct drm_bridge *dmd_bridge = dw_mipi_dsi_get_bridge(dsi->dmd); + struct drm_bridge *last_bridge __free(drm_bridge_put) = + drm_bridge_chain_get_last_bridge(dmd_bridge->encoder); - /* Get the last bridge */ - while (drm_bridge_get_next_bridge(bridge)) - bridge = drm_bridge_get_next_bridge(bridge); - - if ((bridge->ops & DRM_BRIDGE_OP_DETECT) && - (bridge->ops & DRM_BRIDGE_OP_EDID)) { + if ((last_bridge->ops & DRM_BRIDGE_OP_DETECT) && + (last_bridge->ops & DRM_BRIDGE_OP_EDID)) { unsigned long pixel_clock_rate = mode->clock * 1000; unsigned long rounded_rate; diff --git a/drivers/gpu/drm/bridge/ite-it6263.c b/drivers/gpu/drm/bridge/ite-it6263.c index cf813672b4ff..2eb8fba7016c 100644 --- a/drivers/gpu/drm/bridge/ite-it6263.c +++ b/drivers/gpu/drm/bridge/ite-it6263.c @@ -146,6 +146,7 @@ #define HDMI_COLOR_DEPTH_24 FIELD_PREP(HDMI_COLOR_DEPTH, 4) #define HDMI_REG_PKT_GENERAL_CTRL 0xc6 +#define HDMI_REG_PKT_NULL_CTRL 0xc9 #define HDMI_REG_AVI_INFOFRM_CTRL 0xcd #define ENABLE_PKT BIT(0) #define REPEAT_PKT BIT(1) @@ -154,6 +155,12 @@ * 3) HDMI register bank1: 0x130 ~ 0x1ff (HDMI packet registers) */ +/* NULL packet registers */ +/* Header Byte(HB): n = 0 ~ 2 */ +#define HDMI_REG_PKT_HB(n) (0x138 + (n)) +/* Packet Byte(PB): n = 0 ~ 27(HDMI_MAX_INFOFRAME_SIZE), n = 0 for checksum */ +#define HDMI_REG_PKT_PB(n) (0x13b + (n)) + /* AVI packet registers */ #define HDMI_REG_AVI_DB1 0x158 #define HDMI_REG_AVI_DB2 0x159 @@ -224,7 +231,9 @@ static bool it6263_hdmi_writeable_reg(struct device *dev, unsigned int reg) case HDMI_REG_HDMI_MODE: case HDMI_REG_GCP: case HDMI_REG_PKT_GENERAL_CTRL: + case HDMI_REG_PKT_NULL_CTRL: case HDMI_REG_AVI_INFOFRM_CTRL: + case HDMI_REG_PKT_HB(0) ... 
HDMI_REG_PKT_PB(HDMI_MAX_INFOFRAME_SIZE): case HDMI_REG_AVI_DB1: case HDMI_REG_AVI_DB2: case HDMI_REG_AVI_DB3: @@ -755,10 +764,16 @@ static int it6263_hdmi_clear_infoframe(struct drm_bridge *bridge, { struct it6263 *it = bridge_to_it6263(bridge); - if (type == HDMI_INFOFRAME_TYPE_AVI) + switch (type) { + case HDMI_INFOFRAME_TYPE_AVI: regmap_write(it->hdmi_regmap, HDMI_REG_AVI_INFOFRM_CTRL, 0); - else + break; + case HDMI_INFOFRAME_TYPE_VENDOR: + regmap_write(it->hdmi_regmap, HDMI_REG_PKT_NULL_CTRL, 0); + break; + default: dev_dbg(it->dev, "unsupported HDMI infoframe 0x%x\n", type); + } return 0; } @@ -770,27 +785,36 @@ static int it6263_hdmi_write_infoframe(struct drm_bridge *bridge, struct it6263 *it = bridge_to_it6263(bridge); struct regmap *regmap = it->hdmi_regmap; - if (type != HDMI_INFOFRAME_TYPE_AVI) { + switch (type) { + case HDMI_INFOFRAME_TYPE_AVI: + /* write the first AVI infoframe data byte chunk(DB1-DB5) */ + regmap_bulk_write(regmap, HDMI_REG_AVI_DB1, + &buffer[HDMI_INFOFRAME_HEADER_SIZE], + HDMI_AVI_DB_CHUNK1_SIZE); + + /* write the second AVI infoframe data byte chunk(DB6-DB13) */ + regmap_bulk_write(regmap, HDMI_REG_AVI_DB6, + &buffer[HDMI_INFOFRAME_HEADER_SIZE + + HDMI_AVI_DB_CHUNK1_SIZE], + HDMI_AVI_DB_CHUNK2_SIZE); + + /* write checksum */ + regmap_write(regmap, HDMI_REG_AVI_CSUM, buffer[3]); + + regmap_write(regmap, HDMI_REG_AVI_INFOFRM_CTRL, + ENABLE_PKT | REPEAT_PKT); + break; + case HDMI_INFOFRAME_TYPE_VENDOR: + /* write header and payload */ + regmap_bulk_write(regmap, HDMI_REG_PKT_HB(0), buffer, len); + + regmap_write(regmap, HDMI_REG_PKT_NULL_CTRL, + ENABLE_PKT | REPEAT_PKT); + break; + default: dev_dbg(it->dev, "unsupported HDMI infoframe 0x%x\n", type); - return 0; } - /* write the first AVI infoframe data byte chunk(DB1-DB5) */ - regmap_bulk_write(regmap, HDMI_REG_AVI_DB1, - &buffer[HDMI_INFOFRAME_HEADER_SIZE], - HDMI_AVI_DB_CHUNK1_SIZE); - - /* write the second AVI infoframe data byte chunk(DB6-DB13) */ - regmap_bulk_write(regmap, HDMI_REG_AVI_DB6, - &buffer[HDMI_INFOFRAME_HEADER_SIZE + - HDMI_AVI_DB_CHUNK1_SIZE], - HDMI_AVI_DB_CHUNK2_SIZE); - - /* write checksum */ - regmap_write(regmap, HDMI_REG_AVI_CSUM, buffer[3]); - - regmap_write(regmap, HDMI_REG_AVI_INFOFRM_CTRL, ENABLE_PKT | REPEAT_PKT); - return 0; } diff --git a/drivers/gpu/drm/bridge/ite-it6505.c b/drivers/gpu/drm/bridge/ite-it6505.c index 89649c17ffad..a094803ba7aa 100644 --- a/drivers/gpu/drm/bridge/ite-it6505.c +++ b/drivers/gpu/drm/bridge/ite-it6505.c @@ -21,7 +21,7 @@ #include <linux/wait.h> #include <linux/bitfield.h> -#include <crypto/hash.h> +#include <crypto/sha1.h> #include <drm/display/drm_dp_helper.h> #include <drm/display/drm_hdcp_helper.h> @@ -2107,35 +2107,6 @@ static void it6505_hdcp_part1_auth(struct it6505 *it6505) it6505->hdcp_status = HDCP_AUTH_GOING; } -static int it6505_sha1_digest(struct it6505 *it6505, u8 *sha1_input, - unsigned int size, u8 *output_av) -{ - struct shash_desc *desc; - struct crypto_shash *tfm; - int err; - struct device *dev = it6505->dev; - - tfm = crypto_alloc_shash("sha1", 0, 0); - if (IS_ERR(tfm)) { - dev_err(dev, "crypto_alloc_shash sha1 failed"); - return PTR_ERR(tfm); - } - desc = kzalloc(sizeof(*desc) + crypto_shash_descsize(tfm), GFP_KERNEL); - if (!desc) { - crypto_free_shash(tfm); - return -ENOMEM; - } - - desc->tfm = tfm; - err = crypto_shash_digest(desc, sha1_input, size, output_av); - if (err) - dev_err(dev, "crypto_shash_digest sha1 failed"); - - crypto_free_shash(tfm); - kfree(desc); - return err; -} - static int 
it6505_setup_sha1_input(struct it6505 *it6505, u8 *sha1_input) { struct device *dev = it6505->dev; @@ -2205,7 +2176,7 @@ static bool it6505_hdcp_part2_ksvlist_check(struct it6505 *it6505) return false; } - it6505_sha1_digest(it6505, it6505->sha1_input, i, (u8 *)av); + sha1(it6505->sha1_input, i, (u8 *)av); /*1B-05 V' must retry 3 times */ for (retry = 0; retry < 3; retry++) { err = it6505_get_dpcd(it6505, DP_AUX_HDCP_V_PRIME(0), (u8 *)bv, diff --git a/drivers/gpu/drm/bridge/simple-bridge.c b/drivers/gpu/drm/bridge/simple-bridge.c index 1f16d568bcc4..e4d0bc2200f8 100644 --- a/drivers/gpu/drm/bridge/simple-bridge.c +++ b/drivers/gpu/drm/bridge/simple-bridge.c @@ -267,6 +267,11 @@ static const struct of_device_id simple_bridge_match[] = { .connector_type = DRM_MODE_CONNECTOR_HDMIA, }, }, { + .compatible = "realtek,rtd2171", + .data = &(const struct simple_bridge_info) { + .connector_type = DRM_MODE_CONNECTOR_HDMIA, + }, + }, { .compatible = "ti,opa362", .data = &(const struct simple_bridge_info) { .connector_type = DRM_MODE_CONNECTOR_Composite, diff --git a/drivers/gpu/drm/bridge/synopsys/Kconfig b/drivers/gpu/drm/bridge/synopsys/Kconfig index f3ab2f985f8c..2c5e532410de 100644 --- a/drivers/gpu/drm/bridge/synopsys/Kconfig +++ b/drivers/gpu/drm/bridge/synopsys/Kconfig @@ -1,4 +1,11 @@ # SPDX-License-Identifier: GPL-2.0-only +config DRM_DW_DP + tristate + select DRM_DISPLAY_HELPER + select DRM_DISPLAY_DP_HELPER + select DRM_KMS_HELPER + select REGMAP_MMIO + config DRM_DW_HDMI tristate select DRM_DISPLAY_HDMI_HELPER diff --git a/drivers/gpu/drm/bridge/synopsys/Makefile b/drivers/gpu/drm/bridge/synopsys/Makefile index 9dc376d220ad..4dada44029ac 100644 --- a/drivers/gpu/drm/bridge/synopsys/Makefile +++ b/drivers/gpu/drm/bridge/synopsys/Makefile @@ -1,4 +1,5 @@ # SPDX-License-Identifier: GPL-2.0-only +obj-$(CONFIG_DRM_DW_DP) += dw-dp.o obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o obj-$(CONFIG_DRM_DW_HDMI_GP_AUDIO) += dw-hdmi-gp-audio.o diff --git a/drivers/gpu/drm/bridge/synopsys/dw-dp.c b/drivers/gpu/drm/bridge/synopsys/dw-dp.c new file mode 100644 index 000000000000..9bbfe8da3de0 --- /dev/null +++ b/drivers/gpu/drm/bridge/synopsys/dw-dp.c @@ -0,0 +1,2095 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Synopsys DesignWare Cores DisplayPort Transmitter Controller + * + * Copyright (c) 2025 Rockchip Electronics Co., Ltd. 
+ * + * Author: Andy Yan <andy.yan@rock-chips.com> + */ +#include <linux/bitfield.h> +#include <linux/clk.h> +#include <linux/iopoll.h> +#include <linux/irq.h> +#include <linux/media-bus-format.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <linux/regmap.h> +#include <linux/reset.h> +#include <linux/phy/phy.h> +#include <linux/unaligned.h> + +#include <drm/bridge/dw_dp.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_bridge_connector.h> +#include <drm/display/drm_dp_helper.h> +#include <drm/drm_edid.h> +#include <drm/drm_of.h> +#include <drm/drm_print.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> + +#define DW_DP_VERSION_NUMBER 0x0000 +#define DW_DP_VERSION_TYPE 0x0004 +#define DW_DP_ID 0x0008 + +#define DW_DP_CONFIG_REG1 0x0100 +#define DW_DP_CONFIG_REG2 0x0104 +#define DW_DP_CONFIG_REG3 0x0108 + +#define DW_DP_CCTL 0x0200 +#define FORCE_HPD BIT(4) +#define DEFAULT_FAST_LINK_TRAIN_EN BIT(2) +#define ENHANCE_FRAMING_EN BIT(1) +#define SCRAMBLE_DIS BIT(0) +#define DW_DP_SOFT_RESET_CTRL 0x0204 +#define VIDEO_RESET BIT(5) +#define AUX_RESET BIT(4) +#define AUDIO_SAMPLER_RESET BIT(3) +#define HDCP_MODULE_RESET BIT(2) +#define PHY_SOFT_RESET BIT(1) +#define CONTROLLER_RESET BIT(0) + +#define DW_DP_VSAMPLE_CTRL 0x0300 +#define PIXEL_MODE_SELECT GENMASK(22, 21) +#define VIDEO_MAPPING GENMASK(20, 16) +#define VIDEO_STREAM_ENABLE BIT(5) + +#define DW_DP_VSAMPLE_STUFF_CTRL1 0x0304 + +#define DW_DP_VSAMPLE_STUFF_CTRL2 0x0308 + +#define DW_DP_VINPUT_POLARITY_CTRL 0x030c +#define DE_IN_POLARITY BIT(2) +#define HSYNC_IN_POLARITY BIT(1) +#define VSYNC_IN_POLARITY BIT(0) + +#define DW_DP_VIDEO_CONFIG1 0x0310 +#define HACTIVE GENMASK(31, 16) +#define HBLANK GENMASK(15, 2) +#define I_P BIT(1) +#define R_V_BLANK_IN_OSC BIT(0) + +#define DW_DP_VIDEO_CONFIG2 0x0314 +#define VBLANK GENMASK(31, 16) +#define VACTIVE GENMASK(15, 0) + +#define DW_DP_VIDEO_CONFIG3 0x0318 +#define H_SYNC_WIDTH GENMASK(31, 16) +#define H_FRONT_PORCH GENMASK(15, 0) + +#define DW_DP_VIDEO_CONFIG4 0x031c +#define V_SYNC_WIDTH GENMASK(31, 16) +#define V_FRONT_PORCH GENMASK(15, 0) + +#define DW_DP_VIDEO_CONFIG5 0x0320 +#define INIT_THRESHOLD_HI GENMASK(22, 21) +#define AVERAGE_BYTES_PER_TU_FRAC GENMASK(19, 16) +#define INIT_THRESHOLD GENMASK(13, 7) +#define AVERAGE_BYTES_PER_TU GENMASK(6, 0) + +#define DW_DP_VIDEO_MSA1 0x0324 +#define VSTART GENMASK(31, 16) +#define HSTART GENMASK(15, 0) + +#define DW_DP_VIDEO_MSA2 0x0328 +#define MISC0 GENMASK(31, 24) + +#define DW_DP_VIDEO_MSA3 0x032c +#define MISC1 GENMASK(31, 24) + +#define DW_DP_VIDEO_HBLANK_INTERVAL 0x0330 +#define HBLANK_INTERVAL_EN BIT(16) +#define HBLANK_INTERVAL GENMASK(15, 0) + +#define DW_DP_AUD_CONFIG1 0x0400 +#define AUDIO_TIMESTAMP_VERSION_NUM GENMASK(29, 24) +#define AUDIO_PACKET_ID GENMASK(23, 16) +#define AUDIO_MUTE BIT(15) +#define NUM_CHANNELS GENMASK(14, 12) +#define HBR_MODE_ENABLE BIT(10) +#define AUDIO_DATA_WIDTH GENMASK(9, 5) +#define AUDIO_DATA_IN_EN GENMASK(4, 1) +#define AUDIO_INF_SELECT BIT(0) + +#define DW_DP_SDP_VERTICAL_CTRL 0x0500 +#define EN_VERTICAL_SDP BIT(2) +#define EN_AUDIO_STREAM_SDP BIT(1) +#define EN_AUDIO_TIMESTAMP_SDP BIT(0) +#define DW_DP_SDP_HORIZONTAL_CTRL 0x0504 +#define EN_HORIZONTAL_SDP BIT(2) +#define DW_DP_SDP_STATUS_REGISTER 0x0508 +#define DW_DP_SDP_MANUAL_CTRL 0x050c +#define DW_DP_SDP_STATUS_EN 0x0510 + +#define DW_DP_SDP_REGISTER_BANK 0x0600 +#define SDP_REGS GENMASK(31, 0) + +#define DW_DP_PHYIF_CTRL 0x0a00 +#define PHY_WIDTH 
BIT(25) +#define PHY_POWERDOWN GENMASK(20, 17) +#define PHY_BUSY GENMASK(15, 12) +#define SSC_DIS BIT(16) +#define XMIT_ENABLE GENMASK(11, 8) +#define PHY_LANES GENMASK(7, 6) +#define PHY_RATE GENMASK(5, 4) +#define TPS_SEL GENMASK(3, 0) + +#define DW_DP_PHY_TX_EQ 0x0a04 +#define DW_DP_CUSTOMPAT0 0x0a08 +#define DW_DP_CUSTOMPAT1 0x0a0c +#define DW_DP_CUSTOMPAT2 0x0a10 +#define DW_DP_HBR2_COMPLIANCE_SCRAMBLER_RESET 0x0a14 +#define DW_DP_PHYIF_PWRDOWN_CTRL 0x0a18 + +#define DW_DP_AUX_CMD 0x0b00 +#define AUX_CMD_TYPE GENMASK(31, 28) +#define AUX_ADDR GENMASK(27, 8) +#define I2C_ADDR_ONLY BIT(4) +#define AUX_LEN_REQ GENMASK(3, 0) + +#define DW_DP_AUX_STATUS 0x0b04 +#define AUX_TIMEOUT BIT(17) +#define AUX_BYTES_READ GENMASK(23, 19) +#define AUX_STATUS GENMASK(7, 4) + +#define DW_DP_AUX_DATA0 0x0b08 +#define DW_DP_AUX_DATA1 0x0b0c +#define DW_DP_AUX_DATA2 0x0b10 +#define DW_DP_AUX_DATA3 0x0b14 + +#define DW_DP_GENERAL_INTERRUPT 0x0d00 +#define VIDEO_FIFO_OVERFLOW_STREAM0 BIT(6) +#define AUDIO_FIFO_OVERFLOW_STREAM0 BIT(5) +#define SDP_EVENT_STREAM0 BIT(4) +#define AUX_CMD_INVALID BIT(3) +#define HDCP_EVENT BIT(2) +#define AUX_REPLY_EVENT BIT(1) +#define HPD_EVENT BIT(0) + +#define DW_DP_GENERAL_INTERRUPT_ENABLE 0x0d04 +#define HDCP_EVENT_EN BIT(2) +#define AUX_REPLY_EVENT_EN BIT(1) +#define HPD_EVENT_EN BIT(0) + +#define DW_DP_HPD_STATUS 0x0d08 +#define HPD_STATE GENMASK(11, 9) +#define HPD_STATUS BIT(8) +#define HPD_HOT_UNPLUG BIT(2) +#define HPD_HOT_PLUG BIT(1) +#define HPD_IRQ BIT(0) + +#define DW_DP_HPD_INTERRUPT_ENABLE 0x0d0c +#define HPD_UNPLUG_ERR_EN BIT(3) +#define HPD_UNPLUG_EN BIT(2) +#define HPD_PLUG_EN BIT(1) +#define HPD_IRQ_EN BIT(0) + +#define DW_DP_HDCP_CFG 0x0e00 +#define DPCD12PLUS BIT(7) +#define CP_IRQ BIT(6) +#define BYPENCRYPTION BIT(5) +#define HDCP_LOCK BIT(4) +#define ENCRYPTIONDISABLE BIT(3) +#define ENABLE_HDCP_13 BIT(2) +#define ENABLE_HDCP BIT(1) + +#define DW_DP_HDCP_OBS 0x0e04 +#define HDCP22_RE_AUTHENTICATION_REQ BIT(31) +#define HDCP22_AUTHENTICATION_FAILED BIT(30) +#define HDCP22_AUTHENTICATION_SUCCESS BIT(29) +#define HDCP22_CAPABLE_SINK BIT(28) +#define HDCP22_SINK_CAP_CHECK_COMPLETE BIT(27) +#define HDCP22_STATE GENMASK(26, 24) +#define HDCP22_BOOTED BIT(23) +#define HDCP13_BSTATUS GENMASK(22, 19) +#define REPEATER BIT(18) +#define HDCP_CAPABLE BIT(17) +#define STATEE GENMASK(16, 14) +#define STATEOEG GENMASK(13, 11) +#define STATER GENMASK(10, 8) +#define STATEA GENMASK(7, 4) +#define SUBSTATEA GENMASK(3, 1) +#define HDCPENGAGED BIT(0) + +#define DW_DP_HDCP_APIINTCLR 0x0e08 +#define DW_DP_HDCP_APIINTSTAT 0x0e0c +#define DW_DP_HDCP_APIINTMSK 0x0e10 +#define HDCP22_GPIOINT BIT(8) +#define HDCP_ENGAGED BIT(7) +#define HDCP_FAILED BIT(6) +#define KSVSHA1CALCDONEINT BIT(5) +#define AUXRESPNACK7TIMES BIT(4) +#define AUXRESPTIMEOUT BIT(3) +#define AUXRESPDEFER7TIMES BIT(2) +#define KSVACCESSINT BIT(0) + +#define DW_DP_HDCP_KSVMEMCTRL 0x0e18 +#define KSVSHA1STATUS BIT(4) +#define KSVMEMACCESS BIT(1) +#define KSVMEMREQUEST BIT(0) + +#define DW_DP_HDCP_REG_BKSV0 0x3600 +#define DW_DP_HDCP_REG_BKSV1 0x3604 +#define DW_DP_HDCP_REG_ANCONF 0x3608 +#define AN_BYPASS BIT(0) + +#define DW_DP_HDCP_REG_AN0 0x360c +#define DW_DP_HDCP_REG_AN1 0x3610 +#define DW_DP_HDCP_REG_RMLCTL 0x3614 +#define ODPK_DECRYPT_ENABLE BIT(0) + +#define DW_DP_HDCP_REG_RMLSTS 0x3618 +#define IDPK_WR_OK_STS BIT(6) +#define IDPK_DATA_INDEX GENMASK(5, 0) +#define DW_DP_HDCP_REG_SEED 0x361c +#define DW_DP_HDCP_REG_DPK0 0x3620 +#define DW_DP_HDCP_REG_DPK1 0x3624 +#define DW_DP_HDCP22_GPIOSTS 0x3628 
+#define DW_DP_HDCP22_GPIOCHNGSTS 0x362c +#define DW_DP_HDCP_REG_DPK_CRC 0x3630 + +#define DW_DP_MAX_REGISTER DW_DP_HDCP_REG_DPK_CRC + +#define SDP_REG_BANK_SIZE 16 + +struct dw_dp_link_caps { + bool enhanced_framing; + bool tps3_supported; + bool tps4_supported; + bool fast_training; + bool channel_coding; + bool ssc; +}; + +struct dw_dp_link_train_set { + unsigned int voltage_swing[4]; + unsigned int pre_emphasis[4]; + bool voltage_max_reached[4]; + bool pre_max_reached[4]; +}; + +struct dw_dp_link_train { + struct dw_dp_link_train_set adjust; + bool clock_recovered; + bool channel_equalized; +}; + +struct dw_dp_link { + u8 dpcd[DP_RECEIVER_CAP_SIZE]; + unsigned char revision; + unsigned int rate; + unsigned int lanes; + u8 sink_count; + u8 vsc_sdp_supported; + struct dw_dp_link_caps caps; + struct dw_dp_link_train train; + struct drm_dp_desc desc; +}; + +struct dw_dp_bridge_state { + struct drm_bridge_state base; + struct drm_display_mode mode; + u8 video_mapping; + u8 color_format; + u8 bpc; + u8 bpp; +}; + +struct dw_dp_sdp { + struct dp_sdp base; + unsigned long flags; +}; + +struct dw_dp_hotplug { + bool long_hpd; +}; + +struct dw_dp { + struct drm_bridge bridge; + struct device *dev; + struct regmap *regmap; + struct phy *phy; + struct clk *apb_clk; + struct clk *aux_clk; + struct clk *i2s_clk; + struct clk *spdif_clk; + struct clk *hdcp_clk; + struct reset_control *rstc; + struct completion complete; + int irq; + struct work_struct hpd_work; + struct dw_dp_hotplug hotplug; + /* Serialize hpd status access */ + struct mutex irq_lock; + + struct drm_dp_aux aux; + + struct dw_dp_link link; + struct dw_dp_plat_data plat_data; + u8 pixel_mode; + + DECLARE_BITMAP(sdp_reg_bank, SDP_REG_BANK_SIZE); +}; + +enum { + DW_DP_RGB_6BIT, + DW_DP_RGB_8BIT, + DW_DP_RGB_10BIT, + DW_DP_RGB_12BIT, + DW_DP_RGB_16BIT, + DW_DP_YCBCR444_8BIT, + DW_DP_YCBCR444_10BIT, + DW_DP_YCBCR444_12BIT, + DW_DP_YCBCR444_16BIT, + DW_DP_YCBCR422_8BIT, + DW_DP_YCBCR422_10BIT, + DW_DP_YCBCR422_12BIT, + DW_DP_YCBCR422_16BIT, + DW_DP_YCBCR420_8BIT, + DW_DP_YCBCR420_10BIT, + DW_DP_YCBCR420_12BIT, + DW_DP_YCBCR420_16BIT, +}; + +enum { + DW_DP_MP_SINGLE_PIXEL, + DW_DP_MP_DUAL_PIXEL, + DW_DP_MP_QUAD_PIXEL, +}; + +enum { + DW_DP_SDP_VERTICAL_INTERVAL = BIT(0), + DW_DP_SDP_HORIZONTAL_INTERVAL = BIT(1), +}; + +enum { + DW_DP_HPD_STATE_IDLE, + DW_DP_HPD_STATE_UNPLUG, + DP_DP_HPD_STATE_TIMEOUT = 4, + DW_DP_HPD_STATE_PLUG = 7 +}; + +enum { + DW_DP_PHY_PATTERN_NONE, + DW_DP_PHY_PATTERN_TPS_1, + DW_DP_PHY_PATTERN_TPS_2, + DW_DP_PHY_PATTERN_TPS_3, + DW_DP_PHY_PATTERN_TPS_4, + DW_DP_PHY_PATTERN_SERM, + DW_DP_PHY_PATTERN_PBRS7, + DW_DP_PHY_PATTERN_CUSTOM_80BIT, + DW_DP_PHY_PATTERN_CP2520_1, + DW_DP_PHY_PATTERN_CP2520_2, +}; + +struct dw_dp_output_format { + u32 bus_format; + u32 color_format; + u8 video_mapping; + u8 bpc; + u8 bpp; +}; + +#define to_dw_dp_bridge_state(s) container_of(s, struct dw_dp_bridge_state, base) + +static const struct dw_dp_output_format dw_dp_output_formats[] = { + { MEDIA_BUS_FMT_RGB101010_1X30, DRM_COLOR_FORMAT_RGB444, DW_DP_RGB_10BIT, 10, 30 }, + { MEDIA_BUS_FMT_RGB888_1X24, DRM_COLOR_FORMAT_RGB444, DW_DP_RGB_8BIT, 8, 24 }, + { MEDIA_BUS_FMT_YUV10_1X30, DRM_COLOR_FORMAT_YCBCR444, DW_DP_YCBCR444_10BIT, 10, 30 }, + { MEDIA_BUS_FMT_YUV8_1X24, DRM_COLOR_FORMAT_YCBCR444, DW_DP_YCBCR444_8BIT, 8, 24}, + { MEDIA_BUS_FMT_YUYV10_1X20, DRM_COLOR_FORMAT_YCBCR422, DW_DP_YCBCR422_10BIT, 10, 20 }, + { MEDIA_BUS_FMT_YUYV8_1X16, DRM_COLOR_FORMAT_YCBCR422, DW_DP_YCBCR422_8BIT, 8, 16 }, + { MEDIA_BUS_FMT_UYYVYY10_0_5X30, 
DRM_COLOR_FORMAT_YCBCR420, DW_DP_YCBCR420_10BIT, 10, 15 }, + { MEDIA_BUS_FMT_UYYVYY8_0_5X24, DRM_COLOR_FORMAT_YCBCR420, DW_DP_YCBCR420_8BIT, 8, 12 }, + { MEDIA_BUS_FMT_RGB666_1X24_CPADHI, DRM_COLOR_FORMAT_RGB444, DW_DP_RGB_6BIT, 6, 18 }, +}; + +static const struct dw_dp_output_format *dw_dp_get_output_format(u32 bus_format) +{ + unsigned int i; + + for (i = 0; i < ARRAY_SIZE(dw_dp_output_formats); i++) + if (dw_dp_output_formats[i].bus_format == bus_format) + return &dw_dp_output_formats[i]; + + return NULL; +} + +static inline struct dw_dp *bridge_to_dp(struct drm_bridge *b) +{ + return container_of(b, struct dw_dp, bridge); +} + +static struct dw_dp_bridge_state *dw_dp_get_bridge_state(struct dw_dp *dp) +{ + struct dw_dp_bridge_state *dw_bridge_state; + struct drm_bridge_state *state; + + state = drm_priv_to_bridge_state(dp->bridge.base.state); + if (!state) + return NULL; + + dw_bridge_state = to_dw_dp_bridge_state(state); + if (!dw_bridge_state) + return NULL; + + return dw_bridge_state; +} + +static inline void dw_dp_phy_set_pattern(struct dw_dp *dp, u32 pattern) +{ + regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, TPS_SEL, + FIELD_PREP(TPS_SEL, pattern)); +} + +static void dw_dp_phy_xmit_enable(struct dw_dp *dp, u32 lanes) +{ + u32 xmit_enable; + + switch (lanes) { + case 4: + case 2: + case 1: + xmit_enable = GENMASK(lanes - 1, 0); + break; + case 0: + default: + xmit_enable = 0; + break; + } + + regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, XMIT_ENABLE, + FIELD_PREP(XMIT_ENABLE, xmit_enable)); +} + +static bool dw_dp_bandwidth_ok(struct dw_dp *dp, + const struct drm_display_mode *mode, u32 bpp, + unsigned int lanes, unsigned int rate) +{ + u32 max_bw, req_bw; + + req_bw = mode->clock * bpp / 8; + max_bw = lanes * rate; + if (req_bw > max_bw) + return false; + + return true; +} + +static bool dw_dp_hpd_detect(struct dw_dp *dp) +{ + u32 value; + + regmap_read(dp->regmap, DW_DP_HPD_STATUS, &value); + + return FIELD_GET(HPD_STATE, value) == DW_DP_HPD_STATE_PLUG; +} + +static void dw_dp_link_caps_reset(struct dw_dp_link_caps *caps) +{ + caps->enhanced_framing = false; + caps->tps3_supported = false; + caps->tps4_supported = false; + caps->fast_training = false; + caps->channel_coding = false; +} + +static void dw_dp_link_reset(struct dw_dp_link *link) +{ + link->vsc_sdp_supported = 0; + link->sink_count = 0; + link->revision = 0; + link->rate = 0; + link->lanes = 0; + + dw_dp_link_caps_reset(&link->caps); + memset(link->dpcd, 0, sizeof(link->dpcd)); +} + +static int dw_dp_link_parse(struct dw_dp *dp, struct drm_connector *connector) +{ + struct dw_dp_link *link = &dp->link; + int ret; + + dw_dp_link_reset(link); + + ret = drm_dp_read_dpcd_caps(&dp->aux, link->dpcd); + if (ret < 0) + return ret; + + drm_dp_read_desc(&dp->aux, &link->desc, drm_dp_is_branch(link->dpcd)); + + if (drm_dp_read_sink_count_cap(connector, link->dpcd, &link->desc)) { + ret = drm_dp_read_sink_count(&dp->aux); + if (ret < 0) + return ret; + + link->sink_count = ret; + + /* Dongle connected, but no display */ + if (!link->sink_count) + return -ENODEV; + } + + link->vsc_sdp_supported = drm_dp_vsc_sdp_supported(&dp->aux, link->dpcd); + + link->revision = link->dpcd[DP_DPCD_REV]; + link->rate = min_t(u32, min(dp->plat_data.max_link_rate, + dp->phy->attrs.max_link_rate * 100), + drm_dp_max_link_rate(link->dpcd)); + link->lanes = min_t(u8, phy_get_bus_width(dp->phy), + drm_dp_max_lane_count(link->dpcd)); + + link->caps.enhanced_framing = drm_dp_enhanced_frame_cap(link->dpcd); + link->caps.tps3_supported = 
drm_dp_tps3_supported(link->dpcd); + link->caps.tps4_supported = drm_dp_tps4_supported(link->dpcd); + link->caps.fast_training = drm_dp_fast_training_cap(link->dpcd); + link->caps.channel_coding = drm_dp_channel_coding_supported(link->dpcd); + link->caps.ssc = !!(link->dpcd[DP_MAX_DOWNSPREAD] & DP_MAX_DOWNSPREAD_0_5); + + return 0; +} + +static int dw_dp_link_train_update_vs_emph(struct dw_dp *dp) +{ + struct dw_dp_link *link = &dp->link; + struct dw_dp_link_train_set *train_set = &link->train.adjust; + unsigned int lanes = dp->link.lanes; + union phy_configure_opts phy_cfg; + unsigned int *vs, *pe; + int i, ret; + u8 buf[4]; + + vs = train_set->voltage_swing; + pe = train_set->pre_emphasis; + + for (i = 0; i < lanes; i++) { + phy_cfg.dp.voltage[i] = vs[i]; + phy_cfg.dp.pre[i] = pe[i]; + } + + phy_cfg.dp.set_lanes = false; + phy_cfg.dp.set_rate = false; + phy_cfg.dp.set_voltages = true; + + ret = phy_configure(dp->phy, &phy_cfg); + if (ret) + return ret; + + for (i = 0; i < lanes; i++) { + buf[i] = (vs[i] << DP_TRAIN_VOLTAGE_SWING_SHIFT) | + (pe[i] << DP_TRAIN_PRE_EMPHASIS_SHIFT); + if (train_set->voltage_max_reached[i]) + buf[i] |= DP_TRAIN_MAX_SWING_REACHED; + if (train_set->pre_max_reached[i]) + buf[i] |= DP_TRAIN_MAX_PRE_EMPHASIS_REACHED; + } + + ret = drm_dp_dpcd_write(&dp->aux, DP_TRAINING_LANE0_SET, buf, lanes); + if (ret < 0) + return ret; + + return 0; +} + +static int dw_dp_phy_configure(struct dw_dp *dp, unsigned int rate, + unsigned int lanes, bool ssc) +{ + union phy_configure_opts phy_cfg; + int ret; + + /* Move PHY to P3 */ + regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, PHY_POWERDOWN, + FIELD_PREP(PHY_POWERDOWN, 0x3)); + + phy_cfg.dp.lanes = lanes; + phy_cfg.dp.link_rate = rate / 100; + phy_cfg.dp.ssc = ssc; + phy_cfg.dp.set_lanes = true; + phy_cfg.dp.set_rate = true; + phy_cfg.dp.set_voltages = false; + ret = phy_configure(dp->phy, &phy_cfg); + if (ret) + return ret; + + regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, PHY_LANES, + FIELD_PREP(PHY_LANES, lanes / 2)); + + /* Move PHY to P0 */ + regmap_update_bits(dp->regmap, DW_DP_PHYIF_CTRL, PHY_POWERDOWN, + FIELD_PREP(PHY_POWERDOWN, 0x0)); + + dw_dp_phy_xmit_enable(dp, lanes); + + return 0; +} + +static int dw_dp_link_configure(struct dw_dp *dp) +{ + struct dw_dp_link *link = &dp->link; + u8 buf[2]; + int ret; + + ret = dw_dp_phy_configure(dp, link->rate, link->lanes, link->caps.ssc); + if (ret) + return ret; + + buf[0] = drm_dp_link_rate_to_bw_code(link->rate); + buf[1] = link->lanes; + + if (link->caps.enhanced_framing) { + buf[1] |= DP_LANE_COUNT_ENHANCED_FRAME_EN; + regmap_update_bits(dp->regmap, DW_DP_CCTL, ENHANCE_FRAMING_EN, + FIELD_PREP(ENHANCE_FRAMING_EN, 1)); + } else { + regmap_update_bits(dp->regmap, DW_DP_CCTL, ENHANCE_FRAMING_EN, + FIELD_PREP(ENHANCE_FRAMING_EN, 0)); + } + + ret = drm_dp_dpcd_write(&dp->aux, DP_LINK_BW_SET, buf, sizeof(buf)); + if (ret < 0) + return ret; + + buf[0] = link->caps.ssc ? DP_SPREAD_AMP_0_5 : 0; + buf[1] = link->caps.channel_coding ? 
DP_SET_ANSI_8B10B : 0; + + ret = drm_dp_dpcd_write(&dp->aux, DP_DOWNSPREAD_CTRL, buf, sizeof(buf)); + if (ret < 0) + return ret; + + return 0; +} + +static void dw_dp_link_train_init(struct dw_dp_link_train *train) +{ + struct dw_dp_link_train_set *adj = &train->adjust; + unsigned int i; + + for (i = 0; i < 4; i++) { + adj->voltage_swing[i] = 0; + adj->pre_emphasis[i] = 0; + adj->voltage_max_reached[i] = false; + adj->pre_max_reached[i] = false; + } + + train->clock_recovered = false; + train->channel_equalized = false; +} + +static bool dw_dp_link_train_valid(const struct dw_dp_link_train *train) +{ + return train->clock_recovered && train->channel_equalized; +} + +static int dw_dp_link_train_set_pattern(struct dw_dp *dp, u32 pattern) +{ + u8 buf = 0; + int ret; + + if (pattern && pattern != DP_TRAINING_PATTERN_4) { + buf |= DP_LINK_SCRAMBLING_DISABLE; + + regmap_update_bits(dp->regmap, DW_DP_CCTL, SCRAMBLE_DIS, + FIELD_PREP(SCRAMBLE_DIS, 1)); + } else { + regmap_update_bits(dp->regmap, DW_DP_CCTL, SCRAMBLE_DIS, + FIELD_PREP(SCRAMBLE_DIS, 0)); + } + + switch (pattern) { + case DP_TRAINING_PATTERN_DISABLE: + dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_NONE); + break; + case DP_TRAINING_PATTERN_1: + dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_TPS_1); + break; + case DP_TRAINING_PATTERN_2: + dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_TPS_2); + break; + case DP_TRAINING_PATTERN_3: + dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_TPS_3); + break; + case DP_TRAINING_PATTERN_4: + dw_dp_phy_set_pattern(dp, DW_DP_PHY_PATTERN_TPS_4); + break; + default: + return -EINVAL; + } + + ret = drm_dp_dpcd_writeb(&dp->aux, DP_TRAINING_PATTERN_SET, + buf | pattern); + if (ret < 0) + return ret; + + return 0; +} + +static u8 dw_dp_voltage_max(u8 preemph) +{ + switch (preemph & DP_TRAIN_PRE_EMPHASIS_MASK) { + case DP_TRAIN_PRE_EMPH_LEVEL_0: + return DP_TRAIN_VOLTAGE_SWING_LEVEL_3; + case DP_TRAIN_PRE_EMPH_LEVEL_1: + return DP_TRAIN_VOLTAGE_SWING_LEVEL_2; + case DP_TRAIN_PRE_EMPH_LEVEL_2: + return DP_TRAIN_VOLTAGE_SWING_LEVEL_1; + case DP_TRAIN_PRE_EMPH_LEVEL_3: + default: + return DP_TRAIN_VOLTAGE_SWING_LEVEL_0; + } +} + +static bool dw_dp_link_get_adjustments(struct dw_dp_link *link, + u8 status[DP_LINK_STATUS_SIZE]) +{ + struct dw_dp_link_train_set *adj = &link->train.adjust; + unsigned int i; + bool changed = false; + u8 v = 0; + u8 p = 0; + + for (i = 0; i < link->lanes; i++) { + v = drm_dp_get_adjust_request_voltage(status, i); + v >>= DP_TRAIN_VOLTAGE_SWING_SHIFT; + p = drm_dp_get_adjust_request_pre_emphasis(status, i); + p >>= DP_TRAIN_PRE_EMPHASIS_SHIFT; + + if (v != adj->voltage_swing[i] || p != adj->pre_emphasis[i]) + changed = true; + + if (p >= (DP_TRAIN_PRE_EMPH_LEVEL_3 >> DP_TRAIN_PRE_EMPHASIS_SHIFT)) { + adj->pre_emphasis[i] = DP_TRAIN_PRE_EMPH_LEVEL_3 >> + DP_TRAIN_PRE_EMPHASIS_SHIFT; + adj->pre_max_reached[i] = true; + } else { + adj->pre_emphasis[i] = p; + adj->pre_max_reached[i] = false; + } + + v = min(v, dw_dp_voltage_max(p)); + if (v >= (DP_TRAIN_VOLTAGE_SWING_LEVEL_3 >> DP_TRAIN_VOLTAGE_SWING_SHIFT)) { + adj->voltage_swing[i] = DP_TRAIN_VOLTAGE_SWING_LEVEL_3 >> + DP_TRAIN_VOLTAGE_SWING_SHIFT; + adj->voltage_max_reached[i] = true; + } else { + adj->voltage_swing[i] = v; + adj->voltage_max_reached[i] = false; + } + } + + return changed; +} + +static int dw_dp_link_clock_recovery(struct dw_dp *dp) +{ + struct dw_dp_link *link = &dp->link; + u8 status[DP_LINK_STATUS_SIZE]; + unsigned int tries = 0; + int ret; + bool adj_changed; + + ret = dw_dp_link_train_set_pattern(dp, DP_TRAINING_PATTERN_1); 
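+	/*
+	 * Training pattern 1 has been selected on the PHY and in the sink's
+	 * DPCD. The loop below applies the sink's requested voltage swing and
+	 * pre-emphasis adjustments and polls the link status until clock
+	 * recovery locks or the same adjustment keeps being requested.
+	 */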
+ if (ret) + return ret; + + for (;;) { + ret = dw_dp_link_train_update_vs_emph(dp); + if (ret) + return ret; + + drm_dp_link_train_clock_recovery_delay(&dp->aux, link->dpcd); + + ret = drm_dp_dpcd_read_link_status(&dp->aux, status); + if (ret < 0) { + dev_err(dp->dev, "failed to read link status: %d\n", ret); + return ret; + } + + if (drm_dp_clock_recovery_ok(status, link->lanes)) { + link->train.clock_recovered = true; + break; + } + + /* + * According to DP spec 1.4, if current ADJ is the same + * with previous REQ, we need to retry 5 times. + */ + adj_changed = dw_dp_link_get_adjustments(link, status); + if (!adj_changed) + tries++; + else + tries = 0; + + if (tries == 5) + break; + } + + return 0; +} + +static int dw_dp_link_channel_equalization(struct dw_dp *dp) +{ + struct dw_dp_link *link = &dp->link; + u8 status[DP_LINK_STATUS_SIZE], pattern; + unsigned int tries; + int ret; + + if (link->caps.tps4_supported) + pattern = DP_TRAINING_PATTERN_4; + else if (link->caps.tps3_supported) + pattern = DP_TRAINING_PATTERN_3; + else + pattern = DP_TRAINING_PATTERN_2; + ret = dw_dp_link_train_set_pattern(dp, pattern); + if (ret) + return ret; + + for (tries = 1; tries < 5; tries++) { + ret = dw_dp_link_train_update_vs_emph(dp); + if (ret) + return ret; + + drm_dp_link_train_channel_eq_delay(&dp->aux, link->dpcd); + + ret = drm_dp_dpcd_read_link_status(&dp->aux, status); + if (ret < 0) + return ret; + + if (!drm_dp_clock_recovery_ok(status, link->lanes)) { + dev_err(dp->dev, "clock recovery lost while equalizing channel\n"); + link->train.clock_recovered = false; + break; + } + + if (drm_dp_channel_eq_ok(status, link->lanes)) { + link->train.channel_equalized = true; + break; + } + + dw_dp_link_get_adjustments(link, status); + } + + return 0; +} + +static int dw_dp_link_downgrade(struct dw_dp *dp) +{ + struct dw_dp_link *link = &dp->link; + struct dw_dp_bridge_state *state; + + state = dw_dp_get_bridge_state(dp); + + switch (link->rate) { + case 162000: + return -EINVAL; + case 270000: + link->rate = 162000; + break; + case 540000: + link->rate = 270000; + break; + case 810000: + link->rate = 540000; + break; + } + + if (!dw_dp_bandwidth_ok(dp, &state->mode, state->bpp, link->lanes, + link->rate)) + return -E2BIG; + + return 0; +} + +static int dw_dp_link_train_full(struct dw_dp *dp) +{ + struct dw_dp_link *link = &dp->link; + int ret; + +retry: + dw_dp_link_train_init(&link->train); + + dev_dbg(dp->dev, "full-training link: %u lane%s at %u MHz\n", + link->lanes, (link->lanes > 1) ? 
"s" : "", link->rate / 100); + + ret = dw_dp_link_configure(dp); + if (ret < 0) { + dev_err(dp->dev, "failed to configure DP link: %d\n", ret); + return ret; + } + + ret = dw_dp_link_clock_recovery(dp); + if (ret < 0) { + dev_err(dp->dev, "clock recovery failed: %d\n", ret); + goto out; + } + + if (!link->train.clock_recovered) { + dev_err(dp->dev, "clock recovery failed, downgrading link\n"); + + ret = dw_dp_link_downgrade(dp); + if (ret < 0) + goto out; + else + goto retry; + } + + dev_dbg(dp->dev, "clock recovery succeeded\n"); + + ret = dw_dp_link_channel_equalization(dp); + if (ret < 0) { + dev_err(dp->dev, "channel equalization failed: %d\n", ret); + goto out; + } + + if (!link->train.channel_equalized) { + dev_err(dp->dev, "channel equalization failed, downgrading link\n"); + + ret = dw_dp_link_downgrade(dp); + if (ret < 0) + goto out; + else + goto retry; + } + + dev_dbg(dp->dev, "channel equalization succeeded\n"); + +out: + dw_dp_link_train_set_pattern(dp, DP_TRAINING_PATTERN_DISABLE); + return ret; +} + +static int dw_dp_link_train_fast(struct dw_dp *dp) +{ + struct dw_dp_link *link = &dp->link; + int ret; + u8 status[DP_LINK_STATUS_SIZE]; + u8 pattern; + + dw_dp_link_train_init(&link->train); + + dev_dbg(dp->dev, "fast-training link: %u lane%s at %u MHz\n", + link->lanes, (link->lanes > 1) ? "s" : "", link->rate / 100); + + ret = dw_dp_link_configure(dp); + if (ret < 0) { + dev_err(dp->dev, "failed to configure DP link: %d\n", ret); + return ret; + } + + ret = dw_dp_link_train_set_pattern(dp, DP_TRAINING_PATTERN_1); + if (ret) + goto out; + + usleep_range(500, 1000); + + if (link->caps.tps4_supported) + pattern = DP_TRAINING_PATTERN_4; + else if (link->caps.tps3_supported) + pattern = DP_TRAINING_PATTERN_3; + else + pattern = DP_TRAINING_PATTERN_2; + ret = dw_dp_link_train_set_pattern(dp, pattern); + if (ret) + goto out; + + usleep_range(500, 1000); + + ret = drm_dp_dpcd_read_link_status(&dp->aux, status); + if (ret < 0) { + dev_err(dp->dev, "failed to read link status: %d\n", ret); + goto out; + } + + if (!drm_dp_clock_recovery_ok(status, link->lanes)) { + dev_err(dp->dev, "clock recovery failed\n"); + ret = -EIO; + goto out; + } + + if (!drm_dp_channel_eq_ok(status, link->lanes)) { + dev_err(dp->dev, "channel equalization failed\n"); + ret = -EIO; + goto out; + } + +out: + dw_dp_link_train_set_pattern(dp, DP_TRAINING_PATTERN_DISABLE); + return ret; +} + +static int dw_dp_link_train(struct dw_dp *dp) +{ + struct dw_dp_link *link = &dp->link; + int ret; + + if (link->caps.fast_training) { + if (dw_dp_link_train_valid(&link->train)) { + ret = dw_dp_link_train_fast(dp); + if (ret < 0) + dev_err(dp->dev, "fast link training failed: %d\n", ret); + else + return 0; + } + } + + ret = dw_dp_link_train_full(dp); + if (ret < 0) { + dev_err(dp->dev, "full link training failed: %d\n", ret); + return ret; + } + + return 0; +} + +static int dw_dp_send_sdp(struct dw_dp *dp, struct dw_dp_sdp *sdp) +{ + const u8 *payload = sdp->base.db; + u32 reg; + int i, nr; + + nr = find_first_zero_bit(dp->sdp_reg_bank, SDP_REG_BANK_SIZE); + if (nr < SDP_REG_BANK_SIZE) + set_bit(nr, dp->sdp_reg_bank); + else + return -EBUSY; + + reg = DW_DP_SDP_REGISTER_BANK + nr * 9 * 4; + + /* SDP header */ + regmap_write(dp->regmap, reg, get_unaligned_le32(&sdp->base.sdp_header)); + + /* SDP data payload */ + for (i = 1; i < 9; i++, payload += 4) + regmap_write(dp->regmap, reg + i * 4, + FIELD_PREP(SDP_REGS, get_unaligned_le32(payload))); + + if (sdp->flags & DW_DP_SDP_VERTICAL_INTERVAL) + regmap_update_bits(dp->regmap, 
DW_DP_SDP_VERTICAL_CTRL, + EN_VERTICAL_SDP << nr, + EN_VERTICAL_SDP << nr); + + if (sdp->flags & DW_DP_SDP_HORIZONTAL_INTERVAL) + regmap_update_bits(dp->regmap, DW_DP_SDP_HORIZONTAL_CTRL, + EN_HORIZONTAL_SDP << nr, + EN_HORIZONTAL_SDP << nr); + + return 0; +} + +static int dw_dp_send_vsc_sdp(struct dw_dp *dp) +{ + struct dw_dp_bridge_state *state; + struct dw_dp_sdp sdp = {}; + struct drm_dp_vsc_sdp vsc = {}; + + state = dw_dp_get_bridge_state(dp); + if (!state) + return -EINVAL; + + vsc.bpc = state->bpc; + + vsc.sdp_type = DP_SDP_VSC; + vsc.revision = 0x5; + vsc.length = 0x13; + vsc.content_type = DP_CONTENT_TYPE_NOT_DEFINED; + + sdp.flags = DW_DP_SDP_VERTICAL_INTERVAL; + + switch (state->color_format) { + case DRM_COLOR_FORMAT_YCBCR444: + vsc.pixelformat = DP_PIXELFORMAT_YUV444; + break; + case DRM_COLOR_FORMAT_YCBCR420: + vsc.pixelformat = DP_PIXELFORMAT_YUV420; + break; + case DRM_COLOR_FORMAT_YCBCR422: + vsc.pixelformat = DP_PIXELFORMAT_YUV422; + break; + case DRM_COLOR_FORMAT_RGB444: + default: + vsc.pixelformat = DP_PIXELFORMAT_RGB; + break; + } + + if (state->color_format == DRM_COLOR_FORMAT_RGB444) { + vsc.colorimetry = DP_COLORIMETRY_DEFAULT; + vsc.dynamic_range = DP_DYNAMIC_RANGE_VESA; + } else { + vsc.colorimetry = DP_COLORIMETRY_BT709_YCC; + vsc.dynamic_range = DP_DYNAMIC_RANGE_CTA; + } + + drm_dp_vsc_sdp_pack(&vsc, &sdp.base); + + return dw_dp_send_sdp(dp, &sdp); +} + +static int dw_dp_video_set_pixel_mode(struct dw_dp *dp) +{ + switch (dp->pixel_mode) { + case DW_DP_MP_SINGLE_PIXEL: + case DW_DP_MP_DUAL_PIXEL: + case DW_DP_MP_QUAD_PIXEL: + break; + default: + return -EINVAL; + } + + regmap_update_bits(dp->regmap, DW_DP_VSAMPLE_CTRL, PIXEL_MODE_SELECT, + FIELD_PREP(PIXEL_MODE_SELECT, dp->pixel_mode)); + + return 0; +} + +static bool dw_dp_video_need_vsc_sdp(struct dw_dp *dp) +{ + struct dw_dp_link *link = &dp->link; + struct dw_dp_bridge_state *state; + + state = dw_dp_get_bridge_state(dp); + if (!state) + return -EINVAL; + + if (!link->vsc_sdp_supported) + return false; + + if (state->color_format == DRM_COLOR_FORMAT_YCBCR420) + return true; + + return false; +} + +static int dw_dp_video_set_msa(struct dw_dp *dp, u8 color_format, u8 bpc, + u16 vstart, u16 hstart) +{ + u16 misc = 0; + + if (dw_dp_video_need_vsc_sdp(dp)) + misc |= DP_MSA_MISC_COLOR_VSC_SDP; + + switch (color_format) { + case DRM_COLOR_FORMAT_RGB444: + misc |= DP_MSA_MISC_COLOR_RGB; + break; + case DRM_COLOR_FORMAT_YCBCR444: + misc |= DP_MSA_MISC_COLOR_YCBCR_444_BT709; + break; + case DRM_COLOR_FORMAT_YCBCR422: + misc |= DP_MSA_MISC_COLOR_YCBCR_422_BT709; + break; + case DRM_COLOR_FORMAT_YCBCR420: + break; + default: + return -EINVAL; + } + + switch (bpc) { + case 6: + misc |= DP_MSA_MISC_6_BPC; + break; + case 8: + misc |= DP_MSA_MISC_8_BPC; + break; + case 10: + misc |= DP_MSA_MISC_10_BPC; + break; + case 12: + misc |= DP_MSA_MISC_12_BPC; + break; + case 16: + misc |= DP_MSA_MISC_16_BPC; + break; + default: + return -EINVAL; + } + + regmap_write(dp->regmap, DW_DP_VIDEO_MSA1, + FIELD_PREP(VSTART, vstart) | FIELD_PREP(HSTART, hstart)); + regmap_write(dp->regmap, DW_DP_VIDEO_MSA2, FIELD_PREP(MISC0, misc)); + regmap_write(dp->regmap, DW_DP_VIDEO_MSA3, FIELD_PREP(MISC1, misc >> 8)); + + return 0; +} + +static void dw_dp_video_disable(struct dw_dp *dp) +{ + regmap_update_bits(dp->regmap, DW_DP_VSAMPLE_CTRL, VIDEO_STREAM_ENABLE, + FIELD_PREP(VIDEO_STREAM_ENABLE, 0)); +} + +static int dw_dp_video_enable(struct dw_dp *dp) +{ + struct dw_dp_link *link = &dp->link; + struct dw_dp_bridge_state *state; + struct 
drm_display_mode *mode; + u8 color_format, bpc, bpp; + u8 init_threshold, vic; + u32 hstart, hactive, hblank, h_sync_width, h_front_porch; + u32 vstart, vactive, vblank, v_sync_width, v_front_porch; + u32 peak_stream_bandwidth, link_bandwidth; + u32 average_bytes_per_tu, average_bytes_per_tu_frac; + u32 ts, hblank_interval; + u32 value; + int ret; + + state = dw_dp_get_bridge_state(dp); + if (!state) + return -EINVAL; + + bpc = state->bpc; + bpp = state->bpp; + color_format = state->color_format; + mode = &state->mode; + + vstart = mode->vtotal - mode->vsync_start; + hstart = mode->htotal - mode->hsync_start; + + ret = dw_dp_video_set_pixel_mode(dp); + if (ret) + return ret; + + ret = dw_dp_video_set_msa(dp, color_format, bpc, vstart, hstart); + if (ret) + return ret; + + regmap_update_bits(dp->regmap, DW_DP_VSAMPLE_CTRL, VIDEO_MAPPING, + FIELD_PREP(VIDEO_MAPPING, state->video_mapping)); + + /* Configure DW_DP_VINPUT_POLARITY_CTRL register */ + value = 0; + if (mode->flags & DRM_MODE_FLAG_PHSYNC) + value |= FIELD_PREP(HSYNC_IN_POLARITY, 1); + if (mode->flags & DRM_MODE_FLAG_PVSYNC) + value |= FIELD_PREP(VSYNC_IN_POLARITY, 1); + regmap_write(dp->regmap, DW_DP_VINPUT_POLARITY_CTRL, value); + + /* Configure DW_DP_VIDEO_CONFIG1 register */ + hactive = mode->hdisplay; + hblank = mode->htotal - mode->hdisplay; + value = FIELD_PREP(HACTIVE, hactive) | FIELD_PREP(HBLANK, hblank); + if (mode->flags & DRM_MODE_FLAG_INTERLACE) + value |= FIELD_PREP(I_P, 1); + vic = drm_match_cea_mode(mode); + if (vic == 5 || vic == 6 || vic == 7 || + vic == 10 || vic == 11 || vic == 20 || + vic == 21 || vic == 22 || vic == 39 || + vic == 25 || vic == 26 || vic == 40 || + vic == 44 || vic == 45 || vic == 46 || + vic == 50 || vic == 51 || vic == 54 || + vic == 55 || vic == 58 || vic == 59) + value |= R_V_BLANK_IN_OSC; + regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG1, value); + + /* Configure DW_DP_VIDEO_CONFIG2 register */ + vblank = mode->vtotal - mode->vdisplay; + vactive = mode->vdisplay; + regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG2, + FIELD_PREP(VBLANK, vblank) | FIELD_PREP(VACTIVE, vactive)); + + /* Configure DW_DP_VIDEO_CONFIG3 register */ + h_sync_width = mode->hsync_end - mode->hsync_start; + h_front_porch = mode->hsync_start - mode->hdisplay; + regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG3, + FIELD_PREP(H_SYNC_WIDTH, h_sync_width) | + FIELD_PREP(H_FRONT_PORCH, h_front_porch)); + + /* Configure DW_DP_VIDEO_CONFIG4 register */ + v_sync_width = mode->vsync_end - mode->vsync_start; + v_front_porch = mode->vsync_start - mode->vdisplay; + regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG4, + FIELD_PREP(V_SYNC_WIDTH, v_sync_width) | + FIELD_PREP(V_FRONT_PORCH, v_front_porch)); + + /* Configure DW_DP_VIDEO_CONFIG5 register */ + peak_stream_bandwidth = mode->clock * bpp / 8; + link_bandwidth = (link->rate / 1000) * link->lanes; + ts = peak_stream_bandwidth * 64 / link_bandwidth; + average_bytes_per_tu = ts / 1000; + average_bytes_per_tu_frac = ts / 100 - average_bytes_per_tu * 10; + if (dp->pixel_mode == DW_DP_MP_SINGLE_PIXEL) { + if (average_bytes_per_tu < 6) + init_threshold = 32; + else if (hblank <= 80 && color_format != DRM_COLOR_FORMAT_YCBCR420) + init_threshold = 12; + else if (hblank <= 40 && color_format == DRM_COLOR_FORMAT_YCBCR420) + init_threshold = 3; + else + init_threshold = 16; + } else { + u32 t1 = 0, t2 = 0, t3 = 0; + + switch (bpc) { + case 6: + t1 = (4 * 1000 / 9) * link->lanes; + break; + case 8: + if (color_format == DRM_COLOR_FORMAT_YCBCR422) { + t1 = (1000 / 2) * link->lanes; + } else { + if 
(dp->pixel_mode == DW_DP_MP_DUAL_PIXEL) + t1 = (1000 / 3) * link->lanes; + else + t1 = (3000 / 16) * link->lanes; + } + break; + case 10: + if (color_format == DRM_COLOR_FORMAT_YCBCR422) + t1 = (2000 / 5) * link->lanes; + else + t1 = (4000 / 15) * link->lanes; + break; + case 12: + if (color_format == DRM_COLOR_FORMAT_YCBCR422) { + if (dp->pixel_mode == DW_DP_MP_DUAL_PIXEL) + t1 = (1000 / 6) * link->lanes; + else + t1 = (1000 / 3) * link->lanes; + } else { + t1 = (2000 / 9) * link->lanes; + } + break; + case 16: + if (color_format != DRM_COLOR_FORMAT_YCBCR422 && + dp->pixel_mode == DW_DP_MP_DUAL_PIXEL) + t1 = (1000 / 6) * link->lanes; + else + t1 = (1000 / 4) * link->lanes; + break; + default: + return -EINVAL; + } + + if (color_format == DRM_COLOR_FORMAT_YCBCR420) + t2 = (link->rate / 4) * 1000 / (mode->clock / 2); + else + t2 = (link->rate / 4) * 1000 / mode->clock; + + if (average_bytes_per_tu_frac) + t3 = average_bytes_per_tu + 1; + else + t3 = average_bytes_per_tu; + init_threshold = t1 * t2 * t3 / (1000 * 1000); + if (init_threshold <= 16 || average_bytes_per_tu < 10) + init_threshold = 40; + } + + regmap_write(dp->regmap, DW_DP_VIDEO_CONFIG5, + FIELD_PREP(INIT_THRESHOLD_HI, init_threshold >> 6) | + FIELD_PREP(AVERAGE_BYTES_PER_TU_FRAC, average_bytes_per_tu_frac) | + FIELD_PREP(INIT_THRESHOLD, init_threshold) | + FIELD_PREP(AVERAGE_BYTES_PER_TU, average_bytes_per_tu)); + + /* Configure DW_DP_VIDEO_HBLANK_INTERVAL register */ + hblank_interval = hblank * (link->rate / 4) / mode->clock; + regmap_write(dp->regmap, DW_DP_VIDEO_HBLANK_INTERVAL, + FIELD_PREP(HBLANK_INTERVAL_EN, 1) | + FIELD_PREP(HBLANK_INTERVAL, hblank_interval)); + + /* Video stream enable */ + regmap_update_bits(dp->regmap, DW_DP_VSAMPLE_CTRL, VIDEO_STREAM_ENABLE, + FIELD_PREP(VIDEO_STREAM_ENABLE, 1)); + + if (dw_dp_video_need_vsc_sdp(dp)) + dw_dp_send_vsc_sdp(dp); + + return 0; +} + +static void dw_dp_hpd_init(struct dw_dp *dp) +{ + /* Enable all HPD interrupts */ + regmap_update_bits(dp->regmap, DW_DP_HPD_INTERRUPT_ENABLE, + HPD_UNPLUG_EN | HPD_PLUG_EN | HPD_IRQ_EN, + FIELD_PREP(HPD_UNPLUG_EN, 1) | + FIELD_PREP(HPD_PLUG_EN, 1) | + FIELD_PREP(HPD_IRQ_EN, 1)); + + /* Enable all top-level interrupts */ + regmap_update_bits(dp->regmap, DW_DP_GENERAL_INTERRUPT_ENABLE, + HPD_EVENT_EN, FIELD_PREP(HPD_EVENT_EN, 1)); +} + +static void dw_dp_aux_init(struct dw_dp *dp) +{ + regmap_update_bits(dp->regmap, DW_DP_GENERAL_INTERRUPT_ENABLE, + AUX_REPLY_EVENT_EN, FIELD_PREP(AUX_REPLY_EVENT_EN, 1)); +} + +static void dw_dp_init_hw(struct dw_dp *dp) +{ + regmap_update_bits(dp->regmap, DW_DP_CCTL, DEFAULT_FAST_LINK_TRAIN_EN, + FIELD_PREP(DEFAULT_FAST_LINK_TRAIN_EN, 0)); + + dw_dp_hpd_init(dp); + dw_dp_aux_init(dp); +} + +static int dw_dp_aux_write_data(struct dw_dp *dp, const u8 *buffer, size_t size) +{ + size_t i, j; + + for (i = 0; i < DIV_ROUND_UP(size, 4); i++) { + size_t num = min_t(size_t, size - i * 4, 4); + u32 value = 0; + + for (j = 0; j < num; j++) + value |= buffer[i * 4 + j] << (j * 8); + + regmap_write(dp->regmap, DW_DP_AUX_DATA0 + i * 4, value); + } + + return size; +} + +static int dw_dp_aux_read_data(struct dw_dp *dp, u8 *buffer, size_t size) +{ + size_t i, j; + + for (i = 0; i < DIV_ROUND_UP(size, 4); i++) { + size_t num = min_t(size_t, size - i * 4, 4); + u32 value; + + regmap_read(dp->regmap, DW_DP_AUX_DATA0 + i * 4, &value); + + for (j = 0; j < num; j++) + buffer[i * 4 + j] = value >> (j * 8); + } + + return size; +} + +static ssize_t dw_dp_aux_transfer(struct drm_dp_aux *aux, + struct drm_dp_aux_msg *msg) +{ + struct 
dw_dp *dp = container_of(aux, struct dw_dp, aux); + unsigned long timeout = msecs_to_jiffies(10); + u32 status, value; + ssize_t ret = 0; + + if (WARN_ON(msg->size > 16)) + return -E2BIG; + + switch (msg->request & ~DP_AUX_I2C_MOT) { + case DP_AUX_NATIVE_WRITE: + case DP_AUX_I2C_WRITE: + case DP_AUX_I2C_WRITE_STATUS_UPDATE: + ret = dw_dp_aux_write_data(dp, msg->buffer, msg->size); + if (ret < 0) + return ret; + break; + case DP_AUX_NATIVE_READ: + case DP_AUX_I2C_READ: + break; + default: + return -EINVAL; + } + + if (msg->size > 0) + value = FIELD_PREP(AUX_LEN_REQ, msg->size - 1); + else + value = FIELD_PREP(I2C_ADDR_ONLY, 1); + value |= FIELD_PREP(AUX_CMD_TYPE, msg->request); + value |= FIELD_PREP(AUX_ADDR, msg->address); + regmap_write(dp->regmap, DW_DP_AUX_CMD, value); + + status = wait_for_completion_timeout(&dp->complete, timeout); + if (!status) { + dev_err(dp->dev, "timeout waiting for AUX reply\n"); + return -ETIMEDOUT; + } + + regmap_read(dp->regmap, DW_DP_AUX_STATUS, &value); + if (value & AUX_TIMEOUT) + return -ETIMEDOUT; + + msg->reply = FIELD_GET(AUX_STATUS, value); + + if (msg->size > 0 && msg->reply == DP_AUX_NATIVE_REPLY_ACK) { + if (msg->request & DP_AUX_I2C_READ) { + size_t count = FIELD_GET(AUX_BYTES_READ, value) - 1; + + if (count != msg->size) + return -EBUSY; + + ret = dw_dp_aux_read_data(dp, msg->buffer, count); + if (ret < 0) + return ret; + } + } + + return ret; +} + +/* + * Limits for the video timing for DP: + * 1. the hfp should be 2 pixels aligned; + * 2. the minimum hsync should be 9 pixel; + * 3. the minimum hbp should be 16 pixel; + */ +static int dw_dp_bridge_atomic_check(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct drm_display_mode *adjusted_mode = &crtc_state->adjusted_mode; + struct dw_dp *dp = bridge_to_dp(bridge); + struct dw_dp_bridge_state *state; + const struct dw_dp_output_format *fmt; + struct drm_display_mode *mode; + int min_hbp = 16; + int min_hsync = 9; + + state = to_dw_dp_bridge_state(bridge_state); + mode = &state->mode; + + fmt = dw_dp_get_output_format(bridge_state->output_bus_cfg.format); + if (!fmt) + return -EINVAL; + + state->video_mapping = fmt->video_mapping; + state->color_format = fmt->color_format; + state->bpc = fmt->bpc; + state->bpp = fmt->bpp; + + if ((adjusted_mode->hsync_start - adjusted_mode->hdisplay) & 0x1) { + adjusted_mode->hsync_start += 1; + dev_warn(dp->dev, "hfp is not 2 pixeel aligned, fixup to aligned hfp\n"); + } + + if (adjusted_mode->hsync_end - adjusted_mode->hsync_start < min_hsync) { + adjusted_mode->hsync_end = adjusted_mode->hsync_start + min_hsync; + dev_warn(dp->dev, "hsync is too narrow, fixup to min hsync:%d\n", min_hsync); + } + + if (adjusted_mode->htotal - adjusted_mode->hsync_end < min_hbp) { + adjusted_mode->htotal = adjusted_mode->hsync_end + min_hbp; + dev_warn(dp->dev, "hbp is too narrow, fixup to min hbp:%d\n", min_hbp); + } + + drm_mode_copy(mode, adjusted_mode); + + return 0; +} + +static enum drm_mode_status dw_dp_bridge_mode_valid(struct drm_bridge *bridge, + const struct drm_display_info *info, + const struct drm_display_mode *mode) +{ + struct dw_dp *dp = bridge_to_dp(bridge); + struct dw_dp_link *link = &dp->link; + u32 min_bpp; + + if (info->color_formats & DRM_COLOR_FORMAT_YCBCR420 && + link->vsc_sdp_supported && + (drm_mode_is_420_only(info, mode) || drm_mode_is_420_also(info, mode))) + min_bpp = 12; + else if (info->color_formats & DRM_COLOR_FORMAT_YCBCR422) + min_bpp = 
16; + else if (info->color_formats & DRM_COLOR_FORMAT_RGB444) + min_bpp = 18; + else + min_bpp = 24; + + if (!link->vsc_sdp_supported && + drm_mode_is_420_only(info, mode)) + return MODE_NO_420; + + if (!dw_dp_bandwidth_ok(dp, mode, min_bpp, link->lanes, link->rate)) + return MODE_CLOCK_HIGH; + + return MODE_OK; +} + +static bool dw_dp_needs_link_retrain(struct dw_dp *dp) +{ + struct dw_dp_link *link = &dp->link; + u8 link_status[DP_LINK_STATUS_SIZE]; + + if (!dw_dp_link_train_valid(&link->train)) + return false; + + if (drm_dp_dpcd_read_link_status(&dp->aux, link_status) < 0) + return false; + + /* Retrain if Channel EQ or CR not ok */ + return !drm_dp_channel_eq_ok(link_status, dp->link.lanes); +} + +static void dw_dp_link_disable(struct dw_dp *dp) +{ + struct dw_dp_link *link = &dp->link; + + if (dw_dp_hpd_detect(dp)) + drm_dp_link_power_down(&dp->aux, dp->link.revision); + + dw_dp_phy_xmit_enable(dp, 0); + + phy_power_off(dp->phy); + + link->train.clock_recovered = false; + link->train.channel_equalized = false; +} + +static int dw_dp_link_enable(struct dw_dp *dp) +{ + int ret; + + ret = phy_power_on(dp->phy); + if (ret) + return ret; + + ret = drm_dp_link_power_up(&dp->aux, dp->link.revision); + if (ret < 0) + return ret; + + ret = dw_dp_link_train(dp); + + return ret; +} + +static void dw_dp_bridge_atomic_enable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct dw_dp *dp = bridge_to_dp(bridge); + struct drm_connector *connector; + struct drm_connector_state *conn_state; + int ret; + + connector = drm_atomic_get_new_connector_for_encoder(state, bridge->encoder); + if (!connector) { + dev_err(dp->dev, "failed to get connector\n"); + return; + } + + conn_state = drm_atomic_get_new_connector_state(state, connector); + if (!conn_state) { + dev_err(dp->dev, "failed to get connector state\n"); + return; + } + + set_bit(0, dp->sdp_reg_bank); + + ret = dw_dp_link_enable(dp); + if (ret < 0) { + dev_err(dp->dev, "failed to enable link: %d\n", ret); + return; + } + + ret = dw_dp_video_enable(dp); + if (ret < 0) { + dev_err(dp->dev, "failed to enable video: %d\n", ret); + return; + } +} + +static void dw_dp_reset(struct dw_dp *dp) +{ + int val; + + disable_irq(dp->irq); + regmap_update_bits(dp->regmap, DW_DP_SOFT_RESET_CTRL, CONTROLLER_RESET, + FIELD_PREP(CONTROLLER_RESET, 1)); + usleep_range(10, 20); + regmap_update_bits(dp->regmap, DW_DP_SOFT_RESET_CTRL, CONTROLLER_RESET, + FIELD_PREP(CONTROLLER_RESET, 0)); + + dw_dp_init_hw(dp); + regmap_read_poll_timeout(dp->regmap, DW_DP_HPD_STATUS, val, + FIELD_GET(HPD_HOT_PLUG, val), 200, 200000); + regmap_write(dp->regmap, DW_DP_HPD_STATUS, HPD_HOT_PLUG); + enable_irq(dp->irq); +} + +static void dw_dp_bridge_atomic_disable(struct drm_bridge *bridge, + struct drm_atomic_state *state) +{ + struct dw_dp *dp = bridge_to_dp(bridge); + + dw_dp_video_disable(dp); + dw_dp_link_disable(dp); + bitmap_zero(dp->sdp_reg_bank, SDP_REG_BANK_SIZE); + dw_dp_reset(dp); +} + +static bool dw_dp_hpd_detect_link(struct dw_dp *dp, struct drm_connector *connector) +{ + int ret; + + ret = phy_power_on(dp->phy); + if (ret < 0) + return false; + ret = dw_dp_link_parse(dp, connector); + phy_power_off(dp->phy); + + return !ret; +} + +static enum drm_connector_status dw_dp_bridge_detect(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct dw_dp *dp = bridge_to_dp(bridge); + + if (!dw_dp_hpd_detect(dp)) + return connector_status_disconnected; + + if (!dw_dp_hpd_detect_link(dp, connector)) + return connector_status_disconnected; + + return 
connector_status_connected; +} + +static const struct drm_edid *dw_dp_bridge_edid_read(struct drm_bridge *bridge, + struct drm_connector *connector) +{ + struct dw_dp *dp = bridge_to_dp(bridge); + const struct drm_edid *edid; + int ret; + + ret = phy_power_on(dp->phy); + if (ret) + return NULL; + + edid = drm_edid_read_ddc(connector, &dp->aux.ddc); + + phy_power_off(dp->phy); + + return edid; +} + +static u32 *dw_dp_bridge_atomic_get_output_bus_fmts(struct drm_bridge *bridge, + struct drm_bridge_state *bridge_state, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state, + unsigned int *num_output_fmts) +{ + struct dw_dp *dp = bridge_to_dp(bridge); + struct dw_dp_link *link = &dp->link; + struct drm_display_info *di = &conn_state->connector->display_info; + struct drm_display_mode mode = crtc_state->mode; + const struct dw_dp_output_format *fmt; + u32 i, j = 0; + u32 *output_fmts; + + *num_output_fmts = 0; + + output_fmts = kcalloc(ARRAY_SIZE(dw_dp_output_formats), sizeof(*output_fmts), GFP_KERNEL); + if (!output_fmts) + return NULL; + + for (i = 0; i < ARRAY_SIZE(dw_dp_output_formats); i++) { + fmt = &dw_dp_output_formats[i]; + + if (fmt->bpc > conn_state->max_bpc) + continue; + + if (!(fmt->color_format & di->color_formats)) + continue; + + if (fmt->color_format == DRM_COLOR_FORMAT_YCBCR420 && + !link->vsc_sdp_supported) + continue; + + if (fmt->color_format != DRM_COLOR_FORMAT_YCBCR420 && + drm_mode_is_420_only(di, &mode)) + continue; + + if (!dw_dp_bandwidth_ok(dp, &mode, fmt->bpp, link->lanes, link->rate)) + continue; + + output_fmts[j++] = fmt->bus_format; + } + + *num_output_fmts = j; + + return output_fmts; +} + +static struct drm_bridge_state *dw_dp_bridge_atomic_duplicate_state(struct drm_bridge *bridge) +{ + struct dw_dp_bridge_state *state; + + state = kzalloc(sizeof(*state), GFP_KERNEL); + if (!state) + return NULL; + + __drm_atomic_helper_bridge_duplicate_state(bridge, &state->base); + + return &state->base; +} + +static const struct drm_bridge_funcs dw_dp_bridge_funcs = { + .atomic_duplicate_state = dw_dp_bridge_atomic_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_bridge_destroy_state, + .atomic_reset = drm_atomic_helper_bridge_reset, + .atomic_get_input_bus_fmts = drm_atomic_helper_bridge_propagate_bus_fmt, + .atomic_get_output_bus_fmts = dw_dp_bridge_atomic_get_output_bus_fmts, + .atomic_check = dw_dp_bridge_atomic_check, + .mode_valid = dw_dp_bridge_mode_valid, + .atomic_enable = dw_dp_bridge_atomic_enable, + .atomic_disable = dw_dp_bridge_atomic_disable, + .detect = dw_dp_bridge_detect, + .edid_read = dw_dp_bridge_edid_read, +}; + +static int dw_dp_link_retrain(struct dw_dp *dp) +{ + struct drm_device *dev = dp->bridge.dev; + struct drm_modeset_acquire_ctx ctx; + int ret; + + if (!dw_dp_needs_link_retrain(dp)) + return 0; + + dev_dbg(dp->dev, "Retraining link\n"); + + drm_modeset_acquire_init(&ctx, 0); + for (;;) { + ret = drm_modeset_lock(&dev->mode_config.connection_mutex, &ctx); + if (ret != -EDEADLK) + break; + + drm_modeset_backoff(&ctx); + } + + if (!ret) + ret = dw_dp_link_train(dp); + + drm_modeset_drop_locks(&ctx); + drm_modeset_acquire_fini(&ctx); + + return ret; +} + +static void dw_dp_hpd_work(struct work_struct *work) +{ + struct dw_dp *dp = container_of(work, struct dw_dp, hpd_work); + bool long_hpd; + int ret; + + mutex_lock(&dp->irq_lock); + long_hpd = dp->hotplug.long_hpd; + mutex_unlock(&dp->irq_lock); + + dev_dbg(dp->dev, "[drm] Get hpd irq - %s\n", long_hpd ? 
"long" : "short"); + + if (!long_hpd) { + if (dw_dp_needs_link_retrain(dp)) { + ret = dw_dp_link_retrain(dp); + if (ret) + dev_warn(dp->dev, "Retrain link failed\n"); + } + } else { + drm_helper_hpd_irq_event(dp->bridge.dev); + } +} + +static void dw_dp_handle_hpd_event(struct dw_dp *dp) +{ + u32 value; + + mutex_lock(&dp->irq_lock); + regmap_read(dp->regmap, DW_DP_HPD_STATUS, &value); + + if (value & HPD_IRQ) { + dev_dbg(dp->dev, "IRQ from the HPD\n"); + dp->hotplug.long_hpd = false; + regmap_write(dp->regmap, DW_DP_HPD_STATUS, HPD_IRQ); + } + + if (value & HPD_HOT_PLUG) { + dev_dbg(dp->dev, "Hot plug detected\n"); + dp->hotplug.long_hpd = true; + regmap_write(dp->regmap, DW_DP_HPD_STATUS, HPD_HOT_PLUG); + } + + if (value & HPD_HOT_UNPLUG) { + dev_dbg(dp->dev, "Unplug detected\n"); + dp->hotplug.long_hpd = true; + regmap_write(dp->regmap, DW_DP_HPD_STATUS, HPD_HOT_UNPLUG); + } + mutex_unlock(&dp->irq_lock); + + schedule_work(&dp->hpd_work); +} + +static irqreturn_t dw_dp_irq(int irq, void *data) +{ + struct dw_dp *dp = data; + u32 value; + + regmap_read(dp->regmap, DW_DP_GENERAL_INTERRUPT, &value); + if (!value) + return IRQ_NONE; + + if (value & HPD_EVENT) + dw_dp_handle_hpd_event(dp); + + if (value & AUX_REPLY_EVENT) { + regmap_write(dp->regmap, DW_DP_GENERAL_INTERRUPT, AUX_REPLY_EVENT); + complete(&dp->complete); + } + + return IRQ_HANDLED; +} + +static const struct regmap_range dw_dp_readable_ranges[] = { + regmap_reg_range(DW_DP_VERSION_NUMBER, DW_DP_ID), + regmap_reg_range(DW_DP_CONFIG_REG1, DW_DP_CONFIG_REG3), + regmap_reg_range(DW_DP_CCTL, DW_DP_SOFT_RESET_CTRL), + regmap_reg_range(DW_DP_VSAMPLE_CTRL, DW_DP_VIDEO_HBLANK_INTERVAL), + regmap_reg_range(DW_DP_AUD_CONFIG1, DW_DP_AUD_CONFIG1), + regmap_reg_range(DW_DP_SDP_VERTICAL_CTRL, DW_DP_SDP_STATUS_EN), + regmap_reg_range(DW_DP_PHYIF_CTRL, DW_DP_PHYIF_PWRDOWN_CTRL), + regmap_reg_range(DW_DP_AUX_CMD, DW_DP_AUX_DATA3), + regmap_reg_range(DW_DP_GENERAL_INTERRUPT, DW_DP_HPD_INTERRUPT_ENABLE), +}; + +static const struct regmap_access_table dw_dp_readable_table = { + .yes_ranges = dw_dp_readable_ranges, + .n_yes_ranges = ARRAY_SIZE(dw_dp_readable_ranges), +}; + +static const struct regmap_config dw_dp_regmap_config = { + .reg_bits = 32, + .reg_stride = 4, + .val_bits = 32, + .fast_io = true, + .max_register = DW_DP_MAX_REGISTER, + .rd_table = &dw_dp_readable_table, +}; + +static void dw_dp_phy_exit(void *data) +{ + struct dw_dp *dp = data; + + phy_exit(dp->phy); +} + +struct dw_dp *dw_dp_bind(struct device *dev, struct drm_encoder *encoder, + const struct dw_dp_plat_data *plat_data) +{ + struct platform_device *pdev = to_platform_device(dev); + struct dw_dp *dp; + struct drm_bridge *bridge; + void __iomem *res; + int ret; + + dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL); + if (!dp) + return ERR_PTR(-ENOMEM); + + dp = devm_drm_bridge_alloc(dev, struct dw_dp, bridge, &dw_dp_bridge_funcs); + if (IS_ERR(dp)) + return ERR_CAST(dp); + + dp->dev = dev; + dp->pixel_mode = DW_DP_MP_QUAD_PIXEL; + + dp->plat_data.max_link_rate = plat_data->max_link_rate; + bridge = &dp->bridge; + mutex_init(&dp->irq_lock); + INIT_WORK(&dp->hpd_work, dw_dp_hpd_work); + init_completion(&dp->complete); + + res = devm_platform_ioremap_resource(pdev, 0); + if (IS_ERR(res)) + return ERR_CAST(res); + + dp->regmap = devm_regmap_init_mmio(dev, res, &dw_dp_regmap_config); + if (IS_ERR(dp->regmap)) { + dev_err_probe(dev, PTR_ERR(dp->regmap), "failed to create regmap\n"); + return ERR_CAST(dp->regmap); + } + + dp->phy = devm_of_phy_get(dev, dev->of_node, NULL); + if 
(IS_ERR(dp->phy)) { + dev_err_probe(dev, PTR_ERR(dp->phy), "failed to get phy\n"); + return ERR_CAST(dp->phy); + } + + dp->apb_clk = devm_clk_get_enabled(dev, "apb"); + if (IS_ERR(dp->apb_clk)) { + dev_err_probe(dev, PTR_ERR(dp->apb_clk), "failed to get apb clock\n"); + return ERR_CAST(dp->apb_clk); + } + + dp->aux_clk = devm_clk_get_enabled(dev, "aux"); + if (IS_ERR(dp->aux_clk)) { + dev_err_probe(dev, PTR_ERR(dp->aux_clk), "failed to get aux clock\n"); + return ERR_CAST(dp->aux_clk); + } + + dp->i2s_clk = devm_clk_get(dev, "i2s"); + if (IS_ERR(dp->i2s_clk)) { + dev_err_probe(dev, PTR_ERR(dp->i2s_clk), "failed to get i2s clock\n"); + return ERR_CAST(dp->i2s_clk); + } + + dp->spdif_clk = devm_clk_get(dev, "spdif"); + if (IS_ERR(dp->spdif_clk)) { + dev_err_probe(dev, PTR_ERR(dp->spdif_clk), "failed to get spdif clock\n"); + return ERR_CAST(dp->spdif_clk); + } + + dp->hdcp_clk = devm_clk_get(dev, "hdcp"); + if (IS_ERR(dp->hdcp_clk)) { + dev_err_probe(dev, PTR_ERR(dp->hdcp_clk), "failed to get hdcp clock\n"); + return ERR_CAST(dp->hdcp_clk); + } + + dp->rstc = devm_reset_control_get(dev, NULL); + if (IS_ERR(dp->rstc)) { + dev_err_probe(dev, PTR_ERR(dp->rstc), "failed to get reset control\n"); + return ERR_CAST(dp->rstc); + } + + bridge->of_node = dev->of_node; + bridge->ops = DRM_BRIDGE_OP_DETECT | DRM_BRIDGE_OP_EDID | DRM_BRIDGE_OP_HPD; + bridge->type = DRM_MODE_CONNECTOR_DisplayPort; + bridge->ycbcr_420_allowed = true; + + dp->aux.dev = dev; + dp->aux.drm_dev = encoder->dev; + dp->aux.name = dev_name(dev); + dp->aux.transfer = dw_dp_aux_transfer; + ret = drm_dp_aux_register(&dp->aux); + if (ret) { + dev_err_probe(dev, ret, "Aux register failed\n"); + return ERR_PTR(ret); + } + + ret = drm_bridge_attach(encoder, bridge, NULL, DRM_BRIDGE_ATTACH_NO_CONNECTOR); + if (ret) + dev_err_probe(dev, ret, "Failed to attach bridge\n"); + + dw_dp_init_hw(dp); + + ret = phy_init(dp->phy); + if (ret) { + dev_err_probe(dev, ret, "phy init failed\n"); + return ERR_PTR(ret); + } + + ret = devm_add_action_or_reset(dev, dw_dp_phy_exit, dp); + if (ret) + return ERR_PTR(ret); + + dp->irq = platform_get_irq(pdev, 0); + if (dp->irq < 0) + return ERR_PTR(ret); + + ret = devm_request_threaded_irq(dev, dp->irq, NULL, dw_dp_irq, + IRQF_ONESHOT, dev_name(dev), dp); + if (ret) { + dev_err_probe(dev, ret, "failed to request irq\n"); + return ERR_PTR(ret); + } + + return dp; +} +EXPORT_SYMBOL_GPL(dw_dp_bind); + +MODULE_AUTHOR("Andy Yan <andyshrk@163.com>"); +MODULE_DESCRIPTION("DW DP Core Library"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/display/drm_bridge_connector.c b/drivers/gpu/drm/display/drm_bridge_connector.c index 091c5335355a..baacd21e7341 100644 --- a/drivers/gpu/drm/display/drm_bridge_connector.c +++ b/drivers/gpu/drm/display/drm_bridge_connector.c @@ -751,12 +751,11 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, return ERR_PTR(-EINVAL); } - if (!drm_bridge_get_next_bridge(bridge)) + if (drm_bridge_is_last(bridge)) connector_type = bridge->type; #ifdef CONFIG_OF - if (!drm_bridge_get_next_bridge(bridge) && - bridge->of_node) + if (drm_bridge_is_last(bridge) && bridge->of_node) connector->fwnode = fwnode_handle_get(of_fwnode_handle(bridge->of_node)); #endif @@ -777,8 +776,6 @@ struct drm_connector *drm_bridge_connector_init(struct drm_device *drm, if (!connector->ycbcr_420_allowed) supported_formats &= ~BIT(HDMI_COLORSPACE_YUV420); - bridge = bridge_connector->bridge_hdmi; - ret = drmm_connector_hdmi_init(drm, connector, bridge_connector->bridge_hdmi->vendor, 
bridge_connector->bridge_hdmi->product, diff --git a/drivers/gpu/drm/drm_bridge.c b/drivers/gpu/drm/drm_bridge.c index dd439d55177a..d031447eebc9 100644 --- a/drivers/gpu/drm/drm_bridge.c +++ b/drivers/gpu/drm/drm_bridge.c @@ -1435,6 +1435,9 @@ static void drm_bridge_debugfs_show_bridge(struct drm_printer *p, unsigned int idx) { drm_printf(p, "bridge[%u]: %ps\n", idx, bridge->funcs); + + drm_printf(p, "\trefcount: %u\n", kref_read(&bridge->refcount)); + drm_printf(p, "\ttype: [%d] %s\n", bridge->type, drm_get_connector_type_name(bridge->type)); diff --git a/drivers/gpu/drm/drm_format_helper.c b/drivers/gpu/drm/drm_format_helper.c index 37d5e0a7eb46..006836554cc2 100644 --- a/drivers/gpu/drm/drm_format_helper.c +++ b/drivers/gpu/drm/drm_format_helper.c @@ -1256,6 +1256,25 @@ int drm_fb_blit(struct iosys_map *dst, const unsigned int *dst_pitch, uint32_t d } EXPORT_SYMBOL(drm_fb_blit); +static void drm_fb_gray8_to_gray2_line(void *dbuf, const void *sbuf, unsigned int pixels) +{ + u8 *dbuf8 = dbuf; + const u8 *sbuf8 = sbuf; + u8 px; + + while (pixels) { + unsigned int i, bits = min(pixels, 4U); + u8 byte = 0; + + for (i = 0; i < bits; i++, pixels--) { + byte >>= 2; + px = (*sbuf8++ * 3 + 127) / 255; + byte |= (px &= 0x03) << 6; + } + *dbuf8++ = byte; + } +} + static void drm_fb_gray8_to_mono_line(void *dbuf, const void *sbuf, unsigned int pixels) { u8 *dbuf8 = dbuf; @@ -1362,3 +1381,92 @@ void drm_fb_xrgb8888_to_mono(struct iosys_map *dst, const unsigned int *dst_pitc } } EXPORT_SYMBOL(drm_fb_xrgb8888_to_mono); + +/** + * drm_fb_xrgb8888_to_gray2 - Convert XRGB8888 to gray2 + * @dst: Array of gray2 destination buffer + * @dst_pitch: Array of numbers of bytes between the start of two consecutive scanlines + * within @dst; can be NULL if scanlines are stored next to each other. + * @src: Array of XRGB8888 source buffers + * @fb: DRM framebuffer + * @clip: Clip rectangle area to copy + * @state: Transform and conversion state + * + * This function copies parts of a framebuffer to display memory and converts the + * color format during the process. Destination and framebuffer formats must match. The + * parameters @dst, @dst_pitch and @src refer to arrays. Each array must have at + * least as many entries as there are planes in @fb's format. Each entry stores the + * value for the format's respective color plane at the same index. + * + * This function does not apply clipping on @dst (i.e. the destination is at the + * top-left corner). The first pixel (upper left corner of the clip rectangle) will + * be converted and copied to the two first bits (LSB) in the first byte of the gray2 + * destination buffer. If the caller requires that the first pixel in a byte must + * be located at an x-coordinate that is a multiple of 8, then the caller must take + * care itself of supplying a suitable clip rectangle. + * + * DRM doesn't have native gray2 support. Drivers can use this function for + * gray2 devices that don't support XRGB8888 natively. Such drivers can + * announce the commonly supported XR24 format to userspace and use this function + * to convert to the native format. 
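+ * Internally the conversion is done line by line: each XRGB8888 scanline is
+ * first reduced to 8-bit grayscale and then packed down to 2 bits per pixel,
+ * using a temporary buffer reserved from @state.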
+ * + */ +void drm_fb_xrgb8888_to_gray2(struct iosys_map *dst, const unsigned int *dst_pitch, + const struct iosys_map *src, const struct drm_framebuffer *fb, + const struct drm_rect *clip, struct drm_format_conv_state *state) +{ + static const unsigned int default_dst_pitch[DRM_FORMAT_MAX_PLANES] = { + 0, 0, 0, 0 + }; + unsigned int linepixels = drm_rect_width(clip); + unsigned int lines = drm_rect_height(clip); + unsigned int cpp = fb->format->cpp[0]; + unsigned int len_src32 = linepixels * cpp; + struct drm_device *dev = fb->dev; + void *vaddr = src[0].vaddr; + unsigned int dst_pitch_0; + unsigned int y; + u8 *gray2 = dst[0].vaddr, *gray8; + u32 *src32; + + if (drm_WARN_ON(dev, fb->format->format != DRM_FORMAT_XRGB8888)) + return; + + if (!dst_pitch) + dst_pitch = default_dst_pitch; + dst_pitch_0 = dst_pitch[0]; + + /* + * The gray2 destination buffer contains 2 bit per pixel + */ + if (!dst_pitch_0) + dst_pitch_0 = DIV_ROUND_UP(linepixels, 4); + + /* + * The dma memory is write-combined so reads are uncached. + * Speed up by fetching one line at a time. + * + * Also, format conversion from XR24 to gray2 are done + * line-by-line but are converted to 8-bit grayscale as an + * intermediate step. + * + * Allocate a buffer to be used for both copying from the cma + * memory and to store the intermediate grayscale line pixels. + */ + src32 = drm_format_conv_state_reserve(state, len_src32 + linepixels, GFP_KERNEL); + if (!src32) + return; + + gray8 = (u8 *)src32 + len_src32; + + vaddr += clip_offset(clip, fb->pitches[0], cpp); + for (y = 0; y < lines; y++) { + src32 = memcpy(src32, vaddr, len_src32); + drm_fb_xrgb8888_to_gray8_line(gray8, src32, linepixels); + drm_fb_gray8_to_gray2_line(gray2, gray8, linepixels); + vaddr += fb->pitches[0]; + gray2 += dst_pitch_0; + } +} +EXPORT_SYMBOL(drm_fb_xrgb8888_to_gray2); + diff --git a/drivers/gpu/drm/drm_gem.c b/drivers/gpu/drm/drm_gem.c index 4a89b6acb6af..8d25cc65707d 100644 --- a/drivers/gpu/drm/drm_gem.c +++ b/drivers/gpu/drm/drm_gem.c @@ -187,6 +187,7 @@ void drm_gem_private_object_init(struct drm_device *dev, kref_init(&obj->refcount); obj->handle_count = 0; obj->size = size; + mutex_init(&obj->gpuva.lock); dma_resv_init(&obj->_resv); if (!obj->resv) obj->resv = &obj->_resv; @@ -210,6 +211,7 @@ void drm_gem_private_object_fini(struct drm_gem_object *obj) WARN_ON(obj->dma_buf); dma_resv_fini(&obj->_resv); + mutex_destroy(&obj->gpuva.lock); } EXPORT_SYMBOL(drm_gem_private_object_fini); diff --git a/drivers/gpu/drm/drm_gpuvm.c b/drivers/gpu/drm/drm_gpuvm.c index d6bea8a4fffd..78a1a4f09509 100644 --- a/drivers/gpu/drm/drm_gpuvm.c +++ b/drivers/gpu/drm/drm_gpuvm.c @@ -497,8 +497,7 @@ * DRM GPUVM also does not take care of the locking of the backing * &drm_gem_object buffers GPU VA lists and &drm_gpuvm_bo abstractions by * itself; drivers are responsible to enforce mutual exclusion using either the - * GEMs dma_resv lock or alternatively a driver specific external lock. For the - * latter see also drm_gem_gpuva_set_lock(). + * GEMs dma_resv lock or the GEMs gpuva.lock mutex. 
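+ * The gpuva.lock mutex is initialized and destroyed together with the GEM
+ * object, in drm_gem_private_object_init() and drm_gem_private_object_fini().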
* * However, DRM GPUVM contains lockdep checks to ensure callers of its API hold * the corresponding lock whenever the &drm_gem_objects GPU VA list is accessed @@ -1582,7 +1581,7 @@ drm_gpuvm_bo_destroy(struct kref *kref) drm_gpuvm_bo_list_del(vm_bo, extobj, lock); drm_gpuvm_bo_list_del(vm_bo, evict, lock); - drm_gem_gpuva_assert_lock_held(obj); + drm_gem_gpuva_assert_lock_held(gpuvm, obj); list_del(&vm_bo->list.entry.gem); if (ops && ops->vm_bo_free) @@ -1603,7 +1602,8 @@ drm_gpuvm_bo_destroy(struct kref *kref) * If the reference count drops to zero, the &gpuvm_bo is destroyed, which * includes removing it from the GEMs gpuva list. Hence, if a call to this * function can potentially let the reference count drop to zero the caller must - * hold the dma-resv or driver specific GEM gpuva lock. + * hold the lock that the GEM uses for its gpuva list (either the GEM's + * dma-resv or gpuva.lock mutex). * * This function may only be called from non-atomic context. * @@ -1627,7 +1627,7 @@ __drm_gpuvm_bo_find(struct drm_gpuvm *gpuvm, { struct drm_gpuvm_bo *vm_bo; - drm_gem_gpuva_assert_lock_held(obj); + drm_gem_gpuva_assert_lock_held(gpuvm, obj); drm_gem_for_each_gpuvm_bo(vm_bo, obj) if (vm_bo->vm == gpuvm) return vm_bo; @@ -1686,7 +1686,7 @@ drm_gpuvm_bo_obtain(struct drm_gpuvm *gpuvm, if (!vm_bo) return ERR_PTR(-ENOMEM); - drm_gem_gpuva_assert_lock_held(obj); + drm_gem_gpuva_assert_lock_held(gpuvm, obj); list_add_tail(&vm_bo->list.entry.gem, &obj->gpuva.list); return vm_bo; @@ -1722,7 +1722,7 @@ drm_gpuvm_bo_obtain_prealloc(struct drm_gpuvm_bo *__vm_bo) return vm_bo; } - drm_gem_gpuva_assert_lock_held(obj); + drm_gem_gpuva_assert_lock_held(gpuvm, obj); list_add_tail(&__vm_bo->list.entry.gem, &obj->gpuva.list); return __vm_bo; @@ -1894,8 +1894,7 @@ EXPORT_SYMBOL_GPL(drm_gpuva_remove); * reference of the latter is taken. * * This function expects the caller to protect the GEM's GPUVA list against - * concurrent access using either the GEMs dma_resv lock or a driver specific - * lock set through drm_gem_gpuva_set_lock(). + * concurrent access using either the GEM's dma-resv or gpuva.lock mutex. */ void drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo) @@ -1910,7 +1909,7 @@ drm_gpuva_link(struct drm_gpuva *va, struct drm_gpuvm_bo *vm_bo) va->vm_bo = drm_gpuvm_bo_get(vm_bo); - drm_gem_gpuva_assert_lock_held(obj); + drm_gem_gpuva_assert_lock_held(gpuvm, obj); list_add_tail(&va->gem.entry, &vm_bo->list.gpuva); } EXPORT_SYMBOL_GPL(drm_gpuva_link); @@ -1930,8 +1929,7 @@ EXPORT_SYMBOL_GPL(drm_gpuva_link); * the latter is dropped. * * This function expects the caller to protect the GEM's GPUVA list against - * concurrent access using either the GEMs dma_resv lock or a driver specific - * lock set through drm_gem_gpuva_set_lock(). + * concurrent access using either the GEM's dma-resv or gpuva.lock mutex. */ void drm_gpuva_unlink(struct drm_gpuva *va) @@ -1942,7 +1940,7 @@ drm_gpuva_unlink(struct drm_gpuva *va) if (unlikely(!obj)) return; - drm_gem_gpuva_assert_lock_held(obj); + drm_gem_gpuva_assert_lock_held(va->vm, obj); list_del_init(&va->gem.entry); va->vm_bo = NULL; @@ -2943,8 +2941,8 @@ EXPORT_SYMBOL_GPL(drm_gpuvm_prefetch_ops_create); * After the caller finished processing the returned &drm_gpuva_ops, they must * be freed with &drm_gpuva_ops_free. * - * It is the callers responsibility to protect the GEMs GPUVA list against - * concurrent access using the GEMs dma_resv lock. 
+ * This function expects the caller to protect the GEM's GPUVA list against + * concurrent access using either the GEM's dma-resv or gpuva.lock mutex. * * Returns: a pointer to the &drm_gpuva_ops on success, an ERR_PTR on failure */ @@ -2956,7 +2954,7 @@ drm_gpuvm_bo_unmap_ops_create(struct drm_gpuvm_bo *vm_bo) struct drm_gpuva *va; int ret; - drm_gem_gpuva_assert_lock_held(vm_bo->obj); + drm_gem_gpuva_assert_lock_held(vm_bo->vm, vm_bo->obj); ops = kzalloc(sizeof(*ops), GFP_KERNEL); if (!ops) diff --git a/drivers/gpu/drm/drm_panel_backlight_quirks.c b/drivers/gpu/drm/drm_panel_backlight_quirks.c index 598f812b7cb3..537dc6dd0534 100644 --- a/drivers/gpu/drm/drm_panel_backlight_quirks.c +++ b/drivers/gpu/drm/drm_panel_backlight_quirks.c @@ -8,23 +8,26 @@ #include <drm/drm_edid.h> #include <drm/drm_utils.h> -struct drm_panel_min_backlight_quirk { - struct { - enum dmi_field field; - const char * const value; - } dmi_match; +struct drm_panel_match { + enum dmi_field field; + const char * const value; +}; + +struct drm_get_panel_backlight_quirk { + struct drm_panel_match dmi_match; + struct drm_panel_match dmi_match_other; struct drm_edid_ident ident; - u8 min_brightness; + struct drm_panel_backlight_quirk quirk; }; -static const struct drm_panel_min_backlight_quirk drm_panel_min_backlight_quirks[] = { +static const struct drm_get_panel_backlight_quirk drm_panel_min_backlight_quirks[] = { /* 13 inch matte panel */ { .dmi_match.field = DMI_BOARD_VENDOR, .dmi_match.value = "Framework", .ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x0bca), .ident.name = "NE135FBM-N41", - .min_brightness = 0, + .quirk = { .min_brightness = 1, }, }, /* 13 inch glossy panel */ { @@ -32,7 +35,7 @@ static const struct drm_panel_min_backlight_quirk drm_panel_min_backlight_quirks .dmi_match.value = "Framework", .ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x095f), .ident.name = "NE135FBM-N41", - .min_brightness = 0, + .quirk = { .min_brightness = 1, }, }, /* 13 inch 2.8k panel */ { @@ -40,56 +43,114 @@ static const struct drm_panel_min_backlight_quirk drm_panel_min_backlight_quirks .dmi_match.value = "Framework", .ident.panel_id = drm_edid_encode_panel_id('B', 'O', 'E', 0x0cb4), .ident.name = "NE135A1M-NY1", - .min_brightness = 0, + .quirk = { .min_brightness = 1, }, + }, + /* Steam Deck models */ + { + .dmi_match.field = DMI_SYS_VENDOR, + .dmi_match.value = "Valve", + .dmi_match_other.field = DMI_PRODUCT_NAME, + .dmi_match_other.value = "Jupiter", + .quirk = { .min_brightness = 1, }, + }, + { + .dmi_match.field = DMI_SYS_VENDOR, + .dmi_match.value = "Valve", + .dmi_match_other.field = DMI_PRODUCT_NAME, + .dmi_match_other.value = "Galileo", + .quirk = { .min_brightness = 1, }, + }, + /* Have OLED Panels with brightness issue when last byte is 0/1 */ + { + .dmi_match.field = DMI_SYS_VENDOR, + .dmi_match.value = "AYANEO", + .dmi_match_other.field = DMI_PRODUCT_NAME, + .dmi_match_other.value = "AYANEO 3", + .quirk = { .brightness_mask = 3, }, + }, + { + .dmi_match.field = DMI_SYS_VENDOR, + .dmi_match.value = "ZOTAC", + .dmi_match_other.field = DMI_BOARD_NAME, + .dmi_match_other.value = "G0A1W", + .quirk = { .brightness_mask = 3, }, + }, + { + .dmi_match.field = DMI_SYS_VENDOR, + .dmi_match.value = "ZOTAC", + .dmi_match_other.field = DMI_BOARD_NAME, + .dmi_match_other.value = "G1A1W", + .quirk = { .brightness_mask = 3, }, + }, + { + .dmi_match.field = DMI_SYS_VENDOR, + .dmi_match.value = "ONE-NETBOOK", + .dmi_match_other.field = DMI_PRODUCT_NAME, + .dmi_match_other.value = "ONEXPLAYER F1Pro", + 
.quirk = { .brightness_mask = 3, }, + }, + { + .dmi_match.field = DMI_SYS_VENDOR, + .dmi_match.value = "ONE-NETBOOK", + .dmi_match_other.field = DMI_PRODUCT_NAME, + .dmi_match_other.value = "ONEXPLAYER F1 EVA-02", + .quirk = { .brightness_mask = 3, }, }, }; -static bool drm_panel_min_backlight_quirk_matches(const struct drm_panel_min_backlight_quirk *quirk, - const struct drm_edid *edid) +static bool drm_panel_min_backlight_quirk_matches( + const struct drm_get_panel_backlight_quirk *quirk, + const struct drm_edid *edid) { - if (!dmi_match(quirk->dmi_match.field, quirk->dmi_match.value)) + if (quirk->dmi_match.field && + !dmi_match(quirk->dmi_match.field, quirk->dmi_match.value)) + return false; + + if (quirk->dmi_match_other.field && + !dmi_match(quirk->dmi_match_other.field, + quirk->dmi_match_other.value)) return false; - if (!drm_edid_match(edid, &quirk->ident)) + if (quirk->ident.panel_id && !drm_edid_match(edid, &quirk->ident)) return false; return true; } /** - * drm_get_panel_min_brightness_quirk - Get minimum supported brightness level for a panel. + * drm_get_panel_backlight_quirk - Get backlight quirks for a panel * @edid: EDID of the panel to check * * This function checks for platform specific (e.g. DMI based) quirks * providing info on the minimum backlight brightness for systems where this - * cannot be probed correctly from the hard-/firm-ware. + * cannot be probed correctly from the hard-/firm-ware and other sources. * * Returns: - * A negative error value or - * an override value in the range [0, 255] representing 0-100% to be scaled to - * the drivers target range. + * a drm_panel_backlight_quirk struct if a quirk was found, otherwise an + * error pointer. */ -int drm_get_panel_min_brightness_quirk(const struct drm_edid *edid) +const struct drm_panel_backlight_quirk * +drm_get_panel_backlight_quirk(const struct drm_edid *edid) { - const struct drm_panel_min_backlight_quirk *quirk; + const struct drm_get_panel_backlight_quirk *quirk; size_t i; if (!IS_ENABLED(CONFIG_DMI)) - return -ENODATA; + return ERR_PTR(-ENODATA); if (!edid) - return -EINVAL; + return ERR_PTR(-EINVAL); for (i = 0; i < ARRAY_SIZE(drm_panel_min_backlight_quirks); i++) { quirk = &drm_panel_min_backlight_quirks[i]; if (drm_panel_min_backlight_quirk_matches(quirk, edid)) - return quirk->min_brightness; + return &quirk->quirk; } - return -ENODATA; + return ERR_PTR(-ENODATA); } -EXPORT_SYMBOL(drm_get_panel_min_brightness_quirk); +EXPORT_SYMBOL(drm_get_panel_backlight_quirk); MODULE_DESCRIPTION("Quirks for panel backlight overrides"); MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/drm_sysfs.c b/drivers/gpu/drm/drm_sysfs.c index a455c56dbbeb..b01ffa4d6509 100644 --- a/drivers/gpu/drm/drm_sysfs.c +++ b/drivers/gpu/drm/drm_sysfs.c @@ -18,6 +18,7 @@ #include <linux/gfp.h> #include <linux/i2c.h> #include <linux/kdev_t.h> +#include <linux/pci.h> #include <linux/property.h> #include <linux/slab.h> @@ -30,6 +31,8 @@ #include <drm/drm_property.h> #include <drm/drm_sysfs.h> +#include <asm/video.h> + #include "drm_internal.h" #include "drm_crtc_internal.h" @@ -508,6 +511,43 @@ void drm_sysfs_connector_property_event(struct drm_connector *connector, } EXPORT_SYMBOL(drm_sysfs_connector_property_event); +static ssize_t boot_display_show(struct device *dev, struct device_attribute *attr, + char *buf) +{ + return sysfs_emit(buf, "1\n"); +} +static DEVICE_ATTR_RO(boot_display); + +static struct attribute *display_attrs[] = { + &dev_attr_boot_display.attr, + NULL +}; + +static umode_t boot_display_visible(struct kobject 
*kobj, + struct attribute *a, int n) +{ + struct device *dev = kobj_to_dev(kobj)->parent; + + if (dev_is_pci(dev)) { + struct pci_dev *pdev = to_pci_dev(dev); + + if (video_is_primary_device(&pdev->dev)) + return a->mode; + } + + return 0; +} + +static const struct attribute_group display_attr_group = { + .attrs = display_attrs, + .is_visible = boot_display_visible, +}; + +static const struct attribute_group *card_dev_groups[] = { + &display_attr_group, + NULL +}; + struct device *drm_sysfs_minor_alloc(struct drm_minor *minor) { const char *minor_str; @@ -531,6 +571,7 @@ struct device *drm_sysfs_minor_alloc(struct drm_minor *minor) kdev->devt = MKDEV(DRM_MAJOR, minor->index); kdev->class = drm_class; + kdev->groups = card_dev_groups; kdev->type = &drm_sysfs_device_minor; } diff --git a/drivers/gpu/drm/gma500/fbdev.c b/drivers/gpu/drm/gma500/fbdev.c index 4a37136f90f4..32d31e5f5f1a 100644 --- a/drivers/gpu/drm/gma500/fbdev.c +++ b/drivers/gpu/drm/gma500/fbdev.c @@ -120,7 +120,6 @@ static void psb_fbdev_fb_destroy(struct fb_info *info) drm_fb_helper_fini(fb_helper); drm_framebuffer_unregister_private(fb); - fb->obj[0] = NULL; drm_framebuffer_cleanup(fb); kfree(fb); @@ -245,7 +244,6 @@ int psb_fbdev_driver_fbdev_probe(struct drm_fb_helper *fb_helper, err_drm_framebuffer_unregister_private: drm_framebuffer_unregister_private(fb); - fb->obj[0] = NULL; drm_framebuffer_cleanup(fb); kfree(fb); err_drm_gem_object_put: diff --git a/drivers/gpu/drm/gud/gud_connector.c b/drivers/gpu/drm/gud/gud_connector.c index 0f07d77c5d52..4a15695fa933 100644 --- a/drivers/gpu/drm/gud/gud_connector.c +++ b/drivers/gpu/drm/gud/gud_connector.c @@ -16,7 +16,6 @@ #include <drm/drm_modeset_helper_vtables.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> -#include <drm/drm_simple_kms_helper.h> #include <drm/gud.h> #include "gud_internal.h" @@ -607,13 +606,16 @@ int gud_connector_fill_properties(struct drm_connector_state *connector_state, return gconn->num_properties; } +static const struct drm_encoder_funcs gud_drm_simple_encoder_funcs_cleanup = { + .destroy = drm_encoder_cleanup, +}; + static int gud_connector_create(struct gud_device *gdrm, unsigned int index, struct gud_connector_descriptor_req *desc) { struct drm_device *drm = &gdrm->drm; struct gud_connector *gconn; struct drm_connector *connector; - struct drm_encoder *encoder; int ret, connector_type; u32 flags; @@ -681,20 +683,13 @@ static int gud_connector_create(struct gud_device *gdrm, unsigned int index, return ret; } - /* The first connector is attached to the existing simple pipe encoder */ - if (!connector->index) { - encoder = &gdrm->pipe.encoder; - } else { - encoder = &gconn->encoder; - - ret = drm_simple_encoder_init(drm, encoder, DRM_MODE_ENCODER_NONE); - if (ret) - return ret; - - encoder->possible_crtcs = 1; - } + gconn->encoder.possible_crtcs = drm_crtc_mask(&gdrm->crtc); + ret = drm_encoder_init(drm, &gconn->encoder, &gud_drm_simple_encoder_funcs_cleanup, + DRM_MODE_ENCODER_NONE, NULL); + if (ret) + return ret; - return drm_connector_attach_encoder(connector, encoder); + return drm_connector_attach_encoder(connector, &gconn->encoder); } int gud_get_connectors(struct gud_device *gdrm) diff --git a/drivers/gpu/drm/gud/gud_drv.c b/drivers/gpu/drm/gud/gud_drv.c index b52a12cbba3e..b7345c8d823d 100644 --- a/drivers/gpu/drm/gud/gud_drv.c +++ b/drivers/gpu/drm/gud/gud_drv.c @@ -16,6 +16,7 @@ #include <drm/clients/drm_client_setup.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_blend.h> +#include <drm/drm_crtc_helper.h> #include 
<drm/drm_damage_helper.h> #include <drm/drm_debugfs.h> #include <drm/drm_drv.h> @@ -27,7 +28,6 @@ #include <drm/drm_managed.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> -#include <drm/drm_simple_kms_helper.h> #include <drm/gud.h> #include "gud_internal.h" @@ -289,7 +289,7 @@ static int gud_get_properties(struct gud_device *gdrm) * but mask out any additions on future devices. */ val &= GUD_ROTATION_MASK; - ret = drm_plane_create_rotation_property(&gdrm->pipe.plane, + ret = drm_plane_create_rotation_property(&gdrm->plane, DRM_MODE_ROTATE_0, val); break; default: @@ -338,10 +338,30 @@ static int gud_stats_debugfs(struct seq_file *m, void *data) return 0; } -static const struct drm_simple_display_pipe_funcs gud_pipe_funcs = { - .check = gud_pipe_check, - .update = gud_pipe_update, - DRM_GEM_SIMPLE_DISPLAY_PIPE_SHADOW_PLANE_FUNCS +static const struct drm_crtc_helper_funcs gud_crtc_helper_funcs = { + .atomic_check = drm_crtc_helper_atomic_check +}; + +static const struct drm_crtc_funcs gud_crtc_funcs = { + .reset = drm_atomic_helper_crtc_reset, + .destroy = drm_crtc_cleanup, + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, +}; + +static const struct drm_plane_helper_funcs gud_plane_helper_funcs = { + DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, + .atomic_check = gud_plane_atomic_check, + .atomic_update = gud_plane_atomic_update, +}; + +static const struct drm_plane_funcs gud_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_cleanup, + DRM_GEM_SHADOW_PLANE_FUNCS, }; static const struct drm_mode_config_funcs gud_mode_config_funcs = { @@ -350,7 +370,7 @@ static const struct drm_mode_config_funcs gud_mode_config_funcs = { .atomic_commit = drm_atomic_helper_commit, }; -static const u64 gud_pipe_modifiers[] = { +static const u64 gud_plane_modifiers[] = { DRM_FORMAT_MOD_LINEAR, DRM_FORMAT_MOD_INVALID }; @@ -567,12 +587,17 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id) return -ENOMEM; } - ret = drm_simple_display_pipe_init(drm, &gdrm->pipe, &gud_pipe_funcs, - formats, num_formats, - gud_pipe_modifiers, NULL); + ret = drm_universal_plane_init(drm, &gdrm->plane, 0, + &gud_plane_funcs, + formats, num_formats, + gud_plane_modifiers, + DRM_PLANE_TYPE_PRIMARY, NULL); if (ret) return ret; + drm_plane_helper_add(&gdrm->plane, &gud_plane_helper_funcs); + drm_plane_enable_fb_damage_clips(&gdrm->plane); + devm_kfree(dev, formats); devm_kfree(dev, formats_dev); @@ -582,7 +607,12 @@ static int gud_probe(struct usb_interface *intf, const struct usb_device_id *id) return ret; } - drm_plane_enable_fb_damage_clips(&gdrm->pipe.plane); + ret = drm_crtc_init_with_planes(drm, &gdrm->crtc, &gdrm->plane, NULL, + &gud_crtc_funcs, NULL); + if (ret) + return ret; + + drm_crtc_helper_add(&gdrm->crtc, &gud_crtc_helper_funcs); ret = gud_get_connectors(gdrm); if (ret) { diff --git a/drivers/gpu/drm/gud/gud_internal.h b/drivers/gpu/drm/gud/gud_internal.h index d6fb25388722..d27c31648341 100644 --- a/drivers/gpu/drm/gud/gud_internal.h +++ b/drivers/gpu/drm/gud/gud_internal.h @@ -11,11 +11,11 @@ #include <uapi/drm/drm_fourcc.h> #include <drm/drm_modes.h> -#include <drm/drm_simple_kms_helper.h> struct gud_device { struct drm_device drm; - struct drm_simple_display_pipe pipe; + struct drm_plane plane; + struct drm_crtc crtc; struct 
work_struct work; u32 flags; const struct drm_format_info *xrgb8888_emulation_format; @@ -62,11 +62,10 @@ int gud_usb_set_u8(struct gud_device *gdrm, u8 request, u8 val); void gud_clear_damage(struct gud_device *gdrm); void gud_flush_work(struct work_struct *work); -int gud_pipe_check(struct drm_simple_display_pipe *pipe, - struct drm_plane_state *new_plane_state, - struct drm_crtc_state *new_crtc_state); -void gud_pipe_update(struct drm_simple_display_pipe *pipe, - struct drm_plane_state *old_state); +int gud_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state); +void gud_plane_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *atomic_state); int gud_connector_fill_properties(struct drm_connector_state *connector_state, struct gud_property_req *properties); int gud_get_connectors(struct gud_device *gdrm); diff --git a/drivers/gpu/drm/gud/gud_pipe.c b/drivers/gpu/drm/gud/gud_pipe.c index 8d548d08f127..54d9aa9998e5 100644 --- a/drivers/gpu/drm/gud/gud_pipe.c +++ b/drivers/gpu/drm/gud/gud_pipe.c @@ -20,7 +20,6 @@ #include <drm/drm_gem_framebuffer_helper.h> #include <drm/drm_print.h> #include <drm/drm_rect.h> -#include <drm/drm_simple_kms_helper.h> #include <drm/gud.h> #include "gud_internal.h" @@ -451,14 +450,15 @@ static void gud_fb_handle_damage(struct gud_device *gdrm, struct drm_framebuffer gud_flush_damage(gdrm, fb, src, !fb->obj[0]->import_attach, damage); } -int gud_pipe_check(struct drm_simple_display_pipe *pipe, - struct drm_plane_state *new_plane_state, - struct drm_crtc_state *new_crtc_state) +int gud_plane_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) { - struct gud_device *gdrm = to_gud_device(pipe->crtc.dev); - struct drm_plane_state *old_plane_state = pipe->plane.state; - const struct drm_display_mode *mode = &new_crtc_state->mode; - struct drm_atomic_state *state = new_plane_state->state; + struct gud_device *gdrm = to_gud_device(plane->dev); + struct drm_plane_state *old_plane_state = drm_atomic_get_old_plane_state(state, plane); + struct drm_plane_state *new_plane_state = drm_atomic_get_new_plane_state(state, plane); + struct drm_crtc *crtc = new_plane_state->crtc; + struct drm_crtc_state *crtc_state; + const struct drm_display_mode *mode; struct drm_framebuffer *old_fb = old_plane_state->fb; struct drm_connector_state *connector_state = NULL; struct drm_framebuffer *fb = new_plane_state->fb; @@ -469,20 +469,37 @@ int gud_pipe_check(struct drm_simple_display_pipe *pipe, int idx, ret; size_t len; - if (WARN_ON_ONCE(!fb)) + if (drm_WARN_ON_ONCE(plane->dev, !fb)) return -EINVAL; + if (drm_WARN_ON_ONCE(plane->dev, !crtc)) + return -EINVAL; + + crtc_state = drm_atomic_get_new_crtc_state(state, crtc); + + mode = &crtc_state->mode; + + ret = drm_atomic_helper_check_plane_state(new_plane_state, crtc_state, + DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, + false, false); + if (ret) + return ret; + + if (!new_plane_state->visible) + return 0; + if (old_plane_state->rotation != new_plane_state->rotation) - new_crtc_state->mode_changed = true; + crtc_state->mode_changed = true; if (old_fb && old_fb->format != format) - new_crtc_state->mode_changed = true; + crtc_state->mode_changed = true; - if (!new_crtc_state->mode_changed && !new_crtc_state->connectors_changed) + if (!crtc_state->mode_changed && !crtc_state->connectors_changed) return 0; /* Only one connector is supported */ - if (hweight32(new_crtc_state->connector_mask) != 1) + if (hweight32(crtc_state->connector_mask) != 1) return -EINVAL; if (format->format == 
DRM_FORMAT_XRGB8888 && gdrm->xrgb8888_emulation_format) @@ -500,7 +517,7 @@ int gud_pipe_check(struct drm_simple_display_pipe *pipe, if (!connector_state) { struct drm_connector_list_iter conn_iter; - drm_connector_list_iter_begin(pipe->crtc.dev, &conn_iter); + drm_connector_list_iter_begin(plane->dev, &conn_iter); drm_for_each_connector_iter(connector, &conn_iter) { if (connector->state->crtc) { connector_state = connector->state; @@ -567,16 +584,18 @@ out: return ret; } -void gud_pipe_update(struct drm_simple_display_pipe *pipe, - struct drm_plane_state *old_state) +void gud_plane_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *atomic_state) { - struct drm_device *drm = pipe->crtc.dev; + struct drm_device *drm = plane->dev; struct gud_device *gdrm = to_gud_device(drm); - struct drm_plane_state *state = pipe->plane.state; - struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(state); - struct drm_framebuffer *fb = state->fb; - struct drm_crtc *crtc = &pipe->crtc; + struct drm_plane_state *old_state = drm_atomic_get_old_plane_state(atomic_state, plane); + struct drm_plane_state *new_state = drm_atomic_get_new_plane_state(atomic_state, plane); + struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(new_state); + struct drm_framebuffer *fb = new_state->fb; + struct drm_crtc *crtc = new_state->crtc; struct drm_rect damage; + struct drm_atomic_helper_damage_iter iter; int ret, idx; if (crtc->state->mode_changed || !crtc->state->enable) { @@ -611,7 +630,8 @@ void gud_pipe_update(struct drm_simple_display_pipe *pipe, if (ret) goto ctrl_disable; - if (drm_atomic_helper_damage_merged(old_state, state, &damage)) + drm_atomic_helper_damage_iter_init(&iter, old_state, new_state); + drm_atomic_for_each_plane_damage(&iter, &damage) gud_fb_handle_damage(gdrm, fb, &shadow_plane_state->data[0], &damage); drm_gem_fb_end_cpu_access(fb, DMA_FROM_DEVICE); diff --git a/drivers/gpu/drm/i915/Kconfig.debug b/drivers/gpu/drm/i915/Kconfig.debug index 1852e0804942..3562a02ef7ad 100644 --- a/drivers/gpu/drm/i915/Kconfig.debug +++ b/drivers/gpu/drm/i915/Kconfig.debug @@ -50,7 +50,7 @@ config DRM_I915_DEBUG select DRM_VGEM # used by igt/prime_vgem (dmabuf interop checks) select DRM_DEBUG_MM if DRM=y select DRM_EXPORT_FOR_TESTS if m - select DRM_DEBUG_SELFTEST + select DRM_KUNIT_TEST if KUNIT select DMABUF_SELFTESTS select SW_SYNC # signaling validation framework (igt/syncobj*) select DRM_I915_WERROR diff --git a/drivers/gpu/drm/i915/display/g4x_dp.c b/drivers/gpu/drm/i915/display/g4x_dp.c index 87f6b9602b16..aa159f9ce12f 100644 --- a/drivers/gpu/drm/i915/display/g4x_dp.c +++ b/drivers/gpu/drm/i915/display/g4x_dp.c @@ -424,17 +424,6 @@ intel_dp_link_down(struct intel_encoder *encoder, drm_dbg_kms(display->drm, "\n"); - if ((display->platform.ivybridge && port == PORT_A) || - (HAS_PCH_CPT(display) && port != PORT_A)) { - intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT; - intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE_CPT; - } else { - intel_dp->DP &= ~DP_LINK_TRAIN_MASK; - intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE; - } - intel_de_write(display, intel_dp->output_reg, intel_dp->DP); - intel_de_posting_read(display, intel_dp->output_reg); - intel_dp->DP &= ~DP_PORT_EN; intel_de_write(display, intel_dp->output_reg, intel_dp->DP); intel_de_posting_read(display, intel_dp->output_reg); @@ -612,6 +601,19 @@ cpt_set_link_train(struct intel_dp *intel_dp, } static void +cpt_set_idle_link_train(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct 
intel_display *display = to_intel_display(intel_dp); + + intel_dp->DP &= ~DP_LINK_TRAIN_MASK_CPT; + intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE_CPT; + + intel_de_write(display, intel_dp->output_reg, intel_dp->DP); + intel_de_posting_read(display, intel_dp->output_reg); +} + +static void g4x_set_link_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, u8 dp_train_pat) @@ -639,6 +641,19 @@ g4x_set_link_train(struct intel_dp *intel_dp, intel_de_posting_read(display, intel_dp->output_reg); } +static void +g4x_set_idle_link_train(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(intel_dp); + + intel_dp->DP &= ~DP_LINK_TRAIN_MASK; + intel_dp->DP |= DP_LINK_TRAIN_PAT_IDLE; + + intel_de_write(display, intel_dp->output_reg, intel_dp->DP); + intel_de_posting_read(display, intel_dp->output_reg); +} + static void intel_dp_enable_port(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state) { @@ -1285,12 +1300,10 @@ bool g4x_dp_init(struct intel_display *display, drm_dbg_kms(display->drm, "No VBT child device for DP-%c\n", port_name(port)); - dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL); + dig_port = intel_dig_port_alloc(); if (!dig_port) return false; - dig_port->aux_ch = AUX_CH_NONE; - intel_connector = intel_connector_alloc(); if (!intel_connector) goto err_connector_alloc; @@ -1300,8 +1313,6 @@ bool g4x_dp_init(struct intel_display *display, intel_encoder->devdata = devdata; - mutex_init(&dig_port->hdcp.mutex); - if (drm_encoder_init(display->drm, &intel_encoder->base, &intel_dp_enc_funcs, DRM_MODE_ENCODER_TMDS, "DP %c", port_name(port))) @@ -1342,10 +1353,13 @@ bool g4x_dp_init(struct intel_display *display, intel_encoder->audio_disable = g4x_dp_audio_disable; if ((display->platform.ivybridge && port == PORT_A) || - (HAS_PCH_CPT(display) && port != PORT_A)) + (HAS_PCH_CPT(display) && port != PORT_A)) { dig_port->dp.set_link_train = cpt_set_link_train; - else + dig_port->dp.set_idle_link_train = cpt_set_idle_link_train; + } else { dig_port->dp.set_link_train = g4x_set_link_train; + dig_port->dp.set_idle_link_train = g4x_set_idle_link_train; + } if (display->platform.cherryview) intel_encoder->set_signal_levels = chv_set_signal_levels; @@ -1368,7 +1382,6 @@ bool g4x_dp_init(struct intel_display *display, } dig_port->dp.output_reg = output_reg; - dig_port->max_lanes = 4; intel_encoder->type = INTEL_OUTPUT_DP; intel_encoder->power_domain = intel_display_power_ddi_lanes_domain(display, port); diff --git a/drivers/gpu/drm/i915/display/g4x_hdmi.c b/drivers/gpu/drm/i915/display/g4x_hdmi.c index 2610f5702fb9..f6e2d1ed5639 100644 --- a/drivers/gpu/drm/i915/display/g4x_hdmi.c +++ b/drivers/gpu/drm/i915/display/g4x_hdmi.c @@ -19,7 +19,7 @@ #include "intel_display_types.h" #include "intel_dp_aux.h" #include "intel_dpio_phy.h" -#include "intel_fdi.h" +#include "intel_encoder.h" #include "intel_fifo_underrun.h" #include "intel_hdmi.h" #include "intel_hotplug.h" @@ -135,11 +135,8 @@ static int g4x_hdmi_compute_config(struct intel_encoder *encoder, struct intel_atomic_state *state = to_intel_atomic_state(crtc_state->uapi.state); struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); - if (HAS_PCH_SPLIT(display)) { + if (HAS_PCH_SPLIT(display)) crtc_state->has_pch_encoder = true; - if (!intel_fdi_compute_pipe_bpp(crtc_state)) - return -EINVAL; - } if (display->platform.g4x) crtc_state->has_hdmi_sink = g4x_compute_has_hdmi_sink(state, crtc); @@ -690,12 +687,10 @@ bool g4x_hdmi_init(struct 
intel_display *display, drm_dbg_kms(display->drm, "No VBT child device for HDMI-%c\n", port_name(port)); - dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL); + dig_port = intel_dig_port_alloc(); if (!dig_port) return false; - dig_port->aux_ch = AUX_CH_NONE; - intel_connector = intel_connector_alloc(); if (!intel_connector) goto err_connector_alloc; @@ -704,8 +699,6 @@ bool g4x_hdmi_init(struct intel_display *display, intel_encoder->devdata = devdata; - mutex_init(&dig_port->hdcp.mutex); - if (drm_encoder_init(display->drm, &intel_encoder->base, &intel_hdmi_enc_funcs, DRM_MODE_ENCODER_TMDS, "HDMI %c", port_name(port))) @@ -767,8 +760,6 @@ bool g4x_hdmi_init(struct intel_display *display, intel_encoder->cloneable |= BIT(INTEL_OUTPUT_HDMI); dig_port->hdmi.hdmi_reg = hdmi_reg; - dig_port->dp.output_reg = INVALID_MMIO_REG; - dig_port->max_lanes = 4; intel_infoframe_init(dig_port); diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.c b/drivers/gpu/drm/i915/display/i9xx_plane.c index f291ced989dc..3eb96d8abba8 100644 --- a/drivers/gpu/drm/i915/display/i9xx_plane.c +++ b/drivers/gpu/drm/i915/display/i9xx_plane.c @@ -155,8 +155,7 @@ static bool i9xx_plane_has_windowing(struct intel_plane *plane) i9xx_plane == PLANE_C; } -static u32 i9xx_plane_ctl(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +static u32 i9xx_plane_ctl(const struct intel_plane_state *plane_state) { struct intel_display *display = to_intel_display(plane_state); const struct drm_framebuffer *fb = plane_state->hw.fb; @@ -355,11 +354,24 @@ i9xx_plane_check(struct intel_crtc_state *crtc_state, if (ret) return ret; - plane_state->ctl = i9xx_plane_ctl(crtc_state, plane_state); + plane_state->ctl = i9xx_plane_ctl(plane_state); return 0; } +static u32 i8xx_plane_surf_offset(const struct intel_plane_state *plane_state) +{ + int x = plane_state->view.color_plane[0].x; + int y = plane_state->view.color_plane[0].y; + + return intel_fb_xy_to_linear(x, y, plane_state, 0); +} + +u32 i965_plane_surf_offset(const struct intel_plane_state *plane_state) +{ + return plane_state->view.color_plane[0].offset; +} + static u32 i9xx_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) { struct intel_display *display = to_intel_display(crtc_state); @@ -463,7 +475,7 @@ static void i9xx_plane_update_arm(struct intel_dsb *dsb, enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; int x = plane_state->view.color_plane[0].x; int y = plane_state->view.color_plane[0].y; - u32 dspcntr, dspaddr_offset, linear_offset; + u32 dspcntr; dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state); @@ -472,13 +484,6 @@ static void i9xx_plane_update_arm(struct intel_dsb *dsb, crtc_state->async_flip_planes & BIT(plane->id)) dspcntr |= DISP_ASYNC_FLIP; - linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); - - if (DISPLAY_VER(display) >= 4) - dspaddr_offset = plane_state->view.color_plane[0].offset; - else - dspaddr_offset = linear_offset; - if (display->platform.cherryview && i9xx_plane == PLANE_B) { int crtc_x = plane_state->uapi.dst.x1; int crtc_y = plane_state->uapi.dst.y1; @@ -498,7 +503,7 @@ static void i9xx_plane_update_arm(struct intel_dsb *dsb, DISP_OFFSET_Y(y) | DISP_OFFSET_X(x)); } else if (DISPLAY_VER(display) >= 4) { intel_de_write_fw(display, DSPLINOFF(display, i9xx_plane), - linear_offset); + intel_fb_xy_to_linear(x, y, plane_state, 0)); intel_de_write_fw(display, DSPTILEOFF(display, i9xx_plane), DISP_OFFSET_Y(y) | DISP_OFFSET_X(x)); } @@ -511,11 +516,9 @@ static void i9xx_plane_update_arm(struct intel_dsb 
*dsb, intel_de_write_fw(display, DSPCNTR(display, i9xx_plane), dspcntr); if (DISPLAY_VER(display) >= 4) - intel_de_write_fw(display, DSPSURF(display, i9xx_plane), - intel_plane_ggtt_offset(plane_state) + dspaddr_offset); + intel_de_write_fw(display, DSPSURF(display, i9xx_plane), plane_state->surf); else - intel_de_write_fw(display, DSPADDR(display, i9xx_plane), - intel_plane_ggtt_offset(plane_state) + dspaddr_offset); + intel_de_write_fw(display, DSPADDR(display, i9xx_plane), plane_state->surf); } static void i830_plane_update_arm(struct intel_dsb *dsb, @@ -604,16 +607,13 @@ g4x_primary_async_flip(struct intel_dsb *dsb, { struct intel_display *display = to_intel_display(plane); u32 dspcntr = plane_state->ctl | i9xx_plane_ctl_crtc(crtc_state); - u32 dspaddr_offset = plane_state->view.color_plane[0].offset; enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; if (async_flip) dspcntr |= DISP_ASYNC_FLIP; intel_de_write_fw(display, DSPCNTR(display, i9xx_plane), dspcntr); - - intel_de_write_fw(display, DSPSURF(display, i9xx_plane), - intel_plane_ggtt_offset(plane_state) + dspaddr_offset); + intel_de_write_fw(display, DSPSURF(display, i9xx_plane), plane_state->surf); } static void @@ -624,11 +624,9 @@ vlv_primary_async_flip(struct intel_dsb *dsb, bool async_flip) { struct intel_display *display = to_intel_display(plane); - u32 dspaddr_offset = plane_state->view.color_plane[0].offset; enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; - intel_de_write_fw(display, DSPADDR_VLV(display, i9xx_plane), - intel_plane_ggtt_offset(plane_state) + dspaddr_offset); + intel_de_write_fw(display, DSPADDR_VLV(display, i9xx_plane), plane_state->surf); } static void @@ -1037,6 +1035,11 @@ intel_primary_plane_create(struct intel_display *display, enum pipe pipe) plane->get_hw_state = i9xx_plane_get_hw_state; plane->check_plane = i9xx_plane_check; + if (DISPLAY_VER(display) >= 4) + plane->surf_offset = i965_plane_surf_offset; + else + plane->surf_offset = i8xx_plane_surf_offset; + if (DISPLAY_VER(display) >= 5 || display->platform.g4x) plane->capture_error = g4x_primary_capture_error; else if (DISPLAY_VER(display) >= 4) @@ -1254,24 +1257,21 @@ bool i9xx_fixup_initial_plane_config(struct intel_crtc *crtc, const struct intel_plane_state *plane_state = to_intel_plane_state(plane->base.state); enum i9xx_plane_id i9xx_plane = plane->i9xx_plane; - u32 base; if (!plane_state->uapi.visible) return false; - base = intel_plane_ggtt_offset(plane_state); - /* * We may have moved the surface to a different * part of ggtt, make the plane aware of that. 
*/ - if (plane_config->base == base) + if (plane_config->base == plane_state->surf) return false; if (DISPLAY_VER(display) >= 4) - intel_de_write(display, DSPSURF(display, i9xx_plane), base); + intel_de_write(display, DSPSURF(display, i9xx_plane), plane_state->surf); else - intel_de_write(display, DSPADDR(display, i9xx_plane), base); + intel_de_write(display, DSPADDR(display, i9xx_plane), plane_state->surf); return true; } diff --git a/drivers/gpu/drm/i915/display/i9xx_plane.h b/drivers/gpu/drm/i915/display/i9xx_plane.h index d90546d60855..565dab751301 100644 --- a/drivers/gpu/drm/i915/display/i9xx_plane.h +++ b/drivers/gpu/drm/i915/display/i9xx_plane.h @@ -24,6 +24,7 @@ unsigned int vlv_plane_min_alignment(struct intel_plane *plane, const struct drm_framebuffer *fb, int colot_plane); int i9xx_check_plane_surface(struct intel_plane_state *plane_state); +u32 i965_plane_surf_offset(const struct intel_plane_state *plane_state); struct intel_plane * intel_primary_plane_create(struct intel_display *display, enum pipe pipe); diff --git a/drivers/gpu/drm/i915/display/i9xx_wm.c b/drivers/gpu/drm/i915/display/i9xx_wm.c index 1f9db5118777..fd3b7b35f351 100644 --- a/drivers/gpu/drm/i915/display/i9xx_wm.c +++ b/drivers/gpu/drm/i915/display/i9xx_wm.c @@ -3,6 +3,10 @@ * Copyright © 2023 Intel Corporation */ +#include <linux/iopoll.h> + +#include "soc/intel_dram.h" + #include "i915_drv.h" #include "i915_reg.h" #include "i9xx_wm.h" @@ -85,7 +89,8 @@ static const struct cxsr_latency cxsr_latency_table[] = { static const struct cxsr_latency *pnv_get_cxsr_latency(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); + const struct dram_info *dram_info = intel_dram_info(display->drm); + bool is_ddr3 = dram_info->type == INTEL_DRAM_DDR3; int i; for (i = 0; i < ARRAY_SIZE(cxsr_latency_table); i++) { @@ -93,15 +98,16 @@ static const struct cxsr_latency *pnv_get_cxsr_latency(struct intel_display *dis bool is_desktop = !display->platform.mobile; if (is_desktop == latency->is_desktop && - i915->is_ddr3 == latency->is_ddr3 && - DIV_ROUND_CLOSEST(i915->fsb_freq, 1000) == latency->fsb_freq && - DIV_ROUND_CLOSEST(i915->mem_freq, 1000) == latency->mem_freq) + is_ddr3 == latency->is_ddr3 && + DIV_ROUND_CLOSEST(dram_info->fsb_freq, 1000) == latency->fsb_freq && + DIV_ROUND_CLOSEST(dram_info->mem_freq, 1000) == latency->mem_freq) return latency; } drm_dbg_kms(display->drm, - "Could not find CxSR latency for DDR%s, FSB %u kHz, MEM %u kHz\n", - i915->is_ddr3 ? 
"3" : "2", i915->fsb_freq, i915->mem_freq); + "Could not find CxSR latency for %s, FSB %u kHz, MEM %u kHz\n", + intel_dram_type_str(dram_info->type), + dram_info->fsb_freq, dram_info->mem_freq); return NULL; } @@ -109,6 +115,7 @@ static const struct cxsr_latency *pnv_get_cxsr_latency(struct intel_display *dis static void chv_set_memory_dvfs(struct intel_display *display, bool enable) { u32 val; + int ret; vlv_punit_get(display->drm); @@ -121,8 +128,10 @@ static void chv_set_memory_dvfs(struct intel_display *display, bool enable) val |= FORCE_DDR_FREQ_REQ_ACK; vlv_punit_write(display->drm, PUNIT_REG_DDR_SETUP2, val); - if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2) & - FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) + ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2), + (val & FORCE_DDR_FREQ_REQ_ACK) == 0, + 500, 3000, false); + if (ret) drm_err(display->drm, "timed out waiting for Punit DDR DVFS request\n"); @@ -3902,6 +3911,7 @@ static void vlv_wm_get_hw_state(struct intel_display *display) struct vlv_wm_values *wm = &display->wm.vlv; struct intel_crtc *crtc; u32 val; + int ret; vlv_read_wm_values(display, wm); @@ -3928,8 +3938,10 @@ static void vlv_wm_get_hw_state(struct intel_display *display) val |= FORCE_DDR_FREQ_REQ_ACK; vlv_punit_write(display->drm, PUNIT_REG_DDR_SETUP2, val); - if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2) & - FORCE_DDR_FREQ_REQ_ACK) == 0, 3)) { + ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_DDR_SETUP2), + (val & FORCE_DDR_FREQ_REQ_ACK) == 0, + 500, 3000, false); + if (ret) { drm_dbg_kms(display->drm, "Punit not acking DDR DVFS request, " "assuming DDR DVFS is disabled\n"); diff --git a/drivers/gpu/drm/i915/display/icl_dsi.c b/drivers/gpu/drm/i915/display/icl_dsi.c index 8d9cb73a93a7..37faa8f19f6e 100644 --- a/drivers/gpu/drm/i915/display/icl_dsi.c +++ b/drivers/gpu/drm/i915/display/icl_dsi.c @@ -25,6 +25,8 @@ * Jani Nikula <jani.nikula@intel.com> */ +#include <linux/iopoll.h> + #include <drm/display/drm_dsc_helper.h> #include <drm/drm_atomic_helper.h> #include <drm/drm_fixed.h> @@ -72,8 +74,12 @@ static int payload_credits_available(struct intel_display *display, static bool wait_for_header_credits(struct intel_display *display, enum transcoder dsi_trans, int hdr_credit) { - if (wait_for_us(header_credits_available(display, dsi_trans) >= - hdr_credit, 100)) { + int ret, available; + + ret = poll_timeout_us(available = header_credits_available(display, dsi_trans), + available >= hdr_credit, + 10, 100, false); + if (ret) { drm_err(display->drm, "DSI header credits not released\n"); return false; } @@ -84,8 +90,12 @@ static bool wait_for_header_credits(struct intel_display *display, static bool wait_for_payload_credits(struct intel_display *display, enum transcoder dsi_trans, int payld_credit) { - if (wait_for_us(payload_credits_available(display, dsi_trans) >= - payld_credit, 100)) { + int ret, available; + + ret = poll_timeout_us(available = payload_credits_available(display, dsi_trans), + available >= payld_credit, + 10, 100, false); + if (ret) { drm_err(display->drm, "DSI payload credits not released\n"); return false; } @@ -137,8 +147,11 @@ static void wait_for_cmds_dispatched_to_panel(struct intel_encoder *encoder) /* wait for LP TX in progress bit to be cleared */ for_each_dsi_port(port, intel_dsi->ports) { dsi_trans = dsi_port_to_transcoder(port); - if (wait_for_us(!(intel_de_read(display, DSI_LP_MSG(dsi_trans)) & - LPTX_IN_PROGRESS), 20)) + + ret = intel_de_wait_custom(display, 
DSI_LP_MSG(dsi_trans), + LPTX_IN_PROGRESS, 0, + 20, 0, NULL); + if (ret) drm_err(display->drm, "LPTX bit not cleared\n"); } } @@ -516,13 +529,15 @@ static void gen11_dsi_enable_ddi_buffer(struct intel_encoder *encoder) struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; + int ret; for_each_dsi_port(port, intel_dsi->ports) { intel_de_rmw(display, DDI_BUF_CTL(port), 0, DDI_BUF_CTL_ENABLE); - if (wait_for_us(!(intel_de_read(display, DDI_BUF_CTL(port)) & - DDI_BUF_IS_IDLE), - 500)) + ret = intel_de_wait_custom(display, DDI_BUF_CTL(port), + DDI_BUF_IS_IDLE, 0, + 500, 0, NULL); + if (ret) drm_err(display->drm, "DDI port:%c buffer idle\n", port_name(port)); } @@ -838,9 +853,14 @@ gen11_dsi_configure_transcoder(struct intel_encoder *encoder, /* wait for link ready */ for_each_dsi_port(port, intel_dsi->ports) { + int ret; + dsi_trans = dsi_port_to_transcoder(port); - if (wait_for_us((intel_de_read(display, DSI_TRANS_FUNC_CONF(dsi_trans)) & - LINK_READY), 2500)) + + ret = intel_de_wait_custom(display, DSI_TRANS_FUNC_CONF(dsi_trans), + LINK_READY, LINK_READY, + 2500, 0, NULL); + if (ret) drm_err(display->drm, "DSI link not ready\n"); } } @@ -1321,6 +1341,7 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) enum port port; enum transcoder dsi_trans; u32 tmp; + int ret; /* disable periodic update mode */ if (is_cmd_mode(intel_dsi)) { @@ -1337,9 +1358,10 @@ static void gen11_dsi_deconfigure_trancoder(struct intel_encoder *encoder) tmp &= ~LINK_ULPS_TYPE_LP11; intel_de_write(display, DSI_LP_MSG(dsi_trans), tmp); - if (wait_for_us((intel_de_read(display, DSI_LP_MSG(dsi_trans)) & - LINK_IN_ULPS), - 10)) + ret = intel_de_wait_custom(display, DSI_LP_MSG(dsi_trans), + LINK_IN_ULPS, LINK_IN_ULPS, + 10, 0, NULL); + if (ret) drm_err(display->drm, "DSI link not in ULPS\n"); } @@ -1367,14 +1389,17 @@ static void gen11_dsi_disable_port(struct intel_encoder *encoder) struct intel_display *display = to_intel_display(encoder); struct intel_dsi *intel_dsi = enc_to_intel_dsi(encoder); enum port port; + int ret; gen11_dsi_ungate_clocks(encoder); for_each_dsi_port(port, intel_dsi->ports) { intel_de_rmw(display, DDI_BUF_CTL(port), DDI_BUF_CTL_ENABLE, 0); - if (wait_for_us((intel_de_read(display, DDI_BUF_CTL(port)) & - DDI_BUF_IS_IDLE), - 8)) + ret = intel_de_wait_custom(display, DDI_BUF_CTL(port), + DDI_BUF_IS_IDLE, DDI_BUF_IS_IDLE, + 8, 0, NULL); + + if (ret) drm_err(display->drm, "DDI port:%c buffer not idle\n", port_name(port)); diff --git a/drivers/gpu/drm/i915/display/intel_alpm.c b/drivers/gpu/drm/i915/display/intel_alpm.c index dfdde8e4eabe..ed7a7ed486b5 100644 --- a/drivers/gpu/drm/i915/display/intel_alpm.c +++ b/drivers/gpu/drm/i915/display/intel_alpm.c @@ -16,6 +16,14 @@ #include "intel_psr.h" #include "intel_psr_regs.h" +#define SILENCE_PERIOD_MIN_TIME 80 +#define SILENCE_PERIOD_MAX_TIME 180 +#define SILENCE_PERIOD_TIME (SILENCE_PERIOD_MIN_TIME + \ + (SILENCE_PERIOD_MAX_TIME - \ + SILENCE_PERIOD_MIN_TIME) / 2) + +#define LFPS_CYCLE_COUNT 10 + bool intel_alpm_aux_wake_supported(struct intel_dp *intel_dp) { return intel_dp->alpm_dpcd & DP_ALPM_CAP; @@ -44,72 +52,49 @@ void intel_alpm_init(struct intel_dp *intel_dp) mutex_init(&intel_dp->alpm_parameters.lock); } -/* - * See Bspec: 71632 for the table - * - * Silence_period = tSilence,Min + ((tSilence,Max - tSilence,Min) / 2) - * - * Half cycle duration: - * - * Link rates 1.62 - 4.32 and tLFPS_Cycle = 70 ns - * FLOOR( (Link Rate * tLFPS_Cycle) / (2 * 10) ) - * 
- * Link rates 5.4 - 8.1 - * PORT_ALPM_LFPS_CTL[ LFPS Cycle Count ] = 10 - * LFPS Period chosen is the mid-point of the min:max values from the table - * FLOOR( LFPS Period in Symbol clocks / - * (2 * PORT_ALPM_LFPS_CTL[ LFPS Cycle Count ]) ) - */ -static bool _lnl_get_silence_period_and_lfps_half_cycle(int link_rate, - int *silence_period, - int *lfps_half_cycle) +static int get_silence_period_symbols(const struct intel_crtc_state *crtc_state) { - switch (link_rate) { - case 162000: - *silence_period = 20; - *lfps_half_cycle = 5; - break; - case 216000: - *silence_period = 27; - *lfps_half_cycle = 7; - break; - case 243000: - *silence_period = 31; - *lfps_half_cycle = 8; - break; - case 270000: - *silence_period = 34; - *lfps_half_cycle = 9; - break; - case 324000: - *silence_period = 41; - *lfps_half_cycle = 11; - break; - case 432000: - *silence_period = 56; - *lfps_half_cycle = 15; - break; - case 540000: - *silence_period = 69; - *lfps_half_cycle = 12; - break; - case 648000: - *silence_period = 84; - *lfps_half_cycle = 15; - break; - case 675000: - *silence_period = 87; - *lfps_half_cycle = 15; - break; - case 810000: - *silence_period = 104; - *lfps_half_cycle = 19; - break; - default: - *silence_period = *lfps_half_cycle = -1; - return false; + return SILENCE_PERIOD_TIME * intel_dp_link_symbol_clock(crtc_state->port_clock) / + 1000 / 1000; +} + +static int get_lfps_cycle_min_max_time(const struct intel_crtc_state *crtc_state, + int *min, int *max) +{ + if (crtc_state->port_clock < 540000) { + *min = 65 * LFPS_CYCLE_COUNT; + *max = 75 * LFPS_CYCLE_COUNT; + } else if (crtc_state->port_clock <= 810000) { + *min = 140; + *max = 800; + } else { + *min = *max = -1; + return -1; } - return true; + + return 0; +} + +static int get_lfps_cycle_time(const struct intel_crtc_state *crtc_state) +{ + int tlfps_cycle_min, tlfps_cycle_max, ret; + + ret = get_lfps_cycle_min_max_time(crtc_state, &tlfps_cycle_min, + &tlfps_cycle_max); + if (ret) + return ret; + + return tlfps_cycle_min + (tlfps_cycle_max - tlfps_cycle_min) / 2; +} + +static int get_lfps_half_cycle_clocks(const struct intel_crtc_state *crtc_state) +{ + int lfps_cycle_time = get_lfps_cycle_time(crtc_state); + + if (lfps_cycle_time < 0) + return -1; + + return lfps_cycle_time * crtc_state->port_clock / 1000 / 1000 / (2 * LFPS_CYCLE_COUNT); } /* @@ -131,21 +116,19 @@ static bool _lnl_get_silence_period_and_lfps_half_cycle(int link_rate, * tML_PHY_LOCK = TPS4 Length * ( 10 / (Link Rate in MHz) ) * TPS4 Length = 252 Symbols */ -static int _lnl_compute_aux_less_wake_time(int port_clock) +static int _lnl_compute_aux_less_wake_time(const struct intel_crtc_state *crtc_state) { int tphy2_p2_to_p0 = 12 * 1000; - int tlfps_period_max = 800; - int tsilence_max = 180; int t1 = 50 * 1000; int tps4 = 252; /* port_clock is link rate in 10kbit/s units */ - int tml_phy_lock = 1000 * 1000 * tps4 / port_clock; + int tml_phy_lock = 1000 * 1000 * tps4 / crtc_state->port_clock; int num_ml_phy_lock = 7 + DIV_ROUND_UP(6500, tml_phy_lock) + 1; int t2 = num_ml_phy_lock * tml_phy_lock; int tcds = 1 * t2; - return DIV_ROUND_UP(tphy2_p2_to_p0 + tlfps_period_max + tsilence_max + - t1 + tcds, 1000); + return DIV_ROUND_UP(tphy2_p2_to_p0 + get_lfps_cycle_time(crtc_state) + + SILENCE_PERIOD_TIME + t1 + tcds, 1000); } static int @@ -157,13 +140,13 @@ _lnl_compute_aux_less_alpm_params(struct intel_dp *intel_dp, lfps_half_cycle; aux_less_wake_time = - _lnl_compute_aux_less_wake_time(crtc_state->port_clock); + _lnl_compute_aux_less_wake_time(crtc_state); aux_less_wake_lines 
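
For illustration only (not part of the patch): a quick arithmetic check that the new LFPS helper reproduces the half-cycle values from the removed per-link-rate table. Units follow the code above (port_clock in kHz, cycle times in ns, so ns * kHz / 10^6 gives clock cycles, with integer division at each step):

	port_clock = 270000 kHz: tLFPS cycle range 65..75 ns * 10 cycles = 650..750 ns, mid-point 700 ns;
	700 * 270000 / 1000 / 1000 = 189 clocks; 189 / (2 * 10) = 9, matching the removed table entry for 2.7 GHz.

	port_clock = 540000 kHz: range 140..800 ns, mid-point 470 ns;
	470 * 540000 / 1000 / 1000 = 253 clocks; 253 / (2 * 10) = 12, matching the removed entry for 5.4 GHz.

The silence period, by contrast, is now derived from the 80..180 ns SILENCE_PERIOD_* window rather than the Bspec table, so its values are not necessarily identical to the old hard-coded ones.
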
= intel_usecs_to_scanlines(&crtc_state->hw.adjusted_mode, aux_less_wake_time); + silence_period = get_silence_period_symbols(crtc_state); - if (!_lnl_get_silence_period_and_lfps_half_cycle(crtc_state->port_clock, - &silence_period, - &lfps_half_cycle)) + lfps_half_cycle = get_lfps_half_cycle_clocks(crtc_state); + if (lfps_half_cycle < 0) return false; if (aux_less_wake_lines > ALPM_CTL_AUX_LESS_WAKE_TIME_MASK || @@ -406,7 +389,7 @@ void intel_alpm_port_configure(struct intel_dp *intel_dp, PORT_ALPM_CTL_MAX_PHY_SWING_HOLD(0) | PORT_ALPM_CTL_SILENCE_PERIOD( intel_dp->alpm_parameters.silence_period_sym_clocks); - lfps_ctl_val = PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(10) | + lfps_ctl_val = PORT_ALPM_LFPS_CTL_LFPS_CYCLE_COUNT(LFPS_CYCLE_COUNT) | PORT_ALPM_LFPS_CTL_LFPS_HALF_CYCLE_DURATION( intel_dp->alpm_parameters.lfps_half_cycle_num_of_syms) | PORT_ALPM_LFPS_CTL_FIRST_LFPS_HALF_CYCLE_DURATION( diff --git a/drivers/gpu/drm/i915/display/intel_backlight.c b/drivers/gpu/drm/i915/display/intel_backlight.c index e007380e9a63..3b14f929825a 100644 --- a/drivers/gpu/drm/i915/display/intel_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_backlight.c @@ -236,7 +236,8 @@ static void i9xx_set_backlight(const struct drm_connector_state *conn_state, u32 struct intel_panel *panel = &connector->panel; u32 tmp, mask; - drm_WARN_ON(display->drm, panel->backlight.pwm_level_max == 0); + if (drm_WARN_ON(display->drm, panel->backlight.pwm_level_max == 0)) + return; if (panel->backlight.combination_mode) { struct pci_dev *pdev = to_pci_dev(display->drm->dev); diff --git a/drivers/gpu/drm/i915/display/intel_bios.c b/drivers/gpu/drm/i915/display/intel_bios.c index 9c268bed091d..3596dce84c28 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.c +++ b/drivers/gpu/drm/i915/display/intel_bios.c @@ -36,6 +36,7 @@ #include "soc/intel_rom.h" #include "i915_drv.h" +#include "i915_utils.h" #include "intel_display.h" #include "intel_display_core.h" #include "intel_display_rpm.h" @@ -1566,10 +1567,7 @@ parse_psr(struct intel_display *display, panel->vbt.psr.full_link = psr_table->full_link; panel->vbt.psr.require_aux_wakeup = psr_table->require_aux_to_wakeup; - - /* Allowed VBT values goes from 0 to 15 */ - panel->vbt.psr.idle_frames = psr_table->idle_frames < 0 ? 0 : - psr_table->idle_frames > 15 ? 
15 : psr_table->idle_frames; + panel->vbt.psr.idle_frames = psr_table->idle_frames; /* * New psr options 0=500us, 1=100us, 2=2500us, 3=0us @@ -2480,6 +2478,25 @@ static int parse_bdb_216_dp_max_link_rate(const int vbt_max_link_rate) } } +static u32 edp_rate_override_mask(int rate) +{ + switch (rate) { + case 2000000: return BDB_263_VBT_EDP_LINK_RATE_20; + case 1350000: return BDB_263_VBT_EDP_LINK_RATE_13_5; + case 1000000: return BDB_263_VBT_EDP_LINK_RATE_10; + case 810000: return BDB_263_VBT_EDP_LINK_RATE_8_1; + case 675000: return BDB_263_VBT_EDP_LINK_RATE_6_75; + case 540000: return BDB_263_VBT_EDP_LINK_RATE_5_4; + case 432000: return BDB_263_VBT_EDP_LINK_RATE_4_32; + case 324000: return BDB_263_VBT_EDP_LINK_RATE_3_24; + case 270000: return BDB_263_VBT_EDP_LINK_RATE_2_7; + case 243000: return BDB_263_VBT_EDP_LINK_RATE_2_43; + case 216000: return BDB_263_VBT_EDP_LINK_RATE_2_16; + case 162000: return BDB_263_VBT_EDP_LINK_RATE_1_62; + default: return 0; + } +} + int intel_bios_dp_max_link_rate(const struct intel_bios_encoder_data *devdata) { if (!devdata || devdata->display->vbt.version < 216) @@ -2499,6 +2516,19 @@ int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata) return devdata->child.dp_max_lane_count + 1; } +bool +intel_bios_encoder_reject_edp_rate(const struct intel_bios_encoder_data *devdata, + int rate) +{ + if (!devdata || devdata->display->vbt.version < 263) + return false; + + if (devdata->child.edp_data_rate_override == BDB_263_VBT_EDP_RATES_MASK) + return false; + + return devdata->child.edp_data_rate_override & edp_rate_override_mask(rate); +} + static void sanitize_device_type(struct intel_bios_encoder_data *devdata, enum port port) { @@ -2747,8 +2777,10 @@ static int child_device_expected_size(u16 version) { BUILD_BUG_ON(sizeof(struct child_device_config) < 40); - if (version > 256) + if (version > 263) return -ENOENT; + else if (version >= 263) + return 44; else if (version >= 256) return 40; else if (version >= 216) @@ -3743,8 +3775,6 @@ DEFINE_SHOW_ATTRIBUTE(intel_bios_vbt); void intel_bios_debugfs_register(struct intel_display *display) { - struct drm_minor *minor = display->drm->primary; - - debugfs_create_file("i915_vbt", 0444, minor->debugfs_root, + debugfs_create_file("i915_vbt", 0444, display->drm->debugfs_root, display, &intel_bios_vbt_fops); } diff --git a/drivers/gpu/drm/i915/display/intel_bios.h b/drivers/gpu/drm/i915/display/intel_bios.h index 6cd7a011b8c4..f9e438b2787b 100644 --- a/drivers/gpu/drm/i915/display/intel_bios.h +++ b/drivers/gpu/drm/i915/display/intel_bios.h @@ -50,180 +50,6 @@ enum intel_backlight_type { INTEL_BACKLIGHT_VESA_EDP_AUX_INTERFACE, }; -/* - * MIPI Sequence Block definitions - * - * Note the VBT spec has AssertReset / DeassertReset swapped from their - * usual naming, we use the proper names here to avoid confusion when - * reading the code. 
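
For illustration only (not part of the patch): a minimal sketch of how a source-rate filter might consult the new VBT 263+ helper shown above. The loop, array and "hypothetical_" name are assumptions; only intel_bios_encoder_reject_edp_rate() (which returns false for VBT < 263 or an all-ones override field) comes from the change, with rates in the same units as the switch above (e.g. 810000 for 8.1 Gbps).

static int hypothetical_count_allowed_edp_rates(const struct intel_bios_encoder_data *devdata,
						const int *rates, int num_rates)
{
	int i, count = 0;

	for (i = 0; i < num_rates; i++) {
		if (intel_bios_encoder_reject_edp_rate(devdata, rates[i]))
			continue;
		count++;
	}

	return count;
}
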
- */ -enum mipi_seq { - MIPI_SEQ_END = 0, - MIPI_SEQ_DEASSERT_RESET, /* Spec says MipiAssertResetPin */ - MIPI_SEQ_INIT_OTP, - MIPI_SEQ_DISPLAY_ON, - MIPI_SEQ_DISPLAY_OFF, - MIPI_SEQ_ASSERT_RESET, /* Spec says MipiDeassertResetPin */ - MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */ - MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */ - MIPI_SEQ_TEAR_ON, /* sequence block v2+ */ - MIPI_SEQ_TEAR_OFF, /* sequence block v3+ */ - MIPI_SEQ_POWER_ON, /* sequence block v3+ */ - MIPI_SEQ_POWER_OFF, /* sequence block v3+ */ - MIPI_SEQ_MAX -}; - -enum mipi_seq_element { - MIPI_SEQ_ELEM_END = 0, - MIPI_SEQ_ELEM_SEND_PKT, - MIPI_SEQ_ELEM_DELAY, - MIPI_SEQ_ELEM_GPIO, - MIPI_SEQ_ELEM_I2C, /* sequence block v2+ */ - MIPI_SEQ_ELEM_SPI, /* sequence block v3+ */ - MIPI_SEQ_ELEM_PMIC, /* sequence block v3+ */ - MIPI_SEQ_ELEM_MAX -}; - -#define MIPI_DSI_UNDEFINED_PANEL_ID 0 -#define MIPI_DSI_GENERIC_PANEL_ID 1 - -struct mipi_config { - u16 panel_id; - - /* General Params */ - u32 enable_dithering:1; - u32 rsvd1:1; - u32 is_bridge:1; - - u32 panel_arch_type:2; - u32 is_cmd_mode:1; - -#define NON_BURST_SYNC_PULSE 0x1 -#define NON_BURST_SYNC_EVENTS 0x2 -#define BURST_MODE 0x3 - u32 video_transfer_mode:2; - - u32 cabc_supported:1; -#define PPS_BLC_PMIC 0 -#define PPS_BLC_SOC 1 - u32 pwm_blc:1; - - /* Bit 13:10 */ -#define PIXEL_FORMAT_RGB565 0x1 -#define PIXEL_FORMAT_RGB666 0x2 -#define PIXEL_FORMAT_RGB666_LOOSELY_PACKED 0x3 -#define PIXEL_FORMAT_RGB888 0x4 - u32 videomode_color_format:4; - - /* Bit 15:14 */ -#define ENABLE_ROTATION_0 0x0 -#define ENABLE_ROTATION_90 0x1 -#define ENABLE_ROTATION_180 0x2 -#define ENABLE_ROTATION_270 0x3 - u32 rotation:2; - u32 bta_enabled:1; - u32 rsvd2:15; - - /* 2 byte Port Description */ -#define DUAL_LINK_NOT_SUPPORTED 0 -#define DUAL_LINK_FRONT_BACK 1 -#define DUAL_LINK_PIXEL_ALT 2 - u16 dual_link:2; - u16 lane_cnt:2; - u16 pixel_overlap:3; - u16 rgb_flip:1; -#define DL_DCS_PORT_A 0x00 -#define DL_DCS_PORT_C 0x01 -#define DL_DCS_PORT_A_AND_C 0x02 - u16 dl_dcs_cabc_ports:2; - u16 dl_dcs_backlight_ports:2; - u16 rsvd3:4; - - u16 rsvd4; - - u8 rsvd5; - u32 target_burst_mode_freq; - u32 dsi_ddr_clk; - u32 bridge_ref_clk; - -#define BYTE_CLK_SEL_20MHZ 0 -#define BYTE_CLK_SEL_10MHZ 1 -#define BYTE_CLK_SEL_5MHZ 2 - u8 byte_clk_sel:2; - - u8 rsvd6:6; - - /* DPHY Flags */ - u16 dphy_param_valid:1; - u16 eot_pkt_disabled:1; - u16 enable_clk_stop:1; - u16 rsvd7:13; - - u32 hs_tx_timeout; - u32 lp_rx_timeout; - u32 turn_around_timeout; - u32 device_reset_timer; - u32 master_init_timer; - u32 dbi_bw_timer; - u32 lp_byte_clk_val; - - /* 4 byte Dphy Params */ - u32 prepare_cnt:6; - u32 rsvd8:2; - u32 clk_zero_cnt:8; - u32 trail_cnt:5; - u32 rsvd9:3; - u32 exit_zero_cnt:6; - u32 rsvd10:2; - - u32 clk_lane_switch_cnt; - u32 hl_switch_cnt; - - u32 rsvd11[6]; - - /* timings based on dphy spec */ - u8 tclk_miss; - u8 tclk_post; - u8 rsvd12; - u8 tclk_pre; - u8 tclk_prepare; - u8 tclk_settle; - u8 tclk_term_enable; - u8 tclk_trail; - u16 tclk_prepare_clkzero; - u8 rsvd13; - u8 td_term_enable; - u8 teot; - u8 ths_exit; - u8 ths_prepare; - u16 ths_prepare_hszero; - u8 rsvd14; - u8 ths_settle; - u8 ths_skip; - u8 ths_trail; - u8 tinit; - u8 tlpx; - u8 rsvd15[3]; - - /* GPIOs */ - u8 panel_enable; - u8 bl_enable; - u8 pwm_enable; - u8 reset_r_n; - u8 pwr_down_r; - u8 stdby_r_n; - -} __packed; - -/* all delays have a unit of 100us */ -struct mipi_pps_data { - u16 panel_on_delay; - u16 bl_enable_delay; - u16 bl_disable_delay; - u16 panel_off_delay; - u16 panel_power_cycle_delay; -} __packed; - void 
intel_bios_init(struct intel_display *display); void intel_bios_init_panel_early(struct intel_display *display, struct intel_panel *panel, @@ -259,6 +85,8 @@ bool intel_bios_encoder_is_lspcon(const struct intel_bios_encoder_data *devdata) bool intel_bios_encoder_lane_reversal(const struct intel_bios_encoder_data *devdata); bool intel_bios_encoder_hpd_invert(const struct intel_bios_encoder_data *devdata); enum port intel_bios_encoder_port(const struct intel_bios_encoder_data *devdata); +bool intel_bios_encoder_reject_edp_rate(const struct intel_bios_encoder_data *devdata, + int rate); enum aux_ch intel_bios_dp_aux_ch(const struct intel_bios_encoder_data *devdata); int intel_bios_dp_boost_level(const struct intel_bios_encoder_data *devdata); int intel_bios_dp_max_lane_count(const struct intel_bios_encoder_data *devdata); diff --git a/drivers/gpu/drm/i915/display/intel_bo.c b/drivers/gpu/drm/i915/display/intel_bo.c index 65d64f79a4bd..d29c1508ccb9 100644 --- a/drivers/gpu/drm/i915/display/intel_bo.c +++ b/drivers/gpu/drm/i915/display/intel_bo.c @@ -2,7 +2,7 @@ /* Copyright © 2024 Intel Corporation */ #include <drm/drm_panic.h> -#include "display/intel_display_types.h" + #include "gem/i915_gem_mman.h" #include "gem/i915_gem_object.h" #include "gem/i915_gem_object_frontbuffer.h" diff --git a/drivers/gpu/drm/i915/display/intel_bw.c b/drivers/gpu/drm/i915/display/intel_bw.c index d29a755612de..ac6da20d9529 100644 --- a/drivers/gpu/drm/i915/display/intel_bw.c +++ b/drivers/gpu/drm/i915/display/intel_bw.c @@ -359,7 +359,7 @@ static int icl_get_qgv_points(struct intel_display *display, for (i = 0; i < qi->num_psf_points; i++) drm_dbg_kms(display->drm, - "PSF GV %d: CLK=%d \n", + "PSF GV %d: CLK=%d\n", i, qi->psf_points[i].clk); } diff --git a/drivers/gpu/drm/i915/display/intel_cdclk.c b/drivers/gpu/drm/i915/display/intel_cdclk.c index 228aa64c1349..9725eebe5706 100644 --- a/drivers/gpu/drm/i915/display/intel_cdclk.c +++ b/drivers/gpu/drm/i915/display/intel_cdclk.c @@ -22,6 +22,7 @@ */ #include <linux/debugfs.h> +#include <linux/iopoll.h> #include <linux/time.h> #include <drm/drm_fixed.h> @@ -31,6 +32,7 @@ #include "hsw_ips.h" #include "i915_drv.h" #include "i915_reg.h" +#include "i915_utils.h" #include "intel_atomic.h" #include "intel_audio.h" #include "intel_bw.h" @@ -672,6 +674,7 @@ static void vlv_set_cdclk(struct intel_display *display, int cdclk = cdclk_config->cdclk; u32 val, cmd = cdclk_config->voltage_level; intel_wakeref_t wakeref; + int ret; switch (cdclk) { case 400000: @@ -702,12 +705,12 @@ static void vlv_set_cdclk(struct intel_display *display, val &= ~DSPFREQGUAR_MASK; val |= (cmd << DSPFREQGUAR_SHIFT); vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, val); - if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) & - DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT), - 50)) { - drm_err(display->drm, - "timed out waiting for CDclk change\n"); - } + + ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM), + (val & DSPFREQSTAT_MASK) == (cmd << DSPFREQSTAT_SHIFT), + 500, 50 * 1000, false); + if (ret) + drm_err(display->drm, "timed out waiting for CDCLK change\n"); if (cdclk == 400000) { u32 divider; @@ -721,11 +724,11 @@ static void vlv_set_cdclk(struct intel_display *display, val |= divider; vlv_cck_write(display->drm, CCK_DISPLAY_CLOCK_CONTROL, val); - if (wait_for((vlv_cck_read(display->drm, CCK_DISPLAY_CLOCK_CONTROL) & - CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT), - 50)) - drm_err(display->drm, - "timed out waiting for CDclk change\n"); 
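
For illustration only (not part of the patch): the i915 hunks around here repeatedly replace open-coded wait_for()/wait_for_us() loops with poll_timeout_us(). A condensed sketch of the pattern as it is used in these call sites follows; the status accessor and ACK bit are placeholders, and the helper is assumed to be provided via the <linux/iopoll.h> / i915_utils.h includes added above.

#include <linux/iopoll.h>	/* assumed home of poll_timeout_us(), per the includes above */

#define HYPOTHETICAL_ACK_BIT	BIT(0)		/* placeholder status bit */

static bool hypothetical_wait_for_ack(struct intel_display *display,
				      u32 (*read_status)(struct intel_display *display))
{
	u32 val;
	int ret;

	/*
	 * Re-evaluate the read expression until the condition holds:
	 * poll every 500 us for at most 50 ms; the trailing bool mirrors
	 * the call sites above. Returns 0 on success, negative on timeout.
	 */
	ret = poll_timeout_us(val = read_status(display),
			      (val & HYPOTHETICAL_ACK_BIT) != 0,
			      500, 50 * 1000, false);
	if (ret) {
		drm_err(display->drm, "timed out waiting for hypothetical ACK\n");
		return false;
	}

	return true;
}
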
+ ret = poll_timeout_us(val = vlv_cck_read(display->drm, CCK_DISPLAY_CLOCK_CONTROL), + (val & CCK_FREQUENCY_STATUS) == (divider << CCK_FREQUENCY_STATUS_SHIFT), + 500, 50 * 1000, false); + if (ret) + drm_err(display->drm, "timed out waiting for CDCLK change\n"); } /* adjust self-refresh exit latency value */ @@ -761,6 +764,7 @@ static void chv_set_cdclk(struct intel_display *display, int cdclk = cdclk_config->cdclk; u32 val, cmd = cdclk_config->voltage_level; intel_wakeref_t wakeref; + int ret; switch (cdclk) { case 333333: @@ -786,12 +790,12 @@ static void chv_set_cdclk(struct intel_display *display, val &= ~DSPFREQGUAR_MASK_CHV; val |= (cmd << DSPFREQGUAR_SHIFT_CHV); vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, val); - if (wait_for((vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) & - DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV), - 50)) { - drm_err(display->drm, - "timed out waiting for CDclk change\n"); - } + + ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM), + (val & DSPFREQSTAT_MASK_CHV) == (cmd << DSPFREQSTAT_SHIFT_CHV), + 500, 50 * 1000, false); + if (ret) + drm_err(display->drm, "timed out waiting for CDCLK change\n"); vlv_punit_put(display->drm); @@ -903,8 +907,10 @@ static void bdw_set_cdclk(struct intel_display *display, * According to the spec, it should be enough to poll for this 1 us. * However, extensive testing shows that this can take longer. */ - if (wait_for_us(intel_de_read(display, LCPLL_CTL) & - LCPLL_CD_SOURCE_FCLK_DONE, 100)) + ret = intel_de_wait_custom(display, LCPLL_CTL, + LCPLL_CD_SOURCE_FCLK_DONE, LCPLL_CD_SOURCE_FCLK_DONE, + 100, 0, NULL); + if (ret) drm_err(display->drm, "Switching to FCLK failed\n"); intel_de_rmw(display, LCPLL_CTL, @@ -913,8 +919,10 @@ static void bdw_set_cdclk(struct intel_display *display, intel_de_rmw(display, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0); - if (wait_for_us((intel_de_read(display, LCPLL_CTL) & - LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) + ret = intel_de_wait_custom(display, LCPLL_CTL, + LCPLL_CD_SOURCE_FCLK_DONE, 0, + 1, 0, NULL); + if (ret) drm_err(display->drm, "Switching back to LCPLL failed\n"); intel_pcode_write(display->drm, HSW_PCODE_DE_WRITE_FREQ_REQ, @@ -3569,7 +3577,7 @@ static int i9xx_hrawclk(struct intel_display *display) struct drm_i915_private *i915 = to_i915(display->drm); /* hrawclock is 1/4 the FSB frequency */ - return DIV_ROUND_CLOSEST(i9xx_fsb_freq(i915), 4); + return DIV_ROUND_CLOSEST(intel_fsb_freq(i915), 4); } /** @@ -3622,9 +3630,7 @@ DEFINE_SHOW_ATTRIBUTE(i915_cdclk_info); void intel_cdclk_debugfs_register(struct intel_display *display) { - struct drm_minor *minor = display->drm->primary; - - debugfs_create_file("i915_cdclk_info", 0444, minor->debugfs_root, + debugfs_create_file("i915_cdclk_info", 0444, display->drm->debugfs_root, display, &i915_cdclk_info_fops); } diff --git a/drivers/gpu/drm/i915/display/intel_connector.c b/drivers/gpu/drm/i915/display/intel_connector.c index 42c923f416b3..6a55854db5b6 100644 --- a/drivers/gpu/drm/i915/display/intel_connector.c +++ b/drivers/gpu/drm/i915/display/intel_connector.c @@ -77,7 +77,7 @@ void intel_connector_cancel_modeset_retry_work(struct intel_connector *connector drm_connector_put(&connector->base); } -int intel_connector_init(struct intel_connector *connector) +static int intel_connector_init(struct intel_connector *connector) { struct intel_digital_connector_state *conn_state; diff --git a/drivers/gpu/drm/i915/display/intel_connector.h b/drivers/gpu/drm/i915/display/intel_connector.h index aafb25a814fa..0aa86626e646 
100644 --- a/drivers/gpu/drm/i915/display/intel_connector.h +++ b/drivers/gpu/drm/i915/display/intel_connector.h @@ -14,7 +14,6 @@ struct i2c_adapter; struct intel_connector; struct intel_encoder; -int intel_connector_init(struct intel_connector *connector); struct intel_connector *intel_connector_alloc(void); void intel_connector_free(struct intel_connector *connector); void intel_connector_destroy(struct drm_connector *connector); diff --git a/drivers/gpu/drm/i915/display/intel_crt.c b/drivers/gpu/drm/i915/display/intel_crt.c index 898c5d9e8f7a..31e68047f217 100644 --- a/drivers/gpu/drm/i915/display/intel_crt.c +++ b/drivers/gpu/drm/i915/display/intel_crt.c @@ -50,6 +50,7 @@ #include "intel_gmbus.h" #include "intel_hotplug.h" #include "intel_hotplug_irq.h" +#include "intel_link_bw.h" #include "intel_load_detect.h" #include "intel_pch_display.h" #include "intel_pch_refclk.h" @@ -421,7 +422,7 @@ static int pch_crt_compute_config(struct intel_encoder *encoder, return -EINVAL; crtc_state->has_pch_encoder = true; - if (!intel_fdi_compute_pipe_bpp(crtc_state)) + if (!intel_link_bw_compute_pipe_bpp(crtc_state)) return -EINVAL; crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB; @@ -446,7 +447,7 @@ static int hsw_crt_compute_config(struct intel_encoder *encoder, return -EINVAL; crtc_state->has_pch_encoder = true; - if (!intel_fdi_compute_pipe_bpp(crtc_state)) + if (!intel_link_bw_compute_pipe_bpp(crtc_state)) return -EINVAL; crtc_state->output_format = INTEL_OUTPUT_FORMAT_RGB; diff --git a/drivers/gpu/drm/i915/display/intel_cursor.c b/drivers/gpu/drm/i915/display/intel_cursor.c index 198e69efe9ac..d4d181f9dca5 100644 --- a/drivers/gpu/drm/i915/display/intel_cursor.c +++ b/drivers/gpu/drm/i915/display/intel_cursor.c @@ -33,17 +33,9 @@ static const u32 intel_cursor_formats[] = { DRM_FORMAT_ARGB8888, }; -static u32 intel_cursor_base(const struct intel_plane_state *plane_state) +static u32 intel_cursor_surf_offset(const struct intel_plane_state *plane_state) { - struct intel_display *display = to_intel_display(plane_state); - u32 base; - - if (DISPLAY_INFO(display)->cursor_needs_physical) - base = plane_state->phys_dma_addr; - else - base = intel_plane_ggtt_offset(plane_state); - - return base + plane_state->view.color_plane[0].offset; + return plane_state->view.color_plane[0].offset; } static u32 intel_cursor_position(const struct intel_crtc_state *crtc_state, @@ -213,8 +205,7 @@ static u32 i845_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) return cntl; } -static u32 i845_cursor_ctl(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +static u32 i845_cursor_ctl(const struct intel_plane_state *plane_state) { return CURSOR_ENABLE | CURSOR_FORMAT_ARGB | @@ -274,7 +265,7 @@ static int i845_check_cursor(struct intel_crtc_state *crtc_state, return -EINVAL; } - plane_state->ctl = i845_cursor_ctl(crtc_state, plane_state); + plane_state->ctl = i845_cursor_ctl(plane_state); return 0; } @@ -297,7 +288,7 @@ static void i845_cursor_update_arm(struct intel_dsb *dsb, size = CURSOR_HEIGHT(height) | CURSOR_WIDTH(width); - base = intel_cursor_base(plane_state); + base = plane_state->surf; pos = intel_cursor_position(crtc_state, plane_state, false); } @@ -406,8 +397,7 @@ static u32 i9xx_cursor_ctl_crtc(const struct intel_crtc_state *crtc_state) return cntl; } -static u32 i9xx_cursor_ctl(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +static u32 i9xx_cursor_ctl(const struct intel_plane_state *plane_state) { struct 
intel_display *display = to_intel_display(plane_state); u32 cntl = 0; @@ -534,7 +524,7 @@ static int i9xx_check_cursor(struct intel_crtc_state *crtc_state, return -EINVAL; } - plane_state->ctl = i9xx_cursor_ctl(crtc_state, plane_state); + plane_state->ctl = i9xx_cursor_ctl(plane_state); return 0; } @@ -675,7 +665,7 @@ static void i9xx_cursor_update_arm(struct intel_dsb *dsb, if (width != height) fbc_ctl = CUR_FBC_EN | CUR_FBC_HEIGHT(height - 1); - base = intel_cursor_base(plane_state); + base = plane_state->surf; pos = intel_cursor_position(crtc_state, plane_state, false); } @@ -1051,6 +1041,8 @@ intel_cursor_plane_create(struct intel_display *display, cursor->check_plane = i9xx_check_cursor; } + cursor->surf_offset = intel_cursor_surf_offset; + if (DISPLAY_VER(display) >= 5 || display->platform.g4x) cursor->capture_error = g4x_cursor_capture_error; else diff --git a/drivers/gpu/drm/i915/display/intel_ddi.c b/drivers/gpu/drm/i915/display/intel_ddi.c index 0405396c7750..46017091bb0b 100644 --- a/drivers/gpu/drm/i915/display/intel_ddi.c +++ b/drivers/gpu/drm/i915/display/intel_ddi.c @@ -2166,7 +2166,8 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port, { struct intel_display *display = to_intel_display(crtc_state); enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base); - u32 ln0, ln1, pin_assignment; + enum intel_tc_pin_assignment pin_assignment; + u32 ln0, ln1; u8 width; if (DISPLAY_VER(display) >= 14) @@ -2188,11 +2189,11 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port, ln1 &= ~(MG_DP_MODE_CFG_DP_X1_MODE | MG_DP_MODE_CFG_DP_X2_MODE); /* DPPATC */ - pin_assignment = intel_tc_port_get_pin_assignment_mask(dig_port); + pin_assignment = intel_tc_port_get_pin_assignment(dig_port); width = crtc_state->lane_count; switch (pin_assignment) { - case 0x0: + case INTEL_TC_PIN_ASSIGNMENT_NONE: drm_WARN_ON(display->drm, !intel_tc_port_in_legacy_mode(dig_port)); if (width == 1) { @@ -2202,20 +2203,20 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port, ln1 |= MG_DP_MODE_CFG_DP_X2_MODE; } break; - case 0x1: + case INTEL_TC_PIN_ASSIGNMENT_A: if (width == 4) { ln0 |= MG_DP_MODE_CFG_DP_X2_MODE; ln1 |= MG_DP_MODE_CFG_DP_X2_MODE; } break; - case 0x2: + case INTEL_TC_PIN_ASSIGNMENT_B: if (width == 2) { ln0 |= MG_DP_MODE_CFG_DP_X2_MODE; ln1 |= MG_DP_MODE_CFG_DP_X2_MODE; } break; - case 0x3: - case 0x5: + case INTEL_TC_PIN_ASSIGNMENT_C: + case INTEL_TC_PIN_ASSIGNMENT_E: if (width == 1) { ln0 |= MG_DP_MODE_CFG_DP_X1_MODE; ln1 |= MG_DP_MODE_CFG_DP_X1_MODE; @@ -2224,8 +2225,8 @@ icl_program_mg_dp_mode(struct intel_digital_port *dig_port, ln1 |= MG_DP_MODE_CFG_DP_X2_MODE; } break; - case 0x4: - case 0x6: + case INTEL_TC_PIN_ASSIGNMENT_D: + case INTEL_TC_PIN_ASSIGNMENT_F: if (width == 1) { ln0 |= MG_DP_MODE_CFG_DP_X1_MODE; ln1 |= MG_DP_MODE_CFG_DP_X1_MODE; @@ -2339,34 +2340,24 @@ static void intel_dp_sink_set_fec_ready(struct intel_dp *intel_dp, drm_dbg_kms(display->drm, "Failed to clear FEC detected flags\n"); } -static int read_fec_detected_status(struct drm_dp_aux *aux) -{ - int ret; - u8 status; - - ret = drm_dp_dpcd_readb(aux, DP_FEC_STATUS, &status); - if (ret < 0) - return ret; - - return status; -} - static int wait_for_fec_detected(struct drm_dp_aux *aux, bool enabled) { struct intel_display *display = to_intel_display(aux->drm_dev); int mask = enabled ? 
DP_FEC_DECODE_EN_DETECTED : DP_FEC_DECODE_DIS_DETECTED; - int status; - int err; + u8 status = 0; + int ret, err; - err = readx_poll_timeout(read_fec_detected_status, aux, status, - status & mask || status < 0, - 10000, 200000); + ret = poll_timeout_us(err = drm_dp_dpcd_read_byte(aux, DP_FEC_STATUS, &status), + err || (status & mask), + 10 * 1000, 200 * 1000, false); - if (err || status < 0) { + /* Either can be non-zero, but not both */ + ret = ret ?: err; + if (ret) { drm_dbg_kms(display->drm, - "Failed waiting for FEC %s to get detected: %d (status %d)\n", - str_enabled_disabled(enabled), err, status); - return err ? err : status; + "Failed waiting for FEC %s to get detected: %d (status 0x%02x)\n", + str_enabled_disabled(enabled), ret, status); + return ret; } return 0; @@ -2561,6 +2552,7 @@ mtl_ddi_enable_d2d(struct intel_encoder *encoder) enum port port = encoder->port; i915_reg_t reg; u32 set_bits, wait_bits; + int ret; if (DISPLAY_VER(display) < 14) return; @@ -2576,7 +2568,11 @@ mtl_ddi_enable_d2d(struct intel_encoder *encoder) } intel_de_rmw(display, reg, 0, set_bits); - if (wait_for_us(intel_de_read(display, reg) & wait_bits, 100)) { + + ret = intel_de_wait_custom(display, reg, + wait_bits, wait_bits, + 100, 0, NULL); + if (ret) { drm_err(display->drm, "Timeout waiting for D2D Link enable for DDI/PORT_BUF_CTL %c\n", port_name(port)); } @@ -3058,6 +3054,7 @@ mtl_ddi_disable_d2d(struct intel_encoder *encoder) enum port port = encoder->port; i915_reg_t reg; u32 clr_bits, wait_bits; + int ret; if (DISPLAY_VER(display) < 14) return; @@ -3073,7 +3070,11 @@ mtl_ddi_disable_d2d(struct intel_encoder *encoder) } intel_de_rmw(display, reg, clr_bits, 0); - if (wait_for_us(!(intel_de_read(display, reg) & wait_bits), 100)) + + ret = intel_de_wait_custom(display, reg, + wait_bits, 0, + 100, 0, NULL); + if (ret) drm_err(display->drm, "Timeout waiting for D2D Link disable for DDI/PORT_BUF_CTL %c\n", port_name(port)); } @@ -5148,12 +5149,10 @@ void intel_ddi_init(struct intel_display *display, phy_name(phy)); } - dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL); + dig_port = intel_dig_port_alloc(); if (!dig_port) return; - dig_port->aux_ch = AUX_CH_NONE; - encoder = &dig_port->base; encoder->devdata = devdata; @@ -5191,9 +5190,6 @@ void intel_ddi_init(struct intel_display *display, intel_encoder_link_check_init(encoder, intel_ddi_link_check); - mutex_init(&dig_port->hdcp.mutex); - dig_port->hdcp.num_streams = 0; - encoder->hotplug = intel_ddi_hotplug; encoder->compute_output_type = intel_ddi_compute_output_type; encoder->compute_config = intel_ddi_compute_config; @@ -5331,7 +5327,6 @@ void intel_ddi_init(struct intel_display *display, dig_port->ddi_a_4_lanes = DISPLAY_VER(display) < 11 && ddi_buf_ctl & DDI_A_4_LANES; - dig_port->dp.output_reg = INVALID_MMIO_REG; dig_port->max_lanes = intel_ddi_max_lanes(dig_port); if (need_aux_ch(encoder, init_dp)) { diff --git a/drivers/gpu/drm/i915/display/intel_display.c b/drivers/gpu/drm/i915/display/intel_display.c index 7035c1fc9033..c1a3a95c65f0 100644 --- a/drivers/gpu/drm/i915/display/intel_display.c +++ b/drivers/gpu/drm/i915/display/intel_display.c @@ -76,6 +76,7 @@ #include "intel_display_regs.h" #include "intel_display_rpm.h" #include "intel_display_types.h" +#include "intel_display_wa.h" #include "intel_dmc.h" #include "intel_dp.h" #include "intel_dp_link_training.h" @@ -1081,6 +1082,11 @@ static void intel_post_plane_update(struct intel_atomic_state *state, if (audio_enabling(old_crtc_state, new_crtc_state)) intel_encoders_audio_enable(state, 
crtc); + if (intel_display_wa(display, 14011503117)) { + if (old_crtc_state->pch_pfit.enabled != new_crtc_state->pch_pfit.enabled) + adl_scaler_ecc_unmask(new_crtc_state); + } + intel_alpm_post_plane_update(state, crtc); intel_psr_post_plane_update(state, crtc); diff --git a/drivers/gpu/drm/i915/display/intel_display_conversion.c b/drivers/gpu/drm/i915/display/intel_display_conversion.c index 4d565935e2cc..d56065f22655 100644 --- a/drivers/gpu/drm/i915/display/intel_display_conversion.c +++ b/drivers/gpu/drm/i915/display/intel_display_conversion.c @@ -4,7 +4,7 @@ #include "i915_drv.h" #include "intel_display_conversion.h" -struct intel_display *__i915_to_display(struct drm_i915_private *i915) +static struct intel_display *__i915_to_display(struct drm_i915_private *i915) { return i915->display; } diff --git a/drivers/gpu/drm/i915/display/intel_display_conversion.h b/drivers/gpu/drm/i915/display/intel_display_conversion.h index 46c7208d42ba..d497bc58a73f 100644 --- a/drivers/gpu/drm/i915/display/intel_display_conversion.h +++ b/drivers/gpu/drm/i915/display/intel_display_conversion.h @@ -9,20 +9,8 @@ #define __INTEL_DISPLAY_CONVERSION__ struct drm_device; -struct drm_i915_private; struct intel_display; -struct intel_display *__i915_to_display(struct drm_i915_private *i915); struct intel_display *__drm_to_display(struct drm_device *drm); -/* - * Transitional macro to optionally convert struct drm_i915_private * to struct - * intel_display *, also accepting the latter. - */ -#define __to_intel_display(p) \ - _Generic(p, \ - const struct drm_i915_private *: __i915_to_display((struct drm_i915_private *)(p)), \ - struct drm_i915_private *: __i915_to_display((struct drm_i915_private *)(p)), \ - const struct intel_display *: (p), \ - struct intel_display *: (p)) #endif /* __INTEL_DISPLAY_CONVERSION__ */ diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs.c b/drivers/gpu/drm/i915/display/intel_display_debugfs.c index ce3f9810c42d..10dddec3796f 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs.c @@ -820,14 +820,14 @@ static const struct drm_info_list intel_display_debugfs_list[] = { void intel_display_debugfs_register(struct intel_display *display) { - struct drm_minor *minor = display->drm->primary; + struct dentry *debugfs_root = display->drm->debugfs_root; - debugfs_create_file("i915_fifo_underrun_reset", 0644, minor->debugfs_root, + debugfs_create_file("i915_fifo_underrun_reset", 0644, debugfs_root, display, &i915_fifo_underrun_reset_ops); drm_debugfs_create_files(intel_display_debugfs_list, ARRAY_SIZE(intel_display_debugfs_list), - minor->debugfs_root, minor); + debugfs_root, display->drm->primary); intel_bios_debugfs_register(display); intel_cdclk_debugfs_register(display); diff --git a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c index 88914a1f3f62..de62b774272d 100644 --- a/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c +++ b/drivers/gpu/drm/i915/display/intel_display_debugfs_params.c @@ -7,7 +7,6 @@ #include <linux/kernel.h> #include <drm/drm_drv.h> -#include <drm/drm_file.h> #include "intel_display_core.h" #include "intel_display_debugfs_params.h" @@ -154,14 +153,14 @@ intel_display_debugfs_create_uint(const char *name, umode_t mode, /* add a subdirectory with files for each intel display param */ void intel_display_debugfs_params(struct intel_display *display) { - struct drm_minor *minor = display->drm->primary; + 
struct dentry *debugfs_root = display->drm->debugfs_root; struct dentry *dir; char dirname[16]; snprintf(dirname, sizeof(dirname), "%s_params", display->drm->driver->name); - dir = debugfs_lookup(dirname, minor->debugfs_root); + dir = debugfs_lookup(dirname, debugfs_root); if (!dir) - dir = debugfs_create_dir(dirname, minor->debugfs_root); + dir = debugfs_create_dir(dirname, debugfs_root); if (IS_ERR(dir)) return; diff --git a/drivers/gpu/drm/i915/display/intel_display_device.c b/drivers/gpu/drm/i915/display/intel_display_device.c index 089cffabbad5..65f0efc35bb7 100644 --- a/drivers/gpu/drm/i915/display/intel_display_device.c +++ b/drivers/gpu/drm/i915/display/intel_display_device.c @@ -1354,6 +1354,19 @@ static const struct intel_display_device_info xe2_lpd_display = { .__runtime_defaults.has_dbuf_overlap_detection = true, }; +static const struct intel_display_device_info wcl_display = { + XE_LPDP_FEATURES, + + .__runtime_defaults.cpu_transcoder_mask = + BIT(TRANSCODER_A) | BIT(TRANSCODER_B) | BIT(TRANSCODER_C), + .__runtime_defaults.pipe_mask = + BIT(PIPE_A) | BIT(PIPE_B) | BIT(PIPE_C), + .__runtime_defaults.fbc_mask = + BIT(INTEL_FBC_A) | BIT(INTEL_FBC_B) | BIT(INTEL_FBC_C), + .__runtime_defaults.port_mask = + BIT(PORT_A) | BIT(PORT_B) | BIT(PORT_TC1) | BIT(PORT_TC2), +}; + static const struct intel_display_device_info xe2_hpd_display = { XE_LPDP_FEATURES, .__runtime_defaults.port_mask = BIT(PORT_A) | @@ -1480,7 +1493,7 @@ static const struct { { 14, 1, &xe2_hpd_display }, { 20, 0, &xe2_lpd_display }, { 30, 0, &xe2_lpd_display }, - { 30, 2, &xe2_lpd_display }, + { 30, 2, &wcl_display }, }; static const struct intel_display_device_info * diff --git a/drivers/gpu/drm/i915/display/intel_display_device.h b/drivers/gpu/drm/i915/display/intel_display_device.h index 4308822f0415..6e87b763fe7c 100644 --- a/drivers/gpu/drm/i915/display/intel_display_device.h +++ b/drivers/gpu/drm/i915/display/intel_display_device.h @@ -9,7 +9,6 @@ #include <linux/bitops.h> #include <linux/types.h> -#include "intel_display_conversion.h" #include "intel_display_limits.h" struct drm_printer; @@ -224,8 +223,8 @@ struct intel_display_platforms { (IS_DISPLAY_VERx100((__display), (ipver), (ipver)) && \ IS_DISPLAY_STEP((__display), (from), (until))) -#define DISPLAY_INFO(__display) (__to_intel_display(__display)->info.__device_info) -#define DISPLAY_RUNTIME_INFO(__display) (&__to_intel_display(__display)->info.__runtime_info) +#define DISPLAY_INFO(__display) ((__display)->info.__device_info) +#define DISPLAY_RUNTIME_INFO(__display) (&(__display)->info.__runtime_info) #define DISPLAY_VER(__display) (DISPLAY_RUNTIME_INFO(__display)->ip.ver) #define DISPLAY_VERx100(__display) (DISPLAY_RUNTIME_INFO(__display)->ip.ver * 100 + \ @@ -236,7 +235,7 @@ struct intel_display_platforms { #define INTEL_DISPLAY_STEP(__display) (DISPLAY_RUNTIME_INFO(__display)->step) #define IS_DISPLAY_STEP(__display, since, until) \ - (drm_WARN_ON(__to_intel_display(__display)->drm, INTEL_DISPLAY_STEP(__display) == STEP_NONE), \ + (drm_WARN_ON((__display)->drm, INTEL_DISPLAY_STEP(__display) == STEP_NONE), \ INTEL_DISPLAY_STEP(__display) >= (since) && INTEL_DISPLAY_STEP(__display) < (until)) #define ARLS_HOST_BRIDGE_PCI_ID1 0x7D1C diff --git a/drivers/gpu/drm/i915/display/intel_display_driver.c b/drivers/gpu/drm/i915/display/intel_display_driver.c index 8586ba102605..cf1c14412abe 100644 --- a/drivers/gpu/drm/i915/display/intel_display_driver.c +++ b/drivers/gpu/drm/i915/display/intel_display_driver.c @@ -18,6 +18,7 @@ #include <drm/drm_vblank.h> 
#include "i915_drv.h" +#include "i915_utils.h" #include "i9xx_wm.h" #include "intel_acpi.h" #include "intel_atomic.h" diff --git a/drivers/gpu/drm/i915/display/intel_display_irq.c b/drivers/gpu/drm/i915/display/intel_display_irq.c index fb25ec8adae3..123e054affbe 100644 --- a/drivers/gpu/drm/i915/display/intel_display_irq.c +++ b/drivers/gpu/drm/i915/display/intel_display_irq.c @@ -1506,10 +1506,14 @@ u32 gen11_gu_misc_irq_ack(struct intel_display *display, const u32 master_ctl) if (!(master_ctl & GEN11_GU_MISC_IRQ)) return 0; + intel_display_rpm_assert_block(display); + iir = intel_de_read(display, GEN11_GU_MISC_IIR); if (likely(iir)) intel_de_write(display, GEN11_GU_MISC_IIR, iir); + intel_display_rpm_assert_unblock(display); + return iir; } @@ -1986,20 +1990,17 @@ void vlv_display_irq_postinstall(struct intel_display *display) void ibx_display_irq_reset(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); - - if (HAS_PCH_NOP(i915)) + if (HAS_PCH_NOP(display)) return; gen2_irq_reset(to_intel_uncore(display->drm), SDE_IRQ_REGS); - if (HAS_PCH_CPT(i915) || HAS_PCH_LPT(i915)) + if (HAS_PCH_CPT(display) || HAS_PCH_LPT(display)) intel_de_write(display, SERR_INT, 0xffffffff); } void gen8_display_irq_reset(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); enum pipe pipe; if (!HAS_DISPLAY(display)) @@ -2016,7 +2017,7 @@ void gen8_display_irq_reset(struct intel_display *display) intel_display_irq_regs_reset(display, GEN8_DE_PORT_IRQ_REGS); intel_display_irq_regs_reset(display, GEN8_DE_MISC_IRQ_REGS); - if (HAS_PCH_SPLIT(i915)) + if (HAS_PCH_SPLIT(display)) ibx_display_irq_reset(display); } diff --git a/drivers/gpu/drm/i915/display/intel_display_params.c b/drivers/gpu/drm/i915/display/intel_display_params.c index 75316247ee8a..2aed110c5b09 100644 --- a/drivers/gpu/drm/i915/display/intel_display_params.c +++ b/drivers/gpu/drm/i915/display/intel_display_params.c @@ -120,6 +120,9 @@ intel_display_param_named_unsafe(enable_psr, int, 0400, "(0=disabled, 1=enable up to PSR1, 2=enable up to PSR2) " "Default: -1 (use per-chip default)"); +intel_display_param_named_unsafe(enable_panel_replay, int, 0400, + "Enable Panel Replay (0=disabled, 1=enabled). Default: -1 (use per-chip default)"); + intel_display_param_named(psr_safest_params, bool, 0400, "Replace PSR VBT parameters by the safest and not optimal ones. 
This " "is helpful to detect if PSR issues are related to bad values set in " diff --git a/drivers/gpu/drm/i915/display/intel_display_params.h b/drivers/gpu/drm/i915/display/intel_display_params.h index 784e6bae8615..b01bc5700c52 100644 --- a/drivers/gpu/drm/i915/display/intel_display_params.h +++ b/drivers/gpu/drm/i915/display/intel_display_params.h @@ -46,6 +46,7 @@ struct drm_printer; param(bool, enable_dp_mst, true, 0600) \ param(int, enable_fbc, -1, 0600) \ param(int, enable_psr, -1, 0600) \ + param(int, enable_panel_replay, -1, 0600) \ param(bool, psr_safest_params, false, 0400) \ param(bool, enable_psr2_sel_fetch, true, 0400) \ param(int, enable_dmc_wl, -1, 0400) \ diff --git a/drivers/gpu/drm/i915/display/intel_display_power.c b/drivers/gpu/drm/i915/display/intel_display_power.c index 273054c22325..7f9a2d743bac 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power.c +++ b/drivers/gpu/drm/i915/display/intel_display_power.c @@ -3,6 +3,7 @@ * Copyright © 2019 Intel Corporation */ +#include <linux/iopoll.h> #include <linux/string_helpers.h> #include "soc/intel_dram.h" @@ -10,6 +11,7 @@ #include "i915_drv.h" #include "i915_irq.h" #include "i915_reg.h" +#include "i915_utils.h" #include "intel_backlight_regs.h" #include "intel_cdclk.h" #include "intel_clock_gating.h" @@ -1278,6 +1280,7 @@ static void hsw_disable_lcpll(struct intel_display *display, bool switch_to_fclk, bool allow_power_down) { u32 val; + int ret; assert_can_disable_lcpll(display); @@ -1287,8 +1290,10 @@ static void hsw_disable_lcpll(struct intel_display *display, val |= LCPLL_CD_SOURCE_FCLK; intel_de_write(display, LCPLL_CTL, val); - if (wait_for_us(intel_de_read(display, LCPLL_CTL) & - LCPLL_CD_SOURCE_FCLK_DONE, 1)) + ret = intel_de_wait_custom(display, LCPLL_CTL, + LCPLL_CD_SOURCE_FCLK_DONE, LCPLL_CD_SOURCE_FCLK_DONE, + 1, 0, NULL); + if (ret) drm_err(display->drm, "Switching to FCLK failed\n"); val = intel_de_read(display, LCPLL_CTL); @@ -1306,8 +1311,10 @@ static void hsw_disable_lcpll(struct intel_display *display, hsw_write_dcomp(display, val); ndelay(100); - if (wait_for((hsw_read_dcomp(display) & - D_COMP_RCOMP_IN_PROGRESS) == 0, 1)) + ret = poll_timeout_us(val = hsw_read_dcomp(display), + (val & D_COMP_RCOMP_IN_PROGRESS) == 0, + 100, 1000, false); + if (ret) drm_err(display->drm, "D_COMP RCOMP still in progress\n"); if (allow_power_down) { @@ -1324,6 +1331,7 @@ static void hsw_restore_lcpll(struct intel_display *display) { struct drm_i915_private __maybe_unused *dev_priv = to_i915(display->drm); u32 val; + int ret; val = intel_de_read(display, LCPLL_CTL); @@ -1358,8 +1366,10 @@ static void hsw_restore_lcpll(struct intel_display *display) if (val & LCPLL_CD_SOURCE_FCLK) { intel_de_rmw(display, LCPLL_CTL, LCPLL_CD_SOURCE_FCLK, 0); - if (wait_for_us((intel_de_read(display, LCPLL_CTL) & - LCPLL_CD_SOURCE_FCLK_DONE) == 0, 1)) + ret = intel_de_wait_custom(display, LCPLL_CTL, + LCPLL_CD_SOURCE_FCLK_DONE, 0, + 1, 0, NULL); + if (ret) drm_err(display->drm, "Switching back to LCPLL failed\n"); } @@ -2155,8 +2165,6 @@ void intel_power_domains_resume(struct intel_display *display) power_domains->init_wakeref = intel_display_power_get(display, POWER_DOMAIN_INIT); } - - intel_power_domains_verify_state(display); } #if IS_ENABLED(CONFIG_DRM_I915_DEBUG_RUNTIME_PM) diff --git a/drivers/gpu/drm/i915/display/intel_display_power_map.c b/drivers/gpu/drm/i915/display/intel_display_power_map.c index 77268802b55e..39b71fffa2cd 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_map.c +++ 
b/drivers/gpu/drm/i915/display/intel_display_power_map.c @@ -1717,6 +1717,59 @@ static const struct i915_power_well_desc_list xe3lpd_power_wells[] = { I915_PW_DESCRIPTORS(xe2lpd_power_wells_pica), }; +static const struct i915_power_well_desc wcl_power_wells_main[] = { + { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_2", &xe3lpd_pwdoms_pw_2, + .hsw.idx = ICL_PW_CTL_IDX_PW_2, + .id = SKL_DISP_PW_2), + ), + .ops = &hsw_power_well_ops, + .has_vga = true, + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_A", &xelpd_pwdoms_pw_a, + .hsw.idx = XELPD_PW_CTL_IDX_PW_A), + ), + .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_A), + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_B", &xe3lpd_pwdoms_pw_b, + .hsw.idx = XELPD_PW_CTL_IDX_PW_B), + ), + .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_B), + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("PW_C", &xe3lpd_pwdoms_pw_c, + .hsw.idx = XELPD_PW_CTL_IDX_PW_C), + ), + .ops = &hsw_power_well_ops, + .irq_pipe_mask = BIT(PIPE_C), + .has_fuses = true, + }, { + .instances = &I915_PW_INSTANCES( + I915_PW("AUX_A", &icl_pwdoms_aux_a, .xelpdp.aux_ch = AUX_CH_A), + I915_PW("AUX_B", &icl_pwdoms_aux_b, .xelpdp.aux_ch = AUX_CH_B), + I915_PW("AUX_TC1", &xelpdp_pwdoms_aux_tc1, .xelpdp.aux_ch = AUX_CH_USBC1), + I915_PW("AUX_TC2", &xelpdp_pwdoms_aux_tc2, .xelpdp.aux_ch = AUX_CH_USBC2), + ), + .ops = &xelpdp_aux_power_well_ops, + }, +}; + +static const struct i915_power_well_desc_list wcl_power_wells[] = { + I915_PW_DESCRIPTORS(i9xx_power_wells_always_on), + I915_PW_DESCRIPTORS(icl_power_wells_pw_1), + I915_PW_DESCRIPTORS(xe3lpd_power_wells_dcoff), + I915_PW_DESCRIPTORS(wcl_power_wells_main), + I915_PW_DESCRIPTORS(xe2lpd_power_wells_pica), +}; + static void init_power_well_domains(const struct i915_power_well_instance *inst, struct i915_power_well *power_well) { @@ -1824,7 +1877,9 @@ int intel_display_power_map_init(struct i915_power_domains *power_domains) return 0; } - if (DISPLAY_VER(display) >= 30) + if (DISPLAY_VERx100(display) == 3002) + return set_power_wells(power_domains, wcl_power_wells); + else if (DISPLAY_VER(display) >= 30) return set_power_wells(power_domains, xe3lpd_power_wells); else if (DISPLAY_VER(display) >= 20) return set_power_wells(power_domains, xe2lpd_power_wells); diff --git a/drivers/gpu/drm/i915/display/intel_display_power_well.c b/drivers/gpu/drm/i915/display/intel_display_power_well.c index 48cac225a809..5e88b930f5aa 100644 --- a/drivers/gpu/drm/i915/display/intel_display_power_well.c +++ b/drivers/gpu/drm/i915/display/intel_display_power_well.c @@ -3,6 +3,8 @@ * Copyright © 2022 Intel Corporation */ +#include <linux/iopoll.h> + #include "i915_drv.h" #include "i915_irq.h" #include "i915_reg.h" @@ -499,7 +501,6 @@ static void icl_tc_port_assert_ref_held(struct intel_display *display, static void icl_tc_cold_exit(struct intel_display *display) { - struct drm_i915_private *i915 = to_i915(display->drm); int ret, tries = 0; while (1) { @@ -514,7 +515,7 @@ static void icl_tc_cold_exit(struct intel_display *display) msleep(1); /* TODO: turn failure into a error as soon i915 CI updates ICL IFWI */ - drm_dbg_kms(&i915->drm, "TC cold block %s\n", ret ? "failed" : + drm_dbg_kms(display->drm, "TC cold block %s\n", ret ? 
"failed" : "succeeded"); } @@ -527,6 +528,8 @@ icl_tc_phy_aux_power_well_enable(struct intel_display *display, const struct i915_power_well_regs *regs = power_well->desc->ops->regs; bool is_tbt = power_well->desc->is_tc_tbt; bool timeout_expected; + u32 val; + int ret; icl_tc_port_assert_ref_held(display, power_well, dig_port); @@ -553,10 +556,11 @@ icl_tc_phy_aux_power_well_enable(struct intel_display *display, tc_port = TGL_AUX_PW_TO_TC_PORT(i915_power_well_instance(power_well)->hsw.idx); - if (wait_for(intel_dkl_phy_read(display, DKL_CMN_UC_DW_27(tc_port)) & - DKL_CMN_UC_DW27_UC_HEALTH, 1)) - drm_warn(display->drm, - "Timeout waiting TC uC health\n"); + ret = poll_timeout_us(val = intel_dkl_phy_read(display, DKL_CMN_UC_DW_27(tc_port)), + val & DKL_CMN_UC_DW27_UC_HEALTH, + 100, 1000, false); + if (ret) + drm_warn(display->drm, "Timeout waiting TC uC health\n"); } } @@ -1122,6 +1126,8 @@ static void vlv_set_power_well(struct intel_display *display, u32 mask; u32 state; u32 ctrl; + u32 val; + int ret; mask = PUNIT_PWRGT_MASK(pw_idx); state = enable ? PUNIT_PWRGT_PWR_ON(pw_idx) : @@ -1129,10 +1135,8 @@ static void vlv_set_power_well(struct intel_display *display, vlv_punit_get(display->drm); -#define COND \ - ((vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS) & mask) == state) - - if (COND) + val = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS); + if ((val & mask) == state) goto out; ctrl = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL); @@ -1140,14 +1144,15 @@ static void vlv_set_power_well(struct intel_display *display, ctrl |= state; vlv_punit_write(display->drm, PUNIT_REG_PWRGT_CTRL, ctrl); - if (wait_for(COND, 100)) + ret = poll_timeout_us(val = vlv_punit_read(display->drm, PUNIT_REG_PWRGT_STATUS), + (val & mask) == state, + 500, 100 * 1000, false); + if (ret) drm_err(display->drm, "timeout setting power well state %08x (%08x)\n", state, vlv_punit_read(display->drm, PUNIT_REG_PWRGT_CTRL)); -#undef COND - out: vlv_punit_put(display->drm); } @@ -1208,7 +1213,7 @@ static void vlv_init_display_clock_gating(struct intel_display *display) * (and never recovering) in this case. intel_dsi_post_disable() will * clear it when we turn off the display. */ - intel_de_rmw(display, DSPCLK_GATE_D(display), + intel_de_rmw(display, VLV_DSPCLK_GATE_D, ~DPOUNIT_CLOCK_GATE_DISABLE, VRHUNIT_CLOCK_GATE_DISABLE); /* @@ -1711,23 +1716,24 @@ static void chv_set_pipe_power_well(struct intel_display *display, enum pipe pipe = PIPE_A; u32 state; u32 ctrl; + int ret; state = enable ? DP_SSS_PWR_ON(pipe) : DP_SSS_PWR_GATE(pipe); vlv_punit_get(display->drm); -#define COND \ - ((vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM) & DP_SSS_MASK(pipe)) == state) - - if (COND) + ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM); + if ((ctrl & DP_SSS_MASK(pipe)) == state) goto out; - ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM); ctrl &= ~DP_SSC_MASK(pipe); ctrl |= enable ? 
DP_SSC_PWR_ON(pipe) : DP_SSC_PWR_GATE(pipe); vlv_punit_write(display->drm, PUNIT_REG_DSPSSPM, ctrl); - if (wait_for(COND, 100)) + ret = poll_timeout_us(ctrl = vlv_punit_read(display->drm, PUNIT_REG_DSPSSPM), + (ctrl & DP_SSS_MASK(pipe)) == state, + 500, 100 * 1000, false); + if (ret) drm_err(display->drm, "timeout setting power well state %08x (%08x)\n", state, @@ -1765,7 +1771,6 @@ static void chv_pipe_power_well_disable(struct intel_display *display, static void tgl_tc_cold_request(struct intel_display *display, bool block) { - struct drm_i915_private *i915 = to_i915(display->drm); u8 tries = 0; int ret; @@ -1798,10 +1803,9 @@ tgl_tc_cold_request(struct intel_display *display, bool block) } if (ret) - drm_err(&i915->drm, "TC cold %sblock failed\n", - block ? "" : "un"); + drm_err(display->drm, "TC cold %sblock failed\n", block ? "" : "un"); else - drm_dbg_kms(&i915->drm, "TC cold %sblock succeeded\n", + drm_dbg_kms(display->drm, "TC cold %sblock succeeded\n", block ? "" : "un"); } diff --git a/drivers/gpu/drm/i915/display/intel_display_regs.h b/drivers/gpu/drm/i915/display/intel_display_regs.h index 7bd09d981cd2..9d71e26a4fa2 100644 --- a/drivers/gpu/drm/i915/display/intel_display_regs.h +++ b/drivers/gpu/drm/i915/display/intel_display_regs.h @@ -2890,6 +2890,7 @@ enum skl_power_gate { #define DP_PIN_ASSIGNMENT_SHIFT(idx) ((idx) * 4) #define DP_PIN_ASSIGNMENT_MASK(idx) (0xf << ((idx) * 4)) #define DP_PIN_ASSIGNMENT(idx, x) ((x) << ((idx) * 4)) +/* See enum intel_tc_pin_assignment for the pin assignment field values. */ #define _TCSS_DDI_STATUS_1 0x161500 #define _TCSS_DDI_STATUS_2 0x161504 @@ -2897,6 +2898,7 @@ enum skl_power_gate { _TCSS_DDI_STATUS_1, \ _TCSS_DDI_STATUS_2)) #define TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK REG_GENMASK(28, 25) +/* See enum intel_tc_pin_assignment for the pin assignment field values. 
*/ #define TCSS_DDI_STATUS_READY REG_BIT(2) #define TCSS_DDI_STATUS_HPD_LIVE_STATUS_TBT REG_BIT(1) #define TCSS_DDI_STATUS_HPD_LIVE_STATUS_ALT REG_BIT(0) diff --git a/drivers/gpu/drm/i915/display/intel_display_types.h b/drivers/gpu/drm/i915/display/intel_display_types.h index ce45261c4a8f..fd9d2527889b 100644 --- a/drivers/gpu/drm/i915/display/intel_display_types.h +++ b/drivers/gpu/drm/i915/display/intel_display_types.h @@ -50,14 +50,15 @@ #include "intel_display_limits.h" #include "intel_display_power.h" #include "intel_dpll_mgr.h" +#include "intel_dsi_vbt_defs.h" #include "intel_wm_types.h" struct cec_notifier; struct drm_printer; -struct __intel_global_objs_state; struct intel_connector; struct intel_ddi_buf_trans; struct intel_fbc; +struct intel_global_objs_state; struct intel_hdcp_shim; struct intel_tc_port; @@ -593,7 +594,7 @@ struct intel_atomic_state { struct ref_tracker *wakeref; - struct __intel_global_objs_state *global_objs; + struct intel_global_objs_state *global_objs; int num_global_objs; /* Internal commit, as opposed to userspace/client initiated one */ @@ -642,7 +643,6 @@ struct intel_plane_state { #define PLANE_HAS_FENCE BIT(0) struct intel_fb_view view; - u32 phys_dma_addr; /* for cursor_needs_physical */ /* for legacy cursor fb unpin */ struct drm_vblank_work unpin_work; @@ -665,6 +665,9 @@ struct intel_plane_state { /* chroma upsampler control register */ u32 cus_ctl; + /* surface address register */ + u32 surf; + /* * scaler_id * = -1 : not using a scaler @@ -941,10 +944,6 @@ struct intel_csc_matrix { u16 postoff[3]; }; -void intel_io_mmio_fw_write(void *ctx, i915_reg_t reg, u32 val); - -typedef void (*intel_io_reg_write)(void *ctx, i915_reg_t reg, u32 val); - struct intel_crtc_state { /* * uapi (drm) state. This is the software state shown to userspace. @@ -1122,6 +1121,7 @@ struct intel_crtc_state { bool req_psr2_sdp_prior_scanline; bool has_panel_replay; bool wm_level_disabled; + bool pkg_c_latency_used; u32 dc3co_exitline; u16 su_y_granularity; u8 active_non_psr_pipes; @@ -1534,6 +1534,7 @@ struct intel_plane { bool (*get_hw_state)(struct intel_plane *plane, enum pipe *pipe); int (*check_plane)(struct intel_crtc_state *crtc_state, struct intel_plane_state *plane_state); + u32 (*surf_offset)(const struct intel_plane_state *plane_state); int (*min_cdclk)(const struct intel_crtc_state *crtc_state, const struct intel_plane_state *plane_state); void (*async_flip)(struct intel_dsb *dsb, @@ -1683,6 +1684,7 @@ struct intel_psr { u8 entry_setup_frames; bool link_ok; + bool pkg_c_latency_used; u8 active_non_psr_pipes; }; diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.c b/drivers/gpu/drm/i915/display/intel_display_wa.c index f57280e9d041..31cd2c9cd488 100644 --- a/drivers/gpu/drm/i915/display/intel_display_wa.c +++ b/drivers/gpu/drm/i915/display/intel_display_wa.c @@ -3,6 +3,8 @@ * Copyright © 2023 Intel Corporation */ +#include <drm/drm_print.h> + #include "i915_reg.h" #include "intel_de.h" #include "intel_display_core.h" @@ -39,3 +41,36 @@ void intel_display_wa_apply(struct intel_display *display) else if (DISPLAY_VER(display) == 11) gen11_display_wa_apply(display); } + +/* + * Wa_16025573575: + * Fixes: Issue with bitbashing on Xe3 based platforms. + * Workaround: Set mask bits in GPIO CTL and preserve them during the bitbashing sequence. 
+ */ +static bool intel_display_needs_wa_16025573575(struct intel_display *display) +{ + return DISPLAY_VERx100(display) == 3000 || DISPLAY_VERx100(display) == 3002; +} + +/* + * Wa_14011503117: + * Fixes: DE fatal error is masked before enabling the scaler + * Workaround: Unmask the DE fatal error register after enabling the scaler + * and after waiting for at least 1 frame. + */ +bool __intel_display_wa(struct intel_display *display, enum intel_display_wa wa, const char *name) +{ + switch (wa) { + case INTEL_DISPLAY_WA_16023588340: + return intel_display_needs_wa_16023588340(display); + case INTEL_DISPLAY_WA_16025573575: + return intel_display_needs_wa_16025573575(display); + case INTEL_DISPLAY_WA_14011503117: + return DISPLAY_VER(display) == 13; + default: + drm_WARN(display->drm, 1, "Missing Wa number: %s\n", name); + break; + } + + return false; +} diff --git a/drivers/gpu/drm/i915/display/intel_display_wa.h b/drivers/gpu/drm/i915/display/intel_display_wa.h index babd9d16603d..abc1df83f066 100644 --- a/drivers/gpu/drm/i915/display/intel_display_wa.h +++ b/drivers/gpu/drm/i915/display/intel_display_wa.h @@ -21,4 +21,15 @@ static inline bool intel_display_needs_wa_16023588340(struct intel_display *disp bool intel_display_needs_wa_16023588340(struct intel_display *display); #endif +enum intel_display_wa { + INTEL_DISPLAY_WA_16023588340, + INTEL_DISPLAY_WA_16025573575, + INTEL_DISPLAY_WA_14011503117, +}; + +bool __intel_display_wa(struct intel_display *display, enum intel_display_wa wa, const char *name); + +#define intel_display_wa(__display, __wa) \ + __intel_display_wa((__display), INTEL_DISPLAY_WA_##__wa, __stringify(__wa)) + #endif diff --git a/drivers/gpu/drm/i915/display/intel_dmc.c b/drivers/gpu/drm/i915/display/intel_dmc.c index 744f51c0eab8..77a0199f9ea5 100644 --- a/drivers/gpu/drm/i915/display/intel_dmc.c +++ b/drivers/gpu/drm/i915/display/intel_dmc.c @@ -1603,9 +1603,7 @@ DEFINE_SHOW_ATTRIBUTE(intel_dmc_debugfs_status); void intel_dmc_debugfs_register(struct intel_display *display) { - struct drm_minor *minor = display->drm->primary; - - debugfs_create_file("i915_dmc_info", 0444, minor->debugfs_root, + debugfs_create_file("i915_dmc_info", 0444, display->drm->debugfs_root, display, &intel_dmc_debugfs_status_fops); } diff --git a/drivers/gpu/drm/i915/display/intel_dp.c b/drivers/gpu/drm/i915/display/intel_dp.c index 7976fec88606..2eab591a8ef5 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.c +++ b/drivers/gpu/drm/i915/display/intel_dp.c @@ -27,6 +27,7 @@ #include <linux/export.h> #include <linux/i2c.h> +#include <linux/iopoll.h> #include <linux/log2.h> #include <linux/math.h> #include <linux/notifier.h> @@ -174,7 +175,6 @@ int intel_dp_link_symbol_clock(int rate) static int max_dprx_rate(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; int max_rate; if (intel_dp_tunnel_bw_alloc_is_enabled(intel_dp)) @@ -183,16 +183,13 @@ static int max_dprx_rate(struct intel_dp *intel_dp) max_rate = drm_dp_bw_code_to_link_rate(intel_dp->dpcd[DP_MAX_LINK_RATE]); /* - * Some broken eDP sinks illegally declare support for - * HBR3 without TPS4, and are unable to produce a stable - * output. Reject HBR3 when TPS4 is not available. + * Some platforms + eDP panels may not reliably support HBR3 + * due to signal integrity limitations, despite advertising it. + * Cap the link rate to HBR2 to avoid unstable configurations for the + * known machines. 
*/ - if (max_rate >= 810000 && !drm_dp_tps4_supported(intel_dp->dpcd)) { - drm_dbg_kms(display->drm, - "[ENCODER:%d:%s] Rejecting HBR3 due to missing TPS4 support\n", - encoder->base.base.id, encoder->base.name); - max_rate = 540000; - } + if (intel_dp_is_edp(intel_dp) && intel_has_quirk(display, QUIRK_EDP_LIMIT_RATE_HBR2)) + max_rate = min(max_rate, 540000); return max_rate; } @@ -1418,6 +1415,7 @@ intel_dp_mode_valid(struct drm_connector *_connector, struct intel_display *display = to_intel_display(_connector->dev); struct intel_connector *connector = to_intel_connector(_connector); struct intel_dp *intel_dp = intel_attached_dp(connector); + enum intel_output_format sink_format, output_format; const struct drm_display_mode *fixed_mode; int target_clock = mode->clock; int max_rate, mode_rate, max_lanes, max_link_clock; @@ -1451,6 +1449,13 @@ intel_dp_mode_valid(struct drm_connector *_connector, mode->hdisplay, target_clock); max_dotclk *= num_joined_pipes; + sink_format = intel_dp_sink_format(connector, mode); + output_format = intel_dp_output_format(connector, sink_format); + + status = intel_pfit_mode_valid(display, mode, output_format, num_joined_pipes); + if (status != MODE_OK) + return status; + if (target_clock > max_dotclk) return MODE_CLOCK_HIGH; @@ -1466,11 +1471,8 @@ intel_dp_mode_valid(struct drm_connector *_connector, intel_dp_mode_min_output_bpp(connector, mode)); if (intel_dp_has_dsc(connector)) { - enum intel_output_format sink_format, output_format; int pipe_bpp; - sink_format = intel_dp_sink_format(connector, mode); - output_format = intel_dp_output_format(connector, sink_format); /* * TBD pass the connector BPC, * for now U8_MAX so that max BPC on that platform would be picked @@ -2535,13 +2537,15 @@ intel_dp_dsc_compute_pipe_bpp_limits(struct intel_dp *intel_dp, bool intel_dp_compute_config_limits(struct intel_dp *intel_dp, - struct intel_connector *connector, + struct drm_connector_state *conn_state, struct intel_crtc_state *crtc_state, bool respect_downstream_limits, bool dsc, struct link_config_limits *limits) { bool is_mst = intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST); + struct intel_connector *connector = + to_intel_connector(conn_state->connector); limits->min_rate = intel_dp_min_link_rate(intel_dp); limits->max_rate = intel_dp_max_link_rate(intel_dp); @@ -2551,7 +2555,8 @@ intel_dp_compute_config_limits(struct intel_dp *intel_dp, limits->min_lane_count = intel_dp_min_lane_count(intel_dp); limits->max_lane_count = intel_dp_max_lane_count(intel_dp); - limits->pipe.min_bpp = intel_dp_min_bpp(crtc_state->output_format); + limits->pipe.min_bpp = intel_dp_in_hdr_mode(conn_state) ? 
30 : + intel_dp_min_bpp(crtc_state->output_format); if (is_mst) { /* * FIXME: If all the streams can't fit into the link with their @@ -2650,7 +2655,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, joiner_needs_dsc = intel_dp_joiner_needs_dsc(display, num_joined_pipes); dsc_needed = joiner_needs_dsc || intel_dp->force_dsc_en || - !intel_dp_compute_config_limits(intel_dp, connector, pipe_config, + !intel_dp_compute_config_limits(intel_dp, conn_state, pipe_config, respect_downstream_limits, false, &limits); @@ -2684,7 +2689,7 @@ intel_dp_compute_link_config(struct intel_encoder *encoder, str_yes_no(ret), str_yes_no(joiner_needs_dsc), str_yes_no(intel_dp->force_dsc_en)); - if (!intel_dp_compute_config_limits(intel_dp, connector, pipe_config, + if (!intel_dp_compute_config_limits(intel_dp, conn_state, pipe_config, respect_downstream_limits, true, &limits)) @@ -2916,6 +2921,19 @@ static void intel_dp_compute_vsc_sdp(struct intel_dp *intel_dp, } } +bool +intel_dp_in_hdr_mode(const struct drm_connector_state *conn_state) +{ + struct hdr_output_metadata *hdr_metadata; + + if (!conn_state->hdr_output_metadata) + return false; + + hdr_metadata = conn_state->hdr_output_metadata->data; + + return hdr_metadata->hdmi_metadata_type1.eotf == HDMI_EOTF_SMPTE_ST2084; +} + static void intel_dp_compute_hdr_metadata_infoframe_sdp(struct intel_dp *intel_dp, struct intel_crtc_state *crtc_state, @@ -3181,7 +3199,26 @@ int intel_dp_compute_min_hblank(struct intel_crtc_state *crtc_state, */ min_hblank = min_hblank - 2; - min_hblank = min(10, min_hblank); + /* + * The min_hblank formula is undergoing a change; to avoid underruns, compare + * the recommended value from the spec with the calculated one and use the + * minimum value + */ + if (intel_dp_is_uhbr(crtc_state)) { + /* + * Note: Bspec requires a min_hblank of 2 for YCBCR420 + * with compressed bpp 6, but the minimum compressed bpp + * supported by the driver is 8. + */ + drm_WARN_ON(display->drm, + (crtc_state->dsc.compression_enable && + crtc_state->output_format == INTEL_OUTPUT_FORMAT_YCBCR420 && + crtc_state->dsc.compressed_bpp_x16 < fxp_q4_from_int(8))); + min_hblank = min(3, min_hblank); + } else { + min_hblank = min(10, min_hblank); + } + crtc_state->min_hblank = min_hblank; return 0; @@ -3842,10 +3879,11 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp) if (ret < 0) return ret; /* Wait for PCON to be FRL Ready */ - wait_for(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux) == true, TIMEOUT_FRL_READY_MS); - - if (!is_active) - return -ETIMEDOUT; + ret = poll_timeout_us(is_active = drm_dp_pcon_is_frl_ready(&intel_dp->aux), + is_active, + 1000, TIMEOUT_FRL_READY_MS * 1000, false); + if (ret) + return ret; ret = drm_dp_pcon_frl_configure_1(&intel_dp->aux, max_frl_bw, DP_PCON_ENABLE_SEQUENTIAL_LINK); @@ -3862,12 +3900,11 @@ static int intel_dp_pcon_start_frl_training(struct intel_dp *intel_dp) * Wait for FRL to be completed * Check if the HDMI Link is up and active. 
*/ - wait_for(is_active = - intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask), - TIMEOUT_HDMI_LINK_ACTIVE_MS); - - if (!is_active) - return -ETIMEDOUT; + ret = poll_timeout_us(is_active = intel_dp_pcon_is_frl_trained(intel_dp, max_frl_bw_mask, &frl_trained_mask), + is_active, + 1000, TIMEOUT_HDMI_LINK_ACTIVE_MS * 1000, false); + if (ret) + return ret; frl_trained: drm_dbg(display->drm, "FRL_TRAINED_MASK = %u\n", frl_trained_mask); @@ -4277,10 +4314,26 @@ static void intel_edp_mso_init(struct intel_dp *intel_dp) } static void +intel_edp_set_data_override_rates(struct intel_dp *intel_dp) +{ + struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + int *sink_rates = intel_dp->sink_rates; + int i, count = 0; + + for (i = 0; i < intel_dp->num_sink_rates; i++) { + if (intel_bios_encoder_reject_edp_rate(encoder->devdata, + intel_dp->sink_rates[i])) + continue; + + sink_rates[count++] = intel_dp->sink_rates[i]; + } + intel_dp->num_sink_rates = count; +} + +static void intel_edp_set_sink_rates(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; intel_dp->num_sink_rates = 0; @@ -4306,16 +4359,13 @@ intel_edp_set_sink_rates(struct intel_dp *intel_dp) break; /* - * Some broken eDP sinks illegally declare support for - * HBR3 without TPS4, and are unable to produce a stable - * output. Reject HBR3 when TPS4 is not available. + * Some platforms cannot reliably drive HBR3 rates due to PHY limitations, + * even if the sink advertises support. Reject any sink rates above HBR2 on + * the known machines for stable output. */ - if (rate >= 810000 && !drm_dp_tps4_supported(intel_dp->dpcd)) { - drm_dbg_kms(display->drm, - "[ENCODER:%d:%s] Rejecting HBR3 due to missing TPS4 support\n", - encoder->base.base.id, encoder->base.name); + if (rate > 540000 && + intel_has_quirk(display, QUIRK_EDP_LIMIT_RATE_HBR2)) break; - } intel_dp->sink_rates[i] = rate; } @@ -4330,6 +4380,8 @@ intel_edp_set_sink_rates(struct intel_dp *intel_dp) intel_dp->use_rate_select = true; else intel_dp_set_sink_rates(intel_dp); + + intel_edp_set_data_override_rates(intel_dp); } static bool @@ -5611,14 +5663,9 @@ bool intel_digital_port_connected_locked(struct intel_encoder *encoder) intel_wakeref_t wakeref; with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) { - unsigned long wait_expires = jiffies + msecs_to_jiffies_timeout(4); - - do { - is_connected = dig_port->connected(encoder); - if (is_connected || is_glitch_free) - break; - usleep_range(10, 30); - } while (time_before(jiffies, wait_expires)); + poll_timeout_us(is_connected = dig_port->connected(encoder), + is_connected || is_glitch_free, + 30, 4000, false); } return is_connected; diff --git a/drivers/gpu/drm/i915/display/intel_dp.h b/drivers/gpu/drm/i915/display/intel_dp.h index 0657f5681196..f90cfd1dbbd0 100644 --- a/drivers/gpu/drm/i915/display/intel_dp.h +++ b/drivers/gpu/drm/i915/display/intel_dp.h @@ -193,7 +193,7 @@ void intel_dp_wait_source_oui(struct intel_dp *intel_dp); int intel_dp_output_bpp(enum intel_output_format output_format, int bpp); bool intel_dp_compute_config_limits(struct intel_dp *intel_dp, - struct intel_connector *connector, + struct drm_connector_state *conn_state, struct intel_crtc_state *crtc_state, bool respect_downstream_limits, bool dsc, @@ -214,5 +214,6 @@ int intel_dp_compute_min_hblank(struct intel_crtc_state *crtc_state, int intel_dp_dsc_bpp_step_x16(const struct intel_connector *connector); void 
intel_dp_dpcd_set_probe(struct intel_dp *intel_dp, bool force_on_external); +bool intel_dp_in_hdr_mode(const struct drm_connector_state *conn_state); #endif /* __INTEL_DP_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c index 41228478b21c..12084a542fc5 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c +++ b/drivers/gpu/drm/i915/display/intel_dp_aux_backlight.c @@ -225,19 +225,6 @@ intel_dp_aux_hdr_set_aux_backlight(const struct drm_connector_state *conn_state, connector->base.base.id, connector->base.name); } -static bool -intel_dp_in_hdr_mode(const struct drm_connector_state *conn_state) -{ - struct hdr_output_metadata *hdr_metadata; - - if (!conn_state->hdr_output_metadata) - return false; - - hdr_metadata = conn_state->hdr_output_metadata->data; - - return hdr_metadata->hdmi_metadata_type1.eotf == HDMI_EOTF_SMPTE_ST2084; -} - static void intel_dp_aux_hdr_set_backlight(const struct drm_connector_state *conn_state, u32 level) { diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.c b/drivers/gpu/drm/i915/display/intel_dp_link_training.c index a479b63112ea..27f3716bdc1f 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.c +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.c @@ -22,6 +22,7 @@ */ #include <linux/debugfs.h> +#include <linux/iopoll.h> #include <drm/display/drm_dp_helper.h> #include <drm/drm_print.h> @@ -478,12 +479,13 @@ static u8 intel_dp_get_lane_adjust_train(struct intel_dp *intel_dp, _TRAIN_REQ_TX_FFE_ARGS(link_status, 2), \ _TRAIN_REQ_TX_FFE_ARGS(link_status, 3) -void +bool intel_dp_get_adjust_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy, const u8 link_status[DP_LINK_STATUS_SIZE]) { + bool changed = false; int lane; if (intel_dp_is_uhbr(crtc_state)) { @@ -502,10 +504,17 @@ intel_dp_get_adjust_train(struct intel_dp *intel_dp, TRAIN_REQ_PREEMPH_ARGS(link_status)); } - for (lane = 0; lane < 4; lane++) - intel_dp->train_set[lane] = - intel_dp_get_lane_adjust_train(intel_dp, crtc_state, - dp_phy, link_status, lane); + for (lane = 0; lane < 4; lane++) { + u8 new = intel_dp_get_lane_adjust_train(intel_dp, crtc_state, + dp_phy, link_status, lane); + if (intel_dp->train_set[lane] == new) + continue; + + intel_dp->train_set[lane] = new; + changed = true; + } + + return changed; } static int intel_dp_training_pattern_set_reg(struct intel_dp *intel_dp, @@ -758,6 +767,63 @@ void intel_dp_link_training_set_bw(struct intel_dp *intel_dp, } } +/* + * Pick Training Pattern Sequence (TPS) for channel equalization. 128b/132b TPS2 + * for UHBR+, TPS4 for HBR3 or for 1.4 devices that support it, TPS3 for HBR2 or + * 1.2 devices that support it, TPS2 otherwise. + */ +static u32 intel_dp_training_pattern(struct intel_dp *intel_dp, + const struct intel_crtc_state *crtc_state, + enum drm_dp_phy dp_phy) +{ + struct intel_display *display = to_intel_display(intel_dp); + bool source_tps3, sink_tps3, source_tps4, sink_tps4; + + /* UHBR+ use separate 128b/132b TPS2 */ + if (intel_dp_is_uhbr(crtc_state)) + return DP_TRAINING_PATTERN_2; + + /* + * TPS4 support is mandatory for all downstream devices that + * support HBR3. There are no known eDP panels that support + * TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 specification. + * LTTPRs must support TPS4. 
+ */ + source_tps4 = intel_dp_source_supports_tps4(display); + sink_tps4 = dp_phy != DP_PHY_DPRX || + drm_dp_tps4_supported(intel_dp->dpcd); + if (source_tps4 && sink_tps4) { + return DP_TRAINING_PATTERN_4; + } else if (crtc_state->port_clock == 810000) { + if (!source_tps4) + lt_dbg(intel_dp, dp_phy, + "8.1 Gbps link rate without source TPS4 support\n"); + if (!sink_tps4) + lt_dbg(intel_dp, dp_phy, + "8.1 Gbps link rate without sink TPS4 support\n"); + } + + /* + * TPS3 support is mandatory for downstream devices that + * support HBR2. However, not all sinks follow the spec. + */ + source_tps3 = intel_dp_source_supports_tps3(display); + sink_tps3 = dp_phy != DP_PHY_DPRX || + drm_dp_tps3_supported(intel_dp->dpcd); + if (source_tps3 && sink_tps3) { + return DP_TRAINING_PATTERN_3; + } else if (crtc_state->port_clock >= 540000) { + if (!source_tps3) + lt_dbg(intel_dp, dp_phy, + ">=5.4/6.48 Gbps link rate without source TPS3 support\n"); + if (!sink_tps3) + lt_dbg(intel_dp, dp_phy, + ">=5.4/6.48 Gbps link rate without sink TPS3 support\n"); + } + + return DP_TRAINING_PATTERN_2; +} + static void intel_dp_update_link_bw_set(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, u8 link_bw, u8 rate_select) @@ -950,63 +1016,6 @@ intel_dp_link_training_clock_recovery(struct intel_dp *intel_dp, } /* - * Pick Training Pattern Sequence (TPS) for channel equalization. 128b/132b TPS2 - * for UHBR+, TPS4 for HBR3 or for 1.4 devices that support it, TPS3 for HBR2 or - * 1.2 devices that support it, TPS2 otherwise. - */ -static u32 intel_dp_training_pattern(struct intel_dp *intel_dp, - const struct intel_crtc_state *crtc_state, - enum drm_dp_phy dp_phy) -{ - struct intel_display *display = to_intel_display(intel_dp); - bool source_tps3, sink_tps3, source_tps4, sink_tps4; - - /* UHBR+ use separate 128b/132b TPS2 */ - if (intel_dp_is_uhbr(crtc_state)) - return DP_TRAINING_PATTERN_2; - - /* - * TPS4 support is mandatory for all downstream devices that - * support HBR3. There are no known eDP panels that support - * TPS4 as of Feb 2018 as per VESA eDP_v1.4b_E1 specification. - * LTTPRs must support TPS4. - */ - source_tps4 = intel_dp_source_supports_tps4(display); - sink_tps4 = dp_phy != DP_PHY_DPRX || - drm_dp_tps4_supported(intel_dp->dpcd); - if (source_tps4 && sink_tps4) { - return DP_TRAINING_PATTERN_4; - } else if (crtc_state->port_clock == 810000) { - if (!source_tps4) - lt_dbg(intel_dp, dp_phy, - "8.1 Gbps link rate without source TPS4 support\n"); - if (!sink_tps4) - lt_dbg(intel_dp, dp_phy, - "8.1 Gbps link rate without sink TPS4 support\n"); - } - - /* - * TPS3 support is mandatory for downstream devices that - * support HBR2. However, not all sinks follow the spec. - */ - source_tps3 = intel_dp_source_supports_tps3(display); - sink_tps3 = dp_phy != DP_PHY_DPRX || - drm_dp_tps3_supported(intel_dp->dpcd); - if (source_tps3 && sink_tps3) { - return DP_TRAINING_PATTERN_3; - } else if (crtc_state->port_clock >= 540000) { - if (!source_tps3) - lt_dbg(intel_dp, dp_phy, - ">=5.4/6.48 Gbps link rate without source TPS3 support\n"); - if (!sink_tps3) - lt_dbg(intel_dp, dp_phy, - ">=5.4/6.48 Gbps link rate without sink TPS3 support\n"); - } - - return DP_TRAINING_PATTERN_2; -} - -/* * Perform the link training channel equalization phase on the given DP PHY * using one of training pattern 2, 3 or 4 depending on the source and * sink capabilities. 
@@ -1127,16 +1136,19 @@ void intel_dp_stop_link_train(struct intel_dp *intel_dp, { struct intel_display *display = to_intel_display(intel_dp); struct intel_encoder *encoder = &dp_to_dig_port(intel_dp)->base; + int ret; intel_dp->link.active = true; - intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX); intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX, DP_TRAINING_PATTERN_DISABLE); - if (intel_dp_is_uhbr(crtc_state) && - wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) { - lt_dbg(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clearing\n"); + if (intel_dp_is_uhbr(crtc_state)) { + ret = poll_timeout_us(ret = intel_dp_128b132b_intra_hop(intel_dp, crtc_state), + ret == 0, + 500, 500 * 1000, false); + if (ret) + lt_dbg(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clearing\n"); } intel_hpd_unblock(encoder); @@ -1371,8 +1383,8 @@ intel_dp_link_train_all_phys(struct intel_dp *intel_dp, if (ret) ret = intel_dp_link_train_phy(intel_dp, crtc_state, DP_PHY_DPRX); - if (intel_dp->set_idle_link_train) - intel_dp->set_idle_link_train(intel_dp, crtc_state); + intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX); + intel_dp->set_idle_link_train(intel_dp, crtc_state); return ret; } @@ -1574,8 +1586,12 @@ intel_dp_128b132b_link_train(struct intel_dp *intel_dp, int lttpr_count) { bool passed = false; + int ret; - if (wait_for(intel_dp_128b132b_intra_hop(intel_dp, crtc_state) == 0, 500)) { + ret = poll_timeout_us(ret = intel_dp_128b132b_intra_hop(intel_dp, crtc_state), + ret == 0, + 500, 500 * 1000, false); + if (ret) { lt_err(intel_dp, DP_PHY_DPRX, "128b/132b intra-hop not clear\n"); goto out; } @@ -1602,6 +1618,8 @@ out: intel_dp_program_link_training_pattern(intel_dp, crtc_state, DP_PHY_DPRX, DP_TRAINING_PATTERN_2); + intel_dp_disable_dpcd_training_pattern(intel_dp, DP_PHY_DPRX); + return passed; } diff --git a/drivers/gpu/drm/i915/display/intel_dp_link_training.h b/drivers/gpu/drm/i915/display/intel_dp_link_training.h index 46614124569f..1ba22ed6db08 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_link_training.h +++ b/drivers/gpu/drm/i915/display/intel_dp_link_training.h @@ -23,7 +23,7 @@ void intel_dp_link_training_set_bw(struct intel_dp *intel_dp, int link_bw, int rate_select, int lane_count, bool enhanced_framing); -void intel_dp_get_adjust_train(struct intel_dp *intel_dp, +bool intel_dp_get_adjust_train(struct intel_dp *intel_dp, const struct intel_crtc_state *crtc_state, enum drm_dp_phy dp_phy, const u8 link_status[DP_LINK_STATUS_SIZE]); diff --git a/drivers/gpu/drm/i915/display/intel_dp_mst.c b/drivers/gpu/drm/i915/display/intel_dp_mst.c index 74497c9a0554..352f7ef29c28 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_mst.c +++ b/drivers/gpu/drm/i915/display/intel_dp_mst.c @@ -611,12 +611,15 @@ adjust_limits_for_dsc_hblank_expansion_quirk(struct intel_dp *intel_dp, static bool mst_stream_compute_config_limits(struct intel_dp *intel_dp, - struct intel_connector *connector, + struct drm_connector_state *conn_state, struct intel_crtc_state *crtc_state, bool dsc, struct link_config_limits *limits) { - if (!intel_dp_compute_config_limits(intel_dp, connector, + struct intel_connector *connector = + to_intel_connector(conn_state->connector); + + if (!intel_dp_compute_config_limits(intel_dp, conn_state, crtc_state, false, dsc, limits)) return false; @@ -665,7 +668,7 @@ static int mst_stream_compute_config(struct intel_encoder *encoder, joiner_needs_dsc = intel_dp_joiner_needs_dsc(display, num_joined_pipes); dsc_needed = 
joiner_needs_dsc || intel_dp->force_dsc_en || - !mst_stream_compute_config_limits(intel_dp, connector, + !mst_stream_compute_config_limits(intel_dp, conn_state, pipe_config, false, &limits); if (!dsc_needed) { @@ -691,7 +694,7 @@ static int mst_stream_compute_config(struct intel_encoder *encoder, str_yes_no(intel_dp->force_dsc_en)); - if (!mst_stream_compute_config_limits(intel_dp, connector, + if (!mst_stream_compute_config_limits(intel_dp, conn_state, pipe_config, true, &limits)) return -EINVAL; diff --git a/drivers/gpu/drm/i915/display/intel_dp_test.c b/drivers/gpu/drm/i915/display/intel_dp_test.c index 6ed5012c5fac..5cfa1dd411da 100644 --- a/drivers/gpu/drm/i915/display/intel_dp_test.c +++ b/drivers/gpu/drm/i915/display/intel_dp_test.c @@ -6,7 +6,6 @@ #include <drm/display/drm_dp.h> #include <drm/display/drm_dp_helper.h> #include <drm/drm_edid.h> -#include <drm/drm_file.h> #include <drm/drm_print.h> #include <drm/drm_probe_helper.h> @@ -753,13 +752,12 @@ static const struct { void intel_dp_test_debugfs_register(struct intel_display *display) { - struct drm_minor *minor = display->drm->primary; int i; for (i = 0; i < ARRAY_SIZE(intel_display_debugfs_files); i++) { debugfs_create_file(intel_display_debugfs_files[i].name, 0644, - minor->debugfs_root, + display->drm->debugfs_root, display, intel_display_debugfs_files[i].fops); } diff --git a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c index 33e0398120c8..8ea96cc524a1 100644 --- a/drivers/gpu/drm/i915/display/intel_dpll_mgr.c +++ b/drivers/gpu/drm/i915/display/intel_dpll_mgr.c @@ -2046,6 +2046,7 @@ static void bxt_ddi_pll_enable(struct intel_display *display, enum dpio_phy phy = DPIO_PHY0; enum dpio_channel ch = DPIO_CH0; u32 temp; + int ret; bxt_port_to_phy_channel(display, port, &phy, &ch); @@ -2056,8 +2057,10 @@ static void bxt_ddi_pll_enable(struct intel_display *display, intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_POWER_ENABLE); - if (wait_for_us((intel_de_read(display, BXT_PORT_PLL_ENABLE(port)) & - PORT_PLL_POWER_STATE), 200)) + ret = intel_de_wait_custom(display, BXT_PORT_PLL_ENABLE(port), + PORT_PLL_POWER_STATE, PORT_PLL_POWER_STATE, + 200, 0, NULL); + if (ret) drm_err(display->drm, "Power state not set for PLL:%d\n", port); } @@ -2119,8 +2122,10 @@ static void bxt_ddi_pll_enable(struct intel_display *display, intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), 0, PORT_PLL_ENABLE); intel_de_posting_read(display, BXT_PORT_PLL_ENABLE(port)); - if (wait_for_us((intel_de_read(display, BXT_PORT_PLL_ENABLE(port)) & PORT_PLL_LOCK), - 200)) + ret = intel_de_wait_custom(display, BXT_PORT_PLL_ENABLE(port), + PORT_PLL_LOCK, PORT_PLL_LOCK, + 200, 0, NULL); + if (ret) drm_err(display->drm, "PLL %d not locked\n", port); if (display->platform.geminilake) { @@ -2144,6 +2149,7 @@ static void bxt_ddi_pll_disable(struct intel_display *display, struct intel_dpll *pll) { enum port port = (enum port)pll->info->id; /* 1:1 port->PLL mapping */ + int ret; intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), PORT_PLL_ENABLE, 0); intel_de_posting_read(display, BXT_PORT_PLL_ENABLE(port)); @@ -2152,8 +2158,10 @@ static void bxt_ddi_pll_disable(struct intel_display *display, intel_de_rmw(display, BXT_PORT_PLL_ENABLE(port), PORT_PLL_POWER_ENABLE, 0); - if (wait_for_us(!(intel_de_read(display, BXT_PORT_PLL_ENABLE(port)) & - PORT_PLL_POWER_STATE), 200)) + ret = intel_de_wait_custom(display, BXT_PORT_PLL_ENABLE(port), + PORT_PLL_POWER_STATE, 0, + 200, 0, NULL); + if (ret) drm_err(display->drm, "Power state 
not reset for PLL:%d\n", port); } diff --git a/drivers/gpu/drm/i915/display/intel_dpt.c b/drivers/gpu/drm/i915/display/intel_dpt.c index aea249e2699f..c0a817018d08 100644 --- a/drivers/gpu/drm/i915/display/intel_dpt.c +++ b/drivers/gpu/drm/i915/display/intel_dpt.c @@ -33,8 +33,6 @@ i915_vm_to_dpt(struct i915_address_space *vm) return container_of(vm, struct i915_dpt, vm); } -#define dpt_total_entries(dpt) ((dpt)->vm.total >> PAGE_SHIFT) - static void gen8_set_pte(void __iomem *addr, gen8_pte_t pte) { writeq(pte, addr); @@ -322,5 +320,5 @@ void intel_dpt_destroy(struct i915_address_space *vm) u64 intel_dpt_offset(struct i915_vma *dpt_vma) { - return dpt_vma->node.start; + return i915_vma_offset(dpt_vma); } diff --git a/drivers/gpu/drm/i915/display/intel_dsb.c b/drivers/gpu/drm/i915/display/intel_dsb.c index 53d8ae3a70e9..dee44d45b668 100644 --- a/drivers/gpu/drm/i915/display/intel_dsb.c +++ b/drivers/gpu/drm/i915/display/intel_dsb.c @@ -4,10 +4,11 @@ * */ +#include <linux/iopoll.h> + #include <drm/drm_print.h> #include <drm/drm_vblank.h> -#include "i915_utils.h" #include "intel_crtc.h" #include "intel_de.h" #include "intel_display_regs.h" @@ -871,8 +872,13 @@ void intel_dsb_wait(struct intel_dsb *dsb) struct intel_crtc *crtc = dsb->crtc; struct intel_display *display = to_intel_display(crtc->base.dev); enum pipe pipe = crtc->pipe; + bool is_busy; + int ret; - if (wait_for(!is_dsb_busy(display, pipe, dsb->id), 1)) { + ret = poll_timeout_us(is_busy = is_dsb_busy(display, pipe, dsb->id), + !is_busy, + 100, 1000, false); + if (ret) { u32 offset = intel_dsb_buffer_ggtt_offset(&dsb->dsb_buf); intel_de_write_fw(display, DSB_CTRL(pipe, dsb->id), diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c index e6a851d276f8..23402408e172 100644 --- a/drivers/gpu/drm/i915/display/intel_dsi_vbt.c +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt.c @@ -777,7 +777,7 @@ bool intel_dsi_vbt_init(struct intel_dsi *intel_dsi, u16 panel_id) intel_dsi->init_count = mipi_config->master_init_timer; intel_dsi->bw_timer = mipi_config->dbi_bw_timer; intel_dsi->video_frmt_cfg_bits = - mipi_config->bta_enabled ? DISABLE_VIDEO_BTA : 0; + mipi_config->bta_disable ? DISABLE_VIDEO_BTA : 0; intel_dsi->bgr_enabled = mipi_config->rgb_flip; /* Starting point, adjusted depending on dual link and burst mode */ diff --git a/drivers/gpu/drm/i915/display/intel_dsi_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_dsi_vbt_defs.h new file mode 100644 index 000000000000..edc7331dcca2 --- /dev/null +++ b/drivers/gpu/drm/i915/display/intel_dsi_vbt_defs.h @@ -0,0 +1,197 @@ +/* SPDX-License-Identifier: MIT */ +/* Copyright © 2025 Intel Corporation */ + +#ifndef __INTEL_DSI_VBT_DEFS_H__ +#define __INTEL_DSI_VBT_DEFS_H__ + +#include <linux/types.h> + +/* + * MIPI Sequence Block definitions + * + * Note the VBT spec has AssertReset / DeassertReset swapped from their + * usual naming, we use the proper names here to avoid confusion when + * reading the code. 
+ */ +enum mipi_seq { + MIPI_SEQ_END = 0, + MIPI_SEQ_DEASSERT_RESET, /* Spec says MipiAssertResetPin */ + MIPI_SEQ_INIT_OTP, + MIPI_SEQ_DISPLAY_ON, + MIPI_SEQ_DISPLAY_OFF, + MIPI_SEQ_ASSERT_RESET, /* Spec says MipiDeassertResetPin */ + MIPI_SEQ_BACKLIGHT_ON, /* sequence block v2+ */ + MIPI_SEQ_BACKLIGHT_OFF, /* sequence block v2+ */ + MIPI_SEQ_TEAR_ON, /* sequence block v2+ */ + MIPI_SEQ_TEAR_OFF, /* sequence block v3+ */ + MIPI_SEQ_POWER_ON, /* sequence block v3+ */ + MIPI_SEQ_POWER_OFF, /* sequence block v3+ */ + MIPI_SEQ_MAX +}; + +enum mipi_seq_element { + MIPI_SEQ_ELEM_END = 0, + MIPI_SEQ_ELEM_SEND_PKT, + MIPI_SEQ_ELEM_DELAY, + MIPI_SEQ_ELEM_GPIO, + MIPI_SEQ_ELEM_I2C, /* sequence block v2+ */ + MIPI_SEQ_ELEM_SPI, /* sequence block v3+ */ + MIPI_SEQ_ELEM_PMIC, /* sequence block v3+ */ + MIPI_SEQ_ELEM_MAX +}; + +#define MIPI_DSI_UNDEFINED_PANEL_ID 0 +#define MIPI_DSI_GENERIC_PANEL_ID 1 + +struct mipi_config { + u16 panel_id; + + /* General Params */ + struct { + u32 enable_dithering:1; + u32 rsvd1:1; + u32 is_bridge:1; + + u32 panel_arch_type:2; + u32 is_cmd_mode:1; + +#define NON_BURST_SYNC_PULSE 0x1 +#define NON_BURST_SYNC_EVENTS 0x2 +#define BURST_MODE 0x3 + u32 video_transfer_mode:2; + + u32 cabc_supported:1; +#define PPS_BLC_PMIC 0 +#define PPS_BLC_SOC 1 + u32 pwm_blc:1; + +#define PIXEL_FORMAT_RGB565 0x1 +#define PIXEL_FORMAT_RGB666 0x2 +#define PIXEL_FORMAT_RGB666_LOOSELY_PACKED 0x3 +#define PIXEL_FORMAT_RGB888 0x4 + u32 videomode_color_format:4; + +#define ENABLE_ROTATION_0 0x0 +#define ENABLE_ROTATION_90 0x1 +#define ENABLE_ROTATION_180 0x2 +#define ENABLE_ROTATION_270 0x3 + u32 rotation:2; + u32 bta_disable:1; + u32 rsvd2:15; + } __packed; + + /* Port Desc */ + struct { +#define DUAL_LINK_NOT_SUPPORTED 0 +#define DUAL_LINK_FRONT_BACK 1 +#define DUAL_LINK_PIXEL_ALT 2 + u16 dual_link:2; + u16 lane_cnt:2; + u16 pixel_overlap:3; + u16 rgb_flip:1; +#define DL_DCS_PORT_A 0x00 +#define DL_DCS_PORT_C 0x01 +#define DL_DCS_PORT_A_AND_C 0x02 + u16 dl_dcs_cabc_ports:2; + u16 dl_dcs_backlight_ports:2; + u16 port_sync:1; /* 219-230 */ + u16 rsvd3:3; + } __packed; + + /* DSI Controller Parameters */ + struct { + u16 dsi_usage:1; + u16 rsvd4:15; + } __packed; + + u8 rsvd5; + u32 target_burst_mode_freq; + u32 dsi_ddr_clk; + u32 bridge_ref_clk; + + /* LP Byte Clock */ + struct { +#define BYTE_CLK_SEL_20MHZ 0 +#define BYTE_CLK_SEL_10MHZ 1 +#define BYTE_CLK_SEL_5MHZ 2 + u8 byte_clk_sel:2; + u8 rsvd6:6; + } __packed; + + /* DPhy Flags */ + struct { + u16 dphy_param_valid:1; + u16 eot_pkt_disabled:1; + u16 enable_clk_stop:1; + u16 blanking_packets_during_bllp:1; /* 219+ */ + u16 lp_clock_during_lpm:1; /* 219+ */ + u16 rsvd7:11; + } __packed; + + u32 hs_tx_timeout; + u32 lp_rx_timeout; + u32 turn_around_timeout; + u32 device_reset_timer; + u32 master_init_timer; + u32 dbi_bw_timer; + u32 lp_byte_clk_val; + + /* DPhy Params */ + struct { + u32 prepare_cnt:6; + u32 rsvd8:2; + u32 clk_zero_cnt:8; + u32 trail_cnt:5; + u32 rsvd9:3; + u32 exit_zero_cnt:6; + u32 rsvd10:2; + } __packed; + + u32 clk_lane_switch_cnt; + u32 hl_switch_cnt; + + u32 rsvd11[6]; + + /* timings based on dphy spec */ + u8 tclk_miss; + u8 tclk_post; + u8 rsvd12; + u8 tclk_pre; + u8 tclk_prepare; + u8 tclk_settle; + u8 tclk_term_enable; + u8 tclk_trail; + u16 tclk_prepare_clkzero; + u8 rsvd13; + u8 td_term_enable; + u8 teot; + u8 ths_exit; + u8 ths_prepare; + u16 ths_prepare_hszero; + u8 rsvd14; + u8 ths_settle; + u8 ths_skip; + u8 ths_trail; + u8 tinit; + u8 tlpx; + u8 rsvd15[3]; + + /* GPIOs */ + u8 panel_enable; + u8 bl_enable; + u8 
pwm_enable; + u8 reset_r_n; + u8 pwr_down_r; + u8 stdby_r_n; +} __packed; + +/* all delays have a unit of 100us */ +struct mipi_pps_data { + u16 panel_on_delay; + u16 bl_enable_delay; + u16 bl_disable_delay; + u16 panel_off_delay; + u16 panel_power_cycle_delay; +} __packed; + +#endif /* __INTEL_DSI_VBT_DEFS_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_encoder.c b/drivers/gpu/drm/i915/display/intel_encoder.c index 0b7bd26f4339..2ffe1f251ef8 100644 --- a/drivers/gpu/drm/i915/display/intel_encoder.c +++ b/drivers/gpu/drm/i915/display/intel_encoder.c @@ -8,6 +8,7 @@ #include "intel_display_core.h" #include "intel_display_types.h" #include "intel_encoder.h" +#include "intel_hotplug.h" static void intel_encoder_link_check_work_fn(struct work_struct *work) { @@ -37,6 +38,28 @@ void intel_encoder_link_check_queue_work(struct intel_encoder *encoder, int dela &encoder->link_check_work, msecs_to_jiffies(delay_ms)); } +void intel_encoder_unblock_all_hpds(struct intel_display *display) +{ + struct intel_encoder *encoder; + + if (!HAS_DISPLAY(display)) + return; + + for_each_intel_encoder(display->drm, encoder) + intel_hpd_unblock(encoder); +} + +void intel_encoder_block_all_hpds(struct intel_display *display) +{ + struct intel_encoder *encoder; + + if (!HAS_DISPLAY(display)) + return; + + for_each_intel_encoder(display->drm, encoder) + intel_hpd_block(encoder); +} + void intel_encoder_suspend_all(struct intel_display *display) { struct intel_encoder *encoder; @@ -80,3 +103,21 @@ void intel_encoder_shutdown_all(struct intel_display *display) if (encoder->shutdown_complete) encoder->shutdown_complete(encoder); } + +struct intel_digital_port *intel_dig_port_alloc(void) +{ + struct intel_digital_port *dig_port; + + dig_port = kzalloc(sizeof(*dig_port), GFP_KERNEL); + if (!dig_port) + return NULL; + + dig_port->hdmi.hdmi_reg = INVALID_MMIO_REG; + dig_port->dp.output_reg = INVALID_MMIO_REG; + dig_port->aux_ch = AUX_CH_NONE; + dig_port->max_lanes = 4; + + mutex_init(&dig_port->hdcp.mutex); + + return dig_port; +} diff --git a/drivers/gpu/drm/i915/display/intel_encoder.h b/drivers/gpu/drm/i915/display/intel_encoder.h index 3fa5589f0b1c..ace0fe1a8f27 100644 --- a/drivers/gpu/drm/i915/display/intel_encoder.h +++ b/drivers/gpu/drm/i915/display/intel_encoder.h @@ -6,6 +6,7 @@ #ifndef __INTEL_ENCODER_H__ #define __INTEL_ENCODER_H__ +struct intel_digital_port; struct intel_display; struct intel_encoder; @@ -17,4 +18,9 @@ void intel_encoder_link_check_flush_work(struct intel_encoder *encoder); void intel_encoder_suspend_all(struct intel_display *display); void intel_encoder_shutdown_all(struct intel_display *display); +void intel_encoder_block_all_hpds(struct intel_display *display); +void intel_encoder_unblock_all_hpds(struct intel_display *display); + +struct intel_digital_port *intel_dig_port_alloc(void); + #endif /* __INTEL_ENCODER_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_fb.c b/drivers/gpu/drm/i915/display/intel_fb.c index 0da842bd2f2f..b210c3250501 100644 --- a/drivers/gpu/drm/i915/display/intel_fb.c +++ b/drivers/gpu/drm/i915/display/intel_fb.c @@ -11,6 +11,7 @@ #include <drm/drm_modeset_helper.h> #include "i915_drv.h" +#include "i915_utils.h" #include "intel_bo.h" #include "intel_display.h" #include "intel_display_core.h" diff --git a/drivers/gpu/drm/i915/display/intel_fb_pin.c b/drivers/gpu/drm/i915/display/intel_fb_pin.c index 5a0151775a3a..45af04cb0fb2 100644 --- a/drivers/gpu/drm/i915/display/intel_fb_pin.c +++ b/drivers/gpu/drm/i915/display/intel_fb_pin.c @@ -11,6 +11,7 @@ 
#include "gem/i915_gem_object.h" #include "i915_drv.h" +#include "i915_vma.h" #include "intel_display_core.h" #include "intel_display_rpm.h" #include "intel_display_types.h" @@ -151,7 +152,7 @@ intel_fb_pin_to_ggtt(const struct drm_framebuffer *fb, * happy to scanout from anywhere within its global aperture. */ pinctl = 0; - if (HAS_GMCH(dev_priv)) + if (HAS_GMCH(display)) pinctl |= PIN_MAPPABLE; i915_gem_ww_ctx_init(&ww, true); @@ -192,7 +193,7 @@ retry: * mode that matches the user configuration. */ ret = i915_vma_pin_fence(vma); - if (ret != 0 && DISPLAY_VER(dev_priv) < 4) { + if (ret != 0 && DISPLAY_VER(display) < 4) { i915_vma_unpin(vma); goto err_unpin; } @@ -260,6 +261,7 @@ intel_plane_fb_vtd_guard(const struct intel_plane_state *plane_state) int intel_plane_pin_fb(struct intel_plane_state *plane_state, const struct intel_plane_state *old_plane_state) { + struct intel_display *display = to_intel_display(plane_state); struct intel_plane *plane = to_intel_plane(plane_state->uapi.plane); const struct intel_framebuffer *fb = to_intel_framebuffer(plane_state->hw.fb); @@ -277,17 +279,6 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state, plane_state->ggtt_vma = vma; - /* - * Pre-populate the dma address before we enter the vblank - * evade critical section as i915_gem_object_get_dma_address() - * will trigger might_sleep() even if it won't actually sleep, - * which is the case when the fb has already been pinned. - */ - if (intel_plane_needs_physical(plane)) { - struct drm_i915_gem_object *obj = to_intel_bo(intel_fb_bo(&fb->base)); - - plane_state->phys_dma_addr = i915_gem_object_get_dma_address(obj, 0); - } } else { unsigned int alignment = intel_plane_fb_min_alignment(plane_state); @@ -309,6 +300,28 @@ int intel_plane_pin_fb(struct intel_plane_state *plane_state, plane_state->dpt_vma = vma; WARN_ON(plane_state->ggtt_vma == plane_state->dpt_vma); + + /* + * The DPT object contains only one vma, and there is no VT-d + * guard, so the VMA's offset within the DPT is always 0. + */ + drm_WARN_ON(display->drm, intel_dpt_offset(plane_state->dpt_vma)); + } + + /* + * Pre-populate the dma address before we enter the vblank + * evade critical section as i915_gem_object_get_dma_address() + * will trigger might_sleep() even if it won't actually sleep, + * which is the case when the fb has already been pinned. 
+ */ + if (intel_plane_needs_physical(plane)) { + struct drm_i915_gem_object *obj = to_intel_bo(intel_fb_bo(&fb->base)); + + plane_state->surf = i915_gem_object_get_dma_address(obj, 0) + + plane->surf_offset(plane_state); + } else { + plane_state->surf = i915_ggtt_offset(plane_state->ggtt_vma) + + plane->surf_offset(plane_state); } return 0; diff --git a/drivers/gpu/drm/i915/display/intel_fbc.c b/drivers/gpu/drm/i915/display/intel_fbc.c index 685ac98bd001..d4c5deff9cbe 100644 --- a/drivers/gpu/drm/i915/display/intel_fbc.c +++ b/drivers/gpu/drm/i915/display/intel_fbc.c @@ -1460,7 +1460,7 @@ static int intel_fbc_check_plane(struct intel_atomic_state *state, return 0; } - if (intel_display_needs_wa_16023588340(display)) { + if (intel_display_wa(display, 16023588340)) { plane_state->no_fbc_reason = "Wa_16023588340"; return 0; } @@ -2240,10 +2240,9 @@ void intel_fbc_crtc_debugfs_add(struct intel_crtc *crtc) /* FIXME: remove this once igt is on board with per-crtc stuff */ void intel_fbc_debugfs_register(struct intel_display *display) { - struct drm_minor *minor = display->drm->primary; struct intel_fbc *fbc; fbc = display->fbc[INTEL_FBC_A]; if (fbc) - intel_fbc_debugfs_add(fbc, minor->debugfs_root); + intel_fbc_debugfs_add(fbc, display->drm->debugfs_root); } diff --git a/drivers/gpu/drm/i915/display/intel_fdi.c b/drivers/gpu/drm/i915/display/intel_fdi.c index 8039a84671cc..59a36b3a22c1 100644 --- a/drivers/gpu/drm/i915/display/intel_fdi.c +++ b/drivers/gpu/drm/i915/display/intel_fdi.c @@ -292,34 +292,6 @@ int intel_fdi_link_freq(struct intel_display *display, return display->fdi.pll_freq; } -/** - * intel_fdi_compute_pipe_bpp - compute pipe bpp limited by max link bpp - * @crtc_state: the crtc state - * - * Compute the pipe bpp limited by the CRTC's maximum link bpp. Encoders can - * call this function during state computation in the simple case where the - * link bpp will always match the pipe bpp. This is the case for all non-DP - * encoders, while DP encoders will use a link bpp lower than pipe bpp in case - * of DSC compression. - * - * Returns %true in case of success, %false if pipe bpp would need to be - * reduced below its valid range. 
- */ -bool intel_fdi_compute_pipe_bpp(struct intel_crtc_state *crtc_state) -{ - int pipe_bpp = min(crtc_state->pipe_bpp, - fxp_q4_to_int(crtc_state->max_link_bpp_x16)); - - pipe_bpp = rounddown(pipe_bpp, 2 * 3); - - if (pipe_bpp < 6 * 3) - return false; - - crtc_state->pipe_bpp = pipe_bpp; - - return true; -} - int ilk_fdi_compute_config(struct intel_crtc *crtc, struct intel_crtc_state *pipe_config) { diff --git a/drivers/gpu/drm/i915/display/intel_fdi.h b/drivers/gpu/drm/i915/display/intel_fdi.h index ad5e103c38a8..1cd08df9b0c2 100644 --- a/drivers/gpu/drm/i915/display/intel_fdi.h +++ b/drivers/gpu/drm/i915/display/intel_fdi.h @@ -20,7 +20,6 @@ struct intel_link_bw_limits; int intel_fdi_add_affected_crtcs(struct intel_atomic_state *state); int intel_fdi_link_freq(struct intel_display *display, const struct intel_crtc_state *pipe_config); -bool intel_fdi_compute_pipe_bpp(struct intel_crtc_state *crtc_state); int ilk_fdi_compute_config(struct intel_crtc *intel_crtc, struct intel_crtc_state *pipe_config); int intel_fdi_atomic_check_link(struct intel_atomic_state *state, diff --git a/drivers/gpu/drm/i915/display/intel_global_state.c b/drivers/gpu/drm/i915/display/intel_global_state.c index 000a898c9480..30eff6009e87 100644 --- a/drivers/gpu/drm/i915/display/intel_global_state.c +++ b/drivers/gpu/drm/i915/display/intel_global_state.c @@ -13,6 +13,36 @@ #include "intel_display_types.h" #include "intel_global_state.h" +#define for_each_new_global_obj_in_state(__state, obj, new_obj_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_global_objs && \ + ((obj) = (__state)->global_objs[__i].ptr, \ + (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \ + (__i)++) \ + for_each_if(obj) + +#define for_each_old_global_obj_in_state(__state, obj, old_obj_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_global_objs && \ + ((obj) = (__state)->global_objs[__i].ptr, \ + (old_obj_state) = (__state)->global_objs[__i].old_state, 1); \ + (__i)++) \ + for_each_if(obj) + +#define for_each_oldnew_global_obj_in_state(__state, obj, old_obj_state, new_obj_state, __i) \ + for ((__i) = 0; \ + (__i) < (__state)->num_global_objs && \ + ((obj) = (__state)->global_objs[__i].ptr, \ + (old_obj_state) = (__state)->global_objs[__i].old_state, \ + (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \ + (__i)++) \ + for_each_if(obj) + +struct intel_global_objs_state { + struct intel_global_obj *ptr; + struct intel_global_state *state, *old_state, *new_state; +}; + struct intel_global_commit { struct kref ref; struct completion done; @@ -148,7 +178,7 @@ intel_atomic_get_global_obj_state(struct intel_atomic_state *state, struct intel_display *display = to_intel_display(state); int index, num_objs, i; size_t size; - struct __intel_global_objs_state *arr; + struct intel_global_objs_state *arr; struct intel_global_state *obj_state; for (i = 0; i < state->num_global_objs; i++) diff --git a/drivers/gpu/drm/i915/display/intel_global_state.h b/drivers/gpu/drm/i915/display/intel_global_state.h index d42fb2547ee9..e1efa530cc86 100644 --- a/drivers/gpu/drm/i915/display/intel_global_state.h +++ b/drivers/gpu/drm/i915/display/intel_global_state.h @@ -11,6 +11,7 @@ struct intel_atomic_state; struct intel_display; +struct intel_global_commit; struct intel_global_obj; struct intel_global_state; @@ -26,36 +27,6 @@ struct intel_global_obj { const struct intel_global_state_funcs *funcs; }; -#define intel_for_each_global_obj(obj, dev_priv) \ - list_for_each_entry(obj, &(dev_priv)->display.global.obj_list, head) - 
-#define for_each_new_global_obj_in_state(__state, obj, new_obj_state, __i) \ - for ((__i) = 0; \ - (__i) < (__state)->num_global_objs && \ - ((obj) = (__state)->global_objs[__i].ptr, \ - (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \ - (__i)++) \ - for_each_if(obj) - -#define for_each_old_global_obj_in_state(__state, obj, old_obj_state, __i) \ - for ((__i) = 0; \ - (__i) < (__state)->num_global_objs && \ - ((obj) = (__state)->global_objs[__i].ptr, \ - (old_obj_state) = (__state)->global_objs[__i].old_state, 1); \ - (__i)++) \ - for_each_if(obj) - -#define for_each_oldnew_global_obj_in_state(__state, obj, old_obj_state, new_obj_state, __i) \ - for ((__i) = 0; \ - (__i) < (__state)->num_global_objs && \ - ((obj) = (__state)->global_objs[__i].ptr, \ - (old_obj_state) = (__state)->global_objs[__i].old_state, \ - (new_obj_state) = (__state)->global_objs[__i].new_state, 1); \ - (__i)++) \ - for_each_if(obj) - -struct intel_global_commit; - struct intel_global_state { struct intel_global_obj *obj; struct intel_atomic_state *state; @@ -64,11 +35,6 @@ struct intel_global_state { bool changed, serialized; }; -struct __intel_global_objs_state { - struct intel_global_obj *ptr; - struct intel_global_state *state, *old_state, *new_state; -}; - void intel_atomic_global_obj_init(struct intel_display *display, struct intel_global_obj *obj, struct intel_global_state *state, diff --git a/drivers/gpu/drm/i915/display/intel_gmbus.c b/drivers/gpu/drm/i915/display/intel_gmbus.c index 0d73f32fe7f1..358210adb8f8 100644 --- a/drivers/gpu/drm/i915/display/intel_gmbus.c +++ b/drivers/gpu/drm/i915/display/intel_gmbus.c @@ -30,6 +30,7 @@ #include <linux/export.h> #include <linux/i2c-algo-bit.h> #include <linux/i2c.h> +#include <linux/iopoll.h> #include <drm/display/drm_hdcp_helper.h> @@ -39,6 +40,7 @@ #include "intel_de.h" #include "intel_display_regs.h" #include "intel_display_types.h" +#include "intel_display_wa.h" #include "intel_gmbus.h" #include "intel_gmbus_regs.h" @@ -217,7 +219,7 @@ static void pnv_gmbus_clock_gating(struct intel_display *display, bool enable) { /* When using bit bashing for I2C, this bit needs to be set to 1 */ - intel_de_rmw(display, DSPCLK_GATE_D(display), + intel_de_rmw(display, DSPCLK_GATE_D, PNV_GMBUSUNIT_CLOCK_GATE_DISABLE, !enable ? PNV_GMBUSUNIT_CLOCK_GATE_DISABLE : 0); } @@ -240,14 +242,20 @@ static void bxt_gmbus_clock_gating(struct intel_display *display, static u32 get_reserved(struct intel_gmbus *bus) { struct intel_display *display = bus->display; - u32 reserved = 0; + u32 preserve_bits = 0; + + if (display->platform.i830 || display->platform.i845g) + return 0; /* On most chips, these bits must be preserved in software. 
*/ - if (!display->platform.i830 && !display->platform.i845g) - reserved = intel_de_read_notrace(display, bus->gpio_reg) & - (GPIO_DATA_PULLUP_DISABLE | GPIO_CLOCK_PULLUP_DISABLE); + preserve_bits |= GPIO_DATA_PULLUP_DISABLE | GPIO_CLOCK_PULLUP_DISABLE; + + /* Wa_16025573575: the mask bits need to be preserved throughout */ + if (intel_display_wa(display, 16025573575)) + preserve_bits |= GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_VAL_MASK | + GPIO_DATA_DIR_MASK | GPIO_DATA_VAL_MASK; - return reserved; + return intel_de_read_notrace(display, bus->gpio_reg) & preserve_bits; } static int get_clock(void *data) @@ -308,6 +316,22 @@ static void set_data(void *data, int state_high) intel_de_posting_read(display, bus->gpio_reg); } +static void +ptl_handle_mask_bits(struct intel_gmbus *bus, bool set) +{ + struct intel_display *display = bus->display; + u32 reg_val = intel_de_read_notrace(display, bus->gpio_reg); + u32 mask_bits = GPIO_CLOCK_DIR_MASK | GPIO_CLOCK_VAL_MASK | + GPIO_DATA_DIR_MASK | GPIO_DATA_VAL_MASK; + if (set) + reg_val |= mask_bits; + else + reg_val &= ~mask_bits; + + intel_de_write_notrace(display, bus->gpio_reg, reg_val); + intel_de_posting_read(display, bus->gpio_reg); +} + static int intel_gpio_pre_xfer(struct i2c_adapter *adapter) { @@ -319,6 +343,9 @@ intel_gpio_pre_xfer(struct i2c_adapter *adapter) if (display->platform.pineview) pnv_gmbus_clock_gating(display, false); + if (intel_display_wa(display, 16025573575)) + ptl_handle_mask_bits(bus, true); + set_data(bus, 1); set_clock(bus, 1); udelay(I2C_RISEFALL_TIME); @@ -336,6 +363,9 @@ intel_gpio_post_xfer(struct i2c_adapter *adapter) if (display->platform.pineview) pnv_gmbus_clock_gating(display, true); + + if (intel_display_wa(display, 16025573575)) + ptl_handle_mask_bits(bus, false); } static void @@ -385,11 +415,14 @@ static int gmbus_wait(struct intel_display *display, u32 status, u32 irq_en) intel_de_write_fw(display, GMBUS4(display), irq_en); status |= GMBUS_SATOER; - ret = wait_for_us((gmbus2 = intel_de_read_fw(display, GMBUS2(display))) & status, - 2); + + ret = poll_timeout_us_atomic(gmbus2 = intel_de_read_fw(display, GMBUS2(display)), + gmbus2 & status, + 0, 2, false); if (ret) - ret = wait_for((gmbus2 = intel_de_read_fw(display, GMBUS2(display))) & status, - 50); + ret = poll_timeout_us(gmbus2 = intel_de_read_fw(display, GMBUS2(display)), + gmbus2 & status, + 500, 50 * 1000, false); intel_de_write_fw(display, GMBUS4(display), 0); remove_wait_queue(&display->gmbus.wait_queue, &wait); diff --git a/drivers/gpu/drm/i915/display/intel_hdcp.c b/drivers/gpu/drm/i915/display/intel_hdcp.c index 42202c8bb066..531ee122bf82 100644 --- a/drivers/gpu/drm/i915/display/intel_hdcp.c +++ b/drivers/gpu/drm/i915/display/intel_hdcp.c @@ -11,6 +11,7 @@ #include <linux/component.h> #include <linux/debugfs.h> #include <linux/i2c.h> +#include <linux/iopoll.h> #include <linux/random.h> #include <drm/display/drm_hdcp_helper.h> @@ -326,16 +327,13 @@ static int intel_hdcp_poll_ksv_fifo(struct intel_digital_port *dig_port, bool ksv_ready; /* Poll for ksv list ready (spec says max time allowed is 5s) */ - ret = __wait_for(read_ret = shim->read_ksv_ready(dig_port, - &ksv_ready), - read_ret || ksv_ready, 5 * 1000 * 1000, 1000, - 100 * 1000); + ret = poll_timeout_us(read_ret = shim->read_ksv_ready(dig_port, &ksv_ready), + read_ret || ksv_ready, + 100 * 1000, 5 * 1000 * 1000, false); if (ret) return ret; if (read_ret) return read_ret; - if (!ksv_ready) - return -ETIMEDOUT; return 0; } @@ -817,6 +815,7 @@ static int intel_hdcp_auth(struct intel_connector 
*connector) enum port port = dig_port->base.port; unsigned long r0_prime_gen_start; int ret, i, tries = 2; + u32 val; union { u32 reg[2]; u8 shim[DRM_HDCP_AN_LEN]; @@ -905,8 +904,10 @@ static int intel_hdcp_auth(struct intel_connector *connector) HDCP_CONF_AUTH_AND_ENC); /* Wait for R0 ready */ - if (wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) & - (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), 1)) { + ret = poll_timeout_us(val = intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)), + val & (HDCP_STATUS_R0_READY | HDCP_STATUS_ENC), + 100, 1000, false); + if (ret) { drm_err(display->drm, "Timed out waiting for R0 ready\n"); return -ETIMEDOUT; } @@ -938,16 +939,16 @@ static int intel_hdcp_auth(struct intel_connector *connector) ri.reg); /* Wait for Ri prime match */ - if (!wait_for(intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) & - (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) + ret = poll_timeout_us(val = intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)), + val & (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), + 100, 1000, false); + if (!ret) break; } if (i == tries) { drm_dbg_kms(display->drm, - "Timed out waiting for Ri prime match (%x)\n", - intel_de_read(display, - HDCP_STATUS(display, cpu_transcoder, port))); + "Timed out waiting for Ri prime match (%x)\n", val); return -ETIMEDOUT; } @@ -2446,12 +2447,6 @@ static int _intel_hdcp_enable(struct intel_atomic_state *state, if (!hdcp->shim) return -ENOENT; - if (!connector->encoder) { - drm_err(display->drm, "[CONNECTOR:%d:%s] encoder is not initialized\n", - connector->base.base.id, connector->base.name); - return -ENODEV; - } - mutex_lock(&hdcp->mutex); mutex_lock(&dig_port->hdcp.mutex); drm_WARN_ON(display->drm, diff --git a/drivers/gpu/drm/i915/display/intel_hdmi.c b/drivers/gpu/drm/i915/display/intel_hdmi.c index 9961ff259298..4ab7e2e3bfd4 100644 --- a/drivers/gpu/drm/i915/display/intel_hdmi.c +++ b/drivers/gpu/drm/i915/display/intel_hdmi.c @@ -29,6 +29,7 @@ #include <linux/delay.h> #include <linux/hdmi.h> #include <linux/i2c.h> +#include <linux/iopoll.h> #include <linux/slab.h> #include <linux/string_helpers.h> @@ -60,6 +61,7 @@ #include "intel_hdcp_regs.h" #include "intel_hdcp_shim.h" #include "intel_hdmi.h" +#include "intel_link_bw.h" #include "intel_lspcon.h" #include "intel_panel.h" #include "intel_pfit.h" @@ -1582,9 +1584,9 @@ bool intel_hdmi_hdcp_check_link_once(struct intel_digital_port *dig_port, intel_de_write(display, HDCP_RPRIME(display, cpu_transcoder, port), ri.reg); /* Wait for Ri prime match */ - if (wait_for((intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port)) & - (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC)) == - (HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC), 1)) { + ret = intel_de_wait_for_set(display, HDCP_STATUS(display, cpu_transcoder, port), + HDCP_STATUS_RI_MATCH | HDCP_STATUS_ENC, 1); + if (ret) { drm_dbg_kms(display->drm, "Ri' mismatch detected (%x)\n", intel_de_read(display, HDCP_STATUS(display, cpu_transcoder, port))); @@ -1689,11 +1691,10 @@ intel_hdmi_hdcp2_wait_for_msg(struct intel_digital_port *dig_port, if (timeout < 0) return timeout; - ret = __wait_for(ret = hdcp2_detect_msg_availability(dig_port, - msg_id, &msg_ready, - &msg_sz), - !ret && msg_ready && msg_sz, timeout * 1000, - 1000, 5 * 1000); + ret = poll_timeout_us(ret = hdcp2_detect_msg_availability(dig_port, msg_id, + &msg_ready, &msg_sz), + !ret && msg_ready && msg_sz, + 4000, timeout * 1000, false); if (ret) drm_dbg_kms(display->drm, "msg_id: %d, ret: %d, timeout: 
%d\n", @@ -2053,6 +2054,10 @@ intel_hdmi_mode_valid(struct drm_connector *_connector, else sink_format = INTEL_OUTPUT_FORMAT_RGB; + status = intel_pfit_mode_valid(display, mode, sink_format, 0); + if (status != MODE_OK) + return status; + status = intel_hdmi_mode_clock_valid(&connector->base, clock, has_hdmi_sink, sink_format); if (status != MODE_OK) { if (ycbcr_420_only || @@ -2341,6 +2346,9 @@ int intel_hdmi_compute_config(struct intel_encoder *encoder, if (adjusted_mode->flags & DRM_MODE_FLAG_DBLCLK) pipe_config->pixel_multiplier = 2; + if (!intel_link_bw_compute_pipe_bpp(pipe_config)) + return -EINVAL; + pipe_config->has_audio = intel_hdmi_has_audio(encoder, pipe_config, conn_state) && intel_audio_compute_config(encoder, pipe_config, conn_state); diff --git a/drivers/gpu/drm/i915/display/intel_hotplug.c b/drivers/gpu/drm/i915/display/intel_hotplug.c index 265aa97fcc75..4451a792600a 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug.c @@ -28,6 +28,7 @@ #include "i915_drv.h" #include "i915_irq.h" +#include "i915_utils.h" #include "intel_connector.h" #include "intel_display_power.h" #include "intel_display_core.h" @@ -971,8 +972,6 @@ void intel_hpd_cancel_work(struct intel_display *display) spin_lock_irq(&display->irq.lock); - drm_WARN_ON(display->drm, get_blocked_hpd_pin_mask(display)); - display->hotplug.long_hpd_pin_mask = 0; display->hotplug.short_hpd_pin_mask = 0; display->hotplug.event_bits = 0; @@ -1333,12 +1332,12 @@ static const struct file_operations i915_hpd_short_storm_ctl_fops = { void intel_hpd_debugfs_register(struct intel_display *display) { - struct drm_minor *minor = display->drm->primary; + struct dentry *debugfs_root = display->drm->debugfs_root; - debugfs_create_file("i915_hpd_storm_ctl", 0644, minor->debugfs_root, + debugfs_create_file("i915_hpd_storm_ctl", 0644, debugfs_root, display, &i915_hpd_storm_ctl_fops); - debugfs_create_file("i915_hpd_short_storm_ctl", 0644, minor->debugfs_root, + debugfs_create_file("i915_hpd_short_storm_ctl", 0644, debugfs_root, display, &i915_hpd_short_storm_ctl_fops); - debugfs_create_bool("i915_ignore_long_hpd", 0644, minor->debugfs_root, + debugfs_create_bool("i915_ignore_long_hpd", 0644, debugfs_root, &display->hotplug.ignore_long_hpd); } diff --git a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c index 43aee70597bf..4f72f3fb9af5 100644 --- a/drivers/gpu/drm/i915/display/intel_hotplug_irq.c +++ b/drivers/gpu/drm/i915/display/intel_hotplug_irq.c @@ -1025,7 +1025,7 @@ static void mtp_tc_hpd_enable_detection(struct intel_encoder *encoder) { struct intel_display *display = to_intel_display(encoder); - intel_de_rmw(display, SHOTPLUG_CTL_DDI, + intel_de_rmw(display, SHOTPLUG_CTL_TC, mtp_tc_hotplug_mask(encoder->hpd_pin), mtp_tc_hotplug_enables(encoder)); } diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.c b/drivers/gpu/drm/i915/display/intel_link_bw.c index 3caef7f9c7c4..f52dee0ea412 100644 --- a/drivers/gpu/drm/i915/display/intel_link_bw.c +++ b/drivers/gpu/drm/i915/display/intel_link_bw.c @@ -165,6 +165,34 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state, } /** + * intel_link_bw_compute_pipe_bpp - compute pipe bpp limited by max link bpp + * @crtc_state: the crtc state + * + * Compute the pipe bpp limited by the CRTC's maximum link bpp. Encoders can + * call this function during state computation in the simple case where the + * link bpp will always match the pipe bpp. 
This is the case for all non-DP + * encoders, while DP encoders will use a link bpp lower than pipe bpp in case + * of DSC compression. + * + * Returns %true in case of success, %false if pipe bpp would need to be + * reduced below its valid range. + */ +bool intel_link_bw_compute_pipe_bpp(struct intel_crtc_state *crtc_state) +{ + int pipe_bpp = min(crtc_state->pipe_bpp, + fxp_q4_to_int(crtc_state->max_link_bpp_x16)); + + pipe_bpp = rounddown(pipe_bpp, 2 * 3); + + if (pipe_bpp < 6 * 3) + return false; + + crtc_state->pipe_bpp = pipe_bpp; + + return true; +} + +/** * intel_link_bw_set_bpp_limit_for_pipe - set link bpp limit for a pipe to its minimum * @state: atomic state * @old_limits: link BW limits @@ -449,6 +477,7 @@ void intel_link_bw_connector_debugfs_add(struct intel_connector *connector) switch (connector->base.connector_type) { case DRM_MODE_CONNECTOR_DisplayPort: case DRM_MODE_CONNECTOR_eDP: + case DRM_MODE_CONNECTOR_HDMIA: break; case DRM_MODE_CONNECTOR_VGA: case DRM_MODE_CONNECTOR_SVIDEO: @@ -458,11 +487,6 @@ void intel_link_bw_connector_debugfs_add(struct intel_connector *connector) break; return; - case DRM_MODE_CONNECTOR_HDMIA: - if (HAS_FDI(display) && !HAS_DDI(display)) - break; - - return; default: return; } diff --git a/drivers/gpu/drm/i915/display/intel_link_bw.h b/drivers/gpu/drm/i915/display/intel_link_bw.h index b499042e62b1..95ab7c50c61d 100644 --- a/drivers/gpu/drm/i915/display/intel_link_bw.h +++ b/drivers/gpu/drm/i915/display/intel_link_bw.h @@ -27,6 +27,7 @@ int intel_link_bw_reduce_bpp(struct intel_atomic_state *state, struct intel_link_bw_limits *limits, u8 pipe_mask, const char *reason); +bool intel_link_bw_compute_pipe_bpp(struct intel_crtc_state *crtc_state); bool intel_link_bw_set_bpp_limit_for_pipe(struct intel_atomic_state *state, const struct intel_link_bw_limits *old_limits, struct intel_link_bw_limits *new_limits, diff --git a/drivers/gpu/drm/i915/display/intel_lpe_audio.c b/drivers/gpu/drm/i915/display/intel_lpe_audio.c index 666148a14522..42284e9928f2 100644 --- a/drivers/gpu/drm/i915/display/intel_lpe_audio.c +++ b/drivers/gpu/drm/i915/display/intel_lpe_audio.c @@ -68,9 +68,9 @@ #include <linux/platform_device.h> #include <linux/pm_runtime.h> +#include <drm/drm_print.h> #include <drm/intel/intel_lpe_audio.h> -#include "i915_drv.h" #include "i915_irq.h" #include "intel_audio_regs.h" #include "intel_de.h" @@ -170,14 +170,11 @@ static struct irq_chip lpe_audio_irqchip = { static int lpe_audio_irq_init(struct intel_display *display) { - struct drm_i915_private *dev_priv = to_i915(display->drm); int irq = display->audio.lpe.irq; - drm_WARN_ON(display->drm, !intel_irqs_enabled(dev_priv)); - irq_set_chip_and_handler_name(irq, - &lpe_audio_irqchip, - handle_simple_irq, - "hdmi_lpe_audio_irq_handler"); + irq_set_chip_and_handler_name(irq, &lpe_audio_irqchip, + handle_simple_irq, + "hdmi_lpe_audio_irq_handler"); return 0; } diff --git a/drivers/gpu/drm/i915/display/intel_lspcon.c b/drivers/gpu/drm/i915/display/intel_lspcon.c index abc4b562083d..d56026c4efdd 100644 --- a/drivers/gpu/drm/i915/display/intel_lspcon.c +++ b/drivers/gpu/drm/i915/display/intel_lspcon.c @@ -23,6 +23,8 @@ * */ +#include <linux/iopoll.h> + #include <drm/display/drm_dp_dual_mode_helper.h> #include <drm/display/drm_hdmi_helper.h> #include <drm/drm_atomic_helper.h> @@ -181,6 +183,8 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon, struct intel_dp *intel_dp = lspcon_to_intel_dp(lspcon); struct intel_display *display = to_intel_display(intel_dp); enum 
drm_lspcon_mode current_mode; + int timeout_us; + int ret; current_mode = lspcon_get_current_mode(lspcon); if (current_mode == mode) @@ -189,9 +193,12 @@ static enum drm_lspcon_mode lspcon_wait_mode(struct intel_lspcon *lspcon, drm_dbg_kms(display->drm, "Waiting for LSPCON mode %s to settle\n", lspcon_mode_name(mode)); - wait_for((current_mode = lspcon_get_current_mode(lspcon)) == mode, - lspcon_get_mode_settle_timeout(lspcon)); - if (current_mode != mode) + timeout_us = lspcon_get_mode_settle_timeout(lspcon) * 1000; + + ret = poll_timeout_us(current_mode = lspcon_get_current_mode(lspcon), + current_mode == mode, + 5000, timeout_us, false); + if (ret) drm_err(display->drm, "LSPCON mode hasn't settled\n"); out: diff --git a/drivers/gpu/drm/i915/display/intel_lvds.c b/drivers/gpu/drm/i915/display/intel_lvds.c index 7e48a235c99f..48f4d8ed4f15 100644 --- a/drivers/gpu/drm/i915/display/intel_lvds.c +++ b/drivers/gpu/drm/i915/display/intel_lvds.c @@ -48,6 +48,7 @@ #include "intel_dpll.h" #include "intel_fdi.h" #include "intel_gmbus.h" +#include "intel_link_bw.h" #include "intel_lvds.h" #include "intel_lvds_regs.h" #include "intel_panel.h" @@ -433,7 +434,7 @@ static int intel_lvds_compute_config(struct intel_encoder *encoder, if (HAS_PCH_SPLIT(display)) { crtc_state->has_pch_encoder = true; - if (!intel_fdi_compute_pipe_bpp(crtc_state)) + if (!intel_link_bw_compute_pipe_bpp(crtc_state)) return -EINVAL; } diff --git a/drivers/gpu/drm/i915/display/intel_opregion.c b/drivers/gpu/drm/i915/display/intel_opregion.c index 81efdb17fc0c..cbc220310813 100644 --- a/drivers/gpu/drm/i915/display/intel_opregion.c +++ b/drivers/gpu/drm/i915/display/intel_opregion.c @@ -28,13 +28,13 @@ #include <linux/acpi.h> #include <linux/debugfs.h> #include <linux/dmi.h> +#include <linux/iopoll.h> #include <acpi/video.h> #include <drm/drm_edid.h> #include <drm/drm_file.h> #include <drm/drm_print.h> -#include "i915_utils.h" #include "intel_acpi.h" #include "intel_backlight.h" #include "intel_display_core.h" @@ -357,10 +357,12 @@ static int swsci(struct intel_display *display, pci_write_config_word(pdev, SWSCI, swsci_val); /* Poll for the result. 
*/ -#define C (((scic = swsci->scic) & SWSCI_SCIC_INDICATOR) == 0) - if (wait_for(C, dslp)) { + ret = poll_timeout_us(scic = swsci->scic, + (scic & SWSCI_SCIC_INDICATOR) == 0, + 1000, dslp * 1000, false); + if (ret) { drm_dbg(display->drm, "SWSCI request timed out\n"); - return -ETIMEDOUT; + return ret; } scic = (scic & SWSCI_SCIC_EXIT_STATUS_MASK) >> @@ -1299,8 +1301,6 @@ DEFINE_SHOW_ATTRIBUTE(intel_opregion); void intel_opregion_debugfs_register(struct intel_display *display) { - struct drm_minor *minor = display->drm->primary; - - debugfs_create_file("i915_opregion", 0444, minor->debugfs_root, + debugfs_create_file("i915_opregion", 0444, display->drm->debugfs_root, display, &intel_opregion_fops); } diff --git a/drivers/gpu/drm/i915/display/intel_overlay.c b/drivers/gpu/drm/i915/display/intel_overlay.c index 159a5f998ea0..272f9e7af4d4 100644 --- a/drivers/gpu/drm/i915/display/intel_overlay.c +++ b/drivers/gpu/drm/i915/display/intel_overlay.c @@ -217,10 +217,9 @@ static void i830_overlay_clock_gating(struct intel_display *display, /* WA_OVERLAY_CLKGATE:alm */ if (enable) - intel_de_write(display, DSPCLK_GATE_D(display), 0); + intel_de_write(display, DSPCLK_GATE_D, 0); else - intel_de_write(display, DSPCLK_GATE_D(display), - OVRUNIT_CLOCK_GATE_DISABLE); + intel_de_write(display, DSPCLK_GATE_D, OVRUNIT_CLOCK_GATE_DISABLE); /* WA_DISABLE_L2CACHE_CLOCK_GATING:alm */ pci_bus_read_config_byte(pdev->bus, diff --git a/drivers/gpu/drm/i915/display/intel_pch.h b/drivers/gpu/drm/i915/display/intel_pch.h index cf4dab1b98bf..19cac7412d0a 100644 --- a/drivers/gpu/drm/i915/display/intel_pch.h +++ b/drivers/gpu/drm/i915/display/intel_pch.h @@ -6,8 +6,6 @@ #ifndef __INTEL_PCH__ #define __INTEL_PCH__ -#include "intel_display_conversion.h" - struct intel_display; /* @@ -36,7 +34,7 @@ enum intel_pch { PCH_LNL, }; -#define INTEL_PCH_TYPE(_display) (__to_intel_display(_display)->pch_type) +#define INTEL_PCH_TYPE(_display) ((_display)->pch_type) #define HAS_PCH_DG2(display) (INTEL_PCH_TYPE(display) == PCH_DG2) #define HAS_PCH_ADP(display) (INTEL_PCH_TYPE(display) == PCH_ADP) #define HAS_PCH_DG1(display) (INTEL_PCH_TYPE(display) == PCH_DG1) diff --git a/drivers/gpu/drm/i915/display/intel_pch_refclk.c b/drivers/gpu/drm/i915/display/intel_pch_refclk.c index d3c5255bf1a8..9ae53679a041 100644 --- a/drivers/gpu/drm/i915/display/intel_pch_refclk.c +++ b/drivers/gpu/drm/i915/display/intel_pch_refclk.c @@ -17,16 +17,22 @@ static void lpt_fdi_reset_mphy(struct intel_display *display) { + int ret; + intel_de_rmw(display, SOUTH_CHICKEN2, 0, FDI_MPHY_IOSFSB_RESET_CTL); - if (wait_for_us(intel_de_read(display, SOUTH_CHICKEN2) & - FDI_MPHY_IOSFSB_RESET_STATUS, 100)) + ret = intel_de_wait_custom(display, SOUTH_CHICKEN2, + FDI_MPHY_IOSFSB_RESET_STATUS, FDI_MPHY_IOSFSB_RESET_STATUS, + 100, 0, NULL); + if (ret) drm_err(display->drm, "FDI mPHY reset assert timeout\n"); intel_de_rmw(display, SOUTH_CHICKEN2, FDI_MPHY_IOSFSB_RESET_CTL, 0); - if (wait_for_us((intel_de_read(display, SOUTH_CHICKEN2) & - FDI_MPHY_IOSFSB_RESET_STATUS) == 0, 100)) + ret = intel_de_wait_custom(display, SOUTH_CHICKEN2, + FDI_MPHY_IOSFSB_RESET_STATUS, 0, + 100, 0, NULL); + if (ret) drm_err(display->drm, "FDI mPHY reset de-assert timeout\n"); } diff --git a/drivers/gpu/drm/i915/display/intel_pfit.c b/drivers/gpu/drm/i915/display/intel_pfit.c index 13541be4d6df..68539e7c2a24 100644 --- a/drivers/gpu/drm/i915/display/intel_pfit.c +++ b/drivers/gpu/drm/i915/display/intel_pfit.c @@ -14,6 +14,7 @@ #include "intel_lvds_regs.h" #include "intel_pfit.h" #include 
"intel_pfit_regs.h" +#include "skl_scaler.h" static int intel_pch_pfit_check_dst_window(const struct intel_crtc_state *crtc_state) { @@ -546,6 +547,16 @@ out: return intel_gmch_pfit_check_timings(crtc_state); } +enum drm_mode_status +intel_pfit_mode_valid(struct intel_display *display, + const struct drm_display_mode *mode, + enum intel_output_format output_format, + int num_joined_pipes) +{ + return skl_scaler_mode_valid(display, mode, output_format, + num_joined_pipes); +} + int intel_pfit_compute_config(struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state) { diff --git a/drivers/gpu/drm/i915/display/intel_pfit.h b/drivers/gpu/drm/i915/display/intel_pfit.h index ef34f9b49d09..c1bb0d1f344e 100644 --- a/drivers/gpu/drm/i915/display/intel_pfit.h +++ b/drivers/gpu/drm/i915/display/intel_pfit.h @@ -6,8 +6,12 @@ #ifndef __INTEL_PFIT_H__ #define __INTEL_PFIT_H__ +enum drm_mode_status; +struct drm_display_mode; struct drm_connector_state; struct intel_crtc_state; +struct intel_display; +enum intel_output_format; int intel_pfit_compute_config(struct intel_crtc_state *crtc_state, const struct drm_connector_state *conn_state); @@ -17,5 +21,9 @@ void ilk_pfit_get_config(struct intel_crtc_state *crtc_state); void i9xx_pfit_enable(const struct intel_crtc_state *crtc_state); void i9xx_pfit_disable(const struct intel_crtc_state *old_crtc_state); void i9xx_pfit_get_config(struct intel_crtc_state *crtc_state); - +enum drm_mode_status +intel_pfit_mode_valid(struct intel_display *display, + const struct drm_display_mode *mode, + enum intel_output_format output_format, + int num_joined_pipes); #endif /* __INTEL_PFIT_H__ */ diff --git a/drivers/gpu/drm/i915/display/intel_plane.c b/drivers/gpu/drm/i915/display/intel_plane.c index 36fb07471deb..81f05ee9a21a 100644 --- a/drivers/gpu/drm/i915/display/intel_plane.c +++ b/drivers/gpu/drm/i915/display/intel_plane.c @@ -46,7 +46,6 @@ #include "gem/i915_gem_object.h" #include "i915_scheduler_types.h" -#include "i915_vma.h" #include "i9xx_plane_regs.h" #include "intel_bo.h" #include "intel_cdclk.h" @@ -1749,8 +1748,3 @@ int intel_plane_atomic_check(struct intel_atomic_state *state) return 0; } - -u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state) -{ - return i915_ggtt_offset(plane_state->ggtt_vma); -} diff --git a/drivers/gpu/drm/i915/display/intel_plane.h b/drivers/gpu/drm/i915/display/intel_plane.h index 4ef012c08fa4..8af41ccc0a69 100644 --- a/drivers/gpu/drm/i915/display/intel_plane.h +++ b/drivers/gpu/drm/i915/display/intel_plane.h @@ -87,7 +87,6 @@ int intel_plane_add_affected(struct intel_atomic_state *state, struct intel_crtc *crtc); int intel_plane_atomic_check(struct intel_atomic_state *state); -u32 intel_plane_ggtt_offset(const struct intel_plane_state *plane_state); bool intel_plane_format_mod_supported_async(struct drm_plane *plane, u32 format, u64 modifier); diff --git a/drivers/gpu/drm/i915/display/intel_plane_initial.c b/drivers/gpu/drm/i915/display/intel_plane_initial.c index 4246173ed311..a9f36b1b50c1 100644 --- a/drivers/gpu/drm/i915/display/intel_plane_initial.c +++ b/drivers/gpu/drm/i915/display/intel_plane_initial.c @@ -360,6 +360,8 @@ valid_fb: i915_vma_pin_fence(vma) == 0 && vma->fence) plane_state->flags |= PLANE_HAS_FENCE; + plane_state->surf = i915_ggtt_offset(plane_state->ggtt_vma); + plane_state->uapi.src_x = 0; plane_state->uapi.src_y = 0; plane_state->uapi.src_w = fb->width << 16; diff --git a/drivers/gpu/drm/i915/display/intel_pps.c b/drivers/gpu/drm/i915/display/intel_pps.c index 
b64d0b30f5b1..327e0de86f1e 100644 --- a/drivers/gpu/drm/i915/display/intel_pps.c +++ b/drivers/gpu/drm/i915/display/intel_pps.c @@ -4,6 +4,7 @@ */ #include <linux/debugfs.h> +#include <linux/iopoll.h> #include <drm/drm_print.h> @@ -608,6 +609,8 @@ static void wait_panel_status(struct intel_dp *intel_dp, struct intel_display *display = to_intel_display(intel_dp); struct intel_digital_port *dig_port = dp_to_dig_port(intel_dp); i915_reg_t pp_stat_reg, pp_ctrl_reg; + int ret; + u32 val; lockdep_assert_held(&display->pps.mutex); @@ -624,13 +627,18 @@ static void wait_panel_status(struct intel_dp *intel_dp, intel_de_read(display, pp_stat_reg), intel_de_read(display, pp_ctrl_reg)); - if (intel_de_wait(display, pp_stat_reg, mask, value, 5000)) + ret = poll_timeout_us(val = intel_de_read(display, pp_stat_reg), + (val & mask) == value, + 10 * 1000, 5000 * 1000, true); + if (ret) { drm_err(display->drm, "[ENCODER:%d:%s] %s panel status timeout: PP_STATUS: 0x%08x PP_CONTROL: 0x%08x\n", dig_port->base.base.base.id, dig_port->base.base.name, pps_name(intel_dp), intel_de_read(display, pp_stat_reg), intel_de_read(display, pp_ctrl_reg)); + return; + } drm_dbg_kms(display->drm, "Wait complete\n"); } diff --git a/drivers/gpu/drm/i915/display/intel_psr.c b/drivers/gpu/drm/i915/display/intel_psr.c index 41988e193a41..22433fe2ee14 100644 --- a/drivers/gpu/drm/i915/display/intel_psr.c +++ b/drivers/gpu/drm/i915/display/intel_psr.c @@ -233,16 +233,12 @@ bool intel_psr_needs_aux_io_power(struct intel_encoder *encoder, static bool psr_global_enabled(struct intel_dp *intel_dp) { - struct intel_display *display = to_intel_display(intel_dp); struct intel_connector *connector = intel_dp->attached_connector; switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) { case I915_PSR_DEBUG_DEFAULT: - if (display->params.enable_psr == -1) - return intel_dp_is_edp(intel_dp) ? - connector->panel.vbt.psr.enable : - true; - return display->params.enable_psr; + return intel_dp_is_edp(intel_dp) ? 
+ connector->panel.vbt.psr.enable : true; case I915_PSR_DEBUG_DISABLE: return false; default: @@ -250,39 +246,23 @@ static bool psr_global_enabled(struct intel_dp *intel_dp) } } -static bool psr2_global_enabled(struct intel_dp *intel_dp) +static bool sel_update_global_enabled(struct intel_dp *intel_dp) { - struct intel_display *display = to_intel_display(intel_dp); - switch (intel_dp->psr.debug & I915_PSR_DEBUG_MODE_MASK) { case I915_PSR_DEBUG_DISABLE: case I915_PSR_DEBUG_FORCE_PSR1: return false; default: - if (display->params.enable_psr == 1) - return false; return true; } } -static bool psr2_su_region_et_global_enabled(struct intel_dp *intel_dp) -{ - struct intel_display *display = to_intel_display(intel_dp); - - if (display->params.enable_psr != -1) - return false; - - return true; -} - static bool panel_replay_global_enabled(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); - if ((display->params.enable_psr != -1) || - (intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE)) - return false; - return true; + return !(intel_dp->psr.debug & I915_PSR_DEBUG_PANEL_REPLAY_DISABLE) && + display->params.enable_panel_replay; } static u32 psr_irq_psr_error_bit_get(struct intel_dp *intel_dp) @@ -600,6 +580,16 @@ exit: static void _panel_replay_init_dpcd(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); + int ret; + + ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT, + &intel_dp->pr_dpcd, sizeof(intel_dp->pr_dpcd)); + if (ret < 0) + return; + + if (!(intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] & + DP_PANEL_REPLAY_SUPPORT)) + return; if (intel_dp_is_edp(intel_dp)) { if (!intel_alpm_aux_less_wake_supported(intel_dp)) { @@ -631,6 +621,15 @@ static void _panel_replay_init_dpcd(struct intel_dp *intel_dp) static void _psr_init_dpcd(struct intel_dp *intel_dp) { struct intel_display *display = to_intel_display(intel_dp); + int ret; + + ret = drm_dp_dpcd_read_data(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd, + sizeof(intel_dp->psr_dpcd)); + if (ret < 0) + return; + + if (!intel_dp->psr_dpcd[0]) + return; drm_dbg_kms(display->drm, "eDP panel supports PSR version %x\n", intel_dp->psr_dpcd[0]); @@ -676,18 +675,9 @@ static void _psr_init_dpcd(struct intel_dp *intel_dp) void intel_psr_init_dpcd(struct intel_dp *intel_dp) { - drm_dp_dpcd_read(&intel_dp->aux, DP_PSR_SUPPORT, intel_dp->psr_dpcd, - sizeof(intel_dp->psr_dpcd)); - - drm_dp_dpcd_read(&intel_dp->aux, DP_PANEL_REPLAY_CAP_SUPPORT, - &intel_dp->pr_dpcd, sizeof(intel_dp->pr_dpcd)); - - if (intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] & - DP_PANEL_REPLAY_SUPPORT) - _panel_replay_init_dpcd(intel_dp); + _psr_init_dpcd(intel_dp); - if (intel_dp->psr_dpcd[0]) - _psr_init_dpcd(intel_dp); + _panel_replay_init_dpcd(intel_dp); if (intel_dp->psr.sink_psr2_support || intel_dp->psr.sink_panel_replay_su_support) @@ -742,8 +732,7 @@ static bool psr2_su_region_et_valid(struct intel_dp *intel_dp, bool panel_replay return panel_replay ? 
intel_dp->pr_dpcd[INTEL_PR_DPCD_INDEX(DP_PANEL_REPLAY_CAP_SUPPORT)] & DP_PANEL_REPLAY_EARLY_TRANSPORT_SUPPORT : - intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED && - psr2_su_region_et_global_enabled(intel_dp); + intel_dp->psr_dpcd[0] == DP_PSR2_WITH_Y_COORD_ET_SUPPORTED; } static void _panel_replay_enable_sink(struct intel_dp *intel_dp, @@ -936,7 +925,7 @@ static void hsw_activate_psr1(struct intel_dp *intel_dp) /* Wa_16025596647 */ if ((DISPLAY_VER(display) == 20 || IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) && - is_dc5_dc6_blocked(intel_dp)) + is_dc5_dc6_blocked(intel_dp) && intel_dp->psr.pkg_c_latency_used) intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display, intel_dp->psr.pipe, true); @@ -1026,7 +1015,7 @@ static void hsw_activate_psr2(struct intel_dp *intel_dp) /* Wa_16025596647 */ if ((DISPLAY_VER(display) == 20 || IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) && - is_dc5_dc6_blocked(intel_dp)) + is_dc5_dc6_blocked(intel_dp) && intel_dp->psr.pkg_c_latency_used) idle_frames = 0; else idle_frames = psr_compute_idle_frames(intel_dp); @@ -1423,7 +1412,7 @@ static bool intel_psr2_config_valid(struct intel_dp *intel_dp, int crtc_vdisplay = crtc_state->hw.adjusted_mode.crtc_vdisplay; int psr_max_h = 0, psr_max_v = 0, max_bpp = 0; - if (!intel_dp->psr.sink_psr2_support) + if (!intel_dp->psr.sink_psr2_support || display->params.enable_psr == 1) return false; /* JSL and EHL only supports eDP 1.3 */ @@ -1528,7 +1517,7 @@ static bool intel_sel_update_config_valid(struct intel_dp *intel_dp, goto unsupported; } - if (!psr2_global_enabled(intel_dp)) { + if (!sel_update_global_enabled(intel_dp)) { drm_dbg_kms(display->drm, "Selective update disabled by flag\n"); goto unsupported; @@ -1576,7 +1565,7 @@ static bool _psr_compute_config(struct intel_dp *intel_dp, const struct drm_display_mode *adjusted_mode = &crtc_state->hw.adjusted_mode; int entry_setup_frames; - if (!CAN_PSR(intel_dp)) + if (!CAN_PSR(intel_dp) || !display->params.enable_psr) return false; /* @@ -1808,6 +1797,8 @@ static void intel_psr_activate(struct intel_dp *intel_dp) drm_WARN_ON(display->drm, intel_dp->psr.active); + drm_WARN_ON(display->drm, !intel_dp->psr.enabled); + lockdep_assert_held(&intel_dp->psr.lock); /* psr1, psr2 and panel-replay are mutually exclusive.*/ @@ -2027,6 +2018,7 @@ static void intel_psr_enable_locked(struct intel_dp *intel_dp, intel_dp->psr.req_psr2_sdp_prior_scanline = crtc_state->req_psr2_sdp_prior_scanline; intel_dp->psr.active_non_psr_pipes = crtc_state->active_non_psr_pipes; + intel_dp->psr.pkg_c_latency_used = crtc_state->pkg_c_latency_used; if (!psr_interrupt_error_check(intel_dp)) return; @@ -2103,8 +2095,9 @@ static void intel_psr_exit(struct intel_dp *intel_dp) drm_WARN_ON(display->drm, !(val & EDP_PSR2_ENABLE)); } else { - if (DISPLAY_VER(display) == 20 || - IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) + if ((DISPLAY_VER(display) == 20 || + IS_DISPLAY_VERx100_STEP(display, 3000, STEP_A0, STEP_B0)) && + intel_dp->psr.pkg_c_latency_used) intel_dmc_start_pkgc_exit_at_start_of_undelayed_vblank(display, intel_dp->psr.pipe, false); @@ -2207,6 +2200,7 @@ static void intel_psr_disable_locked(struct intel_dp *intel_dp) intel_dp->psr.su_region_et_enabled = false; intel_dp->psr.psr2_sel_fetch_cff_enabled = false; intel_dp->psr.active_non_psr_pipes = 0; + intel_dp->psr.pkg_c_latency_used = 0; } /** @@ -3099,7 +3093,7 @@ static bool __psr_wait_for_idle_locked(struct intel_dp *intel_dp) /* After the unlocked wait, verify that PSR is still wanted! 
*/ mutex_lock(&intel_dp->psr.lock); - return err == 0 && intel_dp->psr.enabled; + return err == 0 && intel_dp->psr.enabled && !intel_dp->psr.pause_counter; } static int intel_psr_fastset_force(struct intel_display *display) @@ -3228,8 +3222,13 @@ static void intel_psr_work(struct work_struct *work) if (!intel_dp->psr.enabled) goto unlock; - if (READ_ONCE(intel_dp->psr.irq_aux_error)) + if (READ_ONCE(intel_dp->psr.irq_aux_error)) { intel_psr_handle_irq(intel_dp); + goto unlock; + } + + if (intel_dp->psr.pause_counter) + goto unlock; /* * We have to make sure PSR is ready for re-enable @@ -3723,7 +3722,7 @@ static void intel_psr_apply_underrun_on_idle_wa_locked(struct intel_dp *intel_dp struct intel_display *display = to_intel_display(intel_dp); bool dc5_dc6_blocked; - if (!intel_dp->psr.active) + if (!intel_dp->psr.active || !intel_dp->psr.pkg_c_latency_used) return; dc5_dc6_blocked = is_dc5_dc6_blocked(intel_dp); @@ -3748,7 +3747,8 @@ static void psr_dc5_dc6_wa_work(struct work_struct *work) mutex_lock(&intel_dp->psr.lock); - if (intel_dp->psr.enabled && !intel_dp->psr.panel_replay_enabled) + if (intel_dp->psr.enabled && !intel_dp->psr.panel_replay_enabled && + !intel_dp->psr.pkg_c_latency_used) intel_psr_apply_underrun_on_idle_wa_locked(intel_dp); mutex_unlock(&intel_dp->psr.lock); @@ -3826,7 +3826,8 @@ void intel_psr_notify_pipe_change(struct intel_atomic_state *state, goto unlock; if ((enable && intel_dp->psr.active_non_psr_pipes) || - (!enable && !intel_dp->psr.active_non_psr_pipes)) { + (!enable && !intel_dp->psr.active_non_psr_pipes) || + !intel_dp->psr.pkg_c_latency_used) { intel_dp->psr.active_non_psr_pipes = active_non_psr_pipes; goto unlock; } @@ -3861,7 +3862,7 @@ void intel_psr_notify_vblank_enable_disable(struct intel_display *display, break; } - if (intel_dp->psr.enabled) + if (intel_dp->psr.enabled && intel_dp->psr.pkg_c_latency_used) intel_psr_apply_underrun_on_idle_wa_locked(intel_dp); mutex_unlock(&intel_dp->psr.lock); @@ -4157,12 +4158,12 @@ DEFINE_SIMPLE_ATTRIBUTE(i915_edp_psr_debug_fops, void intel_psr_debugfs_register(struct intel_display *display) { - struct drm_minor *minor = display->drm->primary; + struct dentry *debugfs_root = display->drm->debugfs_root; - debugfs_create_file("i915_edp_psr_debug", 0644, minor->debugfs_root, + debugfs_create_file("i915_edp_psr_debug", 0644, debugfs_root, display, &i915_edp_psr_debug_fops); - debugfs_create_file("i915_edp_psr_status", 0444, minor->debugfs_root, + debugfs_create_file("i915_edp_psr_status", 0444, debugfs_root, display, &i915_edp_psr_status_fops); } diff --git a/drivers/gpu/drm/i915/display/intel_quirks.c b/drivers/gpu/drm/i915/display/intel_quirks.c index a32fae510ed2..d2e16b79d6be 100644 --- a/drivers/gpu/drm/i915/display/intel_quirks.c +++ b/drivers/gpu/drm/i915/display/intel_quirks.c @@ -80,6 +80,12 @@ static void quirk_fw_sync_len(struct intel_dp *intel_dp) drm_info(display->drm, "Applying Fast Wake sync pulse count quirk\n"); } +static void quirk_edp_limit_rate_hbr2(struct intel_display *display) +{ + intel_set_quirk(display, QUIRK_EDP_LIMIT_RATE_HBR2); + drm_info(display->drm, "Applying eDP Limit rate to HBR2 quirk\n"); +} + struct intel_quirk { int device; int subsystem_vendor; @@ -231,6 +237,9 @@ static struct intel_quirk intel_quirks[] = { { 0x3184, 0x1019, 0xa94d, quirk_increase_ddi_disabled_time }, /* HP Notebook - 14-r206nv */ { 0x0f31, 0x103c, 0x220f, quirk_invert_brightness }, + + /* Dell XPS 13 7390 2-in-1 */ + { 0x8a12, 0x1028, 0x08b0, quirk_edp_limit_rate_hbr2 }, }; static const struct 
intel_dpcd_quirk intel_dpcd_quirks[] = { diff --git a/drivers/gpu/drm/i915/display/intel_quirks.h b/drivers/gpu/drm/i915/display/intel_quirks.h index cafdebda7535..06da0e286c67 100644 --- a/drivers/gpu/drm/i915/display/intel_quirks.h +++ b/drivers/gpu/drm/i915/display/intel_quirks.h @@ -20,6 +20,7 @@ enum intel_quirk_id { QUIRK_LVDS_SSC_DISABLE, QUIRK_NO_PPS_BACKLIGHT_POWER_HOOK, QUIRK_FW_SYNC_LEN, + QUIRK_EDP_LIMIT_RATE_HBR2, }; void intel_init_quirks(struct intel_display *display); diff --git a/drivers/gpu/drm/i915/display/intel_sdvo.c b/drivers/gpu/drm/i915/display/intel_sdvo.c index 87aff2754f69..6c032d81e7ee 100644 --- a/drivers/gpu/drm/i915/display/intel_sdvo.c +++ b/drivers/gpu/drm/i915/display/intel_sdvo.c @@ -47,11 +47,11 @@ #include "intel_display_driver.h" #include "intel_display_regs.h" #include "intel_display_types.h" -#include "intel_fdi.h" #include "intel_fifo_underrun.h" #include "intel_gmbus.h" #include "intel_hdmi.h" #include "intel_hotplug.h" +#include "intel_link_bw.h" #include "intel_panel.h" #include "intel_sdvo.h" #include "intel_sdvo_regs.h" @@ -1367,7 +1367,7 @@ static int intel_sdvo_compute_config(struct intel_encoder *encoder, if (HAS_PCH_SPLIT(display)) { pipe_config->has_pch_encoder = true; - if (!intel_fdi_compute_pipe_bpp(pipe_config)) + if (!intel_link_bw_compute_pipe_bpp(pipe_config)) return -EINVAL; } @@ -2052,8 +2052,10 @@ static void intel_sdvo_enable_hotplug(struct intel_encoder *encoder) { struct intel_sdvo *intel_sdvo = to_sdvo(encoder); - intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, - &intel_sdvo->hotplug_active, 2); + if (!intel_sdvo_write_cmd(intel_sdvo, SDVO_CMD_SET_ACTIVE_HOT_PLUG, + &intel_sdvo->hotplug_active, 2)) + drm_warn(intel_sdvo->base.base.dev, + "Failed to enable hotplug on SDVO encoder\n"); } static enum intel_hotplug_state diff --git a/drivers/gpu/drm/i915/display/intel_sprite.c b/drivers/gpu/drm/i915/display/intel_sprite.c index e6844df837af..75bbaa923204 100644 --- a/drivers/gpu/drm/i915/display/intel_sprite.c +++ b/drivers/gpu/drm/i915/display/intel_sprite.c @@ -264,8 +264,7 @@ static u32 vlv_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state) return sprctl; } -static u32 vlv_sprite_ctl(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +static u32 vlv_sprite_ctl(const struct intel_plane_state *plane_state) { const struct drm_framebuffer *fb = plane_state->hw.fb; unsigned int rotation = plane_state->hw.rotation; @@ -395,15 +394,12 @@ vlv_sprite_update_arm(struct intel_dsb *dsb, enum pipe pipe = plane->pipe; enum plane_id plane_id = plane->id; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; - u32 sprsurf_offset = plane_state->view.color_plane[0].offset; u32 x = plane_state->view.color_plane[0].x; u32 y = plane_state->view.color_plane[0].y; - u32 sprctl, linear_offset; + u32 sprctl; sprctl = plane_state->ctl | vlv_sprite_ctl_crtc(crtc_state); - linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); - if (display->platform.cherryview && pipe == PIPE_B) chv_sprite_update_csc(plane_state); @@ -418,7 +414,8 @@ vlv_sprite_update_arm(struct intel_dsb *dsb, intel_de_write_fw(display, SPCONSTALPHA(pipe, plane_id), 0); - intel_de_write_fw(display, SPLINOFF(pipe, plane_id), linear_offset); + intel_de_write_fw(display, SPLINOFF(pipe, plane_id), + intel_fb_xy_to_linear(x, y, plane_state, 0)); intel_de_write_fw(display, SPTILEOFF(pipe, plane_id), SP_OFFSET_Y(y) | SP_OFFSET_X(x)); @@ -428,8 +425,7 @@ vlv_sprite_update_arm(struct intel_dsb *dsb, * the control 
register just before the surface register. */ intel_de_write_fw(display, SPCNTR(pipe, plane_id), sprctl); - intel_de_write_fw(display, SPSURF(pipe, plane_id), - intel_plane_ggtt_offset(plane_state) + sprsurf_offset); + intel_de_write_fw(display, SPSURF(pipe, plane_id), plane_state->surf); vlv_sprite_update_clrc(plane_state); vlv_sprite_update_gamma(plane_state); @@ -663,8 +659,7 @@ static bool ivb_need_sprite_gamma(const struct intel_plane_state *plane_state) (display->platform.ivybridge || display->platform.haswell); } -static u32 ivb_sprite_ctl(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +static u32 ivb_sprite_ctl(const struct intel_plane_state *plane_state) { struct intel_display *display = to_intel_display(plane_state); const struct drm_framebuffer *fb = plane_state->hw.fb; @@ -830,15 +825,12 @@ ivb_sprite_update_arm(struct intel_dsb *dsb, struct intel_display *display = to_intel_display(plane); enum pipe pipe = plane->pipe; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; - u32 sprsurf_offset = plane_state->view.color_plane[0].offset; u32 x = plane_state->view.color_plane[0].x; u32 y = plane_state->view.color_plane[0].y; - u32 sprctl, linear_offset; + u32 sprctl; sprctl = plane_state->ctl | ivb_sprite_ctl_crtc(crtc_state); - linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); - if (key->flags) { intel_de_write_fw(display, SPRKEYVAL(pipe), key->min_value); intel_de_write_fw(display, SPRKEYMSK(pipe), @@ -852,7 +844,8 @@ ivb_sprite_update_arm(struct intel_dsb *dsb, intel_de_write_fw(display, SPROFFSET(pipe), SPRITE_OFFSET_Y(y) | SPRITE_OFFSET_X(x)); } else { - intel_de_write_fw(display, SPRLINOFF(pipe), linear_offset); + intel_de_write_fw(display, SPRLINOFF(pipe), + intel_fb_xy_to_linear(x, y, plane_state, 0)); intel_de_write_fw(display, SPRTILEOFF(pipe), SPRITE_OFFSET_Y(y) | SPRITE_OFFSET_X(x)); } @@ -863,8 +856,7 @@ ivb_sprite_update_arm(struct intel_dsb *dsb, * the control register just before the surface register. 
*/ intel_de_write_fw(display, SPRCTL(pipe), sprctl); - intel_de_write_fw(display, SPRSURF(pipe), - intel_plane_ggtt_offset(plane_state) + sprsurf_offset); + intel_de_write_fw(display, SPRSURF(pipe), plane_state->surf); ivb_sprite_update_gamma(plane_state); } @@ -1016,8 +1008,7 @@ static u32 g4x_sprite_ctl_crtc(const struct intel_crtc_state *crtc_state) return dvscntr; } -static u32 g4x_sprite_ctl(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +static u32 g4x_sprite_ctl(const struct intel_plane_state *plane_state) { struct intel_display *display = to_intel_display(plane_state); const struct drm_framebuffer *fb = plane_state->hw.fb; @@ -1181,15 +1172,12 @@ g4x_sprite_update_arm(struct intel_dsb *dsb, struct intel_display *display = to_intel_display(plane); enum pipe pipe = plane->pipe; const struct drm_intel_sprite_colorkey *key = &plane_state->ckey; - u32 dvssurf_offset = plane_state->view.color_plane[0].offset; u32 x = plane_state->view.color_plane[0].x; u32 y = plane_state->view.color_plane[0].y; - u32 dvscntr, linear_offset; + u32 dvscntr; dvscntr = plane_state->ctl | g4x_sprite_ctl_crtc(crtc_state); - linear_offset = intel_fb_xy_to_linear(x, y, plane_state, 0); - if (key->flags) { intel_de_write_fw(display, DVSKEYVAL(pipe), key->min_value); intel_de_write_fw(display, DVSKEYMSK(pipe), @@ -1197,7 +1185,8 @@ g4x_sprite_update_arm(struct intel_dsb *dsb, intel_de_write_fw(display, DVSKEYMAX(pipe), key->max_value); } - intel_de_write_fw(display, DVSLINOFF(pipe), linear_offset); + intel_de_write_fw(display, DVSLINOFF(pipe), + intel_fb_xy_to_linear(x, y, plane_state, 0)); intel_de_write_fw(display, DVSTILEOFF(pipe), DVS_OFFSET_Y(y) | DVS_OFFSET_X(x)); @@ -1207,8 +1196,7 @@ g4x_sprite_update_arm(struct intel_dsb *dsb, * the control register just before the surface register. 
*/ intel_de_write_fw(display, DVSCNTR(pipe), dvscntr); - intel_de_write_fw(display, DVSSURF(pipe), - intel_plane_ggtt_offset(plane_state) + dvssurf_offset); + intel_de_write_fw(display, DVSSURF(pipe), plane_state->surf); if (display->platform.g4x) g4x_sprite_update_gamma(plane_state); @@ -1387,9 +1375,9 @@ g4x_sprite_check(struct intel_crtc_state *crtc_state, return ret; if (DISPLAY_VER(display) >= 7) - plane_state->ctl = ivb_sprite_ctl(crtc_state, plane_state); + plane_state->ctl = ivb_sprite_ctl(plane_state); else - plane_state->ctl = g4x_sprite_ctl(crtc_state, plane_state); + plane_state->ctl = g4x_sprite_ctl(plane_state); return 0; } @@ -1439,7 +1427,7 @@ vlv_sprite_check(struct intel_crtc_state *crtc_state, if (ret) return ret; - plane_state->ctl = vlv_sprite_ctl(crtc_state, plane_state); + plane_state->ctl = vlv_sprite_ctl(plane_state); return 0; } @@ -1624,6 +1612,7 @@ intel_sprite_plane_create(struct intel_display *display, plane->capture_error = vlv_sprite_capture_error; plane->get_hw_state = vlv_sprite_get_hw_state; plane->check_plane = vlv_sprite_check; + plane->surf_offset = i965_plane_surf_offset; plane->max_stride = i965_plane_max_stride; plane->min_alignment = vlv_plane_min_alignment; plane->min_cdclk = vlv_plane_min_cdclk; @@ -1648,6 +1637,7 @@ intel_sprite_plane_create(struct intel_display *display, plane->capture_error = ivb_sprite_capture_error; plane->get_hw_state = ivb_sprite_get_hw_state; plane->check_plane = g4x_sprite_check; + plane->surf_offset = i965_plane_surf_offset; if (display->platform.broadwell || display->platform.haswell) { plane->max_stride = hsw_sprite_max_stride; @@ -1673,6 +1663,7 @@ intel_sprite_plane_create(struct intel_display *display, plane->capture_error = g4x_sprite_capture_error; plane->get_hw_state = g4x_sprite_get_hw_state; plane->check_plane = g4x_sprite_check; + plane->surf_offset = i965_plane_surf_offset; plane->max_stride = g4x_sprite_max_stride; plane->min_alignment = g4x_sprite_min_alignment; plane->min_cdclk = g4x_sprite_min_cdclk; diff --git a/drivers/gpu/drm/i915/display/intel_tc.c b/drivers/gpu/drm/i915/display/intel_tc.c index 3bc57579fe53..c4a5601c5107 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.c +++ b/drivers/gpu/drm/i915/display/intel_tc.c @@ -3,6 +3,8 @@ * Copyright © 2019 Intel Corporation */ +#include <linux/iopoll.h> + #include <drm/drm_print.h> #include "i915_reg.h" @@ -23,10 +25,6 @@ #include "intel_modeset_lock.h" #include "intel_tc.h" -#define DP_PIN_ASSIGNMENT_C 0x3 -#define DP_PIN_ASSIGNMENT_D 0x4 -#define DP_PIN_ASSIGNMENT_E 0x5 - enum tc_port_mode { TC_PORT_DISCONNECTED, TC_PORT_TBT_ALT, @@ -65,7 +63,9 @@ struct intel_tc_port { enum tc_port_mode mode; enum tc_port_mode init_mode; enum phy_fia phy_fia; + enum intel_tc_pin_assignment pin_assignment; u8 phy_fia_idx; + u8 max_lane_count; }; static enum intel_display_power_domain @@ -251,6 +251,9 @@ tc_port_power_domain(struct intel_tc_port *tc) { enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base); + if (tc_port == TC_PORT_NONE) + return POWER_DOMAIN_INVALID; + return POWER_DOMAIN_PORT_DDI_LANES_TC1 + tc_port - TC_PORT_1; } @@ -263,13 +266,14 @@ assert_tc_port_power_enabled(struct intel_tc_port *tc) !intel_display_power_is_enabled(display, tc_port_power_domain(tc))); } -static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port) +static u32 get_lane_mask(struct intel_tc_port *tc) { - struct intel_display *display = to_intel_display(dig_port); - struct intel_tc_port *tc = to_tc_port(dig_port); + struct intel_display *display = 
to_intel_display(tc->dig_port); + intel_wakeref_t wakeref; u32 lane_mask; - lane_mask = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia)); + with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) + lane_mask = intel_de_read(display, PORT_TX_DFLEXDPSP(tc->phy_fia)); drm_WARN_ON(display->drm, lane_mask == 0xffffffff); assert_tc_cold_blocked(tc); @@ -278,75 +282,87 @@ static u32 intel_tc_port_get_lane_mask(struct intel_digital_port *dig_port) return lane_mask >> DP_LANE_ASSIGNMENT_SHIFT(tc->phy_fia_idx); } -u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port) +static char pin_assignment_name(enum intel_tc_pin_assignment pin_assignment) { - struct intel_display *display = to_intel_display(dig_port); - struct intel_tc_port *tc = to_tc_port(dig_port); - u32 pin_mask; - - pin_mask = intel_de_read(display, PORT_TX_DFLEXPA1(tc->phy_fia)); - - drm_WARN_ON(display->drm, pin_mask == 0xffffffff); - assert_tc_cold_blocked(tc); + if (pin_assignment == INTEL_TC_PIN_ASSIGNMENT_NONE) + return '-'; - return (pin_mask & DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx)) >> - DP_PIN_ASSIGNMENT_SHIFT(tc->phy_fia_idx); + return 'A' + pin_assignment - INTEL_TC_PIN_ASSIGNMENT_A; } -static int lnl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port) +static enum intel_tc_pin_assignment +get_pin_assignment(struct intel_tc_port *tc) { - struct intel_display *display = to_intel_display(dig_port); - enum tc_port tc_port = intel_encoder_to_tc(&dig_port->base); + struct intel_display *display = to_intel_display(tc->dig_port); + enum tc_port tc_port = intel_encoder_to_tc(&tc->dig_port->base); + enum intel_tc_pin_assignment pin_assignment; intel_wakeref_t wakeref; - u32 val, pin_assignment; + i915_reg_t reg; + u32 mask; + u32 val; + + if (tc->mode == TC_PORT_TBT_ALT) + return INTEL_TC_PIN_ASSIGNMENT_NONE; + + if (DISPLAY_VER(display) >= 20) { + reg = TCSS_DDI_STATUS(tc_port); + mask = TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK; + } else { + reg = PORT_TX_DFLEXPA1(tc->phy_fia); + mask = DP_PIN_ASSIGNMENT_MASK(tc->phy_fia_idx); + } with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) - val = intel_de_read(display, TCSS_DDI_STATUS(tc_port)); + val = intel_de_read(display, reg); - pin_assignment = - REG_FIELD_GET(TCSS_DDI_STATUS_PIN_ASSIGNMENT_MASK, val); + drm_WARN_ON(display->drm, val == 0xffffffff); + assert_tc_cold_blocked(tc); + + pin_assignment = (val & mask) >> (ffs(mask) - 1); switch (pin_assignment) { + case INTEL_TC_PIN_ASSIGNMENT_A: + case INTEL_TC_PIN_ASSIGNMENT_B: + case INTEL_TC_PIN_ASSIGNMENT_F: + drm_WARN_ON(display->drm, DISPLAY_VER(display) > 11); + break; + case INTEL_TC_PIN_ASSIGNMENT_NONE: + case INTEL_TC_PIN_ASSIGNMENT_C: + case INTEL_TC_PIN_ASSIGNMENT_D: + case INTEL_TC_PIN_ASSIGNMENT_E: + break; default: MISSING_CASE(pin_assignment); - fallthrough; - case DP_PIN_ASSIGNMENT_D: - return 2; - case DP_PIN_ASSIGNMENT_C: - case DP_PIN_ASSIGNMENT_E: - return 4; } + + return pin_assignment; } -static int mtl_tc_port_get_max_lane_count(struct intel_digital_port *dig_port) +static int mtl_get_max_lane_count(struct intel_tc_port *tc) { - struct intel_display *display = to_intel_display(dig_port); - intel_wakeref_t wakeref; - u32 pin_mask; + enum intel_tc_pin_assignment pin_assignment; - with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) - pin_mask = intel_tc_port_get_pin_assignment_mask(dig_port); + pin_assignment = get_pin_assignment(tc); - switch (pin_mask) { + switch (pin_assignment) { + case INTEL_TC_PIN_ASSIGNMENT_NONE: + return 0; 
default: - MISSING_CASE(pin_mask); + MISSING_CASE(pin_assignment); fallthrough; - case DP_PIN_ASSIGNMENT_D: + case INTEL_TC_PIN_ASSIGNMENT_D: return 2; - case DP_PIN_ASSIGNMENT_C: - case DP_PIN_ASSIGNMENT_E: + case INTEL_TC_PIN_ASSIGNMENT_C: + case INTEL_TC_PIN_ASSIGNMENT_E: return 4; } } -static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port) +static int icl_get_max_lane_count(struct intel_tc_port *tc) { - struct intel_display *display = to_intel_display(dig_port); - intel_wakeref_t wakeref; u32 lane_mask = 0; - with_intel_display_power(display, POWER_DOMAIN_DISPLAY_CORE, wakeref) - lane_mask = intel_tc_port_get_lane_mask(dig_port); + lane_mask = get_lane_mask(tc); switch (lane_mask) { default: @@ -365,23 +381,44 @@ static int intel_tc_port_get_max_lane_count(struct intel_digital_port *dig_port) } } +static int get_max_lane_count(struct intel_tc_port *tc) +{ + struct intel_display *display = to_intel_display(tc->dig_port); + + if (tc->mode != TC_PORT_DP_ALT) + return 4; + + if (DISPLAY_VER(display) >= 14) + return mtl_get_max_lane_count(tc); + + return icl_get_max_lane_count(tc); +} + +static void read_pin_configuration(struct intel_tc_port *tc) +{ + tc->pin_assignment = get_pin_assignment(tc); + tc->max_lane_count = get_max_lane_count(tc); +} + int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port) { - struct intel_display *display = to_intel_display(dig_port); struct intel_tc_port *tc = to_tc_port(dig_port); - if (!intel_encoder_is_tc(&dig_port->base) || tc->mode != TC_PORT_DP_ALT) + if (!intel_encoder_is_tc(&dig_port->base)) return 4; - assert_tc_cold_blocked(tc); + return tc->max_lane_count; +} - if (DISPLAY_VER(display) >= 20) - return lnl_tc_port_get_max_lane_count(dig_port); +enum intel_tc_pin_assignment +intel_tc_port_get_pin_assignment(struct intel_digital_port *dig_port) +{ + struct intel_tc_port *tc = to_tc_port(dig_port); - if (DISPLAY_VER(display) >= 14) - return mtl_tc_port_get_max_lane_count(dig_port); + if (!intel_encoder_is_tc(&dig_port->base)) + return INTEL_TC_PIN_ASSIGNMENT_NONE; - return intel_tc_port_get_max_lane_count(dig_port); + return tc->pin_assignment; } void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port, @@ -596,9 +633,12 @@ static void icl_tc_phy_get_hw_state(struct intel_tc_port *tc) tc_cold_wref = __tc_cold_block(tc, &domain); tc->mode = tc_phy_get_current_mode(tc); - if (tc->mode != TC_PORT_DISCONNECTED) + if (tc->mode != TC_PORT_DISCONNECTED) { tc->lock_wakeref = tc_cold_block(tc); + read_pin_configuration(tc); + } + __tc_cold_unblock(tc, domain, tc_cold_wref); } @@ -656,8 +696,11 @@ static bool icl_tc_phy_connect(struct intel_tc_port *tc, tc->lock_wakeref = tc_cold_block(tc); - if (tc->mode == TC_PORT_TBT_ALT) + if (tc->mode == TC_PORT_TBT_ALT) { + read_pin_configuration(tc); + return true; + } if ((!tc_phy_is_ready(tc) || !icl_tc_phy_take_ownership(tc, true)) && @@ -668,6 +711,7 @@ static bool icl_tc_phy_connect(struct intel_tc_port *tc, goto out_unblock_tc_cold; } + read_pin_configuration(tc); if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes)) goto out_release_phy; @@ -858,9 +902,12 @@ static void adlp_tc_phy_get_hw_state(struct intel_tc_port *tc) port_wakeref = intel_display_power_get(display, port_power_domain); tc->mode = tc_phy_get_current_mode(tc); - if (tc->mode != TC_PORT_DISCONNECTED) + if (tc->mode != TC_PORT_DISCONNECTED) { tc->lock_wakeref = tc_cold_block(tc); + read_pin_configuration(tc); + } + intel_display_power_put(display, port_power_domain, port_wakeref); } @@ 
-873,6 +920,9 @@ static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes) if (tc->mode == TC_PORT_TBT_ALT) { tc->lock_wakeref = tc_cold_block(tc); + + read_pin_configuration(tc); + return true; } @@ -894,6 +944,8 @@ static bool adlp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes) tc->lock_wakeref = tc_cold_block(tc); + read_pin_configuration(tc); + if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes)) goto out_unblock_tc_cold; @@ -1000,8 +1052,13 @@ static bool xelpdp_tc_phy_wait_for_tcss_power(struct intel_tc_port *tc, bool enabled) { struct intel_display *display = to_intel_display(tc->dig_port); + bool is_enabled; + int ret; - if (wait_for(xelpdp_tc_phy_tcss_power_is_enabled(tc) == enabled, 5)) { + ret = poll_timeout_us(is_enabled = xelpdp_tc_phy_tcss_power_is_enabled(tc), + is_enabled == enabled, + 200, 5000, false); + if (ret) { drm_dbg_kms(display->drm, "Port %s: timeout waiting for TCSS power to get %s\n", str_enabled_disabled(enabled), @@ -1124,9 +1181,18 @@ static void xelpdp_tc_phy_get_hw_state(struct intel_tc_port *tc) tc_cold_wref = __tc_cold_block(tc, &domain); tc->mode = tc_phy_get_current_mode(tc); - if (tc->mode != TC_PORT_DISCONNECTED) + if (tc->mode != TC_PORT_DISCONNECTED) { tc->lock_wakeref = tc_cold_block(tc); + read_pin_configuration(tc); + /* + * Set a valid lane count value for a DP-alt sink which got + * disconnected. The driver can only disable the output on this PHY. + */ + if (tc->max_lane_count == 0) + tc->max_lane_count = 4; + } + drm_WARN_ON(display->drm, (tc->mode == TC_PORT_DP_ALT || tc->mode == TC_PORT_LEGACY) && !xelpdp_tc_phy_tcss_power_is_enabled(tc)); @@ -1138,14 +1204,19 @@ static bool xelpdp_tc_phy_connect(struct intel_tc_port *tc, int required_lanes) { tc->lock_wakeref = tc_cold_block(tc); - if (tc->mode == TC_PORT_TBT_ALT) + if (tc->mode == TC_PORT_TBT_ALT) { + read_pin_configuration(tc); + return true; + } if (!xelpdp_tc_phy_enable_tcss_power(tc, true)) goto out_unblock_tccold; xelpdp_tc_phy_take_ownership(tc, true); + read_pin_configuration(tc); + if (!tc_phy_verify_legacy_or_dp_alt_mode(tc, required_lanes)) goto out_release_phy; @@ -1226,14 +1297,19 @@ static void tc_phy_get_hw_state(struct intel_tc_port *tc) tc->phy_ops->get_hw_state(tc); } -static bool tc_phy_is_ready_and_owned(struct intel_tc_port *tc, - bool phy_is_ready, bool phy_is_owned) +/* Is the PHY owned by display i.e. is it in legacy or DP-alt mode? 
*/ +static bool tc_phy_owned_by_display(struct intel_tc_port *tc, + bool phy_is_ready, bool phy_is_owned) { struct intel_display *display = to_intel_display(tc->dig_port); - drm_WARN_ON(display->drm, phy_is_owned && !phy_is_ready); + if (DISPLAY_VER(display) < 20) { + drm_WARN_ON(display->drm, phy_is_owned && !phy_is_ready); - return phy_is_ready && phy_is_owned; + return phy_is_ready && phy_is_owned; + } else { + return phy_is_owned; + } } static bool tc_phy_is_connected(struct intel_tc_port *tc, @@ -1244,7 +1320,7 @@ static bool tc_phy_is_connected(struct intel_tc_port *tc, bool phy_is_owned = tc_phy_is_owned(tc); bool is_connected; - if (tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) + if (tc_phy_owned_by_display(tc, phy_is_ready, phy_is_owned)) is_connected = port_pll_type == ICL_PORT_DPLL_MG_PHY; else is_connected = port_pll_type == ICL_PORT_DPLL_DEFAULT; @@ -1263,8 +1339,13 @@ static bool tc_phy_is_connected(struct intel_tc_port *tc, static bool tc_phy_wait_for_ready(struct intel_tc_port *tc) { struct intel_display *display = to_intel_display(tc->dig_port); + bool is_ready; + int ret; - if (wait_for(tc_phy_is_ready(tc), 500)) { + ret = poll_timeout_us(is_ready = tc_phy_is_ready(tc), + is_ready, + 1000, 500 * 1000, false); + if (ret) { drm_err(display->drm, "Port %s: timeout waiting for PHY ready\n", tc->port_name); @@ -1352,7 +1433,7 @@ tc_phy_get_current_mode(struct intel_tc_port *tc) phy_is_ready = tc_phy_is_ready(tc); phy_is_owned = tc_phy_is_owned(tc); - if (!tc_phy_is_ready_and_owned(tc, phy_is_ready, phy_is_owned)) { + if (!tc_phy_owned_by_display(tc, phy_is_ready, phy_is_owned)) { mode = get_tc_mode_in_phy_not_owned_state(tc, live_mode); } else { drm_WARN_ON(display->drm, live_mode == TC_PORT_TBT_ALT); @@ -1441,21 +1522,24 @@ static void intel_tc_port_reset_mode(struct intel_tc_port *tc, intel_display_power_flush_work(display); if (!intel_tc_cold_requires_aux_pw(dig_port)) { enum intel_display_power_domain aux_domain; - bool aux_powered; aux_domain = intel_aux_power_domain(dig_port); - aux_powered = intel_display_power_is_enabled(display, aux_domain); - drm_WARN_ON(display->drm, aux_powered); + if (intel_display_power_is_enabled(display, aux_domain)) + drm_dbg_kms(display->drm, "Port %s: AUX unexpectedly powered\n", + tc->port_name); } tc_phy_disconnect(tc); if (!force_disconnect) tc_phy_connect(tc, required_lanes); - drm_dbg_kms(display->drm, "Port %s: TC port mode reset (%s -> %s)\n", + drm_dbg_kms(display->drm, + "Port %s: TC port mode reset (%s -> %s) pin assignment: %c max lanes: %d\n", tc->port_name, tc_port_mode_name(old_tc_mode), - tc_port_mode_name(tc->mode)); + tc_port_mode_name(tc->mode), + pin_assignment_name(tc->pin_assignment), + tc->max_lane_count); } static bool intel_tc_port_needs_reset(struct intel_tc_port *tc) @@ -1610,9 +1694,11 @@ void intel_tc_port_sanitize_mode(struct intel_digital_port *dig_port, __intel_tc_port_put_link(tc); } - drm_dbg_kms(display->drm, "Port %s: sanitize mode (%s)\n", + drm_dbg_kms(display->drm, "Port %s: sanitize mode (%s) pin assignment: %c max lanes: %d\n", tc->port_name, - tc_port_mode_name(tc->mode)); + tc_port_mode_name(tc->mode), + pin_assignment_name(tc->pin_assignment), + tc->max_lane_count); mutex_unlock(&tc->lock); } diff --git a/drivers/gpu/drm/i915/display/intel_tc.h b/drivers/gpu/drm/i915/display/intel_tc.h index 26c4265368c1..fff8b96e4972 100644 --- a/drivers/gpu/drm/i915/display/intel_tc.h +++ b/drivers/gpu/drm/i915/display/intel_tc.h @@ -12,6 +12,75 @@ struct intel_crtc_state; struct intel_digital_port; 
struct intel_encoder; +/* + * The following enum values must stay fixed, as they match the corresponding + * pin assignment fields in the PORT_TX_DFLEXPA1 and TCSS_DDI_STATUS registers. + */ +enum intel_tc_pin_assignment { /* Lanes (a) Signal/ Cable Notes */ + /* DP USB Rate (b) type */ + INTEL_TC_PIN_ASSIGNMENT_NONE = 0, /* 4 - - - (c) */ + INTEL_TC_PIN_ASSIGNMENT_A, /* 2/4 0 GEN2 TC->TC (d,e) */ + INTEL_TC_PIN_ASSIGNMENT_B, /* 1/2 1 GEN2 TC->TC (d,f,g) */ + INTEL_TC_PIN_ASSIGNMENT_C, /* 4 0 DP2 TC->TC (h) */ + INTEL_TC_PIN_ASSIGNMENT_D, /* 2 1 DP2 TC->TC (h,g) */ + INTEL_TC_PIN_ASSIGNMENT_E, /* 4 0 DP2 TC->DP */ + INTEL_TC_PIN_ASSIGNMENT_F, /* 2 1 GEN1/DP1 TC->DP (d,g,i) */ + /* + * (a) - DP unidirectional lanes, each lane using 1 differential signal + * pair. + * - USB SuperSpeed bidirectional lane, using 2 differential (TX and + * RX) signal pairs. + * - USB 2.0 (HighSpeed) unidirectional lane, using 1 differential + * signal pair. Not indicated, this lane is always present on pin + * assignments A-D and never present on pin assignments E/F. + * (b) - GEN1: USB 3.1 GEN1 bit rate (5 Gbps) and signaling. This + * is used for transferring only a USB stream. + * - GEN2: USB 3.1 GEN2 bit rate (10 Gbps) and signaling. This + * allows transferring an HBR3 (8.1 Gbps) DP stream. + * - DP1: Display Port signaling defined by the DP v1.3 Standard, + * with a maximum bit rate of HBR3. + * - DP2: Display Port signaling defined by the DP v2.1 Standard, + * with a maximum bit rate defined by the DP Alt Mode + * v2.1a Standard depending on the cable type as follows: + * - Passive (Full-Featured) USB 3.2 GEN1 + * TC->TC cables (CC3G1-X) : UHBR10 + * - Passive (Full-Featured) USB 3.2/4 GEN2 and + * Thunderbolt Alt Mode GEN2 + * TC->TC cables (CC3G2-X) all : UHBR10 + * DP54 logo : UHBR13.5 + * - Passive (Full-Featured) USB4 GEN3+ and + * Thunderbolt Alt Mode GEN3+ + * TC->TC cables (CC4G3-X) all : UHBR13.5 + * DP80 logo : UHBR20 + * - Active Re-Timed or + * Active Linear Re-driven (LRD) + * USB3.2 GEN1/2 and USB4 GEN2+ + * TC->TC cables all : HBR3 + * with DP_BR CTS : UHBR10 + * DP54 logo : UHBR13.5 + * DP80 logo : UHBR20 + * - Passive/Active Re-Timed or + * Active Linear Re-driven (LRD) + * TC->DP cables with DP_BR CTS/DP8K logo : HBR3 + * with DP_BR CTS : UHBR10 + * DP54 logo : UHBR13.5 + * DP80 logo : UHBR20 + * (c) Used in TBT-alt/legacy modes and on LNL+ after the sink + * disconnected in DP-alt mode. + * (d) Only defined by the DP Alt Standard v1.0a, deprecated by v1.0b, + * only supported on ICL. + * (e) GEN2 passive 1 m cable: 4 DP lanes, GEN2 active cable: 2 DP lanes. + * (f) GEN2 passive 1 m cable: 2 DP lanes, GEN2 active cable: 1 DP lane. + * (g) These pin assignments are also referred to as (USB/DP) + * multifunction or Multifunction Display Port (MFD) modes. + * (h) Also used where one end of the cable is a captive connector, + * attached to a DP->HDMI/DVI/VGA converter. + * (i) The DP end of the cable is a captive connector attached to a + * (DP/USB) multifunction dock as defined by the DockPort v1.0a + * specification. 
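As a standalone illustration (not part of the patch) of how the DP-alt pin assignments documented above translate into usable DP lane counts, mirroring the simplified mapping in the mtl_get_max_lane_count() hunk earlier in this diff; the enum and function names here are invented for the sketch:

#include <stdio.h>

enum tc_pin_assignment { PIN_NONE, PIN_A, PIN_B, PIN_C, PIN_D, PIN_E, PIN_F };

/*
 * DP lane count for the MTL+ DP-alt cases handled in this diff: C and E
 * dedicate all four lanes to DP, D splits the connector with USB and leaves
 * two, NONE means no DP-alt sink is connected. A/B/F are the deprecated
 * ICL-only assignments and are resolved via the FIA lane mask instead.
 */
static int dp_alt_max_lanes(enum tc_pin_assignment pin)
{
	switch (pin) {
	case PIN_NONE:
		return 0;
	case PIN_D:
		return 2;
	case PIN_C:
	case PIN_E:
		return 4;
	default:
		return 2;	/* unknown assignment: fall back like case D */
	}
}

int main(void)
{
	printf("C: %d lanes, D: %d lanes, none: %d lanes\n",
	       dp_alt_max_lanes(PIN_C), dp_alt_max_lanes(PIN_D),
	       dp_alt_max_lanes(PIN_NONE));
	return 0;
}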
+ */ +}; + bool intel_tc_port_in_tbt_alt_mode(struct intel_digital_port *dig_port); bool intel_tc_port_in_dp_alt_mode(struct intel_digital_port *dig_port); bool intel_tc_port_in_legacy_mode(struct intel_digital_port *dig_port); @@ -19,7 +88,8 @@ bool intel_tc_port_handles_hpd_glitches(struct intel_digital_port *dig_port); bool intel_tc_port_connected(struct intel_encoder *encoder); -u32 intel_tc_port_get_pin_assignment_mask(struct intel_digital_port *dig_port); +enum intel_tc_pin_assignment +intel_tc_port_get_pin_assignment(struct intel_digital_port *dig_port); int intel_tc_port_max_lane_count(struct intel_digital_port *dig_port); void intel_tc_port_set_fia_lane_count(struct intel_digital_port *dig_port, int required_lanes); diff --git a/drivers/gpu/drm/i915/display/intel_vblank.c b/drivers/gpu/drm/i915/display/intel_vblank.c index 70ba7aa26bf4..c15234c1d96e 100644 --- a/drivers/gpu/drm/i915/display/intel_vblank.c +++ b/drivers/gpu/drm/i915/display/intel_vblank.c @@ -3,9 +3,12 @@ * Copyright © 2022-2023 Intel Corporation */ +#include <linux/iopoll.h> + #include <drm/drm_vblank.h> #include "i915_drv.h" +#include "i915_utils.h" #include "intel_color.h" #include "intel_crtc.h" #include "intel_de.h" @@ -492,9 +495,14 @@ static void wait_for_pipe_scanline_moving(struct intel_crtc *crtc, bool state) { struct intel_display *display = to_intel_display(crtc); enum pipe pipe = crtc->pipe; + bool is_moving; + int ret; /* Wait for the display line to settle/start moving */ - if (wait_for(pipe_scanline_is_moving(display, pipe) == state, 100)) + ret = poll_timeout_us(is_moving = pipe_scanline_is_moving(display, pipe), + is_moving == state, + 500, 100 * 1000, false); + if (ret) drm_err(display->drm, "pipe %c scanline %s wait timed out\n", pipe_name(pipe), str_on_off(state)); @@ -724,9 +732,9 @@ int intel_vblank_evade(struct intel_vblank_evade_ctx *evade) break; if (!timeout) { - drm_err(display->drm, - "Potential atomic update failure on pipe %c\n", - pipe_name(crtc->pipe)); + drm_dbg_kms(display->drm, + "Potential atomic update failure on pipe %c\n", + pipe_name(crtc->pipe)); break; } diff --git a/drivers/gpu/drm/i915/display/intel_vbt_defs.h b/drivers/gpu/drm/i915/display/intel_vbt_defs.h index 92c04811aa28..70e31520c560 100644 --- a/drivers/gpu/drm/i915/display/intel_vbt_defs.h +++ b/drivers/gpu/drm/i915/display/intel_vbt_defs.h @@ -37,7 +37,7 @@ #ifndef _INTEL_VBT_DEFS_H_ #define _INTEL_VBT_DEFS_H_ -#include "intel_bios.h" +#include "intel_dsi_vbt_defs.h" /* EDID derived structures */ struct bdb_edid_pnp_id { @@ -437,6 +437,22 @@ enum vbt_gmbus_ddi { #define BDB_230_VBT_DP_MAX_LINK_RATE_UHBR13P5 6 #define BDB_230_VBT_DP_MAX_LINK_RATE_UHBR20 7 +/* EDP link rate 263+ */ +#define BDB_263_VBT_EDP_LINK_RATE_1_62 BIT_U32(0) +#define BDB_263_VBT_EDP_LINK_RATE_2_16 BIT_U32(1) +#define BDB_263_VBT_EDP_LINK_RATE_2_43 BIT_U32(2) +#define BDB_263_VBT_EDP_LINK_RATE_2_7 BIT_U32(3) +#define BDB_263_VBT_EDP_LINK_RATE_3_24 BIT_U32(4) +#define BDB_263_VBT_EDP_LINK_RATE_4_32 BIT_U32(5) +#define BDB_263_VBT_EDP_LINK_RATE_5_4 BIT_U32(6) +#define BDB_263_VBT_EDP_LINK_RATE_6_75 BIT_U32(7) +#define BDB_263_VBT_EDP_LINK_RATE_8_1 BIT_U32(8) +#define BDB_263_VBT_EDP_LINK_RATE_10 BIT_U32(9) +#define BDB_263_VBT_EDP_LINK_RATE_13_5 BIT_U32(10) +#define BDB_263_VBT_EDP_LINK_RATE_20 BIT_U32(11) +#define BDB_263_VBT_EDP_NUM_RATES 12 +#define BDB_263_VBT_EDP_RATES_MASK GENMASK(BDB_263_VBT_EDP_NUM_RATES - 1, 0) + /* * The child device config, aka the display device data structure, provides a * description of a port and its 
configuration on the platform. @@ -547,6 +563,8 @@ struct child_device_config { u8 dp_max_link_rate:3; /* 216+ */ u8 dp_max_link_rate_reserved:5; /* 216+ */ u8 efp_index; /* 256+ */ + u32 edp_data_rate_override:12; /* 263+ */ + u32 edp_data_rate_override_reserved:20; /* 263+ */ } __packed; struct bdb_general_definitions { diff --git a/drivers/gpu/drm/i915/display/intel_wm.c b/drivers/gpu/drm/i915/display/intel_wm.c index bba82e888db2..f887a664fe22 100644 --- a/drivers/gpu/drm/i915/display/intel_wm.c +++ b/drivers/gpu/drm/i915/display/intel_wm.c @@ -5,7 +5,6 @@ #include <linux/debugfs.h> -#include <drm/drm_file.h> #include <drm/drm_print.h> #include "i9xx_wm.h" @@ -390,15 +389,15 @@ static const struct file_operations i915_cur_wm_latency_fops = { void intel_wm_debugfs_register(struct intel_display *display) { - struct drm_minor *minor = display->drm->primary; + struct dentry *debugfs_root = display->drm->debugfs_root; - debugfs_create_file("i915_pri_wm_latency", 0644, minor->debugfs_root, + debugfs_create_file("i915_pri_wm_latency", 0644, debugfs_root, display, &i915_pri_wm_latency_fops); - debugfs_create_file("i915_spr_wm_latency", 0644, minor->debugfs_root, + debugfs_create_file("i915_spr_wm_latency", 0644, debugfs_root, display, &i915_spr_wm_latency_fops); - debugfs_create_file("i915_cur_wm_latency", 0644, minor->debugfs_root, + debugfs_create_file("i915_cur_wm_latency", 0644, debugfs_root, display, &i915_cur_wm_latency_fops); skl_watermark_debugfs_register(display); diff --git a/drivers/gpu/drm/i915/display/skl_scaler.c b/drivers/gpu/drm/i915/display/skl_scaler.c index d77798499c57..c6cccf170ff1 100644 --- a/drivers/gpu/drm/i915/display/skl_scaler.c +++ b/drivers/gpu/drm/i915/display/skl_scaler.c @@ -10,6 +10,7 @@ #include "intel_display_regs.h" #include "intel_display_trace.h" #include "intel_display_types.h" +#include "intel_display_wa.h" #include "intel_fb.h" #include "skl_scaler.h" #include "skl_universal_plane.h" @@ -91,11 +92,9 @@ static void skl_scaler_min_src_size(const struct drm_format_info *format, } } -static void skl_scaler_max_src_size(struct intel_crtc *crtc, +static void skl_scaler_max_src_size(struct intel_display *display, int *max_w, int *max_h) { - struct intel_display *display = to_intel_display(crtc); - if (DISPLAY_VER(display) >= 14) { *max_w = 4096; *max_h = 8192; @@ -134,6 +133,23 @@ static void skl_scaler_max_dst_size(struct intel_crtc *crtc, } } +enum drm_mode_status +skl_scaler_mode_valid(struct intel_display *display, + const struct drm_display_mode *mode, + enum intel_output_format output_format, + int num_joined_pipes) +{ + int max_h, max_w; + + if (num_joined_pipes < 2 && output_format == INTEL_OUTPUT_FORMAT_YCBCR420) { + skl_scaler_max_src_size(display, &max_w, &max_h); + if (mode->hdisplay > max_h) + return MODE_NO_420; + } + + return MODE_OK; +} + static int skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, unsigned int scaler_user, int *scaler_id, @@ -201,7 +217,7 @@ skl_update_scaler(struct intel_crtc_state *crtc_state, bool force_detach, } skl_scaler_min_src_size(format, modifier, &min_src_w, &min_src_h); - skl_scaler_max_src_size(crtc, &max_src_w, &max_src_h); + skl_scaler_max_src_size(display, &max_src_w, &max_src_h); skl_scaler_min_dst_size(&min_dst_w, &min_dst_h); skl_scaler_max_dst_size(crtc, &max_dst_w, &max_dst_h); @@ -747,6 +763,9 @@ void skl_pfit_enable(const struct intel_crtc_state *crtc_state) crtc_state->scaler_state.scaler_id < 0)) return; + if (intel_display_wa(display, 14011503117)) + 
adl_scaler_ecc_mask(crtc_state); + drm_rect_init(&src, 0, 0, drm_rect_width(&crtc_state->pipe_src) << 16, drm_rect_height(&crtc_state->pipe_src) << 16); @@ -923,3 +942,29 @@ void skl_scaler_get_config(struct intel_crtc_state *crtc_state) else scaler_state->scaler_users &= ~(1 << SKL_CRTC_INDEX); } + +void adl_scaler_ecc_mask(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + + if (!crtc_state->pch_pfit.enabled) + return; + + intel_de_write(display, XELPD_DISPLAY_ERR_FATAL_MASK, ~0); +} + +void adl_scaler_ecc_unmask(const struct intel_crtc_state *crtc_state) +{ + struct intel_display *display = to_intel_display(crtc_state); + struct intel_crtc *crtc = to_intel_crtc(crtc_state->uapi.crtc); + const struct intel_crtc_scaler_state *scaler_state = + &crtc_state->scaler_state; + + if (scaler_state->scaler_id < 0) + return; + + intel_de_write_fw(display, + SKL_PS_ECC_STAT(crtc->pipe, scaler_state->scaler_id), + 1); + intel_de_write(display, XELPD_DISPLAY_ERR_FATAL_MASK, 0); +} diff --git a/drivers/gpu/drm/i915/display/skl_scaler.h b/drivers/gpu/drm/i915/display/skl_scaler.h index 355ea15260ca..12a19016c5f6 100644 --- a/drivers/gpu/drm/i915/display/skl_scaler.h +++ b/drivers/gpu/drm/i915/display/skl_scaler.h @@ -5,10 +5,14 @@ #ifndef INTEL_SCALER_H #define INTEL_SCALER_H +enum drm_mode_status; +struct drm_display_mode; struct intel_atomic_state; struct intel_crtc; struct intel_crtc_state; +struct intel_display; struct intel_dsb; +enum intel_output_format; struct intel_plane; struct intel_plane_state; @@ -32,4 +36,13 @@ void skl_scaler_disable(const struct intel_crtc_state *old_crtc_state); void skl_scaler_get_config(struct intel_crtc_state *crtc_state); +enum drm_mode_status +skl_scaler_mode_valid(struct intel_display *display, + const struct drm_display_mode *mode, + enum intel_output_format output_format, + int num_joined_pipes); + +void adl_scaler_ecc_mask(const struct intel_crtc_state *crtc_state); + +void adl_scaler_ecc_unmask(const struct intel_crtc_state *crtc_state); #endif diff --git a/drivers/gpu/drm/i915/display/skl_universal_plane.c b/drivers/gpu/drm/i915/display/skl_universal_plane.c index e20972ddfa09..950dc79dbdd4 100644 --- a/drivers/gpu/drm/i915/display/skl_universal_plane.c +++ b/drivers/gpu/drm/i915/display/skl_universal_plane.c @@ -10,6 +10,7 @@ #include "pxp/intel_pxp.h" #include "i915_drv.h" +#include "i915_utils.h" #include "intel_bo.h" #include "intel_de.h" #include "intel_display_irq.h" @@ -1166,8 +1167,7 @@ static u32 skl_plane_ctl_crtc(const struct intel_crtc_state *crtc_state) return plane_ctl; } -static u32 skl_plane_ctl(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +static u32 skl_plane_ctl(const struct intel_plane_state *plane_state) { struct intel_display *display = to_intel_display(plane_state); const struct drm_framebuffer *fb = plane_state->hw.fb; @@ -1225,8 +1225,7 @@ static u32 glk_plane_color_ctl_crtc(const struct intel_crtc_state *crtc_state) return plane_color_ctl; } -static u32 glk_plane_color_ctl(const struct intel_crtc_state *crtc_state, - const struct intel_plane_state *plane_state) +static u32 glk_plane_color_ctl(const struct intel_plane_state *plane_state) { struct intel_display *display = to_intel_display(plane_state); const struct drm_framebuffer *fb = plane_state->hw.fb; @@ -1271,12 +1270,6 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state, u32 offset = plane_state->view.color_plane[color_plane].offset; if 
(intel_fb_uses_dpt(fb)) { - /* - * The DPT object contains only one vma, so the VMA's offset - * within the DPT is always 0. - */ - drm_WARN_ON(display->drm, plane_state->dpt_vma && - intel_dpt_offset(plane_state->dpt_vma)); drm_WARN_ON(display->drm, offset & 0x1fffff); return offset >> 9; } else { @@ -1285,13 +1278,20 @@ static u32 skl_surf_address(const struct intel_plane_state *plane_state, } } -static u32 skl_plane_surf(const struct intel_plane_state *plane_state, - int color_plane) +static int icl_plane_color_plane(const struct intel_plane_state *plane_state) +{ + if (plane_state->planar_linked_plane && !plane_state->is_y_plane) + return 1; + else + return 0; +} + +static u32 skl_plane_surf_offset(const struct intel_plane_state *plane_state) { + int color_plane = icl_plane_color_plane(plane_state); u32 plane_surf; - plane_surf = intel_plane_ggtt_offset(plane_state) + - skl_surf_address(plane_state, color_plane); + plane_surf = skl_surf_address(plane_state, color_plane); if (plane_state->decrypt) plane_surf |= PLANE_SURF_DECRYPT; @@ -1373,14 +1373,6 @@ static void icl_plane_csc_load_black(struct intel_dsb *dsb, intel_de_write_dsb(display, dsb, PLANE_CSC_POSTOFF(pipe, plane_id, 2), 0); } -static int icl_plane_color_plane(const struct intel_plane_state *plane_state) -{ - if (plane_state->planar_linked_plane && !plane_state->is_y_plane) - return 1; - else - return 0; -} - static void skl_plane_update_noarm(struct intel_dsb *dsb, struct intel_plane *plane, @@ -1476,7 +1468,7 @@ skl_plane_update_arm(struct intel_dsb *dsb, intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id), plane_ctl); intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id), - skl_plane_surf(plane_state, 0)); + plane_state->surf); } static void icl_plane_update_sel_fetch_noarm(struct intel_dsb *dsb, @@ -1632,7 +1624,6 @@ icl_plane_update_arm(struct intel_dsb *dsb, struct intel_display *display = to_intel_display(plane); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; - int color_plane = icl_plane_color_plane(plane_state); u32 plane_ctl; plane_ctl = plane_state->ctl | @@ -1658,7 +1649,7 @@ icl_plane_update_arm(struct intel_dsb *dsb, intel_de_write_dsb(display, dsb, PLANE_CTL(pipe, plane_id), plane_ctl); intel_de_write_dsb(display, dsb, PLANE_SURF(pipe, plane_id), - skl_plane_surf(plane_state, color_plane)); + plane_state->surf); } static void skl_plane_capture_error(struct intel_crtc *crtc, @@ -1682,10 +1673,10 @@ skl_plane_async_flip(struct intel_dsb *dsb, struct intel_display *display = to_intel_display(plane); enum plane_id plane_id = plane->id; enum pipe pipe = plane->pipe; - u32 plane_ctl = plane_state->ctl, plane_surf; + u32 plane_ctl = plane_state->ctl; + u32 plane_surf = plane_state->surf; plane_ctl |= skl_plane_ctl_crtc(crtc_state); - plane_surf = skl_plane_surf(plane_state, 0); if (async_flip) { if (DISPLAY_VER(display) >= 30) @@ -2363,11 +2354,10 @@ static int skl_plane_check(struct intel_crtc_state *crtc_state, plane_state->damage = DRM_RECT_INIT(0, 0, 0, 0); } - plane_state->ctl = skl_plane_ctl(crtc_state, plane_state); + plane_state->ctl = skl_plane_ctl(plane_state); if (DISPLAY_VER(display) >= 10) - plane_state->color_ctl = glk_plane_color_ctl(crtc_state, - plane_state); + plane_state->color_ctl = glk_plane_color_ctl(plane_state); if (intel_format_info_is_yuv_semiplanar(fb->format, fb->modifier) && icl_is_hdr_plane(display, plane->id)) @@ -2814,7 +2804,7 @@ static void skl_disable_tiling(struct intel_plane *plane) intel_de_write_fw(display, PLANE_CTL(plane->pipe, plane->id), 
plane_ctl); intel_de_write_fw(display, PLANE_SURF(plane->pipe, plane->id), - skl_plane_surf(state, 0)); + state->surf); } struct intel_plane * @@ -2865,6 +2855,8 @@ skl_universal_plane_create(struct intel_display *display, } plane->disable_tiling = skl_disable_tiling; + plane->surf_offset = skl_plane_surf_offset; + if (DISPLAY_VER(display) >= 13) plane->max_stride = adl_plane_max_stride; else @@ -3191,21 +3183,18 @@ bool skl_fixup_initial_plane_config(struct intel_crtc *crtc, to_intel_plane_state(plane->base.state); enum plane_id plane_id = plane->id; enum pipe pipe = crtc->pipe; - u32 base; if (!plane_state->uapi.visible) return false; - base = intel_plane_ggtt_offset(plane_state); - /* * We may have moved the surface to a different * part of ggtt, make the plane aware of that. */ - if (plane_config->base == base) + if (plane_config->base == plane_state->surf) return false; - intel_de_write(display, PLANE_SURF(pipe, plane_id), base); + intel_de_write(display, PLANE_SURF(pipe, plane_id), plane_state->surf); return true; } diff --git a/drivers/gpu/drm/i915/display/skl_watermark.c b/drivers/gpu/drm/i915/display/skl_watermark.c index 222c069fdadb..d74cbb43ae6f 100644 --- a/drivers/gpu/drm/i915/display/skl_watermark.c +++ b/drivers/gpu/drm/i915/display/skl_watermark.c @@ -6,7 +6,6 @@ #include <linux/debugfs.h> #include <drm/drm_blend.h> -#include <drm/drm_file.h> #include <drm/drm_print.h> #include "soc/intel_dram.h" @@ -1389,7 +1388,7 @@ skl_allocate_plane_ddb(struct skl_plane_ddb_iter *iter, { u16 size, extra = 0; - if (data_rate) { + if (data_rate && iter->data_rate) { extra = min_t(u16, iter->size, DIV64_U64_ROUND_UP(iter->size * data_rate, iter->data_rate)); @@ -2273,6 +2272,11 @@ static int skl_max_wm0_lines(const struct intel_crtc_state *crtc_state) return wm0_lines; } +/* + * TODO: In case we use PKG_C_LATENCY to allow C-states when the delayed vblank + * size is too small for the package C exit latency we need to notify PSR about + * the scenario to apply Wa_16025596647. + */ static int skl_max_wm_level_for_vblank(struct intel_crtc_state *crtc_state, int wm0_lines) { @@ -3205,12 +3209,12 @@ adjust_wm_latency(struct intel_display *display, } /* - * WA Level-0 adjustment for 16GB DIMMs: SKL+ + * WA Level-0 adjustment for 16Gb DIMMs: SKL+ * If we could not get dimm info enable this WA to prevent from - * any underrun. If not able to get Dimm info assume 16GB dimm + * any underrun. If not able to get DIMM info assume 16Gb DIMM * to avoid any underrun. 
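For the skl_allocate_plane_ddb() change above, a standalone sketch (simplified names, not the driver's iterator) of the proportional DDB split it performs, including the new guard for a zero total data rate:

#include <stdio.h>
#include <stdint.h>

/*
 * Hand a plane a share of the remaining DDB blocks proportional to its data
 * rate, rounded up and capped at what is left; a zero plane or total rate
 * yields no extra blocks (the division-by-zero case the patch guards).
 */
static uint16_t ddb_extra_blocks(uint16_t remaining, uint64_t rate, uint64_t total)
{
	uint64_t extra;

	if (!rate || !total)
		return 0;

	extra = ((uint64_t)remaining * rate + total - 1) / total; /* DIV_ROUND_UP */
	return extra < remaining ? (uint16_t)extra : remaining;
}

int main(void)
{
	/* 100 blocks left, plane contributes 30 of a 120 total rate -> 25 */
	printf("%u\n", ddb_extra_blocks(100, 30, 120));
	return 0;
}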
*/ - if (!display->platform.dg2 && dram_info->wm_lv_0_adjust_needed) + if (!display->platform.dg2 && dram_info->has_16gb_dimms) wm[0] += 1; } @@ -4033,14 +4037,14 @@ DEFINE_SHOW_ATTRIBUTE(intel_sagv_status); void skl_watermark_debugfs_register(struct intel_display *display) { - struct drm_minor *minor = display->drm->primary; + struct dentry *debugfs_root = display->drm->debugfs_root; if (HAS_IPC(display)) - debugfs_create_file("i915_ipc_status", 0644, minor->debugfs_root, + debugfs_create_file("i915_ipc_status", 0644, debugfs_root, display, &skl_watermark_ipc_status_fops); if (HAS_SAGV(display)) - debugfs_create_file("i915_sagv_status", 0444, minor->debugfs_root, + debugfs_create_file("i915_sagv_status", 0444, debugfs_root, display, &intel_sagv_status_fops); } diff --git a/drivers/gpu/drm/i915/display/vlv_dsi.c b/drivers/gpu/drm/i915/display/vlv_dsi.c index 6d9f3312de7e..c9a53fde79c4 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi.c @@ -761,7 +761,7 @@ static void intel_dsi_pre_enable(struct intel_atomic_state *state, if (display->platform.valleyview || display->platform.cherryview) { /* Disable DPOunit clock gating, can stall pipe */ - intel_de_rmw(display, DSPCLK_GATE_D(display), + intel_de_rmw(display, VLV_DSPCLK_GATE_D, 0, DPOUNIT_CLOCK_GATE_DISABLE); } @@ -918,7 +918,7 @@ static void intel_dsi_post_disable(struct intel_atomic_state *state, } else { vlv_dsi_pll_disable(encoder); - intel_de_rmw(display, DSPCLK_GATE_D(display), + intel_de_rmw(display, VLV_DSPCLK_GATE_D, DPOUNIT_CLOCK_GATE_DISABLE, 0); } diff --git a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c index d42b61e6f076..f078b9cda96c 100644 --- a/drivers/gpu/drm/i915/display/vlv_dsi_pll.c +++ b/drivers/gpu/drm/i915/display/vlv_dsi_pll.c @@ -25,12 +25,12 @@ * Yogesh Mohan Marimuthu <yogesh.mohan.marimuthu@intel.com> */ +#include <linux/iopoll.h> #include <linux/kernel.h> #include <linux/string_helpers.h> #include <drm/drm_print.h> -#include "i915_utils.h" #include "intel_de.h" #include "intel_display_types.h" #include "intel_dsi.h" @@ -142,11 +142,9 @@ static int vlv_dsi_pclk(struct intel_encoder *encoder, pll_div &= DSI_PLL_M1_DIV_MASK; pll_div = pll_div >> DSI_PLL_M1_DIV_SHIFT; - while (pll_ctl) { - pll_ctl = pll_ctl >> 1; - p++; - } - p--; + p = fls(pll_ctl); + if (p) + p--; if (!p) { drm_err(display->drm, "wrong P1 divisor\n"); @@ -216,6 +214,8 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder, const struct intel_crtc_state *config) { struct intel_display *display = to_intel_display(encoder); + u32 val; + int ret; drm_dbg_kms(display->drm, "\n"); @@ -233,9 +233,10 @@ void vlv_dsi_pll_enable(struct intel_encoder *encoder, vlv_cck_write(display->drm, CCK_REG_DSI_PLL_CONTROL, config->dsi_pll.ctrl); - if (wait_for(vlv_cck_read(display->drm, CCK_REG_DSI_PLL_CONTROL) & - DSI_PLL_LOCK, 20)) { - + ret = poll_timeout_us(val = vlv_cck_read(display->drm, CCK_REG_DSI_PLL_CONTROL), + val & DSI_PLL_LOCK, + 500, 20 * 1000, false); + if (ret) { vlv_cck_put(display->drm); drm_err(display->drm, "DSI PLL lock failed\n"); return; @@ -262,6 +263,11 @@ void vlv_dsi_pll_disable(struct intel_encoder *encoder) vlv_cck_put(display->drm); } +static bool has_dsic_clock(struct intel_display *display) +{ + return display->platform.broxton; +} + bool bxt_dsi_pll_is_enabled(struct intel_display *display) { bool enabled; @@ -284,7 +290,7 @@ bool bxt_dsi_pll_is_enabled(struct intel_display *display) * causes a system hang. 
*/ val = intel_de_read(display, BXT_DSI_PLL_CTL); - if (display->platform.geminilake) { + if (!has_dsic_clock(display)) { if (!(val & BXT_DSIA_16X_MASK)) { drm_dbg_kms(display->drm, "Invalid PLL divider (%08x)\n", val); @@ -358,6 +364,8 @@ u32 bxt_dsi_get_pclk(struct intel_encoder *encoder, u32 pclk; config->dsi_pll.ctrl = intel_de_read(display, BXT_DSI_PLL_CTL); + if (!has_dsic_clock(display)) + config->dsi_pll.ctrl &= ~BXT_DSIC_16X_MASK; pclk = bxt_dsi_pclk(encoder, config); @@ -514,7 +522,9 @@ int bxt_dsi_pll_compute(struct intel_encoder *encoder, * Spec says both have to be programmed, even if one is not getting * used. Configure MIPI_CLOCK_CTL dividers in modeset */ - config->dsi_pll.ctrl = dsi_ratio | BXT_DSIA_16X_BY2 | BXT_DSIC_16X_BY2; + config->dsi_pll.ctrl = dsi_ratio | BXT_DSIA_16X_BY2; + if (has_dsic_clock(display)) + config->dsi_pll.ctrl |= BXT_DSIC_16X_BY2; /* As per recommendation from hardware team, * Prog PVD ratio =1 if dsi ratio <= 50 diff --git a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c index 86d9d2fcb6a6..e747f5ed195e 100644 --- a/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c +++ b/drivers/gpu/drm/i915/gem/selftests/i915_gem_client_blt.c @@ -110,6 +110,7 @@ struct tiled_blits { static bool fastblit_supports_x_tiling(const struct drm_i915_private *i915) { + struct intel_display *display = i915->display; int gen = GRAPHICS_VER(i915); /* XY_FAST_COPY_BLT does not exist on pre-gen9 platforms */ @@ -121,7 +122,7 @@ static bool fastblit_supports_x_tiling(const struct drm_i915_private *i915) if (GRAPHICS_VER_FULL(i915) < IP_VER(12, 55)) return false; - return HAS_DISPLAY(i915); + return HAS_DISPLAY(display); } static bool fast_blit_ok(const struct blit_buffer *buf) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c index 6c499692d61e..88b147fa5cb1 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_clock_utils.c @@ -148,7 +148,7 @@ static u32 gen4_read_clock_frequency(struct intel_uncore *uncore) * * Testing on actual hardware has shown there is no /16. 
*/ - return DIV_ROUND_CLOSEST(i9xx_fsb_freq(uncore->i915), 4) * 1000; + return DIV_ROUND_CLOSEST(intel_fsb_freq(uncore->i915), 4) * 1000; } static u32 read_clock_frequency(struct intel_uncore *uncore) diff --git a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c index 4dc23b8d3aa2..dcd40b30a96b 100644 --- a/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c +++ b/drivers/gpu/drm/i915/gt/intel_gt_debugfs.c @@ -82,14 +82,15 @@ static void gt_debugfs_register(struct intel_gt *gt, struct dentry *root) void intel_gt_debugfs_register(struct intel_gt *gt) { + struct dentry *debugfs_root = gt->i915->drm.debugfs_root; struct dentry *root; char gtname[4]; - if (!gt->i915->drm.primary->debugfs_root) + if (!debugfs_root) return; snprintf(gtname, sizeof(gtname), "gt%u", gt->info.id); - root = debugfs_create_dir(gtname, gt->i915->drm.primary->debugfs_root); + root = debugfs_create_dir(gtname, debugfs_root); if (IS_ERR(root)) return; diff --git a/drivers/gpu/drm/i915/gt/intel_rps.c b/drivers/gpu/drm/i915/gt/intel_rps.c index 0b35fdd461d4..006042e0b229 100644 --- a/drivers/gpu/drm/i915/gt/intel_rps.c +++ b/drivers/gpu/drm/i915/gt/intel_rps.c @@ -9,6 +9,7 @@ #include "display/intel_display.h" #include "display/intel_display_rps.h" +#include "soc/intel_dram.h" #include "i915_drv.h" #include "i915_irq.h" #include "i915_reg.h" @@ -276,20 +277,24 @@ static void gen5_rps_init(struct intel_rps *rps) { struct drm_i915_private *i915 = rps_to_i915(rps); struct intel_uncore *uncore = rps_to_uncore(rps); + unsigned int fsb_freq, mem_freq; u8 fmax, fmin, fstart; u32 rgvmodectl; int c_m, i; - if (i915->fsb_freq <= 3200000) + fsb_freq = intel_fsb_freq(i915); + mem_freq = intel_mem_freq(i915); + + if (fsb_freq <= 3200000) c_m = 0; - else if (i915->fsb_freq <= 4800000) + else if (fsb_freq <= 4800000) c_m = 1; else c_m = 2; for (i = 0; i < ARRAY_SIZE(cparams); i++) { if (cparams[i].i == c_m && - cparams[i].t == DIV_ROUND_CLOSEST(i915->mem_freq, 1000)) { + cparams[i].t == DIV_ROUND_CLOSEST(mem_freq, 1000)) { rps->ips.m = cparams[i].m; rps->ips.c = cparams[i].c; break; diff --git a/drivers/gpu/drm/i915/gvt/debugfs.c b/drivers/gpu/drm/i915/gvt/debugfs.c index 673534f061ef..415422b5943c 100644 --- a/drivers/gpu/drm/i915/gvt/debugfs.c +++ b/drivers/gpu/drm/i915/gvt/debugfs.c @@ -194,9 +194,9 @@ void intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu) void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu) { struct intel_gvt *gvt = vgpu->gvt; - struct drm_minor *minor = gvt->gt->i915->drm.primary; + struct dentry *debugfs_root = gvt->gt->i915->drm.debugfs_root; - if (minor->debugfs_root && gvt->debugfs_root) { + if (debugfs_root && gvt->debugfs_root) { debugfs_remove_recursive(vgpu->debugfs); vgpu->debugfs = NULL; } @@ -208,9 +208,9 @@ void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu) */ void intel_gvt_debugfs_init(struct intel_gvt *gvt) { - struct drm_minor *minor = gvt->gt->i915->drm.primary; + struct dentry *debugfs_root = gvt->gt->i915->drm.debugfs_root; - gvt->debugfs_root = debugfs_create_dir("gvt", minor->debugfs_root); + gvt->debugfs_root = debugfs_create_dir("gvt", debugfs_root); debugfs_create_ulong("num_tracked_mmio", 0444, gvt->debugfs_root, &gvt->mmio.num_tracked_mmio); @@ -222,9 +222,9 @@ void intel_gvt_debugfs_init(struct intel_gvt *gvt) */ void intel_gvt_debugfs_clean(struct intel_gvt *gvt) { - struct drm_minor *minor = gvt->gt->i915->drm.primary; + struct dentry *debugfs_root = gvt->gt->i915->drm.debugfs_root; - if (minor->debugfs_root) { + if (debugfs_root) { 
debugfs_remove_recursive(gvt->debugfs_root); gvt->debugfs_root = NULL; } diff --git a/drivers/gpu/drm/i915/i915_debugfs.c b/drivers/gpu/drm/i915/i915_debugfs.c index 967c0501e91e..23fa098c4479 100644 --- a/drivers/gpu/drm/i915/i915_debugfs.c +++ b/drivers/gpu/drm/i915/i915_debugfs.c @@ -720,26 +720,24 @@ static const struct i915_debugfs_files { {"i915_gem_drop_caches", &i915_drop_caches_fops}, }; -void i915_debugfs_register(struct drm_i915_private *dev_priv) +void i915_debugfs_register(struct drm_i915_private *i915) { - struct drm_minor *minor = dev_priv->drm.primary; + struct dentry *debugfs_root = i915->drm.debugfs_root; int i; - i915_debugfs_params(dev_priv); + i915_debugfs_params(i915); - debugfs_create_file("i915_forcewake_user", S_IRUSR, minor->debugfs_root, - to_i915(minor->dev), &i915_forcewake_fops); + debugfs_create_file("i915_forcewake_user", S_IRUSR, debugfs_root, + i915, &i915_forcewake_fops); for (i = 0; i < ARRAY_SIZE(i915_debugfs_files); i++) { - debugfs_create_file(i915_debugfs_files[i].name, - S_IRUGO | S_IWUSR, - minor->debugfs_root, - to_i915(minor->dev), + debugfs_create_file(i915_debugfs_files[i].name, S_IRUGO | S_IWUSR, + debugfs_root, i915, i915_debugfs_files[i].fops); } drm_debugfs_create_files(i915_debugfs_list, ARRAY_SIZE(i915_debugfs_list), - minor->debugfs_root, minor); + debugfs_root, i915->drm.primary); - i915_gpu_error_debugfs_register(dev_priv); + i915_gpu_error_debugfs_register(i915); } diff --git a/drivers/gpu/drm/i915/i915_debugfs_params.c b/drivers/gpu/drm/i915/i915_debugfs_params.c index 33d2dcb0de65..89ab5eb14779 100644 --- a/drivers/gpu/drm/i915/i915_debugfs_params.c +++ b/drivers/gpu/drm/i915/i915_debugfs_params.c @@ -248,11 +248,11 @@ i915_debugfs_create_charp(const char *name, umode_t mode, /* add a subdirectory with files for each i915 param */ struct dentry *i915_debugfs_params(struct drm_i915_private *i915) { - struct drm_minor *minor = i915->drm.primary; + struct dentry *debugfs_root = i915->drm.debugfs_root; struct i915_params *params = &i915->params; struct dentry *dir; - dir = debugfs_create_dir("i915_params", minor->debugfs_root); + dir = debugfs_create_dir("i915_params", debugfs_root); if (IS_ERR(dir)) return dir; diff --git a/drivers/gpu/drm/i915/i915_driver.c b/drivers/gpu/drm/i915/i915_driver.c index c6263c6d3384..70f042ce8705 100644 --- a/drivers/gpu/drm/i915/i915_driver.c +++ b/drivers/gpu/drm/i915/i915_driver.c @@ -977,7 +977,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915) intel_power_domains_disable(display); drm_client_dev_suspend(&i915->drm, false); - if (HAS_DISPLAY(i915)) { + if (HAS_DISPLAY(display)) { drm_kms_helper_poll_disable(&i915->drm); intel_display_driver_disable_user_access(display); @@ -989,7 +989,7 @@ void i915_driver_shutdown(struct drm_i915_private *i915) intel_irq_suspend(i915); intel_hpd_cancel_work(display); - if (HAS_DISPLAY(i915)) + if (HAS_DISPLAY(display)) intel_display_driver_suspend_access(display); intel_encoder_suspend_all(display); @@ -1060,7 +1060,7 @@ static int i915_drm_suspend(struct drm_device *dev) * properly. 
*/ intel_power_domains_disable(display); drm_client_dev_suspend(dev, false); - if (HAS_DISPLAY(dev_priv)) { + if (HAS_DISPLAY(display)) { drm_kms_helper_poll_disable(dev); intel_display_driver_disable_user_access(display); } @@ -1072,7 +1072,7 @@ static int i915_drm_suspend(struct drm_device *dev) intel_irq_suspend(dev_priv); intel_hpd_cancel_work(display); - if (HAS_DISPLAY(dev_priv)) + if (HAS_DISPLAY(display)) intel_display_driver_suspend_access(display); intel_encoder_suspend_all(display); @@ -1219,7 +1219,7 @@ static int i915_drm_resume(struct drm_device *dev) */ intel_irq_resume(dev_priv); - if (HAS_DISPLAY(dev_priv)) + if (HAS_DISPLAY(display)) drm_mode_config_reset(dev); i915_gem_resume(dev_priv); @@ -1228,14 +1228,14 @@ static int i915_drm_resume(struct drm_device *dev) intel_clock_gating_init(dev_priv); - if (HAS_DISPLAY(dev_priv)) + if (HAS_DISPLAY(display)) intel_display_driver_resume_access(display); intel_hpd_init(display); intel_display_driver_resume(display); - if (HAS_DISPLAY(dev_priv)) { + if (HAS_DISPLAY(display)) { intel_display_driver_enable_user_access(display); drm_kms_helper_poll_enable(dev); } diff --git a/drivers/gpu/drm/i915/i915_drv.h b/drivers/gpu/drm/i915/i915_drv.h index 4e4e89746aa6..2f3965feada1 100644 --- a/drivers/gpu/drm/i915/i915_drv.h +++ b/drivers/gpu/drm/i915/i915_drv.h @@ -237,8 +237,6 @@ struct drm_i915_private { bool preserve_bios_swizzle; - unsigned int fsb_freq, mem_freq, is_ddr3; - unsigned int hpll_freq; unsigned int czclk_freq; diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c index 0e4b832dff84..89f7c04a3330 100644 --- a/drivers/gpu/drm/i915/i915_gpu_error.c +++ b/drivers/gpu/drm/i915/i915_gpu_error.c @@ -2445,11 +2445,11 @@ static const struct file_operations i915_error_state_fops = { void i915_gpu_error_debugfs_register(struct drm_i915_private *i915) { - struct drm_minor *minor = i915->drm.primary; + struct dentry *debugfs_root = i915->drm.debugfs_root; - debugfs_create_file("i915_error_state", 0644, minor->debugfs_root, i915, + debugfs_create_file("i915_error_state", 0644, debugfs_root, i915, &i915_error_state_fops); - debugfs_create_file("i915_gpu_info", 0644, minor->debugfs_root, i915, + debugfs_create_file("i915_gpu_info", 0644, debugfs_root, i915, &i915_gpu_info_fops); } diff --git a/drivers/gpu/drm/i915/i915_irq.c b/drivers/gpu/drm/i915/i915_irq.c index 191ed8bb1d9c..a5fa40ab5de2 100644 --- a/drivers/gpu/drm/i915/i915_irq.c +++ b/drivers/gpu/drm/i915/i915_irq.c @@ -439,7 +439,7 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg) * able to process them after we restore SDEIER (as soon as we restore * it, we'll get an interrupt if SDEIIR still has something to process * due to its back queue). */ - if (!HAS_PCH_NOP(i915)) { + if (!HAS_PCH_NOP(display)) { sde_ier = raw_reg_read(regs, SDEIER); raw_reg_write(regs, SDEIER, 0); } @@ -459,7 +459,7 @@ static irqreturn_t ilk_irq_handler(int irq, void *arg) de_iir = raw_reg_read(regs, DEIIR); if (de_iir) { raw_reg_write(regs, DEIIR, de_iir); - if (DISPLAY_VER(i915) >= 7) + if (DISPLAY_VER(display) >= 7) ivb_display_irq_handler(display, de_iir); else ilk_display_irq_handler(display, de_iir); @@ -834,6 +834,7 @@ static void cherryview_irq_postinstall(struct drm_i915_private *dev_priv) static u32 i9xx_error_mask(struct drm_i915_private *i915) { + struct intel_display *display = i915->display; /* * On gen2/3 FBC generates (seemingly spurious) * display INVALID_GTT/INVALID_GTT_PTE table errors. 
@@ -846,7 +847,7 @@ static u32 i9xx_error_mask(struct drm_i915_private *i915) * Unfortunately we can't mask off individual PGTBL_ER bits, * so we just have to mask off all page table errors via EMR. */ - if (HAS_FBC(i915)) + if (HAS_FBC(display)) return I915_ERROR_MEMORY_REFRESH; else return I915_ERROR_PAGE_TABLE | @@ -924,12 +925,12 @@ static void i915_irq_postinstall(struct drm_i915_private *dev_priv) I915_MASTER_ERROR_INTERRUPT | I915_USER_INTERRUPT; - if (DISPLAY_VER(dev_priv) >= 3) { + if (DISPLAY_VER(display) >= 3) { dev_priv->irq_mask &= ~I915_ASLE_INTERRUPT; enable_mask |= I915_ASLE_INTERRUPT; } - if (HAS_HOTPLUG(dev_priv)) { + if (HAS_HOTPLUG(display)) { dev_priv->irq_mask &= ~I915_DISPLAY_PORT_INTERRUPT; enable_mask |= I915_DISPLAY_PORT_INTERRUPT; } @@ -963,7 +964,7 @@ static irqreturn_t i915_irq_handler(int irq, void *arg) ret = IRQ_HANDLED; - if (HAS_HOTPLUG(dev_priv) && + if (HAS_HOTPLUG(display) && iir & I915_DISPLAY_PORT_INTERRUPT) hotplug_status = i9xx_hpd_irq_ack(display); diff --git a/drivers/gpu/drm/i915/i915_reg.h b/drivers/gpu/drm/i915/i915_reg.h index 03b895897f60..354ef75ef6a5 100644 --- a/drivers/gpu/drm/i915/i915_reg.h +++ b/drivers/gpu/drm/i915/i915_reg.h @@ -412,9 +412,9 @@ #define FW_BLC _MMIO(0x20d8) #define FW_BLC2 _MMIO(0x20dc) #define FW_BLC_SELF _MMIO(0x20e0) /* 915+ only */ -#define FW_BLC_SELF_EN_MASK (1 << 31) -#define FW_BLC_SELF_FIFO_MASK (1 << 16) /* 945 only */ -#define FW_BLC_SELF_EN (1 << 15) /* 945 only */ +#define FW_BLC_SELF_EN_MASK REG_BIT(31) +#define FW_BLC_SELF_FIFO_MASK REG_BIT(16) /* 945 only */ +#define FW_BLC_SELF_EN REG_BIT(15) /* 945 only */ #define MM_BURST_LENGTH 0x00700000 #define MM_FIFO_WATERMARK 0x0001F000 #define LM_BURST_LENGTH 0x00000700 @@ -613,7 +613,8 @@ #define DSTATE_GFX_CLOCK_GATING (1 << 1) #define DSTATE_DOT_CLOCK_GATING (1 << 0) -#define DSPCLK_GATE_D(__i915) _MMIO(DISPLAY_MMIO_BASE(__i915) + 0x6200) +#define DSPCLK_GATE_D _MMIO(0x6200) +#define VLV_DSPCLK_GATE_D _MMIO(VLV_DISPLAY_BASE + 0x6200) # define DPUNIT_B_CLOCK_GATE_DISABLE (1 << 30) /* 965 */ # define VSUNIT_CLOCK_GATE_DISABLE (1 << 29) /* 965 */ # define VRHUNIT_CLOCK_GATE_DISABLE (1 << 28) /* 965 */ diff --git a/drivers/gpu/drm/i915/i915_switcheroo.c b/drivers/gpu/drm/i915/i915_switcheroo.c index 4c02a04be681..3a95a55b2e87 100644 --- a/drivers/gpu/drm/i915/i915_switcheroo.c +++ b/drivers/gpu/drm/i915/i915_switcheroo.c @@ -15,13 +15,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, enum vga_switcheroo_state state) { struct drm_i915_private *i915 = pdev_to_i915(pdev); + struct intel_display *display = i915 ? i915->display : NULL; pm_message_t pmm = { .event = PM_EVENT_SUSPEND }; if (!i915) { dev_err(&pdev->dev, "DRM not initialized, aborting switch.\n"); return; } - if (!HAS_DISPLAY(i915)) { + if (!HAS_DISPLAY(display)) { dev_err(&pdev->dev, "Device state not initialized, aborting switch.\n"); return; } @@ -44,13 +45,14 @@ static void i915_switcheroo_set_state(struct pci_dev *pdev, static bool i915_switcheroo_can_switch(struct pci_dev *pdev) { struct drm_i915_private *i915 = pdev_to_i915(pdev); + struct intel_display *display = i915 ? i915->display : NULL; /* * FIXME: open_count is protected by drm_global_mutex but that would lead to * locking inversion with the driver load path. And the access here is * completely racy anyway. So don't bother with locking for now. 
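The FW_BLC_SELF hunk above converts open-coded (1 << N) masks to REG_BIT(N). A standalone sketch of one reason such masks are written with unsigned helpers (the kernel's actual REG_BIT definition is not shown in this diff, so the macro below is only an approximation):

#include <inttypes.h>
#include <stdio.h>

/* An unsigned single-bit register mask, in the spirit of REG_BIT(). */
#define MY_REG_BIT(n)	(UINT32_C(1) << (n))

int main(void)
{
	/*
	 * A plain (1 << 31) shifts a 1 into the sign bit of a signed int,
	 * which is formally undefined and negative in practice; the unsigned
	 * constant stays well-defined for all 32 bit positions.
	 */
	uint32_t mask = MY_REG_BIT(31);

	printf("0x%08" PRIx32 "\n", mask);	/* prints 0x80000000 */
	return 0;
}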
*/ - return i915 && HAS_DISPLAY(i915) && atomic_read(&i915->drm.open_count) == 0; + return i915 && HAS_DISPLAY(display) && atomic_read(&i915->drm.open_count) == 0; } static const struct vga_switcheroo_client_ops i915_switcheroo_ops = { diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h index f7fb40cfdb70..9cb40c2c4b12 100644 --- a/drivers/gpu/drm/i915/i915_utils.h +++ b/drivers/gpu/drm/i915/i915_utils.h @@ -267,8 +267,13 @@ wait_remaining_ms_from_jiffies(unsigned long timestamp_jiffies, int to_wait_ms) (Wmax)) #define wait_for(COND, MS) _wait_for((COND), (MS) * 1000, 10, 1000) -/* If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. */ -#if IS_ENABLED(CONFIG_DRM_I915_DEBUG) && IS_ENABLED(CONFIG_PREEMPT_COUNT) +/* + * If CONFIG_PREEMPT_COUNT is disabled, in_atomic() always reports false. + * On PREEMPT_RT the context isn't becoming atomic because it is used in an + * interrupt handler or because a spinlock_t is acquired. This leads to + * warnings which don't occur otherwise and therefore the check is disabled. + */ +#if IS_ENABLED(CONFIG_DRM_I915_DEBUG) && IS_ENABLED(CONFIG_PREEMPT_COUNT) && !defined(CONFIG_PREEMPT_RT) # define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) WARN_ON_ONCE((ATOMIC) && !in_atomic()) #else # define _WAIT_FOR_ATOMIC_CHECK(ATOMIC) do { } while (0) diff --git a/drivers/gpu/drm/i915/intel_clock_gating.c b/drivers/gpu/drm/i915/intel_clock_gating.c index f86a3629ae9e..467740969431 100644 --- a/drivers/gpu/drm/i915/intel_clock_gating.c +++ b/drivers/gpu/drm/i915/intel_clock_gating.c @@ -132,16 +132,17 @@ static void ibx_init_clock_gating(struct drm_i915_private *i915) static void g4x_disable_trickle_feed(struct drm_i915_private *dev_priv) { + struct intel_display *display = dev_priv->display; enum pipe pipe; - for_each_pipe(dev_priv, pipe) { - intel_uncore_rmw(&dev_priv->uncore, DSPCNTR(dev_priv, pipe), + for_each_pipe(display, pipe) { + intel_uncore_rmw(&dev_priv->uncore, DSPCNTR(display, pipe), 0, DISP_TRICKLE_FEED_DISABLE); - intel_uncore_rmw(&dev_priv->uncore, DSPSURF(dev_priv, pipe), + intel_uncore_rmw(&dev_priv->uncore, DSPSURF(display, pipe), 0, 0); intel_uncore_posting_read(&dev_priv->uncore, - DSPSURF(dev_priv, pipe)); + DSPSURF(display, pipe)); } } @@ -218,7 +219,7 @@ static void cpt_init_clock_gating(struct drm_i915_private *i915) /* The below fixes the weird display corruption, a few pixels shifted * downward, on (only) LVDS of some HP laptops with IVY. */ - for_each_pipe(i915, pipe) { + for_each_pipe(display, pipe) { val = intel_uncore_read(&i915->uncore, TRANS_CHICKEN2(pipe)); val |= TRANS_CHICKEN2_TIMING_OVERRIDE; val &= ~TRANS_CHICKEN2_FDI_POLARITY_REVERSED; @@ -229,7 +230,7 @@ static void cpt_init_clock_gating(struct drm_i915_private *i915) intel_uncore_write(&i915->uncore, TRANS_CHICKEN2(pipe), val); } /* WADP0ClockGatingDisable */ - for_each_pipe(i915, pipe) { + for_each_pipe(display, pipe) { intel_uncore_write(&i915->uncore, TRANS_CHICKEN1(pipe), TRANS_CHICKEN1_DP0UNIT_GC_DISABLE); } @@ -307,11 +308,13 @@ static void gen6_init_clock_gating(struct drm_i915_private *i915) static void lpt_init_clock_gating(struct drm_i915_private *i915) { + struct intel_display *display = i915->display; + /* * TODO: this bit should only be enabled when really needed, then * disabled when not needed anymore in order to save power. 
*/ - if (HAS_PCH_LPT_LP(i915)) + if (HAS_PCH_LPT_LP(display)) intel_uncore_rmw(&i915->uncore, SOUTH_DSPCLK_GATE_D, 0, PCH_LP_PARTITION_LEVEL_DISABLE); @@ -355,7 +358,9 @@ static void dg2_init_clock_gating(struct drm_i915_private *i915) static void cnp_init_clock_gating(struct drm_i915_private *i915) { - if (!HAS_PCH_CNP(i915)) + struct intel_display *display = i915->display; + + if (!HAS_PCH_CNP(display)) return; /* Display WA #1181 WaSouthDisplayDisablePWMCGEGating: cnp */ @@ -421,6 +426,7 @@ static void skl_init_clock_gating(struct drm_i915_private *i915) static void bdw_init_clock_gating(struct drm_i915_private *i915) { + struct intel_display *display = i915->display; enum pipe pipe; /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ @@ -432,7 +438,7 @@ static void bdw_init_clock_gating(struct drm_i915_private *i915) /* WaPsrDPAMaskVBlankInSRD:bdw */ intel_uncore_rmw(&i915->uncore, CHICKEN_PAR1_1, 0, HSW_MASK_VBL_TO_PIPE_IN_SRD); - for_each_pipe(i915, pipe) { + for_each_pipe(display, pipe) { /* WaPsrDPRSUnmaskVBlankInSRD:bdw */ intel_uncore_rmw(&i915->uncore, CHICKEN_PIPESL_1(pipe), 0, BDW_UNMASK_VBL_TO_REGS_IN_SRD); @@ -468,6 +474,7 @@ static void bdw_init_clock_gating(struct drm_i915_private *i915) static void hsw_init_clock_gating(struct drm_i915_private *i915) { + struct intel_display *display = i915->display; enum pipe pipe; /* WaFbcAsynchFlipDisableFbcQueue:hsw,bdw */ @@ -476,7 +483,7 @@ static void hsw_init_clock_gating(struct drm_i915_private *i915) /* WaPsrDPAMaskVBlankInSRD:hsw */ intel_uncore_rmw(&i915->uncore, CHICKEN_PAR1_1, 0, HSW_MASK_VBL_TO_PIPE_IN_SRD); - for_each_pipe(i915, pipe) { + for_each_pipe(display, pipe) { /* WaPsrDPRSUnmaskVBlankInSRD:hsw */ intel_uncore_rmw(&i915->uncore, CHICKEN_PIPESL_1(pipe), 0, HSW_UNMASK_VBL_TO_REGS_IN_SRD); @@ -494,6 +501,8 @@ static void hsw_init_clock_gating(struct drm_i915_private *i915) static void ivb_init_clock_gating(struct drm_i915_private *i915) { + struct intel_display *display = i915->display; + intel_uncore_write(&i915->uncore, ILK_DSPCLK_GATE_D, ILK_VRHUNIT_CLOCK_GATE_DISABLE); /* WaFbcAsynchFlipDisableFbcQueue:ivb */ @@ -531,7 +540,7 @@ static void ivb_init_clock_gating(struct drm_i915_private *i915) intel_uncore_rmw(&i915->uncore, GEN6_MBCUNIT_SNPCR, GEN6_MBC_SNPCR_MASK, GEN6_MBC_SNPCR_MED); - if (!HAS_PCH_NOP(i915)) + if (!HAS_PCH_NOP(display)) cpt_init_clock_gating(i915); gen6_check_mch_setup(i915); @@ -611,7 +620,7 @@ static void g4x_init_clock_gating(struct drm_i915_private *i915) OVCUNIT_CLOCK_GATE_DISABLE; if (IS_GM45(i915)) dspclk_gate |= DSSUNIT_CLOCK_GATE_DISABLE; - intel_uncore_write(&i915->uncore, DSPCLK_GATE_D(i915), dspclk_gate); + intel_uncore_write(&i915->uncore, DSPCLK_GATE_D, dspclk_gate); g4x_disable_trickle_feed(i915); } @@ -622,7 +631,7 @@ static void i965gm_init_clock_gating(struct drm_i915_private *i915) intel_uncore_write(uncore, RENCLK_GATE_D1, I965_RCC_CLOCK_GATE_DISABLE); intel_uncore_write(uncore, RENCLK_GATE_D2, 0); - intel_uncore_write(uncore, DSPCLK_GATE_D(i915), 0); + intel_uncore_write(uncore, DSPCLK_GATE_D, 0); intel_uncore_write(uncore, RAMCLK_GATE_D, 0); intel_uncore_write16(uncore, DEUC, 0); intel_uncore_write(uncore, diff --git a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c index 87ac4446d306..ca57a3dd3148 100644 --- a/drivers/gpu/drm/i915/intel_gvt_mmio_table.c +++ b/drivers/gpu/drm/i915/intel_gvt_mmio_table.c @@ -62,6 +62,7 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter) { struct drm_i915_private *dev_priv = 
iter->i915; + struct intel_display *display = dev_priv->display; MMIO_RING_D(RING_IMR); MMIO_D(SDEIMR); @@ -133,38 +134,38 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter) MMIO_D(_MMIO(0x650b4)); MMIO_D(_MMIO(0xc4040)); MMIO_D(DERRMR); - MMIO_D(PIPEDSL(dev_priv, PIPE_A)); - MMIO_D(PIPEDSL(dev_priv, PIPE_B)); - MMIO_D(PIPEDSL(dev_priv, PIPE_C)); - MMIO_D(PIPEDSL(dev_priv, _PIPE_EDP)); - MMIO_D(TRANSCONF(dev_priv, TRANSCODER_A)); - MMIO_D(TRANSCONF(dev_priv, TRANSCODER_B)); - MMIO_D(TRANSCONF(dev_priv, TRANSCODER_C)); - MMIO_D(TRANSCONF(dev_priv, TRANSCODER_EDP)); - MMIO_D(PIPESTAT(dev_priv, PIPE_A)); - MMIO_D(PIPESTAT(dev_priv, PIPE_B)); - MMIO_D(PIPESTAT(dev_priv, PIPE_C)); - MMIO_D(PIPESTAT(dev_priv, _PIPE_EDP)); - MMIO_D(PIPE_FLIPCOUNT_G4X(dev_priv, PIPE_A)); - MMIO_D(PIPE_FLIPCOUNT_G4X(dev_priv, PIPE_B)); - MMIO_D(PIPE_FLIPCOUNT_G4X(dev_priv, PIPE_C)); - MMIO_D(PIPE_FLIPCOUNT_G4X(dev_priv, _PIPE_EDP)); - MMIO_D(PIPE_FRMCOUNT_G4X(dev_priv, PIPE_A)); - MMIO_D(PIPE_FRMCOUNT_G4X(dev_priv, PIPE_B)); - MMIO_D(PIPE_FRMCOUNT_G4X(dev_priv, PIPE_C)); - MMIO_D(PIPE_FRMCOUNT_G4X(dev_priv, _PIPE_EDP)); - MMIO_D(CURCNTR(dev_priv, PIPE_A)); - MMIO_D(CURCNTR(dev_priv, PIPE_B)); - MMIO_D(CURCNTR(dev_priv, PIPE_C)); - MMIO_D(CURPOS(dev_priv, PIPE_A)); - MMIO_D(CURPOS(dev_priv, PIPE_B)); - MMIO_D(CURPOS(dev_priv, PIPE_C)); - MMIO_D(CURBASE(dev_priv, PIPE_A)); - MMIO_D(CURBASE(dev_priv, PIPE_B)); - MMIO_D(CURBASE(dev_priv, PIPE_C)); - MMIO_D(CUR_FBC_CTL(dev_priv, PIPE_A)); - MMIO_D(CUR_FBC_CTL(dev_priv, PIPE_B)); - MMIO_D(CUR_FBC_CTL(dev_priv, PIPE_C)); + MMIO_D(PIPEDSL(display, PIPE_A)); + MMIO_D(PIPEDSL(display, PIPE_B)); + MMIO_D(PIPEDSL(display, PIPE_C)); + MMIO_D(PIPEDSL(display, _PIPE_EDP)); + MMIO_D(TRANSCONF(display, TRANSCODER_A)); + MMIO_D(TRANSCONF(display, TRANSCODER_B)); + MMIO_D(TRANSCONF(display, TRANSCODER_C)); + MMIO_D(TRANSCONF(display, TRANSCODER_EDP)); + MMIO_D(PIPESTAT(display, PIPE_A)); + MMIO_D(PIPESTAT(display, PIPE_B)); + MMIO_D(PIPESTAT(display, PIPE_C)); + MMIO_D(PIPESTAT(display, _PIPE_EDP)); + MMIO_D(PIPE_FLIPCOUNT_G4X(display, PIPE_A)); + MMIO_D(PIPE_FLIPCOUNT_G4X(display, PIPE_B)); + MMIO_D(PIPE_FLIPCOUNT_G4X(display, PIPE_C)); + MMIO_D(PIPE_FLIPCOUNT_G4X(display, _PIPE_EDP)); + MMIO_D(PIPE_FRMCOUNT_G4X(display, PIPE_A)); + MMIO_D(PIPE_FRMCOUNT_G4X(display, PIPE_B)); + MMIO_D(PIPE_FRMCOUNT_G4X(display, PIPE_C)); + MMIO_D(PIPE_FRMCOUNT_G4X(display, _PIPE_EDP)); + MMIO_D(CURCNTR(display, PIPE_A)); + MMIO_D(CURCNTR(display, PIPE_B)); + MMIO_D(CURCNTR(display, PIPE_C)); + MMIO_D(CURPOS(display, PIPE_A)); + MMIO_D(CURPOS(display, PIPE_B)); + MMIO_D(CURPOS(display, PIPE_C)); + MMIO_D(CURBASE(display, PIPE_A)); + MMIO_D(CURBASE(display, PIPE_B)); + MMIO_D(CURBASE(display, PIPE_C)); + MMIO_D(CUR_FBC_CTL(display, PIPE_A)); + MMIO_D(CUR_FBC_CTL(display, PIPE_B)); + MMIO_D(CUR_FBC_CTL(display, PIPE_C)); MMIO_D(_MMIO(0x700ac)); MMIO_D(_MMIO(0x710ac)); MMIO_D(_MMIO(0x720ac)); @@ -172,32 +173,32 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter) MMIO_D(_MMIO(0x70094)); MMIO_D(_MMIO(0x70098)); MMIO_D(_MMIO(0x7009c)); - MMIO_D(DSPCNTR(dev_priv, PIPE_A)); - MMIO_D(DSPADDR(dev_priv, PIPE_A)); - MMIO_D(DSPSTRIDE(dev_priv, PIPE_A)); - MMIO_D(DSPPOS(dev_priv, PIPE_A)); - MMIO_D(DSPSIZE(dev_priv, PIPE_A)); - MMIO_D(DSPSURF(dev_priv, PIPE_A)); - MMIO_D(DSPOFFSET(dev_priv, PIPE_A)); - MMIO_D(DSPSURFLIVE(dev_priv, PIPE_A)); + MMIO_D(DSPCNTR(display, PIPE_A)); + MMIO_D(DSPADDR(display, PIPE_A)); + MMIO_D(DSPSTRIDE(display, PIPE_A)); + 
MMIO_D(DSPPOS(display, PIPE_A)); + MMIO_D(DSPSIZE(display, PIPE_A)); + MMIO_D(DSPSURF(display, PIPE_A)); + MMIO_D(DSPOFFSET(display, PIPE_A)); + MMIO_D(DSPSURFLIVE(display, PIPE_A)); MMIO_D(REG_50080(PIPE_A, PLANE_PRIMARY)); - MMIO_D(DSPCNTR(dev_priv, PIPE_B)); - MMIO_D(DSPADDR(dev_priv, PIPE_B)); - MMIO_D(DSPSTRIDE(dev_priv, PIPE_B)); - MMIO_D(DSPPOS(dev_priv, PIPE_B)); - MMIO_D(DSPSIZE(dev_priv, PIPE_B)); - MMIO_D(DSPSURF(dev_priv, PIPE_B)); - MMIO_D(DSPOFFSET(dev_priv, PIPE_B)); - MMIO_D(DSPSURFLIVE(dev_priv, PIPE_B)); + MMIO_D(DSPCNTR(display, PIPE_B)); + MMIO_D(DSPADDR(display, PIPE_B)); + MMIO_D(DSPSTRIDE(display, PIPE_B)); + MMIO_D(DSPPOS(display, PIPE_B)); + MMIO_D(DSPSIZE(display, PIPE_B)); + MMIO_D(DSPSURF(display, PIPE_B)); + MMIO_D(DSPOFFSET(display, PIPE_B)); + MMIO_D(DSPSURFLIVE(display, PIPE_B)); MMIO_D(REG_50080(PIPE_B, PLANE_PRIMARY)); - MMIO_D(DSPCNTR(dev_priv, PIPE_C)); - MMIO_D(DSPADDR(dev_priv, PIPE_C)); - MMIO_D(DSPSTRIDE(dev_priv, PIPE_C)); - MMIO_D(DSPPOS(dev_priv, PIPE_C)); - MMIO_D(DSPSIZE(dev_priv, PIPE_C)); - MMIO_D(DSPSURF(dev_priv, PIPE_C)); - MMIO_D(DSPOFFSET(dev_priv, PIPE_C)); - MMIO_D(DSPSURFLIVE(dev_priv, PIPE_C)); + MMIO_D(DSPCNTR(display, PIPE_C)); + MMIO_D(DSPADDR(display, PIPE_C)); + MMIO_D(DSPSTRIDE(display, PIPE_C)); + MMIO_D(DSPPOS(display, PIPE_C)); + MMIO_D(DSPSIZE(display, PIPE_C)); + MMIO_D(DSPSURF(display, PIPE_C)); + MMIO_D(DSPOFFSET(display, PIPE_C)); + MMIO_D(DSPSURFLIVE(display, PIPE_C)); MMIO_D(REG_50080(PIPE_C, PLANE_PRIMARY)); MMIO_D(SPRCTL(PIPE_A)); MMIO_D(SPRLINOFF(PIPE_A)); @@ -238,73 +239,73 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter) MMIO_D(SPRSCALE(PIPE_C)); MMIO_D(SPRSURFLIVE(PIPE_C)); MMIO_D(REG_50080(PIPE_C, PLANE_SPRITE0)); - MMIO_D(TRANS_HTOTAL(dev_priv, TRANSCODER_A)); - MMIO_D(TRANS_HBLANK(dev_priv, TRANSCODER_A)); - MMIO_D(TRANS_HSYNC(dev_priv, TRANSCODER_A)); - MMIO_D(TRANS_VTOTAL(dev_priv, TRANSCODER_A)); - MMIO_D(TRANS_VBLANK(dev_priv, TRANSCODER_A)); - MMIO_D(TRANS_VSYNC(dev_priv, TRANSCODER_A)); - MMIO_D(BCLRPAT(dev_priv, TRANSCODER_A)); - MMIO_D(TRANS_VSYNCSHIFT(dev_priv, TRANSCODER_A)); - MMIO_D(PIPESRC(dev_priv, TRANSCODER_A)); - MMIO_D(TRANS_HTOTAL(dev_priv, TRANSCODER_B)); - MMIO_D(TRANS_HBLANK(dev_priv, TRANSCODER_B)); - MMIO_D(TRANS_HSYNC(dev_priv, TRANSCODER_B)); - MMIO_D(TRANS_VTOTAL(dev_priv, TRANSCODER_B)); - MMIO_D(TRANS_VBLANK(dev_priv, TRANSCODER_B)); - MMIO_D(TRANS_VSYNC(dev_priv, TRANSCODER_B)); - MMIO_D(BCLRPAT(dev_priv, TRANSCODER_B)); - MMIO_D(TRANS_VSYNCSHIFT(dev_priv, TRANSCODER_B)); - MMIO_D(PIPESRC(dev_priv, TRANSCODER_B)); - MMIO_D(TRANS_HTOTAL(dev_priv, TRANSCODER_C)); - MMIO_D(TRANS_HBLANK(dev_priv, TRANSCODER_C)); - MMIO_D(TRANS_HSYNC(dev_priv, TRANSCODER_C)); - MMIO_D(TRANS_VTOTAL(dev_priv, TRANSCODER_C)); - MMIO_D(TRANS_VBLANK(dev_priv, TRANSCODER_C)); - MMIO_D(TRANS_VSYNC(dev_priv, TRANSCODER_C)); - MMIO_D(BCLRPAT(dev_priv, TRANSCODER_C)); - MMIO_D(TRANS_VSYNCSHIFT(dev_priv, TRANSCODER_C)); - MMIO_D(PIPESRC(dev_priv, TRANSCODER_C)); - MMIO_D(TRANS_HTOTAL(dev_priv, TRANSCODER_EDP)); - MMIO_D(TRANS_HBLANK(dev_priv, TRANSCODER_EDP)); - MMIO_D(TRANS_HSYNC(dev_priv, TRANSCODER_EDP)); - MMIO_D(TRANS_VTOTAL(dev_priv, TRANSCODER_EDP)); - MMIO_D(TRANS_VBLANK(dev_priv, TRANSCODER_EDP)); - MMIO_D(TRANS_VSYNC(dev_priv, TRANSCODER_EDP)); - MMIO_D(BCLRPAT(dev_priv, TRANSCODER_EDP)); - MMIO_D(TRANS_VSYNCSHIFT(dev_priv, TRANSCODER_EDP)); - MMIO_D(PIPE_DATA_M1(dev_priv, TRANSCODER_A)); - MMIO_D(PIPE_DATA_N1(dev_priv, TRANSCODER_A)); - MMIO_D(PIPE_DATA_M2(dev_priv, 
TRANSCODER_A)); - MMIO_D(PIPE_DATA_N2(dev_priv, TRANSCODER_A)); - MMIO_D(PIPE_LINK_M1(dev_priv, TRANSCODER_A)); - MMIO_D(PIPE_LINK_N1(dev_priv, TRANSCODER_A)); - MMIO_D(PIPE_LINK_M2(dev_priv, TRANSCODER_A)); - MMIO_D(PIPE_LINK_N2(dev_priv, TRANSCODER_A)); - MMIO_D(PIPE_DATA_M1(dev_priv, TRANSCODER_B)); - MMIO_D(PIPE_DATA_N1(dev_priv, TRANSCODER_B)); - MMIO_D(PIPE_DATA_M2(dev_priv, TRANSCODER_B)); - MMIO_D(PIPE_DATA_N2(dev_priv, TRANSCODER_B)); - MMIO_D(PIPE_LINK_M1(dev_priv, TRANSCODER_B)); - MMIO_D(PIPE_LINK_N1(dev_priv, TRANSCODER_B)); - MMIO_D(PIPE_LINK_M2(dev_priv, TRANSCODER_B)); - MMIO_D(PIPE_LINK_N2(dev_priv, TRANSCODER_B)); - MMIO_D(PIPE_DATA_M1(dev_priv, TRANSCODER_C)); - MMIO_D(PIPE_DATA_N1(dev_priv, TRANSCODER_C)); - MMIO_D(PIPE_DATA_M2(dev_priv, TRANSCODER_C)); - MMIO_D(PIPE_DATA_N2(dev_priv, TRANSCODER_C)); - MMIO_D(PIPE_LINK_M1(dev_priv, TRANSCODER_C)); - MMIO_D(PIPE_LINK_N1(dev_priv, TRANSCODER_C)); - MMIO_D(PIPE_LINK_M2(dev_priv, TRANSCODER_C)); - MMIO_D(PIPE_LINK_N2(dev_priv, TRANSCODER_C)); - MMIO_D(PIPE_DATA_M1(dev_priv, TRANSCODER_EDP)); - MMIO_D(PIPE_DATA_N1(dev_priv, TRANSCODER_EDP)); - MMIO_D(PIPE_DATA_M2(dev_priv, TRANSCODER_EDP)); - MMIO_D(PIPE_DATA_N2(dev_priv, TRANSCODER_EDP)); - MMIO_D(PIPE_LINK_M1(dev_priv, TRANSCODER_EDP)); - MMIO_D(PIPE_LINK_N1(dev_priv, TRANSCODER_EDP)); - MMIO_D(PIPE_LINK_M2(dev_priv, TRANSCODER_EDP)); - MMIO_D(PIPE_LINK_N2(dev_priv, TRANSCODER_EDP)); + MMIO_D(TRANS_HTOTAL(display, TRANSCODER_A)); + MMIO_D(TRANS_HBLANK(display, TRANSCODER_A)); + MMIO_D(TRANS_HSYNC(display, TRANSCODER_A)); + MMIO_D(TRANS_VTOTAL(display, TRANSCODER_A)); + MMIO_D(TRANS_VBLANK(display, TRANSCODER_A)); + MMIO_D(TRANS_VSYNC(display, TRANSCODER_A)); + MMIO_D(BCLRPAT(display, TRANSCODER_A)); + MMIO_D(TRANS_VSYNCSHIFT(display, TRANSCODER_A)); + MMIO_D(PIPESRC(display, TRANSCODER_A)); + MMIO_D(TRANS_HTOTAL(display, TRANSCODER_B)); + MMIO_D(TRANS_HBLANK(display, TRANSCODER_B)); + MMIO_D(TRANS_HSYNC(display, TRANSCODER_B)); + MMIO_D(TRANS_VTOTAL(display, TRANSCODER_B)); + MMIO_D(TRANS_VBLANK(display, TRANSCODER_B)); + MMIO_D(TRANS_VSYNC(display, TRANSCODER_B)); + MMIO_D(BCLRPAT(display, TRANSCODER_B)); + MMIO_D(TRANS_VSYNCSHIFT(display, TRANSCODER_B)); + MMIO_D(PIPESRC(display, TRANSCODER_B)); + MMIO_D(TRANS_HTOTAL(display, TRANSCODER_C)); + MMIO_D(TRANS_HBLANK(display, TRANSCODER_C)); + MMIO_D(TRANS_HSYNC(display, TRANSCODER_C)); + MMIO_D(TRANS_VTOTAL(display, TRANSCODER_C)); + MMIO_D(TRANS_VBLANK(display, TRANSCODER_C)); + MMIO_D(TRANS_VSYNC(display, TRANSCODER_C)); + MMIO_D(BCLRPAT(display, TRANSCODER_C)); + MMIO_D(TRANS_VSYNCSHIFT(display, TRANSCODER_C)); + MMIO_D(PIPESRC(display, TRANSCODER_C)); + MMIO_D(TRANS_HTOTAL(display, TRANSCODER_EDP)); + MMIO_D(TRANS_HBLANK(display, TRANSCODER_EDP)); + MMIO_D(TRANS_HSYNC(display, TRANSCODER_EDP)); + MMIO_D(TRANS_VTOTAL(display, TRANSCODER_EDP)); + MMIO_D(TRANS_VBLANK(display, TRANSCODER_EDP)); + MMIO_D(TRANS_VSYNC(display, TRANSCODER_EDP)); + MMIO_D(BCLRPAT(display, TRANSCODER_EDP)); + MMIO_D(TRANS_VSYNCSHIFT(display, TRANSCODER_EDP)); + MMIO_D(PIPE_DATA_M1(display, TRANSCODER_A)); + MMIO_D(PIPE_DATA_N1(display, TRANSCODER_A)); + MMIO_D(PIPE_DATA_M2(display, TRANSCODER_A)); + MMIO_D(PIPE_DATA_N2(display, TRANSCODER_A)); + MMIO_D(PIPE_LINK_M1(display, TRANSCODER_A)); + MMIO_D(PIPE_LINK_N1(display, TRANSCODER_A)); + MMIO_D(PIPE_LINK_M2(display, TRANSCODER_A)); + MMIO_D(PIPE_LINK_N2(display, TRANSCODER_A)); + MMIO_D(PIPE_DATA_M1(display, TRANSCODER_B)); + MMIO_D(PIPE_DATA_N1(display, TRANSCODER_B)); + 
MMIO_D(PIPE_DATA_M2(display, TRANSCODER_B)); + MMIO_D(PIPE_DATA_N2(display, TRANSCODER_B)); + MMIO_D(PIPE_LINK_M1(display, TRANSCODER_B)); + MMIO_D(PIPE_LINK_N1(display, TRANSCODER_B)); + MMIO_D(PIPE_LINK_M2(display, TRANSCODER_B)); + MMIO_D(PIPE_LINK_N2(display, TRANSCODER_B)); + MMIO_D(PIPE_DATA_M1(display, TRANSCODER_C)); + MMIO_D(PIPE_DATA_N1(display, TRANSCODER_C)); + MMIO_D(PIPE_DATA_M2(display, TRANSCODER_C)); + MMIO_D(PIPE_DATA_N2(display, TRANSCODER_C)); + MMIO_D(PIPE_LINK_M1(display, TRANSCODER_C)); + MMIO_D(PIPE_LINK_N1(display, TRANSCODER_C)); + MMIO_D(PIPE_LINK_M2(display, TRANSCODER_C)); + MMIO_D(PIPE_LINK_N2(display, TRANSCODER_C)); + MMIO_D(PIPE_DATA_M1(display, TRANSCODER_EDP)); + MMIO_D(PIPE_DATA_N1(display, TRANSCODER_EDP)); + MMIO_D(PIPE_DATA_M2(display, TRANSCODER_EDP)); + MMIO_D(PIPE_DATA_N2(display, TRANSCODER_EDP)); + MMIO_D(PIPE_LINK_M1(display, TRANSCODER_EDP)); + MMIO_D(PIPE_LINK_N1(display, TRANSCODER_EDP)); + MMIO_D(PIPE_LINK_M2(display, TRANSCODER_EDP)); + MMIO_D(PIPE_LINK_N2(display, TRANSCODER_EDP)); MMIO_D(PF_CTL(PIPE_A)); MMIO_D(PF_WIN_SZ(PIPE_A)); MMIO_D(PF_WIN_POS(PIPE_A)); @@ -513,12 +514,12 @@ static int iterate_generic_mmio(struct intel_gvt_mmio_table_iter *iter) MMIO_D(GAMMA_MODE(PIPE_A)); MMIO_D(GAMMA_MODE(PIPE_B)); MMIO_D(GAMMA_MODE(PIPE_C)); - MMIO_D(TRANS_MULT(dev_priv, TRANSCODER_A)); - MMIO_D(TRANS_MULT(dev_priv, TRANSCODER_B)); - MMIO_D(TRANS_MULT(dev_priv, TRANSCODER_C)); - MMIO_D(HSW_TVIDEO_DIP_CTL(dev_priv, TRANSCODER_A)); - MMIO_D(HSW_TVIDEO_DIP_CTL(dev_priv, TRANSCODER_B)); - MMIO_D(HSW_TVIDEO_DIP_CTL(dev_priv, TRANSCODER_C)); + MMIO_D(TRANS_MULT(display, TRANSCODER_A)); + MMIO_D(TRANS_MULT(display, TRANSCODER_B)); + MMIO_D(TRANS_MULT(display, TRANSCODER_C)); + MMIO_D(HSW_TVIDEO_DIP_CTL(display, TRANSCODER_A)); + MMIO_D(HSW_TVIDEO_DIP_CTL(display, TRANSCODER_B)); + MMIO_D(HSW_TVIDEO_DIP_CTL(display, TRANSCODER_C)); MMIO_D(SFUSE_STRAP); MMIO_D(SBI_ADDR); MMIO_D(SBI_DATA); @@ -1111,6 +1112,7 @@ static int iterate_skl_plus_mmio(struct intel_gvt_mmio_table_iter *iter) static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter) { struct drm_i915_private *dev_priv = iter->i915; + struct intel_display *display = dev_priv->display; MMIO_F(_MMIO(0x80000), 0x3000); MMIO_D(GEN7_SAMPLER_INSTDONE); @@ -1242,9 +1244,9 @@ static int iterate_bxt_mmio(struct intel_gvt_mmio_table_iter *iter) MMIO_D(BXT_DSI_PLL_ENABLE); MMIO_D(GEN9_CLKGATE_DIS_0); MMIO_D(GEN9_CLKGATE_DIS_4); - MMIO_D(HSW_TVIDEO_DIP_GCP(dev_priv, TRANSCODER_A)); - MMIO_D(HSW_TVIDEO_DIP_GCP(dev_priv, TRANSCODER_B)); - MMIO_D(HSW_TVIDEO_DIP_GCP(dev_priv, TRANSCODER_C)); + MMIO_D(HSW_TVIDEO_DIP_GCP(display, TRANSCODER_A)); + MMIO_D(HSW_TVIDEO_DIP_GCP(display, TRANSCODER_B)); + MMIO_D(HSW_TVIDEO_DIP_GCP(display, TRANSCODER_C)); MMIO_D(RC6_CTX_BASE); MMIO_D(GEN8_PUSHBUS_CONTROL); MMIO_D(GEN8_PUSHBUS_ENABLE); diff --git a/drivers/gpu/drm/i915/intel_uncore.c b/drivers/gpu/drm/i915/intel_uncore.c index c8e29fd72290..4ccba7c8ffb3 100644 --- a/drivers/gpu/drm/i915/intel_uncore.c +++ b/drivers/gpu/drm/i915/intel_uncore.c @@ -2502,6 +2502,7 @@ static int sanity_check_mmio_access(struct intel_uncore *uncore) int intel_uncore_init_mmio(struct intel_uncore *uncore) { struct drm_i915_private *i915 = uncore->i915; + struct intel_display *display = i915->display; int ret; ret = sanity_check_mmio_access(uncore); @@ -2536,7 +2537,7 @@ int intel_uncore_init_mmio(struct intel_uncore *uncore) GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.read_fw_domains); 
GEM_BUG_ON(intel_uncore_has_forcewake(uncore) != !!uncore->funcs.write_fw_domains); - if (HAS_FPGA_DBG_UNCLAIMED(i915)) + if (HAS_FPGA_DBG_UNCLAIMED(display)) uncore->flags |= UNCORE_HAS_FPGA_DBG_UNCLAIMED; if (IS_VALLEYVIEW(i915) || IS_CHERRYVIEW(i915)) diff --git a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c index e07c5b380789..545f79eb0cc5 100644 --- a/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c +++ b/drivers/gpu/drm/i915/pxp/intel_pxp_debugfs.c @@ -69,17 +69,17 @@ DEFINE_SIMPLE_ATTRIBUTE(pxp_terminate_fops, pxp_terminate_get, pxp_terminate_set void intel_pxp_debugfs_register(struct intel_pxp *pxp) { - struct drm_minor *minor; + struct dentry *debugfs_root; struct dentry *pxproot; if (!intel_pxp_is_supported(pxp)) return; - minor = pxp->ctrl_gt->i915->drm.primary; - if (!minor->debugfs_root) + debugfs_root = pxp->ctrl_gt->i915->drm.debugfs_root; + if (!debugfs_root) return; - pxproot = debugfs_create_dir("pxp", minor->debugfs_root); + pxproot = debugfs_create_dir("pxp", debugfs_root); if (IS_ERR(pxproot)) return; diff --git a/drivers/gpu/drm/i915/selftests/intel_uncore.c b/drivers/gpu/drm/i915/selftests/intel_uncore.c index 41eaa9b7f67d..58bcbdcef563 100644 --- a/drivers/gpu/drm/i915/selftests/intel_uncore.c +++ b/drivers/gpu/drm/i915/selftests/intel_uncore.c @@ -277,13 +277,15 @@ static int live_forcewake_domains(void *arg) #define FW_RANGE 0x40000 struct intel_gt *gt = arg; struct intel_uncore *uncore = gt->uncore; + struct drm_i915_private *i915 = gt->i915; + struct intel_display *display = i915->display; unsigned long *valid; u32 offset; int err; - if (!HAS_FPGA_DBG_UNCLAIMED(gt->i915) && - !IS_VALLEYVIEW(gt->i915) && - !IS_CHERRYVIEW(gt->i915)) + if (!HAS_FPGA_DBG_UNCLAIMED(display) && + !IS_VALLEYVIEW(i915) && + !IS_CHERRYVIEW(i915)) return 0; /* diff --git a/drivers/gpu/drm/i915/soc/intel_dram.c b/drivers/gpu/drm/i915/soc/intel_dram.c index deb159548a09..149527827624 100644 --- a/drivers/gpu/drm/i915/soc/intel_dram.c +++ b/drivers/gpu/drm/i915/soc/intel_dram.c @@ -11,6 +11,7 @@ #include "i915_drv.h" #include "i915_reg.h" +#include "i915_utils.h" #include "intel_dram.h" #include "intel_mchbar_regs.h" #include "intel_pcode.h" @@ -30,10 +31,11 @@ struct dram_channel_info { #define DRAM_TYPE_STR(type) [INTEL_DRAM_ ## type] = #type -static const char *intel_dram_type_str(enum intel_dram_type type) +const char *intel_dram_type_str(enum intel_dram_type type) { static const char * const str[] = { DRAM_TYPE_STR(UNKNOWN), + DRAM_TYPE_STR(DDR2), DRAM_TYPE_STR(DDR3), DRAM_TYPE_STR(DDR4), DRAM_TYPE_STR(LPDDR3), @@ -54,9 +56,10 @@ static const char *intel_dram_type_str(enum intel_dram_type type) #undef DRAM_TYPE_STR -static bool pnv_is_ddr3(struct drm_i915_private *i915) +static enum intel_dram_type pnv_dram_type(struct drm_i915_private *i915) { - return intel_uncore_read(&i915->uncore, CSHRDDR3CTL) & CSHRDDR3CTL_DDR3; + return intel_uncore_read(&i915->uncore, CSHRDDR3CTL) & CSHRDDR3CTL_DDR3 ? 
+ INTEL_DRAM_DDR3 : INTEL_DRAM_DDR2; } static unsigned int pnv_mem_freq(struct drm_i915_private *dev_priv) @@ -135,25 +138,21 @@ static unsigned int vlv_mem_freq(struct drm_i915_private *i915) return 0; } -static void detect_mem_freq(struct drm_i915_private *i915) +unsigned int intel_mem_freq(struct drm_i915_private *i915) { if (IS_PINEVIEW(i915)) - i915->mem_freq = pnv_mem_freq(i915); + return pnv_mem_freq(i915); else if (GRAPHICS_VER(i915) == 5) - i915->mem_freq = ilk_mem_freq(i915); + return ilk_mem_freq(i915); else if (IS_CHERRYVIEW(i915)) - i915->mem_freq = chv_mem_freq(i915); + return chv_mem_freq(i915); else if (IS_VALLEYVIEW(i915)) - i915->mem_freq = vlv_mem_freq(i915); - - if (IS_PINEVIEW(i915)) - i915->is_ddr3 = pnv_is_ddr3(i915); - - if (i915->mem_freq) - drm_dbg(&i915->drm, "DDR speed: %d kHz\n", i915->mem_freq); + return vlv_mem_freq(i915); + else + return 0; } -unsigned int i9xx_fsb_freq(struct drm_i915_private *i915) +static unsigned int i9xx_fsb_freq(struct drm_i915_private *i915) { u32 fsb; @@ -235,15 +234,30 @@ static unsigned int ilk_fsb_freq(struct drm_i915_private *dev_priv) } } -static void detect_fsb_freq(struct drm_i915_private *i915) +unsigned int intel_fsb_freq(struct drm_i915_private *i915) { if (GRAPHICS_VER(i915) == 5) - i915->fsb_freq = ilk_fsb_freq(i915); + return ilk_fsb_freq(i915); else if (GRAPHICS_VER(i915) == 3 || GRAPHICS_VER(i915) == 4) - i915->fsb_freq = i9xx_fsb_freq(i915); + return i9xx_fsb_freq(i915); + else + return 0; +} - if (i915->fsb_freq) - drm_dbg(&i915->drm, "FSB frequency: %d kHz\n", i915->fsb_freq); +static int i915_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info) +{ + dram_info->fsb_freq = intel_fsb_freq(i915); + if (dram_info->fsb_freq) + drm_dbg(&i915->drm, "FSB frequency: %d kHz\n", dram_info->fsb_freq); + + dram_info->mem_freq = intel_mem_freq(i915); + if (dram_info->mem_freq) + drm_dbg(&i915->drm, "DDR speed: %d kHz\n", dram_info->mem_freq); + + if (IS_PINEVIEW(i915)) + dram_info->type = pnv_dram_type(i915); + + return 0; } static int intel_dimm_num_devices(const struct dram_dimm_info *dimm) @@ -392,6 +406,9 @@ skl_dram_get_channels_info(struct drm_i915_private *i915, struct dram_info *dram u32 val; int ret; + /* Assume 16Gb DIMMs are present until proven otherwise */ + dram_info->has_16gb_dimms = true; + val = intel_uncore_read(&i915->uncore, SKL_MAD_DIMM_CH0_0_0_0_MCHBAR_MCMAIN); ret = skl_dram_get_channel_info(i915, &ch0, 0, val); @@ -414,13 +431,16 @@ skl_dram_get_channels_info(struct drm_i915_private *i915, struct dram_info *dram return -EINVAL; } - dram_info->wm_lv_0_adjust_needed = ch0.is_16gb_dimm || ch1.is_16gb_dimm; + dram_info->has_16gb_dimms = ch0.is_16gb_dimm || ch1.is_16gb_dimm; dram_info->symmetric_memory = intel_is_dram_symmetric(&ch0, &ch1); drm_dbg_kms(&i915->drm, "Memory configuration is symmetric? 
%s\n", str_yes_no(dram_info->symmetric_memory)); + drm_dbg_kms(&i915->drm, "16Gb DIMMs: %s\n", + str_yes_no(dram_info->has_16gb_dimms)); + return 0; } @@ -649,8 +669,9 @@ static int icl_pcode_read_mem_global_info(struct drm_i915_private *dev_priv, static int gen11_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info) { - int ret = skl_get_dram_info(i915, dram_info); + int ret; + ret = skl_dram_get_channels_info(i915, dram_info); if (ret) return ret; @@ -659,8 +680,6 @@ static int gen11_get_dram_info(struct drm_i915_private *i915, struct dram_info * static int gen12_get_dram_info(struct drm_i915_private *i915, struct dram_info *dram_info) { - dram_info->wm_lv_0_adjust_needed = false; - return icl_pcode_read_mem_global_info(i915, dram_info); } @@ -709,13 +728,11 @@ static int xelpdp_get_dram_info(struct drm_i915_private *i915, struct dram_info int intel_dram_detect(struct drm_i915_private *i915) { + struct intel_display *display = i915->display; struct dram_info *dram_info; int ret; - detect_fsb_freq(i915); - detect_mem_freq(i915); - - if (GRAPHICS_VER(i915) < 9 || IS_DG2(i915) || !HAS_DISPLAY(i915)) + if (IS_DG2(i915) || !HAS_DISPLAY(display)) return 0; dram_info = drmm_kzalloc(&i915->drm, sizeof(*dram_info), GFP_KERNEL); @@ -724,13 +741,7 @@ int intel_dram_detect(struct drm_i915_private *i915) i915->dram_info = dram_info; - /* - * Assume level 0 watermark latency adjustment is needed until proven - * otherwise, this w/a is not needed by bxt/glk. - */ - dram_info->wm_lv_0_adjust_needed = !IS_BROXTON(i915) && !IS_GEMINILAKE(i915); - - if (DISPLAY_VER(i915) >= 14) + if (DISPLAY_VER(display) >= 14) ret = xelpdp_get_dram_info(i915, dram_info); else if (GRAPHICS_VER(i915) >= 12) ret = gen12_get_dram_info(i915, dram_info); @@ -738,23 +749,23 @@ int intel_dram_detect(struct drm_i915_private *i915) ret = gen11_get_dram_info(i915, dram_info); else if (IS_BROXTON(i915) || IS_GEMINILAKE(i915)) ret = bxt_get_dram_info(i915, dram_info); - else + else if (GRAPHICS_VER(i915) >= 9) ret = skl_get_dram_info(i915, dram_info); + else + ret = i915_get_dram_info(i915, dram_info); drm_dbg_kms(&i915->drm, "DRAM type: %s\n", intel_dram_type_str(dram_info->type)); + drm_dbg_kms(&i915->drm, "DRAM channels: %u\n", dram_info->num_channels); + + drm_dbg_kms(&i915->drm, "Num QGV points %u\n", dram_info->num_qgv_points); + drm_dbg_kms(&i915->drm, "Num PSF GV points %u\n", dram_info->num_psf_gv_points); + /* TODO: Do we want to abort probe on dram detection failures? 
*/ if (ret) return 0; - drm_dbg_kms(&i915->drm, "Num qgv points %u\n", dram_info->num_qgv_points); - - drm_dbg_kms(&i915->drm, "DRAM channels: %u\n", dram_info->num_channels); - - drm_dbg_kms(&i915->drm, "Watermark level 0 adjustment needed: %s\n", - str_yes_no(dram_info->wm_lv_0_adjust_needed)); - return 0; } diff --git a/drivers/gpu/drm/i915/soc/intel_dram.h b/drivers/gpu/drm/i915/soc/intel_dram.h index 2a696e03aad4..03a973f1c941 100644 --- a/drivers/gpu/drm/i915/soc/intel_dram.h +++ b/drivers/gpu/drm/i915/soc/intel_dram.h @@ -12,11 +12,9 @@ struct drm_i915_private; struct drm_device; struct dram_info { - bool wm_lv_0_adjust_needed; - u8 num_channels; - bool symmetric_memory; enum intel_dram_type { INTEL_DRAM_UNKNOWN, + INTEL_DRAM_DDR2, INTEL_DRAM_DDR3, INTEL_DRAM_DDR4, INTEL_DRAM_LPDDR3, @@ -27,13 +25,20 @@ struct dram_info { INTEL_DRAM_GDDR_ECC, __INTEL_DRAM_TYPE_MAX, } type; + unsigned int fsb_freq; + unsigned int mem_freq; + u8 num_channels; u8 num_qgv_points; u8 num_psf_gv_points; + bool symmetric_memory; + bool has_16gb_dimms; }; void intel_dram_edram_detect(struct drm_i915_private *i915); int intel_dram_detect(struct drm_i915_private *i915); -unsigned int i9xx_fsb_freq(struct drm_i915_private *i915); +unsigned int intel_fsb_freq(struct drm_i915_private *i915); +unsigned int intel_mem_freq(struct drm_i915_private *i915); const struct dram_info *intel_dram_info(struct drm_device *drm); +const char *intel_dram_type_str(enum intel_dram_type type); #endif /* __INTEL_DRAM_H__ */ diff --git a/drivers/gpu/drm/i915/soc/intel_gmch.c b/drivers/gpu/drm/i915/soc/intel_gmch.c index 5346b8dda79a..f210c9655b53 100644 --- a/drivers/gpu/drm/i915/soc/intel_gmch.c +++ b/drivers/gpu/drm/i915/soc/intel_gmch.c @@ -148,7 +148,8 @@ void intel_gmch_bar_teardown(struct drm_i915_private *i915) int intel_gmch_vga_set_state(struct drm_i915_private *i915, bool enable_decode) { - unsigned int reg = DISPLAY_VER(i915) >= 6 ? SNB_GMCH_CTRL : INTEL_GMCH_CTRL; + struct intel_display *display = i915->display; + unsigned int reg = DISPLAY_VER(display) >= 6 ? 
SNB_GMCH_CTRL : INTEL_GMCH_CTRL; u16 gmch_ctrl; if (pci_read_config_word(i915->gmch.pdev, reg, &gmch_ctrl)) { diff --git a/drivers/gpu/drm/imagination/Kconfig b/drivers/gpu/drm/imagination/Kconfig index 3bfa2ac212dc..682dd2633d0c 100644 --- a/drivers/gpu/drm/imagination/Kconfig +++ b/drivers/gpu/drm/imagination/Kconfig @@ -3,8 +3,9 @@ config DRM_POWERVR tristate "Imagination Technologies PowerVR (Series 6 and later) & IMG Graphics" - depends on ARM64 + depends on (ARM64 || RISCV && 64BIT) depends on DRM + depends on MMU depends on PM select DRM_EXEC select DRM_GEM_SHMEM_HELPER diff --git a/drivers/gpu/drm/imagination/pvr_device.c b/drivers/gpu/drm/imagination/pvr_device.c index 8b9ba4983c4c..294b6019b415 100644 --- a/drivers/gpu/drm/imagination/pvr_device.c +++ b/drivers/gpu/drm/imagination/pvr_device.c @@ -23,6 +23,7 @@ #include <linux/firmware.h> #include <linux/gfp.h> #include <linux/interrupt.h> +#include <linux/of.h> #include <linux/platform_device.h> #include <linux/pm_runtime.h> #include <linux/reset.h> @@ -121,21 +122,6 @@ static int pvr_device_clk_init(struct pvr_device *pvr_dev) return 0; } -static int pvr_device_reset_init(struct pvr_device *pvr_dev) -{ - struct drm_device *drm_dev = from_pvr_device(pvr_dev); - struct reset_control *reset; - - reset = devm_reset_control_get_optional_exclusive(drm_dev->dev, NULL); - if (IS_ERR(reset)) - return dev_err_probe(drm_dev->dev, PTR_ERR(reset), - "failed to get gpu reset line\n"); - - pvr_dev->reset = reset; - - return 0; -} - /** * pvr_device_process_active_queues() - Process all queue related events. * @pvr_dev: PowerVR device to check @@ -618,6 +604,9 @@ pvr_device_init(struct pvr_device *pvr_dev) struct device *dev = drm_dev->dev; int err; + /* Get the platform-specific data based on the compatible string. */ + pvr_dev->device_data = of_device_get_match_data(dev); + /* * Setup device parameters. We do this first in case other steps * depend on them. @@ -631,8 +620,7 @@ pvr_device_init(struct pvr_device *pvr_dev) if (err) return err; - /* Get the reset line for the GPU */ - err = pvr_device_reset_init(pvr_dev); + err = pvr_dev->device_data->pwr_ops->init(pvr_dev); if (err) return err; diff --git a/drivers/gpu/drm/imagination/pvr_device.h b/drivers/gpu/drm/imagination/pvr_device.h index 7cb01c38d2a9..ab8f56ae15df 100644 --- a/drivers/gpu/drm/imagination/pvr_device.h +++ b/drivers/gpu/drm/imagination/pvr_device.h @@ -37,6 +37,9 @@ struct clk; /* Forward declaration from <linux/firmware.h>. */ struct firmware; +/* Forward declaration from <linux/pwrseq/consumer.h> */ +struct pwrseq_desc; + /** * struct pvr_gpu_id - Hardware GPU ID information for a PowerVR device * @b: Branch ID. @@ -58,6 +61,14 @@ struct pvr_fw_version { }; /** + * struct pvr_device_data - Platform specific data associated with a compatible string. + * @pwr_ops: Pointer to a structure with platform-specific power functions. + */ +struct pvr_device_data { + const struct pvr_power_sequence_ops *pwr_ops; +}; + +/** * struct pvr_device - powervr-specific wrapper for &struct drm_device */ struct pvr_device { @@ -98,6 +109,9 @@ struct pvr_device { /** @fw_version: Firmware version detected at runtime. */ struct pvr_fw_version fw_version; + /** @device_data: Pointer to platform-specific data. */ + const struct pvr_device_data *device_data; + /** @regs_resource: Resource representing device control registers. */ struct resource *regs_resource; @@ -148,6 +162,9 @@ struct pvr_device { */ struct reset_control *reset; + /** @pwrseq: Pointer to a power sequencer, if one is used. 
*/ + struct pwrseq_desc *pwrseq; + /** @irq: IRQ number. */ int irq; diff --git a/drivers/gpu/drm/imagination/pvr_drv.c b/drivers/gpu/drm/imagination/pvr_drv.c index b058ec183bb3..916b40ced7eb 100644 --- a/drivers/gpu/drm/imagination/pvr_drv.c +++ b/drivers/gpu/drm/imagination/pvr_drv.c @@ -1480,15 +1480,33 @@ static void pvr_remove(struct platform_device *plat_dev) pvr_power_domains_fini(pvr_dev); } +static const struct pvr_device_data pvr_device_data_manual = { + .pwr_ops = &pvr_power_sequence_ops_manual, +}; + +static const struct pvr_device_data pvr_device_data_pwrseq = { + .pwr_ops = &pvr_power_sequence_ops_pwrseq, +}; + static const struct of_device_id dt_match[] = { - { .compatible = "img,img-rogue", .data = NULL }, + { + .compatible = "thead,th1520-gpu", + .data = &pvr_device_data_pwrseq, + }, + { + .compatible = "img,img-rogue", + .data = &pvr_device_data_manual, + }, /* * This legacy compatible string was introduced early on before the more generic * "img,img-rogue" was added. Keep it around here for compatibility, but never use * "img,img-axe" in new devicetrees. */ - { .compatible = "img,img-axe", .data = NULL }, + { + .compatible = "img,img-axe", + .data = &pvr_device_data_manual, + }, {} }; MODULE_DEVICE_TABLE(of, dt_match); @@ -1513,4 +1531,5 @@ MODULE_DESCRIPTION(PVR_DRIVER_DESC); MODULE_LICENSE("Dual MIT/GPL"); MODULE_IMPORT_NS("DMA_BUF"); MODULE_FIRMWARE("powervr/rogue_33.15.11.3_v1.fw"); +MODULE_FIRMWARE("powervr/rogue_36.52.104.182_v1.fw"); MODULE_FIRMWARE("powervr/rogue_36.53.104.796_v1.fw"); diff --git a/drivers/gpu/drm/imagination/pvr_power.c b/drivers/gpu/drm/imagination/pvr_power.c index 187a07e0bd9a..c6e7ff9e935d 100644 --- a/drivers/gpu/drm/imagination/pvr_power.c +++ b/drivers/gpu/drm/imagination/pvr_power.c @@ -18,6 +18,7 @@ #include <linux/platform_device.h> #include <linux/pm_domain.h> #include <linux/pm_runtime.h> +#include <linux/pwrseq/consumer.h> #include <linux/reset.h> #include <linux/timer.h> #include <linux/types.h> @@ -234,6 +235,118 @@ pvr_watchdog_init(struct pvr_device *pvr_dev) return 0; } +static int pvr_power_init_manual(struct pvr_device *pvr_dev) +{ + struct drm_device *drm_dev = from_pvr_device(pvr_dev); + struct reset_control *reset; + + reset = devm_reset_control_get_optional_exclusive(drm_dev->dev, NULL); + if (IS_ERR(reset)) + return dev_err_probe(drm_dev->dev, PTR_ERR(reset), + "failed to get gpu reset line\n"); + + pvr_dev->reset = reset; + + return 0; +} + +static int pvr_power_on_sequence_manual(struct pvr_device *pvr_dev) +{ + int err; + + err = clk_prepare_enable(pvr_dev->core_clk); + if (err) + return err; + + err = clk_prepare_enable(pvr_dev->sys_clk); + if (err) + goto err_core_clk_disable; + + err = clk_prepare_enable(pvr_dev->mem_clk); + if (err) + goto err_sys_clk_disable; + + /* + * According to the hardware manual, a delay of at least 32 clock + * cycles is required between de-asserting the clkgen reset and + * de-asserting the GPU reset. Assuming a worst-case scenario with + * a very high GPU clock frequency, a delay of 1 microsecond is + * sufficient to ensure this requirement is met across all + * feasible GPU clock speeds. 
+ */ + udelay(1); + + err = reset_control_deassert(pvr_dev->reset); + if (err) + goto err_mem_clk_disable; + + return 0; + +err_mem_clk_disable: + clk_disable_unprepare(pvr_dev->mem_clk); + +err_sys_clk_disable: + clk_disable_unprepare(pvr_dev->sys_clk); + +err_core_clk_disable: + clk_disable_unprepare(pvr_dev->core_clk); + + return err; +} + +static int pvr_power_off_sequence_manual(struct pvr_device *pvr_dev) +{ + int err; + + err = reset_control_assert(pvr_dev->reset); + + clk_disable_unprepare(pvr_dev->mem_clk); + clk_disable_unprepare(pvr_dev->sys_clk); + clk_disable_unprepare(pvr_dev->core_clk); + + return err; +} + +const struct pvr_power_sequence_ops pvr_power_sequence_ops_manual = { + .init = pvr_power_init_manual, + .power_on = pvr_power_on_sequence_manual, + .power_off = pvr_power_off_sequence_manual, +}; + +static int pvr_power_init_pwrseq(struct pvr_device *pvr_dev) +{ + struct device *dev = from_pvr_device(pvr_dev)->dev; + + pvr_dev->pwrseq = devm_pwrseq_get(dev, "gpu-power"); + if (IS_ERR(pvr_dev->pwrseq)) { + /* + * This platform requires a sequencer. If we can't get it, we + * must return the error (including -EPROBE_DEFER to wait for + * the provider to appear) + */ + return dev_err_probe(dev, PTR_ERR(pvr_dev->pwrseq), + "Failed to get required power sequencer\n"); + } + + return 0; +} + +static int pvr_power_on_sequence_pwrseq(struct pvr_device *pvr_dev) +{ + return pwrseq_power_on(pvr_dev->pwrseq); +} + +static int pvr_power_off_sequence_pwrseq(struct pvr_device *pvr_dev) +{ + return pwrseq_power_off(pvr_dev->pwrseq); +} + +const struct pvr_power_sequence_ops pvr_power_sequence_ops_pwrseq = { + .init = pvr_power_init_pwrseq, + .power_on = pvr_power_on_sequence_pwrseq, + .power_off = pvr_power_off_sequence_pwrseq, +}; + int pvr_power_device_suspend(struct device *dev) { @@ -252,11 +365,7 @@ pvr_power_device_suspend(struct device *dev) goto err_drm_dev_exit; } - clk_disable_unprepare(pvr_dev->mem_clk); - clk_disable_unprepare(pvr_dev->sys_clk); - clk_disable_unprepare(pvr_dev->core_clk); - - err = reset_control_assert(pvr_dev->reset); + err = pvr_dev->device_data->pwr_ops->power_off(pvr_dev); err_drm_dev_exit: drm_dev_exit(idx); @@ -276,53 +385,22 @@ pvr_power_device_resume(struct device *dev) if (!drm_dev_enter(drm_dev, &idx)) return -EIO; - err = clk_prepare_enable(pvr_dev->core_clk); + err = pvr_dev->device_data->pwr_ops->power_on(pvr_dev); if (err) goto err_drm_dev_exit; - err = clk_prepare_enable(pvr_dev->sys_clk); - if (err) - goto err_core_clk_disable; - - err = clk_prepare_enable(pvr_dev->mem_clk); - if (err) - goto err_sys_clk_disable; - - /* - * According to the hardware manual, a delay of at least 32 clock - * cycles is required between de-asserting the clkgen reset and - * de-asserting the GPU reset. Assuming a worst-case scenario with - * a very high GPU clock frequency, a delay of 1 microsecond is - * sufficient to ensure this requirement is met across all - * feasible GPU clock speeds. 
- */ - udelay(1); - - err = reset_control_deassert(pvr_dev->reset); - if (err) - goto err_mem_clk_disable; - if (pvr_dev->fw_dev.booted) { err = pvr_power_fw_enable(pvr_dev); if (err) - goto err_reset_assert; + goto err_power_off; } drm_dev_exit(idx); return 0; -err_reset_assert: - reset_control_assert(pvr_dev->reset); - -err_mem_clk_disable: - clk_disable_unprepare(pvr_dev->mem_clk); - -err_sys_clk_disable: - clk_disable_unprepare(pvr_dev->sys_clk); - -err_core_clk_disable: - clk_disable_unprepare(pvr_dev->core_clk); +err_power_off: + pvr_dev->device_data->pwr_ops->power_off(pvr_dev); err_drm_dev_exit: drm_dev_exit(idx); diff --git a/drivers/gpu/drm/imagination/pvr_power.h b/drivers/gpu/drm/imagination/pvr_power.h index ada85674a7ca..b853d092242c 100644 --- a/drivers/gpu/drm/imagination/pvr_power.h +++ b/drivers/gpu/drm/imagination/pvr_power.h @@ -41,4 +41,19 @@ pvr_power_put(struct pvr_device *pvr_dev) int pvr_power_domains_init(struct pvr_device *pvr_dev); void pvr_power_domains_fini(struct pvr_device *pvr_dev); +/** + * struct pvr_power_sequence_ops - Platform specific power sequence operations. + * @init: Pointer to the platform-specific initialization function. + * @power_on: Pointer to the platform-specific power on function. + * @power_off: Pointer to the platform-specific power off function. + */ +struct pvr_power_sequence_ops { + int (*init)(struct pvr_device *pvr_dev); + int (*power_on)(struct pvr_device *pvr_dev); + int (*power_off)(struct pvr_device *pvr_dev); +}; + +extern const struct pvr_power_sequence_ops pvr_power_sequence_ops_manual; +extern const struct pvr_power_sequence_ops pvr_power_sequence_ops_pwrseq; + #endif /* PVR_POWER_H */ diff --git a/drivers/gpu/drm/nouveau/nouveau_platform.c b/drivers/gpu/drm/nouveau/nouveau_platform.c index a5ce8eb4a3be..8d5853deeee4 100644 --- a/drivers/gpu/drm/nouveau/nouveau_platform.c +++ b/drivers/gpu/drm/nouveau/nouveau_platform.c @@ -30,10 +30,7 @@ static int nouveau_platform_probe(struct platform_device *pdev) func = of_device_get_match_data(&pdev->dev); drm = nouveau_platform_device_create(func, pdev, &device); - if (IS_ERR(drm)) - return PTR_ERR(drm); - - return 0; + return PTR_ERR_OR_ZERO(drm); } static void nouveau_platform_remove(struct platform_device *pdev) diff --git a/drivers/gpu/drm/nouveau/nouveau_prime.c b/drivers/gpu/drm/nouveau/nouveau_prime.c index cd95446d6851..caab60fc62f6 100644 --- a/drivers/gpu/drm/nouveau/nouveau_prime.c +++ b/drivers/gpu/drm/nouveau/nouveau_prime.c @@ -108,9 +108,21 @@ struct dma_buf *nouveau_gem_prime_export(struct drm_gem_object *gobj, int flags) { struct nouveau_bo *nvbo = nouveau_gem_object(gobj); + struct ttm_operation_ctx ctx = { + .interruptible = true, + .no_wait_gpu = true, + /* We opt to avoid OOM on system pages allocations */ + .gfp_retry_mayfail = true, + .allow_res_evict = false, + }; + int ret; if (nvbo->no_share) return ERR_PTR(-EPERM); + ret = ttm_bo_setup_export(&nvbo->bo, &ctx); + if (ret) + return ERR_PTR(ret); + return drm_gem_prime_export(gobj, flags); } diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c index 054b71dba6a7..794267f0f007 100644 --- a/drivers/gpu/drm/omapdrm/omap_drv.c +++ b/drivers/gpu/drm/omapdrm/omap_drv.c @@ -378,10 +378,8 @@ static int omap_display_id(struct omap_dss_device *output) struct device_node *node = NULL; if (output->bridge) { - struct drm_bridge *bridge = output->bridge; - - while (drm_bridge_get_next_bridge(bridge)) - bridge = drm_bridge_get_next_bridge(bridge); + struct drm_bridge *bridge 
__free(drm_bridge_put) = + drm_bridge_chain_get_last_bridge(output->bridge->encoder); node = bridge->of_node; } diff --git a/drivers/gpu/drm/panel/panel-edp.c b/drivers/gpu/drm/panel/panel-edp.c index feba520a0f45..62435e3cd9f4 100644 --- a/drivers/gpu/drm/panel/panel-edp.c +++ b/drivers/gpu/drm/panel/panel-edp.c @@ -1843,6 +1843,13 @@ static const struct panel_delay delay_200_500_e50_d100 = { .disable = 100, }; +static const struct panel_delay delay_80_500_e50_d50 = { + .hpd_absent = 80, + .unprepare = 500, + .enable = 50, + .disable = 50, +}; + #define EDP_PANEL_ENTRY(vend_chr_0, vend_chr_1, vend_chr_2, product_id, _delay, _name) \ { \ .ident = { \ @@ -1955,6 +1962,7 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a36, &delay_200_500_e200, "Unknown"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a3e, &delay_200_500_e80_d50, "NV116WHM-N49"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a5d, &delay_200_500_e50, "NV116WHM-N45"), + EDP_PANEL_ENTRY('B', 'O', 'E', 0x0a6a, &delay_200_500_e80, "NV140WUM-N44"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ac5, &delay_200_500_e50, "NV116WHM-N4C"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0ae8, &delay_200_500_e50_p2e80, "NV140WUM-N41"), EDP_PANEL_ENTRY('B', 'O', 'E', 0x0b09, &delay_200_500_e50_po2e200, "NV140FHM-NZ"), @@ -1996,6 +2004,7 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('C', 'M', 'N', 0x124c, &delay_200_500_e80_d50, "N122JCA-ENK"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x142b, &delay_200_500_e80_d50, "N140HCA-EAC"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x142e, &delay_200_500_e80_d50, "N140BGA-EA4"), + EDP_PANEL_ENTRY('C', 'M', 'N', 0x1441, &delay_200_500_e80_d50, "N140JCA-ELK"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x144f, &delay_200_500_e80_d50, "N140HGA-EA1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x1468, &delay_200_500_e80, "N140HGA-EA1"), EDP_PANEL_ENTRY('C', 'M', 'N', 0x14a8, &delay_200_500_e80, "N140JCA-ELP"), @@ -2011,10 +2020,12 @@ static const struct edp_panel_entry edp_panels[] = { EDP_PANEL_ENTRY('C', 'S', 'W', 0x1100, &delay_200_500_e80_d50, "MNB601LS1-1"), EDP_PANEL_ENTRY('C', 'S', 'W', 0x1103, &delay_200_500_e80_d50, "MNB601LS1-3"), EDP_PANEL_ENTRY('C', 'S', 'W', 0x1104, &delay_200_500_e50_d100, "MNB601LS1-4"), + EDP_PANEL_ENTRY('C', 'S', 'W', 0x143f, &delay_200_500_e50, "MNE007QS3-6"), EDP_PANEL_ENTRY('C', 'S', 'W', 0x1448, &delay_200_500_e50, "MNE007QS3-7"), EDP_PANEL_ENTRY('C', 'S', 'W', 0x1457, &delay_80_500_e80_p2e200, "MNE007QS3-8"), EDP_PANEL_ENTRY('C', 'S', 'W', 0x1462, &delay_200_500_e50, "MNE007QS5-2"), EDP_PANEL_ENTRY('C', 'S', 'W', 0x1468, &delay_200_500_e50, "MNE007QB2-2"), + EDP_PANEL_ENTRY('C', 'S', 'W', 0x146e, &delay_80_500_e50_d50, "MNE007QB3-1"), EDP_PANEL_ENTRY('E', 'T', 'C', 0x0000, &delay_50_500_e200_d200_po2e335, "LP079QX1-SP0V"), diff --git a/drivers/gpu/drm/panel/panel-lvds.c b/drivers/gpu/drm/panel/panel-lvds.c index 23fd535d8f47..46b07f38559f 100644 --- a/drivers/gpu/drm/panel/panel-lvds.c +++ b/drivers/gpu/drm/panel/panel-lvds.c @@ -28,8 +28,6 @@ struct panel_lvds { struct device *dev; const char *label; - unsigned int width; - unsigned int height; struct drm_display_mode dmode; u32 bus_flags; unsigned int bus_format; diff --git a/drivers/gpu/drm/panthor/panthor_gem.c b/drivers/gpu/drm/panthor/panthor_gem.c index a123bc740ba1..156c7a0b62a2 100644 --- a/drivers/gpu/drm/panthor/panthor_gem.c +++ b/drivers/gpu/drm/panthor/panthor_gem.c @@ -74,7 +74,6 @@ static void panthor_gem_free_object(struct drm_gem_object *obj) mutex_destroy(&bo->label.lock); drm_gem_free_mmap_offset(&bo->base.base); - 
mutex_destroy(&bo->gpuva_list_lock); drm_gem_shmem_free(&bo->base); drm_gem_object_put(vm_root_gem); } @@ -246,8 +245,6 @@ struct drm_gem_object *panthor_gem_create_object(struct drm_device *ddev, size_t obj->base.base.funcs = &panthor_gem_funcs; obj->base.map_wc = !ptdev->coherent; - mutex_init(&obj->gpuva_list_lock); - drm_gem_gpuva_set_lock(&obj->base.base, &obj->gpuva_list_lock); mutex_init(&obj->label.lock); panthor_gem_debugfs_bo_init(obj); diff --git a/drivers/gpu/drm/panthor/panthor_gem.h b/drivers/gpu/drm/panthor/panthor_gem.h index 8fc7215e9b90..80c6e24112d0 100644 --- a/drivers/gpu/drm/panthor/panthor_gem.h +++ b/drivers/gpu/drm/panthor/panthor_gem.h @@ -79,18 +79,6 @@ struct panthor_gem_object { */ struct drm_gem_object *exclusive_vm_root_gem; - /** - * @gpuva_list_lock: Custom GPUVA lock. - * - * Used to protect insertion of drm_gpuva elements to the - * drm_gem_object.gpuva.list list. - * - * We can't use the GEM resv for that, because drm_gpuva_link() is - * called in a dma-signaling path, where we're not allowed to take - * resv locks. - */ - struct mutex gpuva_list_lock; - /** @flags: Combination of drm_panthor_bo_flags flags. */ u32 flags; diff --git a/drivers/gpu/drm/panthor/panthor_mmu.c b/drivers/gpu/drm/panthor/panthor_mmu.c index 2003b91a8409..6dec4354e378 100644 --- a/drivers/gpu/drm/panthor/panthor_mmu.c +++ b/drivers/gpu/drm/panthor/panthor_mmu.c @@ -569,15 +569,37 @@ static void lock_region(struct panthor_device *ptdev, u32 as_nr, write_cmd(ptdev, as_nr, AS_COMMAND_LOCK); } -static int mmu_hw_do_flush_on_gpu_ctrl(struct panthor_device *ptdev, int as_nr, - u32 op) +static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr, + u64 iova, u64 size, u32 op) { const u32 l2_flush_op = CACHE_CLEAN | CACHE_INV; - u32 lsc_flush_op = 0; + u32 lsc_flush_op; int ret; - if (op == AS_COMMAND_FLUSH_MEM) + lockdep_assert_held(&ptdev->mmu->as.slots_lock); + + switch (op) { + case AS_COMMAND_FLUSH_MEM: lsc_flush_op = CACHE_CLEAN | CACHE_INV; + break; + case AS_COMMAND_FLUSH_PT: + lsc_flush_op = 0; + break; + default: + drm_WARN(&ptdev->base, 1, "Unexpected AS_COMMAND: %d", op); + return -EINVAL; + } + + if (as_nr < 0) + return 0; + + /* + * If the AS number is greater than zero, then we can be sure + * the device is up and running, so we don't need to explicitly + * power it up + */ + + lock_region(ptdev, as_nr, iova, size); ret = wait_ready(ptdev, as_nr); if (ret) @@ -598,33 +620,6 @@ static int mmu_hw_do_flush_on_gpu_ctrl(struct panthor_device *ptdev, int as_nr, return wait_ready(ptdev, as_nr); } -static int mmu_hw_do_operation_locked(struct panthor_device *ptdev, int as_nr, - u64 iova, u64 size, u32 op) -{ - lockdep_assert_held(&ptdev->mmu->as.slots_lock); - - if (as_nr < 0) - return 0; - - /* - * If the AS number is greater than zero, then we can be sure - * the device is up and running, so we don't need to explicitly - * power it up - */ - - if (op != AS_COMMAND_UNLOCK) - lock_region(ptdev, as_nr, iova, size); - - if (op == AS_COMMAND_FLUSH_MEM || op == AS_COMMAND_FLUSH_PT) - return mmu_hw_do_flush_on_gpu_ctrl(ptdev, as_nr, op); - - /* Run the MMU operation */ - write_cmd(ptdev, as_nr, op); - - /* Wait for the flush to complete */ - return wait_ready(ptdev, as_nr); -} - static int mmu_hw_do_operation(struct panthor_vm *vm, u64 iova, u64 size, u32 op) { @@ -1107,9 +1102,9 @@ static void panthor_vm_bo_put(struct drm_gpuvm_bo *vm_bo) * GEM vm_bo list. 
*/ dma_resv_lock(drm_gpuvm_resv(vm), NULL); - mutex_lock(&bo->gpuva_list_lock); + mutex_lock(&bo->base.base.gpuva.lock); unpin = drm_gpuvm_bo_put(vm_bo); - mutex_unlock(&bo->gpuva_list_lock); + mutex_unlock(&bo->base.base.gpuva.lock); dma_resv_unlock(drm_gpuvm_resv(vm)); /* If the vm_bo object was destroyed, release the pin reference that @@ -1227,7 +1222,7 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx, (flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) != DRM_PANTHOR_VM_BIND_OP_TYPE_MAP) return -EINVAL; - /* Make sure the VA and size are aligned and in-bounds. */ + /* Make sure the VA and size are in-bounds. */ if (size > bo->base.base.size || offset > bo->base.base.size - size) return -EINVAL; @@ -1282,9 +1277,9 @@ static int panthor_vm_prepare_map_op_ctx(struct panthor_vm_op_ctx *op_ctx, * calling this function. */ dma_resv_lock(panthor_vm_resv(vm), NULL); - mutex_lock(&bo->gpuva_list_lock); + mutex_lock(&bo->base.base.gpuva.lock); op_ctx->map.vm_bo = drm_gpuvm_bo_obtain_prealloc(preallocated_vm_bo); - mutex_unlock(&bo->gpuva_list_lock); + mutex_unlock(&bo->base.base.gpuva.lock); dma_resv_unlock(panthor_vm_resv(vm)); /* If the a vm_bo for this <VM,BO> combination exists, it already @@ -2036,10 +2031,10 @@ static void panthor_vma_link(struct panthor_vm *vm, { struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj); - mutex_lock(&bo->gpuva_list_lock); + mutex_lock(&bo->base.base.gpuva.lock); drm_gpuva_link(&vma->base, vm_bo); drm_WARN_ON(&vm->ptdev->base, drm_gpuvm_bo_put(vm_bo)); - mutex_unlock(&bo->gpuva_list_lock); + mutex_unlock(&bo->base.base.gpuva.lock); } static void panthor_vma_unlink(struct panthor_vm *vm, @@ -2048,9 +2043,9 @@ static void panthor_vma_unlink(struct panthor_vm *vm, struct panthor_gem_object *bo = to_panthor_bo(vma->base.gem.obj); struct drm_gpuvm_bo *vm_bo = drm_gpuvm_bo_get(vma->base.vm_bo); - mutex_lock(&bo->gpuva_list_lock); + mutex_lock(&bo->base.base.gpuva.lock); drm_gpuva_unlink(&vma->base); - mutex_unlock(&bo->gpuva_list_lock); + mutex_unlock(&bo->base.base.gpuva.lock); /* drm_gpuva_unlink() release the vm_bo, but we manually retained it * when entering this function, so we can implement deferred VMA @@ -2420,8 +2415,9 @@ panthor_vm_create(struct panthor_device *ptdev, bool for_mcu, * to be handled the same way user VMAs are. */ drm_gpuvm_init(&vm->base, for_mcu ? "panthor-MCU-VM" : "panthor-GPU-VM", - DRM_GPUVM_RESV_PROTECTED, &ptdev->base, dummy_gem, - min_va, va_range, 0, 0, &panthor_gpuvm_ops); + DRM_GPUVM_RESV_PROTECTED | DRM_GPUVM_IMMEDIATE_MODE, + &ptdev->base, dummy_gem, min_va, va_range, 0, 0, + &panthor_gpuvm_ops); drm_gem_object_put(dummy_gem); return vm; @@ -2451,7 +2447,7 @@ panthor_vm_bind_prepare_op_ctx(struct drm_file *file, int ret; /* Aligned on page size. 
*/ - if (!IS_ALIGNED(op->va | op->size, vm_pgsz)) + if (!IS_ALIGNED(op->va | op->size | op->bo_offset, vm_pgsz)) return -EINVAL; switch (op->flags & DRM_PANTHOR_VM_BIND_OP_TYPE_MASK) { diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c index af58b814e588..001b3543924a 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_lvds.c @@ -1013,7 +1013,7 @@ err_reset_assert: } static const struct dev_pm_ops rcar_lvds_pm_ops = { - SET_RUNTIME_PM_OPS(rcar_lvds_runtime_suspend, rcar_lvds_runtime_resume, NULL) + RUNTIME_PM_OPS(rcar_lvds_runtime_suspend, rcar_lvds_runtime_resume, NULL) }; static struct platform_driver rcar_lvds_platform_driver = { @@ -1021,7 +1021,7 @@ static struct platform_driver rcar_lvds_platform_driver = { .remove = rcar_lvds_remove, .driver = { .name = "rcar-lvds", - .pm = &rcar_lvds_pm_ops, + .pm = pm_ptr(&rcar_lvds_pm_ops), .of_match_table = rcar_lvds_of_table, }, }; diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c index 1af4c73f7a88..5c73a513f678 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi.c @@ -576,7 +576,10 @@ static int rcar_mipi_dsi_startup(struct rcar_mipi_dsi *dsi, udelay(10); rcar_mipi_dsi_clr(dsi, CLOCKSET1, CLOCKSET1_UPDATEPLL); - ppisetr = PPISETR_DLEN_3 | PPISETR_CLEN; + rcar_mipi_dsi_clr(dsi, TXSETR, TXSETR_LANECNT_MASK); + rcar_mipi_dsi_set(dsi, TXSETR, dsi->lanes - 1); + + ppisetr = ((BIT(dsi->lanes) - 1) & PPISETR_DLEN_MASK) | PPISETR_CLEN; rcar_mipi_dsi_write(dsi, PPISETR, ppisetr); rcar_mipi_dsi_set(dsi, PHYSETUP, PHYSETUP_SHUTDOWNZ); @@ -934,9 +937,234 @@ static int rcar_mipi_dsi_host_detach(struct mipi_dsi_host *host, return 0; } +static ssize_t rcar_mipi_dsi_host_tx_transfer(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg, + bool is_rx_xfer) +{ + const bool is_tx_long = mipi_dsi_packet_format_is_long(msg->type); + struct rcar_mipi_dsi *dsi = host_to_rcar_mipi_dsi(host); + struct mipi_dsi_packet packet; + u8 payload[16] = { 0 }; + u32 status; + int ret; + + ret = mipi_dsi_create_packet(&packet, msg); + if (ret) + return ret; + + /* Configure LP or HS command transfer. */ + rcar_mipi_dsi_write(dsi, TXCMSETR, (msg->flags & MIPI_DSI_MSG_USE_LPM) ? + TXCMSETR_SPDTYP : 0); + + /* Register access mode for RX transfer. */ + if (is_rx_xfer) + rcar_mipi_dsi_write(dsi, RXPSETR, 0); + + /* Do not use IRQ, poll for completion, the completion is quick. */ + rcar_mipi_dsi_write(dsi, TXCMIER, 0); + + /* + * Send the header: + * header[0] = Virtual Channel + Data Type + * header[1] = Word Count LSB (LP) or first param (SP) + * header[2] = Word Count MSB (LP) or second param (SP) + */ + rcar_mipi_dsi_write(dsi, TXCMPHDR, + (is_tx_long ? 
TXCMPHDR_FMT : 0) | + TXCMPHDR_VC(msg->channel) | + TXCMPHDR_DT(msg->type) | + TXCMPHDR_DATA1(packet.header[2]) | + TXCMPHDR_DATA0(packet.header[1])); + + if (is_tx_long) { + memcpy(payload, packet.payload, + min(msg->tx_len, sizeof(payload))); + + rcar_mipi_dsi_write(dsi, TXCMPPD0R, + (payload[3] << 24) | (payload[2] << 16) | + (payload[1] << 8) | payload[0]); + rcar_mipi_dsi_write(dsi, TXCMPPD1R, + (payload[7] << 24) | (payload[6] << 16) | + (payload[5] << 8) | payload[4]); + rcar_mipi_dsi_write(dsi, TXCMPPD2R, + (payload[11] << 24) | (payload[10] << 16) | + (payload[9] << 8) | payload[8]); + rcar_mipi_dsi_write(dsi, TXCMPPD3R, + (payload[15] << 24) | (payload[14] << 16) | + (payload[13] << 8) | payload[12]); + } + + /* Start the transfer, RX with BTA, TX without BTA. */ + if (is_rx_xfer) { + rcar_mipi_dsi_write(dsi, TXCMCR, TXCMCR_BTAREQ); + + /* Wait until the transmission, BTA, reception completed. */ + ret = read_poll_timeout(rcar_mipi_dsi_read, status, + (status & RXPSR_BTAREQEND), + 2000, 50000, false, dsi, RXPSR); + } else { + rcar_mipi_dsi_write(dsi, TXCMCR, TXCMCR_TXREQ); + + /* Wait until the transmission completed. */ + ret = read_poll_timeout(rcar_mipi_dsi_read, status, + (status & TXCMSR_TXREQEND), + 2000, 50000, false, dsi, TXCMSR); + } + + if (ret < 0) { + dev_err(dsi->dev, "Command transfer timeout (0x%08x)\n", + status); + return ret; + } + + return packet.size; +} + +static ssize_t rcar_mipi_dsi_host_rx_transfer(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg) +{ + struct rcar_mipi_dsi *dsi = host_to_rcar_mipi_dsi(host); + u8 *rx_buf = (u8 *)(msg->rx_buf); + u32 reg, data, status, wc; + int i, ret; + + /* RX transfer received data validation and parsing starts here. */ + reg = rcar_mipi_dsi_read(dsi, TOSR); + if (reg & TOSR_TATO) { /* Turn-Around TimeOut. */ + /* Clear TATO Turn-Around TimeOut bit. */ + rcar_mipi_dsi_write(dsi, TOSR, TOSR_TATO); + return -ETIMEDOUT; + } + + reg = rcar_mipi_dsi_read(dsi, RXPSR); + + if (msg->flags & MIPI_DSI_MSG_REQ_ACK) { + /* Transfer with zero-length RX. */ + if (!(reg & RXPSR_RCVACK)) { + /* No ACK on RX response received. */ + return -EINVAL; + } + } else { + /* Transfer with non-zero-length RX. */ + if (!(reg & RXPSR_RCVRESP)) { + /* No packet header of RX response received. */ + return -EINVAL; + } + + if (reg & (RXPSR_CRCERR | RXPSR_WCERR | RXPSR_AXIERR | RXPSR_OVRERR)) { + /* Incorrect response payload. */ + return -ENODATA; + } + + data = rcar_mipi_dsi_read(dsi, RXPHDR); + if (data & RXPHDR_FMT) { /* Long Packet Response. */ + /* Read Long Packet Response length from packet header. */ + wc = data & 0xffff; + if (wc > msg->rx_len) { + dev_warn(dsi->dev, + "Long Packet Response longer than RX buffer (%d), limited to %zu Bytes\n", + wc, msg->rx_len); + wc = msg->rx_len; + } + + if (wc > 16) { + dev_warn(dsi->dev, + "Long Packet Response too long (%d), limited to 16 Bytes\n", + wc); + wc = 16; + } + + for (i = 0; i < msg->rx_len; i++) { + if (!(i % 4)) + data = rcar_mipi_dsi_read(dsi, RXPPD0R + i); + + rx_buf[i] = data & 0xff; + data >>= 8; + } + } else { /* Short Packet Response. */ + if (msg->rx_len >= 1) + rx_buf[0] = data & 0xff; + if (msg->rx_len >= 2) + rx_buf[1] = (data >> 8) & 0xff; + if (msg->rx_len >= 3) { + dev_warn(dsi->dev, + "Expected Short Packet Response too long (%zu), limited to 2 Bytes\n", + msg->rx_len); + } + } + } + + if (reg & RXPSR_RCVAKE) { + /* Acknowledge and Error report received. */ + return -EFAULT; + } + + /* Wait until the bus handover to host processor completed. 
*/ + ret = read_poll_timeout(rcar_mipi_dsi_read, status, + !(status & PPIDL0SR_DIR), + 2000, 50000, false, dsi, PPIDL0SR); + if (ret < 0) { + dev_err(dsi->dev, "Command RX DIR timeout (0x%08x)\n", status); + return ret; + } + + /* Wait until the data lane is in LP11 stop state. */ + ret = read_poll_timeout(rcar_mipi_dsi_read, status, + status & PPIDL0SR_STPST, + 2000, 50000, false, dsi, PPIDL0SR); + if (ret < 0) { + dev_err(dsi->dev, "Command RX STPST timeout (0x%08x)\n", status); + return ret; + } + + return 0; +} + +static ssize_t rcar_mipi_dsi_host_transfer(struct mipi_dsi_host *host, + const struct mipi_dsi_msg *msg) +{ + const bool is_rx_xfer = (msg->flags & MIPI_DSI_MSG_REQ_ACK) || msg->rx_len; + struct rcar_mipi_dsi *dsi = host_to_rcar_mipi_dsi(host); + int ret; + + if (msg->tx_len > 16 || msg->rx_len > 16) { + /* ToDo: Implement Memory on AXI bus command mode. */ + dev_warn(dsi->dev, + "Register-based command mode supports only up to 16 Bytes long payload\n"); + return -EOPNOTSUPP; + } + + ret = rcar_mipi_dsi_host_tx_transfer(host, msg, is_rx_xfer); + + /* If TX transfer succeeded and this transfer has RX part. */ + if (ret >= 0 && is_rx_xfer) { + ret = rcar_mipi_dsi_host_rx_transfer(host, msg); + if (ret) + return ret; + + ret = msg->rx_len; + } + + /* + * Wait a bit between commands, otherwise panels based on ILI9881C + * TCON may fail to correctly receive all commands sent to them. + * Until we can actually test with another DSI device, keep the + * delay here, but eventually this delay might have to be moved + * into the ILI9881C panel driver. + */ + usleep_range(1000, 2000); + + /* Clear the completion interrupt. */ + if (!msg->rx_len) + rcar_mipi_dsi_write(dsi, TXCMSR, TXCMSR_TXREQEND); + + return ret; +} + static const struct mipi_dsi_host_ops rcar_mipi_dsi_host_ops = { .attach = rcar_mipi_dsi_host_attach, .detach = rcar_mipi_dsi_host_detach, + .transfer = rcar_mipi_dsi_host_transfer }; /* ----------------------------------------------------------------------------- diff --git a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h index a6b276f1d6ee..76521276e2af 100644 --- a/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h +++ b/drivers/gpu/drm/renesas/rcar-du/rcar_mipi_dsi_regs.h @@ -12,6 +12,130 @@ #define LINKSR_LPBUSY (1 << 1) #define LINKSR_HSBUSY (1 << 0) +#define TXSETR 0x100 +#define TXSETR_LANECNT_MASK (0x3 << 0) + +/* + * DSI Command Transfer Registers + */ +#define TXCMSETR 0x110 +#define TXCMSETR_SPDTYP (1 << 8) /* 0:HS 1:LP */ +#define TXCMSETR_LPPDACC (1 << 0) +#define TXCMCR 0x120 +#define TXCMCR_BTATYP (1 << 2) +#define TXCMCR_BTAREQ (1 << 1) +#define TXCMCR_TXREQ (1 << 0) +#define TXCMSR 0x130 +#define TXCMSR_CLSNERR (1 << 18) +#define TXCMSR_AXIERR (1 << 16) +#define TXCMSR_TXREQEND (1 << 0) +#define TXCMSCR 0x134 +#define TXCMSCR_CLSNERR (1 << 18) +#define TXCMSCR_AXIERR (1 << 16) +#define TXCMSCR_TXREQEND (1 << 0) +#define TXCMIER 0x138 +#define TXCMIER_CLSNERR (1 << 18) +#define TXCMIER_AXIERR (1 << 16) +#define TXCMIER_TXREQEND (1 << 0) +#define TXCMADDRSET0R 0x140 +#define TXCMPHDR 0x150 +#define TXCMPHDR_FMT (1 << 24) /* 0:SP 1:LP */ +#define TXCMPHDR_VC(n) (((n) & 0x3) << 22) +#define TXCMPHDR_DT(n) (((n) & 0x3f) << 16) +#define TXCMPHDR_DATA1(n) (((n) & 0xff) << 8) +#define TXCMPHDR_DATA0(n) (((n) & 0xff) << 0) +#define TXCMPPD0R 0x160 +#define TXCMPPD1R 0x164 +#define TXCMPPD2R 0x168 +#define TXCMPPD3R 0x16c + +#define RXSETR 0x200 +#define RXSETR_CRCEN (((n) & 0xf) << 24) +#define 
RXSETR_ECCEN (((n) & 0xf) << 16) +#define RXPSETR 0x210 +#define RXPSETR_LPPDACC (1 << 0) +#define RXPSR 0x220 +#define RXPSR_ECCERR1B (1 << 28) +#define RXPSR_UEXTRGERR (1 << 25) +#define RXPSR_RESPTOERR (1 << 24) +#define RXPSR_OVRERR (1 << 23) +#define RXPSR_AXIERR (1 << 22) +#define RXPSR_CRCERR (1 << 21) +#define RXPSR_WCERR (1 << 20) +#define RXPSR_UEXDTERR (1 << 19) +#define RXPSR_UEXPKTERR (1 << 18) +#define RXPSR_ECCERR (1 << 17) +#define RXPSR_MLFERR (1 << 16) +#define RXPSR_RCVACK (1 << 14) +#define RXPSR_RCVEOT (1 << 10) +#define RXPSR_RCVAKE (1 << 9) +#define RXPSR_RCVRESP (1 << 8) +#define RXPSR_BTAREQEND (1 << 0) +#define RXPSCR 0x224 +#define RXPSCR_ECCERR1B (1 << 28) +#define RXPSCR_UEXTRGERR (1 << 25) +#define RXPSCR_RESPTOERR (1 << 24) +#define RXPSCR_OVRERR (1 << 23) +#define RXPSCR_AXIERR (1 << 22) +#define RXPSCR_CRCERR (1 << 21) +#define RXPSCR_WCERR (1 << 20) +#define RXPSCR_UEXDTERR (1 << 19) +#define RXPSCR_UEXPKTERR (1 << 18) +#define RXPSCR_ECCERR (1 << 17) +#define RXPSCR_MLFERR (1 << 16) +#define RXPSCR_RCVACK (1 << 14) +#define RXPSCR_RCVEOT (1 << 10) +#define RXPSCR_RCVAKE (1 << 9) +#define RXPSCR_RCVRESP (1 << 8) +#define RXPSCR_BTAREQEND (1 << 0) +#define RXPIER 0x228 +#define RXPIER_ECCERR1B (1 << 28) +#define RXPIER_UEXTRGERR (1 << 25) +#define RXPIER_RESPTOERR (1 << 24) +#define RXPIER_OVRERR (1 << 23) +#define RXPIER_AXIERR (1 << 22) +#define RXPIER_CRCERR (1 << 21) +#define RXPIER_WCERR (1 << 20) +#define RXPIER_UEXDTERR (1 << 19) +#define RXPIER_UEXPKTERR (1 << 18) +#define RXPIER_ECCERR (1 << 17) +#define RXPIER_MLFERR (1 << 16) +#define RXPIER_RCVACK (1 << 14) +#define RXPIER_RCVEOT (1 << 10) +#define RXPIER_RCVAKE (1 << 9) +#define RXPIER_RCVRESP (1 << 8) +#define RXPIER_BTAREQEND (1 << 0) +#define RXPADDRSET0R 0x230 +#define RXPSIZESETR 0x238 +#define RXPSIZESETR_SIZE(n) (((n) & 0xf) << 3) +#define RXPHDR 0x240 +#define RXPHDR_FMT (1 << 24) /* 0:SP 1:LP */ +#define RXPHDR_VC(n) (((n) & 0x3) << 22) +#define RXPHDR_DT(n) (((n) & 0x3f) << 16) +#define RXPHDR_DATA1(n) (((n) & 0xff) << 8) +#define RXPHDR_DATA0(n) (((n) & 0xff) << 0) +#define RXPPD0R 0x250 +#define RXPPD1R 0x254 +#define RXPPD2R 0x258 +#define RXPPD3R 0x25c +#define AKEPR 0x300 +#define AKEPR_VC(n) (((n) & 0x3) << 22) +#define AKEPR_DT(n) (((n) & 0x3f) << 16) +#define AKEPR_ERRRPT(n) (((n) & 0xffff) << 0) +#define RXRESPTOSETR 0x400 +#define TACR 0x500 +#define TASR 0x510 +#define TASCR 0x514 +#define TAIER 0x518 +#define TOSR 0x610 +#define TOSR_TATO (1 << 2) +#define TOSR_LRXHTO (1 << 1) +#define TOSR_HRXTO (1 << 0) +#define TOSCR 0x614 +#define TOSCR_TATO (1 << 2) +#define TOSCR_LRXHTO (1 << 1) +#define TOSCR_HRXTO (1 << 0) + /* * Video Mode Register */ @@ -80,10 +204,7 @@ * PHY-Protocol Interface (PPI) Registers */ #define PPISETR 0x700 -#define PPISETR_DLEN_0 (0x1 << 0) -#define PPISETR_DLEN_1 (0x3 << 0) -#define PPISETR_DLEN_2 (0x7 << 0) -#define PPISETR_DLEN_3 (0xf << 0) +#define PPISETR_DLEN_MASK (0xf << 0) #define PPISETR_CLEN (1 << 8) #define PPICLCR 0x710 @@ -100,6 +221,10 @@ #define PPICLSCR_HSTOLP (1 << 27) #define PPICLSCR_TOHS (1 << 26) +#define PPIDL0SR 0x740 +#define PPIDL0SR_DIR (1 << 10) +#define PPIDL0SR_STPST (1 << 6) + #define PPIDLSR 0x760 #define PPIDLSR_STPST (0xf << 0) diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig index ab525668939a..616c0bc1ccee 100644 --- a/drivers/gpu/drm/rockchip/Kconfig +++ b/drivers/gpu/drm/rockchip/Kconfig @@ -10,6 +10,7 @@ config DRM_ROCKCHIP select VIDEOMODE_HELPERS select DRM_ANALOGIX_DP if 
ROCKCHIP_ANALOGIX_DP select DRM_DISPLAY_DP_AUX_BUS if ROCKCHIP_ANALOGIX_DP + select DRM_DW_DP if ROCKCHIP_DW_DP select DRM_DW_HDMI if ROCKCHIP_DW_HDMI select DRM_DW_HDMI_QP if ROCKCHIP_DW_HDMI_QP select DRM_DW_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI @@ -60,6 +61,14 @@ config ROCKCHIP_CDN_DP RK3399 based SoC, you should select this option. +config ROCKCHIP_DW_DP + bool "Rockchip specific extensions for Synopsys DW DP" + help + This selects support for Rockchip SoC specific extensions + to enable Synopsys DesignWare Cores based DisplayPort transmit + controller support on Rockchip SoC. If you want to enable DP on + RK3588 based SoC, you should select this option. + config ROCKCHIP_DW_HDMI bool "Rockchip specific extensions for Synopsys DW HDMI" help diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile index 2b867cebbc12..097f062399c7 100644 --- a/drivers/gpu/drm/rockchip/Makefile +++ b/drivers/gpu/drm/rockchip/Makefile @@ -14,6 +14,7 @@ rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI_QP) += dw_hdmi_qp-rockchip.o rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi-rockchip.o rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI2) += dw-mipi-dsi2-rockchip.o +rockchipdrm-$(CONFIG_ROCKCHIP_DW_DP) += dw_dp-rockchip.o rockchipdrm-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o rockchipdrm-$(CONFIG_ROCKCHIP_LVDS) += rockchip_lvds.o rockchipdrm-$(CONFIG_ROCKCHIP_RGB) += rockchip_rgb.o diff --git a/drivers/gpu/drm/rockchip/dw_dp-rockchip.c b/drivers/gpu/drm/rockchip/dw_dp-rockchip.c new file mode 100644 index 000000000000..25ab4e46301e --- /dev/null +++ b/drivers/gpu/drm/rockchip/dw_dp-rockchip.c @@ -0,0 +1,150 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * Copyright (c) 2020 Rockchip Electronics Co., Ltd.
+ * + * Author: Zhang Yubing <yubing.zhang@rock-chips.com> + * Author: Andy Yan <andy.yan@rock-chips.com> + */ + +#include <linux/component.h> +#include <linux/of_device.h> +#include <linux/platform_device.h> +#include <drm/bridge/dw_dp.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_bridge.h> +#include <drm/drm_bridge_connector.h> +#include <drm/drm_of.h> +#include <drm/drm_print.h> +#include <drm/drm_probe_helper.h> +#include <drm/drm_simple_kms_helper.h> + +#include <linux/media-bus-format.h> +#include <linux/videodev2.h> + +#include "rockchip_drm_drv.h" +#include "rockchip_drm_vop.h" + +struct rockchip_dw_dp { + struct dw_dp *base; + struct device *dev; + struct rockchip_encoder encoder; +}; + +static int dw_dp_encoder_atomic_check(struct drm_encoder *encoder, + struct drm_crtc_state *crtc_state, + struct drm_connector_state *conn_state) +{ + struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state); + struct drm_atomic_state *state = conn_state->state; + struct drm_display_info *di = &conn_state->connector->display_info; + struct drm_bridge *bridge = drm_bridge_chain_get_first_bridge(encoder); + struct drm_bridge_state *bridge_state = drm_atomic_get_new_bridge_state(state, bridge); + u32 bus_format = bridge_state->input_bus_cfg.format; + + switch (bus_format) { + case MEDIA_BUS_FMT_UYYVYY10_0_5X30: + case MEDIA_BUS_FMT_UYYVYY8_0_5X24: + s->output_mode = ROCKCHIP_OUT_MODE_YUV420; + break; + case MEDIA_BUS_FMT_YUYV10_1X20: + case MEDIA_BUS_FMT_YUYV8_1X16: + s->output_mode = ROCKCHIP_OUT_MODE_S888_DUMMY; + break; + case MEDIA_BUS_FMT_RGB101010_1X30: + case MEDIA_BUS_FMT_RGB888_1X24: + case MEDIA_BUS_FMT_RGB666_1X24_CPADHI: + case MEDIA_BUS_FMT_YUV10_1X30: + case MEDIA_BUS_FMT_YUV8_1X24: + default: + s->output_mode = ROCKCHIP_OUT_MODE_AAAA; + break; + } + + s->output_type = DRM_MODE_CONNECTOR_DisplayPort; + s->bus_format = bus_format; + s->bus_flags = di->bus_flags; + s->color_space = V4L2_COLORSPACE_DEFAULT; + + return 0; +} + +static const struct drm_encoder_helper_funcs dw_dp_encoder_helper_funcs = { + .atomic_check = dw_dp_encoder_atomic_check, +}; + +static int dw_dp_rockchip_bind(struct device *dev, struct device *master, void *data) +{ + struct platform_device *pdev = to_platform_device(dev); + struct dw_dp_plat_data plat_data; + struct drm_device *drm_dev = data; + struct rockchip_dw_dp *dp; + struct drm_encoder *encoder; + struct drm_connector *connector; + int ret; + + dp = devm_kzalloc(dev, sizeof(*dp), GFP_KERNEL); + if (!dp) + return -ENOMEM; + + dp->dev = dev; + platform_set_drvdata(pdev, dp); + + plat_data.max_link_rate = 810000; + encoder = &dp->encoder.encoder; + encoder->possible_crtcs = drm_of_find_possible_crtcs(drm_dev, dev->of_node); + rockchip_drm_encoder_set_crtc_endpoint_id(&dp->encoder, dev->of_node, 0, 0); + + ret = drmm_encoder_init(drm_dev, encoder, NULL, DRM_MODE_ENCODER_TMDS, NULL); + if (ret) + return ret; + drm_encoder_helper_add(encoder, &dw_dp_encoder_helper_funcs); + + dp->base = dw_dp_bind(dev, encoder, &plat_data); + if (IS_ERR(dp->base)) { + ret = PTR_ERR(dp->base); + return ret; + } + + connector = drm_bridge_connector_init(drm_dev, encoder); + if (IS_ERR(connector)) { + ret = PTR_ERR(connector); + return dev_err_probe(dev, ret, "Failed to init bridge connector"); + } + + drm_connector_attach_encoder(connector, encoder); + + return 0; +} + +static const struct component_ops dw_dp_rockchip_component_ops = { + .bind = dw_dp_rockchip_bind, +}; + +static int dw_dp_probe(struct platform_device *pdev) +{ + struct device *dev = 
&pdev->dev; + + return component_add(dev, &dw_dp_rockchip_component_ops); +} + +static void dw_dp_remove(struct platform_device *pdev) +{ + struct rockchip_dw_dp *dp = platform_get_drvdata(pdev); + + component_del(dp->dev, &dw_dp_rockchip_component_ops); +} + +static const struct of_device_id dw_dp_of_match[] = { + { .compatible = "rockchip,rk3588-dp", }, + {} +}; +MODULE_DEVICE_TABLE(of, dw_dp_of_match); + +struct platform_driver dw_dp_driver = { + .probe = dw_dp_probe, + .remove = dw_dp_remove, + .driver = { + .name = "dw-dp", + .of_match_table = dw_dp_of_match, + }, +}; diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c index 180fad5d49ad..eb77bde9f628 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c @@ -529,6 +529,7 @@ static int __init rockchip_drm_init(void) ADD_ROCKCHIP_SUB_DRIVER(rockchip_dp_driver, CONFIG_ROCKCHIP_ANALOGIX_DP); ADD_ROCKCHIP_SUB_DRIVER(cdn_dp_driver, CONFIG_ROCKCHIP_CDN_DP); + ADD_ROCKCHIP_SUB_DRIVER(dw_dp_driver, CONFIG_ROCKCHIP_DW_DP); ADD_ROCKCHIP_SUB_DRIVER(dw_hdmi_rockchip_pltfm_driver, CONFIG_ROCKCHIP_DW_HDMI); ADD_ROCKCHIP_SUB_DRIVER(dw_hdmi_qp_rockchip_pltfm_driver, diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h index c183e82a42a5..2e86ad00979c 100644 --- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h +++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h @@ -87,6 +87,7 @@ int rockchip_drm_encoder_set_crtc_endpoint_id(struct rockchip_encoder *rencoder, struct device_node *np, int port, int reg); int rockchip_drm_endpoint_is_subdriver(struct device_node *ep); extern struct platform_driver cdn_dp_driver; +extern struct platform_driver dw_dp_driver; extern struct platform_driver dw_hdmi_rockchip_pltfm_driver; extern struct platform_driver dw_hdmi_qp_rockchip_pltfm_driver; extern struct platform_driver dw_mipi_dsi_rockchip_driver; diff --git a/drivers/gpu/drm/scheduler/sched_entity.c b/drivers/gpu/drm/scheduler/sched_entity.c index 8867b95ab089..5a4697f636f2 100644 --- a/drivers/gpu/drm/scheduler/sched_entity.c +++ b/drivers/gpu/drm/scheduler/sched_entity.c @@ -285,9 +285,9 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout) return 0; sched = entity->rq->sched; - /** - * The client will not queue more IBs during this fini, consume existing - * queued IBs or discard them on SIGKILL + /* + * The client will not queue more jobs during this fini - consume + * existing queued ones, or discard them on SIGKILL. */ if (current->flags & PF_EXITING) { if (timeout) @@ -300,7 +300,7 @@ long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout) drm_sched_entity_is_idle(entity)); } - /* For killed process disable any more IBs enqueue right now */ + /* For a killed process disallow further enqueueing of jobs. */ last_user = cmpxchg(&entity->last_user, current->group_leader, NULL); if ((!last_user || last_user == current->group_leader) && (current->flags & PF_EXITING) && (current->exit_code == SIGKILL)) @@ -324,9 +324,9 @@ EXPORT_SYMBOL(drm_sched_entity_flush); void drm_sched_entity_fini(struct drm_sched_entity *entity) { /* - * If consumption of existing IBs wasn't completed. Forcefully remove - * them here. Also makes sure that the scheduler won't touch this entity - * any more. + * If consumption of existing jobs wasn't completed forcefully remove + * them. Also makes sure that the scheduler won't touch this entity any + * more. 
*/ drm_sched_entity_kill(entity); @@ -391,7 +391,8 @@ EXPORT_SYMBOL(drm_sched_entity_set_priority); * Add a callback to the current dependency of the entity to wake up the * scheduler when the entity becomes available. */ -static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) +static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity, + struct drm_sched_job *sched_job) { struct drm_gpu_scheduler *sched = entity->rq->sched; struct dma_fence *fence = entity->dependency; @@ -421,6 +422,10 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity) entity->dependency = fence; } + if (trace_drm_sched_job_unschedulable_enabled() && + !test_bit(DMA_FENCE_FLAG_SIGNALED_BIT, &entity->dependency->flags)) + trace_drm_sched_job_unschedulable(sched_job, entity->dependency); + if (!dma_fence_add_callback(entity->dependency, &entity->cb, drm_sched_entity_wakeup)) return true; @@ -461,10 +466,8 @@ struct drm_sched_job *drm_sched_entity_pop_job(struct drm_sched_entity *entity) while ((entity->dependency = drm_sched_job_dependency(sched_job, entity))) { - if (drm_sched_entity_add_dependency_cb(entity)) { - trace_drm_sched_job_unschedulable(sched_job, entity->dependency); + if (drm_sched_entity_add_dependency_cb(entity, sched_job)) return NULL; - } } /* skip jobs from entity that marked guilty */ diff --git a/drivers/gpu/drm/scheduler/sched_main.c b/drivers/gpu/drm/scheduler/sched_main.c index 5a550fd76bf0..46119aacb809 100644 --- a/drivers/gpu/drm/scheduler/sched_main.c +++ b/drivers/gpu/drm/scheduler/sched_main.c @@ -1424,6 +1424,22 @@ void drm_sched_fini(struct drm_gpu_scheduler *sched) * Prevents reinsertion and marks job_queue as idle, * it will be removed from the rq in drm_sched_entity_fini() * eventually + * + * FIXME: + * This lacks the proper spin_lock(&s_entity->lock) and + * is, therefore, a race condition. Most notably, it + * can race with drm_sched_entity_push_job(). The lock + * cannot be taken here, however, because this would + * lead to lock inversion -> deadlock. + * + * The best solution probably is to enforce the + * lifetime rule of all entities having to be torn down + * before their scheduler. Then, however, locking could + * be dropped altogether from this function. + * + * For now, this remains a potential race in all + * drivers that keep entities alive for longer than + * the scheduler.
*/ s_entity->stopped = true; spin_unlock(&rq->lock); diff --git a/drivers/gpu/drm/scheduler/tests/sched_tests.h b/drivers/gpu/drm/scheduler/tests/sched_tests.h index 5b262126b776..7f31d35780cc 100644 --- a/drivers/gpu/drm/scheduler/tests/sched_tests.h +++ b/drivers/gpu/drm/scheduler/tests/sched_tests.h @@ -11,7 +11,6 @@ #include <linux/hrtimer.h> #include <linux/ktime.h> #include <linux/list.h> -#include <linux/atomic.h> #include <linux/mutex.h> #include <linux/types.h> diff --git a/drivers/gpu/drm/sitronix/st7571-i2c.c b/drivers/gpu/drm/sitronix/st7571-i2c.c index 453eb7e045e5..a6c4a6738ded 100644 --- a/drivers/gpu/drm/sitronix/st7571-i2c.c +++ b/drivers/gpu/drm/sitronix/st7571-i2c.c @@ -151,6 +151,7 @@ struct st7571_device { bool ignore_nak; bool grayscale; + bool inverted; u32 height_mm; u32 width_mm; u32 startline; @@ -218,10 +219,11 @@ static int st7571_send_command_list(struct st7571_device *st7571, return ret; } -static inline u8 st7571_transform_xy(const char *p, int x, int y) +static inline u8 st7571_transform_xy(const char *p, int x, int y, u8 bpp) { int xrest = x % 8; u8 result = 0; + u8 row_len = 16 * bpp; /* * Transforms an (x, y) pixel coordinate into a vertical 8-bit @@ -236,7 +238,7 @@ static inline u8 st7571_transform_xy(const char *p, int x, int y) for (int i = 0; i < 8; i++) { int row_idx = y + i; - u8 byte = p[row_idx * 16 + x]; + u8 byte = p[row_idx * row_len + x]; u8 bit = (byte >> xrest) & 1; result |= (bit << i); @@ -303,11 +305,11 @@ static void st7571_prepare_buffer_grayscale(struct st7571_device *st7571, struct iosys_map dst; switch (fb->format->format) { - case DRM_FORMAT_XRGB8888: /* Only support XRGB8888 in monochrome mode */ - dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 8); + case DRM_FORMAT_XRGB8888: + dst_pitch = DIV_ROUND_UP(drm_rect_width(rect), 4); iosys_map_set_vaddr(&dst, st7571->hwbuf); - drm_fb_xrgb8888_to_mono(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state); + drm_fb_xrgb8888_to_gray2(&dst, &dst_pitch, vmap, fb, rect, fmtcnv_state); break; case DRM_FORMAT_R1: @@ -333,7 +335,7 @@ static int st7571_fb_update_rect_monochrome(struct drm_framebuffer *fb, struct d for (int y = rect->y1; y < rect->y2; y += ST7571_PAGE_HEIGHT) { for (int x = rect->x1; x < rect->x2; x++) - row[x] = st7571_transform_xy(st7571->hwbuf, x, y); + row[x] = st7571_transform_xy(st7571->hwbuf, x, y, 1); st7571_set_position(st7571, rect->x1, y); @@ -358,14 +360,13 @@ static int st7571_fb_update_rect_grayscale(struct drm_framebuffer *fb, struct dr rect->y2 = min_t(unsigned int, round_up(rect->y2, ST7571_PAGE_HEIGHT), st7571->nlines); switch (format) { - case DRM_FORMAT_XRGB8888: - /* Threated as monochrome (R1) */ - fallthrough; case DRM_FORMAT_R1: - x1 = rect->x1; - x2 = rect->x2; + x1 = rect->x1 * 1; + x2 = rect->x2 * 1; break; case DRM_FORMAT_R2: + fallthrough; + case DRM_FORMAT_XRGB8888: x1 = rect->x1 * 2; x2 = rect->x2 * 2; break; @@ -373,7 +374,7 @@ static int st7571_fb_update_rect_grayscale(struct drm_framebuffer *fb, struct dr for (int y = rect->y1; y < rect->y2; y += ST7571_PAGE_HEIGHT) { for (int x = x1; x < x2; x++) - row[x] = st7571_transform_xy(st7571->hwbuf, x, y); + row[x] = st7571_transform_xy(st7571->hwbuf, x, y, 2); st7571_set_position(st7571, rect->x1, y); @@ -386,15 +387,15 @@ static int st7571_fb_update_rect_grayscale(struct drm_framebuffer *fb, struct dr * even if the format is monochrome. 
* * The bit values maps to the following grayscale: - * 0 0 = White - * 0 1 = Light gray - * 1 0 = Dark gray - * 1 1 = Black + * 0 0 = Black + * 0 1 = Dark gray + * 1 0 = Light gray + * 1 1 = White * * For monochrome formats, write the same value twice to get * either a black or white pixel. */ - if (format == DRM_FORMAT_R1 || format == DRM_FORMAT_XRGB8888) + if (format == DRM_FORMAT_R1) regmap_bulk_write(st7571->regmap, ST7571_DATA_MODE, row + x, 1); } } @@ -792,6 +793,7 @@ static int st7567_parse_dt(struct st7571_device *st7567) of_property_read_u32(np, "width-mm", &st7567->width_mm); of_property_read_u32(np, "height-mm", &st7567->height_mm); + st7567->inverted = of_property_read_bool(np, "sitronix,inverted"); st7567->pformat = &st7571_monochrome; st7567->bpp = 1; @@ -819,6 +821,7 @@ static int st7571_parse_dt(struct st7571_device *st7571) of_property_read_u32(np, "width-mm", &st7571->width_mm); of_property_read_u32(np, "height-mm", &st7571->height_mm); st7571->grayscale = of_property_read_bool(np, "sitronix,grayscale"); + st7571->inverted = of_property_read_bool(np, "sitronix,inverted"); if (st7571->grayscale) { st7571->pformat = &st7571_grayscale; @@ -873,7 +876,7 @@ static int st7567_lcd_init(struct st7571_device *st7567) ST7571_SET_POWER(0x6), /* Power Control, VC: ON, VR: ON, VF: OFF */ ST7571_SET_POWER(0x7), /* Power Control, VC: ON, VR: ON, VF: ON */ - ST7571_SET_REVERSE(0), + ST7571_SET_REVERSE(st7567->inverted ? 1 : 0), ST7571_SET_ENTIRE_DISPLAY_ON(0), }; @@ -917,7 +920,7 @@ static int st7571_lcd_init(struct st7571_device *st7571) ST7571_SET_COLOR_MODE(st7571->pformat->mode), ST7571_COMMAND_SET_NORMAL, - ST7571_SET_REVERSE(0), + ST7571_SET_REVERSE(st7571->inverted ? 1 : 0), ST7571_SET_ENTIRE_DISPLAY_ON(0), }; @@ -1024,7 +1027,7 @@ static void st7571_remove(struct i2c_client *client) drm_dev_unplug(&st7571->dev); } -struct st7571_panel_data st7567_config = { +static const struct st7571_panel_data st7567_config = { .init = st7567_lcd_init, .parse_dt = st7567_parse_dt, .constraints = { @@ -1036,7 +1039,7 @@ struct st7571_panel_data st7567_config = { }, }; -struct st7571_panel_data st7571_config = { +static const struct st7571_panel_data st7571_config = { .init = st7571_lcd_init, .parse_dt = st7571_parse_dt, .constraints = { diff --git a/drivers/gpu/drm/solomon/ssd130x-spi.c b/drivers/gpu/drm/solomon/ssd130x-spi.c index 7c935870f7d2..b52f5fd592a1 100644 --- a/drivers/gpu/drm/solomon/ssd130x-spi.c +++ b/drivers/gpu/drm/solomon/ssd130x-spi.c @@ -74,8 +74,7 @@ static int ssd130x_spi_probe(struct spi_device *spi) t = devm_kzalloc(dev, sizeof(*t), GFP_KERNEL); if (!t) - return dev_err_probe(dev, -ENOMEM, - "Failed to allocate SPI transport data\n"); + return -ENOMEM; t->spi = spi; t->dc = dc; diff --git a/drivers/gpu/drm/sti/sti_hqvdp.c b/drivers/gpu/drm/sti/sti_hqvdp.c index 03684062309b..b76606e9a82d 100644 --- a/drivers/gpu/drm/sti/sti_hqvdp.c +++ b/drivers/gpu/drm/sti/sti_hqvdp.c @@ -744,7 +744,7 @@ static bool sti_hqvdp_check_hw_scaling(struct sti_hqvdp *hqvdp, inv_zy = DIV_ROUND_UP(src_h, dst_h); - return (inv_zy <= lfw) ? 
true : false; + return inv_zy <= lfw; } /** diff --git a/drivers/gpu/drm/stm/drv.c b/drivers/gpu/drm/stm/drv.c index 8ebcaf953782..ab00d1a6140c 100644 --- a/drivers/gpu/drm/stm/drv.c +++ b/drivers/gpu/drm/stm/drv.c @@ -236,8 +236,18 @@ static void stm_drm_platform_shutdown(struct platform_device *pdev) drm_atomic_helper_shutdown(platform_get_drvdata(pdev)); } +static struct ltdc_plat_data stm_drm_plat_data = { + .pad_max_freq_hz = 90000000, +}; + +static struct ltdc_plat_data stm_drm_plat_data_mp25 = { + .pad_max_freq_hz = 150000000, +}; + static const struct of_device_id drv_dt_ids[] = { - { .compatible = "st,stm32-ltdc"}, + { .compatible = "st,stm32-ltdc", .data = &stm_drm_plat_data, }, + { .compatible = "st,stm32mp251-ltdc", .data = &stm_drm_plat_data_mp25, }, + { .compatible = "st,stm32mp255-ltdc", .data = &stm_drm_plat_data_mp25, }, { /* end node */ }, }; MODULE_DEVICE_TABLE(of, drv_dt_ids); diff --git a/drivers/gpu/drm/stm/ltdc.c b/drivers/gpu/drm/stm/ltdc.c index b9477fbec1c1..d1501e86a5b1 100644 --- a/drivers/gpu/drm/stm/ltdc.c +++ b/drivers/gpu/drm/stm/ltdc.c @@ -14,6 +14,7 @@ #include <linux/interrupt.h> #include <linux/media-bus-format.h> #include <linux/module.h> +#include <linux/of.h> #include <linux/of_graph.h> #include <linux/pinctrl/consumer.h> #include <linux/platform_device.h> @@ -51,6 +52,7 @@ #define HWVER_10300 0x010300 #define HWVER_20101 0x020101 #define HWVER_40100 0x040100 +#define HWVER_40101 0x040101 /* * The address of some registers depends on the HW version: such registers have @@ -836,6 +838,12 @@ ltdc_crtc_mode_valid(struct drm_crtc *crtc, int target_max = target + CLK_TOLERANCE_HZ; int result; + if (ldev->lvds_clk) { + result = clk_round_rate(ldev->lvds_clk, target); + drm_dbg_driver(crtc->dev, "lvds pixclk rate target %d, available %d\n", + target, result); + } + result = clk_round_rate(ldev->pixel_clk, target); drm_dbg_driver(crtc->dev, "clk rate target %d, available %d\n", target, result); @@ -1780,6 +1788,7 @@ static int ltdc_get_caps(struct drm_device *ddev) { struct ltdc_device *ldev = ddev->dev_private; u32 bus_width_log2, lcr, gc2r; + const struct ltdc_plat_data *pdata = of_device_get_match_data(ddev->dev); /* * at least 1 layer must be managed & the number of layers @@ -1795,6 +1804,8 @@ static int ltdc_get_caps(struct drm_device *ddev) ldev->caps.bus_width = 8 << bus_width_log2; regmap_read(ldev->regmap, LTDC_IDR, &ldev->caps.hw_version); + ldev->caps.pad_max_freq_hz = pdata->pad_max_freq_hz; + switch (ldev->caps.hw_version) { case HWVER_10200: case HWVER_10300: @@ -1812,7 +1823,6 @@ static int ltdc_get_caps(struct drm_device *ddev) * does not work on 2nd layer. 
*/ ldev->caps.non_alpha_only_l1 = true; - ldev->caps.pad_max_freq_hz = 90000000; if (ldev->caps.hw_version == HWVER_10200) ldev->caps.pad_max_freq_hz = 65000000; ldev->caps.nb_irq = 2; @@ -1843,6 +1853,7 @@ static int ltdc_get_caps(struct drm_device *ddev) ldev->caps.fifo_threshold = false; break; case HWVER_40100: + case HWVER_40101: ldev->caps.layer_ofs = LAY_OFS_1; ldev->caps.layer_regs = ltdc_layer_regs_a2; ldev->caps.pix_fmt_hw = ltdc_pix_fmt_a2; @@ -1850,7 +1861,6 @@ static int ltdc_get_caps(struct drm_device *ddev) ldev->caps.pix_fmt_nb = ARRAY_SIZE(ltdc_drm_fmt_a2); ldev->caps.pix_fmt_flex = true; ldev->caps.non_alpha_only_l1 = false; - ldev->caps.pad_max_freq_hz = 90000000; ldev->caps.nb_irq = 2; ldev->caps.ycbcr_input = true; ldev->caps.ycbcr_output = true; @@ -1873,6 +1883,10 @@ void ltdc_suspend(struct drm_device *ddev) drm_dbg_driver(ddev, "\n"); clk_disable_unprepare(ldev->pixel_clk); + if (ldev->bus_clk) + clk_disable_unprepare(ldev->bus_clk); + if (ldev->lvds_clk) + clk_disable_unprepare(ldev->lvds_clk); } int ltdc_resume(struct drm_device *ddev) @@ -1888,7 +1902,21 @@ int ltdc_resume(struct drm_device *ddev) return ret; } - return 0; + if (ldev->bus_clk) { + ret = clk_prepare_enable(ldev->bus_clk); + if (ret) { + drm_err(ddev, "failed to enable bus clock (%d)\n", ret); + return ret; + } + } + + if (ldev->lvds_clk) { + ret = clk_prepare_enable(ldev->lvds_clk); + if (ret) + drm_err(ddev, "failed to prepare lvds clock\n"); + } + + return ret; } int ltdc_load(struct drm_device *ddev) @@ -1923,6 +1951,20 @@ int ltdc_load(struct drm_device *ddev) return -ENODEV; } + if (of_device_is_compatible(np, "st,stm32mp251-ltdc") || + of_device_is_compatible(np, "st,stm32mp255-ltdc")) { + ldev->bus_clk = devm_clk_get(dev, "bus"); + if (IS_ERR(ldev->bus_clk)) + return dev_err_probe(dev, PTR_ERR(ldev->bus_clk), + "Unable to get bus clock\n"); + + ret = clk_prepare_enable(ldev->bus_clk); + if (ret) { + drm_err(ddev, "Unable to prepare bus clock\n"); + return ret; + } + } + /* Get endpoints if any */ for (i = 0; i < nb_endpoints; i++) { ret = drm_of_find_panel_or_bridge(np, 0, i, &panel, &bridge); @@ -1956,6 +1998,10 @@ int ltdc_load(struct drm_device *ddev) } } + ldev->lvds_clk = devm_clk_get(dev, "lvds"); + if (IS_ERR(ldev->lvds_clk)) + ldev->lvds_clk = NULL; + rstc = devm_reset_control_get_exclusive(dev, NULL); mutex_init(&ldev->err_lock); @@ -2035,6 +2081,9 @@ int ltdc_load(struct drm_device *ddev) clk_disable_unprepare(ldev->pixel_clk); + if (ldev->bus_clk) + clk_disable_unprepare(ldev->bus_clk); + pinctrl_pm_select_sleep_state(ddev->dev); pm_runtime_enable(ddev->dev); @@ -2043,6 +2092,9 @@ int ltdc_load(struct drm_device *ddev) err: clk_disable_unprepare(ldev->pixel_clk); + if (ldev->bus_clk) + clk_disable_unprepare(ldev->bus_clk); + return ret; } diff --git a/drivers/gpu/drm/stm/ltdc.h b/drivers/gpu/drm/stm/ltdc.h index 9d488043ffdb..17b51a7ce28e 100644 --- a/drivers/gpu/drm/stm/ltdc.h +++ b/drivers/gpu/drm/stm/ltdc.h @@ -40,10 +40,16 @@ struct fps_info { ktime_t last_timestamp; }; +struct ltdc_plat_data { + int pad_max_freq_hz; /* max frequency supported by pad */ +}; + struct ltdc_device { void __iomem *regs; struct regmap *regmap; struct clk *pixel_clk; /* lcd pixel clock */ + struct clk *lvds_clk; /* lvds pixel clock */ + struct clk *bus_clk; /* bus clock */ struct mutex err_lock; /* protecting error_status */ struct ltdc_caps caps; u32 irq_status; diff --git a/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c index 963c380fea64..ddb4a7523ee6 
100644 --- a/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c +++ b/drivers/gpu/drm/sysfb/drm_sysfb_modeset.c @@ -238,8 +238,7 @@ void drm_sysfb_plane_helper_atomic_update(struct drm_plane *plane, struct drm_at struct drm_shadow_plane_state *shadow_plane_state = to_drm_shadow_plane_state(plane_state); struct drm_framebuffer *fb = plane_state->fb; unsigned int dst_pitch = sysfb->fb_pitch; - struct drm_crtc_state *crtc_state = crtc_state = - drm_atomic_get_new_crtc_state(state, plane_state->crtc); + struct drm_crtc_state *crtc_state = drm_atomic_get_new_crtc_state(state, plane_state->crtc); struct drm_sysfb_crtc_state *sysfb_crtc_state = to_drm_sysfb_crtc_state(crtc_state); const struct drm_format_info *dst_format = sysfb_crtc_state->format; struct drm_atomic_helper_damage_iter iter; diff --git a/drivers/gpu/drm/sysfb/vesadrm.c b/drivers/gpu/drm/sysfb/vesadrm.c index 1170e71e4208..16a4b52d45c6 100644 --- a/drivers/gpu/drm/sysfb/vesadrm.c +++ b/drivers/gpu/drm/sysfb/vesadrm.c @@ -289,7 +289,7 @@ static int vesadrm_primary_plane_helper_atomic_check(struct drm_plane *plane, break; } break; - }; + } return 0; } diff --git a/drivers/gpu/drm/tests/drm_exec_test.c b/drivers/gpu/drm/tests/drm_exec_test.c index d6c4dd1194a0..3a20c788c51f 100644 --- a/drivers/gpu/drm/tests/drm_exec_test.c +++ b/drivers/gpu/drm/tests/drm_exec_test.c @@ -150,14 +150,22 @@ static void test_prepare(struct kunit *test) static void test_prepare_array(struct kunit *test) { struct drm_exec_priv *priv = test->priv; - struct drm_gem_object gobj1 = { }; - struct drm_gem_object gobj2 = { }; - struct drm_gem_object *array[] = { &gobj1, &gobj2 }; + struct drm_gem_object *gobj1; + struct drm_gem_object *gobj2; + struct drm_gem_object *array[] = { + (gobj1 = kunit_kzalloc(test, sizeof(*gobj1), GFP_KERNEL)), + (gobj2 = kunit_kzalloc(test, sizeof(*gobj2), GFP_KERNEL)), + }; struct drm_exec exec; int ret; - drm_gem_private_object_init(priv->drm, &gobj1, PAGE_SIZE); - drm_gem_private_object_init(priv->drm, &gobj2, PAGE_SIZE); + if (!gobj1 || !gobj2) { + KUNIT_FAIL(test, "Failed to allocate GEM objects.\n"); + return; + } + + drm_gem_private_object_init(priv->drm, gobj1, PAGE_SIZE); + drm_gem_private_object_init(priv->drm, gobj2, PAGE_SIZE); drm_exec_init(&exec, DRM_EXEC_INTERRUPTIBLE_WAIT, 0); drm_exec_until_all_locked(&exec) @@ -166,8 +174,8 @@ static void test_prepare_array(struct kunit *test) KUNIT_EXPECT_EQ(test, ret, 0); drm_exec_fini(&exec); - drm_gem_private_object_fini(&gobj1); - drm_gem_private_object_fini(&gobj2); + drm_gem_private_object_fini(gobj1); + drm_gem_private_object_fini(gobj2); } static void test_multiple_loops(struct kunit *test) diff --git a/drivers/gpu/drm/tidss/tidss_dispc.c b/drivers/gpu/drm/tidss/tidss_dispc.c index 3f6cff2ab1b2..7c8c15a5c39b 100644 --- a/drivers/gpu/drm/tidss/tidss_dispc.c +++ b/drivers/gpu/drm/tidss/tidss_dispc.c @@ -4,6 +4,7 @@ * Author: Jyri Sarha <jsarha@ti.com> */ +#include <linux/bitfield.h> #include <linux/clk.h> #include <linux/delay.h> #include <linux/dma-mapping.h> @@ -594,79 +595,53 @@ void tidss_disable_oldi(struct tidss_device *tidss, u32 hw_videoport) * number. 
For example 7:0 */ -static u32 FLD_MASK(u32 start, u32 end) -{ - return ((1 << (start - end + 1)) - 1) << end; -} - -static u32 FLD_VAL(u32 val, u32 start, u32 end) -{ - return (val << end) & FLD_MASK(start, end); -} - -static u32 FLD_GET(u32 val, u32 start, u32 end) -{ - return (val & FLD_MASK(start, end)) >> end; -} - -static u32 FLD_MOD(u32 orig, u32 val, u32 start, u32 end) -{ - return (orig & ~FLD_MASK(start, end)) | FLD_VAL(val, start, end); -} - -static u32 REG_GET(struct dispc_device *dispc, u32 idx, u32 start, u32 end) -{ - return FLD_GET(dispc_read(dispc, idx), start, end); -} - -static void REG_FLD_MOD(struct dispc_device *dispc, u32 idx, u32 val, - u32 start, u32 end) -{ - dispc_write(dispc, idx, FLD_MOD(dispc_read(dispc, idx), val, - start, end)); -} - -static u32 VID_REG_GET(struct dispc_device *dispc, u32 hw_plane, u32 idx, - u32 start, u32 end) -{ - return FLD_GET(dispc_vid_read(dispc, hw_plane, idx), start, end); -} - -static void VID_REG_FLD_MOD(struct dispc_device *dispc, u32 hw_plane, u32 idx, - u32 val, u32 start, u32 end) -{ - dispc_vid_write(dispc, hw_plane, idx, - FLD_MOD(dispc_vid_read(dispc, hw_plane, idx), - val, start, end)); -} - -static u32 VP_REG_GET(struct dispc_device *dispc, u32 vp, u32 idx, - u32 start, u32 end) -{ - return FLD_GET(dispc_vp_read(dispc, vp, idx), start, end); -} - -static void VP_REG_FLD_MOD(struct dispc_device *dispc, u32 vp, u32 idx, u32 val, - u32 start, u32 end) -{ - dispc_vp_write(dispc, vp, idx, FLD_MOD(dispc_vp_read(dispc, vp, idx), - val, start, end)); -} - -__maybe_unused -static u32 OVR_REG_GET(struct dispc_device *dispc, u32 ovr, u32 idx, - u32 start, u32 end) -{ - return FLD_GET(dispc_ovr_read(dispc, ovr, idx), start, end); -} - -static void OVR_REG_FLD_MOD(struct dispc_device *dispc, u32 ovr, u32 idx, - u32 val, u32 start, u32 end) -{ - dispc_ovr_write(dispc, ovr, idx, - FLD_MOD(dispc_ovr_read(dispc, ovr, idx), - val, start, end)); -} +#define REG_GET(dispc, idx, mask) \ + ((u32)FIELD_GET((mask), dispc_read((dispc), (idx)))) + +#define REG_FLD_MOD(dispc, idx, val, mask) \ + ({ \ + struct dispc_device *_dispc = (dispc); \ + u32 _idx = (idx); \ + u32 _reg = dispc_read(_dispc, _idx); \ + FIELD_MODIFY((mask), &_reg, (val)); \ + dispc_write(_dispc, _idx, _reg); \ + }) + +#define VID_REG_GET(dispc, hw_plane, idx, mask) \ + ((u32)FIELD_GET((mask), dispc_vid_read((dispc), (hw_plane), (idx)))) + +#define VID_REG_FLD_MOD(dispc, hw_plane, idx, val, mask) \ + ({ \ + struct dispc_device *_dispc = (dispc); \ + u32 _hw_plane = (hw_plane); \ + u32 _idx = (idx); \ + u32 _reg = dispc_vid_read(_dispc, _hw_plane, _idx); \ + FIELD_MODIFY((mask), &_reg, (val)); \ + dispc_vid_write(_dispc, _hw_plane, _idx, _reg); \ + }) + +#define VP_REG_GET(dispc, vp, idx, mask) \ + ((u32)FIELD_GET((mask), dispc_vp_read((dispc), (vp), (idx)))) + +#define VP_REG_FLD_MOD(dispc, vp, idx, val, mask) \ + ({ \ + struct dispc_device *_dispc = (dispc); \ + u32 _vp = (vp); \ + u32 _idx = (idx); \ + u32 _reg = dispc_vp_read(_dispc, _vp, _idx); \ + FIELD_MODIFY((mask), &_reg, (val)); \ + dispc_vp_write(_dispc, _vp, _idx, _reg); \ + }) + +#define OVR_REG_FLD_MOD(dispc, ovr, idx, val, mask) \ + ({ \ + struct dispc_device *_dispc = (dispc); \ + u32 _ovr = (ovr); \ + u32 _idx = (idx); \ + u32 _reg = dispc_ovr_read(_dispc, _ovr, _idx); \ + FIELD_MODIFY((mask), &_reg, (val)); \ + dispc_ovr_write(_dispc, _ovr, _idx, _reg); \ + }) static dispc_irq_t dispc_vp_irq_from_raw(u32 stat, u32 hw_videoport) { @@ -1139,7 +1114,8 @@ static void dispc_set_num_datalines(struct dispc_device 
*dispc, v = 3; } - VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, v, 10, 8); + VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, v, + DISPC_VP_CONTROL_DATALINES_MASK); } static void dispc_enable_am65x_oldi(struct dispc_device *dispc, u32 hw_videoport, @@ -1162,7 +1138,8 @@ static void dispc_enable_am65x_oldi(struct dispc_device *dispc, u32 hw_videoport oldi_cfg |= BIT(7); /* DEPOL */ - oldi_cfg = FLD_MOD(oldi_cfg, fmt->am65x_oldi_mode_reg_val, 3, 1); + FIELD_MODIFY(DISPC_VP_DSS_OLDI_CFG_MAP_MASK, &oldi_cfg, + fmt->am65x_oldi_mode_reg_val); oldi_cfg |= BIT(12); /* SOFTRST */ @@ -1224,14 +1201,14 @@ void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport, vbp = mode->crtc_vtotal - mode->crtc_vsync_end; dispc_vp_write(dispc, hw_videoport, DISPC_VP_TIMING_H, - FLD_VAL(hsw - 1, 7, 0) | - FLD_VAL(hfp - 1, 19, 8) | - FLD_VAL(hbp - 1, 31, 20)); + FIELD_PREP(DISPC_VP_TIMING_H_SYNC_PULSE_MASK, hsw - 1) | + FIELD_PREP(DISPC_VP_TIMING_H_FRONT_PORCH_MASK, hfp - 1) | + FIELD_PREP(DISPC_VP_TIMING_H_BACK_PORCH_MASK, hbp - 1)); dispc_vp_write(dispc, hw_videoport, DISPC_VP_TIMING_V, - FLD_VAL(vsw - 1, 7, 0) | - FLD_VAL(vfp, 19, 8) | - FLD_VAL(vbp, 31, 20)); + FIELD_PREP(DISPC_VP_TIMING_V_SYNC_PULSE_MASK, vsw - 1) | + FIELD_PREP(DISPC_VP_TIMING_V_FRONT_PORCH_MASK, vfp) | + FIELD_PREP(DISPC_VP_TIMING_V_BACK_PORCH_MASK, vbp)); ivs = !!(mode->flags & DRM_MODE_FLAG_NVSYNC); @@ -1254,24 +1231,28 @@ void dispc_vp_enable(struct dispc_device *dispc, u32 hw_videoport, ieo = false; dispc_vp_write(dispc, hw_videoport, DISPC_VP_POL_FREQ, - FLD_VAL(align, 18, 18) | - FLD_VAL(onoff, 17, 17) | - FLD_VAL(rf, 16, 16) | - FLD_VAL(ieo, 15, 15) | - FLD_VAL(ipc, 14, 14) | - FLD_VAL(ihs, 13, 13) | - FLD_VAL(ivs, 12, 12)); + FIELD_PREP(DISPC_VP_POL_FREQ_ALIGN_MASK, align) | + FIELD_PREP(DISPC_VP_POL_FREQ_ONOFF_MASK, onoff) | + FIELD_PREP(DISPC_VP_POL_FREQ_RF_MASK, rf) | + FIELD_PREP(DISPC_VP_POL_FREQ_IEO_MASK, ieo) | + FIELD_PREP(DISPC_VP_POL_FREQ_IPC_MASK, ipc) | + FIELD_PREP(DISPC_VP_POL_FREQ_IHS_MASK, ihs) | + FIELD_PREP(DISPC_VP_POL_FREQ_IVS_MASK, ivs)); dispc_vp_write(dispc, hw_videoport, DISPC_VP_SIZE_SCREEN, - FLD_VAL(mode->crtc_hdisplay - 1, 11, 0) | - FLD_VAL(mode->crtc_vdisplay - 1, 27, 16)); + FIELD_PREP(DISPC_VP_SIZE_SCREEN_HDISPLAY_MASK, + mode->crtc_hdisplay - 1) | + FIELD_PREP(DISPC_VP_SIZE_SCREEN_VDISPLAY_MASK, + mode->crtc_vdisplay - 1)); - VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, 0, 0); + VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, + DISPC_VP_CONTROL_ENABLE_MASK); } void dispc_vp_disable(struct dispc_device *dispc, u32 hw_videoport) { - VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 0, 0, 0); + VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 0, + DISPC_VP_CONTROL_ENABLE_MASK); } void dispc_vp_unprepare(struct dispc_device *dispc, u32 hw_videoport) @@ -1285,13 +1266,16 @@ void dispc_vp_unprepare(struct dispc_device *dispc, u32 hw_videoport) bool dispc_vp_go_busy(struct dispc_device *dispc, u32 hw_videoport) { - return VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL, 5, 5); + return VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL, + DISPC_VP_CONTROL_GOBIT_MASK); } void dispc_vp_go(struct dispc_device *dispc, u32 hw_videoport) { - WARN_ON(VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL, 5, 5)); - VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, 5, 5); + WARN_ON(VP_REG_GET(dispc, hw_videoport, DISPC_VP_CONTROL, + DISPC_VP_CONTROL_GOBIT_MASK)); + VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONTROL, 1, + DISPC_VP_CONTROL_GOBIT_MASK); } enum 
c8_to_c12_mode { C8_TO_C12_REPLICATE, C8_TO_C12_MAX, C8_TO_C12_MIN }; @@ -1491,11 +1475,11 @@ static void dispc_am65x_ovr_set_plane(struct dispc_device *dispc, u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id; OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer), - hw_id, 4, 1); - OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer), - x, 17, 6); - OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer), - y, 30, 19); + hw_id, DISPC_OVR_ATTRIBUTES_CHANNELIN_MASK); + OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer), x, + DISPC_OVR_ATTRIBUTES_POSX_MASK); + OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer), y, + DISPC_OVR_ATTRIBUTES_POSY_MASK); } static void dispc_j721e_ovr_set_plane(struct dispc_device *dispc, @@ -1505,11 +1489,11 @@ static void dispc_j721e_ovr_set_plane(struct dispc_device *dispc, u32 hw_id = dispc->feat->vid_info[hw_plane].hw_id; OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer), - hw_id, 4, 1); - OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer), - x, 13, 0); - OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer), - y, 29, 16); + hw_id, DISPC_OVR_ATTRIBUTES_CHANNELIN_MASK); + OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer), x, + DISPC_OVR_ATTRIBUTES2_POSX_MASK); + OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES2(layer), y, + DISPC_OVR_ATTRIBUTES2_POSY_MASK); } void dispc_ovr_set_plane(struct dispc_device *dispc, u32 hw_plane, @@ -1544,7 +1528,7 @@ void dispc_ovr_enable_layer(struct dispc_device *dispc, return; OVR_REG_FLD_MOD(dispc, hw_videoport, DISPC_OVR_ATTRIBUTES(layer), - !!enable, 0, 0); + !!enable, DISPC_OVR_ATTRIBUTES_ENABLE_MASK); } /* CSC */ @@ -1580,14 +1564,14 @@ struct dispc_csc_coef { static void dispc_csc_offset_regval(const struct dispc_csc_coef *csc, u32 *regval) { -#define OVAL(x, y) (FLD_VAL(x, 15, 3) | FLD_VAL(y, 31, 19)) +#define OVAL(x, y) (FIELD_PREP(GENMASK(15, 3), x) | FIELD_PREP(GENMASK(31, 19), y)) regval[5] = OVAL(csc->preoffset[0], csc->preoffset[1]); regval[6] = OVAL(csc->preoffset[2], csc->postoffset[0]); regval[7] = OVAL(csc->postoffset[1], csc->postoffset[2]); #undef OVAL } -#define CVAL(x, y) (FLD_VAL(x, 10, 0) | FLD_VAL(y, 26, 16)) +#define CVAL(x, y) (FIELD_PREP(GENMASK(10, 0), x) | FIELD_PREP(GENMASK(26, 16), y)) static void dispc_csc_yuv2rgb_regval(const struct dispc_csc_coef *csc, u32 *regval) { @@ -1767,7 +1751,8 @@ static void dispc_vid_csc_setup(struct dispc_device *dispc, u32 hw_plane, static void dispc_vid_csc_enable(struct dispc_device *dispc, u32 hw_plane, bool enable) { - VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, 9, 9); + VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, + DISPC_VID_ATTRIBUTES_COLORCONVENABLE_MASK); } /* SCALER */ @@ -1826,7 +1811,8 @@ static void dispc_vid_write_fir_coefs(struct dispc_device *dispc, c1 = coefs->c1[phase]; c2 = coefs->c2[phase]; - c12 = FLD_VAL(c1, 19, 10) | FLD_VAL(c2, 29, 20); + c12 = FIELD_PREP(GENMASK(19, 10), c1) | FIELD_PREP(GENMASK(29, 20), + c2); dispc_vid_write(dispc, hw_plane, reg, c12); } @@ -2023,20 +2009,20 @@ static void dispc_vid_set_scaling(struct dispc_device *dispc, u32 fourcc) { /* HORIZONTAL RESIZE ENABLE */ - VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, - sp->scale_x, 7, 7); + VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, sp->scale_x, + DISPC_VID_ATTRIBUTES_HRESIZEENABLE_MASK); /* VERTICAL RESIZE ENABLE */ - VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, - sp->scale_y, 8, 
8); + VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, sp->scale_y, + DISPC_VID_ATTRIBUTES_VRESIZEENABLE_MASK); /* Skip the rest if no scaling is used */ if (!sp->scale_x && !sp->scale_y) return; /* VERTICAL 5-TAPS */ - VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, - sp->five_taps, 21, 21); + VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, sp->five_taps, + DISPC_VID_ATTRIBUTES_VERTICALTAPS_MASK); if (dispc_fourcc_is_yuv(fourcc)) { if (sp->scale_x) { @@ -2126,7 +2112,7 @@ static void dispc_plane_set_pixel_format(struct dispc_device *dispc, if (dispc_color_formats[i].fourcc == fourcc) { VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, dispc_color_formats[i].dss_code, - 6, 1); + DISPC_VID_ATTRIBUTES_FORMAT_MASK); return; } } @@ -2248,7 +2234,8 @@ void dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane, dispc_vid_write(dispc, hw_plane, DISPC_VID_BA_EXT_1, (u64)dma_addr >> 32); dispc_vid_write(dispc, hw_plane, DISPC_VID_PICTURE_SIZE, - (scale.in_w - 1) | ((scale.in_h - 1) << 16)); + FIELD_PREP(DISPC_VID_PICTURE_SIZE_MEMSIZEY_MASK, scale.in_h - 1) | + FIELD_PREP(DISPC_VID_PICTURE_SIZE_MEMSIZEX_MASK, scale.in_w - 1)); /* For YUV422 format we use the macropixel size for pixel inc */ if (fourcc == DRM_FORMAT_YUYV || fourcc == DRM_FORMAT_UYVY) @@ -2285,8 +2272,10 @@ void dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane, if (!lite) { dispc_vid_write(dispc, hw_plane, DISPC_VID_SIZE, - (state->crtc_w - 1) | - ((state->crtc_h - 1) << 16)); + FIELD_PREP(DISPC_VID_SIZE_SIZEY_MASK, + state->crtc_h - 1) | + FIELD_PREP(DISPC_VID_SIZE_SIZEX_MASK, + state->crtc_w - 1)); dispc_vid_set_scaling(dispc, hw_plane, &scale, fourcc); } @@ -2300,38 +2289,45 @@ void dispc_plane_setup(struct dispc_device *dispc, u32 hw_plane, } dispc_vid_write(dispc, hw_plane, DISPC_VID_GLOBAL_ALPHA, - 0xFF & (state->alpha >> 8)); + FIELD_PREP(DISPC_VID_GLOBAL_ALPHA_GLOBALALPHA_MASK, + state->alpha >> 8)); if (state->pixel_blend_mode == DRM_MODE_BLEND_PREMULTI) VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 1, - 28, 28); + DISPC_VID_ATTRIBUTES_PREMULTIPLYALPHA_MASK); else VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0, - 28, 28); + DISPC_VID_ATTRIBUTES_PREMULTIPLYALPHA_MASK); } void dispc_plane_enable(struct dispc_device *dispc, u32 hw_plane, bool enable) { - VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, 0, 0); + VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, !!enable, + DISPC_VID_ATTRIBUTES_ENABLE_MASK); } static u32 dispc_vid_get_fifo_size(struct dispc_device *dispc, u32 hw_plane) { - return VID_REG_GET(dispc, hw_plane, DISPC_VID_BUF_SIZE_STATUS, 15, 0); + return VID_REG_GET(dispc, hw_plane, DISPC_VID_BUF_SIZE_STATUS, + DISPC_VID_BUF_SIZE_STATUS_BUFSIZE_MASK); } static void dispc_vid_set_mflag_threshold(struct dispc_device *dispc, u32 hw_plane, u32 low, u32 high) { dispc_vid_write(dispc, hw_plane, DISPC_VID_MFLAG_THRESHOLD, - FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0)); + FIELD_PREP(DISPC_VID_MFLAG_THRESHOLD_HT_MFLAG_MASK, high) | + FIELD_PREP(DISPC_VID_MFLAG_THRESHOLD_LT_MFLAG_MASK, low)); } static void dispc_vid_set_buf_threshold(struct dispc_device *dispc, u32 hw_plane, u32 low, u32 high) { dispc_vid_write(dispc, hw_plane, DISPC_VID_BUF_THRESHOLD, - FLD_VAL(high, 31, 16) | FLD_VAL(low, 15, 0)); + FIELD_PREP(DISPC_VID_BUF_THRESHOLD_BUFHIGHTHRESHOLD_MASK, + high) | + FIELD_PREP(DISPC_VID_BUF_THRESHOLD_BUFLOWTHRESHOLD_MASK, + low)); } static void dispc_k2g_plane_init(struct dispc_device *dispc) @@ -2341,9 +2337,11 @@ static void 
dispc_k2g_plane_init(struct dispc_device *dispc) dev_dbg(dispc->dev, "%s()\n", __func__); /* MFLAG_CTRL = ENABLED */ - REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0); + REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, + DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_CTRL_MASK); /* MFLAG_START = MFLAGNORMALSTARTMODE */ - REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6); + REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, + DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_START_MASK); for (hw_plane = 0; hw_plane < dispc->feat->num_vids; hw_plane++) { u32 size = dispc_vid_get_fifo_size(dispc, hw_plane); @@ -2380,7 +2378,7 @@ static void dispc_k2g_plane_init(struct dispc_device *dispc) * register is ignored. */ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 1, - 19, 19); + DISPC_VID_ATTRIBUTES_BUFPRELOAD_MASK); } } @@ -2392,13 +2390,15 @@ static void dispc_k3_plane_init(struct dispc_device *dispc) dev_dbg(dispc->dev, "%s()\n", __func__); - REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_lo_pri, 2, 0); - REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_hi_pri, 5, 3); + REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_lo_pri, DSS_CBA_CFG_PRI_LO_MASK); + REG_FLD_MOD(dispc, DSS_CBA_CFG, cba_hi_pri, DSS_CBA_CFG_PRI_HI_MASK); /* MFLAG_CTRL = ENABLED */ - REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, 1, 0); + REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 2, + DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_CTRL_MASK); /* MFLAG_START = MFLAGNORMALSTARTMODE */ - REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, 6, 6); + REG_FLD_MOD(dispc, DISPC_GLOBAL_MFLAG_ATTRIBUTE, 0, + DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_START_MASK); for (hw_plane = 0; hw_plane < dispc->feat->num_vids; hw_plane++) { u32 size = dispc_vid_get_fifo_size(dispc, hw_plane); @@ -2431,7 +2431,7 @@ static void dispc_k3_plane_init(struct dispc_device *dispc) /* Prefech up to PRELOAD value */ VID_REG_FLD_MOD(dispc, hw_plane, DISPC_VID_ATTRIBUTES, 0, - 19, 19); + DISPC_VID_ATTRIBUTES_BUFPRELOAD_MASK); } } @@ -2461,7 +2461,8 @@ static void dispc_vp_init(struct dispc_device *dispc) /* Enable the gamma Shadow bit-field for all VPs*/ for (i = 0; i < dispc->feat->num_vps; i++) - VP_REG_FLD_MOD(dispc, i, DISPC_VP_CONFIG, 1, 2, 2); + VP_REG_FLD_MOD(dispc, i, DISPC_VP_CONFIG, 1, + DISPC_VP_CONFIG_GAMMAENABLE_MASK); } static void dispc_initial_config(struct dispc_device *dispc) @@ -2472,8 +2473,8 @@ static void dispc_initial_config(struct dispc_device *dispc) /* Note: Hardcoded DPI routing on J721E for now */ if (dispc->feat->subrev == DISPC_J721E) { dispc_write(dispc, DISPC_CONNECTIONS, - FLD_VAL(2, 3, 0) | /* VP1 to DPI0 */ - FLD_VAL(8, 7, 4) /* VP3 to DPI1 */ + FIELD_PREP(DISPC_CONNECTIONS_DPI_0_CONN_MASK, 2) | /* VP1 to DPI0 */ + FIELD_PREP(DISPC_CONNECTIONS_DPI_1_CONN_MASK, 8) /* VP3 to DPI1 */ ); } } @@ -2651,8 +2652,8 @@ static void dispc_k2g_cpr_from_ctm(const struct drm_color_ctm *ctm, cpr->m[CSC_BB] = dispc_S31_32_to_s2_8(ctm->matrix[8]); } -#define CVAL(xR, xG, xB) (FLD_VAL(xR, 9, 0) | FLD_VAL(xG, 20, 11) | \ - FLD_VAL(xB, 31, 22)) +#define CVAL(xR, xG, xB) (FIELD_PREP(GENMASK(9, 0), xR) | FIELD_PREP(GENMASK(20, 11), xG) | \ + FIELD_PREP(GENMASK(31, 22), xB)) static void dispc_k2g_vp_csc_cpr_regval(const struct dispc_csc_coef *csc, u32 *regval) @@ -2694,8 +2695,8 @@ static void dispc_k2g_vp_set_ctm(struct dispc_device *dispc, u32 hw_videoport, cprenable = 1; } - VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG, - cprenable, 15, 15); + VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG, cprenable, + DISPC_VP_CONFIG_CPR_MASK); } static s16 dispc_S31_32_to_s3_8(s64 coef) @@ 
-2760,8 +2761,8 @@ static void dispc_k3_vp_set_ctm(struct dispc_device *dispc, u32 hw_videoport, colorconvenable = 1; } - VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG, - colorconvenable, 24, 24); + VP_REG_FLD_MOD(dispc, hw_videoport, DISPC_VP_CONFIG, colorconvenable, + DISPC_VP_CONFIG_COLORCONVENABLE_MASK); } static void dispc_vp_set_color_mgmt(struct dispc_device *dispc, @@ -2816,26 +2817,26 @@ int dispc_runtime_resume(struct dispc_device *dispc) clk_prepare_enable(dispc->fclk); - if (REG_GET(dispc, DSS_SYSSTATUS, 0, 0) == 0) + if (REG_GET(dispc, DSS_SYSSTATUS, DSS_SYSSTATUS_DISPC_FUNC_RESETDONE) == 0) dev_warn(dispc->dev, "DSS FUNC RESET not done!\n"); dev_dbg(dispc->dev, "OMAP DSS7 rev 0x%x\n", dispc_read(dispc, DSS_REVISION)); dev_dbg(dispc->dev, "VP RESETDONE %d,%d,%d\n", - REG_GET(dispc, DSS_SYSSTATUS, 1, 1), - REG_GET(dispc, DSS_SYSSTATUS, 2, 2), - REG_GET(dispc, DSS_SYSSTATUS, 3, 3)); + REG_GET(dispc, DSS_SYSSTATUS, GENMASK(1, 1)), + REG_GET(dispc, DSS_SYSSTATUS, GENMASK(2, 2)), + REG_GET(dispc, DSS_SYSSTATUS, GENMASK(3, 3))); if (dispc->feat->subrev == DISPC_AM625 || dispc->feat->subrev == DISPC_AM65X) dev_dbg(dispc->dev, "OLDI RESETDONE %d,%d,%d\n", - REG_GET(dispc, DSS_SYSSTATUS, 5, 5), - REG_GET(dispc, DSS_SYSSTATUS, 6, 6), - REG_GET(dispc, DSS_SYSSTATUS, 7, 7)); + REG_GET(dispc, DSS_SYSSTATUS, GENMASK(5, 5)), + REG_GET(dispc, DSS_SYSSTATUS, GENMASK(6, 6)), + REG_GET(dispc, DSS_SYSSTATUS, GENMASK(7, 7))); dev_dbg(dispc->dev, "DISPC IDLE %d\n", - REG_GET(dispc, DSS_SYSSTATUS, 9, 9)); + REG_GET(dispc, DSS_SYSSTATUS, DSS_SYSSTATUS_DISPC_IDLE_STATUS)); dispc_initial_config(dispc); @@ -2912,7 +2913,8 @@ static void dispc_softreset_k2g(struct dispc_device *dispc) spin_unlock_irqrestore(&dispc->tidss->irq_lock, flags); for (unsigned int vp_idx = 0; vp_idx < dispc->feat->num_vps; ++vp_idx) - VP_REG_FLD_MOD(dispc, vp_idx, DISPC_VP_CONTROL, 0, 0, 0); + VP_REG_FLD_MOD(dispc, vp_idx, DISPC_VP_CONTROL, 0, + DISPC_VP_CONTROL_ENABLE_MASK); } static int dispc_softreset(struct dispc_device *dispc) @@ -2926,7 +2928,7 @@ static int dispc_softreset(struct dispc_device *dispc) } /* Soft reset */ - REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, 1, 1); + REG_FLD_MOD(dispc, DSS_SYSCONFIG, 1, DSS_SYSCONFIG_SOFTRESET_MASK); /* Wait for reset to complete */ ret = readl_poll_timeout(dispc->base_common + DSS_SYSSTATUS, val, val & 1, 100, 5000); diff --git a/drivers/gpu/drm/tidss/tidss_dispc_regs.h b/drivers/gpu/drm/tidss/tidss_dispc_regs.h index 50a3f28250ef..382027dddce8 100644 --- a/drivers/gpu/drm/tidss/tidss_dispc_regs.h +++ b/drivers/gpu/drm/tidss/tidss_dispc_regs.h @@ -56,7 +56,12 @@ enum dispc_common_regs { #define DSS_REVISION REG(DSS_REVISION) #define DSS_SYSCONFIG REG(DSS_SYSCONFIG) +#define DSS_SYSCONFIG_SOFTRESET_MASK GENMASK(1, 1) + #define DSS_SYSSTATUS REG(DSS_SYSSTATUS) +#define DSS_SYSSTATUS_DISPC_IDLE_STATUS GENMASK(9, 9) +#define DSS_SYSSTATUS_DISPC_FUNC_RESETDONE GENMASK(0, 0) + #define DISPC_IRQ_EOI REG(DISPC_IRQ_EOI) #define DISPC_IRQSTATUS_RAW REG(DISPC_IRQSTATUS_RAW) #define DISPC_IRQSTATUS REG(DISPC_IRQSTATUS) @@ -70,9 +75,15 @@ enum dispc_common_regs { #define WB_IRQSTATUS REG(WB_IRQSTATUS) #define DISPC_GLOBAL_MFLAG_ATTRIBUTE REG(DISPC_GLOBAL_MFLAG_ATTRIBUTE) +#define DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_START_MASK GENMASK(6, 6) +#define DISPC_GLOBAL_MFLAG_ATTRIBUTE_MFLAG_CTRL_MASK GENMASK(1, 0) + #define DISPC_GLOBAL_OUTPUT_ENABLE REG(DISPC_GLOBAL_OUTPUT_ENABLE) #define DISPC_GLOBAL_BUFFER REG(DISPC_GLOBAL_BUFFER) #define DSS_CBA_CFG REG(DSS_CBA_CFG) +#define DSS_CBA_CFG_PRI_HI_MASK 
GENMASK(5, 3) +#define DSS_CBA_CFG_PRI_LO_MASK GENMASK(2, 0) + #define DISPC_DBG_CONTROL REG(DISPC_DBG_CONTROL) #define DISPC_DBG_STATUS REG(DISPC_DBG_STATUS) #define DISPC_CLKGATING_DISABLE REG(DISPC_CLKGATING_DISABLE) @@ -88,6 +99,9 @@ enum dispc_common_regs { #define FBDC_CONSTANT_COLOR_0 REG(FBDC_CONSTANT_COLOR_0) #define FBDC_CONSTANT_COLOR_1 REG(FBDC_CONSTANT_COLOR_1) #define DISPC_CONNECTIONS REG(DISPC_CONNECTIONS) +#define DISPC_CONNECTIONS_DPI_1_CONN_MASK GENMASK(7, 4) +#define DISPC_CONNECTIONS_DPI_0_CONN_MASK GENMASK(3, 0) + #define DISPC_MSS_VP1 REG(DISPC_MSS_VP1) #define DISPC_MSS_VP3 REG(DISPC_MSS_VP3) @@ -102,13 +116,27 @@ enum dispc_common_regs { #define DISPC_VID_ACCUV2_0 0x18 #define DISPC_VID_ACCUV2_1 0x1c #define DISPC_VID_ATTRIBUTES 0x20 +#define DISPC_VID_ATTRIBUTES_PREMULTIPLYALPHA_MASK GENMASK(28, 28) +#define DISPC_VID_ATTRIBUTES_VERTICALTAPS_MASK GENMASK(21, 21) +#define DISPC_VID_ATTRIBUTES_BUFPRELOAD_MASK GENMASK(19, 19) +#define DISPC_VID_ATTRIBUTES_COLORCONVENABLE_MASK GENMASK(9, 9) +#define DISPC_VID_ATTRIBUTES_VRESIZEENABLE_MASK GENMASK(8, 8) +#define DISPC_VID_ATTRIBUTES_HRESIZEENABLE_MASK GENMASK(7, 7) +#define DISPC_VID_ATTRIBUTES_FORMAT_MASK GENMASK(6, 1) +#define DISPC_VID_ATTRIBUTES_ENABLE_MASK GENMASK(0, 0) + #define DISPC_VID_ATTRIBUTES2 0x24 #define DISPC_VID_BA_0 0x28 #define DISPC_VID_BA_1 0x2c #define DISPC_VID_BA_UV_0 0x30 #define DISPC_VID_BA_UV_1 0x34 #define DISPC_VID_BUF_SIZE_STATUS 0x38 +#define DISPC_VID_BUF_SIZE_STATUS_BUFSIZE_MASK GENMASK(15, 0) + #define DISPC_VID_BUF_THRESHOLD 0x3c +#define DISPC_VID_BUF_THRESHOLD_BUFHIGHTHRESHOLD_MASK GENMASK(31, 16) +#define DISPC_VID_BUF_THRESHOLD_BUFLOWTHRESHOLD_MASK GENMASK(15, 0) + #define DISPC_VID_CSC_COEF(n) (0x40 + (n) * 4) #define DISPC_VID_FIRH 0x5c @@ -137,15 +165,26 @@ enum dispc_common_regs { #define DISPC_VID_FIR_COEF_V12_C(phase) (0x1bc + (phase) * 4) #define DISPC_VID_GLOBAL_ALPHA 0x1fc +#define DISPC_VID_GLOBAL_ALPHA_GLOBALALPHA_MASK GENMASK(7, 0) + #define DISPC_VID_K2G_IRQENABLE 0x200 /* K2G */ #define DISPC_VID_K2G_IRQSTATUS 0x204 /* K2G */ #define DISPC_VID_MFLAG_THRESHOLD 0x208 +#define DISPC_VID_MFLAG_THRESHOLD_HT_MFLAG_MASK GENMASK(31, 16) +#define DISPC_VID_MFLAG_THRESHOLD_LT_MFLAG_MASK GENMASK(15, 0) + #define DISPC_VID_PICTURE_SIZE 0x20c +#define DISPC_VID_PICTURE_SIZE_MEMSIZEY_MASK GENMASK(27, 16) +#define DISPC_VID_PICTURE_SIZE_MEMSIZEX_MASK GENMASK(11, 0) + #define DISPC_VID_PIXEL_INC 0x210 #define DISPC_VID_K2G_POSITION 0x214 /* K2G */ #define DISPC_VID_PRELOAD 0x218 #define DISPC_VID_ROW_INC 0x21c #define DISPC_VID_SIZE 0x220 +#define DISPC_VID_SIZE_SIZEY_MASK GENMASK(27, 16) +#define DISPC_VID_SIZE_SIZEX_MASK GENMASK(11, 0) + #define DISPC_VID_BA_EXT_0 0x22c #define DISPC_VID_BA_EXT_1 0x230 #define DISPC_VID_BA_UV_EXT_0 0x234 @@ -173,11 +212,27 @@ enum dispc_common_regs { #define DISPC_OVR_TRANS_COLOR_MIN 0x18 #define DISPC_OVR_TRANS_COLOR_MIN2 0x1c #define DISPC_OVR_ATTRIBUTES(n) (0x20 + (n) * 4) +#define DISPC_OVR_ATTRIBUTES_POSY_MASK GENMASK(30, 19) +#define DISPC_OVR_ATTRIBUTES_POSX_MASK GENMASK(17, 6) +#define DISPC_OVR_ATTRIBUTES_CHANNELIN_MASK GENMASK(4, 1) +#define DISPC_OVR_ATTRIBUTES_ENABLE_MASK GENMASK(0, 0) + #define DISPC_OVR_ATTRIBUTES2(n) (0x34 + (n) * 4) /* J721E */ +#define DISPC_OVR_ATTRIBUTES2_POSY_MASK GENMASK(29, 16) +#define DISPC_OVR_ATTRIBUTES2_POSX_MASK GENMASK(13, 0) + /* VP */ #define DISPC_VP_CONFIG 0x0 +#define DISPC_VP_CONFIG_COLORCONVENABLE_MASK GENMASK(24, 24) +#define DISPC_VP_CONFIG_CPR_MASK GENMASK(15, 15) +#define 
DISPC_VP_CONFIG_GAMMAENABLE_MASK GENMASK(2, 2) + #define DISPC_VP_CONTROL 0x4 +#define DISPC_VP_CONTROL_DATALINES_MASK GENMASK(10, 8) +#define DISPC_VP_CONTROL_GOBIT_MASK GENMASK(5, 5) +#define DISPC_VP_CONTROL_ENABLE_MASK GENMASK(0, 0) + #define DISPC_VP_CSC_COEF0 0x8 #define DISPC_VP_CSC_COEF1 0xc #define DISPC_VP_CSC_COEF2 0x10 @@ -189,9 +244,28 @@ enum dispc_common_regs { #define DISPC_VP_DATA_CYCLE_2 0x1c #define DISPC_VP_LINE_NUMBER 0x44 #define DISPC_VP_POL_FREQ 0x4c +#define DISPC_VP_POL_FREQ_ALIGN_MASK GENMASK(18, 18) +#define DISPC_VP_POL_FREQ_ONOFF_MASK GENMASK(17, 17) +#define DISPC_VP_POL_FREQ_RF_MASK GENMASK(16, 16) +#define DISPC_VP_POL_FREQ_IEO_MASK GENMASK(15, 15) +#define DISPC_VP_POL_FREQ_IPC_MASK GENMASK(14, 14) +#define DISPC_VP_POL_FREQ_IHS_MASK GENMASK(13, 13) +#define DISPC_VP_POL_FREQ_IVS_MASK GENMASK(12, 12) + #define DISPC_VP_SIZE_SCREEN 0x50 +#define DISPC_VP_SIZE_SCREEN_HDISPLAY_MASK GENMASK(11, 0) +#define DISPC_VP_SIZE_SCREEN_VDISPLAY_MASK GENMASK(27, 16) + #define DISPC_VP_TIMING_H 0x54 +#define DISPC_VP_TIMING_H_SYNC_PULSE_MASK GENMASK(7, 0) +#define DISPC_VP_TIMING_H_FRONT_PORCH_MASK GENMASK(19, 8) +#define DISPC_VP_TIMING_H_BACK_PORCH_MASK GENMASK(31, 20) + #define DISPC_VP_TIMING_V 0x58 +#define DISPC_VP_TIMING_V_SYNC_PULSE_MASK GENMASK(7, 0) +#define DISPC_VP_TIMING_V_FRONT_PORCH_MASK GENMASK(19, 8) +#define DISPC_VP_TIMING_V_BACK_PORCH_MASK GENMASK(31, 20) + #define DISPC_VP_CSC_COEF3 0x5c #define DISPC_VP_CSC_COEF4 0x60 #define DISPC_VP_CSC_COEF5 0x64 @@ -220,6 +294,8 @@ enum dispc_common_regs { #define DISPC_VP_SAFETY_LFSR_SEED 0x110 #define DISPC_VP_GAMMA_TABLE 0x120 #define DISPC_VP_DSS_OLDI_CFG 0x160 +#define DISPC_VP_DSS_OLDI_CFG_MAP_MASK GENMASK(3, 1) + #define DISPC_VP_DSS_OLDI_STATUS 0x164 #define DISPC_VP_DSS_OLDI_LB 0x168 #define DISPC_VP_DSS_MERGE_SPLIT 0x16c /* J721E */ diff --git a/drivers/gpu/drm/tiny/Kconfig b/drivers/gpu/drm/tiny/Kconfig index 06e54694a7f2..94a5bf61a115 100644 --- a/drivers/gpu/drm/tiny/Kconfig +++ b/drivers/gpu/drm/tiny/Kconfig @@ -82,6 +82,21 @@ config DRM_PANEL_MIPI_DBI https://github.com/notro/panel-mipi-dbi/wiki. To compile this driver as a module, choose M here. +config DRM_PIXPAPER + tristate "DRM support for PIXPAPER display panels" + depends on DRM && SPI + select DRM_CLIENT_SELECTION + select DRM_GEM_DMA_HELPER + select DRM_KMS_HELPER + help + DRM driver for the Mayqueen Pixpaper e-ink display panel. + + This driver supports small e-paper displays connected over SPI, + with a resolution of 122x250 and XRGB8888 framebuffer format. + It is intended for low-power embedded applications. + + If M is selected, the module will be built as pixpaper.ko. 
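Editor's note on the tidss hunks above: REG_GET() and the *_FLD_MOD() helpers now take a single GENMASK() value instead of a (start, end) bit pair. The sketch below is not the driver's code; it only illustrates how a mask-based read-modify-write accessor can be built, assuming plain readl()/writel() MMIO and the helper names dss_field_read()/dss_field_write(), which are invented for this example.

#include <linux/bitops.h>
#include <linux/io.h>

/* Extract the field described by mask, e.g. DSS_SYSSTATUS_DISPC_IDLE_STATUS. */
static inline u32 dss_field_read(void __iomem *base, u32 reg, u32 mask)
{
	return (readl(base + reg) & mask) >> (ffs(mask) - 1);
}

/* Read-modify-write the field described by mask with a new value. */
static inline void dss_field_write(void __iomem *base, u32 reg, u32 val, u32 mask)
{
	u32 v = readl(base + reg);

	v &= ~mask;
	v |= (val << (ffs(mask) - 1)) & mask;
	writel(v, base + reg);
}

A single-bit mask such as DSS_SYSCONFIG_SOFTRESET_MASK (GENMASK(1, 1)) degenerates to the old (1, 1) start/end pair, so the call sites change shape without changing behaviour.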
+ config TINYDRM_HX8357D tristate "DRM support for HX8357D display panels" depends on DRM && SPI diff --git a/drivers/gpu/drm/tiny/Makefile b/drivers/gpu/drm/tiny/Makefile index 4a9ff61ec254..48d30bf6152f 100644 --- a/drivers/gpu/drm/tiny/Makefile +++ b/drivers/gpu/drm/tiny/Makefile @@ -6,6 +6,7 @@ obj-$(CONFIG_DRM_BOCHS) += bochs.o obj-$(CONFIG_DRM_CIRRUS_QEMU) += cirrus-qemu.o obj-$(CONFIG_DRM_GM12U320) += gm12u320.o obj-$(CONFIG_DRM_PANEL_MIPI_DBI) += panel-mipi-dbi.o +obj-$(CONFIG_DRM_PIXPAPER) += pixpaper.o obj-$(CONFIG_TINYDRM_HX8357D) += hx8357d.o obj-$(CONFIG_TINYDRM_ILI9163) += ili9163.o obj-$(CONFIG_TINYDRM_ILI9225) += ili9225.o diff --git a/drivers/gpu/drm/tiny/bochs.c b/drivers/gpu/drm/tiny/bochs.c index 8d3b7c4fa6a4..d2d5e9f1269f 100644 --- a/drivers/gpu/drm/tiny/bochs.c +++ b/drivers/gpu/drm/tiny/bochs.c @@ -252,7 +252,7 @@ static int bochs_hw_init(struct bochs_device *bochs) } bochs->ioports = 1; } else { - dev_err(dev->dev, "I/O ports are not supported\n"); + drm_err(dev, "I/O ports are not supported\n"); return -EIO; } diff --git a/drivers/gpu/drm/tiny/pixpaper.c b/drivers/gpu/drm/tiny/pixpaper.c new file mode 100644 index 000000000000..b1379cb5f030 --- /dev/null +++ b/drivers/gpu/drm/tiny/pixpaper.c @@ -0,0 +1,1165 @@ +// SPDX-License-Identifier: GPL-2.0 +/* + * DRM driver for PIXPAPER e-ink panel + * + * Author: LiangCheng Wang <zaq14760@gmail.com>, + */ +#include <linux/delay.h> +#include <linux/module.h> +#include <linux/spi/spi.h> + +#include <drm/clients/drm_client_setup.h> +#include <drm/drm_atomic.h> +#include <drm/drm_atomic_helper.h> +#include <drm/drm_drv.h> +#include <drm/drm_fbdev_shmem.h> +#include <drm/drm_framebuffer.h> +#include <drm/drm_gem_atomic_helper.h> +#include <drm/drm_gem_shmem_helper.h> +#include <drm/drm_gem_framebuffer_helper.h> +#include <drm/drm_probe_helper.h> + +/* + * Note on Undocumented Commands/Registers: + * + * Several commands and register parameters defined in this header are not + * documented in the datasheet. Their values and usage have been derived + * through analysis of existing userspace example programs. + * + * These 'unknown' definitions are crucial for the proper initialization + * and stable operation of the panel. Modifying these values without + * thorough understanding may lead to display anomalies, panel damage, + * or unexpected behavior. 
+ */ + +/* Command definitions */ +#define PIXPAPER_CMD_PANEL_SETTING 0x00 /* R00H: Panel settings */ +#define PIXPAPER_CMD_POWER_SETTING 0x01 /* R01H: Power settings */ +#define PIXPAPER_CMD_POWER_OFF 0x02 /* R02H: Power off */ +#define PIXPAPER_CMD_POWER_OFF_SEQUENCE 0x03 /* R03H: Power off sequence */ +#define PIXPAPER_CMD_POWER_ON 0x04 /* R04H: Power on */ +#define PIXPAPER_CMD_BOOSTER_SOFT_START 0x06 /* R06H: Booster soft start */ +#define PIXPAPER_CMD_DEEP_SLEEP 0x07 /* R07H: Deep sleep */ +#define PIXPAPER_CMD_DATA_START_TRANSMISSION 0x10 +/* R10H: Data transmission start */ +#define PIXPAPER_CMD_DISPLAY_REFRESH 0x12 /* R12H: Display refresh */ +#define PIXPAPER_CMD_PLL_CONTROL 0x30 /* R30H: PLL control */ +#define PIXPAPER_CMD_TEMP_SENSOR_CALIB 0x41 +/* R41H: Temperature sensor calibration */ +#define PIXPAPER_CMD_UNKNOWN_4D 0x4D /* R4DH: Unknown command */ +#define PIXPAPER_CMD_VCOM_INTERVAL 0x50 /* R50H: VCOM interval */ +#define PIXPAPER_CMD_UNKNOWN_60 0x60 /* R60H: Unknown command */ +#define PIXPAPER_CMD_RESOLUTION_SETTING 0x61 /* R61H: Resolution settings */ +#define PIXPAPER_CMD_GATE_SOURCE_START 0x65 /* R65H: Gate/source start */ +#define PIXPAPER_CMD_UNKNOWN_B4 0xB4 /* RB4H: Unknown command */ +#define PIXPAPER_CMD_UNKNOWN_B5 0xB5 /* RB5H: Unknown command */ +#define PIXPAPER_CMD_UNKNOWN_E0 0xE0 /* RE0H: Unknown command */ +#define PIXPAPER_CMD_POWER_SAVING 0xE3 /* RE3H: Power saving */ +#define PIXPAPER_CMD_UNKNOWN_E7 0xE7 /* RE7H: Unknown command */ +#define PIXPAPER_CMD_UNKNOWN_E9 0xE9 /* RE9H: Unknown command */ + +/* R00H PSR - First Parameter */ +#define PIXPAPER_PSR_RST_N BIT(0) +/* Bit 0: RST_N, 1=no effect (default), 0=reset with booster OFF */ +#define PIXPAPER_PSR_SHD_N BIT(1) +/* Bit 1: SHD_N, 1=booster ON (default), 0=booster OFF */ +#define PIXPAPER_PSR_SHL BIT(2) +/* Bit 2: SHL, 1=shift right (default), 0=shift left */ +#define PIXPAPER_PSR_UD BIT(3) +/* Bit 3: UD, 1=scan up (default), 0=scan down */ +#define PIXPAPER_PSR_PST_MODE BIT(5) +/* Bit 5: PST_MODE, 0=frame scanning (default), 1=external */ +#define PIXPAPER_PSR_RES_MASK (3 << 6) +/* Bits 7-6: RES[1:0], resolution setting */ +#define PIXPAPER_PSR_RES_176x296 (0x0 << 6) /* 00: 176x296 */ +#define PIXPAPER_PSR_RES_128x296 (0x1 << 6) /* 01: 128x296 */ +#define PIXPAPER_PSR_RES_128x250 (0x2 << 6) /* 10: 128x250 */ +#define PIXPAPER_PSR_RES_112x204 (0x3 << 6) /* 11: 112x204 */ +#define PIXPAPER_PSR_CONFIG \ + (PIXPAPER_PSR_RST_N | PIXPAPER_PSR_SHD_N | PIXPAPER_PSR_SHL | \ + PIXPAPER_PSR_UD) +/* 0x0F: Default settings, resolution set by R61H */ + +/* R00H PSR - Second Parameter */ +#define PIXPAPER_PSR2_VC_LUTZ \ + (1 << 0) /* Bit 0: VC_LUTZ, 1=VCOM float after refresh (default), 0=no effect */ +#define PIXPAPER_PSR2_NORG \ + (1 << 1) /* Bit 1: NORG, 1=VCOM to GND before power off, 0=no effect (default) */ +#define PIXPAPER_PSR2_TIEG \ + (1 << 2) /* Bit 2: TIEG, 1=VGN to GND on power off, 0=no effect (default) */ +#define PIXPAPER_PSR2_TS_AUTO \ + (1 << 3) /* Bit 3: TS_AUTO, 1=sensor on RST_N low to high (default), 0=on booster */ +#define PIXPAPER_PSR2_VCMZ \ + (1 << 4) /* Bit 4: VCMZ, 1=VCOM always floating, 0=no effect (default) */ +#define PIXPAPER_PSR2_FOPT \ + (1 << 5) /* Bit 5: FOPT, 0=scan 1 frame (default), 1=no scan, HiZ */ +#define PIXPAPER_PSR_CONFIG2 \ + (PIXPAPER_PSR2_VC_LUTZ | \ + PIXPAPER_PSR2_TS_AUTO) /* 0x09: Default VCOM and temp sensor settings */ + +/* R01H PWR - Power Setting Register */ +/* First Parameter */ +#define PIXPAPER_PWR_VDG_EN \ + (1 << 0) /* Bit 0: VDG_EN, 1=internal 
DCDC for VGP/VGN (default), 0=external */ +#define PIXPAPER_PWR_VDS_EN \ + (1 << 1) /* Bit 1: VDS_EN, 1=internal regulator for VSP/VSN (default), 0=external */ +#define PIXPAPER_PWR_VSC_EN \ + (1 << 2) /* Bit 2: VSC_EN, 1=internal regulator for VSPL (default), 0=external */ +#define PIXPAPER_PWR_V_MODE \ + (1 << 3) /* Bit 3: V_MODE, 0=Mode0 (default), 1=Mode1 */ +#define PIXPAPER_PWR_CONFIG1 \ + (PIXPAPER_PWR_VDG_EN | PIXPAPER_PWR_VDS_EN | \ + PIXPAPER_PWR_VSC_EN) /* 0x07: Internal power for VGP/VGN, VSP/VSN, VSPL */ + +/* Second Parameter */ +#define PIXPAPER_PWR_VGPN_MASK \ + (3 << 0) /* Bits 1-0: VGPN, VGP/VGN voltage levels */ +#define PIXPAPER_PWR_VGPN_20V (0x0 << 0) /* 00: VGP=20V, VGN=-20V (default) */ +#define PIXPAPER_PWR_VGPN_17V (0x1 << 0) /* 01: VGP=17V, VGN=-17V */ +#define PIXPAPER_PWR_VGPN_15V (0x2 << 0) /* 10: VGP=15V, VGN=-15V */ +#define PIXPAPER_PWR_VGPN_10V (0x3 << 0) /* 11: VGP=10V, VGN=-10V */ +#define PIXPAPER_PWR_CONFIG2 PIXPAPER_PWR_VGPN_20V /* 0x00: VGP=20V, VGN=-20V */ + +/* Third, Fourth, Sixth Parameters (VSP_1, VSPL_0, VSPL_1) */ +#define PIXPAPER_PWR_VSP_8_2V 0x22 /* VSP_1/VSPL_1: 8.2V (34 decimal) */ +#define PIXPAPER_PWR_VSPL_15V 0x78 /* VSPL_0: 15V (120 decimal) */ + +/* Fifth Parameter (VSN_1) */ +#define PIXPAPER_PWR_VSN_4V 0x0A /* VSN_1: -4V (10 decimal) */ + +/* R03H PFS - Power Off Sequence Setting Register */ +/* First Parameter */ +#define PIXPAPER_PFS_T_VDS_OFF_MASK \ + (3 << 0) /* Bits 1-0: T_VDS_OFF, VSP/VSN power-off sequence */ +#define PIXPAPER_PFS_T_VDS_OFF_20MS (0x0 << 0) /* 00: 20 ms (default) */ +#define PIXPAPER_PFS_T_VDS_OFF_40MS (0x1 << 0) /* 01: 40 ms */ +#define PIXPAPER_PFS_T_VDS_OFF_60MS (0x2 << 0) /* 10: 60 ms */ +#define PIXPAPER_PFS_T_VDS_OFF_80MS (0x3 << 0) /* 11: 80 ms */ +#define PIXPAPER_PFS_T_VDPG_OFF_MASK \ + (3 << 4) /* Bits 5-4: T_VDPG_OFF, VGP/VGN power-off sequence */ +#define PIXPAPER_PFS_T_VDPG_OFF_20MS (0x0 << 4) /* 00: 20 ms (default) */ +#define PIXPAPER_PFS_T_VDPG_OFF_40MS (0x1 << 4) /* 01: 40 ms */ +#define PIXPAPER_PFS_T_VDPG_OFF_60MS (0x2 << 4) /* 10: 60 ms */ +#define PIXPAPER_PFS_T_VDPG_OFF_80MS (0x3 << 4) /* 11: 80 ms */ +#define PIXPAPER_PFS_CONFIG1 \ + (PIXPAPER_PFS_T_VDS_OFF_20MS | \ + PIXPAPER_PFS_T_VDPG_OFF_20MS) /* 0x10: Default 20 ms for VSP/VSN and VGP/VGN */ + +/* Second Parameter */ +#define PIXPAPER_PFS_VGP_EXT_MASK \ + (0xF << 0) /* Bits 3-0: VGP_EXT, VGP extension time */ +#define PIXPAPER_PFS_VGP_EXT_0MS (0x0 << 0) /* 0000: 0 ms */ +#define PIXPAPER_PFS_VGP_EXT_500MS (0x1 << 0) /* 0001: 500 ms */ +#define PIXPAPER_PFS_VGP_EXT_1000MS (0x2 << 0) /* 0010: 1000 ms */ +#define PIXPAPER_PFS_VGP_EXT_1500MS (0x3 << 0) /* 0011: 1500 ms */ +#define PIXPAPER_PFS_VGP_EXT_2000MS (0x4 << 0) /* 0100: 2000 ms (default) */ +#define PIXPAPER_PFS_VGP_EXT_2500MS (0x5 << 0) /* 0101: 2500 ms */ +#define PIXPAPER_PFS_VGP_EXT_3000MS (0x6 << 0) /* 0110: 3000 ms */ +#define PIXPAPER_PFS_VGP_EXT_3500MS (0x7 << 0) /* 0111: 3500 ms */ +#define PIXPAPER_PFS_VGP_EXT_4000MS (0x8 << 0) /* 1000: 4000 ms */ +#define PIXPAPER_PFS_VGP_EXT_4500MS (0x9 << 0) /* 1001: 4500 ms */ +#define PIXPAPER_PFS_VGP_EXT_5000MS (0xA << 0) /* 1010: 5000 ms */ +#define PIXPAPER_PFS_VGP_EXT_5500MS (0xB << 0) /* 1011: 5500 ms */ +#define PIXPAPER_PFS_VGP_EXT_6000MS (0xC << 0) /* 1100: 6000 ms */ +#define PIXPAPER_PFS_VGP_EXT_6500MS (0xD << 0) /* 1101: 6500 ms */ +#define PIXPAPER_PFS_VGP_LEN_MASK \ + (0xF << 4) /* Bits 7-4: VGP_LEN, VGP at 10V during power-off */ +#define PIXPAPER_PFS_VGP_LEN_0MS (0x0 << 4) /* 0000: 0 ms */ +#define 
PIXPAPER_PFS_VGP_LEN_500MS (0x1 << 4) /* 0001: 500 ms */ +#define PIXPAPER_PFS_VGP_LEN_1000MS (0x2 << 4) /* 0010: 1000 ms */ +#define PIXPAPER_PFS_VGP_LEN_1500MS (0x3 << 4) /* 0011: 1500 ms */ +#define PIXPAPER_PFS_VGP_LEN_2000MS (0x4 << 4) /* 0100: 2000 ms */ +#define PIXPAPER_PFS_VGP_LEN_2500MS (0x5 << 4) /* 0101: 2500 ms (default) */ +#define PIXPAPER_PFS_VGP_LEN_3000MS (0x6 << 4) /* 0110: 3000 ms */ +#define PIXPAPER_PFS_VGP_LEN_3500MS (0x7 << 4) /* 0111: 3500 ms */ +#define PIXPAPER_PFS_VGP_LEN_4000MS (0x8 << 4) /* 1000: 4000 ms */ +#define PIXPAPER_PFS_VGP_LEN_4500MS (0x9 << 4) /* 1001: 4500 ms */ +#define PIXPAPER_PFS_VGP_LEN_5000MS (0xA << 4) /* 1010: 5000 ms */ +#define PIXPAPER_PFS_VGP_LEN_5500MS (0xB << 4) /* 1011: 5500 ms */ +#define PIXPAPER_PFS_VGP_LEN_6000MS (0xC << 4) /* 1100: 6000 ms */ +#define PIXPAPER_PFS_VGP_LEN_6500MS (0xD << 4) /* 1101: 6500 ms */ +#define PIXPAPER_PFS_CONFIG2 \ + (PIXPAPER_PFS_VGP_EXT_1000MS | \ + PIXPAPER_PFS_VGP_LEN_2500MS) /* 0x54: VGP extension 1000 ms, VGP at 10V for 2500 ms */ + +/* Third Parameter */ +#define PIXPAPER_PFS_XON_LEN_MASK \ + (0xF << 0) /* Bits 3-0: XON_LEN, XON enable time */ +#define PIXPAPER_PFS_XON_LEN_0MS (0x0 << 0) /* 0000: 0 ms */ +#define PIXPAPER_PFS_XON_LEN_500MS (0x1 << 0) /* 0001: 500 ms */ +#define PIXPAPER_PFS_XON_LEN_1000MS (0x2 << 0) /* 0010: 1000 ms */ +#define PIXPAPER_PFS_XON_LEN_1500MS (0x3 << 0) /* 0011: 1500 ms */ +#define PIXPAPER_PFS_XON_LEN_2000MS (0x4 << 0) /* 0100: 2000 ms (default) */ +#define PIXPAPER_PFS_XON_LEN_2500MS (0x5 << 0) /* 0101: 2500 ms */ +#define PIXPAPER_PFS_XON_LEN_3000MS (0x6 << 0) /* 0110: 3000 ms */ +#define PIXPAPER_PFS_XON_LEN_3500MS (0x7 << 0) /* 0111: 3500 ms */ +#define PIXPAPER_PFS_XON_LEN_4000MS (0x8 << 0) /* 1000: 4000 ms */ +#define PIXPAPER_PFS_XON_LEN_4500MS (0x9 << 0) /* 1001: 4500 ms */ +#define PIXPAPER_PFS_XON_LEN_5000MS (0xA << 0) /* 1010: 5000 ms */ +#define PIXPAPER_PFS_XON_LEN_5500MS (0xB << 0) /* 1011: 5500 ms */ +#define PIXPAPER_PFS_XON_LEN_6000MS (0xC << 0) /* 1100: 6000 ms */ +#define PIXPAPER_PFS_XON_DLY_MASK \ + (0xF << 4) /* Bits 7-4: XON_DLY, XON delay time */ +#define PIXPAPER_PFS_XON_DLY_0MS (0x0 << 4) /* 0000: 0 ms */ +#define PIXPAPER_PFS_XON_DLY_500MS (0x1 << 4) /* 0001: 500 ms */ +#define PIXPAPER_PFS_XON_DLY_1000MS (0x2 << 4) /* 0010: 1000 ms */ +#define PIXPAPER_PFS_XON_DLY_1500MS (0x3 << 4) /* 0011: 1500 ms */ +#define PIXPAPER_PFS_XON_DLY_2000MS (0x4 << 4) /* 0100: 2000 ms (default) */ +#define PIXPAPER_PFS_XON_DLY_2500MS (0x5 << 4) /* 0101: 2500 ms */ +#define PIXPAPER_PFS_XON_DLY_3000MS (0x6 << 4) /* 0110: 3000 ms */ +#define PIXPAPER_PFS_XON_DLY_3500MS (0x7 << 4) /* 0111: 3500 ms */ +#define PIXPAPER_PFS_XON_DLY_4000MS (0x8 << 4) /* 1000: 4000 ms */ +#define PIXPAPER_PFS_XON_DLY_4500MS (0x9 << 4) /* 1001: 4500 ms */ +#define PIXPAPER_PFS_XON_DLY_5000MS (0xA << 4) /* 1010: 5000 ms */ +#define PIXPAPER_PFS_XON_DLY_5500MS (0xB << 4) /* 1011: 5500 ms */ +#define PIXPAPER_PFS_XON_DLY_6000MS (0xC << 4) /* 1100: 6000 ms */ +#define PIXPAPER_PFS_CONFIG3 \ + (PIXPAPER_PFS_XON_LEN_2000MS | \ + PIXPAPER_PFS_XON_DLY_2000MS) /* 0x44: XON enable and delay at 2000 ms */ + +/* R06H BTST - Booster Soft Start Command */ +/* First Parameter */ +#define PIXPAPER_BTST_PHA_SFT_MASK \ + (3 << 0) /* Bits 1-0: PHA_SFT, soft start period for phase A */ +#define PIXPAPER_BTST_PHA_SFT_10MS (0x0 << 0) /* 00: 10 ms (default) */ +#define PIXPAPER_BTST_PHA_SFT_20MS (0x1 << 0) /* 01: 20 ms */ +#define PIXPAPER_BTST_PHA_SFT_30MS (0x2 << 0) /* 10: 30 ms */ +#define 
PIXPAPER_BTST_PHA_SFT_40MS (0x3 << 0) /* 11: 40 ms */ +#define PIXPAPER_BTST_PHB_SFT_MASK \ + (3 << 2) /* Bits 3-2: PHB_SFT, soft start period for phase B */ +#define PIXPAPER_BTST_PHB_SFT_10MS (0x0 << 2) /* 00: 10 ms (default) */ +#define PIXPAPER_BTST_PHB_SFT_20MS (0x1 << 2) /* 01: 20 ms */ +#define PIXPAPER_BTST_PHB_SFT_30MS (0x2 << 2) /* 10: 30 ms */ +#define PIXPAPER_BTST_PHB_SFT_40MS (0x3 << 2) /* 11: 40 ms */ +#define PIXPAPER_BTST_CONFIG1 \ + (PIXPAPER_BTST_PHA_SFT_40MS | \ + PIXPAPER_BTST_PHB_SFT_40MS) /* 0x0F: 40 ms for phase A and B */ + +/* Second to Seventh Parameters (Driving Strength or Minimum OFF Time) */ +#define PIXPAPER_BTST_CONFIG2 0x0A /* Strength11 */ +#define PIXPAPER_BTST_CONFIG3 0x2F /* Period48 */ +#define PIXPAPER_BTST_CONFIG4 0x25 /* Strength38 */ +#define PIXPAPER_BTST_CONFIG5 0x22 /* Period35 */ +#define PIXPAPER_BTST_CONFIG6 0x2E /* Strength47 */ +#define PIXPAPER_BTST_CONFIG7 0x21 /* Period34 */ + +/* R12H: DRF (Display Refresh) */ +#define PIXPAPER_DRF_VCOM_AC 0x00 /* AC VCOM: VCOM follows LUTC (default) */ +#define PIXPAPER_DRF_VCOM_DC 0x01 /* DC VCOM: VCOM fixed to VCOMDC */ + +/* R30H PLL - PLL Control Register */ +/* First Parameter */ +#define PIXPAPER_PLL_FR_MASK (0x7 << 0) /* Bits 2-0: FR, frame rate */ +#define PIXPAPER_PLL_FR_12_5HZ (0x0 << 0) /* 000: 12.5 Hz */ +#define PIXPAPER_PLL_FR_25HZ (0x1 << 0) /* 001: 25 Hz */ +#define PIXPAPER_PLL_FR_50HZ (0x2 << 0) /* 010: 50 Hz (default) */ +#define PIXPAPER_PLL_FR_65HZ (0x3 << 0) /* 011: 65 Hz */ +#define PIXPAPER_PLL_FR_75HZ (0x4 << 0) /* 100: 75 Hz */ +#define PIXPAPER_PLL_FR_85HZ (0x5 << 0) /* 101: 85 Hz */ +#define PIXPAPER_PLL_FR_100HZ (0x6 << 0) /* 110: 100 Hz */ +#define PIXPAPER_PLL_FR_120HZ (0x7 << 0) /* 111: 120 Hz */ +#define PIXPAPER_PLL_DFR \ + (1 << 3) /* Bit 3: Dynamic frame rate, 0=disabled (default), 1=enabled */ +#define PIXPAPER_PLL_CONFIG \ + (PIXPAPER_PLL_FR_50HZ) /* 0x02: 50 Hz, dynamic frame rate disabled */ + +/* R41H TSE - Temperature Sensor Calibration Register */ +/* First Parameter */ +#define PIXPAPER_TSE_TO_MASK \ + (0xF << 0) /* Bits 3-0: TO[3:0], temperature offset */ +#define PIXPAPER_TSE_TO_POS_0C (0x0 << 0) /* 0000: +0°C (default) */ +#define PIXPAPER_TSE_TO_POS_0_5C (0x1 << 0) /* 0001: +0.5°C */ +#define PIXPAPER_TSE_TO_POS_1C (0x2 << 0) /* 0010: +1°C */ +#define PIXPAPER_TSE_TO_POS_1_5C (0x3 << 0) /* 0011: +1.5°C */ +#define PIXPAPER_TSE_TO_POS_2C (0x4 << 0) /* 0100: +2°C */ +#define PIXPAPER_TSE_TO_POS_2_5C (0x5 << 0) /* 0101: +2.5°C */ +#define PIXPAPER_TSE_TO_POS_3C (0x6 << 0) /* 0110: +3°C */ +#define PIXPAPER_TSE_TO_POS_3_5C (0x7 << 0) /* 0111: +3.5°C */ +#define PIXPAPER_TSE_TO_NEG_4C (0x8 << 0) /* 1000: -4°C */ +#define PIXPAPER_TSE_TO_NEG_3_5C (0x9 << 0) /* 1001: -3.5°C */ +#define PIXPAPER_TSE_TO_NEG_3C (0xA << 0) /* 1010: -3°C */ +#define PIXPAPER_TSE_TO_NEG_2_5C (0xB << 0) /* 1011: -2.5°C */ +#define PIXPAPER_TSE_TO_NEG_2C (0xC << 0) /* 1100: -2°C */ +#define PIXPAPER_TSE_TO_NEG_1_5C (0xD << 0) /* 1101: -1.5°C */ +#define PIXPAPER_TSE_TO_NEG_1C (0xE << 0) /* 1110: -1°C */ +#define PIXPAPER_TSE_TO_NEG_0_5C (0xF << 0) /* 1111: -0.5°C */ +#define PIXPAPER_TSE_TO_FINE_MASK \ + (0x3 << 4) /* Bits 5-4: TO[5:4], fine adjustment for positive offsets */ +#define PIXPAPER_TSE_TO_FINE_0C (0x0 << 4) /* 00: +0.0°C (default) */ +#define PIXPAPER_TSE_TO_FINE_0_25C (0x1 << 4) /* 01: +0.25°C */ +#define PIXPAPER_TSE_ENABLE \ + (0 << 7) /* Bit 7: TSE, 0=internal sensor enabled (default), 1=disabled (external) */ +#define PIXPAPER_TSE_DISABLE \ + (1 << 7) /* Bit 7: TSE, 
1=internal sensor disabled, use external */ +#define PIXPAPER_TSE_CONFIG \ + (PIXPAPER_TSE_TO_POS_0C | PIXPAPER_TSE_TO_FINE_0C | \ + PIXPAPER_TSE_ENABLE) /* 0x00: Internal sensor enabled, +0°C offset */ + +/* R4DH */ +#define PIXPAPER_UNKNOWN_4D_CONFIG \ + 0x78 /* This value is essential for initialization, derived from userspace examples. */ + +/* R50H CDI - VCOM and DATA Interval Setting Register */ +/* First Parameter */ +#define PIXPAPER_CDI_INTERVAL_MASK \ + (0xF << 0) /* Bits 3-0: CDI[3:0], VCOM and data interval (hsync) */ +#define PIXPAPER_CDI_17_HSYNC (0x0 << 0) /* 0000: 17 hsync */ +#define PIXPAPER_CDI_16_HSYNC (0x1 << 0) /* 0001: 16 hsync */ +#define PIXPAPER_CDI_15_HSYNC (0x2 << 0) /* 0010: 15 hsync */ +#define PIXPAPER_CDI_14_HSYNC (0x3 << 0) /* 0011: 14 hsync */ +#define PIXPAPER_CDI_13_HSYNC (0x4 << 0) /* 0100: 13 hsync */ +#define PIXPAPER_CDI_12_HSYNC (0x5 << 0) /* 0101: 12 hsync */ +#define PIXPAPER_CDI_11_HSYNC (0x6 << 0) /* 0110: 11 hsync */ +#define PIXPAPER_CDI_10_HSYNC (0x7 << 0) /* 0111: 10 hsync (default) */ +#define PIXPAPER_CDI_9_HSYNC (0x8 << 0) /* 1000: 9 hsync */ +#define PIXPAPER_CDI_8_HSYNC (0x9 << 0) /* 1001: 8 hsync */ +#define PIXPAPER_CDI_7_HSYNC (0xA << 0) /* 1010: 7 hsync */ +#define PIXPAPER_CDI_6_HSYNC (0xB << 0) /* 1011: 6 hsync */ +#define PIXPAPER_CDI_5_HSYNC (0xC << 0) /* 1100: 5 hsync */ +#define PIXPAPER_CDI_4_HSYNC (0xD << 0) /* 1101: 4 hsync */ +#define PIXPAPER_CDI_3_HSYNC (0xE << 0) /* 1110: 3 hsync */ +#define PIXPAPER_CDI_2_HSYNC (0xF << 0) /* 1111: 2 hsync */ +#define PIXPAPER_CDI_DDX \ + (1 << 4) /* Bit 4: DDX, 0=grayscale mapping 0, 1=grayscale mapping 1 (default) */ +#define PIXPAPER_CDI_VBD_MASK \ + (0x7 << 5) /* Bits 7-5: VBD[2:0], border data selection */ +#define PIXPAPER_CDI_VBD_FLOAT (0x0 << 5) /* 000: Floating (DDX=0 or 1) */ +#define PIXPAPER_CDI_VBD_GRAY3_DDX0 \ + (0x1 << 5) /* 001: Gray3 (border_buf=011) when DDX=0 */ +#define PIXPAPER_CDI_VBD_GRAY2_DDX0 \ + (0x2 << 5) /* 010: Gray2 (border_buf=010) when DDX=0 */ +#define PIXPAPER_CDI_VBD_GRAY1_DDX0 \ + (0x3 << 5) /* 011: Gray1 (border_buf=001) when DDX=0 */ +#define PIXPAPER_CDI_VBD_GRAY0_DDX0 \ + (0x4 << 5) /* 100: Gray0 (border_buf=000) when DDX=0 */ +#define PIXPAPER_CDI_VBD_GRAY0_DDX1 \ + (0x0 << 5) /* 000: Gray0 (border_buf=000) when DDX=1 */ +#define PIXPAPER_CDI_VBD_GRAY1_DDX1 \ + (0x1 << 5) /* 001: Gray1 (border_buf=001) when DDX=1 */ +#define PIXPAPER_CDI_VBD_GRAY2_DDX1 \ + (0x2 << 5) /* 010: Gray2 (border_buf=010) when DDX=1 */ +#define PIXPAPER_CDI_VBD_GRAY3_DDX1 \ + (0x3 << 5) /* 011: Gray3 (border_buf=011) when DDX=1 */ +#define PIXPAPER_CDI_VBD_FLOAT_DDX1 (0x4 << 5) /* 100: Floating when DDX=1 */ +#define PIXPAPER_CDI_CONFIG \ + (PIXPAPER_CDI_10_HSYNC | PIXPAPER_CDI_DDX | \ + PIXPAPER_CDI_VBD_GRAY1_DDX1) /* 0x37: 10 hsync, DDX=1, border Gray1 */ + +/* R60H */ +#define PIXPAPER_UNKNOWN_60_CONFIG1 \ + 0x02 /* This value is essential for initialization, derived from userspace examples. */ +#define PIXPAPER_UNKNOWN_60_CONFIG2 \ + 0x02 /* This value is essential for initialization, derived from userspace examples. 
*/ + +/* R61H TRES - Resolution Setting Register */ +#define PIXPAPER_TRES_HRES_H \ + ((PIXPAPER_PANEL_BUFFER_WIDTH >> 8) & \ + 0xFF) /* HRES[9:8]: High byte of horizontal resolution (128) */ +#define PIXPAPER_TRES_HRES_L \ + (PIXPAPER_PANEL_BUFFER_WIDTH & \ + 0xFF) /* HRES[7:0]: Low byte of horizontal resolution (128 = 0x80) */ +#define PIXPAPER_TRES_VRES_H \ + ((PIXPAPER_HEIGHT >> 8) & \ + 0xFF) /* VRES[9:8]: High byte of vertical resolution (250) */ +#define PIXPAPER_TRES_VRES_L \ + (PIXPAPER_HEIGHT & \ + 0xFF) /* VRES[7:0]: Low byte of vertical resolution (250 = 0xFA) */ + +/* R65H GSST - Gate/Source Start Setting Register */ +#define PIXPAPER_GSST_S_START 0x00 /* S_Start[7:0]: First source line (S0) */ +#define PIXPAPER_GSST_RESERVED 0x00 /* Reserved byte */ +#define PIXPAPER_GSST_G_START_H \ + 0x00 /* G_Start[8]: High bit of first gate line (G0) */ +#define PIXPAPER_GSST_G_START_L \ + 0x00 /* G_Start[7:0]: Low byte of first gate line (G0) */ + +/* RB4H */ +#define PIXPAPER_UNKNOWN_B4_CONFIG \ + 0xD0 /* This value is essential for initialization, derived from userspace examples. */ + +/* RB5H */ +#define PIXPAPER_UNKNOWN_B5_CONFIG \ + 0x03 /* This value is essential for initialization, derived from userspace examples. */ + +/* RE0H */ +#define PIXPAPER_UNKNOWN_E0_CONFIG \ + 0x00 /* This value is essential for initialization, derived from userspace examples. */ + +/* RE3H PWS - Power Saving Register */ +/* First Parameter */ +#define PIXPAPER_PWS_VCOM_W_MASK \ + (0xF \ + << 4) /* Bits 7-4: VCOM_W[3:0], VCOM power-saving width (line periods) */ +#define PIXPAPER_PWS_VCOM_W_0 (0x0 << 4) /* 0000: 0 line periods */ +#define PIXPAPER_PWS_VCOM_W_1 (0x1 << 4) /* 0001: 1 line period */ +#define PIXPAPER_PWS_VCOM_W_2 (0x2 << 4) /* 0010: 2 line periods */ +#define PIXPAPER_PWS_VCOM_W_3 (0x3 << 4) /* 0011: 3 line periods */ +#define PIXPAPER_PWS_VCOM_W_4 (0x4 << 4) /* 0100: 4 line periods */ +#define PIXPAPER_PWS_VCOM_W_5 (0x5 << 4) /* 0101: 5 line periods */ +#define PIXPAPER_PWS_VCOM_W_6 (0x6 << 4) /* 0110: 6 line periods */ +#define PIXPAPER_PWS_VCOM_W_7 (0x7 << 4) /* 0111: 7 line periods */ +#define PIXPAPER_PWS_VCOM_W_8 (0x8 << 4) /* 1000: 8 line periods */ +#define PIXPAPER_PWS_VCOM_W_9 (0x9 << 4) /* 1001: 9 line periods */ +#define PIXPAPER_PWS_VCOM_W_10 (0xA << 4) /* 1010: 10 line periods */ +#define PIXPAPER_PWS_VCOM_W_11 (0xB << 4) /* 1011: 11 line periods */ +#define PIXPAPER_PWS_VCOM_W_12 (0xC << 4) /* 1100: 12 line periods */ +#define PIXPAPER_PWS_VCOM_W_13 (0xD << 4) /* 1101: 13 line periods */ +#define PIXPAPER_PWS_VCOM_W_14 (0xE << 4) /* 1110: 14 line periods */ +#define PIXPAPER_PWS_VCOM_W_15 (0xF << 4) /* 1111: 15 line periods */ +#define PIXPAPER_PWS_SD_W_MASK \ + (0xF << 0) /* Bits 3-0: SD_W[3:0], source power-saving width (660 ns units) */ +#define PIXPAPER_PWS_SD_W_0 (0x0 << 0) /* 0000: 0 ns */ +#define PIXPAPER_PWS_SD_W_1 (0x1 << 0) /* 0001: 660 ns */ +#define PIXPAPER_PWS_SD_W_2 (0x2 << 0) /* 0010: 1320 ns */ +#define PIXPAPER_PWS_SD_W_3 (0x3 << 0) /* 0011: 1980 ns */ +#define PIXPAPER_PWS_SD_W_4 (0x4 << 0) /* 0100: 2640 ns */ +#define PIXPAPER_PWS_SD_W_5 (0x5 << 0) /* 0101: 3300 ns */ +#define PIXPAPER_PWS_SD_W_6 (0x6 << 0) /* 0110: 3960 ns */ +#define PIXPAPER_PWS_SD_W_7 (0x7 << 0) /* 0111: 4620 ns */ +#define PIXPAPER_PWS_SD_W_8 (0x8 << 0) /* 1000: 5280 ns */ +#define PIXPAPER_PWS_SD_W_9 (0x9 << 0) /* 1001: 5940 ns */ +#define PIXPAPER_PWS_SD_W_10 (0xA << 0) /* 1010: 6600 ns */ +#define PIXPAPER_PWS_SD_W_11 (0xB << 0) /* 1011: 7260 ns */ +#define PIXPAPER_PWS_SD_W_12 
(0xC << 0) /* 1100: 7920 ns */ +#define PIXPAPER_PWS_SD_W_13 (0xD << 0) /* 1101: 8580 ns */ +#define PIXPAPER_PWS_SD_W_14 (0xE << 0) /* 1110: 9240 ns */ +#define PIXPAPER_PWS_SD_W_15 (0xF << 0) /* 1111: 9900 ns */ +#define PIXPAPER_PWS_CONFIG \ + (PIXPAPER_PWS_VCOM_W_2 | \ + PIXPAPER_PWS_SD_W_2) /* 0x22: VCOM 2 line periods (160 µs), source 1320 ns */ + +/* RE7H */ +#define PIXPAPER_UNKNOWN_E7_CONFIG \ + 0x1C /* This value is essential for initialization, derived from userspace examples. */ + +/* RE9H */ +#define PIXPAPER_UNKNOWN_E9_CONFIG \ + 0x01 /* This value is essential for initialization, derived from userspace examples. */ + +MODULE_IMPORT_NS("DMA_BUF"); + +/* + * The panel has a visible resolution of 122x250. + * However, the controller requires the horizontal resolution to be aligned to 128 pixels. + * No porch or sync timing values are provided in the datasheet, so we define minimal + * placeholder values to satisfy the DRM framework. + */ + +/* Panel visible resolution */ +#define PIXPAPER_WIDTH 122 +#define PIXPAPER_HEIGHT 250 + +/* Controller requires 128 horizontal pixels total (for memory alignment) */ +#define PIXPAPER_HTOTAL 128 +#define PIXPAPER_HFP 2 +#define PIXPAPER_HSYNC 2 +#define PIXPAPER_HBP (PIXPAPER_HTOTAL - PIXPAPER_WIDTH - PIXPAPER_HFP - PIXPAPER_HSYNC) + +/* + * According to the datasheet, the total vertical blanking must be 55 lines, + * regardless of how the vertical back porch is set. + * Here we allocate VFP=2, VSYNC=2, and VBP=51 to sum up to 55 lines. + * Total vertical lines = 250 (visible) + 55 (blanking) = 305. + */ +#define PIXPAPER_VTOTAL (250 + 55) +#define PIXPAPER_VFP 2 +#define PIXPAPER_VSYNC 2 +#define PIXPAPER_VBP (55 - PIXPAPER_VFP - PIXPAPER_VSYNC) + +/* + * Pixel clock calculation: + * pixel_clock = htotal * vtotal * refresh_rate + * = 128 * 305 * 50 + * = 1,952,000 Hz = 1952 kHz + */ +#define PIXPAPER_PIXEL_CLOCK 1952 + +#define PIXPAPER_WIDTH_MM 24 /* approximate from 23.7046mm */ +#define PIXPAPER_HEIGHT_MM 49 /* approximate from 48.55mm */ + +#define PIXPAPER_SPI_BITS_PER_WORD 8 +#define PIXPAPER_SPI_SPEED_DEFAULT 1000000 + +#define PIXPAPER_PANEL_BUFFER_WIDTH 128 +#define PIXPAPER_PANEL_BUFFER_TWO_BYTES_PER_ROW (PIXPAPER_PANEL_BUFFER_WIDTH / 4) + +#define PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL 60 +#define PIXPAPER_COLOR_THRESHOLD_HIGH_CHANNEL 200 +#define PIXPAPER_COLOR_THRESHOLD_YELLOW_MIN_GREEN 180 + +struct pixpaper_error_ctx { + int errno_code; +}; + +struct pixpaper_panel { + struct drm_device drm; + struct drm_plane plane; + struct drm_crtc crtc; + struct drm_encoder encoder; + struct drm_connector connector; + + struct spi_device *spi; + struct gpio_desc *reset; + struct gpio_desc *busy; + struct gpio_desc *dc; +}; + +static inline struct pixpaper_panel *to_pixpaper_panel(struct drm_device *drm) +{ + return container_of(drm, struct pixpaper_panel, drm); +} + +static void pixpaper_wait_for_panel(struct pixpaper_panel *panel) +{ + unsigned int timeout_ms = 10000; + unsigned long timeout_jiffies = jiffies + msecs_to_jiffies(timeout_ms); + + usleep_range(1000, 1500); + while (gpiod_get_value_cansleep(panel->busy) != 1) { + if (time_after(jiffies, timeout_jiffies)) { + drm_warn(&panel->drm, "Busy wait timed out\n"); + return; + } + usleep_range(100, 200); + } +} + +static void pixpaper_spi_sync(struct spi_device *spi, struct spi_message *msg, + struct pixpaper_error_ctx *err) +{ + if (err->errno_code) + return; + + int ret = spi_sync(spi, msg); + + if (ret < 0) + err->errno_code = ret; +} + +static void pixpaper_send_cmd(struct 
pixpaper_panel *panel, u8 cmd, + struct pixpaper_error_ctx *err) +{ + if (err->errno_code) + return; + + struct spi_transfer xfer = { + .tx_buf = &cmd, + .len = 1, + }; + struct spi_message msg; + + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + gpiod_set_value_cansleep(panel->dc, 0); + usleep_range(1, 5); + pixpaper_spi_sync(panel->spi, &msg, err); +} + +static void pixpaper_send_data(struct pixpaper_panel *panel, u8 data, + struct pixpaper_error_ctx *err) +{ + if (err->errno_code) + return; + + struct spi_transfer xfer = { + .tx_buf = &data, + .len = 1, + }; + struct spi_message msg; + + spi_message_init(&msg); + spi_message_add_tail(&xfer, &msg); + + gpiod_set_value_cansleep(panel->dc, 1); + usleep_range(1, 5); + pixpaper_spi_sync(panel->spi, &msg, err); +} + +static int pixpaper_panel_hw_init(struct pixpaper_panel *panel) +{ + struct pixpaper_error_ctx err = { .errno_code = 0 }; + + gpiod_set_value_cansleep(panel->reset, 0); + msleep(50); + gpiod_set_value_cansleep(panel->reset, 1); + msleep(50); + + pixpaper_wait_for_panel(panel); + + pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_4D, &err); + pixpaper_send_data(panel, PIXPAPER_UNKNOWN_4D_CONFIG, &err); + if (err.errno_code) + goto init_fail; + pixpaper_wait_for_panel(panel); + + pixpaper_send_cmd(panel, PIXPAPER_CMD_PANEL_SETTING, &err); + pixpaper_send_data(panel, PIXPAPER_PSR_CONFIG, &err); + pixpaper_send_data(panel, PIXPAPER_PSR_CONFIG2, &err); + if (err.errno_code) + goto init_fail; + pixpaper_wait_for_panel(panel); + + pixpaper_send_cmd(panel, PIXPAPER_CMD_POWER_SETTING, &err); + pixpaper_send_data(panel, PIXPAPER_PWR_CONFIG1, &err); + pixpaper_send_data(panel, PIXPAPER_PWR_CONFIG2, &err); + pixpaper_send_data(panel, PIXPAPER_PWR_VSP_8_2V, &err); + pixpaper_send_data(panel, PIXPAPER_PWR_VSPL_15V, &err); + pixpaper_send_data(panel, PIXPAPER_PWR_VSN_4V, &err); + pixpaper_send_data(panel, PIXPAPER_PWR_VSP_8_2V, &err); + if (err.errno_code) + goto init_fail; + pixpaper_wait_for_panel(panel); + + pixpaper_send_cmd(panel, PIXPAPER_CMD_POWER_OFF_SEQUENCE, &err); + pixpaper_send_data(panel, PIXPAPER_PFS_CONFIG1, &err); + pixpaper_send_data(panel, PIXPAPER_PFS_CONFIG2, &err); + pixpaper_send_data(panel, PIXPAPER_PFS_CONFIG3, &err); + if (err.errno_code) + goto init_fail; + pixpaper_wait_for_panel(panel); + + pixpaper_send_cmd(panel, PIXPAPER_CMD_BOOSTER_SOFT_START, &err); + pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG1, &err); + pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG2, &err); + pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG3, &err); + pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG4, &err); + pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG5, &err); + pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG6, &err); + pixpaper_send_data(panel, PIXPAPER_BTST_CONFIG7, &err); + if (err.errno_code) + goto init_fail; + pixpaper_wait_for_panel(panel); + + pixpaper_send_cmd(panel, PIXPAPER_CMD_PLL_CONTROL, &err); + pixpaper_send_data(panel, PIXPAPER_PLL_CONFIG, &err); + if (err.errno_code) + goto init_fail; + pixpaper_wait_for_panel(panel); + + pixpaper_send_cmd(panel, PIXPAPER_CMD_TEMP_SENSOR_CALIB, &err); + pixpaper_send_data(panel, PIXPAPER_TSE_CONFIG, &err); + if (err.errno_code) + goto init_fail; + pixpaper_wait_for_panel(panel); + + pixpaper_send_cmd(panel, PIXPAPER_CMD_VCOM_INTERVAL, &err); + pixpaper_send_data(panel, PIXPAPER_CDI_CONFIG, &err); + if (err.errno_code) + goto init_fail; + pixpaper_wait_for_panel(panel); + + pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_60, &err); + pixpaper_send_data(panel, 
PIXPAPER_UNKNOWN_60_CONFIG1, &err); + pixpaper_send_data(panel, PIXPAPER_UNKNOWN_60_CONFIG2, &err); + if (err.errno_code) + goto init_fail; + pixpaper_wait_for_panel(panel); + + pixpaper_send_cmd(panel, PIXPAPER_CMD_RESOLUTION_SETTING, &err); + pixpaper_send_data(panel, PIXPAPER_TRES_HRES_H, &err); + pixpaper_send_data(panel, PIXPAPER_TRES_HRES_L, &err); + pixpaper_send_data(panel, PIXPAPER_TRES_VRES_H, &err); + pixpaper_send_data(panel, PIXPAPER_TRES_VRES_L, &err); + if (err.errno_code) + goto init_fail; + pixpaper_wait_for_panel(panel); + + pixpaper_send_cmd(panel, PIXPAPER_CMD_GATE_SOURCE_START, &err); + pixpaper_send_data(panel, PIXPAPER_GSST_S_START, &err); + pixpaper_send_data(panel, PIXPAPER_GSST_RESERVED, &err); + pixpaper_send_data(panel, PIXPAPER_GSST_G_START_H, &err); + pixpaper_send_data(panel, PIXPAPER_GSST_G_START_L, &err); + if (err.errno_code) + goto init_fail; + pixpaper_wait_for_panel(panel); + + pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_E7, &err); + pixpaper_send_data(panel, PIXPAPER_UNKNOWN_E7_CONFIG, &err); + if (err.errno_code) + goto init_fail; + pixpaper_wait_for_panel(panel); + + pixpaper_send_cmd(panel, PIXPAPER_CMD_POWER_SAVING, &err); + pixpaper_send_data(panel, PIXPAPER_PWS_CONFIG, &err); + if (err.errno_code) + goto init_fail; + pixpaper_wait_for_panel(panel); + + pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_E0, &err); + pixpaper_send_data(panel, PIXPAPER_UNKNOWN_E0_CONFIG, &err); + if (err.errno_code) + goto init_fail; + pixpaper_wait_for_panel(panel); + + pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_B4, &err); + pixpaper_send_data(panel, PIXPAPER_UNKNOWN_B4_CONFIG, &err); + if (err.errno_code) + goto init_fail; + pixpaper_wait_for_panel(panel); + + pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_B5, &err); + pixpaper_send_data(panel, PIXPAPER_UNKNOWN_B5_CONFIG, &err); + if (err.errno_code) + goto init_fail; + pixpaper_wait_for_panel(panel); + + pixpaper_send_cmd(panel, PIXPAPER_CMD_UNKNOWN_E9, &err); + pixpaper_send_data(panel, PIXPAPER_UNKNOWN_E9_CONFIG, &err); + if (err.errno_code) + goto init_fail; + pixpaper_wait_for_panel(panel); + + return 0; + +init_fail: + drm_err(&panel->drm, "Hardware initialization failed (err=%d)\n", + err.errno_code); + return err.errno_code; +} + +/* + * Convert framebuffer pixels to 2-bit e-paper format: + * 00 - White + * 01 - Black + * 10 - Yellow + * 11 - Red + */ +static u8 pack_pixels_to_byte(__le32 *src_pixels, int i, int j, + struct drm_framebuffer *fb) +{ + u8 packed_byte = 0; + int k; + + for (k = 0; k < 4; k++) { + int current_pixel_x = j * 4 + k; + u8 two_bit_val; + + if (current_pixel_x < PIXPAPER_WIDTH) { + u32 pixel_offset = + (i * (fb->pitches[0] / 4)) + current_pixel_x; + u32 pixel = le32_to_cpu(src_pixels[pixel_offset]); + u32 r = (pixel >> 16) & 0xFF; + u32 g = (pixel >> 8) & 0xFF; + u32 b = pixel & 0xFF; + + if (r < PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL && + g < PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL && + b < PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL) { + two_bit_val = 0b00; + } else if (r > PIXPAPER_COLOR_THRESHOLD_HIGH_CHANNEL && + g > PIXPAPER_COLOR_THRESHOLD_HIGH_CHANNEL && + b > PIXPAPER_COLOR_THRESHOLD_HIGH_CHANNEL) { + two_bit_val = 0b01; + } else if (r > PIXPAPER_COLOR_THRESHOLD_HIGH_CHANNEL && + g < PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL && + b < PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL) { + two_bit_val = 0b11; + } else if (r > PIXPAPER_COLOR_THRESHOLD_HIGH_CHANNEL && + g > PIXPAPER_COLOR_THRESHOLD_YELLOW_MIN_GREEN && + b < PIXPAPER_COLOR_THRESHOLD_LOW_CHANNEL) { + two_bit_val = 0b10; + } else { + two_bit_val 
= 0b01; + } + } else { + two_bit_val = 0b01; + } + + packed_byte |= two_bit_val << ((3 - k) * 2); + } + + return packed_byte; +} + +static int pixpaper_plane_helper_atomic_check(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *new_plane_state = + drm_atomic_get_new_plane_state(state, plane); + struct drm_crtc *new_crtc = new_plane_state->crtc; + struct drm_crtc_state *new_crtc_state = NULL; + int ret; + + if (new_crtc) + new_crtc_state = drm_atomic_get_new_crtc_state(state, new_crtc); + + ret = drm_atomic_helper_check_plane_state(new_plane_state, + new_crtc_state, DRM_PLANE_NO_SCALING, + DRM_PLANE_NO_SCALING, false, false); + if (ret) + return ret; + else if (!new_plane_state->visible) + return 0; + + return 0; +} + +static int pixpaper_crtc_helper_atomic_check(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct drm_crtc_state *crtc_state = + drm_atomic_get_new_crtc_state(state, crtc); + + if (!crtc_state->enable) + return 0; + + return drm_atomic_helper_check_crtc_primary_plane(crtc_state); +} + +static void pixpaper_crtc_atomic_enable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct pixpaper_panel *panel = to_pixpaper_panel(crtc->dev); + struct drm_device *drm = &panel->drm; + int idx; + struct pixpaper_error_ctx err = { .errno_code = 0 }; + + if (!drm_dev_enter(drm, &idx)) + return; + + pixpaper_send_cmd(panel, PIXPAPER_CMD_POWER_ON, &err); + if (err.errno_code) { + drm_err_once(drm, "Failed to send PON command: %d\n", err.errno_code); + goto exit_drm_dev; + } + + pixpaper_wait_for_panel(panel); + + drm_dbg(drm, "Panel enabled and powered on\n"); + +exit_drm_dev: + drm_dev_exit(idx); +} + +static void pixpaper_crtc_atomic_disable(struct drm_crtc *crtc, + struct drm_atomic_state *state) +{ + struct pixpaper_panel *panel = to_pixpaper_panel(crtc->dev); + struct drm_device *drm = &panel->drm; + struct pixpaper_error_ctx err = { .errno_code = 0 }; + int idx; + + if (!drm_dev_enter(drm, &idx)) + return; + + pixpaper_send_cmd(panel, PIXPAPER_CMD_POWER_OFF, &err); + if (err.errno_code) { + drm_err_once(drm, "Failed to send POF command: %d\n", err.errno_code); + goto exit_drm_dev; + } + pixpaper_wait_for_panel(panel); + + drm_dbg(drm, "Panel disabled\n"); + +exit_drm_dev: + drm_dev_exit(idx); +} + +static void pixpaper_plane_atomic_update(struct drm_plane *plane, + struct drm_atomic_state *state) +{ + struct drm_plane_state *plane_state = + drm_atomic_get_new_plane_state(state, plane); + struct drm_shadow_plane_state *shadow_plane_state = + to_drm_shadow_plane_state(plane_state); + struct drm_crtc *crtc = plane_state->crtc; + struct pixpaper_panel *panel = to_pixpaper_panel(crtc->dev); + + struct drm_device *drm = &panel->drm; + struct drm_framebuffer *fb = plane_state->fb; + struct iosys_map map = shadow_plane_state->data[0]; + void *vaddr = map.vaddr; + int i, j, idx; + __le32 *src_pixels = NULL; + struct pixpaper_error_ctx err = { .errno_code = 0 }; + + if (!drm_dev_enter(drm, &idx)) + return; + + drm_dbg(drm, "Starting frame update (phys=%dx%d, buf_w=%d)\n", + PIXPAPER_WIDTH, PIXPAPER_HEIGHT, PIXPAPER_PANEL_BUFFER_WIDTH); + + if (!fb || !plane_state->visible) { + drm_err_once(drm, "No framebuffer or plane not visible, skipping update\n"); + goto update_cleanup; + } + + src_pixels = (__le32 *)vaddr; + + pixpaper_send_cmd(panel, PIXPAPER_CMD_DATA_START_TRANSMISSION, &err); + if (err.errno_code) + goto update_cleanup; + + pixpaper_wait_for_panel(panel); + + for (i = 0; i < PIXPAPER_HEIGHT; i++) { + for (j = 0; j < 
PIXPAPER_PANEL_BUFFER_TWO_BYTES_PER_ROW; j++) { + u8 packed_byte = + pack_pixels_to_byte(src_pixels, i, j, fb); + + pixpaper_wait_for_panel(panel); + pixpaper_send_data(panel, packed_byte, &err); + } + } + pixpaper_wait_for_panel(panel); + + pixpaper_send_cmd(panel, PIXPAPER_CMD_POWER_ON, &err); + if (err.errno_code) { + drm_err_once(drm, "Failed to send PON command: %d\n", err.errno_code); + goto update_cleanup; + } + pixpaper_wait_for_panel(panel); + + pixpaper_send_cmd(panel, PIXPAPER_CMD_DISPLAY_REFRESH, &err); + pixpaper_send_data(panel, PIXPAPER_DRF_VCOM_AC, &err); + if (err.errno_code) { + drm_err_once(drm, "Failed sending data after DRF: %d\n", err.errno_code); + goto update_cleanup; + } + pixpaper_wait_for_panel(panel); + +update_cleanup: + if (err.errno_code && err.errno_code != -ETIMEDOUT) + drm_err_once(drm, "Frame update function failed with error %d\n", err.errno_code); + + drm_dev_exit(idx); +} + +static const struct drm_display_mode pixpaper_mode = { + .clock = PIXPAPER_PIXEL_CLOCK, + .hdisplay = PIXPAPER_WIDTH, + .hsync_start = PIXPAPER_WIDTH + PIXPAPER_HFP, + .hsync_end = PIXPAPER_WIDTH + PIXPAPER_HFP + PIXPAPER_HSYNC, + .htotal = PIXPAPER_HTOTAL, + .vdisplay = PIXPAPER_HEIGHT, + .vsync_start = PIXPAPER_HEIGHT + PIXPAPER_VFP, + .vsync_end = PIXPAPER_HEIGHT + PIXPAPER_VFP + PIXPAPER_VSYNC, + .vtotal = PIXPAPER_VTOTAL, + .width_mm = PIXPAPER_WIDTH_MM, + .height_mm = PIXPAPER_HEIGHT_MM, + .type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED, +}; + +static int pixpaper_connector_get_modes(struct drm_connector *connector) +{ + return drm_connector_helper_get_modes_fixed(connector, &pixpaper_mode); +} + +static const struct drm_plane_funcs pixpaper_plane_funcs = { + .update_plane = drm_atomic_helper_update_plane, + .disable_plane = drm_atomic_helper_disable_plane, + .destroy = drm_plane_cleanup, + DRM_GEM_SHADOW_PLANE_FUNCS, +}; + +static const struct drm_plane_helper_funcs pixpaper_plane_helper_funcs = { + DRM_GEM_SHADOW_PLANE_HELPER_FUNCS, + .atomic_check = pixpaper_plane_helper_atomic_check, + .atomic_update = pixpaper_plane_atomic_update, +}; + +static const struct drm_crtc_funcs pixpaper_crtc_funcs = { + .set_config = drm_atomic_helper_set_config, + .page_flip = drm_atomic_helper_page_flip, + .reset = drm_atomic_helper_crtc_reset, + .destroy = drm_crtc_cleanup, + .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state, +}; + +static int pixpaper_mode_valid(struct drm_crtc *crtc, + const struct drm_display_mode *mode) +{ + if (mode->hdisplay == PIXPAPER_WIDTH && + mode->vdisplay == PIXPAPER_HEIGHT) { + return MODE_OK; + } + return MODE_BAD; +} + +static const struct drm_crtc_helper_funcs pixpaper_crtc_helper_funcs = { + .mode_valid = pixpaper_mode_valid, + .atomic_check = pixpaper_crtc_helper_atomic_check, + .atomic_enable = pixpaper_crtc_atomic_enable, + .atomic_disable = pixpaper_crtc_atomic_disable, +}; + +static const struct drm_encoder_funcs pixpaper_encoder_funcs = { + .destroy = drm_encoder_cleanup, +}; + +static const struct drm_connector_funcs pixpaper_connector_funcs = { + .reset = drm_atomic_helper_connector_reset, + .fill_modes = drm_helper_probe_single_connector_modes, + .destroy = drm_connector_cleanup, + .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state, + .atomic_destroy_state = drm_atomic_helper_connector_destroy_state, +}; + +static const struct drm_connector_helper_funcs pixpaper_connector_helper_funcs = { + .get_modes = pixpaper_connector_get_modes, +}; + 
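Editor's cross-check of the pixpaper timing and buffer arithmetic explained in the comments above. This block is not part of the patch; the literal values simply restate the PIXPAPER_* macros and the MSB-first packing rule used by pack_pixels_to_byte(), written as compile-time assertions.

#include <linux/build_bug.h>

static_assert(122 + 2 + 2 + 2 == 128);        /* hdisplay + hfp + hsync + hbp == htotal */
static_assert(250 + 2 + 2 + 51 == 305);       /* vdisplay + vfp + vsync + vbp == vtotal */
static_assert(128 * 305 * 50 / 1000 == 1952); /* pixel clock in kHz at 50 Hz refresh */
static_assert(128 / 4 * 250 == 8000);         /* 2 bpp: data bytes streamed per full refresh */
static_assert((0b00 << 6 | 0b01 << 4 | 0b10 << 2 | 0b11) == 0x1b); /* four 2-bit values pack MSB-first into one byte */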
+DEFINE_DRM_GEM_FOPS(pixpaper_fops); + +static struct drm_driver pixpaper_drm_driver = { + .driver_features = DRIVER_GEM | DRIVER_MODESET | DRIVER_ATOMIC, + .fops = &pixpaper_fops, + .name = "pixpaper", + .desc = "DRM driver for PIXPAPER e-ink", + .major = 1, + .minor = 0, + DRM_GEM_SHMEM_DRIVER_OPS, + DRM_FBDEV_SHMEM_DRIVER_OPS, +}; + +static const struct drm_mode_config_funcs pixpaper_mode_config_funcs = { + .fb_create = drm_gem_fb_create_with_dirty, + .atomic_check = drm_atomic_helper_check, + .atomic_commit = drm_atomic_helper_commit, +}; + +static int pixpaper_probe(struct spi_device *spi) +{ + struct device *dev = &spi->dev; + struct pixpaper_panel *panel; + struct drm_device *drm; + int ret; + + panel = devm_drm_dev_alloc(dev, &pixpaper_drm_driver, + struct pixpaper_panel, drm); + if (IS_ERR(panel)) + return PTR_ERR(panel); + + drm = &panel->drm; + panel->spi = spi; + spi_set_drvdata(spi, panel); + + spi->mode = SPI_MODE_0; + spi->bits_per_word = PIXPAPER_SPI_BITS_PER_WORD; + + if (!spi->max_speed_hz) { + drm_warn(drm, + "spi-max-frequency not specified in DT, using default %u Hz\n", + PIXPAPER_SPI_SPEED_DEFAULT); + spi->max_speed_hz = PIXPAPER_SPI_SPEED_DEFAULT; + } + + ret = spi_setup(spi); + if (ret < 0) { + drm_err(drm, "SPI setup failed: %d\n", ret); + return ret; + } + + if (!dev->dma_mask) + dev->dma_mask = &dev->coherent_dma_mask; + ret = dma_set_mask_and_coherent(dev, DMA_BIT_MASK(32)); + if (ret) { + drm_err(drm, "Failed to set DMA mask: %d\n", ret); + return ret; + } + + panel->reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_HIGH); + if (IS_ERR(panel->reset)) + return PTR_ERR(panel->reset); + + panel->busy = devm_gpiod_get(dev, "busy", GPIOD_IN); + if (IS_ERR(panel->busy)) + return PTR_ERR(panel->busy); + + panel->dc = devm_gpiod_get(dev, "dc", GPIOD_OUT_HIGH); + if (IS_ERR(panel->dc)) + return PTR_ERR(panel->dc); + + ret = pixpaper_panel_hw_init(panel); + if (ret) { + drm_err(drm, "Panel hardware initialization failed: %d\n", ret); + return ret; + } + + ret = drmm_mode_config_init(drm); + if (ret) + return ret; + drm->mode_config.funcs = &pixpaper_mode_config_funcs; + drm->mode_config.min_width = PIXPAPER_WIDTH; + drm->mode_config.max_width = PIXPAPER_WIDTH; + drm->mode_config.min_height = PIXPAPER_HEIGHT; + drm->mode_config.max_height = PIXPAPER_HEIGHT; + + ret = drm_universal_plane_init(drm, &panel->plane, 1, + &pixpaper_plane_funcs, + (const uint32_t[]){ DRM_FORMAT_XRGB8888 }, + 1, NULL, DRM_PLANE_TYPE_PRIMARY, NULL); + if (ret) + return ret; + drm_plane_helper_add(&panel->plane, &pixpaper_plane_helper_funcs); + + ret = drm_crtc_init_with_planes(drm, &panel->crtc, &panel->plane, NULL, + &pixpaper_crtc_funcs, NULL); + if (ret) + return ret; + drm_crtc_helper_add(&panel->crtc, &pixpaper_crtc_helper_funcs); + + ret = drm_encoder_init(drm, &panel->encoder, &pixpaper_encoder_funcs, + DRM_MODE_ENCODER_NONE, NULL); + if (ret) + return ret; + panel->encoder.possible_crtcs = drm_crtc_mask(&panel->crtc); + + ret = drm_connector_init(drm, &panel->connector, + &pixpaper_connector_funcs, + DRM_MODE_CONNECTOR_SPI); + if (ret) + return ret; + + drm_connector_helper_add(&panel->connector, + &pixpaper_connector_helper_funcs); + drm_connector_attach_encoder(&panel->connector, &panel->encoder); + + drm_mode_config_reset(drm); + + ret = drm_dev_register(drm, 0); + if (ret) + return ret; + + drm_client_setup(drm, NULL); + + return 0; +} + +static void pixpaper_remove(struct spi_device *spi) +{ + struct pixpaper_panel *panel = spi_get_drvdata(spi); + + if (!panel) + return; + + 
drm_dev_unplug(&panel->drm); + drm_atomic_helper_shutdown(&panel->drm); +} + +static const struct spi_device_id pixpaper_ids[] = { { "pixpaper", 0 }, {} }; +MODULE_DEVICE_TABLE(spi, pixpaper_ids); + +static const struct of_device_id pixpaper_dt_ids[] = { + { .compatible = "mayqueen,pixpaper" }, + {} +}; +MODULE_DEVICE_TABLE(of, pixpaper_dt_ids); + +static struct spi_driver pixpaper_spi_driver = { + .driver = { + .name = "pixpaper", + .of_match_table = pixpaper_dt_ids, + }, + .id_table = pixpaper_ids, + .probe = pixpaper_probe, + .remove = pixpaper_remove, +}; + +module_spi_driver(pixpaper_spi_driver); + +MODULE_AUTHOR("LiangCheng Wang"); +MODULE_DESCRIPTION("DRM SPI driver for PIXPAPER e-ink panel"); +MODULE_LICENSE("GPL"); diff --git a/drivers/gpu/drm/ttm/ttm_bo.c b/drivers/gpu/drm/ttm/ttm_bo.c index f4d9e68b21e7..29423ceeec5c 100644 --- a/drivers/gpu/drm/ttm/ttm_bo.c +++ b/drivers/gpu/drm/ttm/ttm_bo.c @@ -1283,3 +1283,18 @@ int ttm_bo_populate(struct ttm_buffer_object *bo, return 0; } EXPORT_SYMBOL(ttm_bo_populate); + +int ttm_bo_setup_export(struct ttm_buffer_object *bo, + struct ttm_operation_ctx *ctx) +{ + int ret; + + ret = ttm_bo_reserve(bo, false, false, NULL); + if (ret != 0) + return ret; + + ret = ttm_bo_populate(bo, ctx); + ttm_bo_unreserve(bo); + return ret; +} +EXPORT_SYMBOL(ttm_bo_setup_export); diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c index 2def155ce496..c5a3bbbc74c5 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.c +++ b/drivers/gpu/drm/v3d/v3d_drv.c @@ -157,12 +157,24 @@ v3d_open(struct drm_device *dev, struct drm_file *file) static void v3d_postclose(struct drm_device *dev, struct drm_file *file) { + struct v3d_dev *v3d = to_v3d_dev(dev); struct v3d_file_priv *v3d_priv = file->driver_priv; + unsigned long irqflags; enum v3d_queue q; - for (q = 0; q < V3D_MAX_QUEUES; q++) + for (q = 0; q < V3D_MAX_QUEUES; q++) { + struct v3d_queue_state *queue = &v3d->queue[q]; + struct v3d_job *job = queue->active_job; + drm_sched_entity_destroy(&v3d_priv->sched_entity[q]); + if (job && job->base.entity == &v3d_priv->sched_entity[q]) { + spin_lock_irqsave(&queue->queue_lock, irqflags); + job->file_priv = NULL; + spin_unlock_irqrestore(&queue->queue_lock, irqflags); + } + } + v3d_perfmon_close_file(v3d_priv); kfree(v3d_priv); } diff --git a/drivers/gpu/drm/v3d/v3d_drv.h b/drivers/gpu/drm/v3d/v3d_drv.h index 82d84a96235f..0317f3d7452a 100644 --- a/drivers/gpu/drm/v3d/v3d_drv.h +++ b/drivers/gpu/drm/v3d/v3d_drv.h @@ -58,6 +58,10 @@ struct v3d_queue_state { /* Stores the GPU stats for this queue in the global context. */ struct v3d_stats stats; + + /* Currently active job for this queue */ + struct v3d_job *active_job; + spinlock_t queue_lock; }; /* Performance monitor object. The perform lifetime is controlled by userspace @@ -159,18 +163,8 @@ struct v3d_dev { struct work_struct overflow_mem_work; - struct v3d_bin_job *bin_job; - struct v3d_render_job *render_job; - struct v3d_tfu_job *tfu_job; - struct v3d_csd_job *csd_job; - struct v3d_queue_state queue[V3D_MAX_QUEUES]; - /* Spinlock used to synchronize the overflow memory - * management against bin job submission. - */ - spinlock_t job_lock; - /* Used to track the active perfmon if any. */ struct v3d_perfmon *active_perfmon; @@ -327,9 +321,9 @@ struct v3d_job { struct v3d_perfmon *perfmon; /* File descriptor of the process that submitted the job that could be used - * for collecting stats by process of GPU usage. + * to collect per-process information about the GPU. 
*/ - struct drm_file *file; + struct v3d_file_priv *file_priv; /* Callback for the freeing of the job on refcount going to 0. */ void (*free)(struct kref *ref); @@ -570,7 +564,7 @@ void v3d_get_stats(const struct v3d_stats *stats, u64 timestamp, /* v3d_fence.c */ extern const struct dma_fence_ops v3d_fence_ops; -struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue); +struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue q); /* v3d_gem.c */ int v3d_gem_init(struct drm_device *dev); @@ -614,7 +608,7 @@ void v3d_timestamp_query_info_free(struct v3d_timestamp_query_info *query_info, unsigned int count); void v3d_performance_query_info_free(struct v3d_performance_query_info *query_info, unsigned int count); -void v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue); +void v3d_job_update_stats(struct v3d_job *job, enum v3d_queue q); int v3d_sched_init(struct v3d_dev *v3d); void v3d_sched_fini(struct v3d_dev *v3d); diff --git a/drivers/gpu/drm/v3d/v3d_fence.c b/drivers/gpu/drm/v3d/v3d_fence.c index 89840ed212c0..8f8471adae34 100644 --- a/drivers/gpu/drm/v3d/v3d_fence.c +++ b/drivers/gpu/drm/v3d/v3d_fence.c @@ -3,8 +3,9 @@ #include "v3d_drv.h" -struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue) +struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue q) { + struct v3d_queue_state *queue = &v3d->queue[q]; struct v3d_fence *fence; fence = kzalloc(sizeof(*fence), GFP_KERNEL); @@ -12,10 +13,10 @@ struct dma_fence *v3d_fence_create(struct v3d_dev *v3d, enum v3d_queue queue) return ERR_PTR(-ENOMEM); fence->dev = &v3d->drm; - fence->queue = queue; - fence->seqno = ++v3d->queue[queue].emit_seqno; - dma_fence_init(&fence->base, &v3d_fence_ops, &v3d->job_lock, - v3d->queue[queue].fence_context, fence->seqno); + fence->queue = q; + fence->seqno = ++queue->emit_seqno; + dma_fence_init(&fence->base, &v3d_fence_ops, &queue->queue_lock, + queue->fence_context, fence->seqno); return &fence->base; } diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c index 37bf5eecdd2c..c77d90aa9b82 100644 --- a/drivers/gpu/drm/v3d/v3d_gem.c +++ b/drivers/gpu/drm/v3d/v3d_gem.c @@ -271,10 +271,11 @@ v3d_gem_init(struct drm_device *dev) queue->fence_context = dma_fence_context_alloc(1); memset(&queue->stats, 0, sizeof(queue->stats)); seqcount_init(&queue->stats.lock); + + spin_lock_init(&queue->queue_lock); } spin_lock_init(&v3d->mm_lock); - spin_lock_init(&v3d->job_lock); ret = drmm_mutex_init(dev, &v3d->bo_lock); if (ret) return ret; @@ -324,6 +325,7 @@ void v3d_gem_destroy(struct drm_device *dev) { struct v3d_dev *v3d = to_v3d_dev(dev); + enum v3d_queue q; v3d_sched_fini(v3d); v3d_gemfs_fini(v3d); @@ -331,10 +333,8 @@ v3d_gem_destroy(struct drm_device *dev) /* Waiting for jobs to finish would need to be done before * unregistering V3D. 
*/ - WARN_ON(v3d->bin_job); - WARN_ON(v3d->render_job); - WARN_ON(v3d->tfu_job); - WARN_ON(v3d->csd_job); + for (q = 0; q < V3D_MAX_QUEUES; q++) + WARN_ON(v3d->queue[q].active_job); drm_mm_takedown(&v3d->mm); diff --git a/drivers/gpu/drm/v3d/v3d_irq.c b/drivers/gpu/drm/v3d/v3d_irq.c index a515a301e480..31ecc5b4ba5a 100644 --- a/drivers/gpu/drm/v3d/v3d_irq.c +++ b/drivers/gpu/drm/v3d/v3d_irq.c @@ -42,6 +42,8 @@ v3d_overflow_mem_work(struct work_struct *work) container_of(work, struct v3d_dev, overflow_mem_work); struct drm_device *dev = &v3d->drm; struct v3d_bo *bo = v3d_bo_create(dev, NULL /* XXX: GMP */, 256 * 1024); + struct v3d_queue_state *queue = &v3d->queue[V3D_BIN]; + struct v3d_bin_job *bin_job; struct drm_gem_object *obj; unsigned long irqflags; @@ -60,15 +62,17 @@ v3d_overflow_mem_work(struct work_struct *work) * bin job got scheduled, that's fine. We'll just give them * some binner pool anyway. */ - spin_lock_irqsave(&v3d->job_lock, irqflags); - if (!v3d->bin_job) { - spin_unlock_irqrestore(&v3d->job_lock, irqflags); + spin_lock_irqsave(&queue->queue_lock, irqflags); + bin_job = (struct v3d_bin_job *)queue->active_job; + + if (!bin_job) { + spin_unlock_irqrestore(&queue->queue_lock, irqflags); goto out; } drm_gem_object_get(obj); - list_add_tail(&bo->unref_head, &v3d->bin_job->render->unref_list); - spin_unlock_irqrestore(&v3d->job_lock, irqflags); + list_add_tail(&bo->unref_head, &bin_job->render->unref_list); + spin_unlock_irqrestore(&queue->queue_lock, irqflags); v3d_mmu_flush_all(v3d); @@ -79,6 +83,20 @@ out: drm_gem_object_put(obj); } +static void +v3d_irq_signal_fence(struct v3d_dev *v3d, enum v3d_queue q, + void (*trace_irq)(struct drm_device *, uint64_t)) +{ + struct v3d_queue_state *queue = &v3d->queue[q]; + struct v3d_fence *fence = to_v3d_fence(queue->active_job->irq_fence); + + v3d_job_update_stats(queue->active_job, q); + trace_irq(&v3d->drm, fence->seqno); + + queue->active_job = NULL; + dma_fence_signal(&fence->base); +} + static irqreturn_t v3d_irq(int irq, void *arg) { @@ -102,41 +120,17 @@ v3d_irq(int irq, void *arg) } if (intsts & V3D_INT_FLDONE) { - struct v3d_fence *fence = - to_v3d_fence(v3d->bin_job->base.irq_fence); - - v3d_job_update_stats(&v3d->bin_job->base, V3D_BIN); - trace_v3d_bcl_irq(&v3d->drm, fence->seqno); - - v3d->bin_job = NULL; - dma_fence_signal(&fence->base); - + v3d_irq_signal_fence(v3d, V3D_BIN, trace_v3d_bcl_irq); status = IRQ_HANDLED; } if (intsts & V3D_INT_FRDONE) { - struct v3d_fence *fence = - to_v3d_fence(v3d->render_job->base.irq_fence); - - v3d_job_update_stats(&v3d->render_job->base, V3D_RENDER); - trace_v3d_rcl_irq(&v3d->drm, fence->seqno); - - v3d->render_job = NULL; - dma_fence_signal(&fence->base); - + v3d_irq_signal_fence(v3d, V3D_RENDER, trace_v3d_rcl_irq); status = IRQ_HANDLED; } if (intsts & V3D_INT_CSDDONE(v3d->ver)) { - struct v3d_fence *fence = - to_v3d_fence(v3d->csd_job->base.irq_fence); - - v3d_job_update_stats(&v3d->csd_job->base, V3D_CSD); - trace_v3d_csd_irq(&v3d->drm, fence->seqno); - - v3d->csd_job = NULL; - dma_fence_signal(&fence->base); - + v3d_irq_signal_fence(v3d, V3D_CSD, trace_v3d_csd_irq); status = IRQ_HANDLED; } @@ -168,15 +162,7 @@ v3d_hub_irq(int irq, void *arg) V3D_WRITE(V3D_HUB_INT_CLR, intsts); if (intsts & V3D_HUB_INT_TFUC) { - struct v3d_fence *fence = - to_v3d_fence(v3d->tfu_job->base.irq_fence); - - v3d_job_update_stats(&v3d->tfu_job->base, V3D_TFU); - trace_v3d_tfu_irq(&v3d->drm, fence->seqno); - - v3d->tfu_job = NULL; - dma_fence_signal(&fence->base); - + v3d_irq_signal_fence(v3d, V3D_TFU, 
trace_v3d_tfu_irq); status = IRQ_HANDLED; } diff --git a/drivers/gpu/drm/v3d/v3d_sched.c b/drivers/gpu/drm/v3d/v3d_sched.c index f9d9a198d718..0ec06bfbbebb 100644 --- a/drivers/gpu/drm/v3d/v3d_sched.c +++ b/drivers/gpu/drm/v3d/v3d_sched.c @@ -139,7 +139,7 @@ static void v3d_job_start_stats(struct v3d_job *job, enum v3d_queue queue) { struct v3d_dev *v3d = job->v3d; - struct v3d_file_priv *file = job->file->driver_priv; + struct v3d_file_priv *file = job->file_priv; struct v3d_stats *global_stats = &v3d->queue[queue].stats; struct v3d_stats *local_stats = &file->stats[queue]; u64 now = local_clock(); @@ -194,11 +194,11 @@ v3d_stats_update(struct v3d_stats *stats, u64 now) } void -v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue) +v3d_job_update_stats(struct v3d_job *job, enum v3d_queue q) { struct v3d_dev *v3d = job->v3d; - struct v3d_file_priv *file = job->file->driver_priv; - struct v3d_stats *global_stats = &v3d->queue[queue].stats; + struct v3d_queue_state *queue = &v3d->queue[q]; + struct v3d_stats *global_stats = &queue->stats; u64 now = local_clock(); unsigned long flags; @@ -209,10 +209,10 @@ v3d_job_update_stats(struct v3d_job *job, enum v3d_queue queue) preempt_disable(); /* Don't update the local stats if the file context has already closed */ - if (file) - v3d_stats_update(&file->stats[queue], now); - else - drm_dbg(&v3d->drm, "The file descriptor was closed before job completion\n"); + spin_lock(&queue->queue_lock); + if (job->file_priv) + v3d_stats_update(&job->file_priv->stats[q], now); + spin_unlock(&queue->queue_lock); v3d_stats_update(global_stats, now); @@ -226,27 +226,28 @@ static struct dma_fence *v3d_bin_job_run(struct drm_sched_job *sched_job) { struct v3d_bin_job *job = to_bin_job(sched_job); struct v3d_dev *v3d = job->base.v3d; + struct v3d_queue_state *queue = &v3d->queue[V3D_BIN]; struct drm_device *dev = &v3d->drm; struct dma_fence *fence; unsigned long irqflags; if (unlikely(job->base.base.s_fence->finished.error)) { - spin_lock_irqsave(&v3d->job_lock, irqflags); - v3d->bin_job = NULL; - spin_unlock_irqrestore(&v3d->job_lock, irqflags); + spin_lock_irqsave(&queue->queue_lock, irqflags); + queue->active_job = NULL; + spin_unlock_irqrestore(&queue->queue_lock, irqflags); return NULL; } /* Lock required around bin_job update vs * v3d_overflow_mem_work(). */ - spin_lock_irqsave(&v3d->job_lock, irqflags); - v3d->bin_job = job; + spin_lock_irqsave(&queue->queue_lock, irqflags); + queue->active_job = &job->base; /* Clear out the overflow allocation, so we don't * reuse the overflow attached to a previous job. */ V3D_CORE_WRITE(0, V3D_PTB_BPOS, 0); - spin_unlock_irqrestore(&v3d->job_lock, irqflags); + spin_unlock_irqrestore(&queue->queue_lock, irqflags); v3d_invalidate_caches(v3d); @@ -290,11 +291,11 @@ static struct dma_fence *v3d_render_job_run(struct drm_sched_job *sched_job) struct dma_fence *fence; if (unlikely(job->base.base.s_fence->finished.error)) { - v3d->render_job = NULL; + v3d->queue[V3D_RENDER].active_job = NULL; return NULL; } - v3d->render_job = job; + v3d->queue[V3D_RENDER].active_job = &job->base; /* Can we avoid this flush? 
We need to be careful of * scheduling, though -- imagine job0 rendering to texture and @@ -338,11 +339,11 @@ v3d_tfu_job_run(struct drm_sched_job *sched_job) struct dma_fence *fence; if (unlikely(job->base.base.s_fence->finished.error)) { - v3d->tfu_job = NULL; + v3d->queue[V3D_TFU].active_job = NULL; return NULL; } - v3d->tfu_job = job; + v3d->queue[V3D_TFU].active_job = &job->base; fence = v3d_fence_create(v3d, V3D_TFU); if (IS_ERR(fence)) @@ -386,11 +387,11 @@ v3d_csd_job_run(struct drm_sched_job *sched_job) int i, csd_cfg0_reg; if (unlikely(job->base.base.s_fence->finished.error)) { - v3d->csd_job = NULL; + v3d->queue[V3D_CSD].active_job = NULL; return NULL; } - v3d->csd_job = job; + v3d->queue[V3D_CSD].active_job = &job->base; v3d_invalidate_caches(v3d); @@ -574,7 +575,7 @@ static void v3d_reset_performance_queries(struct v3d_cpu_job *job) { struct v3d_performance_query_info *performance_query = &job->performance_query; - struct v3d_file_priv *v3d_priv = job->base.file->driver_priv; + struct v3d_file_priv *v3d_priv = job->base.file_priv; struct v3d_dev *v3d = job->base.v3d; struct v3d_perfmon *perfmon; @@ -604,7 +605,7 @@ v3d_write_performance_query_result(struct v3d_cpu_job *job, void *data, { struct v3d_performance_query_info *performance_query = &job->performance_query; - struct v3d_file_priv *v3d_priv = job->base.file->driver_priv; + struct v3d_file_priv *v3d_priv = job->base.file_priv; struct v3d_performance_query *perf_query = &performance_query->queries[query]; struct v3d_dev *v3d = job->base.v3d; @@ -700,6 +701,7 @@ v3d_cpu_job_run(struct drm_sched_job *sched_job) trace_v3d_cpu_job_end(&v3d->drm, job->job_type); v3d_job_update_stats(&job->base, V3D_CPU); + /* Synchronous operation, so no fence to wait on. */ return NULL; } @@ -715,21 +717,24 @@ v3d_cache_clean_job_run(struct drm_sched_job *sched_job) v3d_job_update_stats(job, V3D_CACHE_CLEAN); + /* Synchronous operation, so no fence to wait on. */ return NULL; } static enum drm_gpu_sched_stat -v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job) +v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job, + enum v3d_queue q) { struct v3d_job *job = to_v3d_job(sched_job); - struct v3d_file_priv *v3d_priv = job->file->driver_priv; - enum v3d_queue q; + struct v3d_file_priv *v3d_priv = job->file_priv; + unsigned long irqflags; + enum v3d_queue i; mutex_lock(&v3d->reset_lock); /* block scheduler */ - for (q = 0; q < V3D_MAX_QUEUES; q++) - drm_sched_stop(&v3d->queue[q].sched, sched_job); + for (i = 0; i < V3D_MAX_QUEUES; i++) + drm_sched_stop(&v3d->queue[i].sched, sched_job); if (sched_job) drm_sched_increase_karma(sched_job); @@ -738,15 +743,17 @@ v3d_gpu_reset_for_timeout(struct v3d_dev *v3d, struct drm_sched_job *sched_job) v3d_reset(v3d); v3d->reset_counter++; - v3d_priv->reset_counter++; + spin_lock_irqsave(&v3d->queue[q].queue_lock, irqflags); + if (v3d_priv) + v3d_priv->reset_counter++; + spin_unlock_irqrestore(&v3d->queue[q].queue_lock, irqflags); - for (q = 0; q < V3D_MAX_QUEUES; q++) - drm_sched_resubmit_jobs(&v3d->queue[q].sched); + for (i = 0; i < V3D_MAX_QUEUES; i++) + drm_sched_resubmit_jobs(&v3d->queue[i].sched); /* Unblock schedulers and restart their jobs. 
*/ - for (q = 0; q < V3D_MAX_QUEUES; q++) { - drm_sched_start(&v3d->queue[q].sched, 0); - } + for (i = 0; i < V3D_MAX_QUEUES; i++) + drm_sched_start(&v3d->queue[i].sched, 0); mutex_unlock(&v3d->reset_lock); @@ -774,7 +781,7 @@ v3d_cl_job_timedout(struct drm_sched_job *sched_job, enum v3d_queue q, return DRM_GPU_SCHED_STAT_NO_HANG; } - return v3d_gpu_reset_for_timeout(v3d, sched_job); + return v3d_gpu_reset_for_timeout(v3d, sched_job, q); } static enum drm_gpu_sched_stat @@ -796,11 +803,11 @@ v3d_render_job_timedout(struct drm_sched_job *sched_job) } static enum drm_gpu_sched_stat -v3d_generic_job_timedout(struct drm_sched_job *sched_job) +v3d_tfu_job_timedout(struct drm_sched_job *sched_job) { struct v3d_job *job = to_v3d_job(sched_job); - return v3d_gpu_reset_for_timeout(job->v3d, sched_job); + return v3d_gpu_reset_for_timeout(job->v3d, sched_job, V3D_TFU); } static enum drm_gpu_sched_stat @@ -819,7 +826,7 @@ v3d_csd_job_timedout(struct drm_sched_job *sched_job) return DRM_GPU_SCHED_STAT_NO_HANG; } - return v3d_gpu_reset_for_timeout(v3d, sched_job); + return v3d_gpu_reset_for_timeout(v3d, sched_job, V3D_CSD); } static const struct drm_sched_backend_ops v3d_bin_sched_ops = { @@ -836,7 +843,7 @@ static const struct drm_sched_backend_ops v3d_render_sched_ops = { static const struct drm_sched_backend_ops v3d_tfu_sched_ops = { .run_job = v3d_tfu_job_run, - .timedout_job = v3d_generic_job_timedout, + .timedout_job = v3d_tfu_job_timedout, .free_job = v3d_sched_job_free, }; @@ -848,13 +855,11 @@ static const struct drm_sched_backend_ops v3d_csd_sched_ops = { static const struct drm_sched_backend_ops v3d_cache_clean_sched_ops = { .run_job = v3d_cache_clean_job_run, - .timedout_job = v3d_generic_job_timedout, .free_job = v3d_sched_job_free }; static const struct drm_sched_backend_ops v3d_cpu_sched_ops = { .run_job = v3d_cpu_job_run, - .timedout_job = v3d_generic_job_timedout, .free_job = v3d_cpu_job_free }; diff --git a/drivers/gpu/drm/v3d/v3d_submit.c b/drivers/gpu/drm/v3d/v3d_submit.c index 5171ffe9012d..f3652e90683c 100644 --- a/drivers/gpu/drm/v3d/v3d_submit.c +++ b/drivers/gpu/drm/v3d/v3d_submit.c @@ -166,7 +166,7 @@ v3d_job_init(struct v3d_dev *v3d, struct drm_file *file_priv, job->v3d = v3d; job->free = free; - job->file = file_priv; + job->file_priv = v3d_priv; ret = drm_sched_job_init(&job->base, &v3d_priv->sched_entity[queue], 1, v3d_priv, file_priv->client_id); diff --git a/drivers/gpu/drm/vkms/tests/vkms_config_test.c b/drivers/gpu/drm/vkms/tests/vkms_config_test.c index ff4566cf9925..b0d78a81d2df 100644 --- a/drivers/gpu/drm/vkms/tests/vkms_config_test.c +++ b/drivers/gpu/drm/vkms/tests/vkms_config_test.c @@ -200,6 +200,7 @@ static void vkms_config_test_get_planes(struct kunit *test) KUNIT_ASSERT_EQ(test, n_planes, 0); plane_cfg1 = vkms_config_create_plane(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg1); vkms_config_for_each_plane(config, plane_cfg) { n_planes++; if (plane_cfg != plane_cfg1) @@ -209,6 +210,7 @@ static void vkms_config_test_get_planes(struct kunit *test) n_planes = 0; plane_cfg2 = vkms_config_create_plane(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg2); vkms_config_for_each_plane(config, plane_cfg) { n_planes++; if (plane_cfg != plane_cfg1 && plane_cfg != plane_cfg2) @@ -242,6 +244,7 @@ static void vkms_config_test_get_crtcs(struct kunit *test) KUNIT_FAIL(test, "Unexpected CRTC"); crtc_cfg1 = vkms_config_create_crtc(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg1); KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 1); 
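/*
 * Editor's sketch (not part of the patch): the create-then-assert pattern used
 * throughout these KUnit tests could be wrapped in small helpers so each
 * allocation reads as one line. The helper name and return type below are
 * assumptions for illustration only.
 */
static struct vkms_config_crtc *
vkms_config_test_create_crtc(struct kunit *test, struct vkms_config *config)
{
	struct vkms_config_crtc *crtc_cfg = vkms_config_create_crtc(config);

	/* Aborts the test on allocation failure, like the open-coded asserts. */
	KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg);
	return crtc_cfg;
}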
vkms_config_for_each_crtc(config, crtc_cfg) { if (crtc_cfg != crtc_cfg1) @@ -249,6 +252,7 @@ static void vkms_config_test_get_crtcs(struct kunit *test) } crtc_cfg2 = vkms_config_create_crtc(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg2); KUNIT_ASSERT_EQ(test, vkms_config_get_num_crtcs(config), 2); vkms_config_for_each_crtc(config, crtc_cfg) { if (crtc_cfg != crtc_cfg1 && crtc_cfg != crtc_cfg2) @@ -280,6 +284,7 @@ static void vkms_config_test_get_encoders(struct kunit *test) KUNIT_ASSERT_EQ(test, n_encoders, 0); encoder_cfg1 = vkms_config_create_encoder(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg1); vkms_config_for_each_encoder(config, encoder_cfg) { n_encoders++; if (encoder_cfg != encoder_cfg1) @@ -289,6 +294,7 @@ static void vkms_config_test_get_encoders(struct kunit *test) n_encoders = 0; encoder_cfg2 = vkms_config_create_encoder(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg2); vkms_config_for_each_encoder(config, encoder_cfg) { n_encoders++; if (encoder_cfg != encoder_cfg1 && encoder_cfg != encoder_cfg2) @@ -324,6 +330,7 @@ static void vkms_config_test_get_connectors(struct kunit *test) KUNIT_ASSERT_EQ(test, n_connectors, 0); connector_cfg1 = vkms_config_create_connector(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg1); vkms_config_for_each_connector(config, connector_cfg) { n_connectors++; if (connector_cfg != connector_cfg1) @@ -333,6 +340,7 @@ static void vkms_config_test_get_connectors(struct kunit *test) n_connectors = 0; connector_cfg2 = vkms_config_create_connector(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg2); vkms_config_for_each_connector(config, connector_cfg) { n_connectors++; if (connector_cfg != connector_cfg1 && @@ -370,7 +378,7 @@ static void vkms_config_test_invalid_plane_number(struct kunit *test) /* Invalid: Too many planes */ for (n = 0; n <= 32; n++) - vkms_config_create_plane(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vkms_config_create_plane(config)); KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); @@ -395,6 +403,7 @@ static void vkms_config_test_valid_plane_type(struct kunit *test) /* Invalid: No primary plane */ plane_cfg = vkms_config_create_plane(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg); vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_OVERLAY); err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg); KUNIT_EXPECT_EQ(test, err, 0); @@ -402,11 +411,13 @@ static void vkms_config_test_valid_plane_type(struct kunit *test) /* Invalid: Multiple primary planes */ plane_cfg = vkms_config_create_plane(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg); vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY); err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg); KUNIT_EXPECT_EQ(test, err, 0); plane_cfg = vkms_config_create_plane(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg); vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY); err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg); KUNIT_EXPECT_EQ(test, err, 0); @@ -419,11 +430,13 @@ static void vkms_config_test_valid_plane_type(struct kunit *test) /* Invalid: Multiple cursor planes */ plane_cfg = vkms_config_create_plane(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg); vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR); err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg); KUNIT_EXPECT_EQ(test, err, 0); plane_cfg = vkms_config_create_plane(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg); vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_CURSOR); err 
= vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg); KUNIT_EXPECT_EQ(test, err, 0); @@ -437,12 +450,16 @@ static void vkms_config_test_valid_plane_type(struct kunit *test) /* Invalid: Second CRTC without primary plane */ crtc_cfg = vkms_config_create_crtc(config); encoder_cfg = vkms_config_create_encoder(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg); + err = vkms_config_encoder_attach_crtc(encoder_cfg, crtc_cfg); KUNIT_EXPECT_EQ(test, err, 0); KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); /* Valid: Second CRTC with a primary plane */ plane_cfg = vkms_config_create_plane(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg); vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY); err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg); KUNIT_EXPECT_EQ(test, err, 0); @@ -486,7 +503,7 @@ static void vkms_config_test_invalid_crtc_number(struct kunit *test) /* Invalid: Too many CRTCs */ for (n = 0; n <= 32; n++) - vkms_config_create_crtc(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vkms_config_create_crtc(config)); KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); @@ -509,7 +526,7 @@ static void vkms_config_test_invalid_encoder_number(struct kunit *test) /* Invalid: Too many encoders */ for (n = 0; n <= 32; n++) - vkms_config_create_encoder(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vkms_config_create_encoder(config)); KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); @@ -531,12 +548,15 @@ static void vkms_config_test_valid_encoder_possible_crtcs(struct kunit *test) /* Invalid: Encoder without a possible CRTC */ encoder_cfg = vkms_config_create_encoder(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg); KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); /* Valid: Second CRTC with shared encoder */ crtc_cfg2 = vkms_config_create_crtc(config); - plane_cfg = vkms_config_create_plane(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg2); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg); + vkms_config_plane_set_type(plane_cfg, DRM_PLANE_TYPE_PRIMARY); err = vkms_config_plane_attach_crtc(plane_cfg, crtc_cfg2); KUNIT_EXPECT_EQ(test, err, 0); @@ -577,7 +597,7 @@ static void vkms_config_test_invalid_connector_number(struct kunit *test) /* Invalid: Too many connectors */ for (n = 0; n <= 32; n++) - connector_cfg = vkms_config_create_connector(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, vkms_config_create_connector(config)); KUNIT_EXPECT_FALSE(test, vkms_config_is_valid(config)); @@ -669,13 +689,19 @@ static void vkms_config_test_plane_attach_crtc(struct kunit *test) KUNIT_ASSERT_NOT_ERR_OR_NULL(test, config); overlay_cfg = vkms_config_create_plane(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, overlay_cfg); vkms_config_plane_set_type(overlay_cfg, DRM_PLANE_TYPE_OVERLAY); + primary_cfg = vkms_config_create_plane(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, primary_cfg); vkms_config_plane_set_type(primary_cfg, DRM_PLANE_TYPE_PRIMARY); + cursor_cfg = vkms_config_create_plane(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, cursor_cfg); vkms_config_plane_set_type(cursor_cfg, DRM_PLANE_TYPE_CURSOR); crtc_cfg = vkms_config_create_crtc(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg); /* No primary or cursor planes */ KUNIT_EXPECT_NULL(test, vkms_config_crtc_primary_plane(config, crtc_cfg)); @@ -735,6 +761,11 @@ static void vkms_config_test_plane_get_possible_crtcs(struct kunit *test) crtc_cfg1 = vkms_config_create_crtc(config); crtc_cfg2 = vkms_config_create_crtc(config); + 
KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg1); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, plane_cfg2); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg1); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg2); + /* No possible CRTCs */ vkms_config_plane_for_each_possible_crtc(plane_cfg1, idx, possible_crtc) KUNIT_FAIL(test, "Unexpected possible CRTC"); @@ -799,6 +830,11 @@ static void vkms_config_test_encoder_get_possible_crtcs(struct kunit *test) crtc_cfg1 = vkms_config_create_crtc(config); crtc_cfg2 = vkms_config_create_crtc(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg1); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg2); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg1); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, crtc_cfg2); + /* No possible CRTCs */ vkms_config_encoder_for_each_possible_crtc(encoder_cfg1, idx, possible_crtc) KUNIT_FAIL(test, "Unexpected possible CRTC"); @@ -863,6 +899,11 @@ static void vkms_config_test_connector_get_possible_encoders(struct kunit *test) encoder_cfg1 = vkms_config_create_encoder(config); encoder_cfg2 = vkms_config_create_encoder(config); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg1); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, connector_cfg2); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg1); + KUNIT_ASSERT_NOT_ERR_OR_NULL(test, encoder_cfg2); + /* No possible encoders */ vkms_config_connector_for_each_possible_encoder(connector_cfg1, idx, possible_encoder) diff --git a/drivers/gpu/drm/vkms/tests/vkms_format_test.c b/drivers/gpu/drm/vkms/tests/vkms_format_test.c index 2e1daef94831..a7788fbc45dc 100644 --- a/drivers/gpu/drm/vkms/tests/vkms_format_test.c +++ b/drivers/gpu/drm/vkms/tests/vkms_format_test.c @@ -14,20 +14,20 @@ MODULE_IMPORT_NS("EXPORTED_FOR_KUNIT_TESTING"); /** - * struct pixel_yuv_u8 - Internal representation of a pixel color. - * @y: Luma value, stored in 8 bits, without padding, using + * struct pixel_yuv_u16 - Internal representation of a pixel color. + * @y: Luma value, stored in 16 bits, without padding, using * machine endianness - * @u: Blue difference chroma value, stored in 8 bits, without padding, using + * @u: Blue difference chroma value, stored in 16 bits, without padding, using * machine endianness - * @v: Red difference chroma value, stored in 8 bits, without padding, using + * @v: Red difference chroma value, stored in 16 bits, without padding, using * machine endianness */ -struct pixel_yuv_u8 { - u8 y, u, v; +struct pixel_yuv_u16 { + u16 y, u, v; }; /* - * struct yuv_u8_to_argb_u16_case - Reference values to test the color + * struct yuv_u16_to_argb_u16_case - Reference values to test the color * conversions in VKMS between YUV to ARGB * * @encoding: Encoding used to convert RGB to YUV @@ -39,13 +39,13 @@ struct pixel_yuv_u8 { * @format_pair.yuv: Same color as @format_pair.rgb, but converted to * YUV using @encoding and @range. 
*/ -struct yuv_u8_to_argb_u16_case { +struct yuv_u16_to_argb_u16_case { enum drm_color_encoding encoding; enum drm_color_range range; size_t n_colors; struct format_pair { char *name; - struct pixel_yuv_u8 yuv; + struct pixel_yuv_u16 yuv; struct pixel_argb_u16 argb; } colors[TEST_BUFF_SIZE]; }; @@ -57,14 +57,14 @@ struct yuv_u8_to_argb_u16_case { * For more information got to the docs: * https://colour.readthedocs.io/en/master/generated/colour.RGB_to_YCbCr.html */ -static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = { +static struct yuv_u16_to_argb_u16_case yuv_u16_to_argb_u16_cases[] = { /* * colour.RGB_to_YCbCr(<rgb color in 16 bit form>, * K=colour.WEIGHTS_YCBCR["ITU-R BT.601"], * in_bits = 16, * in_legal = False, * in_int = True, - * out_bits = 8, + * out_bits = 16, * out_legal = False, * out_int = True) * @@ -76,13 +76,13 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = { .range = DRM_COLOR_YCBCR_FULL_RANGE, .n_colors = 6, .colors = { - { "white", { 0xff, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }}, - { "gray", { 0x80, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }}, - { "black", { 0x00, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }}, - { "red", { 0x4c, 0x55, 0xff }, { 0xffff, 0xffff, 0x0000, 0x0000 }}, - { "green", { 0x96, 0x2c, 0x15 }, { 0xffff, 0x0000, 0xffff, 0x0000 }}, - { "blue", { 0x1d, 0xff, 0x6b }, { 0xffff, 0x0000, 0x0000, 0xffff }}, - }, + { "white", { 0xffff, 0x8000, 0x8000 }, { 0xffff, 0xffff, 0xffff, 0xffff }}, + { "gray", { 0x8080, 0x8000, 0x8000 }, { 0xffff, 0x8080, 0x8080, 0x8080 }}, + { "black", { 0x0000, 0x8000, 0x8000 }, { 0xffff, 0x0000, 0x0000, 0x0000 }}, + { "red", { 0x4c8b, 0x54ce, 0xffff }, { 0xffff, 0xffff, 0x0000, 0x0000 }}, + { "green", { 0x9645, 0x2b33, 0x14d1 }, { 0xffff, 0x0000, 0xffff, 0x0000 }}, + { "blue", { 0x1d2f, 0xffff, 0x6b2f }, { 0xffff, 0x0000, 0x0000, 0xffff }}, + } }, /* * colour.RGB_to_YCbCr(<rgb color in 16 bit form>, @@ -90,7 +90,7 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = { * in_bits = 16, * in_legal = False, * in_int = True, - * out_bits = 8, + * out_bits = 16, * out_legal = True, * out_int = True) * Tests cases for color conversion generated by converting RGB @@ -101,13 +101,13 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = { .range = DRM_COLOR_YCBCR_LIMITED_RANGE, .n_colors = 6, .colors = { - { "white", { 0xeb, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }}, - { "gray", { 0x7e, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }}, - { "black", { 0x10, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }}, - { "red", { 0x51, 0x5a, 0xf0 }, { 0xffff, 0xffff, 0x0000, 0x0000 }}, - { "green", { 0x91, 0x36, 0x22 }, { 0xffff, 0x0000, 0xffff, 0x0000 }}, - { "blue", { 0x29, 0xf0, 0x6e }, { 0xffff, 0x0000, 0x0000, 0xffff }}, - }, + { "white", { 0xeb00, 0x8000, 0x8000 }, { 0xffff, 0xffff, 0xffff, 0xffff }}, + { "gray", { 0x7dee, 0x8000, 0x8000 }, { 0xffff, 0x8080, 0x8080, 0x8080 }}, + { "black", { 0x1000, 0x8000, 0x8000 }, { 0xffff, 0x0000, 0x0000, 0x0000 }}, + { "red", { 0x517b, 0x5a34, 0xf000 }, { 0xffff, 0xffff, 0x0000, 0x0000 }}, + { "green", { 0x908e, 0x35cc, 0x2237 }, { 0xffff, 0x0000, 0xffff, 0x0000 }}, + { "blue", { 0x28f7, 0xf000, 0x6dc9 }, { 0xffff, 0x0000, 0x0000, 0xffff }}, + } }, /* * colour.RGB_to_YCbCr(<rgb color in 16 bit form>, @@ -115,7 +115,7 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = { * in_bits = 16, * in_legal = False, * in_int = True, - * out_bits = 8, + * out_bits = 16, * out_legal = False, * out_int = 
True) * Tests cases for color conversion generated by converting RGB @@ -126,21 +126,21 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = { .range = DRM_COLOR_YCBCR_FULL_RANGE, .n_colors = 6, .colors = { - { "white", { 0xff, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }}, - { "gray", { 0x80, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }}, - { "black", { 0x00, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }}, - { "red", { 0x36, 0x63, 0xff }, { 0xffff, 0xffff, 0x0000, 0x0000 }}, - { "green", { 0xb6, 0x1e, 0x0c }, { 0xffff, 0x0000, 0xffff, 0x0000 }}, - { "blue", { 0x12, 0xff, 0x74 }, { 0xffff, 0x0000, 0x0000, 0xffff }}, - }, + { "white", { 0xffff, 0x8000, 0x8000 }, { 0xffff, 0xffff, 0xffff, 0xffff }}, + { "gray", { 0x8080, 0x8000, 0x8000 }, { 0xffff, 0x8080, 0x8080, 0x8080 }}, + { "black", { 0x0000, 0x8000, 0x8000 }, { 0xffff, 0x0000, 0x0000, 0x0000 }}, + { "red", { 0x366d, 0x62ac, 0xffff }, { 0xffff, 0xffff, 0x0000, 0x0000 }}, + { "green", { 0xb717, 0x1d55, 0x0bbd }, { 0xffff, 0x0000, 0xffff, 0x0000 }}, + { "blue", { 0x127c, 0xffff, 0x7443 }, { 0xffff, 0x0000, 0x0000, 0xffff }}, + } }, /* * colour.RGB_to_YCbCr(<rgb color in 16 bit form>, * K=colour.WEIGHTS_YCBCR["ITU-R BT.709"], * in_bits = 16, - * int_legal = False, + * in_legal = False, * in_int = True, - * out_bits = 8, + * out_bits = 16, * out_legal = True, * out_int = True) * Tests cases for color conversion generated by converting RGB @@ -151,13 +151,13 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = { .range = DRM_COLOR_YCBCR_LIMITED_RANGE, .n_colors = 6, .colors = { - { "white", { 0xeb, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }}, - { "gray", { 0x7e, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }}, - { "black", { 0x10, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }}, - { "red", { 0x3f, 0x66, 0xf0 }, { 0xffff, 0xffff, 0x0000, 0x0000 }}, - { "green", { 0xad, 0x2a, 0x1a }, { 0xffff, 0x0000, 0xffff, 0x0000 }}, - { "blue", { 0x20, 0xf0, 0x76 }, { 0xffff, 0x0000, 0x0000, 0xffff }}, - }, + { "white", { 0xeb00, 0x8000, 0x8000 }, { 0xffff, 0xffff, 0xffff, 0xffff }}, + { "gray", { 0x7dee, 0x8000, 0x8000 }, { 0xffff, 0x8080, 0x8080, 0x8080 }}, + { "black", { 0x1000, 0x8000, 0x8000 }, { 0xffff, 0x0000, 0x0000, 0x0000 }}, + { "red", { 0x3e8f, 0x6656, 0xf000 }, { 0xffff, 0xffff, 0x0000, 0x0000 }}, + { "green", { 0xaca1, 0x29aa, 0x1a45 }, { 0xffff, 0x0000, 0xffff, 0x0000 }}, + { "blue", { 0x1fd0, 0xf000, 0x75bb }, { 0xffff, 0x0000, 0x0000, 0xffff }}, + } }, /* * colour.RGB_to_YCbCr(<rgb color in 16 bit form>, @@ -165,7 +165,7 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = { * in_bits = 16, * in_legal = False, * in_int = True, - * out_bits = 8, + * out_bits = 16, * out_legal = False, * out_int = True) * Tests cases for color conversion generated by converting RGB @@ -176,13 +176,13 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = { .range = DRM_COLOR_YCBCR_FULL_RANGE, .n_colors = 6, .colors = { - { "white", { 0xff, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }}, - { "gray", { 0x80, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }}, - { "black", { 0x00, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }}, - { "red", { 0x43, 0x5c, 0xff }, { 0xffff, 0xffff, 0x0000, 0x0000 }}, - { "green", { 0xad, 0x24, 0x0b }, { 0xffff, 0x0000, 0xffff, 0x0000 }}, - { "blue", { 0x0f, 0xff, 0x76 }, { 0xffff, 0x0000, 0x0000, 0xffff }}, - }, + { "white", { 0xffff, 0x8000, 0x8000 }, { 0xffff, 0xffff, 0xffff, 0xffff }}, + { "gray", { 0x8080, 0x8000, 0x8000 }, { 0xffff, 
0x8080, 0x8080, 0x8080 }}, + { "black", { 0x0000, 0x8000, 0x8000 }, { 0xffff, 0x0000, 0x0000, 0x0000 }}, + { "red", { 0x4340, 0x5c41, 0xffff }, { 0xffff, 0xffff, 0x0000, 0x0000 }}, + { "green", { 0xad91, 0x23bf, 0x0a4c }, { 0xffff, 0x0000, 0xffff, 0x0000 }}, + { "blue", { 0x0f2e, 0xffff, 0x75b5 }, { 0xffff, 0x0000, 0x0000, 0xffff }}, + } }, /* * colour.RGB_to_YCbCr(<rgb color in 16 bit form>, @@ -190,7 +190,7 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = { * in_bits = 16, * in_legal = False, * in_int = True, - * out_bits = 8, + * out_bits = 16, * out_legal = True, * out_int = True) * Tests cases for color conversion generated by converting RGB @@ -201,32 +201,30 @@ static struct yuv_u8_to_argb_u16_case yuv_u8_to_argb_u16_cases[] = { .range = DRM_COLOR_YCBCR_LIMITED_RANGE, .n_colors = 6, .colors = { - { "white", { 0xeb, 0x80, 0x80 }, { 0xffff, 0xffff, 0xffff, 0xffff }}, - { "gray", { 0x7e, 0x80, 0x80 }, { 0xffff, 0x8080, 0x8080, 0x8080 }}, - { "black", { 0x10, 0x80, 0x80 }, { 0xffff, 0x0000, 0x0000, 0x0000 }}, - { "red", { 0x4a, 0x61, 0xf0 }, { 0xffff, 0xffff, 0x0000, 0x0000 }}, - { "green", { 0xa4, 0x2f, 0x19 }, { 0xffff, 0x0000, 0xffff, 0x0000 }}, - { "blue", { 0x1d, 0xf0, 0x77 }, { 0xffff, 0x0000, 0x0000, 0xffff }}, - }, + { "white", { 0xeb00, 0x8000, 0x8000 }, { 0xffff, 0xffff, 0xffff, 0xffff }}, + { "gray", { 0x7dee, 0x8000, 0x8000 }, { 0xffff, 0x8080, 0x8080, 0x8080 }}, + { "black", { 0x1000, 0x8000, 0x8000 }, { 0xffff, 0x0000, 0x0000, 0x0000 }}, + { "red", { 0x4988, 0x60b9, 0xf000 }, { 0xffff, 0xffff, 0x0000, 0x0000 }}, + { "green", { 0xa47b, 0x2f47, 0x1902 }, { 0xffff, 0x0000, 0xffff, 0x0000 }}, + { "blue", { 0x1cfd, 0xf000, 0x76fe }, { 0xffff, 0x0000, 0x0000, 0xffff }}, + } }, }; /* - * vkms_format_test_yuv_u8_to_argb_u16 - Testing the conversion between YUV + * vkms_format_test_yuv_u16_to_argb_u16 - Testing the conversion between YUV * colors to ARGB colors in VKMS * * This test will use the functions get_conversion_matrix_to_argb_u16 and - * argb_u16_from_yuv888 to convert YUV colors (stored in - * yuv_u8_to_argb_u16_cases) into ARGB colors. + * argb_u16_from_yuv161616 to convert YUV colors (stored in + * yuv_u16_to_argb_u16_cases) into ARGB colors. * * The conversion between YUV and RGB is not totally reversible, so there may be * some difference between the expected value and the result. - * In addition, there may be some rounding error as the input color is 8 bits - * and output color is 16 bits. 
*/ -static void vkms_format_test_yuv_u8_to_argb_u16(struct kunit *test) +static void vkms_format_test_yuv_u16_to_argb_u16(struct kunit *test) { - const struct yuv_u8_to_argb_u16_case *param = test->param_value; + const struct yuv_u16_to_argb_u16_case *param = test->param_value; struct pixel_argb_u16 argb; for (size_t i = 0; i < param->n_colors; i++) { @@ -236,7 +234,8 @@ static void vkms_format_test_yuv_u8_to_argb_u16(struct kunit *test) get_conversion_matrix_to_argb_u16 (DRM_FORMAT_NV12, param->encoding, param->range, &matrix); - argb = argb_u16_from_yuv888(color->yuv.y, color->yuv.u, color->yuv.v, &matrix); + argb = argb_u16_from_yuv161616(&matrix, color->yuv.y, color->yuv.u, + color->yuv.v); KUNIT_EXPECT_LE_MSG(test, abs_diff(argb.a, color->argb.a), 0x1ff, "On the A channel of the color %s expected 0x%04x, got 0x%04x", @@ -253,19 +252,19 @@ static void vkms_format_test_yuv_u8_to_argb_u16(struct kunit *test) } } -static void vkms_format_test_yuv_u8_to_argb_u16_case_desc(struct yuv_u8_to_argb_u16_case *t, - char *desc) +static void vkms_format_test_yuv_u16_to_argb_u16_case_desc(struct yuv_u16_to_argb_u16_case *t, + char *desc) { snprintf(desc, KUNIT_PARAM_DESC_SIZE, "%s - %s", drm_get_color_encoding_name(t->encoding), drm_get_color_range_name(t->range)); } -KUNIT_ARRAY_PARAM(yuv_u8_to_argb_u16, yuv_u8_to_argb_u16_cases, - vkms_format_test_yuv_u8_to_argb_u16_case_desc +KUNIT_ARRAY_PARAM(yuv_u16_to_argb_u16, yuv_u16_to_argb_u16_cases, + vkms_format_test_yuv_u16_to_argb_u16_case_desc ); static struct kunit_case vkms_format_test_cases[] = { - KUNIT_CASE_PARAM(vkms_format_test_yuv_u8_to_argb_u16, yuv_u8_to_argb_u16_gen_params), + KUNIT_CASE_PARAM(vkms_format_test_yuv_u16_to_argb_u16, yuv_u16_to_argb_u16_gen_params), {} }; diff --git a/drivers/gpu/drm/vkms/vkms_formats.c b/drivers/gpu/drm/vkms/vkms_formats.c index 6d0227c6635a..dfb8e13cba87 100644 --- a/drivers/gpu/drm/vkms/vkms_formats.c +++ b/drivers/gpu/drm/vkms/vkms_formats.c @@ -259,16 +259,27 @@ static struct pixel_argb_u16 argb_u16_from_grayu16(u16 gray) return argb_u16_from_u16161616(0xFFFF, gray, gray, gray); } -VISIBLE_IF_KUNIT struct pixel_argb_u16 argb_u16_from_yuv888(u8 y, u8 channel_1, u8 channel_2, - const struct conversion_matrix *matrix) +static struct pixel_argb_u16 argb_u16_from_BGR565(const __le16 *pixel) +{ + struct pixel_argb_u16 out_pixel; + + out_pixel = argb_u16_from_RGB565(pixel); + swap(out_pixel.r, out_pixel.b); + + return out_pixel; +} + +VISIBLE_IF_KUNIT +struct pixel_argb_u16 argb_u16_from_yuv161616(const struct conversion_matrix *matrix, + u16 y, u16 channel_1, u16 channel_2) { u16 r, g, b; s64 fp_y, fp_channel_1, fp_channel_2; s64 fp_r, fp_g, fp_b; - fp_y = drm_int2fixp(((int)y - matrix->y_offset) * 257); - fp_channel_1 = drm_int2fixp(((int)channel_1 - 128) * 257); - fp_channel_2 = drm_int2fixp(((int)channel_2 - 128) * 257); + fp_y = drm_int2fixp((int)y - matrix->y_offset * 257); + fp_channel_1 = drm_int2fixp((int)channel_1 - 128 * 257); + fp_channel_2 = drm_int2fixp((int)channel_2 - 128 * 257); fp_r = drm_fixp_mul(matrix->matrix[0][0], fp_y) + drm_fixp_mul(matrix->matrix[0][1], fp_channel_1) + @@ -290,7 +301,65 @@ VISIBLE_IF_KUNIT struct pixel_argb_u16 argb_u16_from_yuv888(u8 y, u8 channel_1, return argb_u16_from_u16161616(0xffff, r, g, b); } -EXPORT_SYMBOL_IF_KUNIT(argb_u16_from_yuv888); +EXPORT_SYMBOL_IF_KUNIT(argb_u16_from_yuv161616); + +/** + * READ_LINE() - Generic generator for a read_line function which can be used for format with one + * plane and a block_h == block_w == 1. 
+ * + * @function_name: Function name to generate + * @pixel_name: Temporary pixel name used in the @__VA_ARGS__ parameters + * @pixel_type: Used to specify the type you want to cast the pixel pointer + * @callback: Callback to call for each pixel. This function should take @__VA_ARGS__ as parameters + * and return a pixel_argb_u16 + * __VA_ARGS__: Arguments to pass inside the callback. You can use @pixel_name to access current + * pixel. + */ +#define READ_LINE(function_name, pixel_name, pixel_type, callback, ...) \ +static void function_name(const struct vkms_plane_state *plane, int x_start, \ + int y_start, enum pixel_read_direction direction, int count, \ + struct pixel_argb_u16 out_pixel[]) \ +{ \ + struct pixel_argb_u16 *end = out_pixel + count; \ + int step = get_block_step_bytes(plane->frame_info->fb, direction, 0); \ + u8 *src_pixels; \ + \ + packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels); \ + \ + while (out_pixel < end) { \ + pixel_type *(pixel_name) = (pixel_type *)src_pixels; \ + *out_pixel = (callback)(__VA_ARGS__); \ + out_pixel += 1; \ + src_pixels += step; \ + } \ +} + +/** + * READ_LINE_ARGB8888() - Generic generator for ARGB8888 formats. + * The pixel type used is u8, so pixel_name[0]..pixel_name[n] are the n components of the pixel. + * + * @function_name: Function name to generate + * @pixel_name: temporary pixel to use in @a, @r, @g and @b parameters + * @a: alpha value + * @r: red value + * @g: green value + * @b: blue value + */ +#define READ_LINE_ARGB8888(function_name, pixel_name, a, r, g, b) \ + READ_LINE(function_name, pixel_name, u8, argb_u16_from_u8888, a, r, g, b) +/** + * READ_LINE_le16161616() - Generic generator for ARGB16161616 formats. + * The pixel type used is __le16, so pixel_name[0]..pixel_name[n] are the n components of the pixel. + * + * @function_name: Function name to generate + * @pixel_name: temporary pixel to use in @a, @r, @g and @b parameters + * @a: alpha value + * @r: red value + * @g: green value + * @b: blue value + */ +#define READ_LINE_le16161616(function_name, pixel_name, a, r, g, b) \ + READ_LINE(function_name, pixel_name, __le16, argb_u16_from_le16161616, a, r, g, b) /* * The following functions are read_line function for each pixel format supported by VKMS.
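/*
 * Editor's note (illustration, not part of the patch): the factor 257 seen
 * around argb_u16_from_yuv161616() expands an 8-bit component to full 16-bit
 * scale by replicating the byte into both halves, since x * 257 == (x << 8) | x
 * for x < 256:
 *
 *	0x00 * 257 == 0x0000,  0x80 * 257 == 0x8080,  0xff * 257 == 0xffff
 *
 * This is why the y_offset and the fixed 128 chroma bias are multiplied by 257
 * inside the conversion, and why the 8-bit YUV read_line paths below feed
 * argb_u16_from_yuv161616() with "component * 257". A hypothetical helper
 * making the intent explicit could look like this:
 */
static inline u16 vkms_u8_to_u16(u8 c)
{
	return c * 257;	/* 257 == 0x101: replicate the byte into both halves */
}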
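/*
 * Editor's sketch (not part of the patch): with the READ_LINE() family in
 * place, supporting another single-plane format is a one-line generator
 * invocation plus a case in get_pixel_read_line_function() and an entry in
 * vkms_formats[]. DRM_FORMAT_BGRX8888 is used here purely as a hypothetical
 * example; it is not added by this series. Byte 0 is the padding byte, then
 * R, G, B, matching the BGRA8888 reader above with a constant alpha.
 */
READ_LINE_ARGB8888(BGRX8888_read_line, px, 0xFF, px[1], px[2], px[3])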
@@ -378,138 +447,27 @@ static void R4_read_line(const struct vkms_plane_state *plane, int x_start, Rx_read_line(plane, x_start, y_start, direction, count, out_pixel); } -static void R8_read_line(const struct vkms_plane_state *plane, int x_start, - int y_start, enum pixel_read_direction direction, int count, - struct pixel_argb_u16 out_pixel[]) -{ - struct pixel_argb_u16 *end = out_pixel + count; - u8 *src_pixels; - int step = get_block_step_bytes(plane->frame_info->fb, direction, 0); - - packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels); - - while (out_pixel < end) { - *out_pixel = argb_u16_from_gray8(*src_pixels); - src_pixels += step; - out_pixel += 1; - } -} - -static void ARGB8888_read_line(const struct vkms_plane_state *plane, int x_start, int y_start, - enum pixel_read_direction direction, int count, - struct pixel_argb_u16 out_pixel[]) -{ - struct pixel_argb_u16 *end = out_pixel + count; - u8 *src_pixels; - - packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels); - - int step = get_block_step_bytes(plane->frame_info->fb, direction, 0); - - while (out_pixel < end) { - u8 *px = (u8 *)src_pixels; - *out_pixel = argb_u16_from_u8888(px[3], px[2], px[1], px[0]); - out_pixel += 1; - src_pixels += step; - } -} - -static void XRGB8888_read_line(const struct vkms_plane_state *plane, int x_start, int y_start, - enum pixel_read_direction direction, int count, - struct pixel_argb_u16 out_pixel[]) -{ - struct pixel_argb_u16 *end = out_pixel + count; - u8 *src_pixels; - - packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels); - - int step = get_block_step_bytes(plane->frame_info->fb, direction, 0); - - while (out_pixel < end) { - u8 *px = (u8 *)src_pixels; - *out_pixel = argb_u16_from_u8888(255, px[2], px[1], px[0]); - out_pixel += 1; - src_pixels += step; - } -} - -static void ABGR8888_read_line(const struct vkms_plane_state *plane, int x_start, int y_start, - enum pixel_read_direction direction, int count, - struct pixel_argb_u16 out_pixel[]) -{ - struct pixel_argb_u16 *end = out_pixel + count; - u8 *src_pixels; - - packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels); - - int step = get_block_step_bytes(plane->frame_info->fb, direction, 0); - - while (out_pixel < end) { - u8 *px = (u8 *)src_pixels; - /* Switch blue and red pixels. 
*/ - *out_pixel = argb_u16_from_u8888(px[3], px[0], px[1], px[2]); - out_pixel += 1; - src_pixels += step; - } -} - -static void ARGB16161616_read_line(const struct vkms_plane_state *plane, int x_start, - int y_start, enum pixel_read_direction direction, int count, - struct pixel_argb_u16 out_pixel[]) -{ - struct pixel_argb_u16 *end = out_pixel + count; - u8 *src_pixels; - - packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels); - - int step = get_block_step_bytes(plane->frame_info->fb, direction, 0); - while (out_pixel < end) { - u16 *px = (u16 *)src_pixels; - *out_pixel = argb_u16_from_u16161616(px[3], px[2], px[1], px[0]); - out_pixel += 1; - src_pixels += step; - } -} +READ_LINE_ARGB8888(XRGB8888_read_line, px, 0xFF, px[2], px[1], px[0]) +READ_LINE_ARGB8888(XBGR8888_read_line, px, 0xFF, px[0], px[1], px[2]) -static void XRGB16161616_read_line(const struct vkms_plane_state *plane, int x_start, - int y_start, enum pixel_read_direction direction, int count, - struct pixel_argb_u16 out_pixel[]) -{ - struct pixel_argb_u16 *end = out_pixel + count; - u8 *src_pixels; - - packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels); - - int step = get_block_step_bytes(plane->frame_info->fb, direction, 0); - - while (out_pixel < end) { - __le16 *px = (__le16 *)src_pixels; - *out_pixel = argb_u16_from_le16161616(cpu_to_le16(0xFFFF), px[2], px[1], px[0]); - out_pixel += 1; - src_pixels += step; - } -} - -static void RGB565_read_line(const struct vkms_plane_state *plane, int x_start, - int y_start, enum pixel_read_direction direction, int count, - struct pixel_argb_u16 out_pixel[]) -{ - struct pixel_argb_u16 *end = out_pixel + count; - u8 *src_pixels; +READ_LINE_ARGB8888(ARGB8888_read_line, px, px[3], px[2], px[1], px[0]) +READ_LINE_ARGB8888(ABGR8888_read_line, px, px[3], px[0], px[1], px[2]) +READ_LINE_ARGB8888(RGBA8888_read_line, px, px[0], px[3], px[2], px[1]) +READ_LINE_ARGB8888(BGRA8888_read_line, px, px[0], px[1], px[2], px[3]) - packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, &src_pixels); +READ_LINE_ARGB8888(RGB888_read_line, px, 0xFF, px[2], px[1], px[0]) +READ_LINE_ARGB8888(BGR888_read_line, px, 0xFF, px[0], px[1], px[2]) - int step = get_block_step_bytes(plane->frame_info->fb, direction, 0); +READ_LINE_le16161616(ARGB16161616_read_line, px, px[3], px[2], px[1], px[0]) +READ_LINE_le16161616(ABGR16161616_read_line, px, px[3], px[0], px[1], px[2]) +READ_LINE_le16161616(XRGB16161616_read_line, px, cpu_to_le16(0xFFFF), px[2], px[1], px[0]) +READ_LINE_le16161616(XBGR16161616_read_line, px, cpu_to_le16(0xFFFF), px[0], px[1], px[2]) - while (out_pixel < end) { - __le16 *px = (__le16 *)src_pixels; +READ_LINE(RGB565_read_line, px, __le16, argb_u16_from_RGB565, px) +READ_LINE(BGR565_read_line, px, __le16, argb_u16_from_BGR565, px) - *out_pixel = argb_u16_from_RGB565(px); - out_pixel += 1; - src_pixels += step; - } -} +READ_LINE(R8_read_line, px, u8, argb_u16_from_gray8, *px) /* * This callback can be used for YUV formats where U and V values are @@ -521,35 +479,57 @@ static void RGB565_read_line(const struct vkms_plane_state *plane, int x_start, * - Convert YUV and YVU with the same function (a column swap is needed when setting up * plane->conversion_matrix) */ -static void semi_planar_yuv_read_line(const struct vkms_plane_state *plane, int x_start, - int y_start, enum pixel_read_direction direction, int count, - struct pixel_argb_u16 out_pixel[]) -{ - u8 *y_plane; - u8 *uv_plane; - - packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 
0, - &y_plane); - packed_pixels_addr_1x1(plane->frame_info, - x_start / plane->frame_info->fb->format->hsub, - y_start / plane->frame_info->fb->format->vsub, 1, - &uv_plane); - int step_y = get_block_step_bytes(plane->frame_info->fb, direction, 0); - int step_uv = get_block_step_bytes(plane->frame_info->fb, direction, 1); - int subsampling = get_subsampling(plane->frame_info->fb->format, direction); - int subsampling_offset = get_subsampling_offset(direction, x_start, y_start); - const struct conversion_matrix *conversion_matrix = &plane->conversion_matrix; - - for (int i = 0; i < count; i++) { - *out_pixel = argb_u16_from_yuv888(y_plane[0], uv_plane[0], uv_plane[1], - conversion_matrix); - out_pixel += 1; - y_plane += step_y; - if ((i + subsampling_offset + 1) % subsampling == 0) - uv_plane += step_uv; - } -} +/** + * READ_LINE_YUV_SEMIPLANAR() - Generic generator for a read_line function which can be used for yuv + * formats with two planes and block_w == block_h == 1. + * + * @function_name: Function name to generate + * @pixel_1_name: temporary pixel name for the first plane used in the @__VA_ARGS__ parameters + * @pixel_2_name: temporary pixel name for the second plane used in the @__VA_ARGS__ parameters + * @pixel_1_type: Used to specify the type you want to cast the pixel pointer on the plane 1 + * @pixel_2_type: Used to specify the type you want to cast the pixel pointer on the plane 2 + * @callback: Callback to call for each pixels. This function should take + * (struct conversion_matrix*, @__VA_ARGS__) as parameter and return a pixel_argb_u16 + * __VA_ARGS__: Argument to pass inside the callback. You can use @pixel_1_name and @pixel_2_name + * to access current pixel values + */ +#define READ_LINE_YUV_SEMIPLANAR(function_name, pixel_1_name, pixel_2_name, pixel_1_type, \ + pixel_2_type, callback, ...) \ +static void function_name(const struct vkms_plane_state *plane, int x_start, \ + int y_start, enum pixel_read_direction direction, int count, \ + struct pixel_argb_u16 out_pixel[]) \ +{ \ + u8 *plane_1; \ + u8 *plane_2; \ + \ + packed_pixels_addr_1x1(plane->frame_info, x_start, y_start, 0, \ + &plane_1); \ + packed_pixels_addr_1x1(plane->frame_info, \ + x_start / plane->frame_info->fb->format->hsub, \ + y_start / plane->frame_info->fb->format->vsub, 1, \ + &plane_2); \ + int step_1 = get_block_step_bytes(plane->frame_info->fb, direction, 0); \ + int step_2 = get_block_step_bytes(plane->frame_info->fb, direction, 1); \ + int subsampling = get_subsampling(plane->frame_info->fb->format, direction); \ + int subsampling_offset = get_subsampling_offset(direction, x_start, y_start); \ + const struct conversion_matrix *conversion_matrix = &plane->conversion_matrix; \ + \ + for (int i = 0; i < count; i++) { \ + pixel_1_type *(pixel_1_name) = (pixel_1_type *)plane_1; \ + pixel_2_type *(pixel_2_name) = (pixel_2_type *)plane_2; \ + *out_pixel = (callback)(conversion_matrix, __VA_ARGS__); \ + out_pixel += 1; \ + plane_1 += step_1; \ + if ((i + subsampling_offset + 1) % subsampling == 0) \ + plane_2 += step_2; \ + } \ +} + +READ_LINE_YUV_SEMIPLANAR(YUV888_semiplanar_read_line, y, uv, u8, u8, argb_u16_from_yuv161616, + y[0] * 257, uv[0] * 257, uv[1] * 257) +READ_LINE_YUV_SEMIPLANAR(YUV161616_semiplanar_read_line, y, uv, u16, u16, argb_u16_from_yuv161616, + y[0], uv[0], uv[1]) /* * This callback can be used for YUV format where each color component is * stored in a different plane (often called planar formats). 
It will @@ -586,8 +566,9 @@ static void planar_yuv_read_line(const struct vkms_plane_state *plane, int x_sta const struct conversion_matrix *conversion_matrix = &plane->conversion_matrix; for (int i = 0; i < count; i++) { - *out_pixel = argb_u16_from_yuv888(*y_plane, *channel_1_plane, *channel_2_plane, - conversion_matrix); + *out_pixel = argb_u16_from_yuv161616(conversion_matrix, + *y_plane * 257, *channel_1_plane * 257, + *channel_2_plane * 257); out_pixel += 1; y_plane += step_y; if ((i + subsampling_offset + 1) % subsampling == 0) { @@ -712,23 +693,43 @@ pixel_read_line_t get_pixel_read_line_function(u32 format) switch (format) { case DRM_FORMAT_ARGB8888: return &ARGB8888_read_line; - case DRM_FORMAT_XRGB8888: - return &XRGB8888_read_line; case DRM_FORMAT_ABGR8888: return &ABGR8888_read_line; + case DRM_FORMAT_BGRA8888: + return &BGRA8888_read_line; + case DRM_FORMAT_RGBA8888: + return &RGBA8888_read_line; + case DRM_FORMAT_XRGB8888: + return &XRGB8888_read_line; + case DRM_FORMAT_XBGR8888: + return &XBGR8888_read_line; + case DRM_FORMAT_RGB888: + return &RGB888_read_line; + case DRM_FORMAT_BGR888: + return &BGR888_read_line; case DRM_FORMAT_ARGB16161616: return &ARGB16161616_read_line; + case DRM_FORMAT_ABGR16161616: + return &ABGR16161616_read_line; case DRM_FORMAT_XRGB16161616: return &XRGB16161616_read_line; + case DRM_FORMAT_XBGR16161616: + return &XBGR16161616_read_line; case DRM_FORMAT_RGB565: return &RGB565_read_line; + case DRM_FORMAT_BGR565: + return &BGR565_read_line; case DRM_FORMAT_NV12: case DRM_FORMAT_NV16: case DRM_FORMAT_NV24: case DRM_FORMAT_NV21: case DRM_FORMAT_NV61: case DRM_FORMAT_NV42: - return &semi_planar_yuv_read_line; + return &YUV888_semiplanar_read_line; + case DRM_FORMAT_P010: + case DRM_FORMAT_P012: + case DRM_FORMAT_P016: + return &YUV161616_semiplanar_read_line; case DRM_FORMAT_YUV420: case DRM_FORMAT_YUV422: case DRM_FORMAT_YUV444: diff --git a/drivers/gpu/drm/vkms/vkms_formats.h b/drivers/gpu/drm/vkms/vkms_formats.h index b4fe62ab9c65..eeb208cdd6b1 100644 --- a/drivers/gpu/drm/vkms/vkms_formats.h +++ b/drivers/gpu/drm/vkms/vkms_formats.h @@ -14,8 +14,8 @@ void get_conversion_matrix_to_argb_u16(u32 format, enum drm_color_encoding encod struct conversion_matrix *matrix); #if IS_ENABLED(CONFIG_KUNIT) -struct pixel_argb_u16 argb_u16_from_yuv888(u8 y, u8 channel_1, u8 channel_2, - const struct conversion_matrix *matrix); +struct pixel_argb_u16 argb_u16_from_yuv161616(const struct conversion_matrix *matrix, + u16 y, u16 channel_1, u16 channel_2); #endif #endif /* _VKMS_FORMATS_H_ */ diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c index e3fdd161d0f0..e592e47a5736 100644 --- a/drivers/gpu/drm/vkms/vkms_plane.c +++ b/drivers/gpu/drm/vkms/vkms_plane.c @@ -14,11 +14,19 @@ static const u32 vkms_formats[] = { DRM_FORMAT_ARGB8888, - DRM_FORMAT_XRGB8888, DRM_FORMAT_ABGR8888, + DRM_FORMAT_BGRA8888, + DRM_FORMAT_RGBA8888, + DRM_FORMAT_XRGB8888, + DRM_FORMAT_XBGR8888, + DRM_FORMAT_RGB888, + DRM_FORMAT_BGR888, DRM_FORMAT_XRGB16161616, + DRM_FORMAT_XBGR16161616, DRM_FORMAT_ARGB16161616, + DRM_FORMAT_ABGR16161616, DRM_FORMAT_RGB565, + DRM_FORMAT_BGR565, DRM_FORMAT_NV12, DRM_FORMAT_NV16, DRM_FORMAT_NV24, @@ -31,6 +39,9 @@ static const u32 vkms_formats[] = { DRM_FORMAT_YVU420, DRM_FORMAT_YVU422, DRM_FORMAT_YVU444, + DRM_FORMAT_P010, + DRM_FORMAT_P012, + DRM_FORMAT_P016, DRM_FORMAT_R1, DRM_FORMAT_R2, DRM_FORMAT_R4, diff --git a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h index 
9b7572e06f34..b8269391bc69 100644 --- a/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h +++ b/drivers/gpu/drm/xe/compat-i915-headers/i915_drv.h @@ -12,7 +12,6 @@ #include <drm/drm_drv.h> -#include "i915_utils.h" #include "xe_device.h" /* for xe_device_has_flat_ccs() */ #include "xe_device_types.h" @@ -26,34 +25,13 @@ static inline struct drm_i915_private *to_i915(const struct drm_device *dev) #define IS_I915G(dev_priv) (dev_priv && 0) #define IS_I915GM(dev_priv) (dev_priv && 0) #define IS_PINEVIEW(dev_priv) (dev_priv && 0) -#define IS_IVYBRIDGE(dev_priv) (dev_priv && 0) #define IS_VALLEYVIEW(dev_priv) (dev_priv && 0) #define IS_CHERRYVIEW(dev_priv) (dev_priv && 0) #define IS_HASWELL(dev_priv) (dev_priv && 0) #define IS_BROADWELL(dev_priv) (dev_priv && 0) -#define IS_SKYLAKE(dev_priv) (dev_priv && 0) #define IS_BROXTON(dev_priv) (dev_priv && 0) -#define IS_KABYLAKE(dev_priv) (dev_priv && 0) #define IS_GEMINILAKE(dev_priv) (dev_priv && 0) -#define IS_COFFEELAKE(dev_priv) (dev_priv && 0) -#define IS_COMETLAKE(dev_priv) (dev_priv && 0) -#define IS_ICELAKE(dev_priv) (dev_priv && 0) -#define IS_JASPERLAKE(dev_priv) (dev_priv && 0) -#define IS_ELKHARTLAKE(dev_priv) (dev_priv && 0) -#define IS_TIGERLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_TIGERLAKE) -#define IS_ROCKETLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_ROCKETLAKE) -#define IS_DG1(dev_priv) IS_PLATFORM(dev_priv, XE_DG1) -#define IS_ALDERLAKE_S(dev_priv) IS_PLATFORM(dev_priv, XE_ALDERLAKE_S) -#define IS_ALDERLAKE_P(dev_priv) (IS_PLATFORM(dev_priv, XE_ALDERLAKE_P) || \ - IS_PLATFORM(dev_priv, XE_ALDERLAKE_N)) #define IS_DG2(dev_priv) IS_PLATFORM(dev_priv, XE_DG2) -#define IS_METEORLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_METEORLAKE) -#define IS_LUNARLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_LUNARLAKE) -#define IS_BATTLEMAGE(dev_priv) IS_PLATFORM(dev_priv, XE_BATTLEMAGE) -#define IS_PANTHERLAKE(dev_priv) IS_PLATFORM(dev_priv, XE_PANTHERLAKE) - -#define IS_HASWELL_ULT(dev_priv) (dev_priv && 0) -#define IS_BROADWELL_ULT(dev_priv) (dev_priv && 0) #define IS_MOBILE(xe) (xe && 0) diff --git a/drivers/gpu/drm/xe/display/ext/i915_utils.c b/drivers/gpu/drm/xe/display/ext/i915_utils.c index 43b10a2cc508..1421c2a7b64d 100644 --- a/drivers/gpu/drm/xe/display/ext/i915_utils.c +++ b/drivers/gpu/drm/xe/display/ext/i915_utils.c @@ -4,6 +4,7 @@ */ #include "i915_drv.h" +#include "i915_utils.h" bool i915_vtd_active(struct drm_i915_private *i915) { diff --git a/drivers/gpu/drm/xe/display/xe_display.c b/drivers/gpu/drm/xe/display/xe_display.c index e2e0771cf274..8b68d70db6c8 100644 --- a/drivers/gpu/drm/xe/display/xe_display.c +++ b/drivers/gpu/drm/xe/display/xe_display.c @@ -96,6 +96,7 @@ static void xe_display_fini_early(void *arg) if (!xe->info.probe_display) return; + intel_hpd_cancel_work(display); intel_display_driver_remove_nogem(display); intel_display_driver_remove_noirq(display); intel_opregion_cleanup(display); @@ -340,6 +341,8 @@ void xe_display_pm_suspend(struct xe_device *xe) xe_display_flush_cleanup_work(xe); + intel_encoder_block_all_hpds(display); + intel_hpd_cancel_work(display); if (has_display(xe)) { @@ -370,6 +373,7 @@ void xe_display_pm_shutdown(struct xe_device *xe) xe_display_flush_cleanup_work(xe); intel_dp_mst_suspend(display); + intel_encoder_block_all_hpds(display); intel_hpd_cancel_work(display); if (has_display(xe)) @@ -471,6 +475,8 @@ void xe_display_pm_resume(struct xe_device *xe) intel_hpd_init(display); + intel_encoder_unblock_all_hpds(display); + if (has_display(xe)) { intel_display_driver_resume(display); 
drm_kms_helper_poll_enable(&xe->drm); diff --git a/drivers/gpu/drm/xe/display/xe_fb_pin.c b/drivers/gpu/drm/xe/display/xe_fb_pin.c index f2cfba674899..f1f8b5ab53ef 100644 --- a/drivers/gpu/drm/xe/display/xe_fb_pin.c +++ b/drivers/gpu/drm/xe/display/xe_fb_pin.c @@ -383,6 +383,7 @@ static bool reuse_vma(struct intel_plane_state *new_plane_state, const struct intel_plane_state *old_plane_state) { struct intel_framebuffer *fb = to_intel_framebuffer(new_plane_state->hw.fb); + struct intel_plane *plane = to_intel_plane(new_plane_state->uapi.plane); struct xe_device *xe = to_xe_device(fb->base.dev); struct intel_display *display = xe->display; struct i915_vma *vma; @@ -406,6 +407,10 @@ static bool reuse_vma(struct intel_plane_state *new_plane_state, found: refcount_inc(&vma->ref); new_plane_state->ggtt_vma = vma; + + new_plane_state->surf = i915_ggtt_offset(new_plane_state->ggtt_vma) + + plane->surf_offset(new_plane_state); + return true; } @@ -432,6 +437,10 @@ int intel_plane_pin_fb(struct intel_plane_state *new_plane_state, return PTR_ERR(vma); new_plane_state->ggtt_vma = vma; + + new_plane_state->surf = i915_ggtt_offset(new_plane_state->ggtt_vma) + + plane->surf_offset(new_plane_state); + return 0; } diff --git a/drivers/gpu/drm/xe/display/xe_plane_initial.c b/drivers/gpu/drm/xe/display/xe_plane_initial.c index 00da1dff0c44..826ac3d578b7 100644 --- a/drivers/gpu/drm/xe/display/xe_plane_initial.c +++ b/drivers/gpu/drm/xe/display/xe_plane_initial.c @@ -10,6 +10,7 @@ #include "xe_ggtt.h" #include "xe_mmio.h" +#include "i915_vma.h" #include "intel_crtc.h" #include "intel_display.h" #include "intel_display_core.h" @@ -235,6 +236,9 @@ intel_find_initial_plane_obj(struct intel_crtc *crtc, goto nofb; plane_state->ggtt_vma = vma; + + plane_state->surf = i915_ggtt_offset(plane_state->ggtt_vma); + plane_state->uapi.src_x = 0; plane_state->uapi.src_y = 0; plane_state->uapi.src_w = fb->width << 16; diff --git a/drivers/gpu/drm/xe/xe_device_types.h b/drivers/gpu/drm/xe/xe_device_types.h index 646d04aec68c..092004d14db2 100644 --- a/drivers/gpu/drm/xe/xe_device_types.h +++ b/drivers/gpu/drm/xe/xe_device_types.h @@ -430,7 +430,7 @@ struct xe_device { /** @ordered_wq: used to serialize compute mode resume */ struct workqueue_struct *ordered_wq; - /** @unordered_wq: used to serialize unordered work, mostly display */ + /** @unordered_wq: used to serialize unordered work */ struct workqueue_struct *unordered_wq; /** @destroy_wq: used to serialize user destroy work, like queue */ @@ -622,7 +622,6 @@ struct xe_device { struct { unsigned int hpll_freq; unsigned int czclk_freq; - unsigned int fsb_freq, mem_freq, is_ddr3; }; #endif }; diff --git a/drivers/gpu/drm/xe/xe_dma_buf.c b/drivers/gpu/drm/xe/xe_dma_buf.c index 346f857f3837..71b70e17bddd 100644 --- a/drivers/gpu/drm/xe/xe_dma_buf.c +++ b/drivers/gpu/drm/xe/xe_dma_buf.c @@ -191,10 +191,22 @@ struct dma_buf *xe_gem_prime_export(struct drm_gem_object *obj, int flags) { struct xe_bo *bo = gem_to_xe_bo(obj); struct dma_buf *buf; + struct ttm_operation_ctx ctx = { + .interruptible = true, + .no_wait_gpu = true, + /* We opt to avoid OOM on system pages allocations */ + .gfp_retry_mayfail = true, + .allow_res_evict = false, + }; + int ret; if (bo->vm) return ERR_PTR(-EPERM); + ret = ttm_bo_setup_export(&bo->ttm, &ctx); + if (ret) + return ERR_PTR(ret); + buf = drm_gem_prime_export(obj, flags); if (!IS_ERR(buf)) buf->ops = &xe_dmabuf_ops; |
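/*
 * Editor's sketch (assumption, not from this series): any other TTM-based
 * driver can now pre-populate a BO's backing pages before PRIME export with
 * the new ttm_bo_setup_export() helper, mirroring the xe hook above.
 * "foo_bo", "gem_to_foo_bo" and "foo_dmabuf_ops" are hypothetical names.
 */
struct dma_buf *foo_gem_prime_export(struct drm_gem_object *obj, int flags)
{
	struct ttm_operation_ctx ctx = {
		.interruptible = true,
		.no_wait_gpu = true,
		/* Prefer -ENOMEM over invoking the OOM killer for system pages. */
		.gfp_retry_mayfail = true,
		.allow_res_evict = false,
	};
	struct foo_bo *bo = gem_to_foo_bo(obj);
	struct dma_buf *buf;
	int ret;

	ret = ttm_bo_setup_export(&bo->tbo, &ctx);
	if (ret)
		return ERR_PTR(ret);

	buf = drm_gem_prime_export(obj, flags);
	if (!IS_ERR(buf))
		buf->ops = &foo_dmabuf_ops;

	return buf;
}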
