Diffstat (limited to 'drivers/gpu/drm')
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu.h | 43
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c | 47
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c | 55
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c | 27
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c | 8
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c | 125
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c | 189
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h | 85
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c | 222
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_device.c | 218
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c | 24
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_job.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c | 32
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c | 43
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.c | 45
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_object.h | 6
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c | 13
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c | 21
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c | 126
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c | 56
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h | 5
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c | 35
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c | 23
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c | 87
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/cik.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c | 11
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c | 49
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c | 49
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/kv_dpm.c | 49
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/psp_v10_0.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/si_dpm.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c | 90
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v2_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v3_0.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vce_v4_0.c | 10
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c | 40
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c | 3
-rw-r--r--  drivers/gpu/drm/amd/amdgpu/vi.c | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c | 54
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cik_int.h | 7
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h | 458
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm | 18
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm | 16
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_chardev.c | 69
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_crat.c | 57
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h | 37
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c | 48
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device.c | 122
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c | 283
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h | 32
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c | 9
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.c | 118
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_events.h | 1
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c | 22
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c | 6
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_iommu.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c | 18
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_module.c | 16
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c | 41
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h | 4
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c | 31
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c | 29
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c | 26
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_priv.h | 40
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process.c | 2
-rw-r--r--  drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c | 43
-rw-r--r--  drivers/gpu/drm/amd/display/Kconfig | 6
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c | 62
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h | 7
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c | 42
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c | 30
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c | 16
-rw-r--r--  drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table.c | 18
-rw-r--r--  drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc.c | 21
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_debug.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link.c | 88
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c | 27
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_resource.c | 84
-rw-r--r--  drivers/gpu/drm/amd/display/dc/core/dc_surface.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc.h | 27
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_hw_types.h | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dc_link.h | 3
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_aux.c | 937
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_aux.h | 111
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c | 6
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c | 30
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c | 44
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c | 63
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c | 20
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c | 45
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c | 44
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c | 10
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c | 45
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c | 47
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c | 50
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/engine.h | 1
-rw-r--r--  drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c | 4
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/core_types.h | 9
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h | 180
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/inc/resource.h | 5
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/Makefile | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/irq/irq_service.c | 2
-rw-r--r--  drivers/gpu/drm/amd/display/dc/os_types.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h | 2
-rw-r--r--  drivers/gpu/drm/amd/display/modules/color/color_gamma.c | 10
-rw-r--r--  drivers/gpu/drm/amd/include/atomfirmware.h | 15
-rw-r--r--  drivers/gpu/drm/amd/include/kgd_kfd_interface.h | 40
-rw-r--r--  drivers/gpu/drm/amd/powerplay/amd_powerplay.c | 9
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c | 4
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c | 43
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c | 5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c | 32
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c | 5
-rw-r--r--  drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c | 2
-rw-r--r--  drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c | 22
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_crtc.c | 35
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_drv.c | 76
-rw-r--r--  drivers/gpu/drm/arm/hdlcd_drv.h | 2
-rw-r--r--  drivers/gpu/drm/arm/malidp_drv.c | 10
-rw-r--r--  drivers/gpu/drm/arm/malidp_mw.c | 2
-rw-r--r--  drivers/gpu/drm/arm/malidp_planes.c | 7
-rw-r--r--  drivers/gpu/drm/armada/Makefile | 2
-rw-r--r--  drivers/gpu/drm/armada/armada_510.c | 24
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.c | 1020
-rw-r--r--  drivers/gpu/drm/armada/armada_crtc.h | 56
-rw-r--r--  drivers/gpu/drm/armada/armada_drm.h | 14
-rw-r--r--  drivers/gpu/drm/armada/armada_drv.c | 54
-rw-r--r--  drivers/gpu/drm/armada/armada_fb.c | 7
-rw-r--r--  drivers/gpu/drm/armada/armada_fb.h | 3
-rw-r--r--  drivers/gpu/drm/armada/armada_fbdev.c | 4
-rw-r--r--  drivers/gpu/drm/armada/armada_gem.c | 15
-rw-r--r--  drivers/gpu/drm/armada/armada_hw.h | 16
-rw-r--r--  drivers/gpu/drm/armada/armada_overlay.c | 663
-rw-r--r--  drivers/gpu/drm/armada/armada_plane.c | 289
-rw-r--r--  drivers/gpu/drm/armada/armada_plane.h | 15
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c | 100
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h | 1
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c | 92
-rw-r--r--  drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c | 5
-rw-r--r--  drivers/gpu/drm/bochs/bochs_drv.c | 18
-rw-r--r--  drivers/gpu/drm/bochs/bochs_mm.c | 2
-rw-r--r--  drivers/gpu/drm/bridge/Kconfig | 18
-rw-r--r--  drivers/gpu/drm/bridge/Makefile | 2
-rw-r--r--  drivers/gpu/drm/bridge/adv7511/adv7511_drv.c | 12
-rw-r--r--  drivers/gpu/drm/bridge/sil-sii8620.c | 86
-rw-r--r--  drivers/gpu/drm/bridge/synopsys/Makefile | 2
-rw-r--r--  drivers/gpu/drm/bridge/tc358764.c | 499
-rw-r--r--  drivers/gpu/drm/bridge/ti-sn65dsi86.c | 779
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.c | 27
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_drv.h | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_fbdev.c | 51
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_main.c | 2
-rw-r--r--  drivers/gpu/drm/cirrus/cirrus_mode.c | 4
-rw-r--r--  drivers/gpu/drm/drm_atomic.c | 4
-rw-r--r--  drivers/gpu/drm/drm_atomic_helper.c | 42
-rw-r--r--  drivers/gpu/drm/drm_blend.c | 123
-rw-r--r--  drivers/gpu/drm/drm_context.c | 2
-rw-r--r--  drivers/gpu/drm/drm_debugfs_crc.c | 92
-rw-r--r--  drivers/gpu/drm/drm_dp_cec.c | 20
-rw-r--r--  drivers/gpu/drm/drm_dp_helper.c | 3
-rw-r--r--  drivers/gpu/drm/drm_dp_mst_topology.c | 1
-rw-r--r--  drivers/gpu/drm/drm_edid.c | 3
-rw-r--r--  drivers/gpu/drm/drm_fb_cma_helper.c | 26
-rw-r--r--  drivers/gpu/drm/drm_gem_cma_helper.c | 4
-rw-r--r--  drivers/gpu/drm/drm_lease.c | 16
-rw-r--r--  drivers/gpu/drm/drm_mipi_dsi.c | 2
-rw-r--r--  drivers/gpu/drm/drm_panel.c | 2
-rw-r--r--  drivers/gpu/drm/drm_print.c | 111
-rw-r--r--  drivers/gpu/drm/drm_property.c | 6
-rw-r--r--  drivers/gpu/drm/drm_syncobj.c | 15
-rw-r--r--  drivers/gpu/drm/drm_vblank.c | 6
-rw-r--r--  drivers/gpu/drm/drm_vma_manager.c | 3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.c | 27
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_drv.h | 3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem.c | 37
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c | 4
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.c | 3
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_gpu.h | 5
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c | 9
-rw-r--r--  drivers/gpu/drm/etnaviv/etnaviv_sched.c | 52
-rw-r--r--  drivers/gpu/drm/exynos/Makefile | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos5433_drm_decon.c | 8
-rw-r--r--  drivers/gpu/drm/exynos/exynos7_drm_decon.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_dp.c | 3
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_core.c | 119
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.c | 33
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_drv.h | 47
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_dsi.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fb.c | 10
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimc.c | 17
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_fimd.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.c | 300
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_g2d.h | 11
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.c | 62
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gem.h | 24
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_gsc.c | 51
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_ipp.c | 120
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_mic.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_plane.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_rotator.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/exynos_drm_scaler.c | 44
-rw-r--r--  drivers/gpu/drm/exynos/exynos_hdmi.c | 2
-rw-r--r--  drivers/gpu/drm/exynos/exynos_mixer.c | 4
-rw-r--r--  drivers/gpu/drm/exynos/regs-gsc.h | 1
-rw-r--r--  drivers/gpu/drm/gma500/framebuffer.c | 14
-rw-r--r--  drivers/gpu/drm/gma500/gem.c | 27
-rw-r--r--  drivers/gpu/drm/gma500/psb_drv.h | 4
-rw-r--r--  drivers/gpu/drm/i2c/tda9950.c | 5
-rw-r--r--  drivers/gpu/drm/i2c/tda998x_drv.c | 372
-rw-r--r--  drivers/gpu/drm/i915/Kconfig | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/aperture_gm.c | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/cmd_parser.c | 25
-rw-r--r--  drivers/gpu/drm/i915/gvt/display.c | 6
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.c | 58
-rw-r--r--  drivers/gpu/drm/i915/gvt/gtt.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.c | 15
-rw-r--r--  drivers/gpu/drm/i915/gvt/gvt.h | 33
-rw-r--r--  drivers/gpu/drm/i915/gvt/handlers.c | 24
-rw-r--r--  drivers/gpu/drm/i915/gvt/kvmgt.c | 26
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio.h | 2
-rw-r--r--  drivers/gpu/drm/i915/gvt/mmio_context.c | 4
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.c | 7
-rw-r--r--  drivers/gpu/drm/i915/gvt/scheduler.h | 3
-rw-r--r--  drivers/gpu/drm/i915/gvt/vgpu.c | 23
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_clflush.c | 7
-rw-r--r--  drivers/gpu/drm/i915/i915_gem_userptr.c | 13
-rw-r--r--  drivers/gpu/drm/i915/i915_gpu_error.c | 34
-rw-r--r--  drivers/gpu/drm/i915/i915_pmu.c | 1
-rw-r--r--  drivers/gpu/drm/i915/i915_utils.h | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_audio.c | 22
-rw-r--r--  drivers/gpu/drm/i915/intel_display.c | 2
-rw-r--r--  drivers/gpu/drm/i915/intel_display.h | 24
-rw-r--r--  drivers/gpu/drm/i915/intel_drv.h | 11
-rw-r--r--  drivers/gpu/drm/i915/intel_hdmi.c | 12
-rw-r--r--  drivers/gpu/drm/i915/intel_lpe_audio.c | 1
-rw-r--r--  drivers/gpu/drm/i915/intel_pipe_crc.c | 119
-rw-r--r--  drivers/gpu/drm/i915/selftests/i915_sw_fence.c | 8
-rw-r--r--  drivers/gpu/drm/imx/imx-drm-core.c | 47
-rw-r--r--  drivers/gpu/drm/imx/imx-drm.h | 1
-rw-r--r--  drivers/gpu/drm/imx/imx-ldb.c | 9
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-crtc.c | 1
-rw-r--r--  drivers/gpu/drm/imx/ipuv3-plane.c | 9
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_cec.c | 1
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_ovl.c | 11
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_disp_rdma.c | 92
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.c | 47
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_crtc.h | 3
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp.c | 18
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h | 9
-rw-r--r--  drivers/gpu/drm/mediatek/mtk_drm_drv.c | 27
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_drv.c | 21
-rw-r--r--  drivers/gpu/drm/mgag200/mgag200_main.c | 9
-rw-r--r--  drivers/gpu/drm/msm/Kconfig | 1
-rw-r--r--  drivers/gpu/drm/msm/Makefile | 34
-rw-r--r--  drivers/gpu/drm/msm/adreno/a2xx.xml.h | 57
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx.xml.h | 24
-rw-r--r--  drivers/gpu/drm/msm/adreno/a3xx_gpu.c | 30
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx.xml.h | 193
-rw-r--r--  drivers/gpu/drm/msm/adreno/a4xx_gpu.c | 22
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx.xml.h | 483
-rw-r--r--  drivers/gpu/drm/msm/adreno/a5xx_gpu.c | 249
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx.xml.h | 4562
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.c | 1207
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.h | 162
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h | 382
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.c | 818
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_gpu.h | 60
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_hfi.c | 435
-rw-r--r--  drivers/gpu/drm/msm/adreno/a6xx_hfi.h | 127
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_common.xml.h | 38
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_device.c | 49
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.c | 217
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_gpu.h | 14
-rw-r--r--  drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h | 497
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c | 479
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h | 153
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c | 637
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h | 133
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c | 2138
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h | 423
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c | 2393
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h | 103
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c | 2500
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h | 177
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h | 430
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c | 905
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c | 922
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c | 1173
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h | 88
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c | 155
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h | 53
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c | 511
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h | 804
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h | 168
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c | 323
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h | 139
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c | 540
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h | 218
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c | 1183
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h | 257
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c | 349
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h | 128
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c | 261
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h | 122
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h | 465
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c | 250
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h | 136
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c | 753
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h | 424
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c | 398
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h | 202
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c | 368
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h | 348
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c | 275
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h | 128
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h | 56
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c | 203
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h | 57
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c | 66
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h | 59
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c | 1345
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h | 290
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c | 245
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c | 1963
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h | 175
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c | 249
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h | 225
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c | 1079
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h | 199
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h | 1007
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c | 384
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h | 94
-rw-r--r--  drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h | 1376
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h | 26
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h | 26
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c | 51
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c | 12
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c | 2
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c | 154
-rw-r--r--  drivers/gpu/drm/msm/disp/mdp_common.xml.h | 26
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.c | 3
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.h | 23
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi.xml.h | 13
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_cfg.c | 56
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_cfg.h | 12
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_host.c | 429
-rw-r--r--  drivers/gpu/drm/msm/dsi/dsi_manager.c | 125
-rw-r--r--  drivers/gpu/drm/msm/dsi/mmss_cc.xml.h | 26
-rw-r--r--  drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c | 2
-rw-r--r--  drivers/gpu/drm/msm/dsi/sfpb.xml.h | 26
-rw-r--r--  drivers/gpu/drm/msm/edp/edp.xml.h | 26
-rw-r--r--  drivers/gpu/drm/msm/hdmi/hdmi.xml.h | 26
-rw-r--r--  drivers/gpu/drm/msm/hdmi/qfprom.xml.h | 26
-rw-r--r--  drivers/gpu/drm/msm/msm_atomic.c | 7
-rw-r--r--  drivers/gpu/drm/msm/msm_debugfs.c | 93
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.c | 287
-rw-r--r--  drivers/gpu/drm/msm/msm_drv.h | 103
-rw-r--r--  drivers/gpu/drm/msm/msm_fence.c | 8
-rw-r--r--  drivers/gpu/drm/msm/msm_gem.c | 33
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.c | 207
-rw-r--r--  drivers/gpu/drm/msm/msm_gpu.h | 70
-rw-r--r--  drivers/gpu/drm/msm/msm_kms.h | 29
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv04/disp.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/dispnv50/disp.c | 53
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_backlight.c | 6
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.c | 26
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_connector.h | 36
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_display.c | 10
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_drm.c | 18
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_fence.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nouveau_gem.c | 4
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c | 3
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c | 9
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c | 1
-rw-r--r--  drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h | 2
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c | 139
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/connector-dvi.c | 175
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/connector-hdmi.c | 277
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/encoder-opa362.c | 132
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c | 187
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c | 197
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-dpi.c | 88
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c | 247
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c | 103
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c | 146
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c | 101
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c | 165
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c | 102
-rw-r--r--  drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c | 192
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/base.c | 217
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/core.c | 26
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dispc.c | 26
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/display.c | 134
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dpi.c | 192
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dsi.c | 569
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss-of.c | 47
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss.c | 45
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/dss.h | 11
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi.h | 8
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi4.c | 351
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi5.c | 334
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi5_core.c | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/hdmi_wp.c | 8
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/omapdss.h | 306
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/output.c | 208
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/sdi.c | 149
-rw-r--r--  drivers/gpu/drm/omapdrm/dss/venc.c | 291
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_connector.c | 371
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_connector.h | 7
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_crtc.c | 111
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_crtc.h | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.c | 302
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_drv.h | 19
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_encoder.c | 159
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_encoder.h | 6
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_fbdev.c | 4
-rw-r--r--  drivers/gpu/drm/omapdrm/omap_irq.c | 4
-rw-r--r--  drivers/gpu/drm/panel/panel-simple.c | 3
-rw-r--r--  drivers/gpu/drm/qxl/qxl_display.c | 16
-rw-r--r--  drivers/gpu/drm/qxl/qxl_drv.c | 28
-rw-r--r--  drivers/gpu/drm/qxl/qxl_gem.c | 2
-rw-r--r--  drivers/gpu/drm/qxl/qxl_kms.c | 80
-rw-r--r--  drivers/gpu/drm/radeon/radeon_drv.c | 23
-rw-r--r--  drivers/gpu/drm/radeon/radeon_mn.c | 22
-rw-r--r--  drivers/gpu/drm/radeon/radeon_object.c | 7
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_crtc.c | 149
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_crtc.h | 3
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_plane.c | 6
-rw-r--r--  drivers/gpu/drm/rcar-du/rcar_du_vsp.c | 5
-rw-r--r--  drivers/gpu/drm/rockchip/Kconfig | 25
-rw-r--r--  drivers/gpu/drm/rockchip/Makefile | 1
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.c | 98
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_drv.h | 2
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.c | 48
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_drm_vop.h | 1
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_rgb.c | 173
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_rgb.h | 33
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_vop_reg.c | 215
-rw-r--r--  drivers/gpu/drm/rockchip/rockchip_vop_reg.h | 99
-rw-r--r--  drivers/gpu/drm/scheduler/Makefile | 1
-rw-r--r--  drivers/gpu/drm/scheduler/gpu_scheduler.c | 104
-rw-r--r--  drivers/gpu/drm/scheduler/sched_fence.c | 2
-rw-r--r--  drivers/gpu/drm/sti/sti_hda.c | 1
-rw-r--r--  drivers/gpu/drm/sti/sti_hdmi.c | 1
-rw-r--r--  drivers/gpu/drm/sun4i/Makefile | 5
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_backend.c | 81
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_backend.h | 3
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_drv.c | 19
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_layer.c | 4
-rw-r--r--  drivers/gpu/drm/sun4i/sun4i_tcon.c | 119
-rw-r--r--  drivers/gpu/drm/sun4i/sun6i_drc.c | 1
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c | 17
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h | 2
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_mixer.c | 24
-rw-r--r--  drivers/gpu/drm/sun4i/sun8i_tcon_top.c | 3
-rw-r--r--  drivers/gpu/drm/tegra/dc.c | 2
-rw-r--r--  drivers/gpu/drm/tegra/drm.c | 6
-rw-r--r--  drivers/gpu/drm/tinydrm/core/tinydrm-core.c | 6
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc.c | 62
-rw-r--r--  drivers/gpu/drm/ttm/ttm_page_alloc_dma.c | 59
-rw-r--r--  drivers/gpu/drm/ttm/ttm_tt.c | 25
-rw-r--r--  drivers/gpu/drm/udl/udl_drv.h | 2
-rw-r--r--  drivers/gpu/drm/udl/udl_fb.c | 28
-rw-r--r--  drivers/gpu/drm/udl/udl_main.c | 45
-rw-r--r--  drivers/gpu/drm/udl/udl_modeset.c | 7
-rw-r--r--  drivers/gpu/drm/udl/udl_transfer.c | 49
-rw-r--r--  drivers/gpu/drm/v3d/v3d_drv.c | 4
-rw-r--r--  drivers/gpu/drm/v3d/v3d_gem.c | 2
-rw-r--r--  drivers/gpu/drm/vc4/vc4_drv.c | 20
-rw-r--r--  drivers/gpu/drm/vc4/vc4_plane.c | 7
-rw-r--r--  drivers/gpu/drm/vgem/vgem_drv.c | 2
-rw-r--r--  drivers/gpu/drm/vgem/vgem_fence.c | 13
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_display.c | 4
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drm_bus.c | 26
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_drv.h | 13
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_fb.c | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_plane.c | 2
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_ttm.c | 39
-rw-r--r--  drivers/gpu/drm/virtio/virtgpu_vq.c | 57
-rw-r--r--  drivers/gpu/drm/vkms/Makefile | 2
-rw-r--r--  drivers/gpu/drm/vkms/vkms_crc.c | 153
-rw-r--r--  drivers/gpu/drm/vkms/vkms_crtc.c | 114
-rw-r--r--  drivers/gpu/drm/vkms/vkms_drv.c | 2
-rw-r--r--  drivers/gpu/drm/vkms/vkms_drv.h | 59
-rw-r--r--  drivers/gpu/drm/vkms/vkms_gem.c | 83
-rw-r--r--  drivers/gpu/drm/vkms/vkms_plane.c | 140
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c | 41
-rw-r--r--  drivers/gpu/drm/vmwgfx/vmwgfx_kms.c | 4
-rw-r--r--  drivers/gpu/drm/xen/xen_drm_front_gem.c | 2
540 files changed, 54406 insertions, 9498 deletions
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu.h b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
index 44f62fda4022..447c4c7a36d6 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu.h
@@ -74,6 +74,7 @@
#include "amdgpu_gart.h"
#include "amdgpu_debugfs.h"
#include "amdgpu_job.h"
+#include "amdgpu_bo_list.h"
/*
* Modules parameters.
@@ -690,45 +691,6 @@ struct amdgpu_fpriv {
};
/*
- * residency list
- */
-struct amdgpu_bo_list_entry {
- struct amdgpu_bo *robj;
- struct ttm_validate_buffer tv;
- struct amdgpu_bo_va *bo_va;
- uint32_t priority;
- struct page **user_pages;
- int user_invalidated;
-};
-
-struct amdgpu_bo_list {
- struct mutex lock;
- struct rcu_head rhead;
- struct kref refcount;
- struct amdgpu_bo *gds_obj;
- struct amdgpu_bo *gws_obj;
- struct amdgpu_bo *oa_obj;
- unsigned first_userptr;
- unsigned num_entries;
- struct amdgpu_bo_list_entry *array;
-};
-
-struct amdgpu_bo_list *
-amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id);
-void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
- struct list_head *validated);
-void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
-void amdgpu_bo_list_free(struct amdgpu_bo_list *list);
-int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
- struct drm_amdgpu_bo_list_entry **info_param);
-
-int amdgpu_bo_list_create(struct amdgpu_device *adev,
- struct drm_file *filp,
- struct drm_amdgpu_bo_list_entry *info,
- unsigned num_entries,
- struct amdgpu_bo_list **list);
-
-/*
* GFX stuff
*/
#include "clearstate_defs.h"
@@ -1748,6 +1710,7 @@ amdgpu_get_sdma_instance(struct amdgpu_ring *ring)
#define amdgpu_vm_write_pte(adev, ib, pe, value, count, incr) ((adev)->vm_manager.vm_pte_funcs->write_pte((ib), (pe), (value), (count), (incr)))
#define amdgpu_vm_set_pte_pde(adev, ib, pe, addr, count, incr, flags) ((adev)->vm_manager.vm_pte_funcs->set_pte_pde((ib), (pe), (addr), (count), (incr), (flags)))
#define amdgpu_ring_parse_cs(r, p, ib) ((r)->funcs->parse_cs((p), (ib)))
+#define amdgpu_ring_patch_cs_in_place(r, p, ib) ((r)->funcs->patch_cs_in_place((p), (ib)))
#define amdgpu_ring_test_ring(r) (r)->funcs->test_ring((r))
#define amdgpu_ring_test_ib(r, t) (r)->funcs->test_ib((r), (t))
#define amdgpu_ring_get_rptr(r) (r)->funcs->get_rptr((r))
@@ -1801,8 +1764,6 @@ void amdgpu_display_update_priority(struct amdgpu_device *adev);
void amdgpu_cs_report_moved_bytes(struct amdgpu_device *adev, u64 num_bytes,
u64 num_vis_bytes);
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
-bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
void amdgpu_device_vram_location(struct amdgpu_device *adev,
struct amdgpu_gmc *mc, u64 base);
void amdgpu_device_gart_location(struct amdgpu_device *adev,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
index f4c474a95875..71efcf38f11b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acp.c
@@ -57,6 +57,10 @@
#define ACP_I2S_COMP2_CAP_REG_OFFSET 0xa8
#define ACP_I2S_COMP1_PLAY_REG_OFFSET 0x6c
#define ACP_I2S_COMP2_PLAY_REG_OFFSET 0x68
+#define ACP_BT_PLAY_REGS_START 0x14970
+#define ACP_BT_PLAY_REGS_END 0x14a24
+#define ACP_BT_COMP1_REG_OFFSET 0xac
+#define ACP_BT_COMP2_REG_OFFSET 0xa8
#define mmACP_PGFSM_RETAIN_REG 0x51c9
#define mmACP_PGFSM_CONFIG_REG 0x51ca
@@ -77,7 +81,7 @@
#define ACP_SOFT_RESET_DONE_TIME_OUT_VALUE 0x000000FF
#define ACP_TIMEOUT_LOOP 0x000000FF
-#define ACP_DEVS 3
+#define ACP_DEVS 4
#define ACP_SRC_ID 162
enum {
@@ -316,14 +320,13 @@ static int acp_hw_init(void *handle)
if (adev->acp.acp_cell == NULL)
return -ENOMEM;
- adev->acp.acp_res = kcalloc(4, sizeof(struct resource), GFP_KERNEL);
-
+ adev->acp.acp_res = kcalloc(5, sizeof(struct resource), GFP_KERNEL);
if (adev->acp.acp_res == NULL) {
kfree(adev->acp.acp_cell);
return -ENOMEM;
}
- i2s_pdata = kcalloc(2, sizeof(struct i2s_platform_data), GFP_KERNEL);
+ i2s_pdata = kcalloc(3, sizeof(struct i2s_platform_data), GFP_KERNEL);
if (i2s_pdata == NULL) {
kfree(adev->acp.acp_res);
kfree(adev->acp.acp_cell);
@@ -358,6 +361,20 @@ static int acp_hw_init(void *handle)
i2s_pdata[1].i2s_reg_comp1 = ACP_I2S_COMP1_CAP_REG_OFFSET;
i2s_pdata[1].i2s_reg_comp2 = ACP_I2S_COMP2_CAP_REG_OFFSET;
+ i2s_pdata[2].quirks = DW_I2S_QUIRK_COMP_REG_OFFSET;
+ switch (adev->asic_type) {
+ case CHIP_STONEY:
+ i2s_pdata[2].quirks |= DW_I2S_QUIRK_16BIT_IDX_OVERRIDE;
+ break;
+ default:
+ break;
+ }
+
+ i2s_pdata[2].cap = DWC_I2S_PLAY | DWC_I2S_RECORD;
+ i2s_pdata[2].snd_rates = SNDRV_PCM_RATE_8000_96000;
+ i2s_pdata[2].i2s_reg_comp1 = ACP_BT_COMP1_REG_OFFSET;
+ i2s_pdata[2].i2s_reg_comp2 = ACP_BT_COMP2_REG_OFFSET;
+
adev->acp.acp_res[0].name = "acp2x_dma";
adev->acp.acp_res[0].flags = IORESOURCE_MEM;
adev->acp.acp_res[0].start = acp_base;
@@ -373,13 +390,18 @@ static int acp_hw_init(void *handle)
adev->acp.acp_res[2].start = acp_base + ACP_I2S_CAP_REGS_START;
adev->acp.acp_res[2].end = acp_base + ACP_I2S_CAP_REGS_END;
- adev->acp.acp_res[3].name = "acp2x_dma_irq";
- adev->acp.acp_res[3].flags = IORESOURCE_IRQ;
- adev->acp.acp_res[3].start = amdgpu_irq_create_mapping(adev, 162);
- adev->acp.acp_res[3].end = adev->acp.acp_res[3].start;
+ adev->acp.acp_res[3].name = "acp2x_dw_bt_i2s_play_cap";
+ adev->acp.acp_res[3].flags = IORESOURCE_MEM;
+ adev->acp.acp_res[3].start = acp_base + ACP_BT_PLAY_REGS_START;
+ adev->acp.acp_res[3].end = acp_base + ACP_BT_PLAY_REGS_END;
+
+ adev->acp.acp_res[4].name = "acp2x_dma_irq";
+ adev->acp.acp_res[4].flags = IORESOURCE_IRQ;
+ adev->acp.acp_res[4].start = amdgpu_irq_create_mapping(adev, 162);
+ adev->acp.acp_res[4].end = adev->acp.acp_res[4].start;
adev->acp.acp_cell[0].name = "acp_audio_dma";
- adev->acp.acp_cell[0].num_resources = 4;
+ adev->acp.acp_cell[0].num_resources = 5;
adev->acp.acp_cell[0].resources = &adev->acp.acp_res[0];
adev->acp.acp_cell[0].platform_data = &adev->asic_type;
adev->acp.acp_cell[0].pdata_size = sizeof(adev->asic_type);
@@ -396,6 +418,12 @@ static int acp_hw_init(void *handle)
adev->acp.acp_cell[2].platform_data = &i2s_pdata[1];
adev->acp.acp_cell[2].pdata_size = sizeof(struct i2s_platform_data);
+ adev->acp.acp_cell[3].name = "designware-i2s";
+ adev->acp.acp_cell[3].num_resources = 1;
+ adev->acp.acp_cell[3].resources = &adev->acp.acp_res[3];
+ adev->acp.acp_cell[3].platform_data = &i2s_pdata[2];
+ adev->acp.acp_cell[3].pdata_size = sizeof(struct i2s_platform_data);
+
r = mfd_add_hotplug_devices(adev->acp.parent, adev->acp.acp_cell,
ACP_DEVS);
if (r)
@@ -451,7 +479,6 @@ static int acp_hw_init(void *handle)
val = cgs_read_register(adev->acp.cgs_device, mmACP_SOFT_RESET);
val &= ~ACP_SOFT_RESET__SoftResetAud_MASK;
cgs_write_register(adev->acp.cgs_device, mmACP_SOFT_RESET, val);
-
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
index 0d8c3fc6eace..353993218f21 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_acpi.c
@@ -364,7 +364,6 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
struct acpi_bus_event *event)
{
struct amdgpu_atif *atif = adev->atif;
- struct atif_sbios_requests req;
int count;
DRM_DEBUG_DRIVER("event, device_class = %s, type = %#x\n",
@@ -379,42 +378,48 @@ static int amdgpu_atif_handler(struct amdgpu_device *adev,
/* Not our event */
return NOTIFY_DONE;
- /* Check pending SBIOS requests */
- count = amdgpu_atif_get_sbios_requests(atif, &req);
+ if (atif->functions.sbios_requests) {
+ struct atif_sbios_requests req;
- if (count <= 0)
- return NOTIFY_DONE;
+ /* Check pending SBIOS requests */
+ count = amdgpu_atif_get_sbios_requests(atif, &req);
+
+ if (count <= 0)
+ return NOTIFY_DONE;
- DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
+ DRM_DEBUG_DRIVER("ATIF: %d pending SBIOS requests\n", count);
- if (req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) {
- struct amdgpu_encoder *enc = atif->encoder_for_bl;
+ /* todo: add DC handling */
+ if ((req.pending & ATIF_PANEL_BRIGHTNESS_CHANGE_REQUEST) &&
+ !amdgpu_device_has_dc_support(adev)) {
+ struct amdgpu_encoder *enc = atif->encoder_for_bl;
- if (enc) {
- struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
+ if (enc) {
+ struct amdgpu_encoder_atom_dig *dig = enc->enc_priv;
- DRM_DEBUG_DRIVER("Changing brightness to %d\n",
- req.backlight_level);
+ DRM_DEBUG_DRIVER("Changing brightness to %d\n",
+ req.backlight_level);
- amdgpu_display_backlight_set_level(adev, enc, req.backlight_level);
+ amdgpu_display_backlight_set_level(adev, enc, req.backlight_level);
#if defined(CONFIG_BACKLIGHT_CLASS_DEVICE) || defined(CONFIG_BACKLIGHT_CLASS_DEVICE_MODULE)
- backlight_force_update(dig->bl_dev,
- BACKLIGHT_UPDATE_HOTKEY);
+ backlight_force_update(dig->bl_dev,
+ BACKLIGHT_UPDATE_HOTKEY);
#endif
+ }
}
- }
- if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
- if ((adev->flags & AMD_IS_PX) &&
- amdgpu_atpx_dgpu_req_power_for_displays()) {
- pm_runtime_get_sync(adev->ddev->dev);
- /* Just fire off a uevent and let userspace tell us what to do */
- drm_helper_hpd_irq_event(adev->ddev);
- pm_runtime_mark_last_busy(adev->ddev->dev);
- pm_runtime_put_autosuspend(adev->ddev->dev);
+ if (req.pending & ATIF_DGPU_DISPLAY_EVENT) {
+ if ((adev->flags & AMD_IS_PX) &&
+ amdgpu_atpx_dgpu_req_power_for_displays()) {
+ pm_runtime_get_sync(adev->ddev->dev);
+ /* Just fire off a uevent and let userspace tell us what to do */
+ drm_helper_hpd_irq_event(adev->ddev);
+ pm_runtime_mark_last_busy(adev->ddev->dev);
+ pm_runtime_put_autosuspend(adev->ddev->dev);
+ }
}
+ /* TODO: check other events */
}
- /* TODO: check other events */
/* We've handled the event, stop the notifier chain. The ACPI interface
* overloads ACPI_VIDEO_NOTIFY_PROBE, we don't want to send that to
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
index e3ed08dca7b7..f8bbbb3a9504 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.c
@@ -243,6 +243,33 @@ int amdgpu_amdkfd_resume(struct amdgpu_device *adev)
return r;
}
+int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (adev->kfd)
+ r = kgd2kfd->pre_reset(adev->kfd);
+
+ return r;
+}
+
+int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev)
+{
+ int r = 0;
+
+ if (adev->kfd)
+ r = kgd2kfd->post_reset(adev->kfd);
+
+ return r;
+}
+
+void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ amdgpu_device_gpu_recover(adev, NULL, false);
+}
+
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
void **cpu_ptr)
@@ -461,6 +488,14 @@ err:
return ret;
}
+void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle)
+{
+ struct amdgpu_device *adev = (struct amdgpu_device *)kgd;
+
+ amdgpu_dpm_switch_power_profile(adev,
+ PP_SMC_POWER_PROFILE_COMPUTE, !idle);
+}
+
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid)
{
if (adev->kfd) {
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
index a8418a3f4e9d..2f379c183ed2 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd.h
@@ -119,6 +119,7 @@ int amdgpu_amdkfd_evict_userptr(struct kgd_mem *mem, struct mm_struct *mm);
int amdgpu_amdkfd_submit_ib(struct kgd_dev *kgd, enum kgd_engine_type engine,
uint32_t vmid, uint64_t gpu_addr,
uint32_t *ib_cmd, uint32_t ib_len);
+void amdgpu_amdkfd_set_compute_idle(struct kgd_dev *kgd, bool idle);
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void);
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void);
@@ -126,6 +127,12 @@ struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void);
bool amdgpu_amdkfd_is_kfd_vmid(struct amdgpu_device *adev, u32 vmid);
+int amdgpu_amdkfd_pre_reset(struct amdgpu_device *adev);
+
+int amdgpu_amdkfd_post_reset(struct amdgpu_device *adev);
+
+void amdgpu_amdkfd_gpu_reset(struct kgd_dev *kgd);
+
/* Shared API */
int alloc_gtt_mem(struct kgd_dev *kgd, size_t size,
void **mem_obj, uint64_t *gpu_addr,
@@ -183,6 +190,9 @@ int amdgpu_amdkfd_gpuvm_map_gtt_bo_to_kernel(struct kgd_dev *kgd,
int amdgpu_amdkfd_gpuvm_restore_process_bos(void *process_info,
struct dma_fence **ef);
+int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
+ struct kfd_vm_fault_info *info);
+
void amdgpu_amdkfd_gpuvm_init_mem_limits(void);
void amdgpu_amdkfd_unreserve_system_memory_limit(struct amdgpu_bo *bo);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
index ea79908dac4c..ea3f698aef5e 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v7.c
@@ -145,6 +145,7 @@ static void set_vm_context_page_table_base(struct kgd_dev *kgd, uint32_t vmid,
uint32_t page_table_base);
static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid);
static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid);
+static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd);
/* Because of REG_GET_FIELD() being used, we put this function in the
* asic specific file.
@@ -216,6 +217,10 @@ static const struct kfd2kgd_calls kfd2kgd = {
.invalidate_tlbs = invalidate_tlbs,
.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
.submit_ib = amdgpu_amdkfd_submit_ib,
+ .get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
+ .read_vmid_from_vmfault_reg = read_vmid_from_vmfault_reg,
+ .gpu_recover = amdgpu_amdkfd_gpu_reset,
+ .set_compute_idle = amdgpu_amdkfd_set_compute_idle
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_7_get_functions(void)
@@ -571,6 +576,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
unsigned long flags, end_jiffies;
int retry;
+ if (adev->in_gpu_reset)
+ return -EIO;
+
acquire_queue(kgd, pipe_id, queue_id);
WREG32(mmCP_HQD_PQ_DOORBELL_CONTROL, 0);
@@ -882,6 +890,9 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
int vmid;
unsigned int tmp;
+ if (adev->in_gpu_reset)
+ return -EIO;
+
for (vmid = 0; vmid < 16; vmid++) {
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
continue;
@@ -911,3 +922,19 @@ static int invalidate_tlbs_vmid(struct kgd_dev *kgd, uint16_t vmid)
RREG32(mmVM_INVALIDATE_RESPONSE);
return 0;
}
+
+ /**
+ * read_vmid_from_vmfault_reg - read vmid from register
+ *
+ * @kgd: kgd device pointer
+ *
+ * Read the vmid from the VM fault status register (CIK).
+ */
+static uint32_t read_vmid_from_vmfault_reg(struct kgd_dev *kgd)
+{
+ struct amdgpu_device *adev = get_amdgpu_device(kgd);
+
+ uint32_t status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
+
+ return REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS, VMID);
+}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
index 19dd665e7307..f6e53e9352bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v8.c
@@ -176,6 +176,9 @@ static const struct kfd2kgd_calls kfd2kgd = {
.invalidate_tlbs = invalidate_tlbs,
.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
.submit_ib = amdgpu_amdkfd_submit_ib,
+ .get_vm_fault_info = amdgpu_amdkfd_gpuvm_get_vm_fault_info,
+ .gpu_recover = amdgpu_amdkfd_gpu_reset,
+ .set_compute_idle = amdgpu_amdkfd_set_compute_idle
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_8_0_get_functions(void)
@@ -568,6 +571,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
int retry;
struct vi_mqd *m = get_mqd(mqd);
+ if (adev->in_gpu_reset)
+ return -EIO;
+
acquire_queue(kgd, pipe_id, queue_id);
if (m->cp_hqd_vmid == 0)
@@ -844,6 +850,9 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
int vmid;
unsigned int tmp;
+ if (adev->in_gpu_reset)
+ return -EIO;
+
for (vmid = 0; vmid < 16; vmid++) {
if (!amdgpu_amdkfd_is_kfd_vmid(adev, vmid))
continue;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
index 1db60aa5b7f0..8efedfcb9dfc 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gfx_v9.c
@@ -213,6 +213,8 @@ static const struct kfd2kgd_calls kfd2kgd = {
.invalidate_tlbs = invalidate_tlbs,
.invalidate_tlbs_vmid = invalidate_tlbs_vmid,
.submit_ib = amdgpu_amdkfd_submit_ib,
+ .gpu_recover = amdgpu_amdkfd_gpu_reset,
+ .set_compute_idle = amdgpu_amdkfd_set_compute_idle
};
struct kfd2kgd_calls *amdgpu_amdkfd_gfx_9_0_get_functions(void)
@@ -679,6 +681,9 @@ static int kgd_hqd_destroy(struct kgd_dev *kgd, void *mqd,
uint32_t temp;
struct v9_mqd *m = get_mqd(mqd);
+ if (adev->in_gpu_reset)
+ return -EIO;
+
acquire_queue(kgd, pipe_id, queue_id);
if (m->cp_hqd_vmid == 0)
@@ -866,6 +871,9 @@ static int invalidate_tlbs(struct kgd_dev *kgd, uint16_t pasid)
int vmid;
struct amdgpu_ring *ring = &adev->gfx.kiq.ring;
+ if (adev->in_gpu_reset)
+ return -EIO;
+
if (ring->ready)
return invalidate_tlbs_with_kiq(adev, pasid);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
index 079af8ac2636..f92597c292fe 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_amdkfd_gpuvm.c
@@ -206,11 +206,9 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
struct amdgpu_amdkfd_fence ***ef_list,
unsigned int *ef_count)
{
- struct reservation_object_list *fobj;
- struct reservation_object *resv;
- unsigned int i = 0, j = 0, k = 0, shared_count;
- unsigned int count = 0;
- struct amdgpu_amdkfd_fence **fence_list;
+ struct reservation_object *resv = bo->tbo.resv;
+ struct reservation_object_list *old, *new;
+ unsigned int i, j, k;
if (!ef && !ef_list)
return -EINVAL;
@@ -220,76 +218,67 @@ static int amdgpu_amdkfd_remove_eviction_fence(struct amdgpu_bo *bo,
*ef_count = 0;
}
- resv = bo->tbo.resv;
- fobj = reservation_object_get_list(resv);
-
- if (!fobj)
+ old = reservation_object_get_list(resv);
+ if (!old)
return 0;
- preempt_disable();
- write_seqcount_begin(&resv->seq);
+ new = kmalloc(offsetof(typeof(*new), shared[old->shared_max]),
+ GFP_KERNEL);
+ if (!new)
+ return -ENOMEM;
- /* Go through all the shared fences in the resevation object. If
- * ef is specified and it exists in the list, remove it and reduce the
- * count. If ef is not specified, then get the count of eviction fences
- * present.
+ /* Go through all the shared fences in the reservation object and sort
+ * the interesting ones to the end of the list.
*/
- shared_count = fobj->shared_count;
- for (i = 0; i < shared_count; ++i) {
+ for (i = 0, j = old->shared_count, k = 0; i < old->shared_count; ++i) {
struct dma_fence *f;
- f = rcu_dereference_protected(fobj->shared[i],
+ f = rcu_dereference_protected(old->shared[i],
reservation_object_held(resv));
- if (ef) {
- if (f->context == ef->base.context) {
- dma_fence_put(f);
- fobj->shared_count--;
- } else {
- RCU_INIT_POINTER(fobj->shared[j++], f);
- }
- } else if (to_amdgpu_amdkfd_fence(f))
- count++;
+ if ((ef && f->context == ef->base.context) ||
+ (!ef && to_amdgpu_amdkfd_fence(f)))
+ RCU_INIT_POINTER(new->shared[--j], f);
+ else
+ RCU_INIT_POINTER(new->shared[k++], f);
}
- write_seqcount_end(&resv->seq);
- preempt_enable();
-
- if (ef || !count)
- return 0;
-
- /* Alloc memory for count number of eviction fence pointers. Fill the
- * ef_list array and ef_count
- */
- fence_list = kcalloc(count, sizeof(struct amdgpu_amdkfd_fence *),
- GFP_KERNEL);
- if (!fence_list)
- return -ENOMEM;
-
- preempt_disable();
- write_seqcount_begin(&resv->seq);
+ new->shared_max = old->shared_max;
+ new->shared_count = k;
- j = 0;
- for (i = 0; i < shared_count; ++i) {
- struct dma_fence *f;
- struct amdgpu_amdkfd_fence *efence;
+ if (!ef) {
+ unsigned int count = old->shared_count - j;
- f = rcu_dereference_protected(fobj->shared[i],
- reservation_object_held(resv));
+ /* Allocate memory for count eviction fence pointers and
+ * fill the ef_list array and ef_count.
+ */
+ *ef_list = kcalloc(count, sizeof(**ef_list), GFP_KERNEL);
+ *ef_count = count;
- efence = to_amdgpu_amdkfd_fence(f);
- if (efence) {
- fence_list[k++] = efence;
- fobj->shared_count--;
- } else {
- RCU_INIT_POINTER(fobj->shared[j++], f);
+ if (!*ef_list) {
+ kfree(new);
+ return -ENOMEM;
}
}
+ /* Install the new fence list, seqcount provides the barriers */
+ preempt_disable();
+ write_seqcount_begin(&resv->seq);
+ RCU_INIT_POINTER(resv->fence, new);
write_seqcount_end(&resv->seq);
preempt_enable();
- *ef_list = fence_list;
- *ef_count = k;
+ /* Drop the references to the removed fences or move them to ef_list */
+ for (i = j, k = 0; i < old->shared_count; ++i) {
+ struct dma_fence *f;
+
+ f = rcu_dereference_protected(new->shared[i],
+ reservation_object_held(resv));
+ if (!ef)
+ (*ef_list)[k++] = to_amdgpu_amdkfd_fence(f);
+ else
+ dma_fence_put(f);
+ }
+ kfree_rcu(old, rcu);
return 0;
}
@@ -334,7 +323,7 @@ static int amdgpu_amdkfd_bo_validate(struct amdgpu_bo *bo, uint32_t domain,
"Called with userptr BO"))
return -EINVAL;
- amdgpu_ttm_placement_from_domain(bo, domain);
+ amdgpu_bo_placement_from_domain(bo, domain);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
@@ -622,7 +611,7 @@ static int init_user_pages(struct kgd_mem *mem, struct mm_struct *mm,
pr_err("%s: Failed to reserve BO\n", __func__);
goto release_out;
}
- amdgpu_ttm_placement_from_domain(bo, mem->domain);
+ amdgpu_bo_placement_from_domain(bo, mem->domain);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret)
pr_err("%s: failed to validate BO\n", __func__);
@@ -1621,6 +1610,20 @@ bo_reserve_failed:
return ret;
}
+int amdgpu_amdkfd_gpuvm_get_vm_fault_info(struct kgd_dev *kgd,
+ struct kfd_vm_fault_info *mem)
+{
+ struct amdgpu_device *adev;
+
+ adev = (struct amdgpu_device *)kgd;
+ if (atomic_read(&adev->gmc.vm_fault_info_updated) == 1) {
+ *mem = *adev->gmc.vm_fault_info;
+ mb();
+ atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+ }
+ return 0;
+}
+
/* Evict a userptr BO by stopping the queues if necessary
*
* Runs in MMU notifier, may be in RECLAIM_FS context. This means it
@@ -1680,7 +1683,7 @@ static int update_invalid_user_pages(struct amdkfd_process_info *process_info,
if (amdgpu_bo_reserve(bo, true))
return -EAGAIN;
- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_CPU);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
amdgpu_bo_unreserve(bo);
if (ret) {
@@ -1824,7 +1827,7 @@ static int validate_invalid_user_pages(struct amdkfd_process_info *process_info)
if (mem->user_pages[0]) {
amdgpu_ttm_tt_set_user_pages(bo->tbo.ttm,
mem->user_pages);
- amdgpu_ttm_placement_from_domain(bo, mem->domain);
+ amdgpu_bo_placement_from_domain(bo, mem->domain);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (ret) {
pr_err("%s: failed to validate BO\n", __func__);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
index b33f1680c9a3..a028661d9e20 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_atpx_handler.c
@@ -575,6 +575,7 @@ static const struct amdgpu_px_quirk amdgpu_px_quirk_list[] = {
{ 0x1002, 0x6900, 0x1002, 0x0124, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x1028, 0x0812, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0x1002, 0x6900, 0x1028, 0x0813, AMDGPU_PX_QUIRK_FORCE_ATPX },
+ { 0x1002, 0x6900, 0x1025, 0x125A, AMDGPU_PX_QUIRK_FORCE_ATPX },
{ 0, 0, 0, 0, 0 },
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
index 7679c068c89a..d472a2c8399f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.c
@@ -35,83 +35,53 @@
#define AMDGPU_BO_LIST_MAX_PRIORITY 32u
#define AMDGPU_BO_LIST_NUM_BUCKETS (AMDGPU_BO_LIST_MAX_PRIORITY + 1)
-static int amdgpu_bo_list_set(struct amdgpu_device *adev,
- struct drm_file *filp,
- struct amdgpu_bo_list *list,
- struct drm_amdgpu_bo_list_entry *info,
- unsigned num_entries);
+static void amdgpu_bo_list_free_rcu(struct rcu_head *rcu)
+{
+ struct amdgpu_bo_list *list = container_of(rcu, struct amdgpu_bo_list,
+ rhead);
+
+ kvfree(list);
+}
-static void amdgpu_bo_list_release_rcu(struct kref *ref)
+static void amdgpu_bo_list_free(struct kref *ref)
{
- unsigned i;
struct amdgpu_bo_list *list = container_of(ref, struct amdgpu_bo_list,
refcount);
+ struct amdgpu_bo_list_entry *e;
- for (i = 0; i < list->num_entries; ++i)
- amdgpu_bo_unref(&list->array[i].robj);
+ amdgpu_bo_list_for_each_entry(e, list)
+ amdgpu_bo_unref(&e->robj);
- mutex_destroy(&list->lock);
- kvfree(list->array);
- kfree_rcu(list, rhead);
+ call_rcu(&list->rhead, amdgpu_bo_list_free_rcu);
}
-int amdgpu_bo_list_create(struct amdgpu_device *adev,
- struct drm_file *filp,
- struct drm_amdgpu_bo_list_entry *info,
- unsigned num_entries,
- struct amdgpu_bo_list **list_out)
+int amdgpu_bo_list_create(struct amdgpu_device *adev, struct drm_file *filp,
+ struct drm_amdgpu_bo_list_entry *info,
+ unsigned num_entries, struct amdgpu_bo_list **result)
{
+ unsigned last_entry = 0, first_userptr = num_entries;
+ struct amdgpu_bo_list_entry *array;
struct amdgpu_bo_list *list;
+ uint64_t total_size = 0;
+ size_t size;
+ unsigned i;
int r;
+ if (num_entries > SIZE_MAX / sizeof(struct amdgpu_bo_list_entry))
+ return -EINVAL;
- list = kzalloc(sizeof(struct amdgpu_bo_list), GFP_KERNEL);
+ size = sizeof(struct amdgpu_bo_list);
+ size += num_entries * sizeof(struct amdgpu_bo_list_entry);
+ list = kvmalloc(size, GFP_KERNEL);
if (!list)
return -ENOMEM;
- /* initialize bo list*/
- mutex_init(&list->lock);
kref_init(&list->refcount);
- r = amdgpu_bo_list_set(adev, filp, list, info, num_entries);
- if (r) {
- kfree(list);
- return r;
- }
-
- *list_out = list;
- return 0;
-}
-
-static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
-{
- struct amdgpu_bo_list *list;
+ list->gds_obj = adev->gds.gds_gfx_bo;
+ list->gws_obj = adev->gds.gws_gfx_bo;
+ list->oa_obj = adev->gds.oa_gfx_bo;
- mutex_lock(&fpriv->bo_list_lock);
- list = idr_remove(&fpriv->bo_list_handles, id);
- mutex_unlock(&fpriv->bo_list_lock);
- if (list)
- kref_put(&list->refcount, amdgpu_bo_list_release_rcu);
-}
-
-static int amdgpu_bo_list_set(struct amdgpu_device *adev,
- struct drm_file *filp,
- struct amdgpu_bo_list *list,
- struct drm_amdgpu_bo_list_entry *info,
- unsigned num_entries)
-{
- struct amdgpu_bo_list_entry *array;
- struct amdgpu_bo *gds_obj = adev->gds.gds_gfx_bo;
- struct amdgpu_bo *gws_obj = adev->gds.gws_gfx_bo;
- struct amdgpu_bo *oa_obj = adev->gds.oa_gfx_bo;
-
- unsigned last_entry = 0, first_userptr = num_entries;
- unsigned i;
- int r;
- unsigned long total_size = 0;
-
- array = kvmalloc_array(num_entries, sizeof(struct amdgpu_bo_list_entry), GFP_KERNEL);
- if (!array)
- return -ENOMEM;
+ array = amdgpu_bo_list_array_entry(list, 0);
memset(array, 0, num_entries * sizeof(struct amdgpu_bo_list_entry));
for (i = 0; i < num_entries; ++i) {
@@ -148,59 +118,56 @@ static int amdgpu_bo_list_set(struct amdgpu_device *adev,
entry->tv.shared = !entry->robj->prime_shared_count;
if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GDS)
- gds_obj = entry->robj;
+ list->gds_obj = entry->robj;
if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_GWS)
- gws_obj = entry->robj;
+ list->gws_obj = entry->robj;
if (entry->robj->preferred_domains == AMDGPU_GEM_DOMAIN_OA)
- oa_obj = entry->robj;
+ list->oa_obj = entry->robj;
total_size += amdgpu_bo_size(entry->robj);
trace_amdgpu_bo_list_set(list, entry->robj);
}
- for (i = 0; i < list->num_entries; ++i)
- amdgpu_bo_unref(&list->array[i].robj);
-
- kvfree(list->array);
-
- list->gds_obj = gds_obj;
- list->gws_obj = gws_obj;
- list->oa_obj = oa_obj;
list->first_userptr = first_userptr;
- list->array = array;
list->num_entries = num_entries;
trace_amdgpu_cs_bo_status(list->num_entries, total_size);
+
+ *result = list;
return 0;
error_free:
while (i--)
amdgpu_bo_unref(&array[i].robj);
- kvfree(array);
+ kvfree(list);
return r;
+
}
-struct amdgpu_bo_list *
-amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id)
+static void amdgpu_bo_list_destroy(struct amdgpu_fpriv *fpriv, int id)
{
- struct amdgpu_bo_list *result;
+ struct amdgpu_bo_list *list;
+ mutex_lock(&fpriv->bo_list_lock);
+ list = idr_remove(&fpriv->bo_list_handles, id);
+ mutex_unlock(&fpriv->bo_list_lock);
+ if (list)
+ kref_put(&list->refcount, amdgpu_bo_list_free);
+}
+
+int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
+ struct amdgpu_bo_list **result)
+{
rcu_read_lock();
- result = idr_find(&fpriv->bo_list_handles, id);
+ *result = idr_find(&fpriv->bo_list_handles, id);
- if (result) {
- if (kref_get_unless_zero(&result->refcount)) {
- rcu_read_unlock();
- mutex_lock(&result->lock);
- } else {
- rcu_read_unlock();
- result = NULL;
- }
- } else {
+ if (*result && kref_get_unless_zero(&(*result)->refcount)) {
rcu_read_unlock();
+ return 0;
}
- return result;
+ rcu_read_unlock();
+ return -ENOENT;
}
void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
@@ -211,6 +178,7 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
* concatenated in descending order.
*/
struct list_head bucket[AMDGPU_BO_LIST_NUM_BUCKETS];
+ struct amdgpu_bo_list_entry *e;
unsigned i;
for (i = 0; i < AMDGPU_BO_LIST_NUM_BUCKETS; i++)
@@ -221,14 +189,13 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
* in the list, the sort mustn't change the ordering of buffers
* with the same priority, i.e. it must be stable.
*/
- for (i = 0; i < list->num_entries; i++) {
- unsigned priority = list->array[i].priority;
+ amdgpu_bo_list_for_each_entry(e, list) {
+ unsigned priority = e->priority;
- if (!list->array[i].robj->parent)
- list_add_tail(&list->array[i].tv.head,
- &bucket[priority]);
+ if (!e->robj->parent)
+ list_add_tail(&e->tv.head, &bucket[priority]);
- list->array[i].user_pages = NULL;
+ e->user_pages = NULL;
}
/* Connect the sorted buckets in the output list. */
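
The comment above describes a stable bucket sort: each entry is appended to the list for its priority, and the per-priority lists are then concatenated, so buffers of equal priority keep their submission order. A freestanding sketch of the same idea with plain arrays (toy data, not driver code):

    #include <stdio.h>

    #define NUM_BUCKETS 4

    struct item { int priority; const char *name; };

    int main(void)
    {
        struct item in[] = { {2, "a"}, {0, "b"}, {2, "c"}, {1, "d"} };
        const struct item *bucket[NUM_BUCKETS][8];
        int count[NUM_BUCKETS] = { 0 };

        /* distribute: appending preserves input order within a priority */
        for (unsigned i = 0; i < sizeof(in) / sizeof(in[0]); i++) {
            int p = in[i].priority;
            bucket[p][count[p]++] = &in[i];
        }

        /* concatenate the buckets into one ordered sequence: b d a c */
        for (int p = 0; p < NUM_BUCKETS; p++)
            for (int j = 0; j < count[p]; j++)
                printf("%s ", bucket[p][j]->name);
        printf("\n");
        return 0;
    }
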
@@ -238,20 +205,7 @@ void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
void amdgpu_bo_list_put(struct amdgpu_bo_list *list)
{
- mutex_unlock(&list->lock);
- kref_put(&list->refcount, amdgpu_bo_list_release_rcu);
-}
-
-void amdgpu_bo_list_free(struct amdgpu_bo_list *list)
-{
- unsigned i;
-
- for (i = 0; i < list->num_entries; ++i)
- amdgpu_bo_unref(&list->array[i].robj);
-
- mutex_destroy(&list->lock);
- kvfree(list->array);
- kfree(list);
+ kref_put(&list->refcount, amdgpu_bo_list_free);
}
int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
@@ -304,7 +258,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
union drm_amdgpu_bo_list *args = data;
uint32_t handle = args->in.list_handle;
struct drm_amdgpu_bo_list_entry *info = NULL;
- struct amdgpu_bo_list *list;
+ struct amdgpu_bo_list *list, *old;
int r;
r = amdgpu_bo_create_list_entry_array(&args->in, &info);
@@ -322,7 +276,7 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
r = idr_alloc(&fpriv->bo_list_handles, list, 1, 0, GFP_KERNEL);
mutex_unlock(&fpriv->bo_list_lock);
if (r < 0) {
- amdgpu_bo_list_free(list);
+ amdgpu_bo_list_put(list);
return r;
}
@@ -335,17 +289,22 @@ int amdgpu_bo_list_ioctl(struct drm_device *dev, void *data,
break;
case AMDGPU_BO_LIST_OP_UPDATE:
- r = -ENOENT;
- list = amdgpu_bo_list_get(fpriv, handle);
- if (!list)
+ r = amdgpu_bo_list_create(adev, filp, info, args->in.bo_number,
+ &list);
+ if (r)
goto error_free;
- r = amdgpu_bo_list_set(adev, filp, list, info,
- args->in.bo_number);
- amdgpu_bo_list_put(list);
- if (r)
+ mutex_lock(&fpriv->bo_list_lock);
+ old = idr_replace(&fpriv->bo_list_handles, list, handle);
+ mutex_unlock(&fpriv->bo_list_lock);
+
+ if (IS_ERR(old)) {
+ amdgpu_bo_list_put(list);
+ r = PTR_ERR(old);
goto error_free;
+ }
+ amdgpu_bo_list_put(old);
break;
default:
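
Note the lookup idiom in amdgpu_bo_list_get() above: idr_find() under rcu_read_lock() plus kref_get_unless_zero(), so a list that is concurrently being freed is reported as -ENOENT instead of being resurrected. The primitive at its core is a compare-and-swap loop; a minimal userspace analogue with C11 atomics (illustrative only, not the kernel implementation):

    #include <stdatomic.h>
    #include <stdbool.h>
    #include <stdio.h>

    /* Take a reference only if the object is still alive, i.e. the
     * count has not already dropped to zero on another thread. */
    static bool ref_get_unless_zero(atomic_int *refcount)
    {
        int old = atomic_load(refcount);

        while (old != 0) {
            /* on failure the CAS reloads the current value into old */
            if (atomic_compare_exchange_weak(refcount, &old, old + 1))
                return true;
        }
        return false;
    }

    int main(void)
    {
        atomic_int live = 1, dying = 0;

        printf("live:  %d\n", ref_get_unless_zero(&live));  /* 1 */
        printf("dying: %d\n", ref_get_unless_zero(&dying)); /* 0 */
        return 0;
    }
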
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
new file mode 100644
index 000000000000..61b089768e1c
--- /dev/null
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_bo_list.h
@@ -0,0 +1,85 @@
+/*
+ * Copyright 2018 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ */
+#ifndef __AMDGPU_BO_LIST_H__
+#define __AMDGPU_BO_LIST_H__
+
+#include <drm/ttm/ttm_execbuf_util.h>
+#include <drm/amdgpu_drm.h>
+
+struct amdgpu_device;
+struct amdgpu_bo;
+struct amdgpu_bo_va;
+struct amdgpu_fpriv;
+
+struct amdgpu_bo_list_entry {
+ struct amdgpu_bo *robj;
+ struct ttm_validate_buffer tv;
+ struct amdgpu_bo_va *bo_va;
+ uint32_t priority;
+ struct page **user_pages;
+ int user_invalidated;
+};
+
+struct amdgpu_bo_list {
+ struct rcu_head rhead;
+ struct kref refcount;
+ struct amdgpu_bo *gds_obj;
+ struct amdgpu_bo *gws_obj;
+ struct amdgpu_bo *oa_obj;
+ unsigned first_userptr;
+ unsigned num_entries;
+};
+
+int amdgpu_bo_list_get(struct amdgpu_fpriv *fpriv, int id,
+ struct amdgpu_bo_list **result);
+void amdgpu_bo_list_get_list(struct amdgpu_bo_list *list,
+ struct list_head *validated);
+void amdgpu_bo_list_put(struct amdgpu_bo_list *list);
+int amdgpu_bo_create_list_entry_array(struct drm_amdgpu_bo_list_in *in,
+ struct drm_amdgpu_bo_list_entry **info_param);
+
+int amdgpu_bo_list_create(struct amdgpu_device *adev,
+ struct drm_file *filp,
+ struct drm_amdgpu_bo_list_entry *info,
+ unsigned num_entries,
+ struct amdgpu_bo_list **list);
+
+static inline struct amdgpu_bo_list_entry *
+amdgpu_bo_list_array_entry(struct amdgpu_bo_list *list, unsigned index)
+{
+ struct amdgpu_bo_list_entry *array = (void *)&list[1];
+
+ return &array[index];
+}
+
+#define amdgpu_bo_list_for_each_entry(e, list) \
+ for (e = amdgpu_bo_list_array_entry(list, 0); \
+ e != amdgpu_bo_list_array_entry(list, (list)->num_entries); \
+ ++e)
+
+#define amdgpu_bo_list_for_each_userptr_entry(e, list) \
+ for (e = amdgpu_bo_list_array_entry(list, (list)->first_userptr); \
+ e != amdgpu_bo_list_array_entry(list, (list)->num_entries); \
+ ++e)
+
+#endif
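
The header above packs the entry array into the same allocation as the list: amdgpu_bo_list_array_entry() points just past the struct, and the two iteration macros walk that range. A self-contained sketch of the layout, including the overflow guard such a combined allocation needs (toy types; the real entries carry BO pointers):

    #include <stdint.h>
    #include <stdio.h>
    #include <stdlib.h>

    struct entry { uint32_t priority; };

    struct list_hdr {
        unsigned num_entries;
        /* the entry array lives right behind the header */
    };

    static struct entry *entry_array(struct list_hdr *l)
    {
        return (struct entry *)&l[1];
    }

    #define for_each_entry(e, l) \
        for (e = entry_array(l); e != entry_array(l) + (l)->num_entries; ++e)

    static struct list_hdr *list_create(unsigned n)
    {
        struct list_hdr *l;

        /* guard the size computation against overflow before allocating */
        if (n > (SIZE_MAX - sizeof(*l)) / sizeof(struct entry))
            return NULL;

        l = calloc(1, sizeof(*l) + n * sizeof(struct entry));
        if (l)
            l->num_entries = n;
        return l;
    }

    int main(void)
    {
        struct list_hdr *l = list_create(3);
        struct entry *e;
        unsigned i = 0;

        if (!l)
            return 1;
        for_each_entry(e, l)
            e->priority = i++;
        for_each_entry(e, l)
            printf("entry priority %u\n", e->priority);
        free(l);
        return 0;
    }
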
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
index 7c5cc33d0cda..b6e9df11115d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_cs.c
@@ -419,7 +419,7 @@ static int amdgpu_cs_bo_validate(struct amdgpu_cs_parser *p,
}
retry:
- amdgpu_ttm_placement_from_domain(bo, domain);
+ amdgpu_bo_placement_from_domain(bo, domain);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
p->bytes_moved += ctx.bytes_moved;
@@ -478,7 +478,7 @@ static bool amdgpu_cs_try_evict(struct amdgpu_cs_parser *p,
update_bytes_moved_vis =
!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
amdgpu_bo_in_cpu_visible_vram(bo);
- amdgpu_ttm_placement_from_domain(bo, other);
+ amdgpu_bo_placement_from_domain(bo, other);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
p->bytes_moved += ctx.bytes_moved;
if (update_bytes_moved_vis)
@@ -532,8 +532,8 @@ static int amdgpu_cs_list_validate(struct amdgpu_cs_parser *p,
/* Check if we have user pages and nobody bound the BO already */
if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm) &&
lobj->user_pages) {
- amdgpu_ttm_placement_from_domain(bo,
- AMDGPU_GEM_DOMAIN_CPU);
+ amdgpu_bo_placement_from_domain(bo,
+ AMDGPU_GEM_DOMAIN_CPU);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
return r;
@@ -561,28 +561,38 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct amdgpu_vm *vm = &fpriv->vm;
struct amdgpu_bo_list_entry *e;
struct list_head duplicates;
- unsigned i, tries = 10;
struct amdgpu_bo *gds;
struct amdgpu_bo *gws;
struct amdgpu_bo *oa;
+ unsigned tries = 10;
int r;
INIT_LIST_HEAD(&p->validated);
/* p->bo_list could already be assigned if AMDGPU_CHUNK_ID_BO_HANDLES is present */
- if (!p->bo_list)
- p->bo_list = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle);
- else
- mutex_lock(&p->bo_list->lock);
+ if (cs->in.bo_list_handle) {
+ if (p->bo_list)
+ return -EINVAL;
- if (p->bo_list) {
- amdgpu_bo_list_get_list(p->bo_list, &p->validated);
- if (p->bo_list->first_userptr != p->bo_list->num_entries)
- p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
+ r = amdgpu_bo_list_get(fpriv, cs->in.bo_list_handle,
+ &p->bo_list);
+ if (r)
+ return r;
+ } else if (!p->bo_list) {
+ /* Create an empty bo_list when no handle is provided */
+ r = amdgpu_bo_list_create(p->adev, p->filp, NULL, 0,
+ &p->bo_list);
+ if (r)
+ return r;
}
+ amdgpu_bo_list_get_list(p->bo_list, &p->validated);
+ if (p->bo_list->first_userptr != p->bo_list->num_entries)
+ p->mn = amdgpu_mn_get(p->adev, AMDGPU_MN_TYPE_GFX);
+
INIT_LIST_HEAD(&duplicates);
amdgpu_vm_get_pd_bo(&fpriv->vm, &p->validated, &p->vm_pd);
@@ -591,7 +601,6 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
while (1) {
struct list_head need_pages;
- unsigned i;
r = ttm_eu_reserve_buffers(&p->ticket, &p->validated, true,
&duplicates);
@@ -601,17 +610,9 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
goto error_free_pages;
}
- /* Without a BO list we don't have userptr BOs */
- if (!p->bo_list)
- break;
-
INIT_LIST_HEAD(&need_pages);
- for (i = p->bo_list->first_userptr;
- i < p->bo_list->num_entries; ++i) {
- struct amdgpu_bo *bo;
-
- e = &p->bo_list->array[i];
- bo = e->robj;
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ struct amdgpu_bo *bo = e->robj;
if (amdgpu_ttm_tt_userptr_invalidated(bo->tbo.ttm,
&e->user_invalidated) && e->user_pages) {
@@ -703,23 +704,12 @@ static int amdgpu_cs_parser_bos(struct amdgpu_cs_parser *p,
amdgpu_cs_report_moved_bytes(p->adev, p->bytes_moved,
p->bytes_moved_vis);
- if (p->bo_list) {
- struct amdgpu_vm *vm = &fpriv->vm;
- unsigned i;
+ gds = p->bo_list->gds_obj;
+ gws = p->bo_list->gws_obj;
+ oa = p->bo_list->oa_obj;
- gds = p->bo_list->gds_obj;
- gws = p->bo_list->gws_obj;
- oa = p->bo_list->oa_obj;
- for (i = 0; i < p->bo_list->num_entries; i++) {
- struct amdgpu_bo *bo = p->bo_list->array[i].robj;
-
- p->bo_list->array[i].bo_va = amdgpu_vm_bo_find(vm, bo);
- }
- } else {
- gds = p->adev->gds.gds_gfx_bo;
- gws = p->adev->gds.gws_gfx_bo;
- oa = p->adev->gds.oa_gfx_bo;
- }
+ amdgpu_bo_list_for_each_entry(e, p->bo_list)
+ e->bo_va = amdgpu_vm_bo_find(vm, e->robj);
if (gds) {
p->job->gds_base = amdgpu_bo_gpu_offset(gds);
@@ -747,18 +737,13 @@ error_validate:
error_free_pages:
- if (p->bo_list) {
- for (i = p->bo_list->first_userptr;
- i < p->bo_list->num_entries; ++i) {
- e = &p->bo_list->array[i];
-
- if (!e->user_pages)
- continue;
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ if (!e->user_pages)
+ continue;
- release_pages(e->user_pages,
- e->robj->tbo.ttm->num_pages);
- kvfree(e->user_pages);
- }
+ release_pages(e->user_pages,
+ e->robj->tbo.ttm->num_pages);
+ kvfree(e->user_pages);
}
return r;
@@ -820,12 +805,13 @@ static void amdgpu_cs_parser_fini(struct amdgpu_cs_parser *parser, int error,
static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
{
- struct amdgpu_device *adev = p->adev;
struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
+ struct amdgpu_device *adev = p->adev;
struct amdgpu_vm *vm = &fpriv->vm;
+ struct amdgpu_bo_list_entry *e;
struct amdgpu_bo_va *bo_va;
struct amdgpu_bo *bo;
- int i, r;
+ int r;
r = amdgpu_vm_clear_freed(adev, vm, NULL);
if (r)
@@ -855,29 +841,26 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
return r;
}
- if (p->bo_list) {
- for (i = 0; i < p->bo_list->num_entries; i++) {
- struct dma_fence *f;
-
- /* ignore duplicates */
- bo = p->bo_list->array[i].robj;
- if (!bo)
- continue;
+ amdgpu_bo_list_for_each_entry(e, p->bo_list) {
+ struct dma_fence *f;
- bo_va = p->bo_list->array[i].bo_va;
- if (bo_va == NULL)
- continue;
+ /* ignore duplicates */
+ bo = e->robj;
+ if (!bo)
+ continue;
- r = amdgpu_vm_bo_update(adev, bo_va, false);
- if (r)
- return r;
+ bo_va = e->bo_va;
+ if (bo_va == NULL)
+ continue;
- f = bo_va->last_pt_update;
- r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
- if (r)
- return r;
- }
+ r = amdgpu_vm_bo_update(adev, bo_va, false);
+ if (r)
+ return r;
+ f = bo_va->last_pt_update;
+ r = amdgpu_sync_fence(adev, &p->job->sync, f, false);
+ if (r)
+ return r;
}
r = amdgpu_vm_handle_moved(adev, vm);
@@ -892,15 +875,14 @@ static int amdgpu_bo_vm_update_pte(struct amdgpu_cs_parser *p)
if (r)
return r;
- if (amdgpu_vm_debug && p->bo_list) {
+ if (amdgpu_vm_debug) {
/* Invalidate all BOs to test for userspace bugs */
- for (i = 0; i < p->bo_list->num_entries; i++) {
+ amdgpu_bo_list_for_each_entry(e, p->bo_list) {
/* ignore duplicates */
- bo = p->bo_list->array[i].robj;
- if (!bo)
+ if (!e->robj)
continue;
- amdgpu_vm_bo_invalidate(adev, bo, false);
+ amdgpu_vm_bo_invalidate(adev, e->robj, false);
}
}
@@ -916,7 +898,7 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
int r;
/* Only for UVD/VCE VM emulation */
- if (p->ring->funcs->parse_cs) {
+ if (p->ring->funcs->parse_cs || p->ring->funcs->patch_cs_in_place) {
unsigned i, j;
for (i = 0, j = 0; i < p->nchunks && j < p->job->num_ibs; i++) {
@@ -957,12 +939,20 @@ static int amdgpu_cs_ib_vm_chunk(struct amdgpu_device *adev,
offset = m->start * AMDGPU_GPU_PAGE_SIZE;
kptr += va_start - offset;
- memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
- amdgpu_bo_kunmap(aobj);
-
- r = amdgpu_ring_parse_cs(ring, p, j);
- if (r)
- return r;
+ if (p->ring->funcs->parse_cs) {
+ memcpy(ib->ptr, kptr, chunk_ib->ib_bytes);
+ amdgpu_bo_kunmap(aobj);
+
+ r = amdgpu_ring_parse_cs(ring, p, j);
+ if (r)
+ return r;
+ } else {
+ ib->ptr = (uint32_t *)kptr;
+ r = amdgpu_ring_patch_cs_in_place(ring, p, j);
+ amdgpu_bo_kunmap(aobj);
+ if (r)
+ return r;
+ }
j++;
}
@@ -1022,13 +1012,9 @@ static int amdgpu_cs_ib_fill(struct amdgpu_device *adev,
if (r)
return r;
- if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE) {
- parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT;
- if (!parser->ctx->preamble_presented) {
- parser->job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
- parser->ctx->preamble_presented = true;
- }
- }
+ if (chunk_ib->flags & AMDGPU_IB_FLAG_PREAMBLE)
+ parser->job->preamble_status |=
+ AMDGPU_PREAMBLE_IB_PRESENT;
if (parser->ring && parser->ring != ring)
return -EINVAL;
@@ -1207,36 +1193,32 @@ static void amdgpu_cs_post_dependencies(struct amdgpu_cs_parser *p)
static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
union drm_amdgpu_cs *cs)
{
+ struct amdgpu_fpriv *fpriv = p->filp->driver_priv;
struct amdgpu_ring *ring = p->ring;
struct drm_sched_entity *entity = &p->ctx->rings[ring->idx].entity;
enum drm_sched_priority priority;
+ struct amdgpu_bo_list_entry *e;
struct amdgpu_job *job;
- unsigned i;
uint64_t seq;
int r;
- amdgpu_mn_lock(p->mn);
- if (p->bo_list) {
- for (i = p->bo_list->first_userptr;
- i < p->bo_list->num_entries; ++i) {
- struct amdgpu_bo *bo = p->bo_list->array[i].robj;
-
- if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
- amdgpu_mn_unlock(p->mn);
- return -ERESTARTSYS;
- }
- }
- }
-
job = p->job;
p->job = NULL;
- r = drm_sched_job_init(&job->base, &ring->sched, entity, p->filp);
- if (r) {
- amdgpu_job_free(job);
- amdgpu_mn_unlock(p->mn);
- return r;
+ r = drm_sched_job_init(&job->base, entity, p->filp);
+ if (r)
+ goto error_unlock;
+
+ /* No memory allocation is allowed while holding the mn lock */
+ amdgpu_mn_lock(p->mn);
+ amdgpu_bo_list_for_each_userptr_entry(e, p->bo_list) {
+ struct amdgpu_bo *bo = e->robj;
+
+ if (amdgpu_ttm_tt_userptr_needs_pages(bo->tbo.ttm)) {
+ r = -ERESTARTSYS;
+ goto error_abort;
+ }
}
job->owner = p->filp;
@@ -1253,22 +1235,38 @@ static int amdgpu_cs_submit(struct amdgpu_cs_parser *p,
amdgpu_cs_post_dependencies(p);
+ if ((job->preamble_status & AMDGPU_PREAMBLE_IB_PRESENT) &&
+ !p->ctx->preamble_presented) {
+ job->preamble_status |= AMDGPU_PREAMBLE_IB_PRESENT_FIRST;
+ p->ctx->preamble_presented = true;
+ }
+
cs->out.handle = seq;
job->uf_sequence = seq;
amdgpu_job_free_resources(job);
trace_amdgpu_cs_ioctl(job);
+ amdgpu_vm_bo_trace_cs(&fpriv->vm, &p->ticket);
priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
- ring = to_amdgpu_ring(entity->sched);
+ ring = to_amdgpu_ring(entity->rq->sched);
amdgpu_ring_priority_get(ring, priority);
ttm_eu_fence_buffer_objects(&p->ticket, &p->validated, p->fence);
amdgpu_mn_unlock(p->mn);
return 0;
+
+error_abort:
+ dma_fence_put(&job->base.s_fence->finished);
+ job->base.s_fence = NULL;
+ amdgpu_mn_unlock(p->mn);
+
+error_unlock:
+ amdgpu_job_free(job);
+ return r;
}
int amdgpu_cs_ioctl(struct drm_device *dev, void *data, struct drm_file *filp)
@@ -1655,7 +1653,7 @@ int amdgpu_cs_find_mapping(struct amdgpu_cs_parser *parser,
if (!((*bo)->flags & AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS)) {
(*bo)->flags |= AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
- amdgpu_ttm_placement_from_domain(*bo, (*bo)->allowed_domains);
+ amdgpu_bo_placement_from_domain(*bo, (*bo)->allowed_domains);
r = ttm_bo_validate(&(*bo)->tbo, &(*bo)->placement, &ctx);
if (r)
return r;
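
The reworked submit path in this file unwinds through ordered goto labels: error_abort releases the scheduler fence and drops the mn lock it holds, while a failure before the lock is taken jumps straight to error_unlock, which only frees the job. A generic sketch of this reverse-order cleanup idiom (toy resources, nothing driver-specific):

    #include <stdio.h>
    #include <stdlib.h>

    /* Acquire resources in order, release them in reverse on failure. */
    static int submit(int fail_late)
    {
        int r = 0;
        char *job, *fence;

        job = malloc(32);
        if (!job)
            return -1;

        fence = malloc(32);
        if (!fence) {
            r = -1;
            goto error_free_job;
        }

        if (fail_late) {            /* a check after both resources exist */
            r = -1;
            goto error_put_fence;   /* undo the newest resource first */
        }

        free(fence);
        free(job);
        return 0;

    error_put_fence:
        free(fence);
    error_free_job:
        free(job);
        return r;
    }

    int main(void)
    {
        printf("ok path:   %d\n", submit(0));
        printf("fail path: %d\n", submit(1));
        return 0;
    }
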
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
index 83e3b320a793..df6965761046 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ctx.c
@@ -104,8 +104,7 @@ static int amdgpu_ctx_init(struct amdgpu_device *adev,
failed:
for (j = 0; j < i; j++)
- drm_sched_entity_destroy(&adev->rings[j]->sched,
- &ctx->rings[j].entity);
+ drm_sched_entity_destroy(&ctx->rings[j].entity);
kfree(ctx->fences);
ctx->fences = NULL;
return r;
@@ -178,8 +177,7 @@ static void amdgpu_ctx_do_release(struct kref *ref)
if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
continue;
- drm_sched_entity_destroy(&ctx->adev->rings[i]->sched,
- &ctx->rings[i].entity);
+ drm_sched_entity_destroy(&ctx->rings[i].entity);
}
amdgpu_ctx_fini(ref);
@@ -466,8 +464,8 @@ void amdgpu_ctx_mgr_entity_flush(struct amdgpu_ctx_mgr *mgr)
if (ctx->adev->rings[i] == &ctx->adev->gfx.kiq.ring)
continue;
- max_wait = drm_sched_entity_flush(&ctx->adev->rings[i]->sched,
- &ctx->rings[i].entity, max_wait);
+ max_wait = drm_sched_entity_flush(&ctx->rings[i].entity,
+ max_wait);
}
}
mutex_unlock(&mgr->lock);
@@ -492,8 +490,7 @@ void amdgpu_ctx_mgr_entity_fini(struct amdgpu_ctx_mgr *mgr)
continue;
if (kref_read(&ctx->refcount) == 1)
- drm_sched_entity_fini(&ctx->adev->rings[i]->sched,
- &ctx->rings[i].entity);
+ drm_sched_entity_fini(&ctx->rings[i].entity);
else
DRM_ERROR("ctx %p is still alive\n", ctx);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
index 386a7b34d2f4..8ab5ccbc14ac 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_device.c
@@ -1926,7 +1926,7 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
}
/**
- * amdgpu_device_ip_suspend - run suspend for hardware IPs
+ * amdgpu_device_ip_suspend_phase1 - run suspend for hardware IPs (phase 1)
*
* @adev: amdgpu_device pointer
*
@@ -1936,7 +1936,55 @@ static void amdgpu_device_ip_late_init_func_handler(struct work_struct *work)
* in each IP into a state suitable for suspend.
* Returns 0 on success, negative error code on failure.
*/
-int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+static int amdgpu_device_ip_suspend_phase1(struct amdgpu_device *adev)
+{
+ int i, r;
+
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_request_full_gpu(adev, false);
+
+ for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
+ if (!adev->ip_blocks[i].status.valid)
+ continue;
+ /* displays are handled separately */
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE) {
+ /* ungate blocks so that suspend can properly shut them down */
+ if (adev->ip_blocks[i].version->funcs->set_clockgating_state) {
+ r = adev->ip_blocks[i].version->funcs->set_clockgating_state((void *)adev,
+ AMD_CG_STATE_UNGATE);
+ if (r) {
+ DRM_ERROR("set_clockgating_state(ungate) of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ }
+ }
+ /* XXX handle errors */
+ r = adev->ip_blocks[i].version->funcs->suspend(adev);
+ /* XXX handle errors */
+ if (r) {
+ DRM_ERROR("suspend of IP block <%s> failed %d\n",
+ adev->ip_blocks[i].version->funcs->name, r);
+ }
+ }
+ }
+
+ if (amdgpu_sriov_vf(adev))
+ amdgpu_virt_release_full_gpu(adev, false);
+
+ return 0;
+}
+
+/**
+ * amdgpu_device_ip_suspend_phase2 - run suspend for hardware IPs (phase 2)
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main suspend function for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked, clockgating is disabled and the
+ * suspend callbacks are run. suspend puts the hardware and software state
+ * in each IP into a state suitable for suspend.
+ * Returns 0 on success, negative error code on failure.
+ */
+static int amdgpu_device_ip_suspend_phase2(struct amdgpu_device *adev)
{
int i, r;
@@ -1957,6 +2005,9 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
for (i = adev->num_ip_blocks - 1; i >= 0; i--) {
if (!adev->ip_blocks[i].status.valid)
continue;
+ /* displays are handled in phase1 */
+ if (adev->ip_blocks[i].version->type == AMD_IP_BLOCK_TYPE_DCE)
+ continue;
/* ungate blocks so that suspend can properly shut them down */
if (adev->ip_blocks[i].version->type != AMD_IP_BLOCK_TYPE_SMC &&
adev->ip_blocks[i].version->funcs->set_clockgating_state) {
@@ -1982,6 +2033,29 @@ int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
return 0;
}
+/**
+ * amdgpu_device_ip_suspend - run suspend for hardware IPs
+ *
+ * @adev: amdgpu_device pointer
+ *
+ * Main suspend function for hardware IPs. The list of all the hardware
+ * IPs that make up the asic is walked, clockgating is disabled and the
+ * suspend callbacks are run. suspend puts the hardware and software state
+ * in each IP into a state suitable for suspend.
+ * Returns 0 on success, negative error code on failure.
+ */
+int amdgpu_device_ip_suspend(struct amdgpu_device *adev)
+{
+ int r;
+
+ r = amdgpu_device_ip_suspend_phase1(adev);
+ if (r)
+ return r;
+ r = amdgpu_device_ip_suspend_phase2(adev);
+
+ return r;
+}
+
static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
{
int i, r;
@@ -2004,7 +2078,7 @@ static int amdgpu_device_ip_reinit_early_sriov(struct amdgpu_device *adev)
continue;
r = block->version->funcs->hw_init(adev);
- DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
+ DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
if (r)
return r;
}
@@ -2039,7 +2113,7 @@ static int amdgpu_device_ip_reinit_late_sriov(struct amdgpu_device *adev)
continue;
r = block->version->funcs->hw_init(adev);
- DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"successed");
+ DRM_INFO("RE-INIT: %s %s\n", block->version->funcs->name, r?"failed":"succeeded");
if (r)
return r;
}
@@ -2200,7 +2274,7 @@ bool amdgpu_device_asic_has_dc_support(enum amd_asic_type asic_type)
case CHIP_VEGA10:
case CHIP_VEGA12:
case CHIP_VEGA20:
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case CHIP_RAVEN:
#endif
return amdgpu_dc != 0;
@@ -2628,6 +2702,9 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
drm_kms_helper_poll_disable(dev);
+ if (fbcon)
+ amdgpu_fbdev_set_suspend(adev, 1);
+
if (!amdgpu_device_has_dc_support(adev)) {
/* turn off display hw */
drm_modeset_lock_all(dev);
@@ -2635,44 +2712,46 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
drm_helper_connector_dpms(connector, DRM_MODE_DPMS_OFF);
}
drm_modeset_unlock_all(dev);
- }
-
- amdgpu_amdkfd_suspend(adev);
-
- /* unpin the front buffers and cursors */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
- struct drm_framebuffer *fb = crtc->primary->fb;
- struct amdgpu_bo *robj;
-
- if (amdgpu_crtc->cursor_bo) {
- struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
- r = amdgpu_bo_reserve(aobj, true);
- if (r == 0) {
- amdgpu_bo_unpin(aobj);
- amdgpu_bo_unreserve(aobj);
+ /* unpin the front buffers and cursors */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+ struct drm_framebuffer *fb = crtc->primary->fb;
+ struct amdgpu_bo *robj;
+
+ if (amdgpu_crtc->cursor_bo) {
+ struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+ r = amdgpu_bo_reserve(aobj, true);
+ if (r == 0) {
+ amdgpu_bo_unpin(aobj);
+ amdgpu_bo_unreserve(aobj);
+ }
}
- }
- if (fb == NULL || fb->obj[0] == NULL) {
- continue;
- }
- robj = gem_to_amdgpu_bo(fb->obj[0]);
- /* don't unpin kernel fb objects */
- if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
- r = amdgpu_bo_reserve(robj, true);
- if (r == 0) {
- amdgpu_bo_unpin(robj);
- amdgpu_bo_unreserve(robj);
+ if (fb == NULL || fb->obj[0] == NULL) {
+ continue;
+ }
+ robj = gem_to_amdgpu_bo(fb->obj[0]);
+ /* don't unpin kernel fb objects */
+ if (!amdgpu_fbdev_robj_is_fb(adev, robj)) {
+ r = amdgpu_bo_reserve(robj, true);
+ if (r == 0) {
+ amdgpu_bo_unpin(robj);
+ amdgpu_bo_unreserve(robj);
+ }
}
}
}
+
+ amdgpu_amdkfd_suspend(adev);
+
+ r = amdgpu_device_ip_suspend_phase1(adev);
+
/* evict vram memory */
amdgpu_bo_evict_vram(adev);
amdgpu_fence_driver_suspend(adev);
- r = amdgpu_device_ip_suspend(adev);
+ r = amdgpu_device_ip_suspend_phase2(adev);
/* evict remaining vram memory
* This second call to evict vram is to evict the gart page table
@@ -2691,11 +2770,6 @@ int amdgpu_device_suspend(struct drm_device *dev, bool suspend, bool fbcon)
DRM_ERROR("amdgpu asic reset failed\n");
}
- if (fbcon) {
- console_lock();
- amdgpu_fbdev_set_suspend(adev, 1);
- console_unlock();
- }
return 0;
}
@@ -2720,15 +2794,12 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
if (dev->switch_power_state == DRM_SWITCH_POWER_OFF)
return 0;
- if (fbcon)
- console_lock();
-
if (resume) {
pci_set_power_state(dev->pdev, PCI_D0);
pci_restore_state(dev->pdev);
r = pci_enable_device(dev->pdev);
if (r)
- goto unlock;
+ return r;
}
/* post card */
@@ -2741,28 +2812,30 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
r = amdgpu_device_ip_resume(adev);
if (r) {
DRM_ERROR("amdgpu_device_ip_resume failed (%d).\n", r);
- goto unlock;
+ return r;
}
amdgpu_fence_driver_resume(adev);
r = amdgpu_device_ip_late_init(adev);
if (r)
- goto unlock;
-
- /* pin cursors */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
-
- if (amdgpu_crtc->cursor_bo) {
- struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
- r = amdgpu_bo_reserve(aobj, true);
- if (r == 0) {
- r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
- if (r != 0)
- DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
- amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
- amdgpu_bo_unreserve(aobj);
+ return r;
+
+ if (!amdgpu_device_has_dc_support(adev)) {
+ /* pin cursors */
+ list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
+ struct amdgpu_crtc *amdgpu_crtc = to_amdgpu_crtc(crtc);
+
+ if (amdgpu_crtc->cursor_bo) {
+ struct amdgpu_bo *aobj = gem_to_amdgpu_bo(amdgpu_crtc->cursor_bo);
+ r = amdgpu_bo_reserve(aobj, true);
+ if (r == 0) {
+ r = amdgpu_bo_pin(aobj, AMDGPU_GEM_DOMAIN_VRAM);
+ if (r != 0)
+ DRM_ERROR("Failed to pin cursor BO (%d)\n", r);
+ amdgpu_crtc->cursor_addr = amdgpu_bo_gpu_offset(aobj);
+ amdgpu_bo_unreserve(aobj);
+ }
}
}
}
@@ -2770,6 +2843,9 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
if (r)
return r;
+ /* Make sure IB tests flushed */
+ flush_delayed_work(&adev->late_init_work);
+
/* blat the mode back in */
if (fbcon) {
if (!amdgpu_device_has_dc_support(adev)) {
@@ -2783,6 +2859,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
}
drm_modeset_unlock_all(dev);
}
+ amdgpu_fbdev_set_suspend(adev, 0);
}
drm_kms_helper_poll_enable(dev);
@@ -2806,15 +2883,7 @@ int amdgpu_device_resume(struct drm_device *dev, bool resume, bool fbcon)
#ifdef CONFIG_PM
dev->dev->power.disable_depth--;
#endif
-
- if (fbcon)
- amdgpu_fbdev_set_suspend(adev, 0);
-
-unlock:
- if (fbcon)
- console_unlock();
-
- return r;
+ return 0;
}
/**
@@ -3039,7 +3108,7 @@ static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
long tmo;
if (amdgpu_sriov_runtime(adev))
- tmo = msecs_to_jiffies(amdgpu_lockup_timeout);
+ tmo = msecs_to_jiffies(8000);
else
tmo = msecs_to_jiffies(100);
@@ -3091,7 +3160,7 @@ static int amdgpu_device_handle_vram_lost(struct amdgpu_device *adev)
* @adev: amdgpu device pointer
*
* attempt to do soft-reset or full-reset and reinitialize Asic
- * return 0 means successed otherwise failed
+ * Returns 0 on success, negative error code on failure.
*/
static int amdgpu_device_reset(struct amdgpu_device *adev)
{
@@ -3169,7 +3238,7 @@ out:
* @from_hypervisor: request from hypervisor
*
* do VF FLR and reinitialize Asic
- * return 0 means successed otherwise failed
+ * Returns 0 on success, negative error code on failure.
*/
static int amdgpu_device_reset_sriov(struct amdgpu_device *adev,
bool from_hypervisor)
@@ -3241,6 +3310,9 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
atomic_inc(&adev->gpu_reset_counter);
adev->in_gpu_reset = 1;
+ /* Block kfd */
+ amdgpu_amdkfd_pre_reset(adev);
+
/* block TTM */
resched = ttm_bo_lock_delayed_workqueue(&adev->mman.bdev);
@@ -3256,7 +3328,7 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
if (job && job->base.sched == &ring->sched)
continue;
- drm_sched_hw_job_reset(&ring->sched, &job->base);
+ drm_sched_hw_job_reset(&ring->sched, job ? &job->base : NULL);
/* after all hw jobs are reset, hw fence is meaningless, so force_completion */
amdgpu_fence_driver_force_completion(ring);
@@ -3294,9 +3366,11 @@ int amdgpu_device_gpu_recover(struct amdgpu_device *adev,
dev_info(adev->dev, "GPU reset(%d) failed\n", atomic_read(&adev->gpu_reset_counter));
amdgpu_vf_error_put(adev, AMDGIM_ERROR_VF_GPU_RESET_FAIL, 0, r);
} else {
- dev_info(adev->dev, "GPU reset(%d) successed!\n",atomic_read(&adev->gpu_reset_counter));
+ dev_info(adev->dev, "GPU reset(%d) succeeded!\n", atomic_read(&adev->gpu_reset_counter));
}
+ /* unlock kfd */
+ amdgpu_amdkfd_post_reset(adev);
amdgpu_vf_error_trans_all(adev);
adev->in_gpu_reset = 0;
mutex_unlock(&adev->lock_reset);
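
Suspend above is now split in two passes: phase 1 takes down the display (DCE) blocks so VRAM can be evicted early, and phase 2 handles every other IP in reverse init order. A compact sketch of such a filtered reverse walk (made-up block table, not the driver's):

    #include <stdio.h>

    enum blk_type { BLK_DCE, BLK_GFX, BLK_SDMA };

    struct blk { enum blk_type type; const char *name; };

    /* Walk the block table in reverse init order, one class per pass. */
    static void suspend_pass(const struct blk *b, int n, int dce_pass)
    {
        for (int i = n - 1; i >= 0; i--) {
            if ((b[i].type == BLK_DCE) != dce_pass)
                continue;
            printf("suspend %s\n", b[i].name);
        }
    }

    int main(void)
    {
        const struct blk blocks[] = {
            { BLK_GFX, "gfx" }, { BLK_DCE, "dce" }, { BLK_SDMA, "sdma" },
        };
        int n = sizeof(blocks) / sizeof(blocks[0]);

        suspend_pass(blocks, n, 1);  /* phase 1: displays only */
        /* ... evict VRAM here, as amdgpu_device_suspend() now does ... */
        suspend_pass(blocks, n, 0);  /* phase 2: everything else */
        return 0;
    }
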
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
index 8843a06360fa..6870909da926 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_drv.c
@@ -785,28 +785,6 @@ MODULE_DEVICE_TABLE(pci, pciidlist);
static struct drm_driver kms_driver;
-static int amdgpu_kick_out_firmware_fb(struct pci_dev *pdev)
-{
- struct apertures_struct *ap;
- bool primary = false;
-
- ap = alloc_apertures(1);
- if (!ap)
- return -ENOMEM;
-
- ap->ranges[0].base = pci_resource_start(pdev, 0);
- ap->ranges[0].size = pci_resource_len(pdev, 0);
-
-#ifdef CONFIG_X86
- primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
-#endif
- drm_fb_helper_remove_conflicting_framebuffers(ap, "amdgpudrmfb", primary);
- kfree(ap);
-
- return 0;
-}
-
-
static int amdgpu_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -834,7 +812,7 @@ static int amdgpu_pci_probe(struct pci_dev *pdev,
return ret;
/* Get rid of things like offb */
- ret = amdgpu_kick_out_firmware_fb(pdev);
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "amdgpudrmfb");
if (ret)
return ret;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
index d44b76455e89..69c5d22f29bd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_fb.c
@@ -373,8 +373,8 @@ void amdgpu_fbdev_fini(struct amdgpu_device *adev)
void amdgpu_fbdev_set_suspend(struct amdgpu_device *adev, int state)
{
if (adev->mode_info.rfbdev)
- drm_fb_helper_set_suspend(&adev->mode_info.rfbdev->helper,
- state);
+ drm_fb_helper_set_suspend_unlocked(&adev->mode_info.rfbdev->helper,
+ state);
}
int amdgpu_fbdev_total_size(struct amdgpu_device *adev)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
index bcbdcf997d20..71792d820ae0 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gem.c
@@ -344,7 +344,7 @@ int amdgpu_gem_userptr_ioctl(struct drm_device *dev, void *data,
if (r)
goto free_pages;
- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
amdgpu_bo_unreserve(bo);
if (r)
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
index 6cb4948233cb..bb5a47a45790 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_gmc.h
@@ -105,6 +105,8 @@ struct amdgpu_gmc {
/* protects concurrent invalidation */
spinlock_t invalidate_lock;
bool translate_further;
+ struct kfd_vm_fault_info *vm_fault_info;
+ atomic_t vm_fault_info_updated;
const struct amdgpu_gmc_funcs *gmc_funcs;
};
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
index 5518e623fed2..51b5e977ca88 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ib.c
@@ -164,8 +164,10 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
return r;
}
+ need_ctx_switch = ring->current_ctx != fence_ctx;
if (ring->funcs->emit_pipeline_sync && job &&
((tmp = amdgpu_sync_get_fence(&job->sched_sync, NULL)) ||
+ (amdgpu_sriov_vf(adev) && need_ctx_switch) ||
amdgpu_vm_need_pipeline_sync(ring, job))) {
need_pipe_sync = true;
dma_fence_put(tmp);
@@ -196,7 +198,6 @@ int amdgpu_ib_schedule(struct amdgpu_ring *ring, unsigned num_ibs,
}
skip_preamble = ring->current_ctx == fence_ctx;
- need_ctx_switch = ring->current_ctx != fence_ctx;
if (job && ring->funcs->emit_cntxcntl) {
if (need_ctx_switch)
status |= AMDGPU_HAVE_CTX_SWITCH;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
index 5a2c26a85984..391e2f7c03aa 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_job.c
@@ -133,7 +133,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
if (!f)
return -EINVAL;
- r = drm_sched_job_init(&job->base, entity->sched, entity, owner);
+ r = drm_sched_job_init(&job->base, entity, owner);
if (r)
return r;
@@ -143,7 +143,7 @@ int amdgpu_job_submit(struct amdgpu_job *job, struct drm_sched_entity *entity,
priority = job->base.s_priority;
drm_sched_entity_push_job(&job->base, entity);
- ring = to_amdgpu_ring(entity->sched);
+ ring = to_amdgpu_ring(entity->rq->sched);
amdgpu_ring_priority_get(ring, priority);
return 0;
@@ -167,7 +167,7 @@ int amdgpu_job_submit_direct(struct amdgpu_job *job, struct amdgpu_ring *ring,
static struct dma_fence *amdgpu_job_dependency(struct drm_sched_job *sched_job,
struct drm_sched_entity *s_entity)
{
- struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->sched);
+ struct amdgpu_ring *ring = to_amdgpu_ring(s_entity->rq->sched);
struct amdgpu_job *job = to_amdgpu_job(sched_job);
struct amdgpu_vm *vm = job->vm;
struct dma_fence *fence;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
index 207f238649b4..bd98cc5fb97b 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_kms.c
@@ -328,64 +328,68 @@ static int amdgpu_info_ioctl(struct drm_device *dev, void *data, struct drm_file
case AMDGPU_HW_IP_GFX:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_gfx_rings; i++)
- ring_mask |= ((adev->gfx.gfx_ring[i].ready ? 1 : 0) << i);
+ ring_mask |= adev->gfx.gfx_ring[i].ready << i;
ib_start_alignment = 32;
ib_size_alignment = 32;
break;
case AMDGPU_HW_IP_COMPUTE:
type = AMD_IP_BLOCK_TYPE_GFX;
for (i = 0; i < adev->gfx.num_compute_rings; i++)
- ring_mask |= ((adev->gfx.compute_ring[i].ready ? 1 : 0) << i);
+ ring_mask |= adev->gfx.compute_ring[i].ready << i;
ib_start_alignment = 32;
ib_size_alignment = 32;
break;
case AMDGPU_HW_IP_DMA:
type = AMD_IP_BLOCK_TYPE_SDMA;
for (i = 0; i < adev->sdma.num_instances; i++)
- ring_mask |= ((adev->sdma.instance[i].ring.ready ? 1 : 0) << i);
+ ring_mask |= adev->sdma.instance[i].ring.ready << i;
ib_start_alignment = 256;
ib_size_alignment = 4;
break;
case AMDGPU_HW_IP_UVD:
type = AMD_IP_BLOCK_TYPE_UVD;
- for (i = 0; i < adev->uvd.num_uvd_inst; i++)
- ring_mask |= ((adev->uvd.inst[i].ring.ready ? 1 : 0) << i);
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
+ ring_mask |= adev->uvd.inst[i].ring.ready;
+ }
ib_start_alignment = 64;
ib_size_alignment = 64;
break;
case AMDGPU_HW_IP_VCE:
type = AMD_IP_BLOCK_TYPE_VCE;
for (i = 0; i < adev->vce.num_rings; i++)
- ring_mask |= ((adev->vce.ring[i].ready ? 1 : 0) << i);
+ ring_mask |= adev->vce.ring[i].ready << i;
ib_start_alignment = 4;
ib_size_alignment = 1;
break;
case AMDGPU_HW_IP_UVD_ENC:
type = AMD_IP_BLOCK_TYPE_UVD;
- for (i = 0; i < adev->uvd.num_uvd_inst; i++)
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
for (j = 0; j < adev->uvd.num_enc_rings; j++)
- ring_mask |=
- ((adev->uvd.inst[i].ring_enc[j].ready ? 1 : 0) <<
- (j + i * adev->uvd.num_enc_rings));
+ ring_mask |= adev->uvd.inst[i].ring_enc[j].ready << j;
+ }
ib_start_alignment = 64;
ib_size_alignment = 64;
break;
case AMDGPU_HW_IP_VCN_DEC:
type = AMD_IP_BLOCK_TYPE_VCN;
- ring_mask = adev->vcn.ring_dec.ready ? 1 : 0;
+ ring_mask = adev->vcn.ring_dec.ready;
ib_start_alignment = 16;
ib_size_alignment = 16;
break;
case AMDGPU_HW_IP_VCN_ENC:
type = AMD_IP_BLOCK_TYPE_VCN;
for (i = 0; i < adev->vcn.num_enc_rings; i++)
- ring_mask |= ((adev->vcn.ring_enc[i].ready ? 1 : 0) << i);
+ ring_mask |= adev->vcn.ring_enc[i].ready << i;
ib_start_alignment = 64;
ib_size_alignment = 1;
break;
case AMDGPU_HW_IP_VCN_JPEG:
type = AMD_IP_BLOCK_TYPE_VCN;
- ring_mask = adev->vcn.ring_jpeg.ready ? 1 : 0;
+ ring_mask = adev->vcn.ring_jpeg.ready;
ib_start_alignment = 16;
ib_size_alignment = 16;
break;
@@ -963,7 +967,7 @@ void amdgpu_driver_postclose_kms(struct drm_device *dev,
amdgpu_bo_unref(&pd);
idr_for_each_entry(&fpriv->bo_list_handles, list, handle)
- amdgpu_bo_list_free(list);
+ amdgpu_bo_list_put(list);
idr_destroy(&fpriv->bo_list_handles);
mutex_destroy(&fpriv->bo_list_lock);
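
The info ioctl above assembles a bitmask of ready rings and now skips harvested UVD instances entirely. A tiny standalone version of that mask construction (invented values):

    #include <stdio.h>

    int main(void)
    {
        const unsigned ready[] = { 1, 1, 0, 1 };    /* per-ring state */
        const unsigned harvest_config = 1u << 2;    /* instance 2 fused off */
        unsigned ring_mask = 0;

        for (unsigned i = 0; i < 4; i++) {
            if (harvest_config & (1u << i))
                continue;               /* never report harvested engines */
            ring_mask |= ready[i] << i;
        }
        printf("ring_mask = 0x%x\n", ring_mask);    /* 0xb: rings 0, 1, 3 */
        return 0;
    }
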
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
index a365ea2383d1..e55508b39496 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_mn.c
@@ -178,12 +178,18 @@ void amdgpu_mn_unlock(struct amdgpu_mn *mn)
*
* @amn: our notifier
*/
-static void amdgpu_mn_read_lock(struct amdgpu_mn *amn)
+static int amdgpu_mn_read_lock(struct amdgpu_mn *amn, bool blockable)
{
- mutex_lock(&amn->read_lock);
+ if (blockable)
+ mutex_lock(&amn->read_lock);
+ else if (!mutex_trylock(&amn->read_lock))
+ return -EAGAIN;
+
if (atomic_inc_return(&amn->recursion) == 1)
down_read_non_owner(&amn->lock);
mutex_unlock(&amn->read_lock);
+
+ return 0;
}
/**
@@ -239,10 +245,11 @@ static void amdgpu_mn_invalidate_node(struct amdgpu_mn_node *node,
* Block for operations on BOs to finish and mark pages as accessed and
* potentially dirty.
*/
-static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
+static int amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
- unsigned long end)
+ unsigned long end,
+ bool blockable)
{
struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
struct interval_tree_node *it;
@@ -250,17 +257,28 @@ static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
/* notification is exclusive, but interval is inclusive */
end -= 1;
- amdgpu_mn_read_lock(amn);
+ /* TODO we should be able to split locking for interval tree and
+ * amdgpu_mn_invalidate_node
+ */
+ if (amdgpu_mn_read_lock(amn, blockable))
+ return -EAGAIN;
it = interval_tree_iter_first(&amn->objects, start, end);
while (it) {
struct amdgpu_mn_node *node;
+ if (!blockable) {
+ amdgpu_mn_read_unlock(amn);
+ return -EAGAIN;
+ }
+
node = container_of(it, struct amdgpu_mn_node, it);
it = interval_tree_iter_next(it, start, end);
amdgpu_mn_invalidate_node(node, start, end);
}
+
+ return 0;
}
/**
@@ -275,10 +293,11 @@ static void amdgpu_mn_invalidate_range_start_gfx(struct mmu_notifier *mn,
* necessitates evicting all user-mode queues of the process. The BOs
* are restored in amdgpu_mn_invalidate_range_end_hsa.
*/
-static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
+static int amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
- unsigned long end)
+ unsigned long end,
+ bool blockable)
{
struct amdgpu_mn *amn = container_of(mn, struct amdgpu_mn, mn);
struct interval_tree_node *it;
@@ -286,13 +305,19 @@ static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
/* notification is exclusive, but interval is inclusive */
end -= 1;
- amdgpu_mn_read_lock(amn);
+ if (amdgpu_mn_read_lock(amn, blockable))
+ return -EAGAIN;
it = interval_tree_iter_first(&amn->objects, start, end);
while (it) {
struct amdgpu_mn_node *node;
struct amdgpu_bo *bo;
+ if (!blockable) {
+ amdgpu_mn_read_unlock(amn);
+ return -EAGAIN;
+ }
+
node = container_of(it, struct amdgpu_mn_node, it);
it = interval_tree_iter_next(it, start, end);
@@ -304,6 +329,8 @@ static void amdgpu_mn_invalidate_range_start_hsa(struct mmu_notifier *mn,
amdgpu_amdkfd_evict_userptr(mem, mm);
}
}
+
+ return 0;
}
/**
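
The notifier paths in this file now take a blockable flag: when blocking is forbidden they must trylock and bail out with -EAGAIN rather than sleep inside the MMU notifier. A userspace sketch of the same pattern with pthreads (EAGAIN here stands in for the kernel's return code):

    #include <errno.h>
    #include <pthread.h>
    #include <stdio.h>

    static pthread_mutex_t lock = PTHREAD_MUTEX_INITIALIZER;

    /* Sleep on the lock when allowed; otherwise try once and give up. */
    static int read_lock(int blockable)
    {
        if (blockable)
            pthread_mutex_lock(&lock);
        else if (pthread_mutex_trylock(&lock))
            return -EAGAIN;        /* caller must retry the invalidation */
        return 0;
    }

    int main(void)
    {
        if (!read_lock(0)) {       /* uncontended, so even trylock wins */
            printf("locked without blocking\n");
            pthread_mutex_unlock(&lock);
        }
        return 0;
    }
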
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
index b12526ce1a9d..b0e14a3d54ef 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.c
@@ -51,7 +51,7 @@
*
*/
-static bool amdgpu_need_backup(struct amdgpu_device *adev)
+static bool amdgpu_bo_need_backup(struct amdgpu_device *adev)
{
if (adev->flags & AMD_IS_APU)
return false;
@@ -84,12 +84,12 @@ static void amdgpu_bo_subtract_pin_size(struct amdgpu_bo *bo)
}
}
-static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
+static void amdgpu_bo_destroy(struct ttm_buffer_object *tbo)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(tbo->bdev);
struct amdgpu_bo *bo = ttm_to_amdgpu_bo(tbo);
- if (WARN_ON_ONCE(bo->pin_count > 0))
+ if (bo->pin_count > 0)
amdgpu_bo_subtract_pin_size(bo);
if (bo->kfd_bo)
@@ -111,7 +111,7 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
}
/**
- * amdgpu_ttm_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
+ * amdgpu_bo_is_amdgpu_bo - check if the buffer object is an &amdgpu_bo
* @bo: buffer object to be checked
*
* Uses destroy function associated with the object to determine if this is
@@ -120,22 +120,22 @@ static void amdgpu_ttm_bo_destroy(struct ttm_buffer_object *tbo)
* Returns:
* true if the object belongs to &amdgpu_bo, false if not.
*/
-bool amdgpu_ttm_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
+bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo)
{
- if (bo->destroy == &amdgpu_ttm_bo_destroy)
+ if (bo->destroy == &amdgpu_bo_destroy)
return true;
return false;
}
/**
- * amdgpu_ttm_placement_from_domain - set buffer's placement
+ * amdgpu_bo_placement_from_domain - set buffer's placement
* @abo: &amdgpu_bo buffer object whose placement is to be set
* @domain: requested domain
*
* Sets buffer's placement according to requested domain and the buffer's
* flags.
*/
-void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
+void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
{
struct amdgpu_device *adev = amdgpu_ttm_adev(abo->tbo.bdev);
struct ttm_placement *placement = &abo->placement;
@@ -216,6 +216,8 @@ void amdgpu_ttm_placement_from_domain(struct amdgpu_bo *abo, u32 domain)
c++;
}
+ BUG_ON(c >= AMDGPU_BO_MAX_PLACEMENTS);
+
placement->num_placement = c;
placement->placement = places;
@@ -488,13 +490,13 @@ static int amdgpu_bo_do_create(struct amdgpu_device *adev,
#endif
bo->tbo.bdev = &adev->mman.bdev;
- amdgpu_ttm_placement_from_domain(bo, bp->domain);
+ amdgpu_bo_placement_from_domain(bo, bp->domain);
if (bp->type == ttm_bo_type_kernel)
bo->tbo.priority = 1;
r = ttm_bo_init_reserved(&adev->mman.bdev, &bo->tbo, size, bp->type,
&bo->placement, page_align, &ctx, acc_size,
- NULL, bp->resv, &amdgpu_ttm_bo_destroy);
+ NULL, bp->resv, &amdgpu_bo_destroy);
if (unlikely(r != 0))
return r;
@@ -594,7 +596,7 @@ int amdgpu_bo_create(struct amdgpu_device *adev,
if (r)
return r;
- if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_need_backup(adev)) {
+ if ((flags & AMDGPU_GEM_CREATE_SHADOW) && amdgpu_bo_need_backup(adev)) {
if (!bp->resv)
WARN_ON(reservation_object_lock((*bo_ptr)->tbo.resv,
NULL));
@@ -682,7 +684,7 @@ int amdgpu_bo_validate(struct amdgpu_bo *bo)
domain = bo->preferred_domains;
retry:
- amdgpu_ttm_placement_from_domain(bo, domain);
+ amdgpu_bo_placement_from_domain(bo, domain);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (unlikely(r == -ENOMEM) && domain != bo->allowed_domains) {
domain = bo->allowed_domains;
@@ -823,7 +825,7 @@ struct amdgpu_bo *amdgpu_bo_ref(struct amdgpu_bo *bo)
if (bo == NULL)
return NULL;
- ttm_bo_reference(&bo->tbo);
+ ttm_bo_get(&bo->tbo);
return bo;
}
@@ -841,9 +843,8 @@ void amdgpu_bo_unref(struct amdgpu_bo **bo)
return;
tbo = &((*bo)->tbo);
- ttm_bo_unref(&tbo);
- if (tbo == NULL)
- *bo = NULL;
+ ttm_bo_put(tbo);
+ *bo = NULL;
}
/**
@@ -915,7 +916,7 @@ int amdgpu_bo_pin_restricted(struct amdgpu_bo *bo, u32 domain,
/* force to pin into visible video ram */
if (!(bo->flags & AMDGPU_GEM_CREATE_NO_CPU_ACCESS))
bo->flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- amdgpu_ttm_placement_from_domain(bo, domain);
+ amdgpu_bo_placement_from_domain(bo, domain);
for (i = 0; i < bo->placement.num_placement; i++) {
unsigned fpfn, lpfn;
@@ -1246,7 +1247,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
struct amdgpu_bo *abo;
struct ttm_mem_reg *old_mem = &bo->mem;
- if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
+ if (!amdgpu_bo_is_amdgpu_bo(bo))
return;
abo = ttm_to_amdgpu_bo(bo);
@@ -1263,7 +1264,7 @@ void amdgpu_bo_move_notify(struct ttm_buffer_object *bo,
return;
/* move_notify is called before move happens */
- trace_amdgpu_ttm_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
+ trace_amdgpu_bo_move(abo, new_mem->mem_type, old_mem->mem_type);
}
/**
@@ -1285,7 +1286,7 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
unsigned long offset, size;
int r;
- if (!amdgpu_ttm_bo_is_amdgpu_bo(bo))
+ if (!amdgpu_bo_is_amdgpu_bo(bo))
return 0;
abo = ttm_to_amdgpu_bo(bo);
@@ -1307,8 +1308,8 @@ int amdgpu_bo_fault_reserve_notify(struct ttm_buffer_object *bo)
/* hurrah the memory is not visible ! */
atomic64_inc(&adev->num_vram_cpu_page_faults);
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
- AMDGPU_GEM_DOMAIN_GTT);
+ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
+ AMDGPU_GEM_DOMAIN_GTT);
/* Avoid costly evictions; only set GTT as a busy placement */
abo->placement.num_busy_placement = 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
index 9c3e29a04eb1..18945dd6982d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_object.h
@@ -32,6 +32,7 @@
#include "amdgpu.h"
#define AMDGPU_BO_INVALID_OFFSET LONG_MAX
+#define AMDGPU_BO_MAX_PLACEMENTS 3
struct amdgpu_bo_param {
unsigned long size;
@@ -77,7 +78,7 @@ struct amdgpu_bo {
/* Protected by tbo.reserved */
u32 preferred_domains;
u32 allowed_domains;
- struct ttm_place placements[AMDGPU_GEM_DOMAIN_MAX + 1];
+ struct ttm_place placements[AMDGPU_BO_MAX_PLACEMENTS];
struct ttm_placement placement;
struct ttm_buffer_object tbo;
struct ttm_bo_kmap_obj kmap;
@@ -234,6 +235,9 @@ static inline bool amdgpu_bo_explicit_sync(struct amdgpu_bo *bo)
return bo->flags & AMDGPU_GEM_CREATE_EXPLICIT_SYNC;
}
+bool amdgpu_bo_is_amdgpu_bo(struct ttm_buffer_object *bo);
+void amdgpu_bo_placement_from_domain(struct amdgpu_bo *abo, u32 domain);
+
int amdgpu_bo_create(struct amdgpu_device *adev,
struct amdgpu_bo_param *bp,
struct amdgpu_bo **bo_ptr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
index 15a1192c1ec5..7b4e657a95c7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_pm.c
@@ -31,7 +31,7 @@
#include <linux/power_supply.h>
#include <linux/hwmon.h>
#include <linux/hwmon-sysfs.h>
-
+#include <linux/nospec.h>
static int amdgpu_debugfs_pm_init(struct amdgpu_device *adev);
@@ -403,6 +403,7 @@ static ssize_t amdgpu_set_pp_force_state(struct device *dev,
count = -EINVAL;
goto fail;
}
+ idx = array_index_nospec(idx, ARRAY_SIZE(data.states));
amdgpu_dpm_get_pp_num_states(adev, &data);
state = data.states[idx];
@@ -1185,7 +1186,7 @@ static ssize_t amdgpu_hwmon_show_vddnb(struct device *dev,
int r, size = sizeof(vddnb);
/* only APUs have vddnb */
- if (adev->flags & AMD_IS_APU)
+ if (!(adev->flags & AMD_IS_APU))
return -EINVAL;
/* Can't get voltage when the card is off */
@@ -1931,14 +1932,6 @@ void amdgpu_pm_compute_clocks(struct amdgpu_device *adev)
amdgpu_fence_wait_empty(ring);
}
- mutex_lock(&adev->pm.mutex);
- /* update battery/ac status */
- if (power_supply_is_system_supplied() > 0)
- adev->pm.ac_power = true;
- else
- adev->pm.ac_power = false;
- mutex_unlock(&adev->pm.mutex);
-
if (adev->powerplay.pp_funcs->dispatch_tasks) {
if (!amdgpu_device_has_dc_support(adev)) {
mutex_lock(&adev->pm.mutex);
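
The pp_force_state handler above passes the user-supplied index through array_index_nospec() after the bounds check, preventing speculative loads past data.states[]. The generic form of that helper reduces to a branchless in-range mask; a simplified userspace rendition (illustrative only, the kernel's version is per-architecture):

    #include <stdio.h>
    #include <stddef.h>

    /* All-ones when 0 <= idx < size, zero otherwise, computed without a
     * branch (relies on arithmetic right shift of negative values, which
     * is implementation-defined but universal in practice). */
    static size_t index_mask(size_t idx, size_t size)
    {
        long word = (long)(idx | (size - 1 - idx));

        return ~(size_t)(word >> (sizeof(long) * 8 - 1));
    }

    int main(void)
    {
        size_t size = 8;

        printf("%zu\n", (size_t)5 & index_mask(5, size)); /* 5: in range */
        printf("%zu\n", (size_t)9 & index_mask(9, size)); /* 0: clamped  */
        return 0;
    }
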
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
index 3ed02f472003..1c5d97f4b4dd 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_prime.c
@@ -323,7 +323,7 @@ static int amdgpu_gem_begin_cpu_access(struct dma_buf *dma_buf,
return ret;
if (!bo->pin_count && (bo->allowed_domains & AMDGPU_GEM_DOMAIN_GTT)) {
- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_GTT);
ret = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
index 9f1a5bd39ae8..5b39d1399630 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_psp.c
@@ -131,6 +131,11 @@ psp_cmd_submit_buf(struct psp_context *psp,
msleep(1);
}
+ if (ucode) {
+ ucode->tmr_mc_addr_lo = psp->cmd_buf_mem->resp.fw_addr_lo;
+ ucode->tmr_mc_addr_hi = psp->cmd_buf_mem->resp.fw_addr_hi;
+ }
+
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
index ea9850c9224d..a172bba32b45 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_queue_mgr.c
@@ -66,8 +66,6 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
u32 ring,
struct amdgpu_ring **out_ring)
{
- u32 instance;
-
switch (mapper->hw_ip) {
case AMDGPU_HW_IP_GFX:
*out_ring = &adev->gfx.gfx_ring[ring];
@@ -79,16 +77,13 @@ static int amdgpu_identity_map(struct amdgpu_device *adev,
*out_ring = &adev->sdma.instance[ring].ring;
break;
case AMDGPU_HW_IP_UVD:
- instance = ring;
- *out_ring = &adev->uvd.inst[instance].ring;
+ *out_ring = &adev->uvd.inst[0].ring;
break;
case AMDGPU_HW_IP_VCE:
*out_ring = &adev->vce.ring[ring];
break;
case AMDGPU_HW_IP_UVD_ENC:
- instance = ring / adev->uvd.num_enc_rings;
- *out_ring =
- &adev->uvd.inst[instance].ring_enc[ring%adev->uvd.num_enc_rings];
+ *out_ring = &adev->uvd.inst[0].ring_enc[ring];
break;
case AMDGPU_HW_IP_VCN_DEC:
*out_ring = &adev->vcn.ring_dec;
@@ -219,7 +214,7 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
u32 hw_ip, u32 instance, u32 ring,
struct amdgpu_ring **out_ring)
{
- int r, ip_num_rings;
+ int i, r, ip_num_rings = 0;
struct amdgpu_queue_mapper *mapper = &mgr->mapper[hw_ip];
if (!adev || !mgr || !out_ring)
@@ -248,14 +243,21 @@ int amdgpu_queue_mgr_map(struct amdgpu_device *adev,
ip_num_rings = adev->sdma.num_instances;
break;
case AMDGPU_HW_IP_UVD:
- ip_num_rings = adev->uvd.num_uvd_inst;
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (!(adev->uvd.harvest_config & (1 << i)))
+ ip_num_rings++;
+ }
break;
case AMDGPU_HW_IP_VCE:
ip_num_rings = adev->vce.num_rings;
break;
case AMDGPU_HW_IP_UVD_ENC:
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (!(adev->uvd.harvest_config & (1 << i)))
+ ip_num_rings++;
+ }
ip_num_rings =
- adev->uvd.num_enc_rings * adev->uvd.num_uvd_inst;
+ adev->uvd.num_enc_rings * ip_num_rings;
break;
case AMDGPU_HW_IP_VCN_DEC:
ip_num_rings = 1;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
index 5018c0b6bf1a..d242b9a51e90 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ring.h
@@ -123,6 +123,7 @@ struct amdgpu_ring_funcs {
void (*set_wptr)(struct amdgpu_ring *ring);
/* validating and patching of IBs */
int (*parse_cs)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
+ int (*patch_cs_in_place)(struct amdgpu_cs_parser *p, uint32_t ib_idx);
/* constants to calculate how many DW are needed for an emit */
unsigned emit_frame_size;
unsigned emit_ib_size;
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
index 86a0715d9431..1cafe8d83a4d 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_sched.c
@@ -53,9 +53,8 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
int fd,
enum drm_sched_priority priority)
{
- struct file *filp = fcheck(fd);
+ struct file *filp = fget(fd);
struct drm_file *file;
- struct pid *pid;
struct amdgpu_fpriv *fpriv;
struct amdgpu_ctx *ctx;
uint32_t id;
@@ -63,20 +62,12 @@ static int amdgpu_sched_process_priority_override(struct amdgpu_device *adev,
if (!filp)
return -EINVAL;
- pid = get_pid(((struct drm_file *)filp->private_data)->pid);
+ file = filp->private_data;
+ fpriv = file->driver_priv;
+ idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
+ amdgpu_ctx_priority_override(ctx, priority);
- mutex_lock(&adev->ddev->filelist_mutex);
- list_for_each_entry(file, &adev->ddev->filelist, lhead) {
- if (file->pid != pid)
- continue;
-
- fpriv = file->driver_priv;
- idr_for_each_entry(&fpriv->ctx_mgr.ctx_handles, ctx, id)
- amdgpu_ctx_priority_override(ctx, priority);
- }
- mutex_unlock(&adev->ddev->filelist_mutex);
-
- put_pid(pid);
+ fput(filp);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
index 76920035eb22..7206a0025b17 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_trace.h
@@ -314,6 +314,11 @@ DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_mapping,
TP_ARGS(mapping)
);
+DEFINE_EVENT(amdgpu_vm_mapping, amdgpu_vm_bo_cs,
+ TP_PROTO(struct amdgpu_bo_va_mapping *mapping),
+ TP_ARGS(mapping)
+);
+
TRACE_EVENT(amdgpu_vm_set_ptes,
TP_PROTO(uint64_t pe, uint64_t addr, unsigned count,
uint32_t incr, uint64_t flags),
@@ -436,7 +441,7 @@ TRACE_EVENT(amdgpu_cs_bo_status,
__entry->total_bo, __entry->total_size)
);
-TRACE_EVENT(amdgpu_ttm_bo_move,
+TRACE_EVENT(amdgpu_bo_move,
TP_PROTO(struct amdgpu_bo* bo, uint32_t new_placement, uint32_t old_placement),
TP_ARGS(bo, new_placement, old_placement),
TP_STRUCT__entry(
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
index 13977ea6a097..fcf421263fd9 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ttm.c
@@ -92,11 +92,9 @@ static void amdgpu_ttm_mem_global_release(struct drm_global_reference *ref)
}
/**
- * amdgpu_ttm_global_init - Initialize global TTM memory reference
- * structures.
+ * amdgpu_ttm_global_init - Initialize global TTM memory reference structures.
*
- * @adev: AMDGPU device for which the global structures need to be
- * registered.
+ * @adev: AMDGPU device for which the global structures need to be registered.
*
* This is called as part of the AMDGPU ttm init from amdgpu_ttm_init()
* during bring up.
@@ -162,13 +160,12 @@ static int amdgpu_invalidate_caches(struct ttm_bo_device *bdev, uint32_t flags)
}
/**
- * amdgpu_init_mem_type - Initialize a memory manager for a specific
- * type of memory request.
+ * amdgpu_init_mem_type - Initialize a memory manager for a specific type of
+ * memory request.
*
- * @bdev: The TTM BO device object (contains a reference to
- * amdgpu_device)
- * @type: The type of memory requested
- * @man:
+ * @bdev: The TTM BO device object (contains a reference to amdgpu_device)
+ * @type: The type of memory requested
+ * @man: The memory type manager for each domain
*
* This is called by ttm_bo_init_mm() when a buffer object is being
* initialized.
@@ -248,7 +245,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
}
/* Object isn't an AMDGPU object so ignore */
- if (!amdgpu_ttm_bo_is_amdgpu_bo(bo)) {
+ if (!amdgpu_bo_is_amdgpu_bo(bo)) {
placement->placement = &placements;
placement->busy_placement = &placements;
placement->num_placement = 1;
@@ -261,7 +258,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
case TTM_PL_VRAM:
if (!adev->mman.buffer_funcs_enabled) {
/* Move to system memory */
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
} else if (!amdgpu_gmc_vram_full_visible(&adev->gmc) &&
!(abo->flags & AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED) &&
amdgpu_bo_in_cpu_visible_vram(abo)) {
@@ -271,7 +268,7 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
* BO will be evicted to GTT rather than causing other
* BOs to be evicted from VRAM
*/
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
+ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_VRAM |
AMDGPU_GEM_DOMAIN_GTT);
abo->placements[0].fpfn = adev->gmc.visible_vram_size >> PAGE_SHIFT;
abo->placements[0].lpfn = 0;
@@ -279,12 +276,12 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
abo->placement.num_busy_placement = 1;
} else {
/* Move to GTT memory */
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
+ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_GTT);
}
break;
case TTM_PL_TT:
default:
- amdgpu_ttm_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
+ amdgpu_bo_placement_from_domain(abo, AMDGPU_GEM_DOMAIN_CPU);
}
*placement = abo->placement;
}
@@ -292,8 +289,8 @@ static void amdgpu_evict_flags(struct ttm_buffer_object *bo,
/**
* amdgpu_verify_access - Verify access for a mmap call
*
- * @bo: The buffer object to map
- * @filp: The file pointer from the process performing the mmap
+ * @bo: The buffer object to map
+ * @filp: The file pointer from the process performing the mmap
*
* This is called by ttm_bo_mmap() to verify whether a process
* has the right to mmap a BO to their process space.
@@ -318,11 +315,10 @@ static int amdgpu_verify_access(struct ttm_buffer_object *bo, struct file *filp)
/**
* amdgpu_move_null - Register memory for a buffer object
*
- * @bo: The bo to assign the memory to
- * @new_mem: The memory to be assigned.
+ * @bo: The bo to assign the memory to
+ * @new_mem: The memory to be assigned.
*
- * Assign the memory from new_mem to the memory of the buffer object
- * bo.
+ * Assign the memory from new_mem to the memory of the buffer object bo.
*/
static void amdgpu_move_null(struct ttm_buffer_object *bo,
struct ttm_mem_reg *new_mem)
@@ -335,8 +331,12 @@ static void amdgpu_move_null(struct ttm_buffer_object *bo,
}
/**
- * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT
- * buffer.
+ * amdgpu_mm_node_addr - Compute the GPU relative offset of a GTT buffer.
+ *
+ * @bo: The buffer object whose GPU offset is being computed.
+ * @mm_node: The drm_mm_node backing (part of) the buffer.
+ * @mem: The region where the bo resides.
+ *
*/
static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
struct drm_mm_node *mm_node,
@@ -352,10 +352,12 @@ static uint64_t amdgpu_mm_node_addr(struct ttm_buffer_object *bo,
}
/**
- * amdgpu_find_mm_node - Helper function finds the drm_mm_node
- * corresponding to @offset. It also modifies
- * the offset to be within the drm_mm_node
- * returned
+ * amdgpu_find_mm_node - Helper function to find the drm_mm_node corresponding
+ * to @offset; it also adjusts @offset to be relative to the returned node
+ *
+ * @mem: The region where the bo resides.
+ * @offset: The offset to look up; updated to be within the returned drm_mm_node.
+ *
*/
static struct drm_mm_node *amdgpu_find_mm_node(struct ttm_mem_reg *mem,
unsigned long *offset)
@@ -497,8 +499,8 @@ error:
/**
* amdgpu_move_blit - Copy an entire buffer to another buffer
*
- * This is a helper called by amdgpu_bo_move() and
- * amdgpu_move_vram_ram() to help move buffers to and from VRAM.
+ * This is a helper called by amdgpu_bo_move() and amdgpu_move_vram_ram() to
+ * help move buffers to and from VRAM.
*/
static int amdgpu_move_blit(struct ttm_buffer_object *bo,
bool evict, bool no_wait_gpu,
@@ -580,7 +582,7 @@ static int amdgpu_move_vram_ram(struct ttm_buffer_object *bo, bool evict,
}
/* blit VRAM to GTT */
- r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, &tmp_mem, old_mem);
+ r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, &tmp_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -632,7 +634,7 @@ static int amdgpu_move_ram_vram(struct ttm_buffer_object *bo, bool evict,
}
/* copy to VRAM */
- r = amdgpu_move_blit(bo, true, ctx->no_wait_gpu, new_mem, old_mem);
+ r = amdgpu_move_blit(bo, evict, ctx->no_wait_gpu, new_mem, old_mem);
if (unlikely(r)) {
goto out_cleanup;
}
@@ -794,8 +796,8 @@ struct amdgpu_ttm_tt {
};
/**
- * amdgpu_ttm_tt_get_user_pages - Pin pages of memory pointed to
- * by a USERPTR pointer to memory
+ * amdgpu_ttm_tt_get_user_pages - Pin pages of memory pointed to by a USERPTR
+ * pointer to memory
*
* Called by amdgpu_gem_userptr_ioctl() and amdgpu_cs_parser_bos().
* This provides a wrapper around the get_user_pages() call to provide
@@ -818,8 +820,10 @@ int amdgpu_ttm_tt_get_user_pages(struct ttm_tt *ttm, struct page **pages)
down_read(&mm->mmap_sem);
if (gtt->userflags & AMDGPU_GEM_USERPTR_ANONONLY) {
- /* check that we only use anonymous memory
- to prevent problems with writeback */
+ /*
+ * check that we only use anonymous memory to prevent problems
+ * with writeback
+ */
unsigned long end = gtt->userptr + ttm->num_pages * PAGE_SIZE;
struct vm_area_struct *vma;
@@ -870,10 +874,9 @@ release_pages:
}
/**
- * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages
- * as necessary.
+ * amdgpu_ttm_tt_set_user_pages - Copy pages in, putting old pages as necessary.
*
- * Called by amdgpu_cs_list_validate(). This creates the page list
+ * Called by amdgpu_cs_list_validate(). This creates the page list
* that backs user memory and will ultimately be mapped into the device
* address space.
*/
@@ -915,8 +918,7 @@ void amdgpu_ttm_tt_mark_user_pages(struct ttm_tt *ttm)
}
/**
- * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the
- * user pages
+ * amdgpu_ttm_tt_pin_userptr - prepare the sg table with the user pages
*
* Called by amdgpu_ttm_backend_bind()
**/
@@ -1295,8 +1297,8 @@ static void amdgpu_ttm_tt_unpopulate(struct ttm_tt *ttm)
}
/**
- * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt
- * for the current task
+ * amdgpu_ttm_tt_set_userptr - Initialize userptr GTT ttm_tt for the current
+ * task
*
* @ttm: The ttm_tt object to bind this userptr object to
* @addr: The address in the current tasks VM space to use
@@ -1346,9 +1348,8 @@ struct mm_struct *amdgpu_ttm_tt_get_usermm(struct ttm_tt *ttm)
}
/**
- * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lays
- * inside an address range for the
- * current task.
+ * amdgpu_ttm_tt_affect_userptr - Determine if a ttm_tt object lies inside an
+ * address range for the current task.
*
*/
bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
@@ -1386,8 +1387,7 @@ bool amdgpu_ttm_tt_affect_userptr(struct ttm_tt *ttm, unsigned long start,
}
/**
- * amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been
- * invalidated?
+ * amdgpu_ttm_tt_userptr_invalidated - Has the ttm_tt object been invalidated?
*/
bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
int *last_invalidated)
@@ -1400,10 +1400,8 @@ bool amdgpu_ttm_tt_userptr_invalidated(struct ttm_tt *ttm,
}
/**
- * amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this
- * ttm_tt object been invalidated
- * since the last time they've
- * been set?
+ * amdgpu_ttm_tt_userptr_needs_pages - Have the pages backing this ttm_tt object
+ * been invalidated since the last time they've been set?
*/
bool amdgpu_ttm_tt_userptr_needs_pages(struct ttm_tt *ttm)
{
@@ -1459,13 +1457,12 @@ uint64_t amdgpu_ttm_tt_pte_flags(struct amdgpu_device *adev, struct ttm_tt *ttm,
}
/**
- * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict
- * a buffer object.
+ * amdgpu_ttm_bo_eviction_valuable - Check to see if we can evict a buffer
+ * object.
*
- * Return true if eviction is sensible. Called by
- * ttm_mem_evict_first() on behalf of ttm_bo_mem_force_space()
- * which tries to evict buffer objects until it can find space
- * for a new object and by ttm_bo_force_list_clean() which is
+ * Return true if eviction is sensible. Called by ttm_mem_evict_first() on
+ * behalf of ttm_bo_mem_force_space() which tries to evict buffer objects until
+ * it can find space for a new object and by ttm_bo_force_list_clean() which is
* used to clean out a memory space.
*/
static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
@@ -1515,8 +1512,7 @@ static bool amdgpu_ttm_bo_eviction_valuable(struct ttm_buffer_object *bo,
}
/**
- * amdgpu_ttm_access_memory - Read or Write memory that backs a
- * buffer object.
+ * amdgpu_ttm_access_memory - Read or Write memory that backs a buffer object.
*
* @bo: The buffer object to read/write
* @offset: Offset into buffer object
@@ -1704,8 +1700,8 @@ error_create:
return r;
}
/**
- * amdgpu_ttm_init - Init the memory management (ttm) as well as
- * various gtt/vram related fields.
+ * amdgpu_ttm_init - Init the memory management (ttm) as well as various
+ * gtt/vram related fields.
*
* This initializes all of the memory space pools that the TTM layer
* will need such as the GTT space (system memory mapped to the device),
@@ -1856,8 +1852,7 @@ int amdgpu_ttm_init(struct amdgpu_device *adev)
}
/**
- * amdgpu_ttm_late_init - Handle any late initialization for
- * amdgpu_ttm
+ * amdgpu_ttm_late_init - Handle any late initialization for amdgpu_ttm
*/
void amdgpu_ttm_late_init(struct amdgpu_device *adev)
{
@@ -1925,8 +1920,9 @@ void amdgpu_ttm_set_buffer_funcs_status(struct amdgpu_device *adev, bool enable)
return;
}
} else {
- drm_sched_entity_destroy(adev->mman.entity.sched,
- &adev->mman.entity);
+ drm_sched_entity_destroy(&adev->mman.entity);
+ dma_fence_put(man->move);
+ man->move = NULL;
}
/* this just adjusts TTM size idea, which sets lpfn to the correct value */
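
The last hunk above pairs the simplified drm_sched_entity_destroy() call (the entity now carries its run queue, so the scheduler argument is gone) with dropping the cached move fence; without the dma_fence_put() the last copy fence would be leaked whenever the buffer funcs are disabled. A minimal sketch of the put-and-clear idiom, assuming a cached fence pointer:

#include <linux/dma-fence.h>

/* Drop a cached fence reference and clear the pointer so a later
 * enable path starts clean. dma_fence_put() accepts NULL. */
static void example_drop_cached_fence(struct dma_fence **cached)
{
	dma_fence_put(*cached);
	*cached = NULL;
}
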
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
index 08e38579af24..bdc472b6e641 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_ucode.h
@@ -194,6 +194,7 @@ enum AMDGPU_UCODE_ID {
AMDGPU_UCODE_ID_SMC,
AMDGPU_UCODE_ID_UVD,
AMDGPU_UCODE_ID_VCE,
+ AMDGPU_UCODE_ID_VCN,
AMDGPU_UCODE_ID_MAXIMUM,
};
@@ -226,6 +227,9 @@ struct amdgpu_firmware_info {
void *kaddr;
/* ucode_size_bytes */
uint32_t ucode_size;
+ /* starting tmr mc address */
+ uint32_t tmr_mc_addr_lo;
+ uint32_t tmr_mc_addr_hi;
};
void amdgpu_ucode_print_mc_hdr(const struct common_firmware_header *hdr);
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
index 80b5c453f8c1..e5a6db6beab7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.c
@@ -122,8 +122,6 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work);
int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
{
- struct amdgpu_ring *ring;
- struct drm_sched_rq *rq;
unsigned long bo_size;
const char *fw_name;
const struct common_firmware_header *hdr;
@@ -255,7 +253,8 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
-
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, &adev->uvd.inst[j].vcpu_bo,
&adev->uvd.inst[j].gpu_addr, &adev->uvd.inst[j].cpu_addr);
@@ -265,13 +264,6 @@ int amdgpu_uvd_sw_init(struct amdgpu_device *adev)
}
}
- ring = &adev->uvd.inst[0].ring;
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
- if (r) {
- DRM_ERROR("Failed setting up UVD kernel entity.\n");
- return r;
- }
for (i = 0; i < adev->uvd.max_handles; ++i) {
atomic_set(&adev->uvd.handles[i], 0);
adev->uvd.filp[i] = NULL;
@@ -305,11 +297,12 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
{
int i, j;
- drm_sched_entity_destroy(&adev->uvd.inst->ring.sched,
- &adev->uvd.entity);
+ drm_sched_entity_destroy(&adev->uvd.entity);
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
- kfree(adev->uvd.inst[j].saved_bo);
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
+ kvfree(adev->uvd.inst[j].saved_bo);
amdgpu_bo_free_kernel(&adev->uvd.inst[j].vcpu_bo,
&adev->uvd.inst[j].gpu_addr,
@@ -325,6 +318,29 @@ int amdgpu_uvd_sw_fini(struct amdgpu_device *adev)
return 0;
}
+/**
+ * amdgpu_uvd_entity_init - init the UVD kernel scheduler entity
+ *
+ * @adev: amdgpu_device pointer
+ *
+ */
+int amdgpu_uvd_entity_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ struct drm_sched_rq *rq;
+ int r;
+
+ ring = &adev->uvd.inst[0].ring;
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&adev->uvd.entity, &rq, 1, NULL);
+ if (r) {
+ DRM_ERROR("Failed setting up UVD kernel entity.\n");
+ return r;
+ }
+
+ return 0;
+}
+
int amdgpu_uvd_suspend(struct amdgpu_device *adev)
{
unsigned size;
@@ -344,13 +360,15 @@ int amdgpu_uvd_suspend(struct amdgpu_device *adev)
}
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
if (adev->uvd.inst[j].vcpu_bo == NULL)
continue;
size = amdgpu_bo_size(adev->uvd.inst[j].vcpu_bo);
ptr = adev->uvd.inst[j].cpu_addr;
- adev->uvd.inst[j].saved_bo = kmalloc(size, GFP_KERNEL);
+ adev->uvd.inst[j].saved_bo = kvmalloc(size, GFP_KERNEL);
if (!adev->uvd.inst[j].saved_bo)
return -ENOMEM;
@@ -366,6 +384,8 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
if (adev->uvd.inst[i].vcpu_bo == NULL)
return -EINVAL;
@@ -374,7 +394,7 @@ int amdgpu_uvd_resume(struct amdgpu_device *adev)
if (adev->uvd.inst[i].saved_bo != NULL) {
memcpy_toio(ptr, adev->uvd.inst[i].saved_bo, size);
- kfree(adev->uvd.inst[i].saved_bo);
+ kvfree(adev->uvd.inst[i].saved_bo);
adev->uvd.inst[i].saved_bo = NULL;
} else {
const struct common_firmware_header *hdr;
@@ -473,7 +493,7 @@ static int amdgpu_uvd_cs_pass1(struct amdgpu_uvd_cs_ctx *ctx)
if (cmd == 0x0 || cmd == 0x3) {
/* yes, force it into VRAM */
uint32_t domain = AMDGPU_GEM_DOMAIN_VRAM;
- amdgpu_ttm_placement_from_domain(bo, domain);
+ amdgpu_bo_placement_from_domain(bo, domain);
}
amdgpu_uvd_force_into_uvd_segment(bo);
@@ -1014,7 +1034,7 @@ static int amdgpu_uvd_send_msg(struct amdgpu_ring *ring, struct amdgpu_bo *bo,
if (!ring->adev->uvd.address_64_bit) {
struct ttm_operation_ctx ctx = { true, false };
- amdgpu_ttm_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
+ amdgpu_bo_placement_from_domain(bo, AMDGPU_GEM_DOMAIN_VRAM);
amdgpu_uvd_force_into_uvd_segment(bo);
r = ttm_bo_validate(&bo->tbo, &bo->placement, &ctx);
if (r)
@@ -1160,6 +1180,8 @@ static void amdgpu_uvd_idle_work_handler(struct work_struct *work)
unsigned fences = 0, i, j;
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring);
for (j = 0; j < adev->uvd.num_enc_rings; ++j) {
fences += amdgpu_fence_count_emitted(&adev->uvd.inst[i].ring_enc[j]);
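
Several hunks in this file swap kmalloc()/kfree() for kvmalloc()/kvfree() around saved_bo. That is the usual fix when an allocation, here a copy of the whole VCPU BO, can be large enough that contiguous pages are not reliably available: kvmalloc() tries the slab allocator first and falls back to vmalloc(). A hedged sketch of the save path (function name illustrative):

#include <linux/io.h>	/* memcpy_fromio() */
#include <linux/mm.h>	/* kvmalloc(), kvfree() */

/* Back up a device region; size may be several MB, so allow the
 * vmalloc fallback rather than demanding contiguous pages. */
static void *example_save_region(const void __iomem *src, size_t size)
{
	void *backup = kvmalloc(size, GFP_KERNEL);

	if (!backup)
		return NULL;
	memcpy_fromio(backup, src, size); /* restore uses memcpy_toio() */
	return backup;	/* caller releases with kvfree() */
}
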
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
index 66872286ab12..a3ab1a41060f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_uvd.h
@@ -48,6 +48,9 @@ struct amdgpu_uvd_inst {
uint32_t srbm_soft_reset;
};
+#define AMDGPU_UVD_HARVEST_UVD0 (1 << 0)
+#define AMDGPU_UVD_HARVEST_UVD1 (1 << 1)
+
struct amdgpu_uvd {
const struct firmware *fw; /* UVD firmware */
unsigned fw_version;
@@ -61,10 +64,12 @@ struct amdgpu_uvd {
atomic_t handles[AMDGPU_MAX_UVD_HANDLES];
struct drm_sched_entity entity;
struct delayed_work idle_work;
+ unsigned harvest_config;
};
int amdgpu_uvd_sw_init(struct amdgpu_device *adev);
int amdgpu_uvd_sw_fini(struct amdgpu_device *adev);
+int amdgpu_uvd_entity_init(struct amdgpu_device *adev);
int amdgpu_uvd_suspend(struct amdgpu_device *adev);
int amdgpu_uvd_resume(struct amdgpu_device *adev);
int amdgpu_uvd_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
index 86182c966ed6..0cc5190f4f36 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.c
@@ -90,8 +90,6 @@ static void amdgpu_vce_idle_work_handler(struct work_struct *work);
*/
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
{
- struct amdgpu_ring *ring;
- struct drm_sched_rq *rq;
const char *fw_name;
const struct common_firmware_header *hdr;
unsigned ucode_version, version_major, version_minor, binary_id;
@@ -188,14 +186,6 @@ int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size)
return r;
}
- ring = &adev->vce.ring[0];
- rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
- r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
- if (r != 0) {
- DRM_ERROR("Failed setting up VCE run queue.\n");
- return r;
- }
-
for (i = 0; i < AMDGPU_MAX_VCE_HANDLES; ++i) {
atomic_set(&adev->vce.handles[i], 0);
adev->vce.filp[i] = NULL;
@@ -221,7 +211,7 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
if (adev->vce.vcpu_bo == NULL)
return 0;
- drm_sched_entity_destroy(&adev->vce.ring[0].sched, &adev->vce.entity);
+ drm_sched_entity_destroy(&adev->vce.entity);
amdgpu_bo_free_kernel(&adev->vce.vcpu_bo, &adev->vce.gpu_addr,
(void **)&adev->vce.cpu_addr);
@@ -236,6 +226,29 @@ int amdgpu_vce_sw_fini(struct amdgpu_device *adev)
}
/**
+ * amdgpu_vce_entity_init - init the VCE kernel scheduler entity
+ *
+ * @adev: amdgpu_device pointer
+ *
+ */
+int amdgpu_vce_entity_init(struct amdgpu_device *adev)
+{
+ struct amdgpu_ring *ring;
+ struct drm_sched_rq *rq;
+ int r;
+
+ ring = &adev->vce.ring[0];
+ rq = &ring->sched.sched_rq[DRM_SCHED_PRIORITY_NORMAL];
+ r = drm_sched_entity_init(&adev->vce.entity, &rq, 1, NULL);
+ if (r != 0) {
+ DRM_ERROR("Failed setting up VCE run queue.\n");
+ return r;
+ }
+
+ return 0;
+}
+
+/**
* amdgpu_vce_suspend - unpin VCE fw memory
*
* @adev: amdgpu_device pointer
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
index 71781267ee4c..a1f209eed4c4 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vce.h
@@ -55,6 +55,7 @@ struct amdgpu_vce {
int amdgpu_vce_sw_init(struct amdgpu_device *adev, unsigned long size);
int amdgpu_vce_sw_fini(struct amdgpu_device *adev);
+int amdgpu_vce_entity_init(struct amdgpu_device *adev);
int amdgpu_vce_suspend(struct amdgpu_device *adev);
int amdgpu_vce_resume(struct amdgpu_device *adev);
int amdgpu_vce_get_create_msg(struct amdgpu_ring *ring, uint32_t handle,
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
index 798648a19710..fd654a4406db 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vcn.c
@@ -111,9 +111,10 @@ int amdgpu_vcn_sw_init(struct amdgpu_device *adev)
version_major, version_minor, family_id);
}
- bo_size = AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8)
- + AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
+ bo_size = AMDGPU_VCN_STACK_SIZE + AMDGPU_VCN_HEAP_SIZE
+ AMDGPU_VCN_SESSION_SIZE * 40;
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP)
+ bo_size += AMDGPU_GPU_PAGE_ALIGN(le32_to_cpu(hdr->ucode_size_bytes) + 8);
r = amdgpu_bo_create_kernel(adev, bo_size, PAGE_SIZE,
AMDGPU_GEM_DOMAIN_VRAM, &adev->vcn.vcpu_bo,
&adev->vcn.gpu_addr, &adev->vcn.cpu_addr);
@@ -129,7 +130,7 @@ int amdgpu_vcn_sw_fini(struct amdgpu_device *adev)
{
int i;
- kfree(adev->vcn.saved_bo);
+ kvfree(adev->vcn.saved_bo);
amdgpu_bo_free_kernel(&adev->vcn.vcpu_bo,
&adev->vcn.gpu_addr,
@@ -160,7 +161,7 @@ int amdgpu_vcn_suspend(struct amdgpu_device *adev)
size = amdgpu_bo_size(adev->vcn.vcpu_bo);
ptr = adev->vcn.cpu_addr;
- adev->vcn.saved_bo = kmalloc(size, GFP_KERNEL);
+ adev->vcn.saved_bo = kvmalloc(size, GFP_KERNEL);
if (!adev->vcn.saved_bo)
return -ENOMEM;
@@ -182,18 +183,20 @@ int amdgpu_vcn_resume(struct amdgpu_device *adev)
if (adev->vcn.saved_bo != NULL) {
memcpy_toio(ptr, adev->vcn.saved_bo, size);
- kfree(adev->vcn.saved_bo);
+ kvfree(adev->vcn.saved_bo);
adev->vcn.saved_bo = NULL;
} else {
const struct common_firmware_header *hdr;
unsigned offset;
hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
- offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
- memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
- le32_to_cpu(hdr->ucode_size_bytes));
- size -= le32_to_cpu(hdr->ucode_size_bytes);
- ptr += le32_to_cpu(hdr->ucode_size_bytes);
+ if (adev->firmware.load_type != AMDGPU_FW_LOAD_PSP) {
+ offset = le32_to_cpu(hdr->ucode_array_offset_bytes);
+ memcpy_toio(adev->vcn.cpu_addr, adev->vcn.fw->data + offset,
+ le32_to_cpu(hdr->ucode_size_bytes));
+ size -= le32_to_cpu(hdr->ucode_size_bytes);
+ ptr += le32_to_cpu(hdr->ucode_size_bytes);
+ }
memset_io(ptr, 0, size);
}
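
The sizing hunk above reflects the two load paths: with PSP (front-door) loading the VCN firmware lives in the PSP-managed TMR region, so the VCPU BO only needs heap/stack/session space; only direct loading keeps the ucode image in the BO. A sketch of the same arithmetic with hypothetical constants:

#include <linux/kernel.h>	/* ALIGN() */
#include <linux/types.h>

#define EX_STACK_SIZE	 (128 * 1024)		/* hypothetical */
#define EX_HEAP_SIZE	 (256 * 1024)		/* hypothetical */
#define EX_SESSION_SIZE	 (64 * 1024)		/* hypothetical */
#define EX_PAGE_ALIGN(x) ALIGN((x), 4096)

static unsigned long example_vcpu_bo_size(bool psp_load, u32 ucode_bytes)
{
	unsigned long size = EX_STACK_SIZE + EX_HEAP_SIZE +
			     EX_SESSION_SIZE * 40;	/* 40 sessions */

	if (!psp_load)	/* direct load keeps the fw image in the BO */
		size += EX_PAGE_ALIGN(ucode_bytes + 8);
	return size;
}
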
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
index 098dd1ba751a..b17771dd5ce7 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.c
@@ -172,6 +172,7 @@ static void amdgpu_vm_bo_base_init(struct amdgpu_vm_bo_base *base,
* is validated on next vm use to avoid fault.
* */
list_move_tail(&base->vm_status, &vm->evicted);
+ base->moved = true;
}
/**
@@ -369,7 +370,6 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
uint64_t addr;
int r;
- addr = amdgpu_bo_gpu_offset(bo);
entries = amdgpu_bo_size(bo) / 8;
if (pte_support_ats) {
@@ -387,7 +387,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
ats_entries = 0;
}
- ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+ ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
r = reservation_object_reserve_shared(bo->tbo.resv);
if (r)
@@ -401,6 +401,7 @@ static int amdgpu_vm_clear_bo(struct amdgpu_device *adev,
if (r)
goto error;
+ addr = amdgpu_bo_gpu_offset(bo);
if (ats_entries) {
uint64_t ats_value;
@@ -495,11 +496,12 @@ static int amdgpu_vm_alloc_levels(struct amdgpu_device *adev,
eaddr = eaddr & ((1 << shift) - 1);
flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
+ if (vm->root.base.bo->shadow)
+ flags |= AMDGPU_GEM_CREATE_SHADOW;
if (vm->use_cpu_for_update)
flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
else
- flags |= (AMDGPU_GEM_CREATE_NO_CPU_ACCESS |
- AMDGPU_GEM_CREATE_SHADOW);
+ flags |= AMDGPU_GEM_CREATE_NO_CPU_ACCESS;
/* walk over the address space and allocate the page tables */
for (pt_idx = from; pt_idx <= to; ++pt_idx) {
@@ -1113,7 +1115,7 @@ restart:
struct amdgpu_ring *ring;
struct dma_fence *fence;
- ring = container_of(vm->entity.sched, struct amdgpu_ring,
+ ring = container_of(vm->entity.rq->sched, struct amdgpu_ring,
sched);
amdgpu_ring_pad_ib(ring, params.ib);
@@ -1403,7 +1405,7 @@ static int amdgpu_vm_bo_update_mapping(struct amdgpu_device *adev,
addr, flags);
}
- ring = container_of(vm->entity.sched, struct amdgpu_ring, sched);
+ ring = container_of(vm->entity.rq->sched, struct amdgpu_ring, sched);
nptes = last - start + 1;
@@ -2344,6 +2346,35 @@ struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
}
/**
+ * amdgpu_vm_bo_trace_cs - trace all reserved mappings
+ *
+ * @vm: the requested vm
+ * @ticket: CS ticket
+ *
+ * Trace all mappings of BOs reserved during a command submission.
+ */
+void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket)
+{
+ struct amdgpu_bo_va_mapping *mapping;
+
+ if (!trace_amdgpu_vm_bo_cs_enabled())
+ return;
+
+ for (mapping = amdgpu_vm_it_iter_first(&vm->va, 0, U64_MAX); mapping;
+ mapping = amdgpu_vm_it_iter_next(mapping, 0, U64_MAX)) {
+ if (mapping->bo_va && mapping->bo_va->base.bo) {
+ struct amdgpu_bo *bo;
+
+ bo = mapping->bo_va->base.bo;
+ if (READ_ONCE(bo->tbo.resv->lock.ctx) != ticket)
+ continue;
+ }
+
+ trace_amdgpu_vm_bo_cs(mapping);
+ }
+}
+
+/**
* amdgpu_vm_bo_rmv - remove a bo from a specific vm
*
* @adev: amdgpu_device pointer
@@ -2453,28 +2484,52 @@ static uint32_t amdgpu_vm_get_block_size(uint64_t vm_size)
* amdgpu_vm_adjust_size - adjust vm size, block size and fragment size
*
* @adev: amdgpu_device pointer
- * @vm_size: the default vm size if it's set auto
+ * @min_vm_size: the minimum vm size in GB if it's set auto
* @fragment_size_default: Default PTE fragment size
* @max_level: max VMPT level
* @max_bits: max address space size in bits
*
*/
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
uint32_t fragment_size_default, unsigned max_level,
unsigned max_bits)
{
+ unsigned int max_size = 1 << (max_bits - 30);
+ unsigned int vm_size;
uint64_t tmp;
/* adjust vm size first */
if (amdgpu_vm_size != -1) {
- unsigned max_size = 1 << (max_bits - 30);
-
vm_size = amdgpu_vm_size;
if (vm_size > max_size) {
dev_warn(adev->dev, "VM size (%d) too large, max is %u GB\n",
amdgpu_vm_size, max_size);
vm_size = max_size;
}
+ } else {
+ struct sysinfo si;
+ unsigned int phys_ram_gb;
+
+ /* Optimal VM size depends on the amount of physical
+ * RAM available. Underlying requirements and
+ * assumptions:
+ *
+ * - Need to map system memory and VRAM from all GPUs
+ * - VRAM from other GPUs not known here
+ * - Assume VRAM <= system memory
+ * - On GFX8 and older, VM space can be segmented for
+ * different MTYPEs
+ * - Need to allow room for fragmentation, guard pages etc.
+ *
+ * This adds up to a rough guess of system memory x3.
+ * Round up to power of two to maximize the available
+ * VM size with the given page table size.
+ */
+ si_meminfo(&si);
+ phys_ram_gb = ((uint64_t)si.totalram * si.mem_unit +
+ (1 << 30) - 1) >> 30;
+ vm_size = roundup_pow_of_two(
+ min(max(phys_ram_gb * 3, min_vm_size), max_size));
}
adev->vm_manager.max_pfn = (uint64_t)vm_size << 18;
@@ -2587,7 +2642,7 @@ int amdgpu_vm_init(struct amdgpu_device *adev, struct amdgpu_vm *vm,
flags = AMDGPU_GEM_CREATE_VRAM_CONTIGUOUS;
if (vm->use_cpu_for_update)
flags |= AMDGPU_GEM_CREATE_CPU_ACCESS_REQUIRED;
- else
+ else if (vm_context != AMDGPU_VM_CONTEXT_COMPUTE)
flags |= AMDGPU_GEM_CREATE_SHADOW;
size = amdgpu_vm_bo_size(adev, adev->vm_manager.root_level);
@@ -2642,7 +2697,7 @@ error_free_root:
vm->root.base.bo = NULL;
error_free_sched_entity:
- drm_sched_entity_destroy(&ring->sched, &vm->entity);
+ drm_sched_entity_destroy(&vm->entity);
return r;
}
@@ -2662,8 +2717,7 @@ error_free_sched_entity:
* - pasid (old PASID is released, because compute manages its own PASIDs)
*
* Reinitializes the page directory to reflect the changed ATS
- * setting. May leave behind an unused shadow BO for the page
- * directory when switching from SDMA updates to CPU updates.
+ * setting.
*
* Returns:
* 0 for success, -errno for errors.
@@ -2713,6 +2767,9 @@ int amdgpu_vm_make_compute(struct amdgpu_device *adev, struct amdgpu_vm *vm)
vm->pasid = 0;
}
+ /* Free the shadow bo for compute VM */
+ amdgpu_bo_unref(&vm->root.base.bo->shadow);
+
error:
amdgpu_bo_unreserve(vm->root.base.bo);
return r;
@@ -2779,7 +2836,7 @@ void amdgpu_vm_fini(struct amdgpu_device *adev, struct amdgpu_vm *vm)
spin_unlock_irqrestore(&adev->vm_manager.pasid_lock, flags);
}
- drm_sched_entity_destroy(vm->entity.sched, &vm->entity);
+ drm_sched_entity_destroy(&vm->entity);
if (!RB_EMPTY_ROOT(&vm->va.rb_root)) {
dev_err(adev->dev, "still active bo inside vm\n");
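
The auto-sizing branch added to amdgpu_vm_adjust_size() is self-contained arithmetic: round physical RAM up to whole GB, triple it (system memory + VRAM + fragmentation headroom, per the in-code comment), clamp between the per-ASIC minimum and the address-space maximum, then round up to a power of two so the chosen page-table depth is fully used. A standalone sketch of the same computation:

#include <linux/kernel.h>	/* min(), max() */
#include <linux/log2.h>		/* roundup_pow_of_two() */
#include <linux/types.h>

static unsigned int example_auto_vm_size(u64 totalram, u32 mem_unit,
					 unsigned int min_gb,
					 unsigned int max_gb)
{
	unsigned int phys_ram_gb = (totalram * mem_unit +
				    (1ULL << 30) - 1) >> 30;	/* round up */

	return roundup_pow_of_two(min(max(phys_ram_gb * 3, min_gb), max_gb));
}
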
diff --git a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
index d416f895233d..9fa9df0c5e7f 100644
--- a/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
+++ b/drivers/gpu/drm/amd/amdgpu/amdgpu_vm.h
@@ -318,9 +318,10 @@ int amdgpu_vm_bo_clear_mappings(struct amdgpu_device *adev,
uint64_t saddr, uint64_t size);
struct amdgpu_bo_va_mapping *amdgpu_vm_bo_lookup_mapping(struct amdgpu_vm *vm,
uint64_t addr);
+void amdgpu_vm_bo_trace_cs(struct amdgpu_vm *vm, struct ww_acquire_ctx *ticket);
void amdgpu_vm_bo_rmv(struct amdgpu_device *adev,
struct amdgpu_bo_va *bo_va);
-void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t vm_size,
+void amdgpu_vm_adjust_size(struct amdgpu_device *adev, uint32_t min_vm_size,
uint32_t fragment_size_default, unsigned max_level,
unsigned max_bits);
int amdgpu_vm_ioctl(struct drm_device *dev, void *data, struct drm_file *filp);
diff --git a/drivers/gpu/drm/amd/amdgpu/cik.c b/drivers/gpu/drm/amd/amdgpu/cik.c
index 702e257a483f..78ab939ae5d8 100644
--- a/drivers/gpu/drm/amd/amdgpu/cik.c
+++ b/drivers/gpu/drm/amd/amdgpu/cik.c
@@ -1476,7 +1476,7 @@ static void cik_pcie_gen3_enable(struct amdgpu_device *adev)
tmp |= PCIE_LC_CNTL4__LC_REDO_EQ_MASK;
WREG32_PCIE(ixPCIE_LC_CNTL4, tmp);
- mdelay(100);
+ msleep(100);
/* linkctl */
pci_read_config_word(root, bridge_pos + PCI_EXP_LNKCTL, &tmp16);
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
index 5cd45210113f..5a9534a82d40 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v8_0.c
@@ -5664,6 +5664,11 @@ static int gfx_v8_0_set_powergating_state(void *handle,
if (amdgpu_sriov_vf(adev))
return 0;
+ if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_RLC_SMU_HS |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_GFX_DMG))
+ adev->gfx.rlc.funcs->enter_safe_mode(adev);
switch (adev->asic_type) {
case CHIP_CARRIZO:
case CHIP_STONEY:
@@ -5713,7 +5718,11 @@ static int gfx_v8_0_set_powergating_state(void *handle,
default:
break;
}
-
+ if (adev->pg_flags & (AMD_PG_SUPPORT_GFX_SMG |
+ AMD_PG_SUPPORT_RLC_SMU_HS |
+ AMD_PG_SUPPORT_CP |
+ AMD_PG_SUPPORT_GFX_DMG))
+ adev->gfx.rlc.funcs->exit_safe_mode(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
index 9ab39117cc4e..ef00d14f8645 100644
--- a/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gfx_v9_0.c
@@ -3490,7 +3490,7 @@ static void gfx_v9_0_enter_rlc_safe_mode(struct amdgpu_device *adev)
/* wait for RLC_SAFE_MODE */
for (i = 0; i < adev->usec_timeout; i++) {
- if (!REG_GET_FIELD(SOC15_REG_OFFSET(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
+ if (!REG_GET_FIELD(RREG32_SOC15(GC, 0, mmRLC_SAFE_MODE), RLC_SAFE_MODE, CMD))
break;
udelay(1);
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
index 75317f283c69..ad151fefa41f 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v6_0.c
@@ -632,12 +632,6 @@ static void gmc_v6_0_gart_disable(struct amdgpu_device *adev)
amdgpu_gart_table_vram_unpin(adev);
}
-static void gmc_v6_0_gart_fini(struct amdgpu_device *adev)
-{
- amdgpu_gart_table_vram_free(adev);
- amdgpu_gart_fini(adev);
-}
-
static void gmc_v6_0_vm_decode_fault(struct amdgpu_device *adev,
u32 status, u32 addr, u32 mc_client)
{
@@ -935,8 +929,9 @@ static int gmc_v6_0_sw_fini(void *handle)
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
- gmc_v6_0_gart_fini(adev);
+ amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
+ amdgpu_gart_fini(adev);
release_firmware(adev->gmc.fw);
adev->gmc.fw = NULL;
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
index 10920f0bd85f..f8d8a3a73e42 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v7_0.c
@@ -28,6 +28,7 @@
#include "cik.h"
#include "gmc_v7_0.h"
#include "amdgpu_ucode.h"
+#include "amdgpu_amdkfd.h"
#include "bif/bif_4_1_d.h"
#include "bif/bif_4_1_sh_mask.h"
@@ -746,19 +747,6 @@ static void gmc_v7_0_gart_disable(struct amdgpu_device *adev)
}
/**
- * gmc_v7_0_gart_fini - vm fini callback
- *
- * @adev: amdgpu_device pointer
- *
- * Tears down the driver GART/VM setup (CIK).
- */
-static void gmc_v7_0_gart_fini(struct amdgpu_device *adev)
-{
- amdgpu_gart_table_vram_free(adev);
- amdgpu_gart_fini(adev);
-}
-
-/**
* gmc_v7_0_vm_decode_fault - print human readable fault info
*
* @adev: amdgpu_device pointer
@@ -1078,6 +1066,12 @@ static int gmc_v7_0_sw_init(void *handle)
adev->vm_manager.vram_base_offset = 0;
}
+ adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
+ GFP_KERNEL);
+ if (!adev->gmc.vm_fault_info)
+ return -ENOMEM;
+ atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+
return 0;
}
@@ -1087,8 +1081,10 @@ static int gmc_v7_0_sw_fini(void *handle)
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
- gmc_v7_0_gart_fini(adev);
+ kfree(adev->gmc.vm_fault_info);
+ amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
+ amdgpu_gart_fini(adev);
release_firmware(adev->gmc.fw);
adev->gmc.fw = NULL;
@@ -1276,7 +1272,7 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- u32 addr, status, mc_client;
+ u32 addr, status, mc_client, vmid;
addr = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_ADDR);
status = RREG32(mmVM_CONTEXT1_PROTECTION_FAULT_STATUS);
@@ -1301,6 +1297,29 @@ static int gmc_v7_0_process_interrupt(struct amdgpu_device *adev,
entry->pasid);
}
+ vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ VMID);
+ if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
+ && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
+ struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
+ u32 protections = REG_GET_FIELD(status,
+ VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ PROTECTIONS);
+
+ info->vmid = vmid;
+ info->mc_id = REG_GET_FIELD(status,
+ VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ MEMORY_CLIENT_ID);
+ info->status = status;
+ info->page_addr = addr;
+ info->prot_valid = protections & 0x7 ? true : false;
+ info->prot_read = protections & 0x8 ? true : false;
+ info->prot_write = protections & 0x10 ? true : false;
+ info->prot_exec = protections & 0x20 ? true : false;
+ mb();
+ atomic_set(&adev->gmc.vm_fault_info_updated, 1);
+ }
+
return 0;
}
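
The new interrupt hunk snapshots VM_CONTEXT1_PROTECTION_FAULT_STATUS into a kfd_vm_fault_info, with the mb() ordering the info writes before the updated flag becomes visible to the KFD reader. The PROTECTIONS field packs the failed checks as bits, split into booleans exactly as above. A hedged sketch of the decode on a raw status dword (bit layout as consumed by the hunk):

#include <stdbool.h>
#include <stdint.h>

struct example_fault {
	bool valid, read, write, exec;
};

/* bits 0-2: validity checks; bit 3: read; bit 4: write; bit 5: execute */
static struct example_fault example_decode(uint32_t protections)
{
	return (struct example_fault){
		.valid = protections & 0x7,
		.read  = protections & 0x8,
		.write = protections & 0x10,
		.exec  = protections & 0x20,
	};
}
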
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
index 75f3ffb2891e..9333109b210d 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v8_0.c
@@ -26,6 +26,7 @@
#include "amdgpu.h"
#include "gmc_v8_0.h"
#include "amdgpu_ucode.h"
+#include "amdgpu_amdkfd.h"
#include "gmc/gmc_8_1_d.h"
#include "gmc/gmc_8_1_sh_mask.h"
@@ -968,19 +969,6 @@ static void gmc_v8_0_gart_disable(struct amdgpu_device *adev)
}
/**
- * gmc_v8_0_gart_fini - vm fini callback
- *
- * @adev: amdgpu_device pointer
- *
- * Tears down the driver GART/VM setup (CIK).
- */
-static void gmc_v8_0_gart_fini(struct amdgpu_device *adev)
-{
- amdgpu_gart_table_vram_free(adev);
- amdgpu_gart_fini(adev);
-}
-
-/**
* gmc_v8_0_vm_decode_fault - print human readable fault info
*
* @adev: amdgpu_device pointer
@@ -1182,6 +1170,12 @@ static int gmc_v8_0_sw_init(void *handle)
adev->vm_manager.vram_base_offset = 0;
}
+ adev->gmc.vm_fault_info = kmalloc(sizeof(struct kfd_vm_fault_info),
+ GFP_KERNEL);
+ if (!adev->gmc.vm_fault_info)
+ return -ENOMEM;
+ atomic_set(&adev->gmc.vm_fault_info_updated, 0);
+
return 0;
}
@@ -1191,8 +1185,10 @@ static int gmc_v8_0_sw_fini(void *handle)
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
- gmc_v8_0_gart_fini(adev);
+ kfree(adev->gmc.vm_fault_info);
+ amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
+ amdgpu_gart_fini(adev);
release_firmware(adev->gmc.fw);
adev->gmc.fw = NULL;
@@ -1426,7 +1422,7 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
struct amdgpu_irq_src *source,
struct amdgpu_iv_entry *entry)
{
- u32 addr, status, mc_client;
+ u32 addr, status, mc_client, vmid;
if (amdgpu_sriov_vf(adev)) {
dev_err(adev->dev, "GPU fault detected: %d 0x%08x\n",
@@ -1463,6 +1459,29 @@ static int gmc_v8_0_process_interrupt(struct amdgpu_device *adev,
entry->pasid);
}
+ vmid = REG_GET_FIELD(status, VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ VMID);
+ if (amdgpu_amdkfd_is_kfd_vmid(adev, vmid)
+ && !atomic_read(&adev->gmc.vm_fault_info_updated)) {
+ struct kfd_vm_fault_info *info = adev->gmc.vm_fault_info;
+ u32 protections = REG_GET_FIELD(status,
+ VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ PROTECTIONS);
+
+ info->vmid = vmid;
+ info->mc_id = REG_GET_FIELD(status,
+ VM_CONTEXT1_PROTECTION_FAULT_STATUS,
+ MEMORY_CLIENT_ID);
+ info->status = status;
+ info->page_addr = addr;
+ info->prot_valid = protections & 0x7 ? true : false;
+ info->prot_read = protections & 0x8 ? true : false;
+ info->prot_write = protections & 0x10 ? true : false;
+ info->prot_exec = protections & 0x20 ? true : false;
+ mb();
+ atomic_set(&adev->gmc.vm_fault_info_updated, 1);
+ }
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
index 9df94b45d17d..72f8018fa2a8 100644
--- a/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/gmc_v9_0.c
@@ -269,7 +269,7 @@ static int gmc_v9_0_process_interrupt(struct amdgpu_device *adev,
entry->src_id, entry->ring_id, entry->vmid,
entry->pasid, task_info.process_name, task_info.tgid,
task_info.task_name, task_info.pid);
- dev_err(adev->dev, " at page 0x%016llx from %d\n",
+ dev_err(adev->dev, " at address 0x%016llx from %d\n",
addr, entry->client_id);
if (!amdgpu_sriov_vf(adev))
dev_err(adev->dev,
@@ -942,26 +942,12 @@ static int gmc_v9_0_sw_init(void *handle)
return 0;
}
-/**
- * gmc_v9_0_gart_fini - vm fini callback
- *
- * @adev: amdgpu_device pointer
- *
- * Tears down the driver GART/VM setup (CIK).
- */
-static void gmc_v9_0_gart_fini(struct amdgpu_device *adev)
-{
- amdgpu_gart_table_vram_free(adev);
- amdgpu_gart_fini(adev);
-}
-
static int gmc_v9_0_sw_fini(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
amdgpu_gem_force_release(adev);
amdgpu_vm_manager_fini(adev);
- gmc_v9_0_gart_fini(adev);
/*
* TODO:
@@ -974,7 +960,9 @@ static int gmc_v9_0_sw_fini(void *handle)
*/
amdgpu_bo_free_kernel(&adev->stolen_vga_memory, NULL, NULL);
+ amdgpu_gart_table_vram_free(adev);
amdgpu_bo_fini(adev);
+ amdgpu_gart_fini(adev);
return 0;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
index 3f57f6463dc8..cb79a93c2eb7 100644
--- a/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/kv_dpm.c
@@ -65,8 +65,6 @@ static int kv_set_thermal_temperature_range(struct amdgpu_device *adev,
int min_temp, int max_temp);
static int kv_init_fps_limits(struct amdgpu_device *adev);
-static void kv_dpm_powergate_uvd(void *handle, bool gate);
-static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate);
static void kv_dpm_powergate_acp(struct amdgpu_device *adev, bool gate);
@@ -1354,8 +1352,6 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
return ret;
}
- kv_update_current_ps(adev, adev->pm.dpm.boot_ps);
-
if (adev->irq.installed &&
amdgpu_is_internal_thermal_sensor(adev->pm.int_thermal_type)) {
ret = kv_set_thermal_temperature_range(adev, KV_TEMP_RANGE_MIN, KV_TEMP_RANGE_MAX);
@@ -1374,6 +1370,8 @@ static int kv_dpm_enable(struct amdgpu_device *adev)
static void kv_dpm_disable(struct amdgpu_device *adev)
{
+ struct kv_power_info *pi = kv_get_pi(adev);
+
amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
AMDGPU_THERMAL_IRQ_LOW_TO_HIGH);
amdgpu_irq_put(adev, &adev->pm.dpm.thermal.irq,
@@ -1387,8 +1385,10 @@ static void kv_dpm_disable(struct amdgpu_device *adev)
/* powerup blocks */
kv_dpm_powergate_acp(adev, false);
kv_dpm_powergate_samu(adev, false);
- kv_dpm_powergate_vce(adev, false);
- kv_dpm_powergate_uvd(adev, false);
+ if (pi->caps_vce_pg) /* power on the VCE block */
+ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
+ if (pi->caps_uvd_pg) /* power on the UVD block */
+ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_UVDPowerON);
kv_enable_smc_cac(adev, false);
kv_enable_didt(adev, false);
@@ -1551,7 +1551,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
int ret;
if (amdgpu_new_state->evclk > 0 && amdgpu_current_state->evclk == 0) {
- kv_dpm_powergate_vce(adev, false);
if (pi->caps_stable_p_state)
pi->vce_boot_level = table->count - 1;
else
@@ -1573,7 +1572,6 @@ static int kv_update_vce_dpm(struct amdgpu_device *adev,
kv_enable_vce_dpm(adev, true);
} else if (amdgpu_new_state->evclk == 0 && amdgpu_current_state->evclk > 0) {
kv_enable_vce_dpm(adev, false);
- kv_dpm_powergate_vce(adev, true);
}
return 0;
@@ -1702,24 +1700,32 @@ static void kv_dpm_powergate_uvd(void *handle, bool gate)
}
}
-static void kv_dpm_powergate_vce(struct amdgpu_device *adev, bool gate)
+static void kv_dpm_powergate_vce(void *handle, bool gate)
{
+ struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct kv_power_info *pi = kv_get_pi(adev);
-
- if (pi->vce_power_gated == gate)
- return;
+ int ret;
pi->vce_power_gated = gate;
- if (!pi->caps_vce_pg)
- return;
-
- if (gate)
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
- else
- amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
+ if (gate) {
+ /* stop the VCE block */
+ ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_GATE);
+ kv_enable_vce_dpm(adev, false);
+ if (pi->caps_vce_pg) /* power off the VCE block */
+ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerOFF);
+ } else {
+ if (pi->caps_vce_pg) /* power on the VCE block */
+ amdgpu_kv_notify_message_to_smu(adev, PPSMC_MSG_VCEPowerON);
+ kv_enable_vce_dpm(adev, true);
+ /* re-init the VCE block */
+ ret = amdgpu_device_ip_set_powergating_state(adev, AMD_IP_BLOCK_TYPE_VCE,
+ AMD_PG_STATE_UNGATE);
+ }
}
+
static void kv_dpm_powergate_samu(struct amdgpu_device *adev, bool gate)
{
struct kv_power_info *pi = kv_get_pi(adev);
@@ -3061,7 +3067,7 @@ static int kv_dpm_hw_init(void *handle)
else
adev->pm.dpm_enabled = true;
mutex_unlock(&adev->pm.mutex);
-
+ amdgpu_pm_compute_clocks(adev);
return ret;
}
@@ -3313,6 +3319,9 @@ static int kv_set_powergating_by_smu(void *handle,
case AMD_IP_BLOCK_TYPE_UVD:
kv_dpm_powergate_uvd(handle, gate);
break;
+ case AMD_IP_BLOCK_TYPE_VCE:
+ kv_dpm_powergate_vce(handle, gate);
+ break;
default:
break;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
index 0ff136d02d9b..02be34e72ed9 100644
--- a/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/psp_v10_0.c
@@ -88,6 +88,9 @@ psp_v10_0_get_fw_type(struct amdgpu_firmware_info *ucode, enum psp_gfx_fw_type *
case AMDGPU_UCODE_ID_VCE:
*type = GFX_FW_TYPE_VCE;
break;
+ case AMDGPU_UCODE_ID_VCN:
+ *type = GFX_FW_TYPE_VCN;
+ break;
case AMDGPU_UCODE_ID_MAXIMUM:
default:
return -EINVAL;
diff --git a/drivers/gpu/drm/amd/amdgpu/si_dpm.c b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
index db327b412562..1de96995e690 100644
--- a/drivers/gpu/drm/amd/amdgpu/si_dpm.c
+++ b/drivers/gpu/drm/amd/amdgpu/si_dpm.c
@@ -6887,7 +6887,6 @@ static int si_dpm_enable(struct amdgpu_device *adev)
si_enable_auto_throttle_source(adev, AMDGPU_DPM_AUTO_THROTTLE_SRC_THERMAL, true);
si_thermal_start_thermal_controller(adev);
- ni_update_current_ps(adev, boot_ps);
return 0;
}
@@ -7763,7 +7762,7 @@ static int si_dpm_hw_init(void *handle)
else
adev->pm.dpm_enabled = true;
mutex_unlock(&adev->pm.mutex);
-
+ amdgpu_pm_compute_clocks(adev);
return ret;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
index 6fed3d7797a8..8a926d1df939 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v4_2.c
@@ -123,6 +123,10 @@ static int uvd_v4_2_sw_init(void *handle)
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+ if (r)
+ return r;
+
+ r = amdgpu_uvd_entity_init(adev);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
index aeaa1ca46a99..50248059412e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v5_0.c
@@ -120,6 +120,10 @@ static int uvd_v5_0_sw_init(void *handle)
ring = &adev->uvd.inst->ring;
sprintf(ring->name, "uvd");
r = amdgpu_ring_init(adev, ring, 512, &adev->uvd.inst->irq, 0);
+ if (r)
+ return r;
+
+ r = amdgpu_uvd_entity_init(adev);
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
index 598dbeaba636..6ae82cc2e55e 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v6_0.c
@@ -440,6 +440,8 @@ static int uvd_v6_0_sw_init(void *handle)
}
}
+ r = amdgpu_uvd_entity_init(adev);
+
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
index db5f3d78ab12..9b7f8469bc5c 100644
--- a/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/uvd_v7_0.c
@@ -41,6 +41,12 @@
#include "mmhub/mmhub_1_0_sh_mask.h"
#include "ivsrcid/uvd/irqsrcs_uvd_7_0.h"
+#define mmUVD_PG0_CC_UVD_HARVESTING 0x00c7
+#define mmUVD_PG0_CC_UVD_HARVESTING_BASE_IDX 1
+//UVD_PG0_CC_UVD_HARVESTING
+#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE__SHIFT 0x1
+#define UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK 0x00000002L
+
#define UVD7_MAX_HW_INSTANCES_VEGA20 2
static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev);
@@ -370,10 +376,25 @@ error:
static int uvd_v7_0_early_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
- if (adev->asic_type == CHIP_VEGA20)
+
+ if (adev->asic_type == CHIP_VEGA20) {
+ u32 harvest;
+ int i;
+
adev->uvd.num_uvd_inst = UVD7_MAX_HW_INSTANCES_VEGA20;
- else
+ for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ harvest = RREG32_SOC15(UVD, i, mmUVD_PG0_CC_UVD_HARVESTING);
+ if (harvest & UVD_PG0_CC_UVD_HARVESTING__UVD_DISABLE_MASK) {
+ adev->uvd.harvest_config |= 1 << i;
+ }
+ }
+ if (adev->uvd.harvest_config == (AMDGPU_UVD_HARVEST_UVD0 |
+ AMDGPU_UVD_HARVEST_UVD1))
+ /* both instances are harvested, disable the block */
+ return -ENOENT;
+ } else {
adev->uvd.num_uvd_inst = 1;
+ }
if (amdgpu_sriov_vf(adev))
adev->uvd.num_enc_rings = 1;
@@ -389,10 +410,13 @@ static int uvd_v7_0_early_init(void *handle)
static int uvd_v7_0_sw_init(void *handle)
{
struct amdgpu_ring *ring;
+
int i, j, r;
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
/* UVD TRAP */
r = amdgpu_irq_add_id(adev, amdgpu_ih_clientid_uvds[j], UVD_7_0__SRCID__UVD_SYSTEM_MESSAGE_INTERRUPT, &adev->uvd.inst[j].irq);
if (r)
@@ -425,6 +449,8 @@ static int uvd_v7_0_sw_init(void *handle)
return r;
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
if (!amdgpu_sriov_vf(adev)) {
ring = &adev->uvd.inst[j].ring;
sprintf(ring->name, "uvd<%d>", j);
@@ -453,6 +479,10 @@ static int uvd_v7_0_sw_init(void *handle)
}
}
+ r = amdgpu_uvd_entity_init(adev);
+ if (r)
+ return r;
+
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
return r;
@@ -472,6 +502,8 @@ static int uvd_v7_0_sw_fini(void *handle)
return r;
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
for (i = 0; i < adev->uvd.num_enc_rings; ++i)
amdgpu_ring_fini(&adev->uvd.inst[j].ring_enc[i]);
}
@@ -500,6 +532,8 @@ static int uvd_v7_0_hw_init(void *handle)
goto done;
for (j = 0; j < adev->uvd.num_uvd_inst; ++j) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
ring = &adev->uvd.inst[j].ring;
if (!amdgpu_sriov_vf(adev)) {
@@ -579,8 +613,11 @@ static int uvd_v7_0_hw_fini(void *handle)
DRM_DEBUG("For SRIOV client, shouldn't do anything.\n");
}
- for (i = 0; i < adev->uvd.num_uvd_inst; ++i)
+ for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
adev->uvd.inst[i].ring.ready = false;
+ }
return 0;
}
@@ -623,6 +660,8 @@ static void uvd_v7_0_mc_resume(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
WREG32_SOC15(UVD, i, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
lower_32_bits(adev->firmware.ucode[AMDGPU_UCODE_ID_UVD].mc_addr));
@@ -695,6 +734,8 @@ static int uvd_v7_0_mmsch_start(struct amdgpu_device *adev,
WREG32_SOC15(VCE, 0, mmVCE_MMSCH_VF_MAILBOX_RESP, 0);
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
WDOORBELL32(adev->uvd.inst[i].ring_enc[0].doorbell_index, 0);
adev->wb.wb[adev->uvd.inst[i].ring_enc[0].wptr_offs] = 0;
adev->uvd.inst[i].ring_enc[0].wptr = 0;
@@ -751,6 +792,8 @@ static int uvd_v7_0_sriov_start(struct amdgpu_device *adev)
init_table += header->uvd_table_offset;
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
ring = &adev->uvd.inst[i].ring;
ring->wptr = 0;
size = AMDGPU_GPU_PAGE_ALIGN(adev->uvd.fw->size + 4);
@@ -890,6 +933,8 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
int i, j, k, r;
for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
+ if (adev->uvd.harvest_config & (1 << k))
+ continue;
/* disable DPG */
WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_POWER_STATUS), 0,
~UVD_POWER_STATUS__UVD_PG_MODE_MASK);
@@ -902,6 +947,8 @@ static int uvd_v7_0_start(struct amdgpu_device *adev)
uvd_v7_0_mc_resume(adev);
for (k = 0; k < adev->uvd.num_uvd_inst; ++k) {
+ if (adev->uvd.harvest_config & (1 << k))
+ continue;
ring = &adev->uvd.inst[k].ring;
/* disable clock gating */
WREG32_P(SOC15_REG_OFFSET(UVD, k, mmUVD_CGC_CTRL), 0,
@@ -1069,6 +1116,8 @@ static void uvd_v7_0_stop(struct amdgpu_device *adev)
uint8_t i = 0;
for (i = 0; i < adev->uvd.num_uvd_inst; ++i) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
/* force RBC into idle state */
WREG32_SOC15(UVD, i, mmUVD_RBC_RB_CNTL, 0x11010101);
@@ -1206,6 +1255,34 @@ static int uvd_v7_0_ring_test_ring(struct amdgpu_ring *ring)
}
/**
+ * uvd_v7_0_ring_patch_cs_in_place - Patch the IB for command submission.
+ *
+ * @p: the CS parser with the IBs
+ * @ib_idx: which IB to patch
+ *
+ */
+static int uvd_v7_0_ring_patch_cs_in_place(struct amdgpu_cs_parser *p,
+ uint32_t ib_idx)
+{
+ struct amdgpu_ib *ib = &p->job->ibs[ib_idx];
+ unsigned i;
+
+ /* No patching necessary for the first instance */
+ if (!p->ring->me)
+ return 0;
+
+ for (i = 0; i < ib->length_dw; i += 2) {
+ uint32_t reg = amdgpu_get_ib_value(p, ib_idx, i);
+
+ reg -= p->adev->reg_offset[UVD_HWIP][0][1];
+ reg += p->adev->reg_offset[UVD_HWIP][1][1];
+
+ amdgpu_set_ib_value(p, ib_idx, i, reg);
+ }
+ return 0;
+}
+
+/**
* uvd_v7_0_ring_emit_ib - execute indirect buffer
*
* @ring: amdgpu_ring pointer
@@ -1697,6 +1774,7 @@ static const struct amdgpu_ring_funcs uvd_v7_0_ring_vm_funcs = {
.get_rptr = uvd_v7_0_ring_get_rptr,
.get_wptr = uvd_v7_0_ring_get_wptr,
.set_wptr = uvd_v7_0_ring_set_wptr,
+ .patch_cs_in_place = uvd_v7_0_ring_patch_cs_in_place,
.emit_frame_size =
6 + /* hdp invalidate */
SOC15_FLUSH_GPU_TLB_NUM_WREG * 6 +
@@ -1756,6 +1834,8 @@ static void uvd_v7_0_set_ring_funcs(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
adev->uvd.inst[i].ring.funcs = &uvd_v7_0_ring_vm_funcs;
adev->uvd.inst[i].ring.me = i;
DRM_INFO("UVD(%d) is enabled in VM mode\n", i);
@@ -1767,6 +1847,8 @@ static void uvd_v7_0_set_enc_ring_funcs(struct amdgpu_device *adev)
int i, j;
for (j = 0; j < adev->uvd.num_uvd_inst; j++) {
+ if (adev->uvd.harvest_config & (1 << j))
+ continue;
for (i = 0; i < adev->uvd.num_enc_rings; ++i) {
adev->uvd.inst[j].ring_enc[i].funcs = &uvd_v7_0_enc_ring_vm_funcs;
adev->uvd.inst[j].ring_enc[i].me = j;
@@ -1786,6 +1868,8 @@ static void uvd_v7_0_set_irq_funcs(struct amdgpu_device *adev)
int i;
for (i = 0; i < adev->uvd.num_uvd_inst; i++) {
+ if (adev->uvd.harvest_config & (1 << i))
+ continue;
adev->uvd.inst[i].irq.num_types = adev->uvd.num_enc_rings + 1;
adev->uvd.inst[i].irq.funcs = &uvd_v7_0_irq_funcs;
}
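
uvd_v7_0_ring_patch_cs_in_place() above relies on UVD command IBs being flat register/value pairs: rebasing a submission onto the second instance is just subtracting instance 0's register segment base and adding instance 1's, leaving every value dword alone. A hedged sketch of that rebase over a plain dword array (bases hypothetical):

#include <stddef.h>
#include <stdint.h>

/* Rebase {reg, value} packets from one instance to another. base0/base1
 * are the per-instance register segment bases (illustrative). */
static void example_rebase_ib(uint32_t *ib, size_t len_dw,
			      uint32_t base0, uint32_t base1)
{
	size_t i;

	for (i = 0; i + 1 < len_dw; i += 2)
		ib[i] = ib[i] - base0 + base1;	/* ib[i + 1] is the value */
}
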
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
index d48e877b682e..7eaa54ba016b 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v2_0.c
@@ -439,6 +439,8 @@ static int vce_v2_0_sw_init(void *handle)
return r;
}
+ r = amdgpu_vce_entity_init(adev);
+
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
index cc6ce6cc03f4..c8390f9adfd6 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v3_0.c
@@ -448,6 +448,8 @@ static int vce_v3_0_sw_init(void *handle)
return r;
}
+ r = amdgpu_vce_entity_init(adev);
+
return r;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
index 65f8860169e9..2e4d1b5f6243 100644
--- a/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vce_v4_0.c
@@ -419,6 +419,7 @@ static int vce_v4_0_sw_init(void *handle)
{
struct amdgpu_device *adev = (struct amdgpu_device *)handle;
struct amdgpu_ring *ring;
+
unsigned size;
int r, i;
@@ -438,7 +439,7 @@ static int vce_v4_0_sw_init(void *handle)
const struct common_firmware_header *hdr;
unsigned size = amdgpu_bo_size(adev->vce.vcpu_bo);
- adev->vce.saved_bo = kmalloc(size, GFP_KERNEL);
+ adev->vce.saved_bo = kvmalloc(size, GFP_KERNEL);
if (!adev->vce.saved_bo)
return -ENOMEM;
@@ -474,6 +475,11 @@ static int vce_v4_0_sw_init(void *handle)
return r;
}
+
+ r = amdgpu_vce_entity_init(adev);
+ if (r)
+ return r;
+
r = amdgpu_virt_alloc_mm_table(adev);
if (r)
return r;
@@ -490,7 +496,7 @@ static int vce_v4_0_sw_fini(void *handle)
amdgpu_virt_free_mm_table(adev);
if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
- kfree(adev->vce.saved_bo);
+ kvfree(adev->vce.saved_bo);
adev->vce.saved_bo = NULL;
}
diff --git a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
index 2ce91a748c40..072371ef5975 100644
--- a/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
+++ b/drivers/gpu/drm/amd/amdgpu/vcn_v1_0.c
@@ -100,6 +100,16 @@ static int vcn_v1_0_sw_init(void *handle)
if (r)
return r;
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ const struct common_firmware_header *hdr;
+ hdr = (const struct common_firmware_header *)adev->vcn.fw->data;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].ucode_id = AMDGPU_UCODE_ID_VCN;
+ adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].fw = adev->vcn.fw;
+ adev->firmware.fw_size +=
+ ALIGN(le32_to_cpu(hdr->ucode_size_bytes), PAGE_SIZE);
+ DRM_INFO("PSP loading VCN firmware\n");
+ }
+
r = amdgpu_vcn_resume(adev);
if (r)
return r;
@@ -265,26 +275,38 @@ static int vcn_v1_0_resume(void *handle)
static void vcn_v1_0_mc_resume(struct amdgpu_device *adev)
{
uint32_t size = AMDGPU_GPU_PAGE_ALIGN(adev->vcn.fw->size + 4);
-
- WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ uint32_t offset;
+
+ if (adev->firmware.load_type == AMDGPU_FW_LOAD_PSP) {
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_lo));
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ (adev->firmware.ucode[AMDGPU_UCODE_ID_VCN].tmr_mc_addr_hi));
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0, 0);
+ offset = 0;
+ } else {
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_LOW,
lower_32_bits(adev->vcn.gpu_addr));
- WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
+ WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE_64BIT_BAR_HIGH,
upper_32_bits(adev->vcn.gpu_addr));
- WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
- AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ offset = size;
+ WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET0,
+ AMDGPU_UVD_FIRMWARE_OFFSET >> 3);
+ }
+
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE0, size);
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.gpu_addr + size));
+ lower_32_bits(adev->vcn.gpu_addr + offset));
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE1_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.gpu_addr + size));
+ upper_32_bits(adev->vcn.gpu_addr + offset));
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET1, 0);
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE1, AMDGPU_VCN_HEAP_SIZE);
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_LOW,
- lower_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
+ lower_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
WREG32_SOC15(UVD, 0, mmUVD_LMI_VCPU_CACHE2_64BIT_BAR_HIGH,
- upper_32_bits(adev->vcn.gpu_addr + size + AMDGPU_VCN_HEAP_SIZE));
+ upper_32_bits(adev->vcn.gpu_addr + offset + AMDGPU_VCN_HEAP_SIZE));
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_OFFSET2, 0);
WREG32_SOC15(UVD, 0, mmUVD_VCPU_CACHE_SIZE2,
AMDGPU_VCN_STACK_SIZE + (AMDGPU_VCN_SESSION_SIZE * 40));
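The offset selection above is the core of this hunk: when the PSP loads the VCN firmware, the firmware lives in the PSP-managed TMR (programmed through the tmr_mc_addr_lo/hi values), so cache window 0 starts at offset 0 and the heap/stack windows begin at the base of the driver's VCN BO; with direct loading, the firmware occupies the start of the BO and the later windows are pushed back by its aligned size. A sketch of the resulting address math, with illustrative names:

#include <linux/types.h>

/* base of cache window 1 (heap); window 2 adds AMDGPU_VCN_HEAP_SIZE */
static u64 vcn_cache1_base(bool psp_load, u64 bo_gpu_addr, u32 fw_size)
{
	u32 offset = psp_load ? 0 : fw_size;

	return bo_gpu_addr + offset;
}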
diff --git a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
index 52778de93ab0..2d4473557b0d 100644
--- a/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
+++ b/drivers/gpu/drm/amd/amdgpu/vega20_reg_init.c
@@ -38,6 +38,7 @@ int vega20_reg_base_init(struct amdgpu_device *adev)
adev->reg_offset[ATHUB_HWIP][i] = (uint32_t *)(&(ATHUB_BASE.instance[i]));
adev->reg_offset[NBIO_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
adev->reg_offset[MP0_HWIP][i] = (uint32_t *)(&(MP0_BASE.instance[i]));
+ adev->reg_offset[MP1_HWIP][i] = (uint32_t *)(&(MP1_BASE.instance[i]));
adev->reg_offset[UVD_HWIP][i] = (uint32_t *)(&(UVD_BASE.instance[i]));
adev->reg_offset[VCE_HWIP][i] = (uint32_t *)(&(VCE_BASE.instance[i]));
adev->reg_offset[DF_HWIP][i] = (uint32_t *)(&(DF_BASE.instance[i]));
@@ -46,6 +47,8 @@ int vega20_reg_base_init(struct amdgpu_device *adev)
adev->reg_offset[SDMA0_HWIP][i] = (uint32_t *)(&(SDMA0_BASE.instance[i]));
adev->reg_offset[SDMA1_HWIP][i] = (uint32_t *)(&(SDMA1_BASE.instance[i]));
adev->reg_offset[SMUIO_HWIP][i] = (uint32_t *)(&(SMUIO_BASE.instance[i]));
+ adev->reg_offset[NBIF_HWIP][i] = (uint32_t *)(&(NBIO_BASE.instance[i]));
+ adev->reg_offset[THM_HWIP][i] = (uint32_t *)(&(THM_BASE.instance[i]));
}
return 0;
}
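The rows added above extend the per-IP register base table that the SOC15 accessors consume; the WREG32_SOC15() calls in the VCN hunk, for example, resolve their registers through it. Roughly, hedged as a sketch of the existing convention rather than code from this patch:

/* resolve a register: per-IP, per-instance segment base + relative offset */
#define SOC15_REG_OFFSET(ip, inst, reg) \
	(adev->reg_offset[ip##_HWIP][inst][reg##_BASE_IDX] + reg)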
diff --git a/drivers/gpu/drm/amd/amdgpu/vi.c b/drivers/gpu/drm/amd/amdgpu/vi.c
index 42c8ad105b05..88b57a5e9489 100644
--- a/drivers/gpu/drm/amd/amdgpu/vi.c
+++ b/drivers/gpu/drm/amd/amdgpu/vi.c
@@ -112,8 +112,8 @@ static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
u32 r;
spin_lock_irqsave(&adev->smc_idx_lock, flags);
- WREG32(mmSMC_IND_INDEX_11, (reg));
- r = RREG32(mmSMC_IND_DATA_11);
+ WREG32_NO_KIQ(mmSMC_IND_INDEX_11, (reg));
+ r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
return r;
}
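For readability, the patched read helper reconstructed in full from the hunk above; the _NO_KIQ accessors force direct MMIO instead of the KIQ-based register path that SR-IOV guests normally take, which must not be entered while the index/data pair is held under the spinlock:

static u32 vi_smc_rreg(struct amdgpu_device *adev, u32 reg)
{
	unsigned long flags;
	u32 r;

	spin_lock_irqsave(&adev->smc_idx_lock, flags);
	WREG32_NO_KIQ(mmSMC_IND_INDEX_11, reg);
	r = RREG32_NO_KIQ(mmSMC_IND_DATA_11);
	spin_unlock_irqrestore(&adev->smc_idx_lock, flags);
	return r;
}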
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
index 49df6c791cfc..5d2475d5392c 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/cik_event_interrupt.c
@@ -25,12 +25,39 @@
#include "cik_int.h"
static bool cik_event_interrupt_isr(struct kfd_dev *dev,
- const uint32_t *ih_ring_entry)
+ const uint32_t *ih_ring_entry,
+ uint32_t *patched_ihre,
+ bool *patched_flag)
{
const struct cik_ih_ring_entry *ihre =
(const struct cik_ih_ring_entry *)ih_ring_entry;
+ const struct kfd2kgd_calls *f2g = dev->kfd2kgd;
unsigned int vmid, pasid;
+ /* This workaround is due to a HW/FW limitation on Hawaii:
+ * VMID and PASID are not written into the ih_ring_entry.
+ */
+ if ((ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
+ ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) &&
+ dev->device_info->asic_family == CHIP_HAWAII) {
+ struct cik_ih_ring_entry *tmp_ihre =
+ (struct cik_ih_ring_entry *)patched_ihre;
+
+ *patched_flag = true;
+ *tmp_ihre = *ihre;
+
+ vmid = f2g->read_vmid_from_vmfault_reg(dev->kgd);
+ pasid = f2g->get_atc_vmid_pasid_mapping_pasid(dev->kgd, vmid);
+
+ tmp_ihre->ring_id &= 0x000000ff;
+ tmp_ihre->ring_id |= vmid << 8;
+ tmp_ihre->ring_id |= pasid << 16;
+
+ return (pasid != 0) &&
+ vmid >= dev->vm_info.first_vmid_kfd &&
+ vmid <= dev->vm_info.last_vmid_kfd;
+ }
+
/* Only handle interrupts from KFD VMIDs */
vmid = (ihre->ring_id & 0x0000ff00) >> 8;
if (vmid < dev->vm_info.first_vmid_kfd ||
@@ -48,18 +75,19 @@ static bool cik_event_interrupt_isr(struct kfd_dev *dev,
return ihre->source_id == CIK_INTSRC_CP_END_OF_PIPE ||
ihre->source_id == CIK_INTSRC_SDMA_TRAP ||
ihre->source_id == CIK_INTSRC_SQ_INTERRUPT_MSG ||
- ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE;
+ ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE ||
+ ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
+ ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT;
}
static void cik_event_interrupt_wq(struct kfd_dev *dev,
const uint32_t *ih_ring_entry)
{
- unsigned int pasid;
const struct cik_ih_ring_entry *ihre =
(const struct cik_ih_ring_entry *)ih_ring_entry;
uint32_t context_id = ihre->data & 0xfffffff;
-
- pasid = (ihre->ring_id & 0xffff0000) >> 16;
+ unsigned int vmid = (ihre->ring_id & 0x0000ff00) >> 8;
+ unsigned int pasid = (ihre->ring_id & 0xffff0000) >> 16;
if (pasid == 0)
return;
@@ -72,6 +100,22 @@ static void cik_event_interrupt_wq(struct kfd_dev *dev,
kfd_signal_event_interrupt(pasid, context_id & 0xff, 8);
else if (ihre->source_id == CIK_INTSRC_CP_BAD_OPCODE)
kfd_signal_hw_exception_event(pasid);
+ else if (ihre->source_id == CIK_INTSRC_GFX_PAGE_INV_FAULT ||
+ ihre->source_id == CIK_INTSRC_GFX_MEM_PROT_FAULT) {
+ struct kfd_vm_fault_info info;
+
+ kfd_process_vm_fault(dev->dqm, pasid);
+
+ memset(&info, 0, sizeof(info));
+ dev->kfd2kgd->get_vm_fault_info(dev->kgd, &info);
+ if (!info.page_addr && !info.status)
+ return;
+
+ if (info.vmid == vmid)
+ kfd_signal_vm_fault_event(dev, pasid, &info);
+ else
+ kfd_signal_vm_fault_event(dev, pasid, NULL);
+ }
}
const struct kfd_event_interrupt_class event_interrupt_class_cik = {
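The Hawaii workaround above rebuilds the ring_id field layout that the bottom half expects, because the hardware leaves VMID and PASID out of VM-fault IH entries on that ASIC. The bit packing, extracted into a standalone sketch:

/*
 * ring_id[7:0]   original ring id (kept)
 * ring_id[15:8]  VMID read back via read_vmid_from_vmfault_reg()
 * ring_id[31:16] PASID from the ATC VMID-PASID mapping
 */
static inline uint32_t cik_patch_ring_id(uint32_t ring_id,
					 unsigned int vmid,
					 unsigned int pasid)
{
	ring_id &= 0x000000ff;
	ring_id |= vmid << 8;
	ring_id |= pasid << 16;
	return ring_id;
}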
diff --git a/drivers/gpu/drm/amd/amdkfd/cik_int.h b/drivers/gpu/drm/amd/amdkfd/cik_int.h
index 109298b9d507..76f8677a7926 100644
--- a/drivers/gpu/drm/amd/amdkfd/cik_int.h
+++ b/drivers/gpu/drm/amd/amdkfd/cik_int.h
@@ -20,8 +20,8 @@
* OTHER DEALINGS IN THE SOFTWARE.
*/
-#ifndef HSA_RADEON_CIK_INT_H_INCLUDED
-#define HSA_RADEON_CIK_INT_H_INCLUDED
+#ifndef CIK_INT_H_INCLUDED
+#define CIK_INT_H_INCLUDED
#include <linux/types.h>
@@ -34,9 +34,10 @@ struct cik_ih_ring_entry {
#define CIK_INTSRC_CP_END_OF_PIPE 0xB5
#define CIK_INTSRC_CP_BAD_OPCODE 0xB7
-#define CIK_INTSRC_DEQUEUE_COMPLETE 0xC6
#define CIK_INTSRC_SDMA_TRAP 0xE0
#define CIK_INTSRC_SQ_INTERRUPT_MSG 0xEF
+#define CIK_INTSRC_GFX_PAGE_INV_FAULT 0x92
+#define CIK_INTSRC_GFX_MEM_PROT_FAULT 0x93
#endif
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
index f68aef02fc1f..3621efbd5759 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler.h
@@ -21,18 +21,21 @@
*/
static const uint32_t cwsr_trap_gfx8_hex[] = {
- 0xbf820001, 0xbf820125,
+ 0xbf820001, 0xbf82012b,
0xb8f4f802, 0x89748674,
0xb8f5f803, 0x8675ff75,
- 0x00000400, 0xbf850011,
+ 0x00000400, 0xbf850017,
0xc00a1e37, 0x00000000,
0xbf8c007f, 0x87777978,
- 0xbf840002, 0xb974f802,
- 0xbe801d78, 0xb8f5f803,
- 0x8675ff75, 0x000001ff,
- 0xbf850002, 0x80708470,
- 0x82718071, 0x8671ff71,
- 0x0000ffff, 0xb974f802,
+ 0xbf840005, 0x8f728374,
+ 0xb972e0c2, 0xbf800002,
+ 0xb9740002, 0xbe801d78,
+ 0xb8f5f803, 0x8675ff75,
+ 0x000001ff, 0xbf850002,
+ 0x80708470, 0x82718071,
+ 0x8671ff71, 0x0000ffff,
+ 0x8f728374, 0xb972e0c2,
+ 0xbf800002, 0xb9740002,
0xbe801f70, 0xb8f5f803,
0x8675ff75, 0x00000100,
0xbf840006, 0xbefa0080,
@@ -168,7 +171,7 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
0x807c847c, 0x806eff6e,
0x00000400, 0xbf0a757c,
0xbf85ffef, 0xbf9c0000,
- 0xbf8200ca, 0xbef8007e,
+ 0xbf8200cd, 0xbef8007e,
0x8679ff7f, 0x0000ffff,
0x8779ff79, 0x00040000,
0xbefa0080, 0xbefb00ff,
@@ -268,16 +271,18 @@ static const uint32_t cwsr_trap_gfx8_hex[] = {
0x8f739773, 0xb976f807,
0x8671ff71, 0x0000ffff,
0x86fe7e7e, 0x86ea6a6a,
- 0xb974f802, 0xbf8a0000,
- 0x95807370, 0xbf810000,
+ 0x8f768374, 0xb976e0c2,
+ 0xbf800002, 0xb9740002,
+ 0xbf8a0000, 0x95807370,
+ 0xbf810000, 0x00000000,
};
static const uint32_t cwsr_trap_gfx9_hex[] = {
- 0xbf820001, 0xbf82015a,
+ 0xbf820001, 0xbf82015d,
0xb8f8f802, 0x89788678,
0xb8f1f803, 0x866eff71,
- 0x00000400, 0xbf850034,
+ 0x00000400, 0xbf850037,
0x866eff71, 0x00000800,
0xbf850003, 0x866eff71,
0x00000100, 0xbf840008,
@@ -303,258 +308,261 @@ static const uint32_t cwsr_trap_gfx9_hex[] = {
0x8f6e8b77, 0x866eff6e,
0x001f8000, 0xb96ef807,
0x86fe7e7e, 0x86ea6a6a,
- 0xb978f802, 0xbe801f6c,
- 0x866dff6d, 0x0000ffff,
- 0xbef00080, 0xb9700283,
- 0xb8f02407, 0x8e709c70,
- 0x876d706d, 0xb8f003c7,
- 0x8e709b70, 0x876d706d,
- 0xb8f0f807, 0x8670ff70,
- 0x00007fff, 0xb970f807,
- 0xbeee007e, 0xbeef007f,
- 0xbefe0180, 0xbf900004,
- 0x87708478, 0xb970f802,
- 0xbf8e0002, 0xbf88fffe,
- 0xb8f02a05, 0x80708170,
- 0x8e708a70, 0xb8f11605,
- 0x80718171, 0x8e718671,
- 0x80707170, 0x80707e70,
- 0x8271807f, 0x8671ff71,
- 0x0000ffff, 0xc0471cb8,
- 0x00000040, 0xbf8cc07f,
- 0xc04b1d38, 0x00000048,
- 0xbf8cc07f, 0xc0431e78,
- 0x00000058, 0xbf8cc07f,
- 0xc0471eb8, 0x0000005c,
- 0xbf8cc07f, 0xbef4007e,
- 0x8675ff7f, 0x0000ffff,
- 0x8775ff75, 0x00040000,
- 0xbef60080, 0xbef700ff,
- 0x00807fac, 0x8670ff7f,
- 0x08000000, 0x8f708370,
- 0x87777077, 0x8670ff7f,
- 0x70000000, 0x8f708170,
- 0x87777077, 0xbefb007c,
- 0xbefa0080, 0xb8fa2a05,
- 0x807a817a, 0x8e7a8a7a,
- 0xb8f01605, 0x80708170,
- 0x8e708670, 0x807a707a,
- 0xbef60084, 0xbef600ff,
- 0x01000000, 0xbefe007c,
- 0xbefc007a, 0xc0611efa,
- 0x0000007c, 0xbf8cc07f,
- 0x807a847a, 0xbefc007e,
+ 0x8f6e8378, 0xb96ee0c2,
+ 0xbf800002, 0xb9780002,
+ 0xbe801f6c, 0x866dff6d,
+ 0x0000ffff, 0xbef00080,
+ 0xb9700283, 0xb8f02407,
+ 0x8e709c70, 0x876d706d,
+ 0xb8f003c7, 0x8e709b70,
+ 0x876d706d, 0xb8f0f807,
+ 0x8670ff70, 0x00007fff,
+ 0xb970f807, 0xbeee007e,
+ 0xbeef007f, 0xbefe0180,
+ 0xbf900004, 0x87708478,
+ 0xb970f802, 0xbf8e0002,
+ 0xbf88fffe, 0xb8f02a05,
+ 0x80708170, 0x8e708a70,
+ 0xb8f11605, 0x80718171,
+ 0x8e718671, 0x80707170,
+ 0x80707e70, 0x8271807f,
+ 0x8671ff71, 0x0000ffff,
+ 0xc0471cb8, 0x00000040,
+ 0xbf8cc07f, 0xc04b1d38,
+ 0x00000048, 0xbf8cc07f,
+ 0xc0431e78, 0x00000058,
+ 0xbf8cc07f, 0xc0471eb8,
+ 0x0000005c, 0xbf8cc07f,
+ 0xbef4007e, 0x8675ff7f,
+ 0x0000ffff, 0x8775ff75,
+ 0x00040000, 0xbef60080,
+ 0xbef700ff, 0x00807fac,
+ 0x8670ff7f, 0x08000000,
+ 0x8f708370, 0x87777077,
+ 0x8670ff7f, 0x70000000,
+ 0x8f708170, 0x87777077,
+ 0xbefb007c, 0xbefa0080,
+ 0xb8fa2a05, 0x807a817a,
+ 0x8e7a8a7a, 0xb8f01605,
+ 0x80708170, 0x8e708670,
+ 0x807a707a, 0xbef60084,
+ 0xbef600ff, 0x01000000,
0xbefe007c, 0xbefc007a,
- 0xc0611b3a, 0x0000007c,
+ 0xc0611efa, 0x0000007c,
0xbf8cc07f, 0x807a847a,
0xbefc007e, 0xbefe007c,
- 0xbefc007a, 0xc0611b7a,
+ 0xbefc007a, 0xc0611b3a,
0x0000007c, 0xbf8cc07f,
0x807a847a, 0xbefc007e,
0xbefe007c, 0xbefc007a,
- 0xc0611bba, 0x0000007c,
+ 0xc0611b7a, 0x0000007c,
0xbf8cc07f, 0x807a847a,
0xbefc007e, 0xbefe007c,
- 0xbefc007a, 0xc0611bfa,
+ 0xbefc007a, 0xc0611bba,
0x0000007c, 0xbf8cc07f,
0x807a847a, 0xbefc007e,
0xbefe007c, 0xbefc007a,
- 0xc0611e3a, 0x0000007c,
- 0xbf8cc07f, 0x807a847a,
- 0xbefc007e, 0xb8f1f803,
- 0xbefe007c, 0xbefc007a,
- 0xc0611c7a, 0x0000007c,
+ 0xc0611bfa, 0x0000007c,
0xbf8cc07f, 0x807a847a,
0xbefc007e, 0xbefe007c,
- 0xbefc007a, 0xc0611a3a,
+ 0xbefc007a, 0xc0611e3a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x807a847a, 0xbefc007e,
+ 0xb8f1f803, 0xbefe007c,
+ 0xbefc007a, 0xc0611c7a,
0x0000007c, 0xbf8cc07f,
0x807a847a, 0xbefc007e,
0xbefe007c, 0xbefc007a,
- 0xc0611a7a, 0x0000007c,
- 0xbf8cc07f, 0x807a847a,
- 0xbefc007e, 0xb8fbf801,
- 0xbefe007c, 0xbefc007a,
- 0xc0611efa, 0x0000007c,
+ 0xc0611a3a, 0x0000007c,
0xbf8cc07f, 0x807a847a,
- 0xbefc007e, 0x8670ff7f,
- 0x04000000, 0xbeef0080,
- 0x876f6f70, 0xb8fa2a05,
+ 0xbefc007e, 0xbefe007c,
+ 0xbefc007a, 0xc0611a7a,
+ 0x0000007c, 0xbf8cc07f,
+ 0x807a847a, 0xbefc007e,
+ 0xb8fbf801, 0xbefe007c,
+ 0xbefc007a, 0xc0611efa,
+ 0x0000007c, 0xbf8cc07f,
+ 0x807a847a, 0xbefc007e,
+ 0x8670ff7f, 0x04000000,
+ 0xbeef0080, 0x876f6f70,
+ 0xb8fa2a05, 0x807a817a,
+ 0x8e7a8a7a, 0xb8f11605,
+ 0x80718171, 0x8e718471,
+ 0x8e768271, 0xbef600ff,
+ 0x01000000, 0xbef20174,
+ 0x80747a74, 0x82758075,
+ 0xbefc0080, 0xbf800000,
+ 0xbe802b00, 0xbe822b02,
+ 0xbe842b04, 0xbe862b06,
+ 0xbe882b08, 0xbe8a2b0a,
+ 0xbe8c2b0c, 0xbe8e2b0e,
+ 0xc06b003a, 0x00000000,
+ 0xbf8cc07f, 0xc06b013a,
+ 0x00000010, 0xbf8cc07f,
+ 0xc06b023a, 0x00000020,
+ 0xbf8cc07f, 0xc06b033a,
+ 0x00000030, 0xbf8cc07f,
+ 0x8074c074, 0x82758075,
+ 0x807c907c, 0xbf0a717c,
+ 0xbf85ffe7, 0xbef40172,
+ 0xbefa0080, 0xbefe00c1,
+ 0xbeff00c1, 0xbee80080,
+ 0xbee90080, 0xbef600ff,
+ 0x01000000, 0xe0724000,
+ 0x7a1d0000, 0xe0724100,
+ 0x7a1d0100, 0xe0724200,
+ 0x7a1d0200, 0xe0724300,
+ 0x7a1d0300, 0xbefe00c1,
+ 0xbeff00c1, 0xb8f14306,
+ 0x8671c171, 0xbf84002c,
+ 0xbf8a0000, 0x8670ff6f,
+ 0x04000000, 0xbf840028,
+ 0x8e718671, 0x8e718271,
+ 0xbef60071, 0xb8fa2a05,
0x807a817a, 0x8e7a8a7a,
- 0xb8f11605, 0x80718171,
- 0x8e718471, 0x8e768271,
+ 0xb8f01605, 0x80708170,
+ 0x8e708670, 0x807a707a,
+ 0x807aff7a, 0x00000080,
0xbef600ff, 0x01000000,
- 0xbef20174, 0x80747a74,
- 0x82758075, 0xbefc0080,
- 0xbf800000, 0xbe802b00,
- 0xbe822b02, 0xbe842b04,
- 0xbe862b06, 0xbe882b08,
- 0xbe8a2b0a, 0xbe8c2b0c,
- 0xbe8e2b0e, 0xc06b003a,
- 0x00000000, 0xbf8cc07f,
- 0xc06b013a, 0x00000010,
- 0xbf8cc07f, 0xc06b023a,
- 0x00000020, 0xbf8cc07f,
- 0xc06b033a, 0x00000030,
- 0xbf8cc07f, 0x8074c074,
- 0x82758075, 0x807c907c,
- 0xbf0a717c, 0xbf85ffe7,
- 0xbef40172, 0xbefa0080,
+ 0xbefc0080, 0xd28c0002,
+ 0x000100c1, 0xd28d0003,
+ 0x000204c1, 0xd1060002,
+ 0x00011103, 0x7e0602ff,
+ 0x00000200, 0xbefc00ff,
+ 0x00010000, 0xbe800077,
+ 0x8677ff77, 0xff7fffff,
+ 0x8777ff77, 0x00058000,
+ 0xd8ec0000, 0x00000002,
+ 0xbf8cc07f, 0xe0765000,
+ 0x7a1d0002, 0x68040702,
+ 0xd0c9006a, 0x0000e302,
+ 0xbf87fff7, 0xbef70000,
+ 0xbefa00ff, 0x00000400,
0xbefe00c1, 0xbeff00c1,
- 0xbee80080, 0xbee90080,
+ 0xb8f12a05, 0x80718171,
+ 0x8e718271, 0x8e768871,
0xbef600ff, 0x01000000,
+ 0xbefc0084, 0xbf0a717c,
+ 0xbf840015, 0xbf11017c,
+ 0x8071ff71, 0x00001000,
+ 0x7e000300, 0x7e020301,
+ 0x7e040302, 0x7e060303,
0xe0724000, 0x7a1d0000,
0xe0724100, 0x7a1d0100,
0xe0724200, 0x7a1d0200,
0xe0724300, 0x7a1d0300,
+ 0x807c847c, 0x807aff7a,
+ 0x00000400, 0xbf0a717c,
+ 0xbf85ffef, 0xbf9c0000,
+ 0xbf8200dc, 0xbef4007e,
+ 0x8675ff7f, 0x0000ffff,
+ 0x8775ff75, 0x00040000,
+ 0xbef60080, 0xbef700ff,
+ 0x00807fac, 0x866eff7f,
+ 0x08000000, 0x8f6e836e,
+ 0x87776e77, 0x866eff7f,
+ 0x70000000, 0x8f6e816e,
+ 0x87776e77, 0x866eff7f,
+ 0x04000000, 0xbf84001e,
0xbefe00c1, 0xbeff00c1,
- 0xb8f14306, 0x8671c171,
- 0xbf84002c, 0xbf8a0000,
- 0x8670ff6f, 0x04000000,
- 0xbf840028, 0x8e718671,
- 0x8e718271, 0xbef60071,
- 0xb8fa2a05, 0x807a817a,
- 0x8e7a8a7a, 0xb8f01605,
- 0x80708170, 0x8e708670,
- 0x807a707a, 0x807aff7a,
+ 0xb8ef4306, 0x866fc16f,
+ 0xbf840019, 0x8e6f866f,
+ 0x8e6f826f, 0xbef6006f,
+ 0xb8f82a05, 0x80788178,
+ 0x8e788a78, 0xb8ee1605,
+ 0x806e816e, 0x8e6e866e,
+ 0x80786e78, 0x8078ff78,
0x00000080, 0xbef600ff,
0x01000000, 0xbefc0080,
- 0xd28c0002, 0x000100c1,
- 0xd28d0003, 0x000204c1,
- 0xd1060002, 0x00011103,
- 0x7e0602ff, 0x00000200,
- 0xbefc00ff, 0x00010000,
- 0xbe800077, 0x8677ff77,
- 0xff7fffff, 0x8777ff77,
- 0x00058000, 0xd8ec0000,
- 0x00000002, 0xbf8cc07f,
- 0xe0765000, 0x7a1d0002,
- 0x68040702, 0xd0c9006a,
- 0x0000e302, 0xbf87fff7,
- 0xbef70000, 0xbefa00ff,
- 0x00000400, 0xbefe00c1,
- 0xbeff00c1, 0xb8f12a05,
- 0x80718171, 0x8e718271,
- 0x8e768871, 0xbef600ff,
- 0x01000000, 0xbefc0084,
- 0xbf0a717c, 0xbf840015,
- 0xbf11017c, 0x8071ff71,
- 0x00001000, 0x7e000300,
+ 0xe0510000, 0x781d0000,
+ 0xe0510100, 0x781d0000,
+ 0x807cff7c, 0x00000200,
+ 0x8078ff78, 0x00000200,
+ 0xbf0a6f7c, 0xbf85fff6,
+ 0xbef80080, 0xbefe00c1,
+ 0xbeff00c1, 0xb8ef2a05,
+ 0x806f816f, 0x8e6f826f,
+ 0x8e76886f, 0xbef600ff,
+ 0x01000000, 0xbeee0078,
+ 0x8078ff78, 0x00000400,
+ 0xbefc0084, 0xbf11087c,
+ 0x806fff6f, 0x00008000,
+ 0xe0524000, 0x781d0000,
+ 0xe0524100, 0x781d0100,
+ 0xe0524200, 0x781d0200,
+ 0xe0524300, 0x781d0300,
+ 0xbf8c0f70, 0x7e000300,
0x7e020301, 0x7e040302,
- 0x7e060303, 0xe0724000,
- 0x7a1d0000, 0xe0724100,
- 0x7a1d0100, 0xe0724200,
- 0x7a1d0200, 0xe0724300,
- 0x7a1d0300, 0x807c847c,
- 0x807aff7a, 0x00000400,
- 0xbf0a717c, 0xbf85ffef,
- 0xbf9c0000, 0xbf8200d9,
- 0xbef4007e, 0x8675ff7f,
- 0x0000ffff, 0x8775ff75,
- 0x00040000, 0xbef60080,
- 0xbef700ff, 0x00807fac,
- 0x866eff7f, 0x08000000,
- 0x8f6e836e, 0x87776e77,
- 0x866eff7f, 0x70000000,
- 0x8f6e816e, 0x87776e77,
- 0x866eff7f, 0x04000000,
- 0xbf84001e, 0xbefe00c1,
- 0xbeff00c1, 0xb8ef4306,
- 0x866fc16f, 0xbf840019,
- 0x8e6f866f, 0x8e6f826f,
- 0xbef6006f, 0xb8f82a05,
+ 0x7e060303, 0x807c847c,
+ 0x8078ff78, 0x00000400,
+ 0xbf0a6f7c, 0xbf85ffee,
+ 0xbf9c0000, 0xe0524000,
+ 0x6e1d0000, 0xe0524100,
+ 0x6e1d0100, 0xe0524200,
+ 0x6e1d0200, 0xe0524300,
+ 0x6e1d0300, 0xb8f82a05,
0x80788178, 0x8e788a78,
0xb8ee1605, 0x806e816e,
0x8e6e866e, 0x80786e78,
- 0x8078ff78, 0x00000080,
- 0xbef600ff, 0x01000000,
- 0xbefc0080, 0xe0510000,
- 0x781d0000, 0xe0510100,
- 0x781d0000, 0x807cff7c,
- 0x00000200, 0x8078ff78,
- 0x00000200, 0xbf0a6f7c,
- 0xbf85fff6, 0xbef80080,
- 0xbefe00c1, 0xbeff00c1,
- 0xb8ef2a05, 0x806f816f,
- 0x8e6f826f, 0x8e76886f,
- 0xbef600ff, 0x01000000,
- 0xbeee0078, 0x8078ff78,
- 0x00000400, 0xbefc0084,
- 0xbf11087c, 0x806fff6f,
- 0x00008000, 0xe0524000,
- 0x781d0000, 0xe0524100,
- 0x781d0100, 0xe0524200,
- 0x781d0200, 0xe0524300,
- 0x781d0300, 0xbf8c0f70,
- 0x7e000300, 0x7e020301,
- 0x7e040302, 0x7e060303,
- 0x807c847c, 0x8078ff78,
- 0x00000400, 0xbf0a6f7c,
- 0xbf85ffee, 0xbf9c0000,
- 0xe0524000, 0x6e1d0000,
- 0xe0524100, 0x6e1d0100,
- 0xe0524200, 0x6e1d0200,
- 0xe0524300, 0x6e1d0300,
+ 0x80f8c078, 0xb8ef1605,
+ 0x806f816f, 0x8e6f846f,
+ 0x8e76826f, 0xbef600ff,
+ 0x01000000, 0xbefc006f,
+ 0xc031003a, 0x00000078,
+ 0x80f8c078, 0xbf8cc07f,
+ 0x80fc907c, 0xbf800000,
+ 0xbe802d00, 0xbe822d02,
+ 0xbe842d04, 0xbe862d06,
+ 0xbe882d08, 0xbe8a2d0a,
+ 0xbe8c2d0c, 0xbe8e2d0e,
+ 0xbf06807c, 0xbf84fff0,
0xb8f82a05, 0x80788178,
0x8e788a78, 0xb8ee1605,
0x806e816e, 0x8e6e866e,
- 0x80786e78, 0x80f8c078,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f846f, 0x8e76826f,
+ 0x80786e78, 0xbef60084,
0xbef600ff, 0x01000000,
- 0xbefc006f, 0xc031003a,
- 0x00000078, 0x80f8c078,
- 0xbf8cc07f, 0x80fc907c,
- 0xbf800000, 0xbe802d00,
- 0xbe822d02, 0xbe842d04,
- 0xbe862d06, 0xbe882d08,
- 0xbe8a2d0a, 0xbe8c2d0c,
- 0xbe8e2d0e, 0xbf06807c,
- 0xbf84fff0, 0xb8f82a05,
- 0x80788178, 0x8e788a78,
- 0xb8ee1605, 0x806e816e,
- 0x8e6e866e, 0x80786e78,
- 0xbef60084, 0xbef600ff,
- 0x01000000, 0xc0211bfa,
+ 0xc0211bfa, 0x00000078,
+ 0x80788478, 0xc0211b3a,
0x00000078, 0x80788478,
- 0xc0211b3a, 0x00000078,
- 0x80788478, 0xc0211b7a,
+ 0xc0211b7a, 0x00000078,
+ 0x80788478, 0xc0211eba,
0x00000078, 0x80788478,
- 0xc0211eba, 0x00000078,
- 0x80788478, 0xc0211efa,
+ 0xc0211efa, 0x00000078,
+ 0x80788478, 0xc0211c3a,
0x00000078, 0x80788478,
- 0xc0211c3a, 0x00000078,
- 0x80788478, 0xc0211c7a,
+ 0xc0211c7a, 0x00000078,
+ 0x80788478, 0xc0211a3a,
0x00000078, 0x80788478,
- 0xc0211a3a, 0x00000078,
- 0x80788478, 0xc0211a7a,
+ 0xc0211a7a, 0x00000078,
+ 0x80788478, 0xc0211cfa,
0x00000078, 0x80788478,
- 0xc0211cfa, 0x00000078,
- 0x80788478, 0xbf8cc07f,
- 0xbefc006f, 0xbefe007a,
- 0xbeff007b, 0x866f71ff,
- 0x000003ff, 0xb96f4803,
- 0x866f71ff, 0xfffff800,
- 0x8f6f8b6f, 0xb96fa2c3,
- 0xb973f801, 0xb8ee2a05,
- 0x806e816e, 0x8e6e8a6e,
- 0xb8ef1605, 0x806f816f,
- 0x8e6f866f, 0x806e6f6e,
- 0x806e746e, 0x826f8075,
- 0x866fff6f, 0x0000ffff,
- 0xc0071cb7, 0x00000040,
- 0xc00b1d37, 0x00000048,
- 0xc0031e77, 0x00000058,
- 0xc0071eb7, 0x0000005c,
- 0xbf8cc07f, 0x866fff6d,
- 0xf0000000, 0x8f6f9c6f,
- 0x8e6f906f, 0xbeee0080,
- 0x876e6f6e, 0x866fff6d,
- 0x08000000, 0x8f6f9b6f,
- 0x8e6f8f6f, 0x876e6f6e,
- 0x866fff70, 0x00800000,
- 0x8f6f976f, 0xb96ef807,
- 0x866dff6d, 0x0000ffff,
- 0x86fe7e7e, 0x86ea6a6a,
- 0xb970f802, 0xbf8a0000,
+ 0xbf8cc07f, 0xbefc006f,
+ 0xbefe007a, 0xbeff007b,
+ 0x866f71ff, 0x000003ff,
+ 0xb96f4803, 0x866f71ff,
+ 0xfffff800, 0x8f6f8b6f,
+ 0xb96fa2c3, 0xb973f801,
+ 0xb8ee2a05, 0x806e816e,
+ 0x8e6e8a6e, 0xb8ef1605,
+ 0x806f816f, 0x8e6f866f,
+ 0x806e6f6e, 0x806e746e,
+ 0x826f8075, 0x866fff6f,
+ 0x0000ffff, 0xc0071cb7,
+ 0x00000040, 0xc00b1d37,
+ 0x00000048, 0xc0031e77,
+ 0x00000058, 0xc0071eb7,
+ 0x0000005c, 0xbf8cc07f,
+ 0x866fff6d, 0xf0000000,
+ 0x8f6f9c6f, 0x8e6f906f,
+ 0xbeee0080, 0x876e6f6e,
+ 0x866fff6d, 0x08000000,
+ 0x8f6f9b6f, 0x8e6f8f6f,
+ 0x876e6f6e, 0x866fff70,
+ 0x00800000, 0x8f6f976f,
+ 0xb96ef807, 0x866dff6d,
+ 0x0000ffff, 0x86fe7e7e,
+ 0x86ea6a6a, 0x8f6e8370,
+ 0xb96ee0c2, 0xbf800002,
+ 0xb9700002, 0xbf8a0000,
0x95806f6c, 0xbf810000,
};
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
index a2a04bb64096..abe1a5da29fb 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx8.asm
@@ -103,6 +103,10 @@ var SQ_WAVE_STATUS_INST_ATC_SHIFT = 23
var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
var SQ_WAVE_STATUS_SPI_PRIO_SHIFT = 1
var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
+var SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT = 0
+var SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE = 1
+var SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT = 3
+var SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE = 29
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
@@ -251,7 +255,7 @@ if (!EMU_RUN_HACK)
s_waitcnt lgkmcnt(0)
s_or_b32 ttmp7, ttmp8, ttmp9
s_cbranch_scc0 L_NO_NEXT_TRAP //next level trap handler not been set
- s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //restore HW status(SCC)
+ set_status_without_spi_prio(s_save_status, ttmp2) //restore HW status(SCC)
s_setpc_b64 [ttmp8,ttmp9] //jump to next level trap handler
L_NO_NEXT_TRAP:
@@ -262,7 +266,7 @@ L_NO_NEXT_TRAP:
s_addc_u32 ttmp1, ttmp1, 0
L_EXCP_CASE:
s_and_b32 ttmp1, ttmp1, 0xFFFF
- s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status //restore HW status(SCC)
+ set_status_without_spi_prio(s_save_status, ttmp2) //restore HW status(SCC)
s_rfe_b64 [ttmp0, ttmp1]
end
// ********* End handling of non-CWSR traps *******************
@@ -1053,7 +1057,7 @@ end
s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
- s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
+ set_status_without_spi_prio(s_restore_status, s_restore_tmp) // SCC is included, which is changed by previous salu
s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
@@ -1134,3 +1138,11 @@ end
function get_hwreg_size_bytes
return 128 //HWREG size 128 bytes
end
+
+function set_status_without_spi_prio(status, tmp)
+ // Do not restore STATUS.SPI_PRIO since scheduler may have raised it.
+ s_lshr_b32 tmp, status, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT
+ s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE), tmp
+ s_nop 0x2 // avoid S_SETREG => S_SETREG hazard
+ s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE), status
+end
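set_status_without_spi_prio() splits the STATUS restore into two s_setreg writes, one for bit 0 and one for bits 31:3, so SPI_PRIO (bits 2:1, per SQ_WAVE_STATUS_SPI_PRIO_MASK) keeps whatever value the scheduler has set in the meantime; the rebuilt hex arrays earlier in this patch are the assembled output of these .asm sources. A C model of the net effect, as a sketch rather than the shader code:

#include <stdint.h>

static uint32_t status_without_spi_prio(uint32_t hw_status,
					uint32_t saved_status)
{
	const uint32_t spi_prio_mask = 0x00000006;	/* bits 2:1 */

	/* restore everything except SPI_PRIO, which keeps the HW value */
	return (saved_status & ~spi_prio_mask) | (hw_status & spi_prio_mask);
}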
diff --git a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
index 998be96be736..0bb9c577b3a2 100644
--- a/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
+++ b/drivers/gpu/drm/amd/amdkfd/cwsr_trap_handler_gfx9.asm
@@ -103,6 +103,10 @@ var SQ_WAVE_STATUS_INST_ATC_MASK = 0x00800000
var SQ_WAVE_STATUS_SPI_PRIO_SHIFT = 1
var SQ_WAVE_STATUS_SPI_PRIO_MASK = 0x00000006
var SQ_WAVE_STATUS_HALT_MASK = 0x2000
+var SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT = 0
+var SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE = 1
+var SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT = 3
+var SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE = 29
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SHIFT = 12
var SQ_WAVE_LDS_ALLOC_LDS_SIZE_SIZE = 9
@@ -317,7 +321,7 @@ L_EXCP_CASE:
// Restore SQ_WAVE_STATUS.
s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
- s_setreg_b32 hwreg(HW_REG_STATUS), s_save_status
+ set_status_without_spi_prio(s_save_status, ttmp2)
s_rfe_b64 [ttmp0, ttmp1]
end
@@ -1120,7 +1124,7 @@ end
s_and_b32 s_restore_pc_hi, s_restore_pc_hi, 0x0000ffff //pc[47:32] //Do it here in order not to affect STATUS
s_and_b64 exec, exec, exec // Restore STATUS.EXECZ, not writable by s_setreg_b32
s_and_b64 vcc, vcc, vcc // Restore STATUS.VCCZ, not writable by s_setreg_b32
- s_setreg_b32 hwreg(HW_REG_STATUS), s_restore_status // SCC is included, which is changed by previous salu
+ set_status_without_spi_prio(s_restore_status, s_restore_tmp) // SCC is included, which is changed by previous salu
s_barrier //barrier to ensure the readiness of LDS before access attempts from any other wave in the same TG //FIXME not performance-optimal at this time
@@ -1212,3 +1216,11 @@ function ack_sqc_store_workaround
s_waitcnt lgkmcnt(0)
end
end
+
+function set_status_without_spi_prio(status, tmp)
+ // Do not restore STATUS.SPI_PRIO since scheduler may have raised it.
+ s_lshr_b32 tmp, status, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT
+ s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_POST_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_POST_SPI_PRIO_SIZE), tmp
+ s_nop 0x2 // avoid S_SETREG => S_SETREG hazard
+ s_setreg_b32 hwreg(HW_REG_STATUS, SQ_WAVE_STATUS_PRE_SPI_PRIO_SHIFT, SQ_WAVE_STATUS_PRE_SPI_PRIO_SIZE), status
+end
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
index f64c5551cdba..297b36c26a05 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_chardev.c
@@ -122,6 +122,9 @@ static int kfd_open(struct inode *inode, struct file *filep)
if (IS_ERR(process))
return PTR_ERR(process);
+ if (kfd_is_locked())
+ return -EAGAIN;
+
dev_dbg(kfd_device, "process %d opened, compat mode (32 bit) - %d\n",
process->pasid, process->is_32bit_user_mode);
@@ -389,6 +392,61 @@ static int kfd_ioctl_update_queue(struct file *filp, struct kfd_process *p,
return retval;
}
+static int kfd_ioctl_set_cu_mask(struct file *filp, struct kfd_process *p,
+ void *data)
+{
+ int retval;
+ const int max_num_cus = 1024;
+ struct kfd_ioctl_set_cu_mask_args *args = data;
+ struct queue_properties properties;
+ uint32_t __user *cu_mask_ptr = (uint32_t __user *)args->cu_mask_ptr;
+ size_t cu_mask_size = sizeof(uint32_t) * (args->num_cu_mask / 32);
+
+ if ((args->num_cu_mask % 32) != 0) {
+ pr_debug("num_cu_mask 0x%x must be a multiple of 32",
+ args->num_cu_mask);
+ return -EINVAL;
+ }
+
+ properties.cu_mask_count = args->num_cu_mask;
+ if (properties.cu_mask_count == 0) {
+ pr_debug("CU mask cannot be 0");
+ return -EINVAL;
+ }
+
+ /* To prevent an unreasonably large CU mask size, set an arbitrary
+ * limit of max_num_cus bits. Any CU mask bits past that limit are
+ * dropped; only the first max_num_cus bits are used.
+ */
+ if (properties.cu_mask_count > max_num_cus) {
+ pr_debug("CU mask cannot be greater than 1024 bits");
+ properties.cu_mask_count = max_num_cus;
+ cu_mask_size = sizeof(uint32_t) * (max_num_cus/32);
+ }
+
+ properties.cu_mask = kzalloc(cu_mask_size, GFP_KERNEL);
+ if (!properties.cu_mask)
+ return -ENOMEM;
+
+ retval = copy_from_user(properties.cu_mask, cu_mask_ptr, cu_mask_size);
+ if (retval) {
+ pr_debug("Could not copy CU mask from userspace");
+ kfree(properties.cu_mask);
+ return -EFAULT;
+ }
+
+ mutex_lock(&p->mutex);
+
+ retval = pqm_set_cu_mask(&p->pqm, args->queue_id, &properties);
+
+ mutex_unlock(&p->mutex);
+
+ if (retval)
+ kfree(properties.cu_mask);
+
+ return retval;
+}
+
static int kfd_ioctl_set_memory_policy(struct file *filep,
struct kfd_process *p, void *data)
{
@@ -754,7 +812,6 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
{
struct kfd_ioctl_get_clock_counters_args *args = data;
struct kfd_dev *dev;
- struct timespec64 time;
dev = kfd_device_by_id(args->gpu_id);
if (dev)
@@ -766,11 +823,8 @@ static int kfd_ioctl_get_clock_counters(struct file *filep,
args->gpu_clock_counter = 0;
/* No access to rdtsc. Using raw monotonic time */
- getrawmonotonic64(&time);
- args->cpu_clock_counter = (uint64_t)timespec64_to_ns(&time);
-
- get_monotonic_boottime64(&time);
- args->system_clock_counter = (uint64_t)timespec64_to_ns(&time);
+ args->cpu_clock_counter = ktime_get_raw_ns();
+ args->system_clock_counter = ktime_get_boot_ns();
/* Since the counter is in nano-seconds we use 1GHz frequency */
args->system_clock_freq = 1000000000;
@@ -1558,6 +1612,9 @@ static const struct amdkfd_ioctl_desc amdkfd_ioctls[] = {
AMDKFD_IOCTL_DEF(AMDKFD_IOC_UNMAP_MEMORY_FROM_GPU,
kfd_ioctl_unmap_memory_from_gpu, 0),
+ AMDKFD_IOCTL_DEF(AMDKFD_IOC_SET_CU_MASK,
+ kfd_ioctl_set_cu_mask, 0),
+
};
#define AMDKFD_CORE_IOCTL_COUNT ARRAY_SIZE(amdkfd_ioctls)
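A hedged userspace sketch of the new AMDKFD_IOC_SET_CU_MASK ioctl: the mask is passed as packed 32-bit words and num_cu_mask must be a non-zero multiple of 32, as validated in kfd_ioctl_set_cu_mask() above. The queue_id field and header layout are assumptions taken from the kernel-side usage, since the uapi header is not part of this diff:

#include <stdint.h>
#include <string.h>
#include <sys/ioctl.h>
#include <linux/kfd_ioctl.h>

static int set_queue_cu_mask(int kfd_fd, uint32_t queue_id)
{
	struct kfd_ioctl_set_cu_mask_args args;
	uint32_t mask[2] = { 0xffffffff, 0x0000ffff };	/* enable CUs 0..47 */

	memset(&args, 0, sizeof(args));
	args.queue_id = queue_id;		/* assumed field, see above */
	args.num_cu_mask = 64;			/* bits; multiple of 32 */
	args.cu_mask_ptr = (uintptr_t)mask;

	return ioctl(kfd_fd, AMDKFD_IOC_SET_CU_MASK, &args);
}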
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
index 296b3f230280..ee4996029a86 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_crat.c
@@ -189,6 +189,21 @@ static int kfd_parse_subtype_cu(struct crat_subtype_computeunit *cu,
return 0;
}
+static struct kfd_mem_properties *
+find_subtype_mem(uint32_t heap_type, uint32_t flags, uint32_t width,
+ struct kfd_topology_device *dev)
+{
+ struct kfd_mem_properties *props;
+
+ list_for_each_entry(props, &dev->mem_props, list) {
+ if (props->heap_type == heap_type
+ && props->flags == flags
+ && props->width == width)
+ return props;
+ }
+
+ return NULL;
+}
/* kfd_parse_subtype_mem - parse memory subtypes and attach it to correct
* topology device present in the device_list
*/
@@ -197,36 +212,56 @@ static int kfd_parse_subtype_mem(struct crat_subtype_memory *mem,
{
struct kfd_mem_properties *props;
struct kfd_topology_device *dev;
+ uint32_t heap_type;
+ uint64_t size_in_bytes;
+ uint32_t flags = 0;
+ uint32_t width;
pr_debug("Found memory entry in CRAT table with proximity_domain=%d\n",
mem->proximity_domain);
list_for_each_entry(dev, device_list, list) {
if (mem->proximity_domain == dev->proximity_domain) {
- props = kfd_alloc_struct(props);
- if (!props)
- return -ENOMEM;
-
/* We're on GPU node */
if (dev->node_props.cpu_cores_count == 0) {
/* APU */
if (mem->visibility_type == 0)
- props->heap_type =
+ heap_type =
HSA_MEM_HEAP_TYPE_FB_PRIVATE;
/* dGPU */
else
- props->heap_type = mem->visibility_type;
+ heap_type = mem->visibility_type;
} else
- props->heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;
+ heap_type = HSA_MEM_HEAP_TYPE_SYSTEM;
if (mem->flags & CRAT_MEM_FLAGS_HOT_PLUGGABLE)
- props->flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
+ flags |= HSA_MEM_FLAGS_HOT_PLUGGABLE;
if (mem->flags & CRAT_MEM_FLAGS_NON_VOLATILE)
- props->flags |= HSA_MEM_FLAGS_NON_VOLATILE;
+ flags |= HSA_MEM_FLAGS_NON_VOLATILE;
- props->size_in_bytes =
+ size_in_bytes =
((uint64_t)mem->length_high << 32) +
mem->length_low;
- props->width = mem->width;
+ width = mem->width;
+
+ /* Multiple banks of the same type are aggregated into
+ * one: user mode doesn't care about multiple physical
+ * memory segments and manages them as a single virtual
+ * heap.
+ */
+ props = find_subtype_mem(heap_type, flags, width, dev);
+ if (props) {
+ props->size_in_bytes += size_in_bytes;
+ break;
+ }
+
+ props = kfd_alloc_struct(props);
+ if (!props)
+ return -ENOMEM;
+
+ props->heap_type = heap_type;
+ props->flags = flags;
+ props->size_in_bytes = size_in_bytes;
+ props->width = width;
dev->node_props.mem_banks_count++;
list_add_tail(&props->list, &dev->mem_props);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
index afb26f205d29..a3441b0e385b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.c
@@ -38,7 +38,6 @@
#include "kfd_dbgmgr.h"
#include "kfd_dbgdev.h"
#include "kfd_device_queue_manager.h"
-#include "../../radeon/cik_reg.h"
static void dbgdev_address_watch_disable_nodiq(struct kfd_dev *dev)
{
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h
index 03424c20920c..0619c777b47e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_dbgdev.h
@@ -60,6 +60,9 @@ enum {
SH_REG_SIZE = SH_REG_END - SH_REG_BASE
};
+/* SQ_CMD definitions */
+#define SQ_CMD 0x8DEC
+
enum SQ_IND_CMD_CMD {
SQ_IND_CMD_CMD_NULL = 0x00000000,
SQ_IND_CMD_CMD_HALT = 0x00000001,
@@ -190,4 +193,38 @@ union ULARGE_INTEGER {
void kfd_dbgdev_init(struct kfd_dbgdev *pdbgdev, struct kfd_dev *pdev,
enum DBGDEV_TYPE type);
+union TCP_WATCH_CNTL_BITS {
+ struct {
+ uint32_t mask:24;
+ uint32_t vmid:4;
+ uint32_t atc:1;
+ uint32_t mode:2;
+ uint32_t valid:1;
+ } bitfields, bits;
+ uint32_t u32All;
+ signed int i32All;
+ float f32All;
+};
+
+enum {
+ ADDRESS_WATCH_REG_CNTL_ATC_BIT = 0x10000000UL,
+ ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK = 0x00FFFFFF,
+ ADDRESS_WATCH_REG_ADDLOW_MASK_EXTENSION = 0x03000000,
+ /* extend the mask to 26 bits in order to match the low address field */
+ ADDRESS_WATCH_REG_ADDLOW_SHIFT = 6,
+ ADDRESS_WATCH_REG_ADDHIGH_MASK = 0xFFFF
+};
+
+enum {
+ MAX_TRAPID = 8, /* 3 bits in the bitfield. */
+ MAX_WATCH_ADDRESSES = 4
+};
+
+enum {
+ ADDRESS_WATCH_REG_ADDR_HI = 0,
+ ADDRESS_WATCH_REG_ADDR_LO,
+ ADDRESS_WATCH_REG_CNTL,
+ ADDRESS_WATCH_REG_MAX
+};
+
#endif /* KFD_DBGDEV_H_ */
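These watch-point definitions were previously pulled in from radeon's cik_reg.h (note the include dropped from kfd_dbgdev.c above) and are now owned by KFD directly. A hedged sketch of how a watch-control word is composed from the union; the vmid value and mode are placeholders for illustration:

union TCP_WATCH_CNTL_BITS cntl;

cntl.u32All = 0;
cntl.bitfields.mask = ADDRESS_WATCH_REG_CNTL_DEFAULT_MASK;
cntl.bitfields.vmid = vmid;	/* 4-bit VMID of the watched process */
cntl.bitfields.mode = 0;	/* watch mode, e.g. read/write */
cntl.bitfields.valid = 1;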
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
index 4bd6ebfaf425..ab37d36d9cd6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_debugfs.c
@@ -21,6 +21,8 @@
*/
#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+
#include "kfd_priv.h"
static struct dentry *debugfs_root;
@@ -32,6 +34,38 @@ static int kfd_debugfs_open(struct inode *inode, struct file *file)
return single_open(file, show, NULL);
}
+static ssize_t kfd_debugfs_hang_hws_write(struct file *file,
+ const char __user *user_buf, size_t size, loff_t *ppos)
+{
+ struct kfd_dev *dev;
+ char tmp[16];
+ uint32_t gpu_id;
+ int ret = -EINVAL;
+
+ memset(tmp, 0, 16);
+ if (size >= 16) {
+ pr_err("Invalid input for gpu id.\n");
+ goto out;
+ }
+ if (copy_from_user(tmp, user_buf, size)) {
+ ret = -EFAULT;
+ goto out;
+ }
+ if (kstrtouint(tmp, 10, &gpu_id)) {
+ pr_err("Invalid input for gpu id.\n");
+ goto out;
+ }
+ dev = kfd_device_by_id(gpu_id);
+ if (dev) {
+ kfd_debugfs_hang_hws(dev);
+ ret = size;
+ } else
+ pr_err("Cannot find device %d.\n", gpu_id);
+
+out:
+ return ret;
+}
+
static const struct file_operations kfd_debugfs_fops = {
.owner = THIS_MODULE,
.open = kfd_debugfs_open,
@@ -40,6 +74,15 @@ static const struct file_operations kfd_debugfs_fops = {
.release = single_release,
};
+static const struct file_operations kfd_debugfs_hang_hws_fops = {
+ .owner = THIS_MODULE,
+ .open = kfd_debugfs_open,
+ .read = seq_read,
+ .write = kfd_debugfs_hang_hws_write,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
void kfd_debugfs_init(void)
{
struct dentry *ent;
@@ -65,6 +108,12 @@ void kfd_debugfs_init(void)
ent = debugfs_create_file("rls", S_IFREG | 0444, debugfs_root,
kfd_debugfs_rls_by_device,
&kfd_debugfs_fops);
if (!ent)
pr_warn("Failed to create rls in kfd debugfs\n");
+
+ ent = debugfs_create_file("hang_hws", S_IFREG | 0644, debugfs_root,
+ NULL,
+ &kfd_debugfs_hang_hws_fops);
+ if (!ent)
+ pr_warn("Failed to create hang_hws in kfd debugfs\n");
}
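A hedged usage sketch for the new entry: writing a decimal GPU id to hang_hws invokes kfd_debugfs_hang_hws() on the matching device. The path assumes debugfs is mounted at /sys/kernel/debug and that kfd_debugfs_init() names its directory "kfd", which this hunk does not show:

#include <fcntl.h>
#include <stdint.h>
#include <stdio.h>
#include <unistd.h>

static int trigger_hang_hws(uint32_t gpu_id)
{
	char buf[16];
	int fd, len, ret;

	fd = open("/sys/kernel/debug/kfd/hang_hws", O_WRONLY);
	if (fd < 0)
		return -1;
	len = snprintf(buf, sizeof(buf), "%u", gpu_id);
	/* the handler rejects writes of 16 bytes or more */
	ret = (write(fd, buf, len) == len) ? 0 : -1;
	close(fd);
	return ret;
}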
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device.c b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
index 7ee6cec2c060..1b048715ab8a 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device.c
@@ -30,7 +30,13 @@
#include "kfd_iommu.h"
#define MQD_SIZE_ALIGNED 768
-static atomic_t kfd_device_suspended = ATOMIC_INIT(0);
+
+/*
+ * kfd_locked is used to lock the kfd driver during suspend or reset.
+ * Once locked, the driver stops any further GPU execution and
+ * process creation (open) returns -EAGAIN.
+ */
+static atomic_t kfd_locked = ATOMIC_INIT(0);
#ifdef KFD_SUPPORT_IOMMU_V2
static const struct kfd_device_info kaveri_device_info = {
@@ -46,6 +52,7 @@ static const struct kfd_device_info kaveri_device_info = {
.supports_cwsr = false,
.needs_iommu_device = true,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info carrizo_device_info = {
@@ -61,6 +68,22 @@ static const struct kfd_device_info carrizo_device_info = {
.supports_cwsr = true,
.needs_iommu_device = true,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
+};
+
+static const struct kfd_device_info raven_device_info = {
+ .asic_family = CHIP_RAVEN,
+ .max_pasid_bits = 16,
+ .max_no_of_hqd = 24,
+ .doorbell_size = 8,
+ .ih_ring_entry_size = 8 * sizeof(uint32_t),
+ .event_interrupt_class = &event_interrupt_class_v9,
+ .num_of_watch_points = 4,
+ .mqd_size_aligned = MQD_SIZE_ALIGNED,
+ .supports_cwsr = true,
+ .needs_iommu_device = true,
+ .needs_pci_atomics = true,
+ .num_sdma_engines = 1,
};
#endif
@@ -77,6 +100,7 @@ static const struct kfd_device_info hawaii_device_info = {
.supports_cwsr = false,
.needs_iommu_device = false,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info tonga_device_info = {
@@ -91,6 +115,7 @@ static const struct kfd_device_info tonga_device_info = {
.supports_cwsr = false,
.needs_iommu_device = false,
.needs_pci_atomics = true,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info tonga_vf_device_info = {
@@ -105,6 +130,7 @@ static const struct kfd_device_info tonga_vf_device_info = {
.supports_cwsr = false,
.needs_iommu_device = false,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info fiji_device_info = {
@@ -119,6 +145,7 @@ static const struct kfd_device_info fiji_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = true,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info fiji_vf_device_info = {
@@ -133,6 +160,7 @@ static const struct kfd_device_info fiji_vf_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
@@ -148,6 +176,7 @@ static const struct kfd_device_info polaris10_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = true,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info polaris10_vf_device_info = {
@@ -162,6 +191,7 @@ static const struct kfd_device_info polaris10_vf_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info polaris11_device_info = {
@@ -176,6 +206,7 @@ static const struct kfd_device_info polaris11_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = true,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info vega10_device_info = {
@@ -190,6 +221,7 @@ static const struct kfd_device_info vega10_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
static const struct kfd_device_info vega10_vf_device_info = {
@@ -204,6 +236,7 @@ static const struct kfd_device_info vega10_vf_device_info = {
.supports_cwsr = true,
.needs_iommu_device = false,
.needs_pci_atomics = false,
+ .num_sdma_engines = 2,
};
@@ -241,6 +274,7 @@ static const struct kfd_deviceid supported_devices[] = {
{ 0x9875, &carrizo_device_info }, /* Carrizo */
{ 0x9876, &carrizo_device_info }, /* Carrizo */
{ 0x9877, &carrizo_device_info }, /* Carrizo */
+ { 0x15DD, &raven_device_info }, /* Raven */
#endif
{ 0x67A0, &hawaii_device_info }, /* Hawaii */
{ 0x67A1, &hawaii_device_info }, /* Hawaii */
@@ -514,13 +548,54 @@ void kgd2kfd_device_exit(struct kfd_dev *kfd)
kfree(kfd);
}
+int kgd2kfd_pre_reset(struct kfd_dev *kfd)
+{
+ if (!kfd->init_complete)
+ return 0;
+ kgd2kfd_suspend(kfd);
+
+ /* hold dqm->lock to prevent further execution */
+ dqm_lock(kfd->dqm);
+
+ kfd_signal_reset_event(kfd);
+ return 0;
+}
+
+/*
+ * FIXME: KFD cannot resume existing processes for now.
+ * Keep all existing processes in an evicted state and
+ * wait for them to be terminated.
+ */
+
+int kgd2kfd_post_reset(struct kfd_dev *kfd)
+{
+ int ret, count;
+
+ if (!kfd->init_complete)
+ return 0;
+
+ dqm_unlock(kfd->dqm);
+
+ ret = kfd_resume(kfd);
+ if (ret)
+ return ret;
+ count = atomic_dec_return(&kfd_locked);
+ WARN_ONCE(count != 0, "KFD reset ref. error");
+ return 0;
+}
+
+bool kfd_is_locked(void)
+{
+ return (atomic_read(&kfd_locked) > 0);
+}
+
void kgd2kfd_suspend(struct kfd_dev *kfd)
{
if (!kfd->init_complete)
return;
/* For first KFD device suspend all the KFD processes */
- if (atomic_inc_return(&kfd_device_suspended) == 1)
+ if (atomic_inc_return(&kfd_locked) == 1)
kfd_suspend_all_processes();
kfd->dqm->ops.stop(kfd->dqm);
@@ -539,7 +614,7 @@ int kgd2kfd_resume(struct kfd_dev *kfd)
if (ret)
return ret;
- count = atomic_dec_return(&kfd_device_suspended);
+ count = atomic_dec_return(&kfd_locked);
WARN_ONCE(count < 0, "KFD suspend / resume ref. error");
if (count == 0)
ret = kfd_resume_all_processes();
@@ -577,14 +652,24 @@ dqm_start_error:
/* This is called directly from KGD at ISR. */
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry)
{
+ uint32_t patched_ihre[KFD_MAX_RING_ENTRY_SIZE];
+ bool is_patched = false;
+
if (!kfd->init_complete)
return;
+ if (kfd->device_info->ih_ring_entry_size > sizeof(patched_ihre)) {
+ dev_err_once(kfd_device, "Ring entry too small\n");
+ return;
+ }
+
spin_lock(&kfd->interrupt_lock);
if (kfd->interrupts_active
- && interrupt_is_wanted(kfd, ih_ring_entry)
- && enqueue_ih_ring_entry(kfd, ih_ring_entry))
+ && interrupt_is_wanted(kfd, ih_ring_entry,
+ patched_ihre, &is_patched)
+ && enqueue_ih_ring_entry(kfd,
+ is_patched ? patched_ihre : ih_ring_entry))
queue_work(kfd->ih_wq, &kfd->interrupt_work);
spin_unlock(&kfd->interrupt_lock);
@@ -739,8 +824,8 @@ int kfd_gtt_sa_allocate(struct kfd_dev *kfd, unsigned int size,
if (size > kfd->gtt_sa_num_of_chunks * kfd->gtt_sa_chunk_size)
return -ENOMEM;
- *mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
- if ((*mem_obj) == NULL)
+ *mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
+ if (!(*mem_obj))
return -ENOMEM;
pr_debug("Allocated mem_obj = %p for size = %d\n", *mem_obj, size);
@@ -857,3 +942,26 @@ int kfd_gtt_sa_free(struct kfd_dev *kfd, struct kfd_mem_obj *mem_obj)
kfree(mem_obj);
return 0;
}
+
+#if defined(CONFIG_DEBUG_FS)
+
+/* This function sends a packet to the HIQ to hang the HWS,
+ * which triggers a GPU reset and brings the HWS back to a normal state.
+ */
+int kfd_debugfs_hang_hws(struct kfd_dev *dev)
+{
+ int r = 0;
+
+ if (dev->dqm->sched_policy != KFD_SCHED_POLICY_HWS) {
+ pr_err("HWS is not enabled");
+ return -EINVAL;
+ }
+
+ r = pm_debugfs_hang_hws(&dev->dqm->packets);
+ if (!r)
+ r = dqm_debugfs_execute_queues(dev->dqm);
+
+ return r;
+}
+
+#endif
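The kfd_locked counter renamed above now covers suspend and GPU reset alike: kgd2kfd_suspend()/kgd2kfd_pre_reset() take a reference, kgd2kfd_resume()/kgd2kfd_post_reset() drop it, and kfd_open() refuses new processes with -EAGAIN while any reference is held. A standalone C model of the protocol, illustrative only (the driver uses the kernel's atomic_t, not C11 atomics):

#include <errno.h>
#include <stdatomic.h>

static atomic_int kfd_locked;

static void kfd_lock(void)	{ atomic_fetch_add(&kfd_locked, 1); }
static void kfd_unlock(void)	{ atomic_fetch_sub(&kfd_locked, 1); }

static int kfd_open_check(void)
{
	/* mirrors kfd_open(): no new processes while suspended or in reset */
	return atomic_load(&kfd_locked) > 0 ? -EAGAIN : 0;
}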
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
index 668ad07ebe1f..ec0d62a16e53 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.c
@@ -61,6 +61,8 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
static void deallocate_sdma_queue(struct device_queue_manager *dqm,
unsigned int sdma_queue_id);
+static void kfd_process_hw_exception(struct work_struct *work);
+
static inline
enum KFD_MQD_TYPE get_mqd_type_from_queue_type(enum kfd_queue_type type)
{
@@ -99,6 +101,17 @@ unsigned int get_pipes_per_mec(struct device_queue_manager *dqm)
return dqm->dev->shared_resources.num_pipe_per_mec;
}
+static unsigned int get_num_sdma_engines(struct device_queue_manager *dqm)
+{
+ return dqm->dev->device_info->num_sdma_engines;
+}
+
+unsigned int get_num_sdma_queues(struct device_queue_manager *dqm)
+{
+ return dqm->dev->device_info->num_sdma_engines
+ * KFD_SDMA_QUEUES_PER_ENGINE;
+}
+
void program_sh_mem_settings(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
@@ -240,7 +253,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
print_queue(q);
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
pr_warn("Can't create new usermode queue because %d queues were already created\n",
@@ -297,7 +310,7 @@ static int create_queue_nocpsch(struct device_queue_manager *dqm,
dqm->total_queue_count);
out_unlock:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -346,10 +359,10 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
int retval;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
- mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
- if (!mqd)
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_COMPUTE);
+ if (!mqd_mgr)
return -ENOMEM;
retval = allocate_hqd(dqm, q);
@@ -360,7 +373,7 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
if (retval)
goto out_deallocate_hqd;
- retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+ retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (retval)
goto out_deallocate_doorbell;
@@ -374,15 +387,15 @@ static int create_compute_queue_nocpsch(struct device_queue_manager *dqm,
if (!q->properties.is_active)
return 0;
- retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue, &q->properties,
- q->process->mm);
+ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
+ &q->properties, q->process->mm);
if (retval)
goto out_uninit_mqd;
return 0;
out_uninit_mqd:
- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
deallocate_doorbell(qpd, q);
out_deallocate_hqd:
@@ -399,11 +412,11 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
struct queue *q)
{
int retval;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd)
+ if (!mqd_mgr)
return -ENOMEM;
if (q->properties.type == KFD_QUEUE_TYPE_COMPUTE) {
@@ -420,14 +433,14 @@ static int destroy_queue_nocpsch_locked(struct device_queue_manager *dqm,
deallocate_doorbell(qpd, q);
- retval = mqd->destroy_mqd(mqd, q->mqd,
+ retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
KFD_UNMAP_LATENCY_MS,
q->pipe, q->queue);
if (retval == -ETIME)
qpd->reset_wavefronts = true;
- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
list_del(&q->list);
if (list_empty(&qpd->queues_list)) {
@@ -457,9 +470,9 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
{
int retval;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
retval = destroy_queue_nocpsch_locked(dqm, qpd, q);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -467,19 +480,19 @@ static int destroy_queue_nocpsch(struct device_queue_manager *dqm,
static int update_queue(struct device_queue_manager *dqm, struct queue *q)
{
int retval;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
struct kfd_process_device *pdd;
bool prev_active = false;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
pdd = kfd_get_process_device_data(q->device, q->process);
if (!pdd) {
retval = -ENODEV;
goto out_unlock;
}
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd) {
+ if (!mqd_mgr) {
retval = -ENOMEM;
goto out_unlock;
}
@@ -506,7 +519,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
} else if (prev_active &&
(q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
q->properties.type == KFD_QUEUE_TYPE_SDMA)) {
- retval = mqd->destroy_mqd(mqd, q->mqd,
+ retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
if (retval) {
@@ -515,7 +528,7 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
}
}
- retval = mqd->update_mqd(mqd, q->mqd, &q->properties);
+ retval = mqd_mgr->update_mqd(mqd_mgr, q->mqd, &q->properties);
/*
* check active state vs. the previous state and modify
@@ -533,44 +546,44 @@ static int update_queue(struct device_queue_manager *dqm, struct queue *q)
else if (q->properties.is_active &&
(q->properties.type == KFD_QUEUE_TYPE_COMPUTE ||
q->properties.type == KFD_QUEUE_TYPE_SDMA))
- retval = mqd->load_mqd(mqd, q->mqd, q->pipe, q->queue,
+ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe, q->queue,
&q->properties, q->process->mm);
out_unlock:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
static struct mqd_manager *get_mqd_manager(
struct device_queue_manager *dqm, enum KFD_MQD_TYPE type)
{
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
return NULL;
pr_debug("mqd type %d\n", type);
- mqd = dqm->mqds[type];
- if (!mqd) {
- mqd = mqd_manager_init(type, dqm->dev);
- if (!mqd)
+ mqd_mgr = dqm->mqd_mgrs[type];
+ if (!mqd_mgr) {
+ mqd_mgr = mqd_manager_init(type, dqm->dev);
+ if (!mqd_mgr)
pr_err("mqd manager is NULL");
- dqm->mqds[type] = mqd;
+ dqm->mqd_mgrs[type] = mqd_mgr;
}
- return mqd;
+ return mqd_mgr;
}
static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct queue *q;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
struct kfd_process_device *pdd;
int retval = 0;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (qpd->evicted++ > 0) /* already evicted, do nothing */
goto out;
@@ -582,16 +595,16 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
list_for_each_entry(q, &qpd->queues_list, list) {
if (!q->properties.is_active)
continue;
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd) { /* should not be here */
+ if (!mqd_mgr) { /* should not be here */
pr_err("Cannot evict queue, mqd mgr is NULL\n");
retval = -ENOMEM;
goto out;
}
q->properties.is_evicted = true;
q->properties.is_active = false;
- retval = mqd->destroy_mqd(mqd, q->mqd,
+ retval = mqd_mgr->destroy_mqd(mqd_mgr, q->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_DRAIN,
KFD_UNMAP_LATENCY_MS, q->pipe, q->queue);
if (retval)
@@ -600,7 +613,7 @@ static int evict_process_queues_nocpsch(struct device_queue_manager *dqm,
}
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -611,7 +624,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
struct kfd_process_device *pdd;
int retval = 0;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (qpd->evicted++ > 0) /* already evicted, do nothing */
goto out;
@@ -633,7 +646,7 @@ static int evict_process_queues_cpsch(struct device_queue_manager *dqm,
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -641,7 +654,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
struct qcm_process_device *qpd)
{
struct queue *q;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
struct kfd_process_device *pdd;
uint32_t pd_base;
int retval = 0;
@@ -650,7 +663,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
/* Retrieve PD base */
pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
goto out;
if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
@@ -677,16 +690,16 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
list_for_each_entry(q, &qpd->queues_list, list) {
if (!q->properties.is_evicted)
continue;
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd) { /* should not be here */
+ if (!mqd_mgr) { /* should not be here */
pr_err("Cannot restore queue, mqd mgr is NULL\n");
retval = -ENOMEM;
goto out;
}
q->properties.is_evicted = false;
q->properties.is_active = true;
- retval = mqd->load_mqd(mqd, q->mqd, q->pipe,
+ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, q->pipe,
q->queue, &q->properties,
q->process->mm);
if (retval)
@@ -695,7 +708,7 @@ static int restore_process_queues_nocpsch(struct device_queue_manager *dqm,
}
qpd->evicted = 0;
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -711,7 +724,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
/* Retrieve PD base */
pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (WARN_ON_ONCE(!qpd->evicted)) /* already restored, do nothing */
goto out;
if (qpd->evicted > 1) { /* ref count still > 0, decrement & quit */
@@ -739,7 +752,7 @@ static int restore_process_queues_cpsch(struct device_queue_manager *dqm,
if (!retval)
qpd->evicted = 0;
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -761,7 +774,7 @@ static int register_process(struct device_queue_manager *dqm,
/* Retrieve PD base */
pd_base = dqm->dev->kfd2kgd->get_process_page_dir(pdd->vm);
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
list_add(&n->list, &dqm->queues);
/* Update PD Base in QPD */
@@ -769,9 +782,10 @@ static int register_process(struct device_queue_manager *dqm,
retval = dqm->asic_ops.update_qpd(dqm, qpd);
- dqm->processes_count++;
+ if (dqm->processes_count++ == 0)
+ dqm->dev->kfd2kgd->set_compute_idle(dqm->dev->kgd, false);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -786,20 +800,22 @@ static int unregister_process(struct device_queue_manager *dqm,
list_empty(&qpd->queues_list) ? "empty" : "not empty");
retval = 0;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
list_for_each_entry_safe(cur, next, &dqm->queues, list) {
if (qpd == cur->qpd) {
list_del(&cur->list);
kfree(cur);
- dqm->processes_count--;
+ if (--dqm->processes_count == 0)
+ dqm->dev->kfd2kgd->set_compute_idle(
+ dqm->dev->kgd, true);
goto out;
}
}
/* qpd not found in dqm list */
retval = 1;
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -838,7 +854,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
if (!dqm->allocated_queues)
return -ENOMEM;
- mutex_init(&dqm->lock);
+ mutex_init(&dqm->lock_hidden);
INIT_LIST_HEAD(&dqm->queues);
dqm->queue_count = dqm->next_pipe_to_allocate = 0;
dqm->sdma_queue_count = 0;
@@ -853,7 +869,7 @@ static int initialize_nocpsch(struct device_queue_manager *dqm)
}
dqm->vmid_bitmap = (1 << dqm->dev->vm_info.vmid_num_kfd) - 1;
- dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;
+ dqm->sdma_bitmap = (1 << get_num_sdma_queues(dqm)) - 1;
return 0;
}
@@ -866,8 +882,8 @@ static void uninitialize(struct device_queue_manager *dqm)
kfree(dqm->allocated_queues);
for (i = 0 ; i < KFD_MQD_TYPE_MAX ; i++)
- kfree(dqm->mqds[i]);
- mutex_destroy(&dqm->lock);
+ kfree(dqm->mqd_mgrs[i]);
+ mutex_destroy(&dqm->lock_hidden);
kfd_gtt_sa_free(dqm->dev, dqm->pipeline_mem);
}
@@ -901,7 +917,7 @@ static int allocate_sdma_queue(struct device_queue_manager *dqm,
static void deallocate_sdma_queue(struct device_queue_manager *dqm,
unsigned int sdma_queue_id)
{
- if (sdma_queue_id >= CIK_SDMA_QUEUES)
+ if (sdma_queue_id >= get_num_sdma_queues(dqm))
return;
dqm->sdma_bitmap |= (1 << sdma_queue_id);
}
@@ -910,19 +926,19 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
struct queue *q,
struct qcm_process_device *qpd)
{
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
int retval;
- mqd = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
- if (!mqd)
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm, KFD_MQD_TYPE_SDMA);
+ if (!mqd_mgr)
return -ENOMEM;
retval = allocate_sdma_queue(dqm, &q->sdma_id);
if (retval)
return retval;
- q->properties.sdma_queue_id = q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
- q->properties.sdma_engine_id = q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
+ q->properties.sdma_queue_id = q->sdma_id / get_num_sdma_engines(dqm);
+ q->properties.sdma_engine_id = q->sdma_id % get_num_sdma_engines(dqm);
retval = allocate_doorbell(qpd, q);
if (retval)
@@ -933,19 +949,20 @@ static int create_sdma_queue_nocpsch(struct device_queue_manager *dqm,
pr_debug("SDMA engine id: %d\n", q->properties.sdma_engine_id);
dqm->asic_ops.init_sdma_vm(dqm, q, qpd);
- retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+ retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (retval)
goto out_deallocate_doorbell;
- retval = mqd->load_mqd(mqd, q->mqd, 0, 0, &q->properties, NULL);
+ retval = mqd_mgr->load_mqd(mqd_mgr, q->mqd, 0, 0, &q->properties,
+ NULL);
if (retval)
goto out_uninit_mqd;
return 0;
out_uninit_mqd:
- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
out_deallocate_doorbell:
deallocate_doorbell(qpd, q);
out_deallocate_sdma_queue:
@@ -1003,12 +1020,14 @@ static int initialize_cpsch(struct device_queue_manager *dqm)
{
pr_debug("num of pipes: %d\n", get_pipes_per_mec(dqm));
- mutex_init(&dqm->lock);
+ mutex_init(&dqm->lock_hidden);
INIT_LIST_HEAD(&dqm->queues);
dqm->queue_count = dqm->processes_count = 0;
dqm->sdma_queue_count = 0;
dqm->active_runlist = false;
- dqm->sdma_bitmap = (1 << CIK_SDMA_QUEUES) - 1;
+ dqm->sdma_bitmap = (1 << get_num_sdma_queues(dqm)) - 1;
+
+ INIT_WORK(&dqm->hw_exception_work, kfd_process_hw_exception);
return 0;
}
@@ -1041,9 +1060,11 @@ static int start_cpsch(struct device_queue_manager *dqm)
init_interrupts(dqm);
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
+ /* Clear hang status when the driver tries to start the HW scheduler */
+ dqm->is_hws_hang = false;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return 0;
fail_allocate_vidmem:
@@ -1055,9 +1076,9 @@ fail_packet_manager_init:
static int stop_cpsch(struct device_queue_manager *dqm)
{
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
unmap_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
kfd_gtt_sa_free(dqm->dev, dqm->fence_mem);
pm_uninit(&dqm->packets);
@@ -1069,11 +1090,11 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
struct kernel_queue *kq,
struct qcm_process_device *qpd)
{
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
pr_warn("Can't create new kernel queue because %d queues were already created\n",
dqm->total_queue_count);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return -EPERM;
}
@@ -1089,7 +1110,7 @@ static int create_kernel_queue_cpsch(struct device_queue_manager *dqm,
dqm->queue_count++;
qpd->is_debug = true;
execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES, 0);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return 0;
}
@@ -1098,7 +1119,7 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
struct kernel_queue *kq,
struct qcm_process_device *qpd)
{
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
list_del(&kq->list);
dqm->queue_count--;
qpd->is_debug = false;
@@ -1110,18 +1131,18 @@ static void destroy_kernel_queue_cpsch(struct device_queue_manager *dqm,
dqm->total_queue_count--;
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
}
static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
struct qcm_process_device *qpd)
{
int retval;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
retval = 0;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (dqm->total_queue_count >= max_num_of_queues_per_device) {
pr_warn("Can't create new usermode queue because %d queues were already created\n",
@@ -1135,19 +1156,19 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
if (retval)
goto out_unlock;
q->properties.sdma_queue_id =
- q->sdma_id / CIK_SDMA_QUEUES_PER_ENGINE;
+ q->sdma_id / get_num_sdma_engines(dqm);
q->properties.sdma_engine_id =
- q->sdma_id % CIK_SDMA_QUEUES_PER_ENGINE;
+ q->sdma_id % get_num_sdma_engines(dqm);
}
retval = allocate_doorbell(qpd, q);
if (retval)
goto out_deallocate_sdma_queue;
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd) {
+ if (!mqd_mgr) {
retval = -ENOMEM;
goto out_deallocate_doorbell;
}
@@ -1164,7 +1185,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
q->properties.tba_addr = qpd->tba_addr;
q->properties.tma_addr = qpd->tma_addr;
- retval = mqd->init_mqd(mqd, &q->mqd, &q->mqd_mem_obj,
+ retval = mqd_mgr->init_mqd(mqd_mgr, &q->mqd, &q->mqd_mem_obj,
&q->gart_mqd_addr, &q->properties);
if (retval)
goto out_deallocate_doorbell;
@@ -1188,7 +1209,7 @@ static int create_queue_cpsch(struct device_queue_manager *dqm, struct queue *q,
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
out_deallocate_doorbell:
@@ -1197,7 +1218,8 @@ out_deallocate_sdma_queue:
if (q->properties.type == KFD_QUEUE_TYPE_SDMA)
deallocate_sdma_queue(dqm, q->sdma_id);
out_unlock:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
+
return retval;
}
@@ -1210,6 +1232,13 @@ int amdkfd_fence_wait_timeout(unsigned int *fence_addr,
while (*fence_addr != fence_value) {
if (time_after(jiffies, end_jiffies)) {
pr_err("qcm fence wait loop timeout expired\n");
+ /* In the HWS case, this is used to halt the driver thread
+ * in order not to mess up CP states before doing
+ * scandumps for FW debugging.
+ */
+ while (halt_if_hws_hang)
+ schedule();
+
return -ETIME;
}
schedule();
@@ -1254,6 +1283,8 @@ static int unmap_queues_cpsch(struct device_queue_manager *dqm,
{
int retval = 0;
+ if (dqm->is_hws_hang)
+ return -EIO;
if (!dqm->active_runlist)
return retval;
@@ -1292,9 +1323,13 @@ static int execute_queues_cpsch(struct device_queue_manager *dqm,
{
int retval;
+ if (dqm->is_hws_hang)
+ return -EIO;
retval = unmap_queues_cpsch(dqm, filter, filter_param);
if (retval) {
pr_err("The cp might be in an unrecoverable state due to an unsuccessful queues preemption\n");
+ dqm->is_hws_hang = true;
+ schedule_work(&dqm->hw_exception_work);
return retval;
}
@@ -1306,7 +1341,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
struct queue *q)
{
int retval;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
bool preempt_all_queues;
preempt_all_queues = false;
@@ -1314,7 +1349,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
retval = 0;
/* remove queue from list to prevent rescheduling after preemption */
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (qpd->is_debug) {
/*
@@ -1326,9 +1361,9 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
}
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd) {
+ if (!mqd_mgr) {
retval = -ENOMEM;
goto failed;
}
@@ -1350,7 +1385,7 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
qpd->reset_wavefronts = true;
}
- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
/*
* Unconditionally decrement this counter, regardless of the queue's
@@ -1360,14 +1395,14 @@ static int destroy_queue_cpsch(struct device_queue_manager *dqm,
pr_debug("Total of %d queues are accountable so far\n",
dqm->total_queue_count);
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
failed:
failed_try_destroy_debugged_queue:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -1391,7 +1426,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
if (!dqm->asic_ops.set_cache_memory_policy)
return retval;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
if (alternate_aperture_size == 0) {
/* base > limit disables APE1 */
@@ -1437,7 +1472,7 @@ static bool set_cache_memory_policy(struct device_queue_manager *dqm,
qpd->sh_mem_ape1_limit);
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -1468,7 +1503,7 @@ static int process_termination_nocpsch(struct device_queue_manager *dqm,
struct device_process_node *cur, *next_dpn;
int retval = 0;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
/* Clear all user mode queues */
list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
@@ -1489,7 +1524,7 @@ static int process_termination_nocpsch(struct device_queue_manager *dqm,
}
}
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -1500,14 +1535,14 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
int retval;
struct queue *q, *next;
struct kernel_queue *kq, *kq_next;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
struct device_process_node *cur, *next_dpn;
enum kfd_unmap_queues_filter filter =
KFD_UNMAP_QUEUES_FILTER_DYNAMIC_QUEUES;
retval = 0;
- mutex_lock(&dqm->lock);
+ dqm_lock(dqm);
/* Clean all kernel queues */
list_for_each_entry_safe(kq, kq_next, &qpd->priv_queue_list, list) {
@@ -1542,7 +1577,7 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
}
retval = execute_queues_cpsch(dqm, filter, 0);
- if (retval || qpd->reset_wavefronts) {
+ if ((!dqm->is_hws_hang) && (retval || qpd->reset_wavefronts)) {
pr_warn("Resetting wave fronts (cpsch) on dev %p\n", dqm->dev);
dbgdev_wave_reset_wavefronts(dqm->dev, qpd->pqm->process);
qpd->reset_wavefronts = false;
@@ -1550,19 +1585,19 @@ static int process_termination_cpsch(struct device_queue_manager *dqm,
/* lastly, free mqd resources */
list_for_each_entry_safe(q, next, &qpd->queues_list, list) {
- mqd = dqm->ops.get_mqd_manager(dqm,
+ mqd_mgr = dqm->ops.get_mqd_manager(dqm,
get_mqd_type_from_queue_type(q->properties.type));
- if (!mqd) {
+ if (!mqd_mgr) {
retval = -ENOMEM;
goto out;
}
list_del(&q->list);
qpd->queue_count--;
- mqd->uninit_mqd(mqd, q->mqd, q->mqd_mem_obj);
+ mqd_mgr->uninit_mqd(mqd_mgr, q->mqd, q->mqd_mem_obj);
}
out:
- mutex_unlock(&dqm->lock);
+ dqm_unlock(dqm);
return retval;
}
@@ -1683,6 +1718,30 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm)
kfree(dqm);
}
+int kfd_process_vm_fault(struct device_queue_manager *dqm,
+ unsigned int pasid)
+{
+ struct kfd_process_device *pdd;
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ int ret = 0;
+
+ if (!p)
+ return -EINVAL;
+ pdd = kfd_get_process_device_data(dqm->dev, p);
+ if (pdd)
+ ret = dqm->ops.evict_process_queues(dqm, &pdd->qpd);
+ kfd_unref_process(p);
+
+ return ret;
+}
+
+static void kfd_process_hw_exception(struct work_struct *work)
+{
+ struct device_queue_manager *dqm = container_of(work,
+ struct device_queue_manager, hw_exception_work);
+ dqm->dev->kfd2kgd->gpu_recover(dqm->dev->kgd);
+}
+
#if defined(CONFIG_DEBUG_FS)
static void seq_reg_dump(struct seq_file *m,
@@ -1746,8 +1805,8 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
}
}
- for (pipe = 0; pipe < CIK_SDMA_ENGINE_NUM; pipe++) {
- for (queue = 0; queue < CIK_SDMA_QUEUES_PER_ENGINE; queue++) {
+ for (pipe = 0; pipe < get_num_sdma_engines(dqm); pipe++) {
+ for (queue = 0; queue < KFD_SDMA_QUEUES_PER_ENGINE; queue++) {
r = dqm->dev->kfd2kgd->hqd_sdma_dump(
dqm->dev->kgd, pipe, queue, &dump, &n_regs);
if (r)
@@ -1764,4 +1823,16 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data)
return r;
}
+int dqm_debugfs_execute_queues(struct device_queue_manager *dqm)
+{
+ int r = 0;
+
+ dqm_lock(dqm);
+ dqm->active_runlist = true;
+ r = execute_queues_cpsch(dqm, KFD_UNMAP_QUEUES_FILTER_ALL_QUEUES, 0);
+ dqm_unlock(dqm);
+
+ return r;
+}
+
#endif
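
The hunks above add a sticky failure latch for the hardware scheduler: a failed preemption in unmap_queues_cpsch() sets dqm->is_hws_hang and schedules hw_exception_work (which calls kfd2kgd->gpu_recover()), every later map/unmap entry point bails out with -EIO, and start_cpsch() clears the flag once the scheduler is restarted. A minimal stand-alone model of that latch (illustrative paraphrase, not driver code):

#include <stdbool.h>
#include <stdio.h>

static bool is_hws_hang;

static int unmap_queues(bool fw_responds)
{
	return fw_responds ? 0 : -62;	/* -ETIME: preemption fence timed out */
}

static int execute_queues(bool fw_responds)
{
	if (is_hws_hang)
		return -5;		/* -EIO until gpu_recover() restarts us */
	if (unmap_queues(fw_responds)) {
		is_hws_hang = true;	/* latch; the driver also schedules hw_exception_work */
		return -5;
	}
	return 0;			/* the driver would map the runlist here */
}

int main(void)
{
	/* prints "0 -5 -5": once latched, even a healthy call is refused */
	printf("%d %d %d\n", execute_queues(true), execute_queues(false),
	       execute_queues(true));
	return 0;
}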
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
index 59a6b1956932..00da3169a004 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager.h
@@ -26,15 +26,14 @@
#include <linux/rwsem.h>
#include <linux/list.h>
+#include <linux/mutex.h>
+#include <linux/sched/mm.h>
#include "kfd_priv.h"
#include "kfd_mqd_manager.h"
#define KFD_UNMAP_LATENCY_MS (4000)
#define QUEUE_PREEMPT_DEFAULT_TIMEOUT_MS (2 * KFD_UNMAP_LATENCY_MS + 1000)
-
-#define CIK_SDMA_QUEUES (4)
-#define CIK_SDMA_QUEUES_PER_ENGINE (2)
-#define CIK_SDMA_ENGINE_NUM (2)
+#define KFD_SDMA_QUEUES_PER_ENGINE (2)
struct device_process_node {
struct qcm_process_device *qpd;
@@ -170,11 +169,12 @@ struct device_queue_manager {
struct device_queue_manager_ops ops;
struct device_queue_manager_asic_ops asic_ops;
- struct mqd_manager *mqds[KFD_MQD_TYPE_MAX];
+ struct mqd_manager *mqd_mgrs[KFD_MQD_TYPE_MAX];
struct packet_manager packets;
struct kfd_dev *dev;
- struct mutex lock;
+ struct mutex lock_hidden; /* use dqm_lock/unlock(dqm) */
struct list_head queues;
+ unsigned int saved_flags;
unsigned int processes_count;
unsigned int queue_count;
unsigned int sdma_queue_count;
@@ -190,6 +190,10 @@ struct device_queue_manager {
struct kfd_mem_obj *fence_mem;
bool active_runlist;
int sched_policy;
+
+ /* hw exception */
+ bool is_hws_hang;
+ struct work_struct hw_exception_work;
};
void device_queue_manager_init_cik(
@@ -207,6 +211,7 @@ void program_sh_mem_settings(struct device_queue_manager *dqm,
unsigned int get_queues_num(struct device_queue_manager *dqm);
unsigned int get_queues_per_pipe(struct device_queue_manager *dqm);
unsigned int get_pipes_per_mec(struct device_queue_manager *dqm);
+unsigned int get_num_sdma_queues(struct device_queue_manager *dqm);
static inline unsigned int get_sh_mem_bases_32(struct kfd_process_device *pdd)
{
@@ -219,4 +224,19 @@ get_sh_mem_bases_nybble_64(struct kfd_process_device *pdd)
return (pdd->lds_base >> 60) & 0x0E;
}
+/* The DQM lock can be taken in MMU notifiers. Make sure no reclaim-FS
+ * happens anywhere while this lock is held, to prevent deadlocks when
+ * an MMU notifier runs in reclaim-FS context.
+ */
+static inline void dqm_lock(struct device_queue_manager *dqm)
+{
+ mutex_lock(&dqm->lock_hidden);
+ dqm->saved_flags = memalloc_nofs_save();
+}
+static inline void dqm_unlock(struct device_queue_manager *dqm)
+{
+ memalloc_nofs_restore(dqm->saved_flags);
+ mutex_unlock(&dqm->lock_hidden);
+}
+
#endif /* KFD_DEVICE_QUEUE_MANAGER_H_ */
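
The nofs section opened by dqm_lock() is also presumably what lets the MQD managers further down drop GFP_NOIO for plain GFP_KERNEL: any allocation made while the lock is held has __GFP_FS masked off, so it cannot recurse into filesystem reclaim and re-enter an MMU notifier that needs the same lock. A hypothetical caller, as a sketch (not part of the patch):

static int example_under_dqm_lock(struct device_queue_manager *dqm)
{
	void *tmp;

	dqm_lock(dqm);
	/* behaves as GFP_NOFS here thanks to memalloc_nofs_save() */
	tmp = kzalloc(64, GFP_KERNEL);
	if (!tmp) {
		dqm_unlock(dqm);
		return -ENOMEM;
	}
	/* ... touch dqm state ... */
	kfree(tmp);
	dqm_unlock(dqm);
	return 0;
}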
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
index 79e5bcf6367c..417515332c35 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_device_queue_manager_v9.c
@@ -60,7 +60,7 @@ static int update_qpd_v9(struct device_queue_manager *dqm,
qpd->sh_mem_config =
SH_MEM_ALIGNMENT_MODE_UNALIGNED <<
SH_MEM_CONFIG__ALIGNMENT_MODE__SHIFT;
- if (vega10_noretry &&
+ if (noretry &&
!dqm->dev->device_info->needs_iommu_device)
qpd->sh_mem_config |=
1 << SH_MEM_CONFIG__RETRY_DISABLE__SHIFT;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
index c3744d89352c..ebe79bf00145 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_doorbell.c
@@ -188,9 +188,9 @@ void __iomem *kfd_get_kernel_doorbell(struct kfd_dev *kfd,
*doorbell_off = kfd->doorbell_id_offset + inx;
pr_debug("Get kernel queue doorbell\n"
- " doorbell offset == 0x%08X\n"
- " kernel address == %p\n",
- *doorbell_off, (kfd->doorbell_kernel_ptr + inx));
+ " doorbell offset == 0x%08X\n"
+ " doorbell index == 0x%x\n",
+ *doorbell_off, inx);
return kfd->doorbell_kernel_ptr + inx;
}
@@ -199,7 +199,8 @@ void kfd_release_kernel_doorbell(struct kfd_dev *kfd, u32 __iomem *db_addr)
{
unsigned int inx;
- inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr);
+ inx = (unsigned int)(db_addr - kfd->doorbell_kernel_ptr)
+ * sizeof(u32) / kfd->device_info->doorbell_size;
mutex_lock(&kfd->doorbell_mutex);
__clear_bit(inx, kfd->doorbell_available_index);
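
The new index math converts the pointer difference, which is in u32 units, into doorbell slots: multiply by sizeof(u32) to get bytes, then divide by the per-device doorbell width. For example, assuming doorbell_size == 8 (64-bit doorbells on SOC15 parts), a db_addr six u32s past doorbell_kernel_ptr gives inx = 6 * 4 / 8 = 3; with 4-byte doorbells the division cancels out and the old behaviour is preserved.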
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.c b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
index 5562e94e786a..e9f0e0a1b41c 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.c
@@ -850,6 +850,13 @@ static void lookup_events_by_type_and_signal(struct kfd_process *p,
ev->memory_exception_data = *ev_data;
}
+ if (type == KFD_EVENT_TYPE_MEMORY) {
+ dev_warn(kfd_device,
+ "Sending SIGSEGV to HSA Process with PID %d ",
+ p->lead_thread->pid);
+ send_sig(SIGSEGV, p->lead_thread, 0);
+ }
+
/* Send SIGTERM if no event of type "type" has been found */
if (send_signal) {
if (send_sigterm) {
@@ -904,34 +911,41 @@ void kfd_signal_iommu_event(struct kfd_dev *dev, unsigned int pasid,
memory_exception_data.failure.NotPresent = 1;
memory_exception_data.failure.NoExecute = 0;
memory_exception_data.failure.ReadOnly = 0;
- if (vma) {
- if (vma->vm_start > address) {
- memory_exception_data.failure.NotPresent = 1;
- memory_exception_data.failure.NoExecute = 0;
+ if (vma && address >= vma->vm_start) {
+ memory_exception_data.failure.NotPresent = 0;
+
+ if (is_write_requested && !(vma->vm_flags & VM_WRITE))
+ memory_exception_data.failure.ReadOnly = 1;
+ else
memory_exception_data.failure.ReadOnly = 0;
- } else {
- memory_exception_data.failure.NotPresent = 0;
- if (is_write_requested && !(vma->vm_flags & VM_WRITE))
- memory_exception_data.failure.ReadOnly = 1;
- else
- memory_exception_data.failure.ReadOnly = 0;
- if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
- memory_exception_data.failure.NoExecute = 1;
- else
- memory_exception_data.failure.NoExecute = 0;
- }
+
+ if (is_execute_requested && !(vma->vm_flags & VM_EXEC))
+ memory_exception_data.failure.NoExecute = 1;
+ else
+ memory_exception_data.failure.NoExecute = 0;
}
up_read(&mm->mmap_sem);
mmput(mm);
- mutex_lock(&p->event_mutex);
+ pr_debug("notpresent %d, noexecute %d, readonly %d\n",
+ memory_exception_data.failure.NotPresent,
+ memory_exception_data.failure.NoExecute,
+ memory_exception_data.failure.ReadOnly);
- /* Lookup events by type and signal them */
- lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
- &memory_exception_data);
+ /* Workaround on Raven: do not kill the process when memory is freed
+ * before the IOMMU is able to finish processing all the excessive PPRs
+ */
+ if (dev->device_info->asic_family != CHIP_RAVEN) {
+ mutex_lock(&p->event_mutex);
+
+ /* Lookup events by type and signal them */
+ lookup_events_by_type_and_signal(p, KFD_EVENT_TYPE_MEMORY,
+ &memory_exception_data);
+
+ mutex_unlock(&p->event_mutex);
+ }
- mutex_unlock(&p->event_mutex);
kfd_unref_process(p);
}
#endif /* KFD_SUPPORT_IOMMU_V2 */
@@ -956,3 +970,67 @@ void kfd_signal_hw_exception_event(unsigned int pasid)
mutex_unlock(&p->event_mutex);
kfd_unref_process(p);
}
+
+void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
+ struct kfd_vm_fault_info *info)
+{
+ struct kfd_event *ev;
+ uint32_t id;
+ struct kfd_process *p = kfd_lookup_process_by_pasid(pasid);
+ struct kfd_hsa_memory_exception_data memory_exception_data;
+
+ if (!p)
+ return; /* Presumably process exited. */
+ memset(&memory_exception_data, 0, sizeof(memory_exception_data));
+ memory_exception_data.gpu_id = dev->id;
+ memory_exception_data.failure.imprecise = 1;
+ /* Set failure reason */
+ if (info) {
+ memory_exception_data.va = (info->page_addr) << PAGE_SHIFT;
+ memory_exception_data.failure.NotPresent =
+ info->prot_valid ? 1 : 0;
+ memory_exception_data.failure.NoExecute =
+ info->prot_exec ? 1 : 0;
+ memory_exception_data.failure.ReadOnly =
+ info->prot_write ? 1 : 0;
+ memory_exception_data.failure.imprecise = 0;
+ }
+ mutex_lock(&p->event_mutex);
+
+ id = KFD_FIRST_NONSIGNAL_EVENT_ID;
+ idr_for_each_entry_continue(&p->event_idr, ev, id)
+ if (ev->type == KFD_EVENT_TYPE_MEMORY) {
+ ev->memory_exception_data = memory_exception_data;
+ set_event(ev);
+ }
+
+ mutex_unlock(&p->event_mutex);
+ kfd_unref_process(p);
+}
+
+void kfd_signal_reset_event(struct kfd_dev *dev)
+{
+ struct kfd_hsa_hw_exception_data hw_exception_data;
+ struct kfd_process *p;
+ struct kfd_event *ev;
+ unsigned int temp;
+ uint32_t id, idx;
+
+ /* Whole-GPU reset caused by a GPU hang; memory contents are lost */
+ memset(&hw_exception_data, 0, sizeof(hw_exception_data));
+ hw_exception_data.gpu_id = dev->id;
+ hw_exception_data.memory_lost = 1;
+
+ idx = srcu_read_lock(&kfd_processes_srcu);
+ hash_for_each_rcu(kfd_processes_table, temp, p, kfd_processes) {
+ mutex_lock(&p->event_mutex);
+ id = KFD_FIRST_NONSIGNAL_EVENT_ID;
+ idr_for_each_entry_continue(&p->event_idr, ev, id)
+ if (ev->type == KFD_EVENT_TYPE_HW_EXCEPTION) {
+ ev->hw_exception_data = hw_exception_data;
+ set_event(ev);
+ }
+ mutex_unlock(&p->event_mutex);
+ }
+ srcu_read_unlock(&kfd_processes_srcu, idx);
+}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_events.h b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
index abca5bfebbff..c7ac6c73af86 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_events.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_events.h
@@ -66,6 +66,7 @@ struct kfd_event {
/* type specific data */
union {
struct kfd_hsa_memory_exception_data memory_exception_data;
+ struct kfd_hsa_hw_exception_data hw_exception_data;
};
};
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
index 37029baa3346..f836897bbf58 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_int_process_v9.c
@@ -26,7 +26,9 @@
static bool event_interrupt_isr_v9(struct kfd_dev *dev,
- const uint32_t *ih_ring_entry)
+ const uint32_t *ih_ring_entry,
+ uint32_t *patched_ihre,
+ bool *patched_flag)
{
uint16_t source_id, client_id, pasid, vmid;
const uint32_t *data = ih_ring_entry;
@@ -57,7 +59,9 @@ static bool event_interrupt_isr_v9(struct kfd_dev *dev,
return source_id == SOC15_INTSRC_CP_END_OF_PIPE ||
source_id == SOC15_INTSRC_SDMA_TRAP ||
source_id == SOC15_INTSRC_SQ_INTERRUPT_MSG ||
- source_id == SOC15_INTSRC_CP_BAD_OPCODE;
+ source_id == SOC15_INTSRC_CP_BAD_OPCODE ||
+ client_id == SOC15_IH_CLIENTID_VMC ||
+ client_id == SOC15_IH_CLIENTID_UTCL2;
}
static void event_interrupt_wq_v9(struct kfd_dev *dev,
@@ -82,7 +86,19 @@ static void event_interrupt_wq_v9(struct kfd_dev *dev,
kfd_signal_hw_exception_event(pasid);
else if (client_id == SOC15_IH_CLIENTID_VMC ||
client_id == SOC15_IH_CLIENTID_UTCL2) {
- /* TODO */
+ struct kfd_vm_fault_info info = {0};
+ uint16_t ring_id = SOC15_RING_ID_FROM_IH_ENTRY(ih_ring_entry);
+
+ info.vmid = vmid;
+ info.mc_id = client_id;
+ info.page_addr = ih_ring_entry[4] |
+ (uint64_t)(ih_ring_entry[5] & 0xf) << 32;
+ info.prot_valid = ring_id & 0x08;
+ info.prot_read = ring_id & 0x10;
+ info.prot_write = ring_id & 0x20;
+
+ kfd_process_vm_fault(dev->dqm, pasid);
+ kfd_signal_vm_fault_event(dev, pasid, &info);
}
}
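
The two dwords pulled from the ring entry form a 36-bit page-frame number that kfd_signal_vm_fault_event() later shifts into a byte address; as a decode sketch (illustrative, mirroring the hunk above):

/* dword 4 = PFN[31:0], low nibble of dword 5 = PFN[35:32] */
uint64_t pfn = ih_ring_entry[4] |
	       ((uint64_t)(ih_ring_entry[5] & 0xf) << 32);
uint64_t va  = pfn << PAGE_SHIFT;	/* 48-bit faulting address */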
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
index db6d9336b80d..c56ac47cd318 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_interrupt.c
@@ -151,13 +151,15 @@ static void interrupt_wq(struct work_struct *work)
ih_ring_entry);
}
-bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry)
+bool interrupt_is_wanted(struct kfd_dev *dev,
+ const uint32_t *ih_ring_entry,
+ uint32_t *patched_ihre, bool *flag)
{
/* integer and bitwise OR so there is no boolean short-circuiting */
unsigned int wanted = 0;
wanted |= dev->device_info->event_interrupt_class->interrupt_isr(dev,
- ih_ring_entry);
+ ih_ring_entry, patched_ihre, flag);
return wanted != 0;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
index c71817963eea..7a61f38c09e6 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_iommu.c
@@ -190,7 +190,7 @@ static int iommu_invalid_ppr_cb(struct pci_dev *pdev, int pasid,
{
struct kfd_dev *dev;
- dev_warn(kfd_device,
+ dev_warn_ratelimited(kfd_device,
"Invalid PPR device %x:%x.%x pasid %d address 0x%lX flags 0x%X",
PCI_BUS_NUM(pdev->devfn),
PCI_SLOT(pdev->devfn),
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
index 476951d8c91c..9f84b4d9fb88 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.c
@@ -59,7 +59,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
switch (type) {
case KFD_QUEUE_TYPE_DIQ:
case KFD_QUEUE_TYPE_HIQ:
- kq->mqd = dev->dqm->ops.get_mqd_manager(dev->dqm,
+ kq->mqd_mgr = dev->dqm->ops.get_mqd_manager(dev->dqm,
KFD_MQD_TYPE_HIQ);
break;
default:
@@ -67,7 +67,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
return false;
}
- if (!kq->mqd)
+ if (!kq->mqd_mgr)
return false;
prop.doorbell_ptr = kfd_get_kernel_doorbell(dev, &prop.doorbell_off);
@@ -123,6 +123,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
prop.write_ptr = (uint32_t *) kq->wptr_gpu_addr;
prop.eop_ring_buffer_address = kq->eop_gpu_addr;
prop.eop_ring_buffer_size = PAGE_SIZE;
+ prop.cu_mask = NULL;
if (init_queue(&kq->queue, &prop) != 0)
goto err_init_queue;
@@ -130,7 +131,7 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
kq->queue->device = dev;
kq->queue->process = kfd_get_process(current);
- retval = kq->mqd->init_mqd(kq->mqd, &kq->queue->mqd,
+ retval = kq->mqd_mgr->init_mqd(kq->mqd_mgr, &kq->queue->mqd,
&kq->queue->mqd_mem_obj,
&kq->queue->gart_mqd_addr,
&kq->queue->properties);
@@ -142,9 +143,9 @@ static bool initialize(struct kernel_queue *kq, struct kfd_dev *dev,
pr_debug("Assigning hiq to hqd\n");
kq->queue->pipe = KFD_CIK_HIQ_PIPE;
kq->queue->queue = KFD_CIK_HIQ_QUEUE;
- kq->mqd->load_mqd(kq->mqd, kq->queue->mqd, kq->queue->pipe,
- kq->queue->queue, &kq->queue->properties,
- NULL);
+ kq->mqd_mgr->load_mqd(kq->mqd_mgr, kq->queue->mqd,
+ kq->queue->pipe, kq->queue->queue,
+ &kq->queue->properties, NULL);
} else {
/* allocate fence for DIQ */
@@ -182,7 +183,7 @@ err_get_kernel_doorbell:
static void uninitialize(struct kernel_queue *kq)
{
if (kq->queue->properties.type == KFD_QUEUE_TYPE_HIQ)
- kq->mqd->destroy_mqd(kq->mqd,
+ kq->mqd_mgr->destroy_mqd(kq->mqd_mgr,
kq->queue->mqd,
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
KFD_UNMAP_LATENCY_MS,
@@ -191,7 +192,8 @@ static void uninitialize(struct kernel_queue *kq)
else if (kq->queue->properties.type == KFD_QUEUE_TYPE_DIQ)
kfd_gtt_sa_free(kq->dev, kq->fence_mem_obj);
- kq->mqd->uninit_mqd(kq->mqd, kq->queue->mqd, kq->queue->mqd_mem_obj);
+ kq->mqd_mgr->uninit_mqd(kq->mqd_mgr, kq->queue->mqd,
+ kq->queue->mqd_mem_obj);
kfd_gtt_sa_free(kq->dev, kq->rptr_mem);
kfd_gtt_sa_free(kq->dev, kq->wptr_mem);
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
index 97aff2041a5d..a7116a939029 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_kernel_queue.h
@@ -70,7 +70,7 @@ struct kernel_queue {
/* data */
struct kfd_dev *dev;
- struct mqd_manager *mqd;
+ struct mqd_manager *mqd_mgr;
struct queue *queue;
uint64_t pending_wptr64;
uint32_t pending_wptr;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_module.c b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
index 76bf2dc8aec4..6e1f5c7c2d4b 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_module.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_module.c
@@ -47,6 +47,8 @@ static const struct kgd2kfd_calls kgd2kfd = {
.resume_mm = kgd2kfd_resume_mm,
.schedule_evict_and_restore_process =
kgd2kfd_schedule_evict_and_restore_process,
+ .pre_reset = kgd2kfd_pre_reset,
+ .post_reset = kgd2kfd_post_reset,
};
int sched_policy = KFD_SCHED_POLICY_HWS;
@@ -61,7 +63,7 @@ MODULE_PARM_DESC(hws_max_conc_proc,
int cwsr_enable = 1;
module_param(cwsr_enable, int, 0444);
-MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = Off, 1 = On (Default))");
+MODULE_PARM_DESC(cwsr_enable, "CWSR enable (0 = off, 1 = on (default))");
int max_num_of_queues_per_device = KFD_MAX_NUM_OF_QUEUES_PER_DEVICE_DEFAULT;
module_param(max_num_of_queues_per_device, int, 0444);
@@ -83,13 +85,19 @@ module_param(ignore_crat, int, 0444);
MODULE_PARM_DESC(ignore_crat,
"Ignore CRAT table during KFD initialization (0 = use CRAT (default), 1 = ignore CRAT)");
-int vega10_noretry;
-module_param_named(noretry, vega10_noretry, int, 0644);
+int noretry;
+module_param(noretry, int, 0644);
MODULE_PARM_DESC(noretry,
- "Set sh_mem_config.retry_disable on Vega10 (0 = retry enabled (default), 1 = retry disabled)");
+ "Set sh_mem_config.retry_disable on GFXv9+ dGPUs (0 = retry enabled (default), 1 = retry disabled)");
+
+int halt_if_hws_hang;
+module_param(halt_if_hws_hang, int, 0644);
+MODULE_PARM_DESC(halt_if_hws_hang, "Halt if HWS hang is detected (0 = off (default), 1 = on)");
+
static int amdkfd_init_completed;
+
int kgd2kfd_init(unsigned int interface_version,
const struct kgd2kfd_calls **g2f)
{
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
index 4b8eb506642b..3bc25ab84f34 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.c
@@ -21,7 +21,7 @@
*
*/
-#include "kfd_priv.h"
+#include "kfd_mqd_manager.h"
struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
struct kfd_dev *dev)
@@ -48,3 +48,42 @@ struct mqd_manager *mqd_manager_init(enum KFD_MQD_TYPE type,
return NULL;
}
+
+void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
+ const uint32_t *cu_mask, uint32_t cu_mask_count,
+ uint32_t *se_mask)
+{
+ struct kfd_cu_info cu_info;
+ uint32_t cu_per_sh[4] = {0};
+ int i, se, cu = 0;
+
+ mm->dev->kfd2kgd->get_cu_info(mm->dev->kgd, &cu_info);
+
+ if (cu_mask_count > cu_info.cu_active_number)
+ cu_mask_count = cu_info.cu_active_number;
+
+ for (se = 0; se < cu_info.num_shader_engines; se++)
+ for (i = 0; i < 4; i++)
+ cu_per_sh[se] += hweight32(cu_info.cu_bitmap[se][i]);
+
+ /* Symmetrically map cu_mask to all SEs:
+ * cu_mask[0] bit0 -> se_mask[0] bit0;
+ * cu_mask[0] bit1 -> se_mask[1] bit0;
+ * ... (if # SE is 4)
+ * cu_mask[0] bit4 -> se_mask[0] bit1;
+ * ...
+ */
+ se = 0;
+ for (i = 0; i < cu_mask_count; i++) {
+ if (cu_mask[i / 32] & (1 << (i % 32)))
+ se_mask[se] |= 1 << cu;
+
+ do {
+ se++;
+ if (se == cu_info.num_shader_engines) {
+ se = 0;
+ cu++;
+ }
+ } while (cu >= cu_per_sh[se] && cu < 32);
+ }
+}
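
A worked example of the round-robin above, assuming four shader engines that each report at least two active CUs: cu_mask = 0xFF maps as

cu_mask bit 0 -> se_mask[0] bit 0    cu_mask bit 4 -> se_mask[0] bit 1
cu_mask bit 1 -> se_mask[1] bit 0    cu_mask bit 5 -> se_mask[1] bit 1
cu_mask bit 2 -> se_mask[2] bit 0    cu_mask bit 6 -> se_mask[2] bit 1
cu_mask bit 3 -> se_mask[3] bit 0    cu_mask bit 7 -> se_mask[3] bit 1

so every se_mask ends up as 0x3. Consecutive user bits spread across SEs before the per-SE CU index advances, and the do/while skips any SE whose cu_per_sh[] count is already exhausted.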
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
index 8972bcfbf701..4e84052d4e21 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager.h
@@ -93,4 +93,8 @@ struct mqd_manager {
struct kfd_dev *dev;
};
+void mqd_symmetrically_map_cu_mask(struct mqd_manager *mm,
+ const uint32_t *cu_mask, uint32_t cu_mask_count,
+ uint32_t *se_mask);
+
#endif /* KFD_MQD_MANAGER_H_ */
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
index 06eaa218eba6..47243165a082 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_cik.c
@@ -41,6 +41,31 @@ static inline struct cik_sdma_rlc_registers *get_sdma_mqd(void *mqd)
return (struct cik_sdma_rlc_registers *)mqd;
}
+static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ struct cik_mqd *m;
+ uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
+
+ if (q->cu_mask_count == 0)
+ return;
+
+ mqd_symmetrically_map_cu_mask(mm,
+ q->cu_mask, q->cu_mask_count, se_mask);
+
+ m = get_mqd(mqd);
+ m->compute_static_thread_mgmt_se0 = se_mask[0];
+ m->compute_static_thread_mgmt_se1 = se_mask[1];
+ m->compute_static_thread_mgmt_se2 = se_mask[2];
+ m->compute_static_thread_mgmt_se3 = se_mask[3];
+
+ pr_debug("Update cu mask to %#x %#x %#x %#x\n",
+ m->compute_static_thread_mgmt_se0,
+ m->compute_static_thread_mgmt_se1,
+ m->compute_static_thread_mgmt_se2,
+ m->compute_static_thread_mgmt_se3);
+}
+
static int init_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -196,6 +221,8 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
if (q->format == KFD_QUEUE_FORMAT_AQL)
m->cp_hqd_pq_control |= NO_UPDATE_RPTR;
+ update_cu_mask(mm, mqd, q);
+
q->is_active = (q->queue_size > 0 &&
q->queue_address != 0 &&
q->queue_percent > 0 &&
@@ -408,7 +435,7 @@ struct mqd_manager *mqd_manager_init_cik(enum KFD_MQD_TYPE type,
if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
return NULL;
- mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
+ mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd)
return NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
index 684054ff02cd..f5fc3675f21e 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_v9.c
@@ -41,6 +41,31 @@ static inline struct v9_sdma_mqd *get_sdma_mqd(void *mqd)
return (struct v9_sdma_mqd *)mqd;
}
+static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ struct v9_mqd *m;
+ uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
+
+ if (q->cu_mask_count == 0)
+ return;
+
+ mqd_symmetrically_map_cu_mask(mm,
+ q->cu_mask, q->cu_mask_count, se_mask);
+
+ m = get_mqd(mqd);
+ m->compute_static_thread_mgmt_se0 = se_mask[0];
+ m->compute_static_thread_mgmt_se1 = se_mask[1];
+ m->compute_static_thread_mgmt_se2 = se_mask[2];
+ m->compute_static_thread_mgmt_se3 = se_mask[3];
+
+ pr_debug("update cu mask to %#x %#x %#x %#x\n",
+ m->compute_static_thread_mgmt_se0,
+ m->compute_static_thread_mgmt_se1,
+ m->compute_static_thread_mgmt_se2,
+ m->compute_static_thread_mgmt_se3);
+}
+
static int init_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -55,7 +80,7 @@ static int init_mqd(struct mqd_manager *mm, void **mqd,
* instead of sub-allocation function.
*/
if (kfd->cwsr_enabled && (q->type == KFD_QUEUE_TYPE_COMPUTE)) {
- *mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_NOIO);
+ *mqd_mem_obj = kzalloc(sizeof(struct kfd_mem_obj), GFP_KERNEL);
if (!*mqd_mem_obj)
return -ENOMEM;
retval = kfd->kfd2kgd->init_gtt_mem_allocation(kfd->kgd,
@@ -198,6 +223,8 @@ static int update_mqd(struct mqd_manager *mm, void *mqd,
if (mm->dev->cwsr_enabled && q->ctx_save_restore_area_address)
m->cp_hqd_ctx_save_control = 0;
+ update_cu_mask(mm, mqd, q);
+
q->is_active = (q->queue_size > 0 &&
q->queue_address != 0 &&
q->queue_percent > 0 &&
@@ -393,7 +420,7 @@ struct mqd_manager *mqd_manager_init_v9(enum KFD_MQD_TYPE type,
if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
return NULL;
- mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
+ mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd)
return NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
index 481307b8b4db..b81fda3754da 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_mqd_manager_vi.c
@@ -43,6 +43,31 @@ static inline struct vi_sdma_mqd *get_sdma_mqd(void *mqd)
return (struct vi_sdma_mqd *)mqd;
}
+static void update_cu_mask(struct mqd_manager *mm, void *mqd,
+ struct queue_properties *q)
+{
+ struct vi_mqd *m;
+ uint32_t se_mask[4] = {0}; /* 4 is the max # of SEs */
+
+ if (q->cu_mask_count == 0)
+ return;
+
+ mqd_symmetrically_map_cu_mask(mm,
+ q->cu_mask, q->cu_mask_count, se_mask);
+
+ m = get_mqd(mqd);
+ m->compute_static_thread_mgmt_se0 = se_mask[0];
+ m->compute_static_thread_mgmt_se1 = se_mask[1];
+ m->compute_static_thread_mgmt_se2 = se_mask[2];
+ m->compute_static_thread_mgmt_se3 = se_mask[3];
+
+ pr_debug("Update cu mask to %#x %#x %#x %#x\n",
+ m->compute_static_thread_mgmt_se0,
+ m->compute_static_thread_mgmt_se1,
+ m->compute_static_thread_mgmt_se2,
+ m->compute_static_thread_mgmt_se3);
+}
+
static int init_mqd(struct mqd_manager *mm, void **mqd,
struct kfd_mem_obj **mqd_mem_obj, uint64_t *gart_addr,
struct queue_properties *q)
@@ -196,6 +221,8 @@ static int __update_mqd(struct mqd_manager *mm, void *mqd,
atc_bit << CP_HQD_CTX_SAVE_CONTROL__ATC__SHIFT |
mtype << CP_HQD_CTX_SAVE_CONTROL__MTYPE__SHIFT;
+ update_cu_mask(mm, mqd, q);
+
q->is_active = (q->queue_size > 0 &&
q->queue_address != 0 &&
q->queue_percent > 0 &&
@@ -394,7 +421,7 @@ struct mqd_manager *mqd_manager_init_vi(enum KFD_MQD_TYPE type,
if (WARN_ON(type >= KFD_MQD_TYPE_MAX))
return NULL;
- mqd = kzalloc(sizeof(*mqd), GFP_NOIO);
+ mqd = kzalloc(sizeof(*mqd), GFP_KERNEL);
if (!mqd)
return NULL;
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
index c317feb43f69..1092631765cb 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_packet_manager.c
@@ -418,4 +418,30 @@ out:
return 0;
}
+int pm_debugfs_hang_hws(struct packet_manager *pm)
+{
+ uint32_t *buffer, size;
+ int r = 0;
+
+ size = pm->pmf->query_status_size;
+ mutex_lock(&pm->lock);
+ pm->priv_queue->ops.acquire_packet_buffer(pm->priv_queue,
+ size / sizeof(uint32_t), (unsigned int **)&buffer);
+ if (!buffer) {
+ pr_err("Failed to allocate buffer on kernel queue\n");
+ r = -ENOMEM;
+ goto out;
+ }
+ memset(buffer, 0x55, size);
+ pm->priv_queue->ops.submit_packet(pm->priv_queue);
+
+ pr_info("Submitting %x %x %x %x %x %x %x to HIQ to hang the HWS.",
+ buffer[0], buffer[1], buffer[2], buffer[3],
+ buffer[4], buffer[5], buffer[6]);
+out:
+ mutex_unlock(&pm->lock);
+ return r;
+}
+
#endif
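
For context, this submit path is reached from the kfd_debugfs_hang_hws() hook declared later in this series; writing to the matching debugfs entry (presumably under /sys/kernel/debug/kfd/) injects the 0x55-filled packet so the HIQ stops responding and the -ETIME -> is_hws_hang -> gpu_recover() path can be exercised without real hardware misbehaviour.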
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
index 5e3990bb4c4b..f971710f1c91 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_priv.h
@@ -73,7 +73,7 @@
/*
* When working with cp scheduler we should assign the HIQ manually or via
- * the radeon driver to a fixed hqd slot, here are the fixed HIQ hqd slot
+ * the amdgpu driver to a fixed hqd slot, here are the fixed HIQ hqd slot
* definitions for Kaveri. On Kaveri only the first ME's queues participate
* in the cp scheduling; with that in mind we set the HIQ slot in the
* second ME.
@@ -142,7 +142,12 @@ extern int ignore_crat;
/*
* Set sh_mem_config.retry_disable on GFXv9+ dGPUs
*/
-extern int vega10_noretry;
+extern int noretry;
+
+/*
+ * Halt if HWS hang is detected
+ */
+extern int halt_if_hws_hang;
/**
* enum kfd_sched_policy
@@ -180,9 +185,10 @@ enum cache_policy {
struct kfd_event_interrupt_class {
bool (*interrupt_isr)(struct kfd_dev *dev,
- const uint32_t *ih_ring_entry);
+ const uint32_t *ih_ring_entry, uint32_t *patched_ihre,
+ bool *patched_flag);
void (*interrupt_wq)(struct kfd_dev *dev,
- const uint32_t *ih_ring_entry);
+ const uint32_t *ih_ring_entry);
};
struct kfd_device_info {
@@ -197,6 +203,7 @@ struct kfd_device_info {
bool supports_cwsr;
bool needs_iommu_device;
bool needs_pci_atomics;
+ unsigned int num_sdma_engines;
};
struct kfd_mem_obj {
@@ -415,6 +422,9 @@ struct queue_properties {
uint32_t ctl_stack_size;
uint64_t tba_addr;
uint64_t tma_addr;
+ /* Relevant for CU masking */
+ uint32_t cu_mask_count; /* Must be a multiple of 32 */
+ uint32_t *cu_mask;
};
/**
@@ -806,12 +816,18 @@ int kfd_interrupt_init(struct kfd_dev *dev);
void kfd_interrupt_exit(struct kfd_dev *dev);
void kgd2kfd_interrupt(struct kfd_dev *kfd, const void *ih_ring_entry);
bool enqueue_ih_ring_entry(struct kfd_dev *kfd, const void *ih_ring_entry);
-bool interrupt_is_wanted(struct kfd_dev *dev, const uint32_t *ih_ring_entry);
+bool interrupt_is_wanted(struct kfd_dev *dev,
+ const uint32_t *ih_ring_entry,
+ uint32_t *patched_ihre, bool *flag);
/* Power Management */
void kgd2kfd_suspend(struct kfd_dev *kfd);
int kgd2kfd_resume(struct kfd_dev *kfd);
+/* GPU reset */
+int kgd2kfd_pre_reset(struct kfd_dev *kfd);
+int kgd2kfd_post_reset(struct kfd_dev *kfd);
+
/* amdkfd Apertures */
int kfd_init_apertures(struct kfd_process *process);
@@ -838,6 +854,7 @@ void device_queue_manager_uninit(struct device_queue_manager *dqm);
struct kernel_queue *kernel_queue_init(struct kfd_dev *dev,
enum kfd_queue_type type);
void kernel_queue_uninit(struct kernel_queue *kq);
+int kfd_process_vm_fault(struct device_queue_manager *dqm, unsigned int pasid);
/* Process Queue Manager */
struct process_queue_node {
@@ -858,6 +875,8 @@ int pqm_create_queue(struct process_queue_manager *pqm,
int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid);
int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
struct queue_properties *p);
+int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
+ struct queue_properties *p);
struct kernel_queue *pqm_get_kernel_queue(struct process_queue_manager *pqm,
unsigned int qid);
@@ -964,10 +983,17 @@ int kfd_event_create(struct file *devkfd, struct kfd_process *p,
uint64_t *event_page_offset, uint32_t *event_slot_index);
int kfd_event_destroy(struct kfd_process *p, uint32_t event_id);
+void kfd_signal_vm_fault_event(struct kfd_dev *dev, unsigned int pasid,
+ struct kfd_vm_fault_info *info);
+
+void kfd_signal_reset_event(struct kfd_dev *dev);
+
void kfd_flush_tlb(struct kfd_process_device *pdd);
int dbgdev_wave_reset_wavefronts(struct kfd_dev *dev, struct kfd_process *p);
+bool kfd_is_locked(void);
+
/* Debugfs */
#if defined(CONFIG_DEBUG_FS)
@@ -980,6 +1006,10 @@ int dqm_debugfs_hqds(struct seq_file *m, void *data);
int kfd_debugfs_rls_by_device(struct seq_file *m, void *data);
int pm_debugfs_runlist(struct seq_file *m, void *data);
+int kfd_debugfs_hang_hws(struct kfd_dev *dev);
+int pm_debugfs_hang_hws(struct packet_manager *pm);
+int dqm_debugfs_execute_queues(struct device_queue_manager *dqm);
+
#else
static inline void kfd_debugfs_init(void) {}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process.c b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
index 1d80b4f7c681..4694386cc623 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process.c
@@ -244,6 +244,8 @@ struct kfd_process *kfd_get_process(const struct task_struct *thread)
return ERR_PTR(-EINVAL);
process = find_process(thread);
+ if (!process)
+ return ERR_PTR(-EINVAL);
return process;
}
diff --git a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
index d65ce0436b31..c8cad9c078ae 100644
--- a/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
+++ b/drivers/gpu/drm/amd/amdkfd/kfd_process_queue_manager.c
@@ -186,8 +186,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
switch (type) {
case KFD_QUEUE_TYPE_SDMA:
- if (dev->dqm->queue_count >=
- CIK_SDMA_QUEUES_PER_ENGINE * CIK_SDMA_ENGINE_NUM) {
+ if (dev->dqm->queue_count >= get_num_sdma_queues(dev->dqm)) {
pr_err("Over-subscription is not allowed for SDMA.\n");
retval = -EPERM;
goto err_create_queue;
@@ -209,7 +208,7 @@ int pqm_create_queue(struct process_queue_manager *pqm,
KFD_SCHED_POLICY_HWS_NO_OVERSUBSCRIPTION) &&
((dev->dqm->processes_count >= dev->vm_info.vmid_num_kfd) ||
(dev->dqm->queue_count >= get_queues_num(dev->dqm)))) {
- pr_err("Over-subscription is not allowed in radeon_kfd.sched_policy == 1\n");
+ pr_debug("Over-subscription is not allowed when amdkfd.sched_policy == 1\n");
retval = -EPERM;
goto err_create_queue;
}
@@ -326,6 +325,8 @@ int pqm_destroy_queue(struct process_queue_manager *pqm, unsigned int qid)
if (retval != -ETIME)
goto err_destroy_queue;
}
+ kfree(pqn->q->properties.cu_mask);
+ pqn->q->properties.cu_mask = NULL;
uninit_queue(pqn->q);
}
@@ -366,6 +367,34 @@ int pqm_update_queue(struct process_queue_manager *pqm, unsigned int qid,
return 0;
}
+int pqm_set_cu_mask(struct process_queue_manager *pqm, unsigned int qid,
+ struct queue_properties *p)
+{
+ int retval;
+ struct process_queue_node *pqn;
+
+ pqn = get_queue_by_qid(pqm, qid);
+ if (!pqn) {
+ pr_debug("No queue %d exists for update operation\n", qid);
+ return -EFAULT;
+ }
+
+ /* Free the old CU mask memory if it is already allocated, then
+ * take ownership of the new CU mask allocated by the caller.
+ */
+ kfree(pqn->q->properties.cu_mask);
+
+ pqn->q->properties.cu_mask_count = p->cu_mask_count;
+ pqn->q->properties.cu_mask = p->cu_mask;
+
+ retval = pqn->q->device->dqm->ops.update_queue(pqn->q->device->dqm,
+ pqn->q);
+ if (retval != 0)
+ return retval;
+
+ return 0;
+}
+
struct kernel_queue *pqm_get_kernel_queue(
struct process_queue_manager *pqm,
unsigned int qid)
@@ -387,7 +416,7 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
struct process_queue_node *pqn;
struct queue *q;
enum KFD_MQD_TYPE mqd_type;
- struct mqd_manager *mqd_manager;
+ struct mqd_manager *mqd_mgr;
int r = 0;
list_for_each_entry(pqn, &pqm->queues, process_queue_list) {
@@ -410,11 +439,11 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
q->properties.type, q->device->id);
continue;
}
- mqd_manager = q->device->dqm->ops.get_mqd_manager(
+ mqd_mgr = q->device->dqm->ops.get_mqd_manager(
q->device->dqm, mqd_type);
} else if (pqn->kq) {
q = pqn->kq->queue;
- mqd_manager = pqn->kq->mqd;
+ mqd_mgr = pqn->kq->mqd_mgr;
switch (q->properties.type) {
case KFD_QUEUE_TYPE_DIQ:
seq_printf(m, " DIQ on device %x\n",
@@ -434,7 +463,7 @@ int pqm_debugfs_mqds(struct seq_file *m, void *data)
continue;
}
- r = mqd_manager->debugfs_show_mqd(m, q->mqd);
+ r = mqd_mgr->debugfs_show_mqd(m, q->mqd);
if (r != 0)
break;
}
diff --git a/drivers/gpu/drm/amd/display/Kconfig b/drivers/gpu/drm/amd/display/Kconfig
index 325083b0297e..ed654a76c76a 100644
--- a/drivers/gpu/drm/amd/display/Kconfig
+++ b/drivers/gpu/drm/amd/display/Kconfig
@@ -4,11 +4,17 @@ menu "Display Engine Configuration"
config DRM_AMD_DC
bool "AMD DC - Enable new display engine"
default y
+ select DRM_AMD_DC_DCN1_0 if X86 && !(KCOV_INSTRUMENT_ALL && KCOV_ENABLE_COMPARISONS)
help
Choose this option if you want to use the new display engine
support for AMDGPU. This adds required support for Vega and
Raven ASICs.
+config DRM_AMD_DC_DCN1_0
+ def_bool n
+ help
+	  Raven (RV) family support for the display engine
+
config DEBUG_KERNEL_DC
bool "Enable kgdb break in DC"
depends on DRM_AMD_DC
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
index 5fc13e71a3b5..af6adffba788 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.c
@@ -58,9 +58,7 @@
#include <drm/drm_fb_helper.h>
#include <drm/drm_edid.h>
-#include "modules/inc/mod_freesync.h"
-
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "ivsrcid/irqsrcs_dcn_1_0.h"
#include "dcn/dcn_1_0_offset.h"
@@ -898,6 +896,7 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
aconnector->dc_sink = sink;
if (sink->dc_edid.length == 0) {
aconnector->edid = NULL;
+ drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
} else {
aconnector->edid =
(struct edid *) sink->dc_edid.raw_edid;
@@ -905,10 +904,13 @@ amdgpu_dm_update_connector_after_detect(struct amdgpu_dm_connector *aconnector)
drm_connector_update_edid_property(connector,
aconnector->edid);
+ drm_dp_cec_set_edid(&aconnector->dm_dp_aux.aux,
+ aconnector->edid);
}
amdgpu_dm_add_sink_to_freesync_module(connector, aconnector->edid);
} else {
+ drm_dp_cec_unset_edid(&aconnector->dm_dp_aux.aux);
amdgpu_dm_remove_sink_from_freesync_module(connector);
drm_connector_update_edid_property(connector, NULL);
aconnector->num_modes = 0;
@@ -1063,8 +1065,10 @@ static void handle_hpd_rx_irq(void *param)
(dc_link->type == dc_connection_mst_branch))
dm_handle_hpd_rx_irq(aconnector);
- if (dc_link->type != dc_connection_mst_branch)
+ if (dc_link->type != dc_connection_mst_branch) {
+ drm_dp_cec_irq(&aconnector->dm_dp_aux.aux);
mutex_unlock(&aconnector->hpd_lock);
+ }
}
static void register_hpd_handlers(struct amdgpu_device *adev)
@@ -1192,7 +1196,7 @@ static int dce110_register_irq_handlers(struct amdgpu_device *adev)
return 0;
}
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
/* Register IRQ sources and initialize IRQ callbacks */
static int dcn10_register_irq_handlers(struct amdgpu_device *adev)
{
@@ -1320,7 +1324,12 @@ static int amdgpu_dm_backlight_update_status(struct backlight_device *bd)
static int amdgpu_dm_backlight_get_brightness(struct backlight_device *bd)
{
- return bd->props.brightness;
+ struct amdgpu_display_manager *dm = bl_get_data(bd);
+ int ret = dc_link_get_backlight_level(dm->backlight_link);
+
+ if (ret == DC_ERROR_UNEXPECTED)
+ return bd->props.brightness;
+ return ret;
}
static const struct backlight_ops amdgpu_dm_backlight_ops = {
@@ -1335,6 +1344,7 @@ amdgpu_dm_register_backlight_device(struct amdgpu_display_manager *dm)
struct backlight_properties props = { 0 };
props.max_brightness = AMDGPU_MAX_BL_LEVEL;
+ props.brightness = AMDGPU_MAX_BL_LEVEL;
props.type = BACKLIGHT_RAW;
snprintf(bl_name, sizeof(bl_name), "amdgpu_bl%d",
@@ -1526,16 +1536,12 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
break;
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case CHIP_RAVEN:
if (dcn10_register_irq_handlers(dm->adev)) {
DRM_ERROR("DM: Failed to initialize IRQ\n");
goto fail;
}
- /*
- * Temporary disable until pplib/smu interaction is implemented
- */
- dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
break;
#endif
default:
@@ -1543,6 +1549,9 @@ static int amdgpu_dm_initialize_drm_device(struct amdgpu_device *adev)
goto fail;
}
+ if (adev->asic_type != CHIP_CARRIZO && adev->asic_type != CHIP_STONEY)
+ dm->dc->debug.disable_stutter = amdgpu_pp_feature_mask & PP_STUTTER_MODE ? false : true;
+
return 0;
fail:
kfree(aencoder);
@@ -1574,18 +1583,6 @@ static void dm_bandwidth_update(struct amdgpu_device *adev)
/* TODO: implement later */
}
-static void dm_set_backlight_level(struct amdgpu_encoder *amdgpu_encoder,
- u8 level)
-{
- /* TODO: translate amdgpu_encoder to display_index and call DAL */
-}
-
-static u8 dm_get_backlight_level(struct amdgpu_encoder *amdgpu_encoder)
-{
- /* TODO: translate amdgpu_encoder to display_index and call DAL */
- return 0;
-}
-
static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
struct drm_file *filp)
{
@@ -1614,10 +1611,8 @@ static int amdgpu_notify_freesync(struct drm_device *dev, void *data,
static const struct amdgpu_display_funcs dm_display_funcs = {
.bandwidth_update = dm_bandwidth_update, /* called unconditionally */
.vblank_get_counter = dm_vblank_get_counter,/* called unconditionally */
- .backlight_set_level =
- dm_set_backlight_level,/* called unconditionally */
- .backlight_get_level =
- dm_get_backlight_level,/* called unconditionally */
+ .backlight_set_level = NULL, /* never called for DC */
+ .backlight_get_level = NULL, /* never called for DC */
.hpd_sense = NULL,/* called unconditionally */
.hpd_set_polarity = NULL, /* called unconditionally */
.hpd_get_gpio_reg = NULL, /* VBIOS parsing. DAL does it. */
@@ -1725,7 +1720,7 @@ static int dm_early_init(void *handle)
adev->mode_info.num_dig = 6;
adev->mode_info.plane_type = dm_plane_type_default;
break;
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case CHIP_RAVEN:
adev->mode_info.num_crtc = 4;
adev->mode_info.num_hpd = 4;
@@ -2124,13 +2119,8 @@ convert_color_depth_from_display_info(const struct drm_connector *connector)
static enum dc_aspect_ratio
get_aspect_ratio(const struct drm_display_mode *mode_in)
{
- int32_t width = mode_in->crtc_hdisplay * 9;
- int32_t height = mode_in->crtc_vdisplay * 16;
-
- if ((width - height) < 10 && (width - height) > -10)
- return ASPECT_RATIO_16_9;
- else
- return ASPECT_RATIO_4_3;
+ /* 1-1 mapping, since both enums follow the HDMI spec. */
+ return (enum dc_aspect_ratio) mode_in->picture_aspect_ratio;
}
static enum dc_color_space
@@ -2611,6 +2601,7 @@ static const struct drm_crtc_funcs amdgpu_dm_crtc_funcs = {
.atomic_duplicate_state = dm_crtc_duplicate_state,
.atomic_destroy_state = dm_crtc_destroy_state,
.set_crc_source = amdgpu_dm_crtc_set_crc_source,
+ .verify_crc_source = amdgpu_dm_crtc_verify_crc_source,
.enable_vblank = dm_enable_vblank,
.disable_vblank = dm_disable_vblank,
};
@@ -2746,6 +2737,7 @@ static void amdgpu_dm_connector_destroy(struct drm_connector *connector)
dm->backlight_dev = NULL;
}
#endif
+ drm_dp_cec_unregister_connector(&aconnector->dm_dp_aux.aux);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(connector);
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
index a29dc35954c9..54056d180003 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm.h
@@ -258,11 +258,14 @@ amdgpu_dm_remove_sink_from_freesync_module(struct drm_connector *connector);
/* amdgpu_dm_crc.c */
#ifdef CONFIG_DEBUG_FS
-int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name,
- size_t *values_cnt);
+int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name);
+int amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc,
+ const char *src_name,
+ size_t *values_cnt);
void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc);
#else
#define amdgpu_dm_crtc_set_crc_source NULL
+#define amdgpu_dm_crtc_verify_crc_source NULL
#define amdgpu_dm_crtc_handle_crc_irq(x)
#endif
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
index b329393307e5..326f6fb7e0bc 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_color.c
@@ -231,18 +231,21 @@ void amdgpu_dm_set_ctm(struct dm_crtc_state *crtc)
* preparation for hardware commit. If no lut is specified by user, we default
* to SRGB degamma.
*
- * Currently, we only support degamma bypass, or preprogrammed SRGB degamma.
- * Programmable degamma is not supported, and an attempt to do so will return
- * -EINVAL.
+ * We support degamma bypass, predefined SRGB, and custom degamma.
*
* RETURNS:
- * 0 on success, -EINVAL if custom degamma curve is given.
+ * 0 on success
+ * -EINVAL if crtc_state has a degamma_lut of invalid size
+ * -ENOMEM if gamma allocation fails
*/
int amdgpu_dm_set_degamma_lut(struct drm_crtc_state *crtc_state,
struct dc_plane_state *dc_plane_state)
{
struct drm_property_blob *blob = crtc_state->degamma_lut;
struct drm_color_lut *lut;
+ uint32_t lut_size;
+ struct dc_gamma *gamma;
+ bool ret;
if (!blob) {
/* Default to SRGB */
@@ -258,11 +261,30 @@ int amdgpu_dm_set_degamma_lut(struct drm_crtc_state *crtc_state,
return 0;
}
- /* Otherwise, assume SRGB, since programmable degamma is not
- * supported.
- */
- dc_plane_state->in_transfer_func->type = TF_TYPE_PREDEFINED;
- dc_plane_state->in_transfer_func->tf = TRANSFER_FUNCTION_SRGB;
- return -EINVAL;
+ gamma = dc_create_gamma();
+ if (!gamma)
+ return -ENOMEM;
+
+ lut_size = blob->length / sizeof(struct drm_color_lut);
+ gamma->num_entries = lut_size;
+ if (gamma->num_entries == MAX_COLOR_LUT_ENTRIES)
+ gamma->type = GAMMA_CUSTOM;
+ else {
+ dc_gamma_release(&gamma);
+ return -EINVAL;
+ }
+
+ __drm_lut_to_dc_gamma(lut, gamma, false);
+
+ dc_plane_state->in_transfer_func->type = TF_TYPE_DISTRIBUTED_POINTS;
+ ret = mod_color_calculate_degamma_params(dc_plane_state->in_transfer_func, gamma, true);
+ dc_gamma_release(&gamma);
+ if (!ret) {
+ dc_plane_state->in_transfer_func->type = TF_TYPE_BYPASS;
+ DRM_ERROR("Out of memory when calculating degamma params\n");
+ return -ENOMEM;
+ }
+
+ return 0;
}
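
From user space the custom path above is driven through the CRTC's DEGAMMA_LUT property. A hedged libdrm sketch (the property-ID lookup is elided, and LUT_ENTRIES assumes MAX_COLOR_LUT_ENTRIES is the full 4096-entry hardware LUT; anything smaller now fails with -EINVAL):

#include <stdint.h>
#include <xf86drmMode.h>

#define LUT_ENTRIES 4096	/* assumed value of MAX_COLOR_LUT_ENTRIES */

static int set_identity_degamma(int fd, uint32_t crtc_id, uint32_t degamma_prop_id)
{
	static struct drm_color_lut lut[LUT_ENTRIES];
	uint32_t blob_id;
	int i;

	for (i = 0; i < LUT_ENTRIES; i++) {	/* linear identity ramp */
		uint16_t v = (uint64_t)i * 0xffff / (LUT_ENTRIES - 1);
		lut[i].red = lut[i].green = lut[i].blue = v;
	}
	if (drmModeCreatePropertyBlob(fd, lut, sizeof(lut), &blob_id))
		return -1;
	return drmModeObjectSetProperty(fd, crtc_id, DRM_MODE_OBJECT_CRTC,
					degamma_prop_id, blob_id);
}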
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
index 52f2c01349e3..01fc5717b657 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_crc.c
@@ -46,8 +46,23 @@ static enum amdgpu_dm_pipe_crc_source dm_parse_crc_source(const char *source)
return AMDGPU_DM_PIPE_CRC_SOURCE_INVALID;
}
-int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name,
- size_t *values_cnt)
+int
+amdgpu_dm_crtc_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
+ size_t *values_cnt)
+{
+ enum amdgpu_dm_pipe_crc_source source = dm_parse_crc_source(src_name);
+
+ if (source < 0) {
+ DRM_DEBUG_DRIVER("Unknown CRC source %s for CRTC%d\n",
+ src_name, crtc->index);
+ return -EINVAL;
+ }
+
+ *values_cnt = 3;
+ return 0;
+}
+
+int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name)
{
struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state);
struct dc_stream_state *stream_state = crtc_state->stream;
@@ -83,7 +98,6 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name,
return -EINVAL;
}
- *values_cnt = 3;
/* Reset crc_skipped on dm state */
crtc_state->crc_skip_count = 0;
return 0;
@@ -98,10 +112,16 @@ int amdgpu_dm_crtc_set_crc_source(struct drm_crtc *crtc, const char *src_name,
*/
void amdgpu_dm_crtc_handle_crc_irq(struct drm_crtc *crtc)
{
- struct dm_crtc_state *crtc_state = to_dm_crtc_state(crtc->state);
- struct dc_stream_state *stream_state = crtc_state->stream;
+ struct dm_crtc_state *crtc_state;
+ struct dc_stream_state *stream_state;
uint32_t crcs[3];
+ if (crtc == NULL)
+ return;
+
+ crtc_state = to_dm_crtc_state(crtc->state);
+ stream_state = crtc_state->stream;
+
/* Early return if CRC capture is not enabled. */
if (!crtc_state->crc_enabled)
return;
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
index 9a300732ba37..18a3a6e5ffa0 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_mst_types.c
@@ -496,6 +496,8 @@ void amdgpu_dm_initialize_dp_connector(struct amdgpu_display_manager *dm,
aconnector->dm_dp_aux.ddc_service = aconnector->dc_link->ddc;
drm_dp_aux_register(&aconnector->dm_dp_aux.aux);
+ drm_dp_cec_register_connector(&aconnector->dm_dp_aux.aux,
+ aconnector->base.name, dm->adev->dev);
aconnector->mst_mgr.cbs = &dm_mst_cbs;
drm_dp_mst_topology_mgr_init(
&aconnector->mst_mgr,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
index c69ae78d82b2..4ba0003a9d32 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_pp_smu.c
@@ -469,8 +469,8 @@ bool dm_pp_get_static_clocks(
return false;
static_clk_info->max_clocks_state = pp_to_dc_powerlevel_state(pp_clk_info.max_clocks_state);
- static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock;
- static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock;
+ static_clk_info->max_mclk_khz = pp_clk_info.max_memory_clock * 10;
+ static_clk_info->max_sclk_khz = pp_clk_info.max_engine_clock * 10;
return true;
}
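The new factor of 10 converts powerplay's clock units (10 kHz) into the kHz
that DC's static clock info expects. A standalone sketch with made-up values:

#include <stdio.h>

int main(void)
{
	unsigned int max_memory_clock = 80000;             /* powerplay units: 10 kHz */
	unsigned int max_mclk_khz = max_memory_clock * 10;

	printf("%u x 10 kHz -> %u kHz (%u MHz)\n",
	       max_memory_clock, max_mclk_khz, max_mclk_khz / 1000);
	return 0;
}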
@@ -480,12 +480,20 @@ void pp_rv_set_display_requirement(struct pp_smu *pp,
{
struct dc_context *ctx = pp->ctx;
struct amdgpu_device *adev = ctx->driver_context;
+ void *pp_handle = adev->powerplay.pp_handle;
const struct amd_pm_funcs *pp_funcs = adev->powerplay.pp_funcs;
+ struct pp_display_clock_request clock = {0};
- if (!pp_funcs || !pp_funcs->display_configuration_changed)
+ if (!pp_funcs || !pp_funcs->display_clock_voltage_request)
return;
- amdgpu_dpm_display_configuration_changed(adev);
+ clock.clock_type = amd_pp_dcf_clock;
+ clock.clock_freq_in_khz = req->hard_min_dcefclk_khz;
+ pp_funcs->display_clock_voltage_request(pp_handle, &clock);
+
+ clock.clock_type = amd_pp_f_clock;
+ clock.clock_freq_in_khz = req->hard_min_fclk_khz;
+ pp_funcs->display_clock_voltage_request(pp_handle, &clock);
}
void pp_rv_set_wm_ranges(struct pp_smu *pp,
diff --git a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
index 9f0a217603ad..516795342dd2 100644
--- a/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
+++ b/drivers/gpu/drm/amd/display/amdgpu_dm/amdgpu_dm_services.c
@@ -74,4 +74,3 @@ bool dm_read_persistent_data(struct dc_context *ctx,
/**** power component interfaces ****/
-
diff --git a/drivers/gpu/drm/amd/display/dc/Makefile b/drivers/gpu/drm/amd/display/dc/Makefile
index 532a515fda9a..aed538a4d1ba 100644
--- a/drivers/gpu/drm/amd/display/dc/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/Makefile
@@ -25,7 +25,7 @@
DC_LIBS = basics bios calcs dce gpio i2caux irq virtual
-ifdef CONFIG_X86
+ifdef CONFIG_DRM_AMD_DC_DCN1_0
DC_LIBS += dcn10 dml
endif
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table.c b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
index 651e1fd4622f..a558bfaa0c46 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table.c
@@ -808,6 +808,24 @@ static enum bp_result transmitter_control_v1_5(
* (=1: 8bpp, =1.25: 10bpp, =1.5:12bpp, =2: 16bpp)
* LVDS mode: usPixelClock = pixel clock
*/
+ if (cntl->signal == SIGNAL_TYPE_HDMI_TYPE_A) {
+ switch (cntl->color_depth) {
+ case COLOR_DEPTH_101010:
+ params.usSymClock =
+ cpu_to_le16((le16_to_cpu(params.usSymClock) * 30) / 24);
+ break;
+ case COLOR_DEPTH_121212:
+ params.usSymClock =
+ cpu_to_le16((le16_to_cpu(params.usSymClock) * 36) / 24);
+ break;
+ case COLOR_DEPTH_161616:
+ params.usSymClock =
+ cpu_to_le16((le16_to_cpu(params.usSymClock) * 48) / 24);
+ break;
+ default:
+ break;
+ }
+ }
if (EXEC_BIOS_CMD_TABLE(UNIPHYTransmitterControl, params))
result = BP_RESULT_OK;
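The scaling added above follows the HDMI deep-color rule: the TMDS symbol
clock runs at bpp/24 times the 24-bpp rate. A standalone sketch of the
arithmetic, using 1080p60's 148.5 MHz as an illustrative baseline:

#include <stdio.h>

int main(void)
{
	unsigned int sym_clock = 148500; /* kHz at 24 bpp, illustrative */

	printf("10 bpc: %u kHz\n", sym_clock * 30 / 24); /* 185625 */
	printf("12 bpc: %u kHz\n", sym_clock * 36 / 24); /* 222750 */
	printf("16 bpc: %u kHz\n", sym_clock * 48 / 24); /* 297000 */
	return 0;
}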
diff --git a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
index 770ff89ba7e1..bbbcef566c55 100644
--- a/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
+++ b/drivers/gpu/drm/amd/display/dc/bios/command_table_helper2.c
@@ -55,7 +55,7 @@ bool dal_bios_parser_init_cmd_tbl_helper2(
case DCE_VERSION_11_22:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case DCN_VERSION_1_0:
*h = dal_cmd_tbl_helper_dce112_get_table2();
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/Makefile b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
index 416500e51b8d..95f332ee3e7e 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/calcs/Makefile
@@ -38,7 +38,7 @@ CFLAGS_dcn_calc_math.o := $(calcs_ccflags) -Wno-tautological-compare
BW_CALCS = dce_calcs.o bw_fixed.o custom_float.o
-ifdef CONFIG_X86
+ifdef CONFIG_DRM_AMD_DC_DCN1_0
BW_CALCS += dcn_calcs.o dcn_calc_math.o dcn_calc_auto.o
endif
diff --git a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
index 080f777d705e..bd039322f697 100644
--- a/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
+++ b/drivers/gpu/drm/amd/display/dc/calcs/dcn_calcs.c
@@ -676,7 +676,7 @@ static void hack_force_pipe_split(struct dcn_bw_internal_vars *v,
}
static void hack_bounding_box(struct dcn_bw_internal_vars *v,
- struct dc_debug *dbg,
+ struct dc_debug_options *dbg,
struct dc_state *context)
{
if (dbg->pipe_split_policy == MPC_SPLIT_AVOID)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc.c b/drivers/gpu/drm/amd/display/dc/core/dc.c
index 733ac224e7fd..6ae050dc3220 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc.c
@@ -52,6 +52,8 @@
#include "dm_helpers.h"
#include "mem_input.h"
#include "hubp.h"
+
+#include "dc_link_dp.h"
#define DC_LOGGER \
dc->ctx->logger
@@ -419,8 +421,17 @@ void dc_link_set_preferred_link_settings(struct dc *dc,
struct dc_link_settings *link_setting,
struct dc_link *link)
{
- link->preferred_link_setting = *link_setting;
- dp_retrain_link_dp_test(link, link_setting, false);
+ struct dc_link_settings store_settings = *link_setting;
+ struct dc_stream_state *link_stream =
+ link->dc->current_state->res_ctx.pipe_ctx[0].stream;
+
+ link->preferred_link_setting = store_settings;
+ if (link_stream)
+ decide_link_settings(link_stream, &store_settings);
+
+ if ((store_settings.lane_count != LANE_COUNT_UNKNOWN) &&
+ (store_settings.link_rate != LINK_RATE_UNKNOWN))
+ dp_retrain_link_dp_test(link, &store_settings, false);
}
void dc_link_enable_hpd(const struct dc_link *link)
@@ -476,7 +487,7 @@ static void destruct(struct dc *dc)
kfree(dc->bw_dceip);
dc->bw_dceip = NULL;
-#ifdef CONFIG_X86
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
kfree(dc->dcn_soc);
dc->dcn_soc = NULL;
@@ -492,7 +503,7 @@ static bool construct(struct dc *dc,
struct dc_context *dc_ctx;
struct bw_calcs_dceip *dc_dceip;
struct bw_calcs_vbios *dc_vbios;
-#ifdef CONFIG_X86
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
struct dcn_soc_bounding_box *dcn_soc;
struct dcn_ip_params *dcn_ip;
#endif
@@ -514,7 +525,7 @@ static bool construct(struct dc *dc,
}
dc->bw_vbios = dc_vbios;
-#ifdef CONFIG_X86
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
dcn_soc = kzalloc(sizeof(*dcn_soc), GFP_KERNEL);
if (!dcn_soc) {
dm_error("%s: failed to create dcn_soc\n", __func__);
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
index caece7c13bc6..e1ebdf7b5eaf 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_debug.c
@@ -348,7 +348,7 @@ void context_clock_trace(
struct dc *dc,
struct dc_state *context)
{
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
DC_LOGGER_INIT(dc->ctx->logger);
CLOCK_TRACE("Current: dispclk_khz:%d max_dppclk_khz:%d dcfclk_khz:%d\n"
"dcfclk_deep_sleep_khz:%d fclk_khz:%d socclk_khz:%d\n",
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link.c b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
index a4429c90c60c..37eaf72ace54 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link.c
@@ -60,7 +60,14 @@
enum {
LINK_RATE_REF_FREQ_IN_MHZ = 27,
- PEAK_FACTOR_X1000 = 1006
+ PEAK_FACTOR_X1000 = 1006,
+ /*
+ * Some receivers fail to train on the first try and are good
+ * on subsequent tries. Two retries should be plenty; if we
+ * don't get a successful training by then, we don't expect
+ * ever to get one.
+ */
+ LINK_TRAINING_MAX_VERIFY_RETRY = 2
};
/*******************************************************************************
@@ -737,6 +744,22 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
break;
case EDID_NO_RESPONSE:
DC_LOG_ERROR("No EDID read.\n");
+
+ /*
+ * Abort detection for non-DP connectors if we have
+ * no EDID
+ *
+ * DP needs to report as connected if HPD is high
+ * even if we have no EDID in order to go to
+ * fail-safe mode
+ */
+ if (dc_is_hdmi_signal(link->connector_signal) ||
+ dc_is_dvi_signal(link->connector_signal)) {
+ if (prev_sink != NULL)
+ dc_sink_release(prev_sink);
+
+ return false;
+ }
default:
break;
}
@@ -745,30 +768,41 @@ bool dc_link_detect(struct dc_link *link, enum dc_detect_reason reason)
if ((prev_sink != NULL) && ((edid_status == EDID_THE_SAME) || (edid_status == EDID_OK)))
same_edid = is_same_edid(&prev_sink->dc_edid, &sink->dc_edid);
- // If both edid and dpcd are the same, then discard new sink and revert back to original sink
- if ((same_edid) && (same_dpcd)) {
- link_disconnect_remap(prev_sink, link);
- sink = prev_sink;
- prev_sink = NULL;
- } else {
- if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
- sink_caps.transaction_type ==
- DDC_TRANSACTION_TYPE_I2C_OVER_AUX) {
- /*
- * TODO debug why Dell 2413 doesn't like
- * two link trainings
- */
+ if (link->connector_signal == SIGNAL_TYPE_DISPLAY_PORT &&
+ sink_caps.transaction_type == DDC_TRANSACTION_TYPE_I2C_OVER_AUX &&
+ reason != DETECT_REASON_HPDRX) {
+ /*
+ * TODO debug why Dell 2413 doesn't like
+ * two link trainings
+ */
+
+ /* deal with non-mst cases */
+ for (i = 0; i < LINK_TRAINING_MAX_VERIFY_RETRY; i++) {
+ int fail_count = 0;
+
+ dp_verify_link_cap(link,
+ &link->reported_link_cap,
+ &fail_count);
- /* deal with non-mst cases */
- dp_hbr_verify_link_cap(link, &link->reported_link_cap);
+ if (fail_count == 0)
+ break;
}
- /* HDMI-DVI Dongle */
- if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
- !sink->edid_caps.edid_hdmi)
- sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+ } else {
+ // If the EDID is the same, discard the new sink and revert to the original sink
+ if (same_edid) {
+ link_disconnect_remap(prev_sink, link);
+ sink = prev_sink;
+ prev_sink = NULL;
+
+ }
}
+ /* HDMI-DVI Dongle */
+ if (sink->sink_signal == SIGNAL_TYPE_HDMI_TYPE_A &&
+ !sink->edid_caps.edid_hdmi)
+ sink->sink_signal = SIGNAL_TYPE_DVI_SINGLE_LINK;
+
/* Connectivity log: detection */
for (i = 0; i < sink->dc_edid.length / EDID_BLOCK_SIZE; i++) {
CONN_DATA_DETECT(link,
@@ -1016,6 +1050,9 @@ static bool construct(
goto create_fail;
}
+ if (link->dc->res_pool->funcs->link_init)
+ link->dc->res_pool->funcs->link_init(link);
+
hpd_gpio = get_hpd_gpio(link->ctx->dc_bios, link->link_id, link->ctx->gpio_service);
if (hpd_gpio != NULL)
@@ -1779,6 +1816,8 @@ static void enable_link_hdmi(struct pipe_ctx *pipe_ctx)
bool is_vga_mode = (stream->timing.h_addressable == 640)
&& (stream->timing.v_addressable == 480);
+ if (stream->phy_pix_clk == 0)
+ stream->phy_pix_clk = stream->timing.pix_clk_khz;
if (stream->phy_pix_clk > 340000)
is_over_340mhz = true;
@@ -1996,6 +2035,15 @@ enum dc_status dc_link_validate_mode_timing(
return DC_OK;
}
+int dc_link_get_backlight_level(const struct dc_link *link)
+{
+ struct abm *abm = link->ctx->dc->res_pool->abm;
+
+ if (abm == NULL || abm->funcs->get_current_backlight_8_bit == NULL)
+ return DC_ERROR_UNEXPECTED;
+
+ return (int) abm->funcs->get_current_backlight_8_bit(abm);
+}
bool dc_link_set_backlight_level(const struct dc_link *link, uint32_t level,
uint32_t frame_ramp, const struct dc_stream_state *stream)
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
index 08c9d73b9ab7..8def0d9fa0ff 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_ddc.c
@@ -33,10 +33,7 @@
#include "include/vector.h"
#include "core_types.h"
#include "dc_link_ddc.h"
-#include "i2caux/engine.h"
-#include "i2caux/i2c_engine.h"
-#include "i2caux/aux_engine.h"
-#include "i2caux/i2caux.h"
+#include "aux_engine.h"
#define AUX_POWER_UP_WA_DELAY 500
#define I2C_OVER_AUX_DEFER_WA_DELAY 70
@@ -641,9 +638,8 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
enum aux_transaction_type type,
enum i2caux_transaction_action action)
{
- struct i2caux *i2caux = ddc->ctx->i2caux;
struct ddc *ddc_pin = ddc->ddc_pin;
- struct aux_engine *engine;
+ struct aux_engine *aux_engine;
enum aux_channel_operation_result operation_result;
struct aux_request_transaction_data aux_req;
struct aux_reply_transaction_data aux_rep;
@@ -654,7 +650,8 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
memset(&aux_req, 0, sizeof(aux_req));
memset(&aux_rep, 0, sizeof(aux_rep));
- engine = i2caux->funcs->acquire_aux_engine(i2caux, ddc_pin);
+ aux_engine = ddc->ctx->dc->res_pool->engines[ddc_pin->pin_data->en];
+ aux_engine->funcs->acquire(aux_engine, ddc_pin);
aux_req.type = type;
aux_req.action = action;
@@ -664,15 +661,15 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
aux_req.length = size;
aux_req.data = buffer;
- engine->funcs->submit_channel_request(engine, &aux_req);
- operation_result = engine->funcs->get_channel_status(engine, &returned_bytes);
+ aux_engine->funcs->submit_channel_request(aux_engine, &aux_req);
+ operation_result = aux_engine->funcs->get_channel_status(aux_engine, &returned_bytes);
switch (operation_result) {
case AUX_CHANNEL_OPERATION_SUCCEEDED:
res = returned_bytes;
if (res <= size && res >= 0)
- res = engine->funcs->read_channel_reply(engine, size,
+ res = aux_engine->funcs->read_channel_reply(aux_engine, size,
buffer, reply,
&status);
@@ -686,8 +683,7 @@ int dc_link_aux_transfer(struct ddc_service *ddc,
res = -1;
break;
}
-
- i2caux->funcs->release_engine(i2caux, &engine->base);
+ aux_engine->funcs->release_engine(aux_engine);
return res;
}
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
index 474cd3e01752..a7553b6d59c2 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_link_dp.c
@@ -953,7 +953,10 @@ enum link_training_result dc_link_dp_perform_link_training(
* LINK_SPREAD_05_DOWNSPREAD_30KHZ :
* LINK_SPREAD_DISABLED;
*/
- lt_settings.link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ;
+ if (link->dp_ss_off)
+ lt_settings.link_settings.link_spread = LINK_SPREAD_DISABLED;
+ else
+ lt_settings.link_settings.link_spread = LINK_SPREAD_05_DOWNSPREAD_30KHZ;
/* 1. set link rate, lane count and spread*/
dpcd_set_link_settings(link, &lt_settings);
@@ -1029,7 +1032,7 @@ enum link_training_result dc_link_dp_perform_link_training(
lt_settings.lane_settings[0].PRE_EMPHASIS);
if (status != LINK_TRAINING_SUCCESS)
- link->ctx->dc->debug.debug_data.ltFailCount++;
+ link->ctx->dc->debug_data.ltFailCount++;
return status;
}
@@ -1086,9 +1089,10 @@ static struct dc_link_settings get_max_link_cap(struct dc_link *link)
return max_link_cap;
}
-bool dp_hbr_verify_link_cap(
+bool dp_verify_link_cap(
struct dc_link *link,
- struct dc_link_settings *known_limit_link_setting)
+ struct dc_link_settings *known_limit_link_setting,
+ int *fail_count)
{
struct dc_link_settings max_link_cap = {0};
struct dc_link_settings cur_link_setting = {0};
@@ -1101,6 +1105,11 @@ bool dp_hbr_verify_link_cap(
enum clock_source_id dp_cs_id = CLOCK_SOURCE_ID_EXTERNAL;
enum link_training_result status;
+ if (link->dc->debug.skip_detection_link_training) {
+ link->verified_link_cap = *known_limit_link_setting;
+ return true;
+ }
+
success = false;
skip_link_training = false;
@@ -1155,6 +1164,8 @@ bool dp_hbr_verify_link_cap(
skip_video_pattern);
if (status == LINK_TRAINING_SUCCESS)
success = true;
+ else
+ (*fail_count)++;
}
if (success)
@@ -1776,12 +1787,10 @@ static void dp_test_send_link_training(struct dc_link *link)
dp_retrain_link_dp_test(link, &link_settings, false);
}
-/* TODO hbr2 compliance eye output is unstable
+/* TODO Raven hbr2 compliance eye output is unstable
* (toggling on and off) with debugger break
* This causes intermittent PHY automation failure
* Need to look into the root cause */
-static uint8_t force_tps4_for_cp2520 = 1;
-
static void dp_test_send_phy_test_pattern(struct dc_link *link)
{
union phy_test_pattern dpcd_test_pattern;
@@ -1841,13 +1850,13 @@ static void dp_test_send_phy_test_pattern(struct dc_link *link)
break;
case PHY_TEST_PATTERN_CP2520_1:
/* CP2520 pattern is unstable, temporarily use TPS4 instead */
- test_pattern = (force_tps4_for_cp2520 == 1) ?
+ test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
DP_TEST_PATTERN_TRAINING_PATTERN4 :
DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
break;
case PHY_TEST_PATTERN_CP2520_2:
/* CP2520 pattern is unstable, temporarily use TPS4 instead */
- test_pattern = (force_tps4_for_cp2520 == 1) ?
+ test_pattern = (link->dc->caps.force_dp_tps4_for_cp2520 == 1) ?
DP_TEST_PATTERN_TRAINING_PATTERN4 :
DP_TEST_PATTERN_HBR2_COMPLIANCE_EYE;
break;
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
index 2e65715f76a1..ea6beccfd89d 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_resource.c
@@ -41,7 +41,7 @@
#include "dce100/dce100_resource.h"
#include "dce110/dce110_resource.h"
#include "dce112/dce112_resource.h"
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "dcn10/dcn10_resource.h"
#endif
#include "dce120/dce120_resource.h"
@@ -85,7 +85,7 @@ enum dce_version resource_parse_asic_id(struct hw_asic_id asic_id)
case FAMILY_AI:
dc_version = DCE_VERSION_12_0;
break;
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case FAMILY_RV:
dc_version = DCN_VERSION_1_0;
break;
@@ -136,7 +136,7 @@ struct resource_pool *dc_create_resource_pool(
num_virtual_links, dc);
break;
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case DCN_VERSION_1_0:
res_pool = dcn10_create_resource_pool(
num_virtual_links, dc);
@@ -268,24 +268,30 @@ bool resource_construct(
return true;
}
+static int find_matching_clock_source(
+ const struct resource_pool *pool,
+ struct clock_source *clock_source)
+{
+ int i;
+
+ for (i = 0; i < pool->clk_src_count; i++) {
+ if (pool->clock_sources[i] == clock_source)
+ return i;
+ }
+ return -1;
+}
void resource_unreference_clock_source(
struct resource_context *res_ctx,
const struct resource_pool *pool,
struct clock_source *clock_source)
{
- int i;
-
- for (i = 0; i < pool->clk_src_count; i++) {
- if (pool->clock_sources[i] != clock_source)
- continue;
+ int i = find_matching_clock_source(pool, clock_source);
+ if (i > -1)
res_ctx->clock_source_ref_count[i]--;
- break;
- }
-
if (pool->dp_clock_source == clock_source)
res_ctx->dp_clock_source_ref_count--;
}
@@ -295,19 +301,31 @@ void resource_reference_clock_source(
const struct resource_pool *pool,
struct clock_source *clock_source)
{
- int i;
- for (i = 0; i < pool->clk_src_count; i++) {
- if (pool->clock_sources[i] != clock_source)
- continue;
+ int i = find_matching_clock_source(pool, clock_source);
+ if (i > -1)
res_ctx->clock_source_ref_count[i]++;
- break;
- }
if (pool->dp_clock_source == clock_source)
res_ctx->dp_clock_source_ref_count++;
}
+int resource_get_clock_source_reference(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct clock_source *clock_source)
+{
+ int i = find_matching_clock_source(pool, clock_source);
+
+ if (i > -1)
+ return res_ctx->clock_source_ref_count[i];
+
+ if (pool->dp_clock_source == clock_source)
+ return res_ctx->dp_clock_source_ref_count;
+
+ return -1;
+}
+
bool resource_are_streams_timing_synchronizable(
struct dc_stream_state *stream1,
struct dc_stream_state *stream2)
@@ -330,6 +348,9 @@ bool resource_are_streams_timing_synchronizable(
!= stream2->timing.pix_clk_khz)
return false;
+ if (stream1->clamping.c_depth != stream2->clamping.c_depth)
+ return false;
+
if (stream1->phy_pix_clk != stream2->phy_pix_clk
&& (!dc_is_dp_signal(stream1->signal)
|| !dc_is_dp_signal(stream2->signal)))
@@ -337,6 +358,20 @@ bool resource_are_streams_timing_synchronizable(
return true;
}
+static bool is_dp_and_hdmi_sharable(
+ struct dc_stream_state *stream1,
+ struct dc_stream_state *stream2)
+{
+ if (stream1->ctx->dc->caps.disable_dp_clk_share)
+ return false;
+
+ if (stream1->clamping.c_depth != COLOR_DEPTH_888 ||
+ stream2->clamping.c_depth != COLOR_DEPTH_888)
+ return false;
+
+ return true;
+
+}
static bool is_sharable_clk_src(
const struct pipe_ctx *pipe_with_clk_src,
@@ -348,15 +383,18 @@ static bool is_sharable_clk_src(
if (pipe_with_clk_src->stream->signal == SIGNAL_TYPE_VIRTUAL)
return false;
- if (dc_is_dp_signal(pipe_with_clk_src->stream->signal))
+ if (dc_is_dp_signal(pipe_with_clk_src->stream->signal) ||
+ (dc_is_dp_signal(pipe->stream->signal) &&
+ !is_dp_and_hdmi_sharable(pipe_with_clk_src->stream,
+ pipe->stream)))
return false;
if (dc_is_hdmi_signal(pipe_with_clk_src->stream->signal)
- && dc_is_dvi_signal(pipe->stream->signal))
+ && dc_is_dual_link_signal(pipe->stream->signal))
return false;
if (dc_is_hdmi_signal(pipe->stream->signal)
- && dc_is_dvi_signal(pipe_with_clk_src->stream->signal))
+ && dc_is_dual_link_signal(pipe_with_clk_src->stream->signal))
return false;
if (!resource_are_streams_timing_synchronizable(
@@ -1213,7 +1251,7 @@ static struct pipe_ctx *acquire_free_pipe_for_stream(
}
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
static int acquire_first_split_pipe(
struct resource_context *res_ctx,
const struct resource_pool *pool,
@@ -1284,7 +1322,7 @@ bool dc_add_plane_to_context(
free_pipe = acquire_free_pipe_for_stream(context, pool, stream);
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
if (!free_pipe) {
int pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
if (pipe_idx >= 0)
@@ -1882,7 +1920,7 @@ enum dc_status resource_map_pool_resources(
/* acquire new resources */
pipe_idx = acquire_first_free_pipe(&context->res_ctx, pool, stream);
-#ifdef CONFIG_X86
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
if (pipe_idx < 0)
pipe_idx = acquire_first_split_pipe(&context->res_ctx, pool, stream);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
index 815dfb50089b..8fb3aefd195c 100644
--- a/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
+++ b/drivers/gpu/drm/amd/display/dc/core/dc_surface.c
@@ -192,7 +192,7 @@ void dc_transfer_func_release(struct dc_transfer_func *tf)
kref_put(&tf->refcount, dc_transfer_func_free);
}
-struct dc_transfer_func *dc_create_transfer_func()
+struct dc_transfer_func *dc_create_transfer_func(void)
{
struct dc_transfer_func *tf = kvzalloc(sizeof(*tf), GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/display/dc/dc.h b/drivers/gpu/drm/amd/display/dc/dc.h
index ceb4c3725893..6c9990bef267 100644
--- a/drivers/gpu/drm/amd/display/dc/dc.h
+++ b/drivers/gpu/drm/amd/display/dc/dc.h
@@ -38,7 +38,7 @@
#include "inc/compressor.h"
#include "dml/display_mode_lib.h"
-#define DC_VER "3.1.56"
+#define DC_VER "3.1.59"
#define MAX_SURFACES 3
#define MAX_STREAMS 6
@@ -77,6 +77,9 @@ struct dc_caps {
bool is_apu;
bool dual_link_dvi;
bool post_blend_color_processing;
+ bool force_dp_tps4_for_cp2520;
+ bool disable_dp_clk_share;
+ bool psp_setup_panel_mode;
};
struct dc_dcc_surface_param {
@@ -207,7 +210,7 @@ struct dc_clocks {
int phyclk_khz;
};
-struct dc_debug {
+struct dc_debug_options {
enum visual_confirm visual_confirm;
bool sanity_checks;
bool max_disp_clk;
@@ -258,13 +261,16 @@ struct dc_debug {
bool avoid_vbios_exec_table;
bool scl_reset_length10;
bool hdmi20_disable;
+ bool skip_detection_link_training;
+};
- struct {
- uint32_t ltFailCount;
- uint32_t i2cErrorCount;
- uint32_t auxErrorCount;
- } debug_data;
+struct dc_debug_data {
+ uint32_t ltFailCount;
+ uint32_t i2cErrorCount;
+ uint32_t auxErrorCount;
};
+
+
struct dc_state;
struct resource_pool;
struct dce_hwseq;
@@ -273,8 +279,7 @@ struct dc {
struct dc_caps caps;
struct dc_cap_funcs cap_funcs;
struct dc_config config;
- struct dc_debug debug;
-
+ struct dc_debug_options debug;
struct dc_context *ctx;
uint8_t link_count;
@@ -289,7 +294,7 @@ struct dc {
/* Inputs into BW and WM calculations. */
struct bw_calcs_dceip *bw_dceip;
struct bw_calcs_vbios *bw_vbios;
-#ifdef CONFIG_X86
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
struct dcn_soc_bounding_box *dcn_soc;
struct dcn_ip_params *dcn_ip;
struct display_mode_lib dml;
@@ -310,6 +315,8 @@ struct dc {
/* FBC compressor */
struct compressor *fbc_compressor;
+
+ struct dc_debug_data debug_data;
};
enum frame_buffer_mode {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
index 9cfd7ea845e3..b789cb2b354b 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_hw_types.h
@@ -192,7 +192,7 @@ enum surface_pixel_format {
/*swaped & float*/
SURFACE_PIXEL_FORMAT_GRPH_ABGR16161616F,
/*grow graphics here if necessary */
-
+ SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888,
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
SURFACE_PIXEL_FORMAT_VIDEO_420_YCbCr =
SURFACE_PIXEL_FORMAT_VIDEO_BEGIN,
@@ -417,6 +417,7 @@ enum {
GAMMA_RGB_256_ENTRIES = 256,
GAMMA_RGB_FLOAT_1024_ENTRIES = 1024,
GAMMA_CS_TFM_1D_ENTRIES = 4096,
+ GAMMA_CUSTOM_ENTRIES = 4096,
GAMMA_MAX_ENTRIES = 4096
};
@@ -424,6 +425,7 @@ enum dc_gamma_type {
GAMMA_RGB_256 = 1,
GAMMA_RGB_FLOAT_1024 = 2,
GAMMA_CS_TFM_1D = 3,
+ GAMMA_CUSTOM = 4,
};
struct dc_csc_transform {
diff --git a/drivers/gpu/drm/amd/display/dc/dc_link.h b/drivers/gpu/drm/amd/display/dc/dc_link.h
index 070a56926308..d43cefbc43d3 100644
--- a/drivers/gpu/drm/amd/display/dc/dc_link.h
+++ b/drivers/gpu/drm/amd/display/dc/dc_link.h
@@ -73,6 +73,7 @@ struct dc_link {
enum dc_irq_source irq_source_hpd;
enum dc_irq_source irq_source_hpd_rx;/* aka DP Short Pulse */
bool is_hpd_filter_disabled;
+ bool dp_ss_off;
/* caps is the same as reported_link_cap. link_traing use
* reported_link_cap. Will clean up. TODO
@@ -141,6 +142,8 @@ static inline struct dc_link *dc_get_link_at_index(struct dc *dc, uint32_t link_
bool dc_link_set_backlight_level(const struct dc_link *dc_link, uint32_t level,
uint32_t frame_ramp, const struct dc_stream_state *stream);
+int dc_link_get_backlight_level(const struct dc_link *dc_link);
+
bool dc_link_set_abm_disable(const struct dc_link *dc_link);
bool dc_link_set_psr_enable(const struct dc_link *dc_link, bool enable, bool wait);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/Makefile b/drivers/gpu/drm/amd/display/dc/dce/Makefile
index 11401fd8e535..825537bd4545 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/dce/Makefile
@@ -28,7 +28,7 @@
DCE = dce_audio.o dce_stream_encoder.o dce_link_encoder.o dce_hwseq.o \
dce_mem_input.o dce_clock_source.o dce_scl_filters.o dce_transform.o \
-dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o
+dce_clocks.o dce_opp.o dce_dmcu.o dce_abm.o dce_ipp.o dce_aux.o
AMD_DAL_DCE = $(addprefix $(AMDDALPATH)/dc/dce/,$(DCE))
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
new file mode 100644
index 000000000000..3f5b2e6f7553
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.c
@@ -0,0 +1,937 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#include "dm_services.h"
+#include "dce_aux.h"
+#include "dce/dce_11_0_sh_mask.h"
+
+#define CTX \
+ aux110->base.ctx
+#define REG(reg_name)\
+ (aux110->regs->reg_name)
+
+#define DC_LOGGER \
+ engine->ctx->logger
+
+#include "reg_helper.h"
+
+#define FROM_AUX_ENGINE(ptr) \
+ container_of((ptr), struct aux_engine_dce110, base)
+
+#define FROM_ENGINE(ptr) \
+ FROM_AUX_ENGINE(container_of((ptr), struct aux_engine, base))
+
+#define FROM_AUX_ENGINE_ENGINE(ptr) \
+ container_of((ptr), struct aux_engine, base)
+enum {
+ AUX_INVALID_REPLY_RETRY_COUNTER = 1,
+ AUX_TIMED_OUT_RETRY_COUNTER = 2,
+ AUX_DEFER_RETRY_COUNTER = 6
+};
+static void release_engine(
+ struct aux_engine *engine)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ dal_ddc_close(engine->ddc);
+
+ engine->ddc = NULL;
+
+ REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_DONE_USING_AUX_REG, 1);
+}
+
+#define SW_CAN_ACCESS_AUX 1
+#define DMCU_CAN_ACCESS_AUX 2
+
+static bool is_engine_available(
+ struct aux_engine *engine)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ uint32_t value = REG_READ(AUX_ARB_CONTROL);
+ uint32_t field = get_reg_field_value(
+ value,
+ AUX_ARB_CONTROL,
+ AUX_REG_RW_CNTL_STATUS);
+
+ return (field != DMCU_CAN_ACCESS_AUX);
+}
+static bool acquire_engine(
+ struct aux_engine *engine)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ uint32_t value = REG_READ(AUX_ARB_CONTROL);
+ uint32_t field = get_reg_field_value(
+ value,
+ AUX_ARB_CONTROL,
+ AUX_REG_RW_CNTL_STATUS);
+ if (field == DMCU_CAN_ACCESS_AUX)
+ return false;
+ /* enable AUX before requesting SW access to AUX */
+ value = REG_READ(AUX_CONTROL);
+ field = get_reg_field_value(value,
+ AUX_CONTROL,
+ AUX_EN);
+
+ if (field == 0) {
+ set_reg_field_value(
+ value,
+ 1,
+ AUX_CONTROL,
+ AUX_EN);
+
+ if (REG(AUX_RESET_MASK)) {
+ /* reset the DP_AUX block as part of the enable sequence */
+ set_reg_field_value(
+ value,
+ 1,
+ AUX_CONTROL,
+ AUX_RESET);
+ }
+
+ REG_WRITE(AUX_CONTROL, value);
+
+ if (REG(AUX_RESET_MASK)) {
+ /* poll HW to make sure the reset is done */
+
+ REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 1,
+ 1, 11);
+
+ set_reg_field_value(
+ value,
+ 0,
+ AUX_CONTROL,
+ AUX_RESET);
+
+ REG_WRITE(AUX_CONTROL, value);
+
+ REG_WAIT(AUX_CONTROL, AUX_RESET_DONE, 0,
+ 1, 11);
+ }
+ } /*if (field)*/
+
+ /* request SW to access AUX */
+ REG_UPDATE(AUX_ARB_CONTROL, AUX_SW_USE_AUX_REG_REQ, 1);
+
+ value = REG_READ(AUX_ARB_CONTROL);
+ field = get_reg_field_value(
+ value,
+ AUX_ARB_CONTROL,
+ AUX_REG_RW_CNTL_STATUS);
+
+ return (field == SW_CAN_ACCESS_AUX);
+}
+
+#define COMPOSE_AUX_SW_DATA_16_20(command, address) \
+ ((command) | ((0xF0000 & (address)) >> 16))
+
+#define COMPOSE_AUX_SW_DATA_8_15(address) \
+ ((0xFF00 & (address)) >> 8)
+
+#define COMPOSE_AUX_SW_DATA_0_7(address) \
+ (0xFF & (address))
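These helpers pack the 4-bit command nibble and the 20-bit AUX address into
the three header bytes, most significant bits first. A standalone sketch with
an illustrative command/address pair:

#include <stdio.h>

int main(void)
{
	unsigned int command = 0x90;    /* illustrative, already shifted action nibble */
	unsigned int address = 0x23456; /* illustrative 20-bit AUX address */

	printf("byte0: 0x%02x\n", command | ((0xF0000 & address) >> 16)); /* 0x92 */
	printf("byte1: 0x%02x\n", (0xFF00 & address) >> 8);               /* 0x34 */
	printf("byte2: 0x%02x\n", 0xFF & address);                        /* 0x56 */
	return 0;
}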
+
+static void submit_channel_request(
+ struct aux_engine *engine,
+ struct aux_request_transaction_data *request)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+ uint32_t value;
+ uint32_t length;
+
+ bool is_write =
+ ((request->type == AUX_TRANSACTION_TYPE_DP) &&
+ (request->action == I2CAUX_TRANSACTION_ACTION_DP_WRITE)) ||
+ ((request->type == AUX_TRANSACTION_TYPE_I2C) &&
+ ((request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE) ||
+ (request->action == I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT)));
+ if (REG(AUXN_IMPCAL)) {
+ /* clear_aux_error */
+ REG_UPDATE_SEQ(AUXN_IMPCAL, AUXN_CALOUT_ERROR_AK,
+ 1,
+ 0);
+
+ REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_CALOUT_ERROR_AK,
+ 1,
+ 0);
+
+ /* force_default_calibrate */
+ REG_UPDATE_1BY1_2(AUXN_IMPCAL,
+ AUXN_IMPCAL_ENABLE, 1,
+ AUXN_IMPCAL_OVERRIDE_ENABLE, 0);
+
+ /* bug? why does AUXN update EN and OVERRIDE_EN one by one while AUXP only toggles OVERRIDE? */
+
+ REG_UPDATE_SEQ(AUXP_IMPCAL, AUXP_IMPCAL_OVERRIDE_ENABLE,
+ 1,
+ 0);
+ }
+ /* set the delay and the number of bytes to write */
+
+ /* The length includes
+ * the 4-bit header and the 20-bit address
+ * (that is, 3 bytes).
+ * If the requested length is non-zero, an additional
+ * byte specifying the length is required.
+ */
+
+ length = request->length ? 4 : 3;
+ if (is_write)
+ length += request->length;
+
+ REG_UPDATE_2(AUX_SW_CONTROL,
+ AUX_SW_START_DELAY, request->delay,
+ AUX_SW_WR_BYTES, length);
+
+ /* program action and address and payload data (if 'is_write') */
+ value = REG_UPDATE_4(AUX_SW_DATA,
+ AUX_SW_INDEX, 0,
+ AUX_SW_DATA_RW, 0,
+ AUX_SW_AUTOINCREMENT_DISABLE, 1,
+ AUX_SW_DATA, COMPOSE_AUX_SW_DATA_16_20(request->action, request->address));
+
+ value = REG_SET_2(AUX_SW_DATA, value,
+ AUX_SW_AUTOINCREMENT_DISABLE, 0,
+ AUX_SW_DATA, COMPOSE_AUX_SW_DATA_8_15(request->address));
+
+ value = REG_SET(AUX_SW_DATA, value,
+ AUX_SW_DATA, COMPOSE_AUX_SW_DATA_0_7(request->address));
+
+ if (request->length) {
+ value = REG_SET(AUX_SW_DATA, value,
+ AUX_SW_DATA, request->length - 1);
+ }
+
+ if (is_write) {
+ /* Load the HW buffer with the data to be sent.
+ * This is relevant for write operations.
+ * For reads, the received data will be
+ * processed in process_channel_reply().
+ */
+ uint32_t i = 0;
+
+ while (i < request->length) {
+ value = REG_SET(AUX_SW_DATA, value,
+ AUX_SW_DATA, request->data[i]);
+
+ ++i;
+ }
+ }
+
+ REG_UPDATE(AUX_INTERRUPT_CONTROL, AUX_SW_DONE_ACK, 1);
+ REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 0,
+ 10, aux110->timeout_period/10);
+ REG_UPDATE(AUX_SW_CONTROL, AUX_SW_GO, 1);
+}
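A quick check of the length rule from the comment above: command plus address
take 3 bytes, a fourth length byte is added for non-zero payloads, and writes
additionally stream the payload itself. Values below are illustrative:

#include <stdio.h>

int main(void)
{
	unsigned int payload = 16;             /* illustrative request->length */
	unsigned int header = payload ? 4 : 3; /* cmd+addr (3 bytes) + length byte */
	unsigned int is_write = 1;

	printf("AUX_SW_WR_BYTES = %u\n", header + (is_write ? payload : 0)); /* 20 */
	return 0;
}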
+
+static int read_channel_reply(struct aux_engine *engine, uint32_t size,
+ uint8_t *buffer, uint8_t *reply_result,
+ uint32_t *sw_status)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+ uint32_t bytes_replied;
+ uint32_t reply_result_32;
+
+ *sw_status = REG_GET(AUX_SW_STATUS, AUX_SW_REPLY_BYTE_COUNT,
+ &bytes_replied);
+
+ /* In case HPD is LOW, exit AUX transaction */
+ if ((*sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+ return -1;
+
+ /* Need at least the status byte */
+ if (!bytes_replied)
+ return -1;
+
+ REG_UPDATE_1BY1_3(AUX_SW_DATA,
+ AUX_SW_INDEX, 0,
+ AUX_SW_AUTOINCREMENT_DISABLE, 1,
+ AUX_SW_DATA_RW, 1);
+
+ REG_GET(AUX_SW_DATA, AUX_SW_DATA, &reply_result_32);
+ reply_result_32 = reply_result_32 >> 4;
+ *reply_result = (uint8_t)reply_result_32;
+
+ if (reply_result_32 == 0) { /* ACK */
+ uint32_t i = 0;
+
+ /* First byte was already used to get the command status */
+ --bytes_replied;
+
+ /* Do not overflow buffer */
+ if (bytes_replied > size)
+ return -1;
+
+ while (i < bytes_replied) {
+ uint32_t aux_sw_data_val;
+
+ REG_GET(AUX_SW_DATA, AUX_SW_DATA, &aux_sw_data_val);
+ buffer[i] = aux_sw_data_val;
+ ++i;
+ }
+
+ return i;
+ }
+
+ return 0;
+}
+
+static void process_channel_reply(
+ struct aux_engine *engine,
+ struct aux_reply_transaction_data *reply)
+{
+ int bytes_replied;
+ uint8_t reply_result;
+ uint32_t sw_status;
+
+ bytes_replied = read_channel_reply(engine, reply->length, reply->data,
+ &reply_result, &sw_status);
+
+ /* in case HPD is LOW, exit AUX transaction */
+ if ((sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+ reply->status = AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
+ return;
+ }
+
+ if (bytes_replied < 0) {
+ /* Need to handle an error case...
+ * Hopefully, the upper-layer function won't call this function
+ * if the number of bytes in the reply was 0, because an error
+ * was surely asserted that should have been handled; for the
+ * hot-plug case, this can happen.
+ */
+ if (!(sw_status & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK)) {
+ reply->status = AUX_TRANSACTION_REPLY_INVALID;
+ ASSERT_CRITICAL(false);
+ return;
+ }
+ } else {
+
+ switch (reply_result) {
+ case 0: /* ACK */
+ reply->status = AUX_TRANSACTION_REPLY_AUX_ACK;
+ break;
+ case 1: /* NACK */
+ reply->status = AUX_TRANSACTION_REPLY_AUX_NACK;
+ break;
+ case 2: /* DEFER */
+ reply->status = AUX_TRANSACTION_REPLY_AUX_DEFER;
+ break;
+ case 4: /* AUX ACK / I2C NACK */
+ reply->status = AUX_TRANSACTION_REPLY_I2C_NACK;
+ break;
+ case 8: /* AUX ACK / I2C DEFER */
+ reply->status = AUX_TRANSACTION_REPLY_I2C_DEFER;
+ break;
+ default:
+ reply->status = AUX_TRANSACTION_REPLY_INVALID;
+ }
+ }
+}
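The reply_result values handled above are the DP AUX reply command nibble:
read_channel_reply() shifts the first reply byte right by four, so the raw
header bytes decode as sketched below (header byte values illustrative):

#include <stdio.h>

int main(void)
{
	unsigned char headers[] = { 0x00, 0x10, 0x20, 0x40, 0x80 };
	const char *names[] = { "AUX_ACK", "AUX_NACK", "AUX_DEFER",
				"I2C_NACK", "I2C_DEFER" };
	unsigned int i;

	for (i = 0; i < sizeof(headers); i++)
		printf("0x%02x >> 4 = %u (%s)\n",
		       headers[i], headers[i] >> 4, names[i]);
	return 0;
}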
+
+static enum aux_channel_operation_result get_channel_status(
+ struct aux_engine *engine,
+ uint8_t *returned_bytes)
+{
+ struct aux_engine_dce110 *aux110 = FROM_AUX_ENGINE(engine);
+
+ uint32_t value;
+
+ if (returned_bytes == NULL) {
+ /* caller passed a NULL pointer */
+ ASSERT_CRITICAL(false);
+ return AUX_CHANNEL_OPERATION_FAILED_REASON_UNKNOWN;
+ }
+ *returned_bytes = 0;
+
+ /* poll to make sure that SW_DONE is asserted */
+ value = REG_WAIT(AUX_SW_STATUS, AUX_SW_DONE, 1,
+ 10, aux110->timeout_period/10);
+
+ /* in case HPD is LOW, exit AUX transaction */
+ if ((value & AUX_SW_STATUS__AUX_SW_HPD_DISCON_MASK))
+ return AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON;
+
+ /* Note that the following bits are set in 'status.bits'
+ * during CTS 4.2.1.2 (FW 3.3.1):
+ * AUX_SW_RX_MIN_COUNT_VIOL, AUX_SW_RX_INVALID_STOP,
+ * AUX_SW_RX_RECV_NO_DET, AUX_SW_RX_RECV_INVALID_H.
+ *
+ * AUX_SW_RX_MIN_COUNT_VIOL is an internal,
+ * HW debugging bit and should be ignored.
+ */
+ if (value & AUX_SW_STATUS__AUX_SW_DONE_MASK) {
+ if ((value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_STATE_MASK) ||
+ (value & AUX_SW_STATUS__AUX_SW_RX_TIMEOUT_MASK))
+ return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
+
+ else if ((value & AUX_SW_STATUS__AUX_SW_RX_INVALID_STOP_MASK) ||
+ (value & AUX_SW_STATUS__AUX_SW_RX_RECV_NO_DET_MASK) ||
+ (value &
+ AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_H_MASK) ||
+ (value & AUX_SW_STATUS__AUX_SW_RX_RECV_INVALID_L_MASK))
+ return AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY;
+
+ *returned_bytes = get_reg_field_value(value,
+ AUX_SW_STATUS,
+ AUX_SW_REPLY_BYTE_COUNT);
+
+ if (*returned_bytes == 0)
+ return
+ AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY;
+ else {
+ *returned_bytes -= 1;
+ return AUX_CHANNEL_OPERATION_SUCCEEDED;
+ }
+ } else {
+ /* time_elapsed >= aux_engine->timeout_period;
+ * AUX_SW_STATUS__AUX_SW_HPD_DISCON may be set at this point
+ */
+ ASSERT_CRITICAL(false);
+ return AUX_CHANNEL_OPERATION_FAILED_TIMEOUT;
+ }
+}
+static void process_read_reply(
+ struct aux_engine *engine,
+ struct read_command_context *ctx)
+{
+ engine->funcs->process_channel_reply(engine, &ctx->reply);
+
+ switch (ctx->reply.status) {
+ case AUX_TRANSACTION_REPLY_AUX_ACK:
+ ctx->defer_retry_aux = 0;
+ if (ctx->returned_byte > ctx->current_read_length) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else if (ctx->returned_byte < ctx->current_read_length) {
+ ctx->current_read_length -= ctx->returned_byte;
+
+ ctx->offset += ctx->returned_byte;
+
+ ++ctx->invalid_reply_retry_aux_on_ack;
+
+ if (ctx->invalid_reply_retry_aux_on_ack >
+ AUX_INVALID_REPLY_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ }
+ } else {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+ ctx->transaction_complete = true;
+ ctx->operation_succeeded = true;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_NACK:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
+ ctx->operation_succeeded = false;
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_DEFER:
+ ++ctx->defer_retry_aux;
+
+ if (ctx->defer_retry_aux > AUX_DEFER_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_I2C_DEFER:
+ ctx->defer_retry_aux = 0;
+
+ ++ctx->defer_retry_i2c;
+
+ if (ctx->defer_retry_i2c > AUX_DEFER_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+static void process_read_request(
+ struct aux_engine *engine,
+ struct read_command_context *ctx)
+{
+ enum aux_channel_operation_result operation_result;
+
+ engine->funcs->submit_channel_request(engine, &ctx->request);
+
+ operation_result = engine->funcs->get_channel_status(
+ engine, &ctx->returned_byte);
+
+ switch (operation_result) {
+ case AUX_CHANNEL_OPERATION_SUCCEEDED:
+ if (ctx->returned_byte > ctx->current_read_length) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else {
+ ctx->timed_out_retry_aux = 0;
+ ctx->invalid_reply_retry_aux = 0;
+
+ ctx->reply.length = ctx->returned_byte;
+ ctx->reply.data = ctx->buffer;
+
+ process_read_reply(engine, ctx);
+ }
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ ++ctx->invalid_reply_retry_aux;
+
+ if (ctx->invalid_reply_retry_aux >
+ AUX_INVALID_REPLY_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else
+ udelay(400);
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ ++ctx->timed_out_retry_aux;
+
+ if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ } else {
+ /* DP 1.2a, table 2-58:
+ * "S3: AUX Request CMD PENDING:
+ * retry 3 times, with 400usec wait on each"
+ * The HW timeout is set to 550usec,
+ * so we should not wait here
+ */
+ }
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+static bool read_command(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction)
+{
+ struct read_command_context ctx;
+
+ ctx.buffer = request->payload.data;
+ ctx.current_read_length = request->payload.length;
+ ctx.offset = 0;
+ ctx.timed_out_retry_aux = 0;
+ ctx.invalid_reply_retry_aux = 0;
+ ctx.defer_retry_aux = 0;
+ ctx.defer_retry_i2c = 0;
+ ctx.invalid_reply_retry_aux_on_ack = 0;
+ ctx.transaction_complete = false;
+ ctx.operation_succeeded = true;
+
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_DP;
+ ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_READ;
+ ctx.request.address = request->payload.address;
+ } else if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
+ ctx.request.action = middle_of_transaction ?
+ I2CAUX_TRANSACTION_ACTION_I2C_READ_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_READ;
+ ctx.request.address = request->payload.address >> 1;
+ } else {
+ /* in DAL2, there was no return in such a case */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ ctx.request.delay = 0;
+
+ do {
+ memset(ctx.buffer + ctx.offset, 0, ctx.current_read_length);
+
+ ctx.request.data = ctx.buffer + ctx.offset;
+ ctx.request.length = ctx.current_read_length;
+
+ process_read_request(engine, &ctx);
+
+ request->status = ctx.status;
+
+ if (ctx.operation_succeeded && !ctx.transaction_complete)
+ if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
+ msleep(engine->delay);
+ } while (ctx.operation_succeeded && !ctx.transaction_complete);
+
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+ DC_LOG_I2C_AUX("READ: addr:0x%x value:0x%x Result:%d",
+ request->payload.address,
+ request->payload.data[0],
+ ctx.operation_succeeded);
+ }
+
+ return ctx.operation_succeeded;
+}
+
+static void process_write_reply(
+ struct aux_engine *engine,
+ struct write_command_context *ctx)
+{
+ engine->funcs->process_channel_reply(engine, &ctx->reply);
+
+ switch (ctx->reply.status) {
+ case AUX_TRANSACTION_REPLY_AUX_ACK:
+ ctx->operation_succeeded = true;
+
+ if (ctx->returned_byte) {
+ ctx->request.action = ctx->mot ?
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
+
+ ctx->current_write_length = 0;
+
+ ++ctx->ack_m_retry;
+
+ if (ctx->ack_m_retry > AUX_DEFER_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ } else
+ udelay(300);
+ } else {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
+ ctx->defer_retry_aux = 0;
+ ctx->ack_m_retry = 0;
+ ctx->transaction_complete = true;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_NACK:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_NACK;
+ ctx->operation_succeeded = false;
+ break;
+ case AUX_TRANSACTION_REPLY_AUX_DEFER:
+ ++ctx->defer_retry_aux;
+
+ if (ctx->defer_retry_aux > ctx->max_defer_retry) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_I2C_DEFER:
+ ctx->defer_retry_aux = 0;
+ ctx->current_write_length = 0;
+
+ ctx->request.action = ctx->mot ?
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_STATUS_REQUEST;
+
+ ++ctx->defer_retry_i2c;
+
+ if (ctx->defer_retry_i2c > ctx->max_defer_retry) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ }
+ break;
+ case AUX_TRANSACTION_REPLY_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+static void process_write_request(
+ struct aux_engine *engine,
+ struct write_command_context *ctx)
+{
+ enum aux_channel_operation_result operation_result;
+
+ engine->funcs->submit_channel_request(engine, &ctx->request);
+
+ operation_result = engine->funcs->get_channel_status(
+ engine, &ctx->returned_byte);
+
+ switch (operation_result) {
+ case AUX_CHANNEL_OPERATION_SUCCEEDED:
+ ctx->timed_out_retry_aux = 0;
+ ctx->invalid_reply_retry_aux = 0;
+
+ ctx->reply.length = ctx->returned_byte;
+ ctx->reply.data = ctx->reply_data;
+
+ process_write_reply(engine, ctx);
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_INVALID_REPLY:
+ ++ctx->invalid_reply_retry_aux;
+
+ if (ctx->invalid_reply_retry_aux >
+ AUX_INVALID_REPLY_RETRY_COUNTER) {
+ ctx->status =
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR;
+ ctx->operation_succeeded = false;
+ } else
+ udelay(400);
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_TIMEOUT:
+ ++ctx->timed_out_retry_aux;
+
+ if (ctx->timed_out_retry_aux > AUX_TIMED_OUT_RETRY_COUNTER) {
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT;
+ ctx->operation_succeeded = false;
+ } else {
+ /* DP 1.2a, table 2-58:
+ * "S3: AUX Request CMD PENDING:
+ * retry 3 times, with 400usec wait on each"
+ * The HW timeout is set to 550usec,
+ * so we should not wait here
+ */
+ }
+ break;
+ case AUX_CHANNEL_OPERATION_FAILED_HPD_DISCON:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON;
+ ctx->operation_succeeded = false;
+ break;
+ default:
+ ctx->status = I2CAUX_TRANSACTION_STATUS_UNKNOWN;
+ ctx->operation_succeeded = false;
+ }
+}
+static bool write_command(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction)
+{
+ struct write_command_context ctx;
+
+ ctx.mot = middle_of_transaction;
+ ctx.buffer = request->payload.data;
+ ctx.current_write_length = request->payload.length;
+ ctx.timed_out_retry_aux = 0;
+ ctx.invalid_reply_retry_aux = 0;
+ ctx.defer_retry_aux = 0;
+ ctx.defer_retry_i2c = 0;
+ ctx.ack_m_retry = 0;
+ ctx.transaction_complete = false;
+ ctx.operation_succeeded = true;
+
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_DP;
+ ctx.request.action = I2CAUX_TRANSACTION_ACTION_DP_WRITE;
+ ctx.request.address = request->payload.address;
+ } else if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C) {
+ ctx.request.type = AUX_TRANSACTION_TYPE_I2C;
+ ctx.request.action = middle_of_transaction ?
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE_MOT :
+ I2CAUX_TRANSACTION_ACTION_I2C_WRITE;
+ ctx.request.address = request->payload.address >> 1;
+ } else {
+ /* in DAL2, there was no return in such a case */
+ BREAK_TO_DEBUGGER();
+ return false;
+ }
+
+ ctx.request.delay = 0;
+
+ ctx.max_defer_retry =
+ (engine->max_defer_write_retry > AUX_DEFER_RETRY_COUNTER) ?
+ engine->max_defer_write_retry : AUX_DEFER_RETRY_COUNTER;
+
+ do {
+ ctx.request.data = ctx.buffer;
+ ctx.request.length = ctx.current_write_length;
+
+ process_write_request(engine, &ctx);
+
+ request->status = ctx.status;
+
+ if (ctx.operation_succeeded && !ctx.transaction_complete)
+ if (ctx.request.type == AUX_TRANSACTION_TYPE_I2C)
+ msleep(engine->delay);
+ } while (ctx.operation_succeeded && !ctx.transaction_complete);
+
+ if (request->payload.address_space ==
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD) {
+ DC_LOG_I2C_AUX("WRITE: addr:0x%x value:0x%x Result:%d",
+ request->payload.address,
+ request->payload.data[0],
+ ctx.operation_succeeded);
+ }
+
+ return ctx.operation_succeeded;
+}
+static bool end_of_transaction_command(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request)
+{
+ struct i2caux_transaction_request dummy_request;
+ uint8_t dummy_data;
+
+ /* [tcheng] We only need to send the stop (read with MOT = 0)
+ * for I2C-over-Aux, not native AUX
+ */
+
+ if (request->payload.address_space !=
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C)
+ return false;
+
+ dummy_request.operation = request->operation;
+ dummy_request.payload.address_space = request->payload.address_space;
+ dummy_request.payload.address = request->payload.address;
+
+ /*
+ * Add a dummy byte due to some receiver quirk
+ * where one byte is sent along with MOT = 0.
+ * Ideally this should be 0.
+ */
+
+ dummy_request.payload.length = 0;
+ dummy_request.payload.data = &dummy_data;
+
+ if (request->operation == I2CAUX_TRANSACTION_READ)
+ return read_command(engine, &dummy_request, false);
+ else
+ return write_command(engine, &dummy_request, false);
+
+ /* according to Syed, DoDummyMOT is not needed now */
+}
+static bool submit_request(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction)
+{
+
+ bool result;
+ bool mot_used = true;
+
+ switch (request->operation) {
+ case I2CAUX_TRANSACTION_READ:
+ result = read_command(engine, request, mot_used);
+ break;
+ case I2CAUX_TRANSACTION_WRITE:
+ result = write_command(engine, request, mot_used);
+ break;
+ default:
+ result = false;
+ }
+
+ /* [tcheng]
+ * need to send a stop for the last transaction to free up the AUX;
+ * if the above command failed, this is the last transaction
+ */
+
+ if (!middle_of_transaction || !result)
+ end_of_transaction_command(engine, request);
+
+ /* mask AUX interrupt */
+
+ return result;
+}
+enum i2caux_engine_type get_engine_type(
+ const struct aux_engine *engine)
+{
+ return I2CAUX_ENGINE_TYPE_AUX;
+}
+
+static bool acquire(
+ struct aux_engine *engine,
+ struct ddc *ddc)
+{
+
+ enum gpio_result result;
+
+ if (engine->funcs->is_engine_available) {
+ /*check whether SW could use the engine*/
+ if (!engine->funcs->is_engine_available(engine))
+ return false;
+ }
+
+ result = dal_ddc_open(ddc, GPIO_MODE_HARDWARE,
+ GPIO_DDC_CONFIG_TYPE_MODE_AUX);
+
+ if (result != GPIO_RESULT_OK)
+ return false;
+
+ if (!engine->funcs->acquire_engine(engine)) {
+ dal_ddc_close(ddc);
+ return false;
+ }
+
+ engine->ddc = ddc;
+
+ return true;
+}
+
+static const struct aux_engine_funcs aux_engine_funcs = {
+ .acquire_engine = acquire_engine,
+ .submit_channel_request = submit_channel_request,
+ .process_channel_reply = process_channel_reply,
+ .read_channel_reply = read_channel_reply,
+ .get_channel_status = get_channel_status,
+ .is_engine_available = is_engine_available,
+ .release_engine = release_engine,
+ .destroy_engine = dce110_engine_destroy,
+ .submit_request = submit_request,
+ .get_engine_type = get_engine_type,
+ .acquire = acquire,
+};
+
+void dce110_engine_destroy(struct aux_engine **engine)
+{
+
+ struct aux_engine_dce110 *engine110 = FROM_AUX_ENGINE(*engine);
+
+ kfree(engine110);
+ *engine = NULL;
+
+}
+struct aux_engine *dce110_aux_engine_construct(struct aux_engine_dce110 *aux_engine110,
+ struct dc_context *ctx,
+ uint32_t inst,
+ uint32_t timeout_period,
+ const struct dce110_aux_registers *regs)
+{
+ aux_engine110->base.ddc = NULL;
+ aux_engine110->base.ctx = ctx;
+ aux_engine110->base.delay = 0;
+ aux_engine110->base.max_defer_write_retry = 0;
+ aux_engine110->base.funcs = &aux_engine_funcs;
+ aux_engine110->base.inst = inst;
+ aux_engine110->timeout_period = timeout_period;
+ aux_engine110->regs = regs;
+
+ return &aux_engine110->base;
+}
+
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
new file mode 100644
index 000000000000..f7caab85dc80
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_aux.h
@@ -0,0 +1,111 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_AUX_ENGINE_DCE110_H__
+#define __DAL_AUX_ENGINE_DCE110_H__
+#include "aux_engine.h"
+
+#define AUX_COMMON_REG_LIST(id)\
+ SRI(AUX_CONTROL, DP_AUX, id), \
+ SRI(AUX_ARB_CONTROL, DP_AUX, id), \
+ SRI(AUX_SW_DATA, DP_AUX, id), \
+ SRI(AUX_SW_CONTROL, DP_AUX, id), \
+ SRI(AUX_INTERRUPT_CONTROL, DP_AUX, id), \
+ SRI(AUX_SW_STATUS, DP_AUX, id), \
+ SR(AUXN_IMPCAL), \
+ SR(AUXP_IMPCAL)
+
+struct dce110_aux_registers {
+ uint32_t AUX_CONTROL;
+ uint32_t AUX_ARB_CONTROL;
+ uint32_t AUX_SW_DATA;
+ uint32_t AUX_SW_CONTROL;
+ uint32_t AUX_INTERRUPT_CONTROL;
+ uint32_t AUX_SW_STATUS;
+ uint32_t AUXN_IMPCAL;
+ uint32_t AUXP_IMPCAL;
+
+ uint32_t AUX_RESET_MASK;
+};
+
+enum { /* This is the timeout as defined in DP 1.2a,
+ * 2.3.4 "Detailed uPacket TX AUX CH State Description".
+ */
+ AUX_TIMEOUT_PERIOD = 400,
+
+ /* Ideally, the SW timeout should be just above 550usec
+ * which is programmed in HW.
+ * But the SW timeout of 600usec is not reliable,
+ * because on some systems, delay_in_microseconds()
+ * returns faster than it should.
+ * EPR #379763: by trial-and-error on different systems,
+ * 700usec is the minimum reliable SW timeout for polling
+ * the AUX_SW_STATUS.AUX_SW_DONE bit.
+ * This timeout expires *only* when there is
+ * AUX Error or AUX Timeout conditions - not during normal operation.
+ * During normal operation, AUX_SW_STATUS.AUX_SW_DONE bit is set
+ * at most within ~240usec. That means,
+ * increasing this timeout will not affect normal operation,
+ * and we'll timeout after
+ * SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD = 1600usec.
+ * This timeout is especially important for
+ * resume from S3 and CTS.
+ */
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER = 4
+};
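Putting the two constants together: the comment above implies an effective SW
timeout of 4 * 400 = 1600 usec, and get_channel_status() in dce_aux.c polls
in timeout_period/10 steps. A standalone sketch of that arithmetic (assuming
REG_WAIT's last two arguments are the poll interval and the attempt count):

#include <stdio.h>

int main(void)
{
	unsigned int aux_timeout_period = 400; /* usec, per DP 1.2a */
	unsigned int multiplier = 4;           /* SW_AUX_TIMEOUT_PERIOD_MULTIPLIER */
	unsigned int sw_timeout = multiplier * aux_timeout_period;

	printf("SW timeout: %u usec (%u polls of 10 usec)\n",
	       sw_timeout, sw_timeout / 10); /* 1600 usec, 160 polls */
	return 0;
}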
+struct aux_engine_dce110 {
+ struct aux_engine base;
+ const struct dce110_aux_registers *regs;
+ struct {
+ uint32_t aux_control;
+ uint32_t aux_arb_control;
+ uint32_t aux_sw_data;
+ uint32_t aux_sw_control;
+ uint32_t aux_interrupt_control;
+ uint32_t aux_sw_status;
+ } addr;
+ uint32_t timeout_period;
+};
+
+struct aux_engine_dce110_init_data {
+ uint32_t engine_id;
+ uint32_t timeout_period;
+ struct dc_context *ctx;
+ const struct dce110_aux_registers *regs;
+};
+
+struct aux_engine *dce110_aux_engine_construct(
+ struct aux_engine_dce110 *aux_engine110,
+ struct dc_context *ctx,
+ uint32_t inst,
+ uint32_t timeout_period,
+ const struct dce110_aux_registers *regs);
+
+void dce110_engine_destroy(struct aux_engine **engine);
+
+bool dce110_aux_engine_acquire(
+ struct aux_engine *aux_engine,
+ struct ddc *ddc);
+#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
index 439dcf3b596c..ca137757a69e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.c
@@ -592,7 +592,7 @@ static uint32_t dce110_get_pix_clk_dividers(
case DCE_VERSION_11_2:
case DCE_VERSION_11_22:
case DCE_VERSION_12_0:
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case DCN_VERSION_1_0:
#endif
@@ -909,7 +909,7 @@ static bool dce110_program_pix_clk(
struct dce110_clk_src *clk_src = TO_DCE110_CLK_SRC(clock_source);
struct bp_pixel_clock_parameters bp_pc_params = {0};
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
if (IS_FPGA_MAXIMUS_DC(clock_source->ctx->dce_environment)) {
unsigned int inst = pix_clk_params->controller_id - CONTROLLER_ID_D0;
unsigned dp_dto_ref_kHz = 700000;
@@ -982,7 +982,7 @@ static bool dce110_program_pix_clk(
case DCE_VERSION_11_2:
case DCE_VERSION_11_22:
case DCE_VERSION_12_0:
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case DCN_VERSION_1_0:
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
index 801bb65707b3..c45e2f76189e 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clock_source.h
@@ -55,7 +55,7 @@
CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_DCCG_DEEP_COLOR_CNTL, mask_sh),\
CS_SF(PHYPLLA_PIXCLK_RESYNC_CNTL, PHYPLLA_PIXCLK_DOUBLE_RATE_ENABLE, mask_sh)
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#define CS_COMMON_REG_LIST_DCN1_0(index, pllid) \
SRI(PIXCLK_RESYNC_CNTL, PHYPLL, pllid),\
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
index 8f8a2abac3f3..fb1f373d08a1 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.c
@@ -30,7 +30,7 @@
#include "bios_parser_interface.h"
#include "dc.h"
#include "dmcu.h"
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "dcn_calcs.h"
#endif
#include "core_types.h"
@@ -106,7 +106,8 @@ enum dentist_base_divider_id {
DENTIST_BASE_DID_1 = 0x08,
DENTIST_BASE_DID_2 = 0x40,
DENTIST_BASE_DID_3 = 0x60,
- DENTIST_MAX_DID = 0x80
+ DENTIST_BASE_DID_4 = 0x7e,
+ DENTIST_MAX_DID = 0x7f
};
/* Starting point and step size for each divider range.*/
@@ -117,6 +118,8 @@ enum dentist_divider_range {
DENTIST_DIVIDER_RANGE_2_STEP = 2, /* 0.50 */
DENTIST_DIVIDER_RANGE_3_START = 128, /* 32.00 */
DENTIST_DIVIDER_RANGE_3_STEP = 4, /* 1.00 */
+ DENTIST_DIVIDER_RANGE_4_START = 248, /* 62.00 */
+ DENTIST_DIVIDER_RANGE_4_STEP = 264, /* 66.00 */
DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4
};
@@ -133,9 +136,12 @@ static int dentist_get_divider_from_did(int did)
} else if (did < DENTIST_BASE_DID_3) {
return DENTIST_DIVIDER_RANGE_2_START + DENTIST_DIVIDER_RANGE_2_STEP
* (did - DENTIST_BASE_DID_2);
- } else {
+ } else if (did < DENTIST_BASE_DID_4) {
return DENTIST_DIVIDER_RANGE_3_START + DENTIST_DIVIDER_RANGE_3_STEP
* (did - DENTIST_BASE_DID_3);
+ } else {
+ return DENTIST_DIVIDER_RANGE_4_START + DENTIST_DIVIDER_RANGE_4_STEP
+ * (did - DENTIST_BASE_DID_4);
}
}
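
To make the new fourth divider range concrete, a standalone replica of the DID-to-divider mapping after this hunk; the range-1/range-2 start and step values are assumed from the unchanged part of dce_clocks.c, and raw dividers are in quarter units (DENTIST_DIVIDER_RANGE_SCALE_FACTOR = 4), so DID 0x7e maps to raw 248, i.e. a divider of 62.00:

#include <stdio.h>

/* Replica of the mapping after this patch; range 1/2 values assumed. */
enum {
	BASE_DID_1 = 0x08, BASE_DID_2 = 0x40,
	BASE_DID_3 = 0x60, BASE_DID_4 = 0x7e,
	RANGE_1_START = 8,   RANGE_1_STEP = 1,   /* 2.00, 0.25 */
	RANGE_2_START = 64,  RANGE_2_STEP = 2,   /* 16.00, 0.50 */
	RANGE_3_START = 128, RANGE_3_STEP = 4,   /* 32.00, 1.00 */
	RANGE_4_START = 248, RANGE_4_STEP = 264, /* 62.00, 66.00 */
	SCALE_FACTOR = 4,    /* raw divider counts in 0.25 steps */
};

static int divider_from_did(int did)
{
	if (did < BASE_DID_2)
		return RANGE_1_START + RANGE_1_STEP * (did - BASE_DID_1);
	else if (did < BASE_DID_3)
		return RANGE_2_START + RANGE_2_STEP * (did - BASE_DID_2);
	else if (did < BASE_DID_4)
		return RANGE_3_START + RANGE_3_STEP * (did - BASE_DID_3);
	else
		return RANGE_4_START + RANGE_4_STEP * (did - BASE_DID_4);
}

int main(void)
{
	/* The new range: DID 0x7e -> raw 248 -> divider 62.00 */
	printf("did 0x7e -> %.2f\n",
	       divider_from_did(0x7e) / (double)SCALE_FACTOR);
	return 0;
}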
@@ -337,7 +343,7 @@ static int dce112_set_clock(
static void dce_clock_read_integrated_info(struct dce_dccg *clk_dce)
{
- struct dc_debug *debug = &clk_dce->base.ctx->dc->debug;
+ struct dc_debug_options *debug = &clk_dce->base.ctx->dc->debug;
struct dc_bios *bp = clk_dce->base.ctx->dc_bios;
struct integrated_info info = { { { 0 } } };
struct dc_firmware_info fw_info = { { 0 } };
@@ -463,7 +469,7 @@ static void dce12_update_clocks(struct dccg *dccg,
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
clock_voltage_req.clk_type = DM_PP_CLOCK_TYPE_DISPLAY_CLK;
clock_voltage_req.clocks_in_khz = new_clocks->dispclk_khz;
- dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
+ new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
dm_pp_apply_clock_for_voltage_request(dccg->ctx, &clock_voltage_req);
@@ -478,7 +484,7 @@ static void dce12_update_clocks(struct dccg *dccg,
}
}
-#ifdef CONFIG_X86
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
static int dcn1_determine_dppclk_threshold(struct dccg *dccg, struct dc_clocks *new_clocks)
{
bool request_dpp_div = new_clocks->dispclk_khz > new_clocks->dppclk_khz;
@@ -625,7 +631,9 @@ static void dcn1_update_clocks(struct dccg *dccg,
}
/* dcn1 dppclk is tied to dispclk */
- if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
+ /* program dispclk even when it is unchanged, as a w/a for sleep/resume clock ramping issues */
+ if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)
+ || new_clocks->dispclk_khz == dccg->clks.dispclk_khz) {
dcn1_ramp_up_dispclk_with_dpp(dccg, new_clocks);
dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
@@ -661,12 +669,12 @@ static void dce_update_clocks(struct dccg *dccg,
}
if (should_set_clock(safe_to_lower, new_clocks->dispclk_khz, dccg->clks.dispclk_khz)) {
- dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
+ new_clocks->dispclk_khz = dccg->funcs->set_dispclk(dccg, new_clocks->dispclk_khz);
dccg->clks.dispclk_khz = new_clocks->dispclk_khz;
}
}
-#ifdef CONFIG_X86
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
static const struct display_clock_funcs dcn1_funcs = {
.get_dp_ref_clk_frequency = dce12_get_dp_ref_freq_khz,
.set_dispclk = dce112_set_clock,
@@ -821,10 +829,10 @@ struct dccg *dce120_dccg_create(struct dc_context *ctx)
return &clk_dce->base;
}
-#ifdef CONFIG_X86
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
struct dccg *dcn1_dccg_create(struct dc_context *ctx)
{
- struct dc_debug *debug = &ctx->dc->debug;
+ struct dc_debug_options *debug = &ctx->dc->debug;
struct dc_bios *bp = ctx->dc_bios;
struct dc_firmware_info fw_info = { { 0 } };
struct dce_dccg *clk_dce = kzalloc(sizeof(*clk_dce), GFP_KERNEL);
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
index e5e44adc6c27..8a6b2d328467 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_clocks.h
@@ -111,7 +111,7 @@ struct dccg *dce112_dccg_create(
struct dccg *dce120_dccg_create(struct dc_context *ctx);
-#ifdef CONFIG_X86
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
struct dccg *dcn1_dccg_create(struct dc_context *ctx);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
index 062a46543887..dea40b322191 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_dmcu.c
@@ -150,7 +150,7 @@ static void dce_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
}
}
-static void dce_dmcu_setup_psr(struct dmcu *dmcu,
+static bool dce_dmcu_setup_psr(struct dmcu *dmcu,
struct dc_link *link,
struct psr_context *psr_context)
{
@@ -261,6 +261,8 @@ static void dce_dmcu_setup_psr(struct dmcu *dmcu,
/* notifyDMCUMsg */
REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ return true;
}
static bool dce_is_dmcu_initialized(struct dmcu *dmcu)
@@ -314,7 +316,7 @@ static void dce_get_psr_wait_loop(
return;
}
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
static void dcn10_get_dmcu_state(struct dmcu *dmcu)
{
struct dce_dmcu *dmcu_dce = TO_DCE_DMCU(dmcu);
@@ -545,24 +547,25 @@ static void dcn10_dmcu_set_psr_enable(struct dmcu *dmcu, bool enable, bool wait)
* least a few frames. Should never hit the max retry assert below.
*/
if (wait == true) {
- for (retryCount = 0; retryCount <= 1000; retryCount++) {
- dcn10_get_dmcu_psr_state(dmcu, &psr_state);
- if (enable) {
- if (psr_state != 0)
- break;
- } else {
- if (psr_state == 0)
- break;
+ for (retryCount = 0; retryCount <= 1000; retryCount++) {
+ dcn10_get_dmcu_psr_state(dmcu, &psr_state);
+ if (enable) {
+ if (psr_state != 0)
+ break;
+ } else {
+ if (psr_state == 0)
+ break;
+ }
+ udelay(500);
}
- udelay(500);
- }
- /* assert if max retry hit */
- ASSERT(retryCount <= 1000);
+ /* assert if max retry hit */
+ if (retryCount >= 1000)
+ ASSERT(0);
}
}
-static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
+static bool dcn10_dmcu_setup_psr(struct dmcu *dmcu,
struct dc_link *link,
struct psr_context *psr_context)
{
@@ -577,7 +580,7 @@ static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
/* If microcontroller is not running, do nothing */
if (dmcu->dmcu_state != DMCU_RUNNING)
- return;
+ return false;
link->link_enc->funcs->psr_program_dp_dphy_fast_training(link->link_enc,
psr_context->psrExitLinkTrainingRequired);
@@ -677,6 +680,11 @@ static void dcn10_dmcu_setup_psr(struct dmcu *dmcu,
/* notifyDMCUMsg */
REG_UPDATE(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 1);
+
+ /* waitDMCUReadyForCmd */
+ REG_WAIT(MASTER_COMM_CNTL_REG, MASTER_COMM_INTERRUPT, 0, 1, 10000);
+
+ return true;
}
static void dcn10_psr_wait_loop(
@@ -735,7 +743,7 @@ static const struct dmcu_funcs dce_funcs = {
.is_dmcu_initialized = dce_is_dmcu_initialized
};
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
static const struct dmcu_funcs dcn10_funcs = {
.dmcu_init = dcn10_dmcu_init,
.load_iram = dcn10_dmcu_load_iram,
@@ -787,7 +795,7 @@ struct dmcu *dce_dmcu_create(
return &dmcu_dce->base;
}
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
struct dmcu *dcn10_dmcu_create(
struct dc_context *ctx,
const struct dce_dmcu_registers *regs,
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
index 60e3c6a73d37..eff7d22d78fb 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_link_encoder.c
@@ -256,6 +256,11 @@ static void setup_panel_mode(
enum dp_panel_mode panel_mode)
{
uint32_t value;
+ struct dc_context *ctx = enc110->base.ctx;
+
+ /* if the psp has already set the panel mode, dal should not program it */
+ if (ctx->dc->caps.psp_setup_panel_mode)
+ return;
ASSERT(REG(DP_DPHY_INTERNAL_CTRL));
value = REG_READ(DP_DPHY_INTERNAL_CTRL);
@@ -925,7 +930,7 @@ void dce110_link_encoder_enable_tmds_output(
enum bp_result result;
/* Enable the PHY */
-
+ cntl.connector_obj_id = enc110->base.connector;
cntl.action = TRANSMITTER_CONTROL_ENABLE;
cntl.engine_id = enc->preferred_engine;
cntl.transmitter = enc110->base.transmitter;
@@ -967,7 +972,7 @@ void dce110_link_encoder_enable_dp_output(
* We need to set number of lanes manually.
*/
configure_encoder(enc110, link_settings);
-
+ cntl.connector_obj_id = enc110->base.connector;
cntl.action = TRANSMITTER_CONTROL_ENABLE;
cntl.engine_id = enc->preferred_engine;
cntl.transmitter = enc110->base.transmitter;
diff --git a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
index b139b4017820..91642e684858 100644
--- a/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
+++ b/drivers/gpu/drm/amd/display/dc/dce/dce_stream_encoder.c
@@ -135,7 +135,7 @@ static void dce110_update_generic_info_packet(
AFMT_GENERIC0_UPDATE, (packet_index == 0),
AFMT_GENERIC2_UPDATE, (packet_index == 2));
}
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
if (REG(AFMT_VBI_PACKET_CONTROL1)) {
switch (packet_index) {
case 0:
@@ -229,7 +229,7 @@ static void dce110_update_hdmi_info_packet(
HDMI_GENERIC1_SEND, send,
HDMI_GENERIC1_LINE, line);
break;
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case 4:
if (REG(HDMI_GENERIC_PACKET_CONTROL2))
REG_UPDATE_3(HDMI_GENERIC_PACKET_CONTROL2,
@@ -274,7 +274,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
struct dc_crtc_timing *crtc_timing,
enum dc_color_space output_color_space)
{
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
uint32_t h_active_start;
uint32_t v_active_start;
uint32_t misc0 = 0;
@@ -317,7 +317,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
if (enc110->se_mask->DP_VID_M_DOUBLE_VALUE_EN)
REG_UPDATE(DP_VID_TIMING, DP_VID_M_DOUBLE_VALUE_EN, 1);
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
if (enc110->se_mask->DP_VID_N_MUL)
REG_UPDATE(DP_VID_TIMING, DP_VID_N_MUL, 1);
#endif
@@ -328,7 +328,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
break;
}
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
if (REG(DP_MSA_MISC))
misc1 = REG_READ(DP_MSA_MISC);
#endif
@@ -362,7 +362,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
/* set dynamic range and YCbCr range */
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
switch (crtc_timing->display_color_depth) {
case COLOR_DEPTH_666:
colorimetry_bpc = 0;
@@ -441,7 +441,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
DP_DYN_RANGE, dynamic_range_rgb,
DP_YCBCR_RANGE, dynamic_range_ycbcr);
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
if (REG(DP_MSA_COLORIMETRY))
REG_SET(DP_MSA_COLORIMETRY, 0, DP_MSA_MISC0, misc0);
@@ -476,7 +476,7 @@ static void dce110_stream_encoder_dp_set_stream_attribute(
crtc_timing->v_front_porch;
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
+ /* start at beginning of left border */
if (REG(DP_MSA_TIMING_PARAM2))
REG_SET_2(DP_MSA_TIMING_PARAM2, 0,
@@ -751,7 +751,7 @@ static void dce110_stream_encoder_update_hdmi_info_packets(
dce110_update_hdmi_info_packet(enc110, 3, &info_frame->hdrsmd);
}
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
if (enc110->se_mask->HDMI_DB_DISABLE) {
/* for bring up, disable dp double TODO */
if (REG(HDMI_DB_CONTROL))
@@ -789,7 +789,7 @@ static void dce110_stream_encoder_stop_hdmi_info_packets(
HDMI_GENERIC1_LINE, 0,
HDMI_GENERIC1_SEND, 0);
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
/* stop generic packets 2 & 3 on HDMI */
if (REG(HDMI_GENERIC_PACKET_CONTROL2))
REG_SET_6(HDMI_GENERIC_PACKET_CONTROL2, 0,
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
index ec3221333011..74c05e878807 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_hw_sequencer.c
@@ -149,10 +149,6 @@ static uint32_t get_max_pixel_clock_for_all_paths(
max_pix_clk =
pipe_ctx->stream_res.pix_clk_params.requested_pix_clk;
}
-
- if (max_pix_clk == 0)
- ASSERT(0);
-
return max_pix_clk;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
index 8ed8eace42be..3f76e6019546 100644
--- a/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce100/dce100_resource.c
@@ -52,6 +52,7 @@
#include "dce/dce_10_0_sh_mask.h"
#include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
#include "dce/dce_abm.h"
#ifndef mmMC_HUB_RDREQ_DMIF_LIMIT
@@ -279,7 +280,20 @@ static const struct dce_opp_shift opp_shift = {
static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_100(_MASK)
};
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+ aux_engine_regs(5)
+};
#define audio_regs(id)\
[id] = {\
@@ -572,6 +586,23 @@ struct output_pixel_processor *dce100_opp_create(
return &opp->base;
}
+struct aux_engine *dce100_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+ return &aux_engine->base;
+}
+
struct clock_source *dce100_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
@@ -624,6 +655,10 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+
}
for (i = 0; i < pool->base.stream_enc_count; i++) {
@@ -678,9 +713,22 @@ bool dce100_validate_bandwidth(
struct dc *dc,
struct dc_state *context)
{
- /* TODO implement when needed but for now hardcode max value*/
- context->bw.dce.dispclk_khz = 681000;
- context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+ int i;
+ bool at_least_one_pipe = false;
+
+ for (i = 0; i < dc->res_pool->pipe_count; i++) {
+ if (context->res_ctx.pipe_ctx[i].stream)
+ at_least_one_pipe = true;
+ }
+
+ if (at_least_one_pipe) {
+ /* TODO implement when needed but for now hardcode max value */
+ context->bw.dce.dispclk_khz = 681000;
+ context->bw.dce.yclk_khz = 250000 * MEMORY_TYPE_MULTIPLIER;
+ } else {
+ context->bw.dce.dispclk_khz = 0;
+ context->bw.dce.yclk_khz = 0;
+ }
return true;
}
@@ -871,7 +919,7 @@ static bool construct(
dc->caps.i2c_speed_in_khz = 40;
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
-
+ dc->caps.disable_dp_clk_share = true;
for (i = 0; i < pool->base.pipe_count; i++) {
pool->base.timing_generators[i] =
dce100_timing_generator_create(
@@ -915,6 +963,13 @@ static bool construct(
"DC: failed to create output pixel processor!\n");
goto res_create_fail;
}
+ pool->base.engines[i] = dce100_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create aux engine!!\n");
+ goto res_create_fail;
+ }
}
dc->caps.max_planes = pool->base.pipe_count;
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
index 33a14e163f88..14384d9675a8 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.c
@@ -1250,7 +1250,7 @@ static void program_scaler(const struct dc *dc,
{
struct tg_color color = {0};
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
/* TOFPGA */
if (pipe_ctx->plane_res.xfm->funcs->transform_set_pixel_storage_depth == NULL)
return;
@@ -1588,7 +1588,13 @@ void dce110_enable_accelerated_mode(struct dc *dc, struct dc_state *context)
bool can_eDP_fast_boot_optimize = false;
if (edp_link) {
- can_eDP_fast_boot_optimize =
+ /* this seems to cause blank screens on DCE8 */
+ if ((dc->ctx->dce_version == DCE_VERSION_8_0) ||
+ (dc->ctx->dce_version == DCE_VERSION_8_1) ||
+ (dc->ctx->dce_version == DCE_VERSION_8_3))
+ can_eDP_fast_boot_optimize = false;
+ else
+ can_eDP_fast_boot_optimize =
edp_link->link_enc->funcs->is_dig_enabled(edp_link->link_enc);
}
@@ -1908,7 +1914,9 @@ static void dce110_reset_hw_ctx_wrap(
pipe_ctx_old->plane_res.mi->funcs->free_mem_input(
pipe_ctx_old->plane_res.mi, dc->current_state->stream_count);
- if (old_clk)
+ if (old_clk && 0 == resource_get_clock_source_reference(&context->res_ctx,
+ dc->res_pool,
+ old_clk))
old_clk->funcs->cs_power_down(old_clk);
dc->hwss.disable_plane(dc, pipe_ctx_old);
@@ -2530,7 +2538,7 @@ static void pplib_apply_display_requirements(
/* TODO: dce11.2*/
pp_display_cfg->avail_mclk_switch_time_in_disp_active_us = 0;
- pp_display_cfg->disp_clk_khz = context->bw.dce.dispclk_khz;
+ pp_display_cfg->disp_clk_khz = dc->res_pool->dccg->clks.dispclk_khz;
dce110_fill_display_configs(context, pp_display_cfg);
@@ -2552,14 +2560,14 @@ static void pplib_apply_display_requirements(
dc->prev_display_config = *pp_display_cfg;
}
-static void dce110_set_bandwidth(
+void dce110_set_bandwidth(
struct dc *dc,
struct dc_state *context,
bool decrease_allowed)
{
struct dc_clocks req_clks;
- req_clks.dispclk_khz = context->bw.dce.dispclk_khz * 115 / 100;
+ req_clks.dispclk_khz = context->bw.dce.dispclk_khz;
req_clks.phyclk_khz = get_max_pixel_clock_for_all_paths(dc, context);
if (decrease_allowed)
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
index d6db3dbd9015..e4c5db75c4c6 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_hw_sequencer.h
@@ -68,6 +68,11 @@ void dce110_fill_display_configs(
const struct dc_state *context,
struct dm_pp_display_configuration *pp_display_cfg);
+void dce110_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed);
+
uint32_t dce110_get_min_vblank_time_us(const struct dc_state *context);
void dp_receiver_power_ctrl(struct dc_link *link, bool on);
diff --git a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
index 1c902e49a712..e5e9e92521e9 100644
--- a/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce110/dce110_resource.c
@@ -49,6 +49,7 @@
#include "dce/dce_clock_source.h"
#include "dce/dce_hwseq.h"
#include "dce110/dce110_hw_sequencer.h"
+#include "dce/dce_aux.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
@@ -306,6 +307,21 @@ static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_110(_MASK)
};
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+ aux_engine_regs(5)
+};
+
#define audio_regs(id)\
[id] = {\
AUD_COMMON_REG_LIST(id)\
@@ -588,6 +604,23 @@ static struct output_pixel_processor *dce110_opp_create(
return &opp->base;
}
+struct aux_engine *dce110_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+ return &aux_engine->base;
+}
+
struct clock_source *dce110_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
@@ -651,6 +684,10 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+
}
for (i = 0; i < pool->base.stream_enc_count; i++) {
@@ -1258,6 +1295,14 @@ static bool construct(
"DC: failed to create output pixel processor!\n");
goto res_create_fail;
}
+
+ pool->base.engines[i] = dce110_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create aux engine!!\n");
+ goto res_create_fail;
+ }
}
dc->fbc_compressor = dce110_compressor_create(ctx);
diff --git a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
index 30d5b32892d6..288129343c77 100644
--- a/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce112/dce112_resource.c
@@ -49,6 +49,7 @@
#include "dce112/dce112_hw_sequencer.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
#include "reg_helper.h"
@@ -314,6 +315,21 @@ static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_112(_MASK)
};
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+ aux_engine_regs(5)
+};
+
#define audio_regs(id)\
[id] = {\
AUD_COMMON_REG_LIST(id)\
@@ -588,6 +604,23 @@ struct output_pixel_processor *dce112_opp_create(
return &opp->base;
}
+struct aux_engine *dce112_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+ return &aux_engine->base;
+}
+
struct clock_source *dce112_clock_source_create(
struct dc_context *ctx,
struct dc_bios *bios,
@@ -625,6 +658,9 @@ static void destruct(struct dce110_resource_pool *pool)
if (pool->base.opps[i] != NULL)
dce110_opp_destroy(&pool->base.opps[i]);
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+
if (pool->base.transforms[i] != NULL)
dce112_transform_destroy(&pool->base.transforms[i]);
@@ -640,6 +676,7 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+
}
for (i = 0; i < pool->base.stream_enc_count; i++) {
@@ -1208,6 +1245,13 @@ static bool construct(
"DC:failed to create output pixel processor!\n");
goto res_create_fail;
}
+ pool->base.engines[i] = dce112_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create aux engine!!\n");
+ goto res_create_fail;
+ }
}
if (!resource_construct(num_virtual_links, dc, &pool->base,
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
index e96ff86d2fc3..5853522a6182 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_hw_sequencer.c
@@ -244,7 +244,16 @@ static void dce120_update_dchub(
dh_data->dchub_info_valid = false;
}
+static void dce120_set_bandwidth(
+ struct dc *dc,
+ struct dc_state *context,
+ bool decrease_allowed)
+{
+ if (context->stream_count <= 0)
+ return;
+ dce110_set_bandwidth(dc, context, decrease_allowed);
+}
void dce120_hw_sequencer_construct(struct dc *dc)
{
@@ -254,5 +263,6 @@ void dce120_hw_sequencer_construct(struct dc *dc)
dce110_hw_sequencer_construct(dc);
dc->hwss.enable_display_power_gating = dce120_enable_display_power_gating;
dc->hwss.update_dchub = dce120_update_dchub;
+ dc->hwss.set_bandwidth = dce120_set_bandwidth;
}
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
index 8381f27a2361..d43f37d99c7d 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_resource.c
@@ -53,6 +53,7 @@
#include "dce/dce_hwseq.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
#include "dce/dce_12_0_offset.h"
#include "dce/dce_12_0_sh_mask.h"
@@ -297,6 +298,20 @@ static const struct dce_opp_shift opp_shift = {
static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_120(_MASK)
};
+ #define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+ aux_engine_regs(5)
+};
#define audio_regs(id)\
[id] = {\
@@ -361,6 +376,22 @@ struct output_pixel_processor *dce120_opp_create(
ctx, inst, &opp_regs[inst], &opp_shift, &opp_mask);
return &opp->base;
}
+struct aux_engine *dce120_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+ return &aux_engine->base;
+}
static const struct bios_registers bios_regs = {
.BIOS_SCRATCH_6 = mmBIOS_SCRATCH_6 + NBIO_BASE(mmBIOS_SCRATCH_6_BASE_IDX)
@@ -373,7 +404,7 @@ static const struct resource_caps res_cap = {
.num_pll = 6,
};
-static const struct dc_debug debug_defaults = {
+static const struct dc_debug_options debug_defaults = {
.disable_clock_gate = true,
};
@@ -467,6 +498,10 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
+
}
for (i = 0; i < pool->base.audio_count; i++) {
@@ -848,6 +883,7 @@ static bool construct(
dc->caps.i2c_speed_in_khz = 100;
dc->caps.max_cursor_size = 128;
dc->caps.dual_link_dvi = true;
+ dc->caps.psp_setup_panel_mode = true;
dc->debug = debug_defaults;
@@ -984,6 +1020,13 @@ static bool construct(
dm_error(
"DC: failed to create output pixel processor!\n");
}
+ pool->base.engines[i] = dce120_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create aux engine!!\n");
+ goto res_create_fail;
+ }
/* check next valid pipe */
j++;
diff --git a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
index 2ea490f8482e..04b866f0fa1f 100644
--- a/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
+++ b/drivers/gpu/drm/amd/display/dc/dce120/dce120_timing_generator.c
@@ -772,7 +772,7 @@ void dce120_tg_set_blank(struct timing_generator *tg,
CRTC_REG_SET(
CRTC0_CRTC_DOUBLE_BUFFER_CONTROL,
- CRTC_BLANK_DATA_DOUBLE_BUFFER_EN, 0);
+ CRTC_BLANK_DATA_DOUBLE_BUFFER_EN, 1);
if (enable_blanking)
CRTC_REG_SET(CRTC0_CRTC_BLANK_CONTROL, CRTC_BLANK_DATA_EN, 1);
diff --git a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
index 2ac95ec2bf96..604c62969ead 100644
--- a/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dce80/dce80_resource.c
@@ -54,6 +54,7 @@
#include "reg_helper.h"
#include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
#include "dce/dce_abm.h"
/* TODO remove this include */
@@ -298,6 +299,21 @@ static const struct dce_opp_mask opp_mask = {
OPP_COMMON_MASK_SH_LIST_DCE_80(_MASK)
};
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+ aux_engine_regs(5)
+};
+
#define audio_regs(id)\
[id] = {\
AUD_COMMON_REG_LIST(id)\
@@ -448,6 +464,23 @@ static struct output_pixel_processor *dce80_opp_create(
return &opp->base;
}
+struct aux_engine *dce80_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+ return &aux_engine->base;
+}
+
static struct stream_encoder *dce80_stream_encoder_create(
enum engine_id eng_id,
struct dc_context *ctx)
@@ -655,6 +688,9 @@ static void destruct(struct dce110_resource_pool *pool)
kfree(DCE110TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+
+ if (pool->base.engines[i] != NULL)
+ dce110_engine_destroy(&pool->base.engines[i]);
}
for (i = 0; i < pool->base.stream_enc_count; i++) {
@@ -899,9 +935,18 @@ static bool dce80_construct(
dm_error("DC: failed to create output pixel processor!\n");
goto res_create_fail;
}
+
+ pool->base.engines[i] = dce80_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create aux engine!!\n");
+ goto res_create_fail;
+ }
}
dc->caps.max_planes = pool->base.pipe_count;
+ dc->caps.disable_dp_clk_share = true;
if (!resource_construct(num_virtual_links, dc, &pool->base,
&res_create_funcs))
@@ -1087,6 +1132,7 @@ static bool dce81_construct(
}
dc->caps.max_planes = pool->base.pipe_count;
+ dc->caps.disable_dp_clk_share = true;
if (!resource_construct(num_virtual_links, dc, &pool->base,
&res_create_funcs))
@@ -1268,6 +1314,7 @@ static bool dce83_construct(
}
dc->caps.max_planes = pool->base.pipe_count;
+ dc->caps.disable_dp_clk_share = true;
if (!resource_construct(num_virtual_links, dc, &pool->base,
&res_create_funcs))
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
index 332354ca6529..2138cd3c5d1d 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hubp.c
@@ -294,6 +294,10 @@ void hubp1_program_pixel_format(
REG_UPDATE(DCSURF_SURFACE_CONFIG,
SURFACE_PIXEL_FORMAT, 66);
break;
+ case SURFACE_PIXEL_FORMAT_VIDEO_AYCrCb8888:
+ REG_UPDATE(DCSURF_SURFACE_CONFIG,
+ SURFACE_PIXEL_FORMAT, 12);
+ break;
default:
BREAK_TO_DEBUGGER();
break;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
index c87f6e603055..cfcc54f2ce65 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_hw_sequencer.c
@@ -1089,6 +1089,8 @@ static void dcn10_init_hw(struct dc *dc)
}
enable_power_gating_plane(dc->hwseq, true);
+
+ memset(&dc->res_pool->dccg->clks, 0, sizeof(dc->res_pool->dccg->clks));
}
static void reset_hw_ctx_wrap(
@@ -1213,8 +1215,11 @@ static bool dcn10_set_input_transfer_func(struct pipe_ctx *pipe_ctx,
} else if (tf->type == TF_TYPE_BYPASS) {
dpp_base->funcs->dpp_set_degamma(dpp_base, IPP_DEGAMMA_MODE_BYPASS);
} else {
- /*TF_TYPE_DISTRIBUTED_POINTS*/
- result = false;
+ cm_helper_translate_curve_to_degamma_hw_format(tf,
+ &dpp_base->degamma_params);
+ dpp_base->funcs->dpp_program_degamma_pwl(dpp_base,
+ &dpp_base->degamma_params);
+ result = true;
}
return result;
diff --git a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
index 84581b3c392b..6b44ed3697a4 100644
--- a/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
+++ b/drivers/gpu/drm/amd/display/dc/dcn10/dcn10_resource.c
@@ -64,6 +64,7 @@
#include "reg_helper.h"
#include "dce/dce_abm.h"
#include "dce/dce_dmcu.h"
+#include "dce/dce_aux.h"
const struct _vcs_dpi_ip_params_st dcn1_0_ip = {
.rob_buffer_size_kbytes = 64,
@@ -356,6 +357,21 @@ static const struct dcn10_opp_mask opp_mask = {
OPP_MASK_SH_LIST_DCN10(_MASK),
};
+#define aux_engine_regs(id)\
+[id] = {\
+ AUX_COMMON_REG_LIST(id), \
+ .AUX_RESET_MASK = 0 \
+}
+
+static const struct dce110_aux_registers aux_engine_regs[] = {
+ aux_engine_regs(0),
+ aux_engine_regs(1),
+ aux_engine_regs(2),
+ aux_engine_regs(3),
+ aux_engine_regs(4),
+ aux_engine_regs(5)
+};
+
#define tf_regs(id)\
[id] = {\
TF_REG_LIST_DCN10(id),\
@@ -486,7 +502,7 @@ static const struct resource_caps res_cap = {
.num_pll = 4,
};
-static const struct dc_debug debug_defaults_drv = {
+static const struct dc_debug_options debug_defaults_drv = {
.sanity_checks = true,
.disable_dmcu = true,
.force_abm_enable = false,
@@ -514,7 +530,7 @@ static const struct dc_debug debug_defaults_drv = {
.max_downscale_src_width = 3840,
};
-static const struct dc_debug debug_defaults_diags = {
+static const struct dc_debug_options debug_defaults_diags = {
.disable_dmcu = true,
.force_abm_enable = false,
.timing_trace = true,
@@ -578,6 +594,23 @@ static struct output_pixel_processor *dcn10_opp_create(
return &opp->base;
}
+struct aux_engine *dcn10_aux_engine_create(
+ struct dc_context *ctx,
+ uint32_t inst)
+{
+ struct aux_engine_dce110 *aux_engine =
+ kzalloc(sizeof(struct aux_engine_dce110), GFP_KERNEL);
+
+ if (!aux_engine)
+ return NULL;
+
+ dce110_aux_engine_construct(aux_engine, ctx, inst,
+ SW_AUX_TIMEOUT_PERIOD_MULTIPLIER * AUX_TIMEOUT_PERIOD,
+ &aux_engine_regs[inst]);
+
+ return &aux_engine->base;
+}
+
static struct mpc *dcn10_mpc_create(struct dc_context *ctx)
{
struct dcn10_mpc *mpc10 = kzalloc(sizeof(struct dcn10_mpc),
@@ -826,6 +859,9 @@ static void destruct(struct dcn10_resource_pool *pool)
kfree(DCN10TG_FROM_TG(pool->base.timing_generators[i]));
pool->base.timing_generators[i] = NULL;
}
+
+ if (pool->base.engines[i] != NULL)
+ pool->base.engines[i]->funcs->destroy_engine(&pool->base.engines[i]);
}
for (i = 0; i < pool->base.stream_enc_count; i++)
@@ -1091,6 +1127,8 @@ static bool construct(
dc->caps.max_slave_planes = 1;
dc->caps.is_apu = true;
dc->caps.post_blend_color_processing = false;
+ /* Raven DP PHY HBR2 eye diagram pattern is not stable. Use TP4 */
+ dc->caps.force_dp_tps4_for_cp2520 = true;
if (dc->ctx->dce_environment == DCE_ENV_PRODUCTION_DRV)
dc->debug = debug_defaults_drv;
@@ -1255,6 +1293,14 @@ static bool construct(
goto fail;
}
+ pool->base.engines[i] = dcn10_aux_engine_create(ctx, i);
+ if (pool->base.engines[i] == NULL) {
+ BREAK_TO_DEBUGGER();
+ dm_error(
+ "DC:failed to create aux engine!!\n");
+ goto fail;
+ }
+
/* check next valid pipe */
j++;
}
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/Makefile b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
index b9d9930a4974..562ee189d780 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/gpio/Makefile
@@ -61,7 +61,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_GPIO_DCE120)
###############################################################################
# DCN 1x
###############################################################################
-ifdef CONFIG_X86
+ifdef CONFIG_DRM_AMD_DC_DCN1_0
GPIO_DCN10 = hw_translate_dcn10.o hw_factory_dcn10.o
AMD_DAL_GPIO_DCN10 = $(addprefix $(AMDDALPATH)/dc/gpio/dcn10/,$(GPIO_DCN10))
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
index 83df779984e5..0caee3523017 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_factory.c
@@ -43,7 +43,7 @@
#include "dce80/hw_factory_dce80.h"
#include "dce110/hw_factory_dce110.h"
#include "dce120/hw_factory_dce120.h"
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "dcn10/hw_factory_dcn10.h"
#endif
@@ -81,7 +81,7 @@ bool dal_hw_factory_init(
case DCE_VERSION_12_0:
dal_hw_factory_dce120_init(factory);
return true;
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case DCN_VERSION_1_0:
dal_hw_factory_dcn10_init(factory);
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
index e7541310480b..55c707488541 100644
--- a/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
+++ b/drivers/gpu/drm/amd/display/dc/gpio/hw_translate.c
@@ -43,7 +43,7 @@
#include "dce80/hw_translate_dce80.h"
#include "dce110/hw_translate_dce110.h"
#include "dce120/hw_translate_dce120.h"
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "dcn10/hw_translate_dcn10.h"
#endif
@@ -78,7 +78,7 @@ bool dal_hw_translate_init(
case DCE_VERSION_12_0:
dal_hw_translate_dce120_init(translate);
return true;
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case DCN_VERSION_1_0:
dal_hw_translate_dcn10_init(translate);
return true;
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/Makefile b/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
index a851d07f0190..352885cb4d07 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/Makefile
@@ -71,7 +71,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_I2CAUX_DCE112)
###############################################################################
# DCN 1.0 family
###############################################################################
-ifdef CONFIG_X86
+ifdef CONFIG_DRM_AMD_DC_DCN1_0
I2CAUX_DCN1 = i2caux_dcn10.o
AMD_DAL_I2CAUX_DCN1 = $(addprefix $(AMDDALPATH)/dc/i2caux/dcn10/,$(I2CAUX_DCN1))
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/engine.h b/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
index 1e8a1585e401..b16fb1ff687d 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/engine.h
@@ -96,6 +96,7 @@ struct engine_funcs {
struct engine {
const struct engine_funcs *funcs;
+ uint32_t inst;
struct ddc *ddc;
struct dc_context *ctx;
};
diff --git a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
index f7ed355fc84f..9b0bcc6b769b 100644
--- a/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
+++ b/drivers/gpu/drm/amd/display/dc/i2caux/i2caux.c
@@ -59,7 +59,7 @@
#include "dce120/i2caux_dce120.h"
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "dcn10/i2caux_dcn10.h"
#endif
@@ -91,7 +91,7 @@ struct i2caux *dal_i2caux_create(
return dal_i2caux_dce100_create(ctx);
case DCE_VERSION_12_0:
return dal_i2caux_dce120_create(ctx);
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
case DCN_VERSION_1_0:
return dal_i2caux_dcn10_create(ctx);
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/core_types.h b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
index 4446652a9a9e..c0b9ca13393b 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/core_types.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/core_types.h
@@ -33,7 +33,7 @@
#include "dc_bios_types.h"
#include "mem_input.h"
#include "hubp.h"
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "mpc.h"
#endif
@@ -92,6 +92,7 @@ struct resource_context;
struct resource_funcs {
void (*destroy)(struct resource_pool **pool);
+ void (*link_init)(struct dc_link *link);
struct link_encoder *(*link_enc_create)(
const struct encoder_init_data *init);
@@ -138,7 +139,7 @@ struct resource_pool {
struct output_pixel_processor *opps[MAX_PIPES];
struct timing_generator *timing_generators[MAX_PIPES];
struct stream_encoder *stream_enc[MAX_PIPES * 2];
-
+ struct aux_engine *engines[MAX_PIPES];
struct hubbub *hubbub;
struct mpc *mpc;
struct pp_smu_funcs_rv *pp_smu;
@@ -221,7 +222,7 @@ struct pipe_ctx {
struct pipe_ctx *top_pipe;
struct pipe_ctx *bottom_pipe;
-#ifdef CONFIG_X86
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
struct _vcs_dpi_display_dlg_regs_st dlg_regs;
struct _vcs_dpi_display_ttu_regs_st ttu_regs;
struct _vcs_dpi_display_rq_regs_st rq_regs;
@@ -276,7 +277,7 @@ struct dc_state {
/* Note: these are big structures, do *not* put on stack! */
struct dm_pp_display_configuration pp_display_cfg;
-#ifdef CONFIG_X86
+#ifdef CONFIG_DRM_AMD_DC_DCN1_0
struct dcn_bw_internal_vars dcn_bw_vars;
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
index 2f783c650084..a37255c757e0 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/dc_link_dp.h
@@ -33,9 +33,10 @@ struct dc_link;
struct dc_stream_state;
struct dc_link_settings;
-bool dp_hbr_verify_link_cap(
+bool dp_verify_link_cap(
struct dc_link *link,
- struct dc_link_settings *known_limit_link_setting);
+ struct dc_link_settings *known_limit_link_setting,
+ int *fail_count);
bool dp_validate_mode_timing(
struct dc_link *link,
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
new file mode 100644
index 000000000000..e79cd4e92919
--- /dev/null
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/aux_engine.h
@@ -0,0 +1,180 @@
+/*
+ * Copyright 2012-15 Advanced Micro Devices, Inc.
+ *
+ * Permission is hereby granted, free of charge, to any person obtaining a
+ * copy of this software and associated documentation files (the "Software"),
+ * to deal in the Software without restriction, including without limitation
+ * the rights to use, copy, modify, merge, publish, distribute, sublicense,
+ * and/or sell copies of the Software, and to permit persons to whom the
+ * Software is furnished to do so, subject to the following conditions:
+ *
+ * The above copyright notice and this permission notice shall be included in
+ * all copies or substantial portions of the Software.
+ *
+ * THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
+ * IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
+ * FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL
+ * THE COPYRIGHT HOLDER(S) OR AUTHOR(S) BE LIABLE FOR ANY CLAIM, DAMAGES OR
+ * OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE,
+ * ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
+ * OTHER DEALINGS IN THE SOFTWARE.
+ *
+ * Authors: AMD
+ *
+ */
+
+#ifndef __DAL_AUX_ENGINE_H__
+#define __DAL_AUX_ENGINE_H__
+
+#include "dc_ddc_types.h"
+#include "include/i2caux_interface.h"
+
+enum i2caux_transaction_operation {
+ I2CAUX_TRANSACTION_READ,
+ I2CAUX_TRANSACTION_WRITE
+};
+
+enum i2caux_transaction_address_space {
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_I2C = 1,
+ I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD
+};
+
+struct i2caux_transaction_payload {
+ enum i2caux_transaction_address_space address_space;
+ uint32_t address;
+ uint32_t length;
+ uint8_t *data;
+};
+
+enum i2caux_transaction_status {
+ I2CAUX_TRANSACTION_STATUS_UNKNOWN = (-1L),
+ I2CAUX_TRANSACTION_STATUS_SUCCEEDED,
+ I2CAUX_TRANSACTION_STATUS_FAILED_CHANNEL_BUSY,
+ I2CAUX_TRANSACTION_STATUS_FAILED_TIMEOUT,
+ I2CAUX_TRANSACTION_STATUS_FAILED_PROTOCOL_ERROR,
+ I2CAUX_TRANSACTION_STATUS_FAILED_NACK,
+ I2CAUX_TRANSACTION_STATUS_FAILED_INCOMPLETE,
+ I2CAUX_TRANSACTION_STATUS_FAILED_OPERATION,
+ I2CAUX_TRANSACTION_STATUS_FAILED_INVALID_OPERATION,
+ I2CAUX_TRANSACTION_STATUS_FAILED_BUFFER_OVERFLOW,
+ I2CAUX_TRANSACTION_STATUS_FAILED_HPD_DISCON
+};
+
+struct i2caux_transaction_request {
+ enum i2caux_transaction_operation operation;
+ struct i2caux_transaction_payload payload;
+ enum i2caux_transaction_status status;
+};
+
+enum i2caux_engine_type {
+ I2CAUX_ENGINE_TYPE_UNKNOWN = (-1L),
+ I2CAUX_ENGINE_TYPE_AUX,
+ I2CAUX_ENGINE_TYPE_I2C_DDC_HW,
+ I2CAUX_ENGINE_TYPE_I2C_GENERIC_HW,
+ I2CAUX_ENGINE_TYPE_I2C_SW
+};
+
+enum i2c_default_speed {
+ I2CAUX_DEFAULT_I2C_HW_SPEED = 50,
+ I2CAUX_DEFAULT_I2C_SW_SPEED = 50
+};
+
+union aux_config;
+
+struct aux_engine {
+ uint32_t inst;
+ struct ddc *ddc;
+ struct dc_context *ctx;
+ const struct aux_engine_funcs *funcs;
+ /* following values are expressed in milliseconds */
+ uint32_t delay;
+ uint32_t max_defer_write_retry;
+ bool acquire_reset;
+};
+
+struct read_command_context {
+ uint8_t *buffer;
+ uint32_t current_read_length;
+ uint32_t offset;
+ enum i2caux_transaction_status status;
+
+ struct aux_request_transaction_data request;
+ struct aux_reply_transaction_data reply;
+
+ uint8_t returned_byte;
+
+ uint32_t timed_out_retry_aux;
+ uint32_t invalid_reply_retry_aux;
+ uint32_t defer_retry_aux;
+ uint32_t defer_retry_i2c;
+ uint32_t invalid_reply_retry_aux_on_ack;
+
+ bool transaction_complete;
+ bool operation_succeeded;
+};
+
+struct write_command_context {
+ bool mot;
+
+ uint8_t *buffer;
+ uint32_t current_write_length;
+ enum i2caux_transaction_status status;
+
+ struct aux_request_transaction_data request;
+ struct aux_reply_transaction_data reply;
+
+ uint8_t returned_byte;
+
+ uint32_t timed_out_retry_aux;
+ uint32_t invalid_reply_retry_aux;
+ uint32_t defer_retry_aux;
+ uint32_t defer_retry_i2c;
+ uint32_t max_defer_retry;
+ uint32_t ack_m_retry;
+
+ uint8_t reply_data[DEFAULT_AUX_MAX_DATA_SIZE];
+
+ bool transaction_complete;
+ bool operation_succeeded;
+};
+
+
+struct aux_engine_funcs {
+ void (*destroy)(
+ struct aux_engine **ptr);
+ bool (*acquire_engine)(
+ struct aux_engine *engine);
+ void (*configure)(
+ struct aux_engine *engine,
+ union aux_config cfg);
+ void (*submit_channel_request)(
+ struct aux_engine *engine,
+ struct aux_request_transaction_data *request);
+ void (*process_channel_reply)(
+ struct aux_engine *engine,
+ struct aux_reply_transaction_data *reply);
+ int (*read_channel_reply)(
+ struct aux_engine *engine,
+ uint32_t size,
+ uint8_t *buffer,
+ uint8_t *reply_result,
+ uint32_t *sw_status);
+ enum aux_channel_operation_result (*get_channel_status)(
+ struct aux_engine *engine,
+ uint8_t *returned_bytes);
+ bool (*is_engine_available)(struct aux_engine *engine);
+ enum i2caux_engine_type (*get_engine_type)(
+ const struct aux_engine *engine);
+ bool (*acquire)(
+ struct aux_engine *engine,
+ struct ddc *ddc);
+ bool (*submit_request)(
+ struct aux_engine *engine,
+ struct i2caux_transaction_request *request,
+ bool middle_of_transaction);
+ void (*release_engine)(
+ struct aux_engine *engine);
+ void (*destroy_engine)(
+ struct aux_engine **engine);
+};
+#endif
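
For orientation, a hedged sketch of how a caller is expected to drive the aux_engine_funcs vtable above for a single DPCD read: acquire the engine for a DDC line, submit one complete transaction, release. Everything outside the header (the helper name, the reduced error handling) is invented for illustration:

#include "aux_engine.h"

/* Hypothetical helper: one DPCD read through an AUX engine. */
static bool example_dpcd_read(struct aux_engine *engine, struct ddc *ddc,
			      uint32_t address, uint8_t *data, uint32_t length)
{
	struct i2caux_transaction_request request = {
		.operation = I2CAUX_TRANSACTION_READ,
		.payload = {
			.address_space = I2CAUX_TRANSACTION_ADDRESS_SPACE_DPCD,
			.address = address,
			.length = length,
			.data = data,
		},
	};
	bool ok;

	if (!engine->funcs->acquire(engine, ddc))
		return false;

	/* middle_of_transaction = false: a complete, self-contained request */
	ok = engine->funcs->submit_request(engine, &request, false);

	engine->funcs->release_engine(engine);

	return ok && request.status == I2CAUX_TRANSACTION_STATUS_SUCCEEDED;
}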
diff --git a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
index de60f940030d..4550747fb61c 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/hw/dmcu.h
@@ -48,7 +48,7 @@ struct dmcu_funcs {
const char *src,
unsigned int bytes);
void (*set_psr_enable)(struct dmcu *dmcu, bool enable, bool wait);
- void (*setup_psr)(struct dmcu *dmcu,
+ bool (*setup_psr)(struct dmcu *dmcu,
struct dc_link *link,
struct psr_context *psr_context);
void (*get_psr_state)(struct dmcu *dmcu, uint32_t *psr_state);
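
Since setup_psr() now returns bool (false when, per the dcn10 change earlier in this patch, the DMCU microcontroller is not running), a caller can propagate the failure instead of assuming PSR was configured. A minimal sketch with invented surrounding names:

/* Hypothetical call site, e.g. in a link-level PSR enable path. */
static bool example_enable_psr(struct dmcu *dmcu, struct dc_link *link,
			       struct psr_context *psr_context)
{
	/* setup_psr() now reports whether the DMCU accepted the setup. */
	if (!dmcu->funcs->setup_psr(dmcu, link, psr_context))
		return false;	/* e.g. microcontroller not running */

	dmcu->funcs->set_psr_enable(dmcu, true, false /* don't wait */);
	return true;
}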
diff --git a/drivers/gpu/drm/amd/display/dc/inc/resource.h b/drivers/gpu/drm/amd/display/dc/inc/resource.h
index e92facbd038f..5b321008b0b5 100644
--- a/drivers/gpu/drm/amd/display/dc/inc/resource.h
+++ b/drivers/gpu/drm/amd/display/dc/inc/resource.h
@@ -103,6 +103,11 @@ void resource_reference_clock_source(
const struct resource_pool *pool,
struct clock_source *clock_source);
+int resource_get_clock_source_reference(
+ struct resource_context *res_ctx,
+ const struct resource_pool *pool,
+ struct clock_source *clock_source);
+
bool resource_are_streams_timing_synchronizable(
struct dc_stream_state *stream1,
struct dc_stream_state *stream2);
diff --git a/drivers/gpu/drm/amd/display/dc/irq/Makefile b/drivers/gpu/drm/amd/display/dc/irq/Makefile
index a76ee600ecee..498515aad4a5 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/Makefile
+++ b/drivers/gpu/drm/amd/display/dc/irq/Makefile
@@ -60,7 +60,7 @@ AMD_DISPLAY_FILES += $(AMD_DAL_IRQ_DCE12)
###############################################################################
# DCN 1x
###############################################################################
-ifdef CONFIG_X86
+ifdef CONFIG_DRM_AMD_DC_DCN1_0
IRQ_DCN1 = irq_service_dcn10.o
AMD_DAL_IRQ_DCN1 = $(addprefix $(AMDDALPATH)/dc/irq/dcn10/,$(IRQ_DCN1))
diff --git a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
index ae3fd0a235ba..604bea01fc13 100644
--- a/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
+++ b/drivers/gpu/drm/amd/display/dc/irq/irq_service.c
@@ -36,7 +36,7 @@
#include "dce120/irq_service_dce120.h"
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include "dcn10/irq_service_dcn10.h"
#endif
diff --git a/drivers/gpu/drm/amd/display/dc/os_types.h b/drivers/gpu/drm/amd/display/dc/os_types.h
index c9fce9066ad8..a407892905af 100644
--- a/drivers/gpu/drm/amd/display/dc/os_types.h
+++ b/drivers/gpu/drm/amd/display/dc/os_types.h
@@ -48,7 +48,7 @@
#define dm_error(fmt, ...) DRM_ERROR(fmt, ##__VA_ARGS__)
-#ifdef CONFIG_X86
+#if defined(CONFIG_DRM_AMD_DC_DCN1_0)
#include <asm/fpu/api.h>
#endif
diff --git a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
index 36bbad594267..f312834fef50 100644
--- a/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
+++ b/drivers/gpu/drm/amd/display/include/grph_object_ctrl_defs.h
@@ -395,6 +395,8 @@ struct integrated_info {
struct i2c_reg_info dp3_ext_hdmi_reg_settings[9];
unsigned char dp3_ext_hdmi_6g_reg_num;
struct i2c_reg_info dp3_ext_hdmi_6g_reg_settings[3];
+ /* V11 */
+ uint32_t dp_ss_control;
};
/**
diff --git a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
index ee69c949bfbf..bf29733958c3 100644
--- a/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
+++ b/drivers/gpu/drm/amd/display/modules/color/color_gamma.c
@@ -997,7 +997,9 @@ static void scale_user_regamma_ramp(struct pwl_float_data *pwl_rgb,
* norm_y = 4095*regamma_y, and index is just truncating to nearest integer
* lut1 = lut1D[index], lut2 = lut1D[index+1]
*
- *adjustedY is then linearly interpolating regamma Y between lut1 and lut2
+ * adjustedY is then linearly interpolating regamma Y between lut1 and lut2
+ *
+ * Custom degamma on Linux uses the same interpolation math, so it is handled here
*/
static void apply_lut_1d(
const struct dc_gamma *ramp,
@@ -1018,7 +1020,7 @@ static void apply_lut_1d(
struct fixed31_32 delta_lut;
struct fixed31_32 delta_index;
- if (ramp->type != GAMMA_CS_TFM_1D)
+ if (ramp->type != GAMMA_CS_TFM_1D && ramp->type != GAMMA_CUSTOM)
return; // this is not expected
for (i = 0; i < num_hw_points; i++) {
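
The interpolation the comment above describes, reduced to plain C on doubles for illustration (the driver itself works in fixed31_32 fixed point); with GAMMA_CUSTOM now accepted alongside GAMMA_CS_TFM_1D, custom degamma ramps go through this same math:

#include <stdio.h>

/* Linearly interpolate a 4096-entry 1D LUT at normalized input y in [0,1],
 * following the scheme in the comment above:
 *   norm_y = 4095 * y, index = trunc(norm_y),
 *   adjustedY = lut[index] + frac * (lut[index + 1] - lut[index])       */
static double apply_lut_1d_example(const double lut[4096], double y)
{
	double norm_y = 4095.0 * y;
	unsigned int index = (unsigned int)norm_y;	/* truncate */
	double frac = norm_y - index;

	if (index >= 4095)
		return lut[4095];

	return lut[index] + frac * (lut[index + 1] - lut[index]);
}

int main(void)
{
	double identity[4096];
	int i;

	for (i = 0; i < 4096; i++)
		identity[i] = i / 4095.0;

	/* An identity LUT returns its input unchanged. */
	printf("%f\n", apply_lut_1d_example(identity, 0.3));
	return 0;
}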
@@ -1636,7 +1638,9 @@ bool mod_color_calculate_degamma_params(struct dc_transfer_func *input_tf,
map_regamma_hw_to_x_user(ramp, coeff, rgb_user,
coordinates_x, axix_x, curve,
MAX_HW_POINTS, tf_pts,
- mapUserRamp);
+ mapUserRamp && ramp->type != GAMMA_CUSTOM);
+ if (ramp->type == GAMMA_CUSTOM)
+ apply_lut_1d(ramp, MAX_HW_POINTS, tf_pts);
ret = true;
diff --git a/drivers/gpu/drm/amd/include/atomfirmware.h b/drivers/gpu/drm/amd/include/atomfirmware.h
index 33b4de4ad66e..4bc118df3bc4 100644
--- a/drivers/gpu/drm/amd/include/atomfirmware.h
+++ b/drivers/gpu/drm/amd/include/atomfirmware.h
@@ -1074,7 +1074,7 @@ struct atom_integrated_system_info_v1_11
uint16_t dpphy_override; // bit vector, enum of atom_sysinfo_dpphy_override_def
uint16_t lvds_misc; // enum of atom_sys_info_lvds_misc_def
uint16_t backlight_pwm_hz; // pwm frequency in hz
- uint8_t memorytype; // enum of atom_sys_mem_type
+ uint8_t memorytype; // enum of atom_dmi_t17_mem_type_def, APU memory type indication.
uint8_t umachannelnumber; // number of memory channels
uint8_t pwr_on_digon_to_de; /* all pwr sequence numbers below are in uint of 4ms */
uint8_t pwr_on_de_to_vary_bl;
@@ -1084,18 +1084,25 @@ struct atom_integrated_system_info_v1_11
uint8_t pwr_on_vary_bl_to_blon;
uint8_t pwr_down_bloff_to_vary_bloff;
uint8_t min_allowed_bl_level;
+ uint8_t htc_hyst_limit;
+ uint8_t htc_tmp_limit;
+ uint8_t reserved1;
+ uint8_t reserved2;
struct atom_external_display_connection_info extdispconninfo;
struct atom_14nm_dpphy_dvihdmi_tuningset dvi_tuningset;
struct atom_14nm_dpphy_dvihdmi_tuningset hdmi_tuningset;
struct atom_14nm_dpphy_dvihdmi_tuningset hdmi6g_tuningset;
- struct atom_14nm_dpphy_dp_tuningset dp_tuningset;
- struct atom_14nm_dpphy_dp_tuningset dp_hbr3_tuningset;
+ struct atom_14nm_dpphy_dp_tuningset dp_tuningset; // rbr 1.62G dp tuning set
+ struct atom_14nm_dpphy_dp_tuningset dp_hbr3_tuningset; // HBR3 dp tuning set
struct atom_camera_data camera_info;
struct atom_hdmi_retimer_redriver_set dp0_retimer_set; //for DP0
struct atom_hdmi_retimer_redriver_set dp1_retimer_set; //for DP1
struct atom_hdmi_retimer_redriver_set dp2_retimer_set; //for DP2
struct atom_hdmi_retimer_redriver_set dp3_retimer_set; //for DP3
- uint32_t reserved[108];
+ struct atom_14nm_dpphy_dp_tuningset dp_hbr_tuningset; //hbr 2.7G dp tuning set
+ struct atom_14nm_dpphy_dp_tuningset dp_hbr2_tuningset; //hbr2 5.4G dp tuning set
+ struct atom_14nm_dpphy_dp_tuningset edp_tuningset; //edp tuning set
+ uint32_t reserved[66];
};
diff --git a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
index 5733fbee07f7..14391b06080c 100644
--- a/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
+++ b/drivers/gpu/drm/amd/include/kgd_kfd_interface.h
@@ -47,6 +47,17 @@ enum kfd_preempt_type {
KFD_PREEMPT_TYPE_WAVEFRONT_RESET,
};
+struct kfd_vm_fault_info {
+ uint64_t page_addr;
+ uint32_t vmid;
+ uint32_t mc_id;
+ uint32_t status;
+ bool prot_valid;
+ bool prot_read;
+ bool prot_write;
+ bool prot_exec;
+};
+
struct kfd_cu_info {
uint32_t num_shader_engines;
uint32_t num_shader_arrays_per_engine;
@@ -259,6 +270,21 @@ struct tile_config {
* IB to the corresponding ring (ring type). The IB is executed with the
* specified VMID in a user mode context.
*
+ * @get_vm_fault_info: Return information about a recent VM fault on
+ * GFXv7 and v8. If multiple VM faults occurred since the last call to
+ * this function, it will return information about the first of those
+ * faults. On GFXv9, VM fault information is fully contained in the IH
+ * packet and this function is not needed.
+ *
+ * @read_vmid_from_vmfault_reg: On Hawaii the VMID is not set in the
+ * IH ring entry. This function allows the KFD ISR to get the VMID
+ * from the fault status register as early as possible.
+ *
+ * @gpu_recover: let kgd reset the gpu after kfd detects a CPC hang
+ *
+ * @set_compute_idle: Indicates that compute is idle on a device. This
+ * can be used to change power profiles depending on compute activity.
+ *
* This structure contains function pointers to services that the kgd driver
* provides to amdkfd driver.
*
@@ -374,6 +400,14 @@ struct kfd2kgd_calls {
int (*submit_ib)(struct kgd_dev *kgd, enum kgd_engine_type engine,
uint32_t vmid, uint64_t gpu_addr,
uint32_t *ib_cmd, uint32_t ib_len);
+
+ int (*get_vm_fault_info)(struct kgd_dev *kgd,
+ struct kfd_vm_fault_info *info);
+ uint32_t (*read_vmid_from_vmfault_reg)(struct kgd_dev *kgd);
+
+ void (*gpu_recover)(struct kgd_dev *kgd);
+
+ void (*set_compute_idle)(struct kgd_dev *kgd, bool idle);
};
/**
@@ -399,6 +433,10 @@ struct kfd2kgd_calls {
* @schedule_evict_and_restore_process: Schedules work queue that will prepare
* for safe eviction of KFD BOs that belong to the specified process.
*
+ * @pre_reset: Notifies amdkfd that amdgpu is about to reset the gpu
+ *
+ * @post_reset: Notifies amdkfd that amdgpu successfully reset the gpu
+ *
* This structure contains function callback pointers so the kgd driver
* will notify to the amdkfd about certain status changes.
*
@@ -417,6 +455,8 @@ struct kgd2kfd_calls {
int (*resume_mm)(struct mm_struct *mm);
int (*schedule_evict_and_restore_process)(struct mm_struct *mm,
struct dma_fence *fence);
+ int (*pre_reset)(struct kfd_dev *kfd);
+ int (*post_reset)(struct kfd_dev *kfd);
};
int kgd2kfd_init(unsigned interface_version,
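To make the new interface concrete, here is a minimal sketch, not the in-tree ISR, of how amdkfd might consume the two fault-related callbacks on GFXv7/v8; the kfd->kgd and kfd->kfd2kgd fields follow existing amdkfd conventions but are assumptions here:

    /* Hedged sketch: consuming the new fault callbacks on GFXv7/v8. */
    static void example_report_vm_fault(struct kfd_dev *kfd)
    {
        struct kfd_vm_fault_info info;
        uint32_t vmid;

        /* Hawaii: the IH ring entry carries no VMID, so read it from
         * the fault status register as early as possible. */
        vmid = kfd->kfd2kgd->read_vmid_from_vmfault_reg(kfd->kgd);

        /* Returns the first fault if several occurred since the last
         * call; on GFXv9 the IH packet already carries all of this. */
        if (!kfd->kfd2kgd->get_vm_fault_info(kfd->kgd, &info))
            pr_debug("VM fault at 0x%llx, vmid %u (r=%d w=%d x=%d)\n",
                     info.page_addr, vmid, info.prot_read,
                     info.prot_write, info.prot_exec);
    }

The kgd2kfd pre_reset()/post_reset() pair, by contrast, is simply invoked by amdgpu around a GPU reset so amdkfd can quiesce beforehand and resume afterwards.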
diff --git a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
index 75c208283e5f..7a646f94b478 100644
--- a/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
+++ b/drivers/gpu/drm/amd/powerplay/amd_powerplay.c
@@ -998,7 +998,7 @@ static int pp_get_display_power_level(void *handle,
static int pp_get_current_clocks(void *handle,
struct amd_pp_clock_info *clocks)
{
- struct amd_pp_simple_clock_info simple_clocks;
+ struct amd_pp_simple_clock_info simple_clocks = { 0 };
struct pp_clock_info hw_clocks;
struct pp_hwmgr *hwmgr = handle;
int ret = 0;
@@ -1034,7 +1034,10 @@ static int pp_get_current_clocks(void *handle,
clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
clocks->min_engine_clock_in_sr = hw_clocks.min_eng_clk;
- clocks->max_clocks_state = simple_clocks.level;
+ if (simple_clocks.level == 0)
+ clocks->max_clocks_state = PP_DAL_POWERLEVEL_7;
+ else
+ clocks->max_clocks_state = simple_clocks.level;
if (0 == phm_get_current_shallow_sleep_clocks(hwmgr, &hwmgr->current_ps->hardware, &hw_clocks)) {
clocks->max_engine_clock_in_sr = hw_clocks.max_eng_clk;
@@ -1137,6 +1140,8 @@ static int pp_get_display_mode_validation_clocks(void *handle,
if (!hwmgr || !hwmgr->pm_en ||!clocks)
return -EINVAL;
+ clocks->level = PP_DAL_POWERLEVEL_7;
+
mutex_lock(&hwmgr->smu_lock);
if (phm_cap_enabled(hwmgr->platform_descriptor.platformCaps, PHM_PlatformCaps_DynamicPatchPowerState))
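The two amd_powerplay.c hunks above follow one defensive pattern: zero-initialize the on-stack query struct so a failed or partial query cannot leak stack garbage to DAL, and fall back to the highest power level when none was reported. Condensed, with names taken from the diff:

    /* Zero-init, then default to the highest DAL power level. */
    struct amd_pp_simple_clock_info simple_clocks = { 0 };

    clocks->max_clocks_state = simple_clocks.level ?
            simple_clocks.level : PP_DAL_POWERLEVEL_7;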
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
index 53207e76b0f3..6ef3c875fedd 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/hardwaremanager.c
@@ -75,10 +75,12 @@ int phm_set_power_state(struct pp_hwmgr *hwmgr,
int phm_enable_dynamic_state_management(struct pp_hwmgr *hwmgr)
{
+ struct amdgpu_device *adev = NULL;
int ret = -EINVAL;;
PHM_FUNC_CHECK(hwmgr);
+ adev = hwmgr->adev;
- if (smum_is_dpm_running(hwmgr)) {
+ if (smum_is_dpm_running(hwmgr) && !amdgpu_passthrough(adev)) {
pr_info("dpm has been enabled\n");
return 0;
}
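Context for the guard above: in a pass-through (virtualized) configuration the SMU can still report DPM as running from a previous guest, so the early return is skipped and dynamic state management is re-enabled from scratch; amdgpu_passthrough() is the existing helper that tests the device's virtualization caps for pass-through mode.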
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
index c952845833d7..5e19f5977eb1 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu7_powertune.c
@@ -403,6 +403,49 @@ static const struct gpu_pt_config_reg DIDTConfig_Polaris12[] = {
{ ixDIDT_SQ_CTRL1, DIDT_SQ_CTRL1__MAX_POWER_MASK, DIDT_SQ_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__UNUSED_0_MASK, DIDT_SQ_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL_OCP, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_SQ_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__MAX_POWER_DELTA_MASK, DIDT_SQ_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_0_MASK, DIDT_SQ_CTRL2__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE_MASK, DIDT_SQ_CTRL2__SHORT_TERM_INTERVAL_SIZE__SHIFT, 0x005a, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_1_MASK, DIDT_SQ_CTRL2__UNUSED_1__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO_MASK, DIDT_SQ_CTRL2__LONG_TERM_INTERVAL_RATIO__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL2, DIDT_SQ_CTRL2__UNUSED_2_MASK, DIDT_SQ_CTRL2__UNUSED_2__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_CTRL_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_HI__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO_MASK, DIDT_SQ_STALL_CTRL__DIDT_STALL_DELAY_LO__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD_MASK, DIDT_SQ_STALL_CTRL__DIDT_HI_POWER_THRESHOLD__SHIFT, 0x0ebb, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_STALL_CTRL, DIDT_SQ_STALL_CTRL__UNUSED_0_MASK, DIDT_SQ_STALL_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE_MASK, DIDT_SQ_TUNING_CTRL__DIDT_TUNING_ENABLE__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_HI__SHIFT, 0x3853, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO_MASK, DIDT_SQ_TUNING_CTRL__MAX_POWER_DELTA_LO__SHIFT, 0x3153, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_TUNING_CTRL, DIDT_SQ_TUNING_CTRL__UNUSED_0_MASK, DIDT_SQ_TUNING_CTRL__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_EN_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_EN__SHIFT, 0x0001, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__USE_REF_CLOCK_MASK, DIDT_SQ_CTRL0__USE_REF_CLOCK__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__PHASE_OFFSET_MASK, DIDT_SQ_CTRL0__PHASE_OFFSET__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CTRL_RST_MASK, DIDT_SQ_CTRL0__DIDT_CTRL_RST__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE_MASK, DIDT_SQ_CTRL0__DIDT_CLK_EN_OVERRIDE__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_HI__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO_MASK, DIDT_SQ_CTRL0__DIDT_MAX_STALLS_ALLOWED_LO__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_SQ_CTRL0, DIDT_SQ_CTRL0__UNUSED_0_MASK, DIDT_SQ_CTRL0__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT0_MASK, DIDT_TD_WEIGHT0_3__WEIGHT0__SHIFT, 0x000a, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT1_MASK, DIDT_TD_WEIGHT0_3__WEIGHT1__SHIFT, 0x0010, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT2_MASK, DIDT_TD_WEIGHT0_3__WEIGHT2__SHIFT, 0x0017, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT0_3, DIDT_TD_WEIGHT0_3__WEIGHT3_MASK, DIDT_TD_WEIGHT0_3__WEIGHT3__SHIFT, 0x002f, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT4_MASK, DIDT_TD_WEIGHT4_7__WEIGHT4__SHIFT, 0x0046, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT5_MASK, DIDT_TD_WEIGHT4_7__WEIGHT5__SHIFT, 0x005d, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT6_MASK, DIDT_TD_WEIGHT4_7__WEIGHT6__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_WEIGHT4_7, DIDT_TD_WEIGHT4_7__WEIGHT7_MASK, DIDT_TD_WEIGHT4_7__WEIGHT7__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MIN_POWER_MASK, DIDT_TD_CTRL1__MIN_POWER__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
+ { ixDIDT_TD_CTRL1, DIDT_TD_CTRL1__MAX_POWER_MASK, DIDT_TD_CTRL1__MAX_POWER__SHIFT, 0xffff, GPU_CONFIGREG_DIDT_IND },
+
+ { ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__UNUSED_0_MASK, DIDT_TD_CTRL_OCP__UNUSED_0__SHIFT, 0x0000, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TD_CTRL_OCP, DIDT_TD_CTRL_OCP__OCP_MAX_POWER_MASK, DIDT_TD_CTRL_OCP__OCP_MAX_POWER__SHIFT, 0x00ff, GPU_CONFIGREG_DIDT_IND },
{ ixDIDT_TD_CTRL2, DIDT_TD_CTRL2__MAX_POWER_DELTA_MASK, DIDT_TD_CTRL2__MAX_POWER_DELTA__SHIFT, 0x3fff, GPU_CONFIGREG_DIDT_IND },
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
index 288802f209dd..0adfc5392cd3 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/smu8_hwmgr.c
@@ -244,6 +244,7 @@ static int smu8_initialize_dpm_defaults(struct pp_hwmgr *hwmgr)
return 0;
}
+/* convert from 8-bit VID to real voltage in units of mV*4 */
static uint32_t smu8_convert_8Bit_index_to_voltage(
struct pp_hwmgr *hwmgr, uint16_t voltage)
{
@@ -1702,13 +1703,13 @@ static int smu8_read_sensor(struct pp_hwmgr *hwmgr, int idx,
case AMDGPU_PP_SENSOR_VDDNB:
tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_NB_CURRENTVID) &
CURRENT_NB_VID_MASK) >> CURRENT_NB_VID__SHIFT;
- vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp);
+ vddnb = smu8_convert_8Bit_index_to_voltage(hwmgr, tmp) / 4;
*((uint32_t *)value) = vddnb;
return 0;
case AMDGPU_PP_SENSOR_VDDGFX:
tmp = (cgs_read_ind_register(hwmgr->device, CGS_IND_REG__SMC, ixSMUSVI_GFX_CURRENTVID) &
CURRENT_GFX_VID_MASK) >> CURRENT_GFX_VID__SHIFT;
- vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp);
+ vddgfx = smu8_convert_8Bit_index_to_voltage(hwmgr, (u16)tmp) / 4;
*((uint32_t *)value) = vddgfx;
return 0;
case AMDGPU_PP_SENSOR_UVD_VCLK:
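The new /4 matters because, per the comment added above, smu8_convert_8Bit_index_to_voltage() reports voltage in units of mV*4. Worked example: a raw return value of 4400 corresponds to 4400 / 4 = 1100 mV, which is what the sensor interface is expected to hand back.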
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
index 1a0dccb3fac1..fb86c24394ff 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega10_hwmgr.c
@@ -289,7 +289,15 @@ static int vega10_odn_initial_default_setting(struct pp_hwmgr *hwmgr)
struct phm_ppt_v1_voltage_lookup_table *vddc_lookup_table;
struct phm_ppt_v1_clock_voltage_dependency_table *dep_table[3];
struct phm_ppt_v1_clock_voltage_dependency_table *od_table[3];
+ struct pp_atomfwctrl_avfs_parameters avfs_params = {0};
uint32_t i;
+ int result;
+
+ result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
+ if (!result) {
+ data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
+ data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
+ }
od_lookup_table = &odn_table->vddc_lookup_table;
vddc_lookup_table = table_info->vddc_lookup_table;
@@ -2072,9 +2080,6 @@ static int vega10_populate_avfs_parameters(struct pp_hwmgr *hwmgr)
if (data->smu_features[GNLD_AVFS].supported) {
result = pp_atomfwctrl_get_avfs_information(hwmgr, &avfs_params);
if (!result) {
- data->odn_dpm_table.max_vddc = avfs_params.ulMaxVddc;
- data->odn_dpm_table.min_vddc = avfs_params.ulMinVddc;
-
pp_table->MinVoltageVid = (uint8_t)
convert_to_vid((uint16_t)(avfs_params.ulMinVddc));
pp_table->MaxVoltageVid = (uint8_t)
@@ -3254,10 +3259,25 @@ static int vega10_populate_and_upload_sclk_mclk_dpm_levels(
{
int result = 0;
struct vega10_hwmgr *data = hwmgr->backend;
+ struct vega10_dpm_table *dpm_table = &data->dpm_table;
+ struct vega10_odn_dpm_table *odn_table = &data->odn_dpm_table;
+ struct vega10_odn_clock_voltage_dependency_table *odn_clk_table = &odn_table->vdd_dep_on_sclk;
+ int count;
if (!data->need_update_dpm_table)
return 0;
+ if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_SCLK) {
+ for (count = 0; count < dpm_table->gfx_table.count; count++)
+ dpm_table->gfx_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
+ }
+
+ odn_clk_table = &odn_table->vdd_dep_on_mclk;
+ if (hwmgr->od_enabled && data->need_update_dpm_table & DPMTABLE_OD_UPDATE_MCLK) {
+ for (count = 0; count < dpm_table->mem_table.count; count++)
+ dpm_table->mem_table.dpm_levels[count].value = odn_clk_table->entries[count].clk;
+ }
+
if (data->need_update_dpm_table &
(DPMTABLE_OD_UPDATE_SCLK + DPMTABLE_UPDATE_SCLK + DPMTABLE_UPDATE_SOCCLK)) {
result = vega10_populate_all_graphic_levels(hwmgr);
@@ -3705,7 +3725,7 @@ static void vega10_notify_smc_display_change(struct pp_hwmgr *hwmgr,
{
smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetUclkFastSwitch,
- has_disp ? 0 : 1);
+ has_disp ? 1 : 0);
}
int vega10_display_clock_voltage_request(struct pp_hwmgr *hwmgr,
@@ -3780,7 +3800,9 @@ static int vega10_notify_smc_display_config_after_ps_adjustment(
uint32_t i;
struct pp_display_clock_request clock_req;
- if (hwmgr->display_config->num_display > 1)
+ if ((hwmgr->display_config->num_display > 1) &&
+ !hwmgr->display_config->multi_monitor_in_sync &&
+ !hwmgr->display_config->nb_pstate_switch_disable)
vega10_notify_smc_display_change(hwmgr, false);
else
vega10_notify_smc_display_change(hwmgr, true);
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
index 4ed218dd8ba7..0789d64246ca 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_hwmgr.c
@@ -1334,7 +1334,7 @@ static int vega12_notify_smc_display_change(struct pp_hwmgr *hwmgr,
if (data->smu_features[GNLD_DPM_UCLK].enabled)
return smum_send_msg_to_smc_with_parameter(hwmgr,
PPSMC_MSG_SetUclkFastSwitch,
- has_disp ? 0 : 1);
+ has_disp ? 1 : 0);
return 0;
}
@@ -1389,7 +1389,8 @@ static int vega12_notify_smc_display_config_after_ps_adjustment(
struct pp_display_clock_request clock_req;
if ((hwmgr->display_config->num_display > 1) &&
- !hwmgr->display_config->multi_monitor_in_sync)
+ !hwmgr->display_config->multi_monitor_in_sync &&
+ !hwmgr->display_config->nb_pstate_switch_disable)
vega12_notify_smc_display_change(hwmgr, false);
else
vega12_notify_smc_display_change(hwmgr, true);
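The vega10 and vega12 hunks converge on the same predicate: UCLK fast switching is reported to the SMC as off (SetUclkFastSwitch parameter 0, after the polarity fix above) only when there are multiple displays that are neither synchronized nor barred from NB p-state switching. A sketch of the shared condition, using a hypothetical helper name and field names as they appear in the hunks:

    /* Hypothetical helper; mirrors the condition used in both hunks. */
    static bool multi_display_blocks_fast_switch(
            const struct amd_pp_display_configuration *dc)
    {
        return dc->num_display > 1 &&
               !dc->multi_monitor_in_sync &&
               !dc->nb_pstate_switch_disable;
    }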
diff --git a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
index f4f366b26fd1..cb3a5b1737c8 100644
--- a/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
+++ b/drivers/gpu/drm/amd/powerplay/hwmgr/vega12_processpptables.c
@@ -226,6 +226,8 @@ static int append_vbios_pptable(struct pp_hwmgr *hwmgr, PPTable_t *ppsmc_pptable
ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address;
+ ppsmc_pptable->Vr2_I2C_address = smc_dpm_table.Vr2_I2C_address;
+
return 0;
}
diff --git a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
index a4ce199af475..1276f168ff68 100644
--- a/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
+++ b/drivers/gpu/drm/amd/powerplay/smumgr/polaris10_smumgr.c
@@ -1204,7 +1204,6 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
(struct phm_ppt_v1_information *)(hwmgr->pptable);
SMIO_Pattern vol_level;
uint32_t mvdd;
- uint16_t us_mvdd;
table->ACPILevel.Flags &= ~PPSMC_SWSTATE_FLAG_DC;
@@ -1255,16 +1254,11 @@ static int polaris10_populate_smc_acpi_level(struct pp_hwmgr *hwmgr,
"in Clock Dependency Table",
);
- us_mvdd = 0;
- if ((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
- (data->mclk_dpm_key_disabled))
- us_mvdd = data->vbios_boot_state.mvdd_bootup_value;
- else {
- if (!polaris10_populate_mvdd_value(hwmgr,
+ if (!((SMU7_VOLTAGE_CONTROL_NONE == data->mvdd_control) ||
+ (data->mclk_dpm_key_disabled)))
+ polaris10_populate_mvdd_value(hwmgr,
data->dpm_table.mclk_table.dpm_levels[0].value,
- &vol_level))
- us_mvdd = vol_level.Voltage;
- }
+ &vol_level);
if (0 == polaris10_populate_mvdd_value(hwmgr, 0, &vol_level))
table->MemoryACPILevel.MinMvdd = PP_HOST_TO_SMC_UL(vol_level.Voltage);
@@ -1517,7 +1511,7 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
uint32_t ro, efuse, volt_without_cks, volt_with_cks, value, max, min;
struct polaris10_smumgr *smu_data = (struct polaris10_smumgr *)(hwmgr->smu_backend);
- uint8_t i, stretch_amount, stretch_amount2, volt_offset = 0;
+ uint8_t i, stretch_amount, volt_offset = 0;
struct phm_ppt_v1_information *table_info =
(struct phm_ppt_v1_information *)(hwmgr->pptable);
struct phm_ppt_v1_clock_voltage_dependency_table *sclk_table =
@@ -1568,11 +1562,7 @@ static int polaris10_populate_clock_stretcher_data_table(struct pp_hwmgr *hwmgr)
smu_data->smc_state_table.LdoRefSel = (table_info->cac_dtp_table->ucCKS_LDO_REFSEL != 0) ? table_info->cac_dtp_table->ucCKS_LDO_REFSEL : 6;
/* Populate CKS Lookup Table */
- if (stretch_amount == 1 || stretch_amount == 2 || stretch_amount == 5)
- stretch_amount2 = 0;
- else if (stretch_amount == 3 || stretch_amount == 4)
- stretch_amount2 = 1;
- else {
+ if (stretch_amount == 0 || stretch_amount > 5) {
phm_cap_unset(hwmgr->platform_descriptor.platformCaps,
PHM_PlatformCaps_ClockStretcher);
PP_ASSERT_WITH_CODE(false,
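The rewritten guard is behavior-preserving: stretch_amount2 was only ever derived from stretch_amount (0 for 1, 2 and 5; 1 for 3 and 4) and its last consumer is gone, so all that remains of the old if/else chain is the validity check itself, rejecting stretch_amount values of 0 or above 5 and dropping the ClockStretcher cap.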
diff --git a/drivers/gpu/drm/arm/hdlcd_crtc.c b/drivers/gpu/drm/arm/hdlcd_crtc.c
index f3f08cd6e9ef..e4d67b70244d 100644
--- a/drivers/gpu/drm/arm/hdlcd_crtc.c
+++ b/drivers/gpu/drm/arm/hdlcd_crtc.c
@@ -229,6 +229,8 @@ static const struct drm_crtc_helper_funcs hdlcd_crtc_helper_funcs = {
static int hdlcd_plane_atomic_check(struct drm_plane *plane,
struct drm_plane_state *state)
{
+ int i;
+ struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
u32 src_h = state->src_h >> 16;
@@ -238,20 +240,17 @@ static int hdlcd_plane_atomic_check(struct drm_plane *plane,
return -EINVAL;
}
- if (!state->fb || !state->crtc)
- return 0;
-
- crtc_state = drm_atomic_get_existing_crtc_state(state->state,
- state->crtc);
- if (!crtc_state) {
- DRM_DEBUG_KMS("Invalid crtc state\n");
- return -EINVAL;
+ for_each_new_crtc_in_state(state->state, crtc, crtc_state, i) {
+ /* we cannot disable the plane while the CRTC is active */
+ if (!state->fb && crtc_state->active)
+ return -EINVAL;
+ return drm_atomic_helper_check_plane_state(state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
}
- return drm_atomic_helper_check_plane_state(state, crtc_state,
- DRM_PLANE_HELPER_NO_SCALING,
- DRM_PLANE_HELPER_NO_SCALING,
- false, true);
+ return 0;
}
static void hdlcd_plane_atomic_update(struct drm_plane *plane,
@@ -280,16 +279,10 @@ static const struct drm_plane_helper_funcs hdlcd_plane_helper_funcs = {
.atomic_update = hdlcd_plane_atomic_update,
};
-static void hdlcd_plane_destroy(struct drm_plane *plane)
-{
- drm_plane_helper_disable(plane, NULL);
- drm_plane_cleanup(plane);
-}
-
static const struct drm_plane_funcs hdlcd_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
- .destroy = hdlcd_plane_destroy,
+ .destroy = drm_plane_cleanup,
.reset = drm_atomic_helper_plane_reset,
.atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
.atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
@@ -334,10 +327,8 @@ int hdlcd_setup_crtc(struct drm_device *drm)
ret = drm_crtc_init_with_planes(drm, &hdlcd->crtc, primary, NULL,
&hdlcd_crtc_funcs, NULL);
- if (ret) {
- hdlcd_plane_destroy(primary);
+ if (ret)
return ret;
- }
drm_crtc_helper_add(&hdlcd->crtc, &hdlcd_crtc_helper_funcs);
return 0;
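Two details worth noting in the rewritten hdlcd_plane_atomic_check(): the for_each_new_crtc_in_state() iterator replaces the old drm_atomic_get_existing_crtc_state() lookup, so the check always operates on the CRTC state belonging to this commit rather than the legacy pointers, and since hdlcd drives exactly one CRTC, returning from inside the first loop iteration is equivalent to checking "the" CRTC.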
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.c b/drivers/gpu/drm/arm/hdlcd_drv.c
index feaa8bc3d7b7..0ed1cde98cf8 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.c
+++ b/drivers/gpu/drm/arm/hdlcd_drv.c
@@ -27,6 +27,7 @@
#include <drm/drm_fb_cma_helper.h>
#include <drm/drm_gem_cma_helper.h>
#include <drm/drm_gem_framebuffer_helper.h>
+#include <drm/drm_modeset_helper.h>
#include <drm/drm_of.h>
#include "hdlcd_drv.h"
@@ -100,16 +101,9 @@ setup_fail:
return ret;
}
-static void hdlcd_fb_output_poll_changed(struct drm_device *drm)
-{
- struct hdlcd_drm_private *hdlcd = drm->dev_private;
-
- drm_fbdev_cma_hotplug_event(hdlcd->fbdev);
-}
-
static const struct drm_mode_config_funcs hdlcd_mode_config_funcs = {
.fb_create = drm_gem_fb_create,
- .output_poll_changed = hdlcd_fb_output_poll_changed,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
.atomic_check = drm_atomic_helper_check,
.atomic_commit = drm_atomic_helper_commit,
};
@@ -124,13 +118,6 @@ static void hdlcd_setup_mode_config(struct drm_device *drm)
drm->mode_config.funcs = &hdlcd_mode_config_funcs;
}
-static void hdlcd_lastclose(struct drm_device *drm)
-{
- struct hdlcd_drm_private *hdlcd = drm->dev_private;
-
- drm_fbdev_cma_restore_mode(hdlcd->fbdev);
-}
-
static irqreturn_t hdlcd_irq(int irq, void *arg)
{
struct drm_device *drm = arg;
@@ -246,7 +233,7 @@ static struct drm_driver hdlcd_driver = {
.driver_features = DRIVER_HAVE_IRQ | DRIVER_GEM |
DRIVER_MODESET | DRIVER_PRIME |
DRIVER_ATOMIC,
- .lastclose = hdlcd_lastclose,
+ .lastclose = drm_fb_helper_lastclose,
.irq_handler = hdlcd_irq,
.irq_preinstall = hdlcd_irq_preinstall,
.irq_postinstall = hdlcd_irq_postinstall,
@@ -321,14 +308,9 @@ static int hdlcd_drm_bind(struct device *dev)
drm_mode_config_reset(drm);
drm_kms_helper_poll_init(drm);
- hdlcd->fbdev = drm_fbdev_cma_init(drm, 32,
- drm->mode_config.num_connector);
-
- if (IS_ERR(hdlcd->fbdev)) {
- ret = PTR_ERR(hdlcd->fbdev);
- hdlcd->fbdev = NULL;
+ ret = drm_fb_cma_fbdev_init(drm, 32, 0);
+ if (ret)
goto err_fbdev;
- }
ret = drm_dev_register(drm, 0);
if (ret)
@@ -337,15 +319,13 @@ static int hdlcd_drm_bind(struct device *dev)
return 0;
err_register:
- if (hdlcd->fbdev) {
- drm_fbdev_cma_fini(hdlcd->fbdev);
- hdlcd->fbdev = NULL;
- }
+ drm_fb_cma_fbdev_fini(drm);
err_fbdev:
drm_kms_helper_poll_fini(drm);
err_vblank:
pm_runtime_disable(drm->dev);
err_pm_active:
+ drm_atomic_helper_shutdown(drm);
component_unbind_all(dev, drm);
err_unload:
of_node_put(hdlcd->crtc.port);
@@ -366,23 +346,23 @@ static void hdlcd_drm_unbind(struct device *dev)
struct hdlcd_drm_private *hdlcd = drm->dev_private;
drm_dev_unregister(drm);
- if (hdlcd->fbdev) {
- drm_fbdev_cma_fini(hdlcd->fbdev);
- hdlcd->fbdev = NULL;
- }
+ drm_fb_cma_fbdev_fini(drm);
drm_kms_helper_poll_fini(drm);
component_unbind_all(dev, drm);
of_node_put(hdlcd->crtc.port);
hdlcd->crtc.port = NULL;
- pm_runtime_get_sync(drm->dev);
+ pm_runtime_get_sync(dev);
+ drm_crtc_vblank_off(&hdlcd->crtc);
drm_irq_uninstall(drm);
- pm_runtime_put_sync(drm->dev);
- pm_runtime_disable(drm->dev);
- of_reserved_mem_device_release(drm->dev);
+ drm_atomic_helper_shutdown(drm);
+ pm_runtime_put(dev);
+ if (pm_runtime_enabled(dev))
+ pm_runtime_disable(dev);
+ of_reserved_mem_device_release(dev);
drm_mode_config_cleanup(drm);
- drm_dev_put(drm);
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
+ drm_dev_put(drm);
}
static const struct component_master_ops hdlcd_master_ops = {
@@ -427,35 +407,15 @@ MODULE_DEVICE_TABLE(of, hdlcd_of_match);
static int __maybe_unused hdlcd_pm_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;
-
- if (!hdlcd)
- return 0;
- drm_kms_helper_poll_disable(drm);
- drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 1);
-
- hdlcd->state = drm_atomic_helper_suspend(drm);
- if (IS_ERR(hdlcd->state)) {
- drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0);
- drm_kms_helper_poll_enable(drm);
- return PTR_ERR(hdlcd->state);
- }
-
- return 0;
+ return drm_mode_config_helper_suspend(drm);
}
static int __maybe_unused hdlcd_pm_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct hdlcd_drm_private *hdlcd = drm ? drm->dev_private : NULL;
-
- if (!hdlcd)
- return 0;
- drm_atomic_helper_resume(drm, hdlcd->state);
- drm_fbdev_cma_set_suspend_unlocked(hdlcd->fbdev, 0);
- drm_kms_helper_poll_enable(drm);
+ drm_mode_config_helper_resume(drm);
return 0;
}
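The conversion above works because drm_mode_config_helper_suspend()/_resume() encapsulate essentially the open-coded sequence being deleted: disable polling, suspend fbdev, save the atomic state in mode_config, and the reverse on resume. Roughly, as a simplified sketch of the suspend half rather than its verbatim body:

    /* Simplified sketch of what the suspend helper does. */
    int drm_mode_config_helper_suspend(struct drm_device *dev)
    {
        struct drm_atomic_state *state;

        drm_kms_helper_poll_disable(dev);
        drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1);
        state = drm_atomic_helper_suspend(dev);
        if (IS_ERR(state)) {
            /* Undo on failure so the device keeps working. */
            drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
            drm_kms_helper_poll_enable(dev);
            return PTR_ERR(state);
        }
        dev->mode_config.suspend_state = state;
        return 0;
    }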
diff --git a/drivers/gpu/drm/arm/hdlcd_drv.h b/drivers/gpu/drm/arm/hdlcd_drv.h
index 56f34dfff640..fd438d177b64 100644
--- a/drivers/gpu/drm/arm/hdlcd_drv.h
+++ b/drivers/gpu/drm/arm/hdlcd_drv.h
@@ -9,10 +9,8 @@
struct hdlcd_drm_private {
void __iomem *mmio;
struct clk *clk;
- struct drm_fbdev_cma *fbdev;
struct drm_crtc crtc;
struct drm_plane *plane;
- struct drm_atomic_state *state;
#ifdef CONFIG_DEBUG_FS
atomic_t buffer_underrun_count;
atomic_t bus_error_count;
diff --git a/drivers/gpu/drm/arm/malidp_drv.c b/drivers/gpu/drm/arm/malidp_drv.c
index 5b7260557391..08b5bb219816 100644
--- a/drivers/gpu/drm/arm/malidp_drv.c
+++ b/drivers/gpu/drm/arm/malidp_drv.c
@@ -616,6 +616,7 @@ static int malidp_bind(struct device *dev)
struct malidp_hw_device *hwdev;
struct platform_device *pdev = to_platform_device(dev);
struct of_device_id const *dev_id;
+ struct drm_encoder *encoder;
/* number of lines for the R, G and B output */
u8 output_width[MAX_OUTPUT_CHANNELS];
int ret = 0, i;
@@ -737,6 +738,15 @@ static int malidp_bind(struct device *dev)
goto bind_fail;
}
+ /* We expect to have a maximum of two encoders: one for the actual
+ * display and a virtual one for the writeback connector.
+ */
+ WARN_ON(drm->mode_config.num_encoder > 2);
+ list_for_each_entry(encoder, &drm->mode_config.encoder_list, head) {
+ encoder->possible_clones =
+ (1 << drm->mode_config.num_encoder) - 1;
+ }
+
ret = malidp_irq_init(pdev);
if (ret < 0)
goto irq_init_fail;
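The clone-mask arithmetic in the new loop: with num_encoder == 2, (1 << 2) - 1 == 0x3, so each encoder's possible_clones has a bit set for both encoders (including itself, as the DRM core expects), allowing the writeback encoder to be active at the same time as the display encoder.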
diff --git a/drivers/gpu/drm/arm/malidp_mw.c b/drivers/gpu/drm/arm/malidp_mw.c
index cfd718e7e97c..ba6ae66387c9 100644
--- a/drivers/gpu/drm/arm/malidp_mw.c
+++ b/drivers/gpu/drm/arm/malidp_mw.c
@@ -73,7 +73,7 @@ static void malidp_mw_connector_reset(struct drm_connector *connector)
static enum drm_connector_status
malidp_mw_connector_detect(struct drm_connector *connector, bool force)
{
- return connector_status_disconnected;
+ return connector_status_connected;
}
static void malidp_mw_connector_destroy(struct drm_connector *connector)
diff --git a/drivers/gpu/drm/arm/malidp_planes.c b/drivers/gpu/drm/arm/malidp_planes.c
index 29409a65d864..49c37f6dd63e 100644
--- a/drivers/gpu/drm/arm/malidp_planes.c
+++ b/drivers/gpu/drm/arm/malidp_planes.c
@@ -78,11 +78,8 @@ static void malidp_plane_reset(struct drm_plane *plane)
kfree(state);
plane->state = NULL;
state = kzalloc(sizeof(*state), GFP_KERNEL);
- if (state) {
- state->base.plane = plane;
- state->base.rotation = DRM_MODE_ROTATE_0;
- plane->state = &state->base;
- }
+ if (state)
+ __drm_atomic_helper_plane_reset(plane, &state->base);
}
static struct
diff --git a/drivers/gpu/drm/armada/Makefile b/drivers/gpu/drm/armada/Makefile
index ecf25cf9f9f5..9bc3c3213724 100644
--- a/drivers/gpu/drm/armada/Makefile
+++ b/drivers/gpu/drm/armada/Makefile
@@ -1,6 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
armada-y := armada_crtc.o armada_drv.o armada_fb.o armada_fbdev.o \
- armada_gem.o armada_overlay.o armada_trace.o
+ armada_gem.o armada_overlay.o armada_plane.o armada_trace.o
armada-y += armada_510.o
armada-$(CONFIG_DEBUG_FS) += armada_debugfs.o
diff --git a/drivers/gpu/drm/armada/armada_510.c b/drivers/gpu/drm/armada/armada_510.c
index 41a784f5a5e6..2f7c048c5361 100644
--- a/drivers/gpu/drm/armada/armada_510.c
+++ b/drivers/gpu/drm/armada/armada_510.c
@@ -27,6 +27,10 @@ static int armada510_crtc_init(struct armada_crtc *dcrtc, struct device *dev)
/* Lower the watermark so to eliminate jitter at higher bandwidths */
armada_updatel(0x20, (1 << 11) | 0xff, dcrtc->base + LCD_CFG_RDREG4F);
+ /* Initialise SPU register */
+ writel_relaxed(ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND,
+ dcrtc->base + LCD_SPU_ADV_REG);
+
return 0;
}
@@ -75,9 +79,27 @@ static int armada510_crtc_compute_clock(struct armada_crtc *dcrtc,
return 0;
}
+static void armada510_crtc_disable(struct armada_crtc *dcrtc)
+{
+ if (!IS_ERR(dcrtc->clk)) {
+ clk_disable_unprepare(dcrtc->clk);
+ dcrtc->clk = ERR_PTR(-EINVAL);
+ }
+}
+
+static void armada510_crtc_enable(struct armada_crtc *dcrtc,
+ const struct drm_display_mode *mode)
+{
+ if (IS_ERR(dcrtc->clk)) {
+ dcrtc->clk = dcrtc->extclk[0];
+ WARN_ON(clk_prepare_enable(dcrtc->clk));
+ }
+}
+
const struct armada_variant armada510_ops = {
.has_spu_adv_reg = true,
- .spu_adv_reg = ADV_HWC32ENABLE | ADV_HWC32ARGB | ADV_HWC32BLEND,
.init = armada510_crtc_init,
.compute_clock = armada510_crtc_compute_clock,
+ .disable = armada510_crtc_disable,
+ .enable = armada510_crtc_enable,
};
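These two new variant hooks pair up with the atomic CRTC conversion later in this series: atomic_disable calls variant->disable to drop the clock once the CRTC is fully off, and atomic_enable calls variant->enable to reclaim extclk[0] before the display is brought back, with the ERR_PTR-encoded dcrtc->clk serving as the "clock not claimed" marker.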
diff --git a/drivers/gpu/drm/armada/armada_crtc.c b/drivers/gpu/drm/armada/armada_crtc.c
index 03eeee11dd5b..da9360688b55 100644
--- a/drivers/gpu/drm/armada/armada_crtc.c
+++ b/drivers/gpu/drm/armada/armada_crtc.c
@@ -11,6 +11,7 @@
#include <linux/of_device.h>
#include <linux/platform_device.h>
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_plane_helper.h>
#include <drm/drm_atomic_helper.h>
@@ -19,33 +20,9 @@
#include "armada_fb.h"
#include "armada_gem.h"
#include "armada_hw.h"
+#include "armada_plane.h"
#include "armada_trace.h"
-enum csc_mode {
- CSC_AUTO = 0,
- CSC_YUV_CCIR601 = 1,
- CSC_YUV_CCIR709 = 2,
- CSC_RGB_COMPUTER = 1,
- CSC_RGB_STUDIO = 2,
-};
-
-static const uint32_t armada_primary_formats[] = {
- DRM_FORMAT_UYVY,
- DRM_FORMAT_YUYV,
- DRM_FORMAT_VYUY,
- DRM_FORMAT_YVYU,
- DRM_FORMAT_ARGB8888,
- DRM_FORMAT_ABGR8888,
- DRM_FORMAT_XRGB8888,
- DRM_FORMAT_XBGR8888,
- DRM_FORMAT_RGB888,
- DRM_FORMAT_BGR888,
- DRM_FORMAT_ARGB1555,
- DRM_FORMAT_ABGR1555,
- DRM_FORMAT_RGB565,
- DRM_FORMAT_BGR565,
-};
-
/*
* A note about interlacing. Let's consider HDMI 1920x1080i.
* The timing parameters we have from X are:
@@ -115,15 +92,13 @@ armada_drm_crtc_update_regs(struct armada_crtc *dcrtc, struct armada_regs *regs)
}
}
-#define dpms_blanked(dpms) ((dpms) != DRM_MODE_DPMS_ON)
-
-static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
+static void armada_drm_crtc_update(struct armada_crtc *dcrtc, bool enable)
{
uint32_t dumb_ctrl;
dumb_ctrl = dcrtc->cfg_dumb_ctrl;
- if (!dpms_blanked(dcrtc->dpms))
+ if (enable)
dumb_ctrl |= CFG_DUMB_ENA;
/*
@@ -132,295 +107,26 @@ static void armada_drm_crtc_update(struct armada_crtc *dcrtc)
* force LCD_D[23:0] to output blank color, overriding the GPIO or
* SPI usage. So leave it as-is unless in DUMB24_RGB888_0 mode.
*/
- if (dpms_blanked(dcrtc->dpms) &&
- (dumb_ctrl & DUMB_MASK) == DUMB24_RGB888_0) {
+ if (!enable && (dumb_ctrl & DUMB_MASK) == DUMB24_RGB888_0) {
dumb_ctrl &= ~DUMB_MASK;
dumb_ctrl |= DUMB_BLANK;
}
- /*
- * The documentation doesn't indicate what the normal state of
- * the sync signals are. Sebastian Hesselbart kindly probed
- * these signals on his board to determine their state.
- *
- * The non-inverted state of the sync signals is active high.
- * Setting these bits makes the appropriate signal active low.
- */
- if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NCSYNC)
- dumb_ctrl |= CFG_INV_CSYNC;
- if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NHSYNC)
- dumb_ctrl |= CFG_INV_HSYNC;
- if (dcrtc->crtc.mode.flags & DRM_MODE_FLAG_NVSYNC)
- dumb_ctrl |= CFG_INV_VSYNC;
-
- if (dcrtc->dumb_ctrl != dumb_ctrl) {
- dcrtc->dumb_ctrl = dumb_ctrl;
- writel_relaxed(dumb_ctrl, dcrtc->base + LCD_SPU_DUMB_CTRL);
- }
-}
-
-void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
- int x, int y)
-{
- const struct drm_format_info *format = fb->format;
- unsigned int num_planes = format->num_planes;
- u32 addr = drm_fb_obj(fb)->dev_addr;
- int i;
-
- if (num_planes > 3)
- num_planes = 3;
-
- addrs[0] = addr + fb->offsets[0] + y * fb->pitches[0] +
- x * format->cpp[0];
-
- y /= format->vsub;
- x /= format->hsub;
-
- for (i = 1; i < num_planes; i++)
- addrs[i] = addr + fb->offsets[i] + y * fb->pitches[i] +
- x * format->cpp[i];
- for (; i < 3; i++)
- addrs[i] = 0;
-}
-
-static unsigned armada_drm_crtc_calc_fb(struct drm_framebuffer *fb,
- int x, int y, struct armada_regs *regs, bool interlaced)
-{
- unsigned pitch = fb->pitches[0];
- u32 addrs[3], addr_odd, addr_even;
- unsigned i = 0;
-
- DRM_DEBUG_DRIVER("pitch %u x %d y %d bpp %d\n",
- pitch, x, y, fb->format->cpp[0] * 8);
-
- armada_drm_plane_calc_addrs(addrs, fb, x, y);
-
- addr_odd = addr_even = addrs[0];
-
- if (interlaced) {
- addr_even += pitch;
- pitch *= 2;
- }
-
- /* write offset, base, and pitch */
- armada_reg_queue_set(regs, i, addr_odd, LCD_CFG_GRA_START_ADDR0);
- armada_reg_queue_set(regs, i, addr_even, LCD_CFG_GRA_START_ADDR1);
- armada_reg_queue_mod(regs, i, pitch, 0xffff, LCD_CFG_GRA_PITCH);
-
- return i;
-}
-
-static void armada_drm_plane_work_call(struct armada_crtc *dcrtc,
- struct armada_plane_work *work,
- void (*fn)(struct armada_crtc *, struct armada_plane_work *))
-{
- struct armada_plane *dplane = drm_to_armada_plane(work->plane);
- struct drm_pending_vblank_event *event;
- struct drm_framebuffer *fb;
-
- if (fn)
- fn(dcrtc, work);
- drm_crtc_vblank_put(&dcrtc->crtc);
-
- event = work->event;
- fb = work->old_fb;
- if (event || fb) {
- struct drm_device *dev = dcrtc->crtc.dev;
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- if (event)
- drm_crtc_send_vblank_event(&dcrtc->crtc, event);
- if (fb)
- __armada_drm_queue_unref_work(dev, fb);
- spin_unlock_irqrestore(&dev->event_lock, flags);
- }
-
- if (work->need_kfree)
- kfree(work);
-
- wake_up(&dplane->frame_wait);
+ armada_updatel(dumb_ctrl,
+ ~(CFG_INV_CSYNC | CFG_INV_HSYNC | CFG_INV_VSYNC),
+ dcrtc->base + LCD_SPU_DUMB_CTRL);
}
-static void armada_drm_plane_work_run(struct armada_crtc *dcrtc,
- struct drm_plane *plane)
-{
- struct armada_plane *dplane = drm_to_armada_plane(plane);
- struct armada_plane_work *work = xchg(&dplane->work, NULL);
-
- /* Handle any pending frame work. */
- if (work)
- armada_drm_plane_work_call(dcrtc, work, work->fn);
-}
-
-int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
- struct armada_plane_work *work)
-{
- struct armada_plane *plane = drm_to_armada_plane(work->plane);
- int ret;
-
- ret = drm_crtc_vblank_get(&dcrtc->crtc);
- if (ret)
- return ret;
-
- ret = cmpxchg(&plane->work, NULL, work) ? -EBUSY : 0;
- if (ret)
- drm_crtc_vblank_put(&dcrtc->crtc);
-
- return ret;
-}
-
-int armada_drm_plane_work_wait(struct armada_plane *plane, long timeout)
-{
- return wait_event_timeout(plane->frame_wait, !plane->work, timeout);
-}
-
-void armada_drm_plane_work_cancel(struct armada_crtc *dcrtc,
- struct armada_plane *dplane)
-{
- struct armada_plane_work *work = xchg(&dplane->work, NULL);
-
- if (work)
- armada_drm_plane_work_call(dcrtc, work, work->cancel);
-}
-
-static void armada_drm_crtc_complete_frame_work(struct armada_crtc *dcrtc,
- struct armada_plane_work *work)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dcrtc->irq_lock, flags);
- armada_drm_crtc_update_regs(dcrtc, work->regs);
- spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
-}
-
-static void armada_drm_crtc_complete_disable_work(struct armada_crtc *dcrtc,
- struct armada_plane_work *work)
-{
- unsigned long flags;
-
- if (dcrtc->plane == work->plane)
- dcrtc->plane = NULL;
-
- spin_lock_irqsave(&dcrtc->irq_lock, flags);
- armada_drm_crtc_update_regs(dcrtc, work->regs);
- spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
-}
-
-static struct armada_plane_work *
-armada_drm_crtc_alloc_plane_work(struct drm_plane *plane)
-{
- struct armada_plane_work *work;
- int i = 0;
-
- work = kzalloc(sizeof(*work), GFP_KERNEL);
- if (!work)
- return NULL;
-
- work->plane = plane;
- work->fn = armada_drm_crtc_complete_frame_work;
- work->need_kfree = true;
- armada_reg_queue_end(work->regs, i);
-
- return work;
-}
-
-static void armada_drm_crtc_finish_fb(struct armada_crtc *dcrtc,
- struct drm_framebuffer *fb, bool force)
-{
- struct armada_plane_work *work;
-
- if (!fb)
- return;
-
- if (force) {
- /* Display is disabled, so just drop the old fb */
- drm_framebuffer_put(fb);
- return;
- }
-
- work = armada_drm_crtc_alloc_plane_work(dcrtc->crtc.primary);
- if (work) {
- work->old_fb = fb;
-
- if (armada_drm_plane_work_queue(dcrtc, work) == 0)
- return;
-
- kfree(work);
- }
-
- /*
- * Oops - just drop the reference immediately and hope for
- * the best. The worst that will happen is the buffer gets
- * reused before it has finished being displayed.
- */
- drm_framebuffer_put(fb);
-}
-
-static void armada_drm_vblank_off(struct armada_crtc *dcrtc)
-{
- /*
- * Tell the DRM core that vblank IRQs aren't going to happen for
- * a while. This cleans up any pending vblank events for us.
- */
- drm_crtc_vblank_off(&dcrtc->crtc);
- armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
-}
-
-/* The mode_config.mutex will be held for this call */
-static void armada_drm_crtc_dpms(struct drm_crtc *crtc, int dpms)
-{
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
-
- if (dpms_blanked(dcrtc->dpms) != dpms_blanked(dpms)) {
- if (dpms_blanked(dpms))
- armada_drm_vblank_off(dcrtc);
- else if (!IS_ERR(dcrtc->clk))
- WARN_ON(clk_prepare_enable(dcrtc->clk));
- dcrtc->dpms = dpms;
- armada_drm_crtc_update(dcrtc);
- if (!dpms_blanked(dpms))
- drm_crtc_vblank_on(&dcrtc->crtc);
- else if (!IS_ERR(dcrtc->clk))
- clk_disable_unprepare(dcrtc->clk);
- } else if (dcrtc->dpms != dpms) {
- dcrtc->dpms = dpms;
- }
-}
-
-/*
- * Prepare for a mode set. Turn off overlay to ensure that we don't end
- * up with the overlay size being bigger than the active screen size.
- * We rely upon X refreshing this state after the mode set has completed.
- *
- * The mode_config.mutex will be held for this call
- */
-static void armada_drm_crtc_prepare(struct drm_crtc *crtc)
-{
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct drm_plane *plane;
-
- /*
- * If we have an overlay plane associated with this CRTC, disable
- * it before the modeset to avoid its coordinates being outside
- * the new mode parameters.
- */
- plane = dcrtc->plane;
- if (plane) {
- drm_plane_force_disable(plane);
- WARN_ON(!armada_drm_plane_work_wait(drm_to_armada_plane(plane),
- HZ));
- }
-}
-
-/* The mode_config.mutex will be held for this call */
-static void armada_drm_crtc_commit(struct drm_crtc *crtc)
+static void armada_drm_crtc_queue_state_event(struct drm_crtc *crtc)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct drm_pending_vblank_event *event;
- if (dcrtc->dpms != DRM_MODE_DPMS_ON) {
- dcrtc->dpms = DRM_MODE_DPMS_ON;
- armada_drm_crtc_update(dcrtc);
+ /* If we have an event, we need vblank events enabled */
+ event = xchg(&crtc->state->event, NULL);
+ if (event) {
+ WARN_ON(drm_crtc_vblank_get(crtc) != 0);
+ dcrtc->event = event;
}
}
@@ -465,8 +171,8 @@ static void armada_drm_crtc_enable_irq(struct armada_crtc *dcrtc, u32 mask)
static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
{
+ struct drm_pending_vblank_event *event;
void __iomem *base = dcrtc->base;
- struct drm_plane *ovl_plane;
if (stat & DMA_FF_UNDERFLOW)
DRM_ERROR("video underflow on crtc %u\n", dcrtc->num);
@@ -476,10 +182,6 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
if (stat & VSYNC_IRQ)
drm_crtc_handle_vblank(&dcrtc->crtc);
- ovl_plane = dcrtc->plane;
- if (ovl_plane)
- armada_drm_plane_work_run(dcrtc, ovl_plane);
-
spin_lock(&dcrtc->irq_lock);
if (stat & GRA_FRAME_IRQ && dcrtc->interlaced) {
int i = stat & GRA_FRAME_IRQ0 ? 0 : 1;
@@ -495,22 +197,35 @@ static void armada_drm_crtc_irq(struct armada_crtc *dcrtc, u32 stat)
writel_relaxed(val, base + LCD_SPU_ADV_REG);
}
- if (stat & DUMB_FRAMEDONE && dcrtc->cursor_update) {
- writel_relaxed(dcrtc->cursor_hw_pos,
- base + LCD_SPU_HWC_OVSA_HPXL_VLN);
- writel_relaxed(dcrtc->cursor_hw_sz,
- base + LCD_SPU_HWC_HPXL_VLN);
- armada_updatel(CFG_HWC_ENA,
- CFG_HWC_ENA | CFG_HWC_1BITMOD | CFG_HWC_1BITENA,
- base + LCD_SPU_DMA_CTRL0);
- dcrtc->cursor_update = false;
+ if (stat & dcrtc->irq_ena & DUMB_FRAMEDONE) {
+ if (dcrtc->update_pending) {
+ armada_drm_crtc_update_regs(dcrtc, dcrtc->regs);
+ dcrtc->update_pending = false;
+ }
+ if (dcrtc->cursor_update) {
+ writel_relaxed(dcrtc->cursor_hw_pos,
+ base + LCD_SPU_HWC_OVSA_HPXL_VLN);
+ writel_relaxed(dcrtc->cursor_hw_sz,
+ base + LCD_SPU_HWC_HPXL_VLN);
+ armada_updatel(CFG_HWC_ENA,
+ CFG_HWC_ENA | CFG_HWC_1BITMOD |
+ CFG_HWC_1BITENA,
+ base + LCD_SPU_DMA_CTRL0);
+ dcrtc->cursor_update = false;
+ }
armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
}
-
spin_unlock(&dcrtc->irq_lock);
- if (stat & GRA_FRAME_IRQ)
- armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
+ if (stat & VSYNC_IRQ && !dcrtc->update_pending) {
+ event = xchg(&dcrtc->event, NULL);
+ if (event) {
+ spin_lock(&dcrtc->crtc.dev->event_lock);
+ drm_crtc_send_vblank_event(&dcrtc->crtc, event);
+ spin_unlock(&dcrtc->crtc.dev->event_lock);
+ drm_crtc_vblank_put(&dcrtc->crtc);
+ }
+ }
}
static irqreturn_t armada_drm_irq(int irq, void *arg)
@@ -519,8 +234,9 @@ static irqreturn_t armada_drm_irq(int irq, void *arg)
u32 v, stat = readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
/*
- * This is rediculous - rather than writing bits to clear, we
- * have to set the actual status register value. This is racy.
+ * Reading the ISR appears to clear bits provided CLEAN_SPU_IRQ_ISR
+ * is set. Writing has some other effect to acknowledge the IRQ -
+ * without this, we only get a single IRQ.
*/
writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
@@ -536,107 +252,16 @@ static irqreturn_t armada_drm_irq(int irq, void *arg)
return IRQ_NONE;
}
-static uint32_t armada_drm_crtc_calculate_csc(struct armada_crtc *dcrtc)
-{
- struct drm_display_mode *adj = &dcrtc->crtc.mode;
- uint32_t val = 0;
-
- if (dcrtc->csc_yuv_mode == CSC_YUV_CCIR709)
- val |= CFG_CSC_YUV_CCIR709;
- if (dcrtc->csc_rgb_mode == CSC_RGB_STUDIO)
- val |= CFG_CSC_RGB_STUDIO;
-
- /*
- * In auto mode, set the colorimetry, based upon the HDMI spec.
- * 1280x720p, 1920x1080p and 1920x1080i use ITU709, others use
- * ITU601. It may be more appropriate to set this depending on
- * the source - but what if the graphic frame is YUV and the
- * video frame is RGB?
- */
- if ((adj->hdisplay == 1280 && adj->vdisplay == 720 &&
- !(adj->flags & DRM_MODE_FLAG_INTERLACE)) ||
- (adj->hdisplay == 1920 && adj->vdisplay == 1080)) {
- if (dcrtc->csc_yuv_mode == CSC_AUTO)
- val |= CFG_CSC_YUV_CCIR709;
- }
-
- /*
- * We assume we're connected to a TV-like device, so the YUV->RGB
- * conversion should produce a limited range. We should set this
- * depending on the connectors attached to this CRTC, and what
- * kind of device they report being connected.
- */
- if (dcrtc->csc_rgb_mode == CSC_AUTO)
- val |= CFG_CSC_RGB_STUDIO;
-
- return val;
-}
-
-static void armada_drm_gra_plane_regs(struct armada_regs *regs,
- struct drm_framebuffer *fb, struct armada_plane_state *state,
- int x, int y, bool interlaced)
-{
- unsigned int i;
- u32 ctrl0;
-
- i = armada_drm_crtc_calc_fb(fb, x, y, regs, interlaced);
- armada_reg_queue_set(regs, i, state->dst_yx, LCD_SPU_GRA_OVSA_HPXL_VLN);
- armada_reg_queue_set(regs, i, state->src_hw, LCD_SPU_GRA_HPXL_VLN);
- armada_reg_queue_set(regs, i, state->dst_hw, LCD_SPU_GZM_HPXL_VLN);
-
- ctrl0 = state->ctrl0;
- if (interlaced)
- ctrl0 |= CFG_GRA_FTOGGLE;
-
- armada_reg_queue_mod(regs, i, ctrl0, CFG_GRAFORMAT |
- CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
- CFG_SWAPYU | CFG_YUV2RGB) |
- CFG_PALETTE_ENA | CFG_GRA_FTOGGLE |
- CFG_GRA_HSMOOTH | CFG_GRA_ENA,
- LCD_SPU_DMA_CTRL0);
- armada_reg_queue_end(regs, i);
-}
-
-static void armada_drm_primary_set(struct drm_crtc *crtc,
- struct drm_plane *plane, int x, int y)
-{
- struct armada_plane_state *state = &drm_to_armada_plane(plane)->state;
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_regs regs[8];
- bool interlaced = dcrtc->interlaced;
-
- armada_drm_gra_plane_regs(regs, plane->fb, state, x, y, interlaced);
- armada_drm_crtc_update_regs(dcrtc, regs);
-}
-
/* The mode_config.mutex will be held for this call */
-static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
- struct drm_display_mode *mode, struct drm_display_mode *adj,
- int x, int y, struct drm_framebuffer *old_fb)
+static void armada_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
+ struct drm_display_mode *adj = &crtc->state->adjusted_mode;
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
struct armada_regs regs[17];
uint32_t lm, rm, tm, bm, val, sclk;
unsigned long flags;
unsigned i;
- bool interlaced;
-
- drm_framebuffer_get(crtc->primary->fb);
-
- interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
-
- val = CFG_GRA_ENA;
- val |= CFG_GRA_FMT(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt);
- val |= CFG_GRA_MOD(drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->mod);
-
- if (drm_fb_to_armada_fb(dcrtc->crtc.primary->fb)->fmt > CFG_420)
- val |= CFG_PALETTE_ENA;
-
- drm_to_armada_plane(crtc->primary)->state.ctrl0 = val;
- drm_to_armada_plane(crtc->primary)->state.src_hw =
- drm_to_armada_plane(crtc->primary)->state.dst_hw =
- adj->crtc_vdisplay << 16 | adj->crtc_hdisplay;
- drm_to_armada_plane(crtc->primary)->state.dst_yx = 0;
+ bool interlaced = !!(adj->flags & DRM_MODE_FLAG_INTERLACE);
i = 0;
rm = adj->crtc_hsync_start - adj->crtc_hdisplay;
@@ -644,35 +269,15 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
bm = adj->crtc_vsync_start - adj->crtc_vdisplay;
tm = adj->crtc_vtotal - adj->crtc_vsync_end;
- DRM_DEBUG_DRIVER("H: %d %d %d %d lm %d rm %d\n",
- adj->crtc_hdisplay,
- adj->crtc_hsync_start,
- adj->crtc_hsync_end,
- adj->crtc_htotal, lm, rm);
- DRM_DEBUG_DRIVER("V: %d %d %d %d tm %d bm %d\n",
- adj->crtc_vdisplay,
- adj->crtc_vsync_start,
- adj->crtc_vsync_end,
- adj->crtc_vtotal, tm, bm);
-
- /* Wait for pending flips to complete */
- armada_drm_plane_work_wait(drm_to_armada_plane(dcrtc->crtc.primary),
- MAX_SCHEDULE_TIMEOUT);
-
- drm_crtc_vblank_off(crtc);
-
- val = dcrtc->dumb_ctrl & ~CFG_DUMB_ENA;
- if (val != dcrtc->dumb_ctrl) {
- dcrtc->dumb_ctrl = val;
- writel_relaxed(val, dcrtc->base + LCD_SPU_DUMB_CTRL);
- }
-
- /*
- * If we are blanked, we would have disabled the clock. Re-enable
- * it so that compute_clock() does the right thing.
- */
- if (!IS_ERR(dcrtc->clk) && dpms_blanked(dcrtc->dpms))
- WARN_ON(clk_prepare_enable(dcrtc->clk));
+ DRM_DEBUG_KMS("[CRTC:%d:%s] mode " DRM_MODE_FMT "\n",
+ crtc->base.id, crtc->name,
+ adj->base.id, adj->name, adj->vrefresh, adj->clock,
+ adj->crtc_hdisplay, adj->crtc_hsync_start,
+ adj->crtc_hsync_end, adj->crtc_htotal,
+ adj->crtc_vdisplay, adj->crtc_vsync_start,
+ adj->crtc_vsync_end, adj->crtc_vtotal,
+ adj->type, adj->flags);
+ DRM_DEBUG_KMS("lm %d rm %d tm %d bm %d\n", lm, rm, tm, bm);
/* Now compute the divider for real */
dcrtc->variant->compute_clock(dcrtc, adj, &sclk);
@@ -689,25 +294,20 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
spin_lock_irqsave(&dcrtc->irq_lock, flags);
- /* Ensure graphic fifo is enabled */
- armada_reg_queue_mod(regs, i, 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
-
/* Even interlaced/progressive frame */
dcrtc->v[1].spu_v_h_total = adj->crtc_vtotal << 16 |
adj->crtc_htotal;
dcrtc->v[1].spu_v_porch = tm << 16 | bm;
val = adj->crtc_hsync_start;
- dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
- dcrtc->variant->spu_adv_reg;
+ dcrtc->v[1].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN;
if (interlaced) {
/* Odd interlaced frame */
+ val -= adj->crtc_htotal / 2;
+ dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN;
dcrtc->v[0].spu_v_h_total = dcrtc->v[1].spu_v_h_total +
(1 << 16);
dcrtc->v[0].spu_v_porch = dcrtc->v[1].spu_v_porch + 1;
- val = adj->crtc_hsync_start - adj->crtc_htotal / 2;
- dcrtc->v[0].spu_adv_reg = val << 20 | val | ADV_VSYNCOFFEN |
- dcrtc->variant->spu_adv_reg;
} else {
dcrtc->v[0] = dcrtc->v[1];
}
@@ -720,77 +320,136 @@ static int armada_drm_crtc_mode_set(struct drm_crtc *crtc,
armada_reg_queue_set(regs, i, dcrtc->v[0].spu_v_h_total,
LCD_SPUT_V_H_TOTAL);
- if (dcrtc->variant->has_spu_adv_reg) {
+ if (dcrtc->variant->has_spu_adv_reg)
armada_reg_queue_mod(regs, i, dcrtc->v[0].spu_adv_reg,
ADV_VSYNC_L_OFF | ADV_VSYNC_H_OFF |
ADV_VSYNCOFFEN, LCD_SPU_ADV_REG);
- }
val = adj->flags & DRM_MODE_FLAG_NVSYNC ? CFG_VSYNC_INV : 0;
armada_reg_queue_mod(regs, i, val, CFG_VSYNC_INV, LCD_SPU_DMA_CTRL1);
- val = dcrtc->spu_iopad_ctrl | armada_drm_crtc_calculate_csc(dcrtc);
- armada_reg_queue_set(regs, i, val, LCD_SPU_IOPAD_CONTROL);
+ /*
+ * The documentation doesn't indicate what the normal state of
+ * the sync signals are. Sebastian Hesselbart kindly probed
+ * these signals on his board to determine their state.
+ *
+ * The non-inverted state of the sync signals is active high.
+ * Setting these bits makes the appropriate signal active low.
+ */
+ val = 0;
+ if (adj->flags & DRM_MODE_FLAG_NCSYNC)
+ val |= CFG_INV_CSYNC;
+ if (adj->flags & DRM_MODE_FLAG_NHSYNC)
+ val |= CFG_INV_HSYNC;
+ if (adj->flags & DRM_MODE_FLAG_NVSYNC)
+ val |= CFG_INV_VSYNC;
+ armada_reg_queue_mod(regs, i, val, CFG_INV_CSYNC | CFG_INV_HSYNC |
+ CFG_INV_VSYNC, LCD_SPU_DUMB_CTRL);
armada_reg_queue_end(regs, i);
armada_drm_crtc_update_regs(dcrtc, regs);
-
- armada_drm_primary_set(crtc, crtc->primary, x, y);
spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
+}
- armada_drm_crtc_update(dcrtc);
+static void armada_drm_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- drm_crtc_vblank_on(crtc);
- armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));
+ DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
- return 0;
+ dcrtc->regs_idx = 0;
+ dcrtc->regs = dcrtc->atomic_regs;
}
-/* The mode_config.mutex will be held for this call */
-static int armada_drm_crtc_mode_set_base(struct drm_crtc *crtc, int x, int y,
- struct drm_framebuffer *old_fb)
+static void armada_drm_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_regs regs[4];
- unsigned i;
- i = armada_drm_crtc_calc_fb(crtc->primary->fb, crtc->x, crtc->y, regs,
- dcrtc->interlaced);
- armada_reg_queue_end(regs, i);
+ DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
+
+ armada_reg_queue_end(dcrtc->regs, dcrtc->regs_idx);
+
+ /*
+ * If we aren't doing a full modeset, then we need to queue
+ * the event here.
+ */
+ if (!drm_atomic_crtc_needs_modeset(crtc->state)) {
+ dcrtc->update_pending = true;
+ armada_drm_crtc_queue_state_event(crtc);
+ spin_lock_irq(&dcrtc->irq_lock);
+ armada_drm_crtc_enable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
+ spin_unlock_irq(&dcrtc->irq_lock);
+ } else {
+ spin_lock_irq(&dcrtc->irq_lock);
+ armada_drm_crtc_update_regs(dcrtc, dcrtc->regs);
+ spin_unlock_irq(&dcrtc->irq_lock);
+ }
+}
- /* Wait for pending flips to complete */
- armada_drm_plane_work_wait(drm_to_armada_plane(dcrtc->crtc.primary),
- MAX_SCHEDULE_TIMEOUT);
+static void armada_drm_crtc_atomic_disable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ struct drm_pending_vblank_event *event;
- /* Take a reference to the new fb as we're using it */
- drm_framebuffer_get(crtc->primary->fb);
+ DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
- /* Update the base in the CRTC */
- armada_drm_crtc_update_regs(dcrtc, regs);
+ drm_crtc_vblank_off(crtc);
+ armada_drm_crtc_update(dcrtc, false);
- /* Drop our previously held reference */
- armada_drm_crtc_finish_fb(dcrtc, old_fb, dpms_blanked(dcrtc->dpms));
+ if (!crtc->state->active) {
+ /*
+ * This modeset will be leaving the CRTC disabled, so
+ * call the backend to disable upstream clocks etc.
+ */
+ if (dcrtc->variant->disable)
+ dcrtc->variant->disable(dcrtc);
- return 0;
+ /*
+ * We will not receive any further vblank events.
+ * Send the flip_done event manually.
+ */
+ event = crtc->state->event;
+ crtc->state->event = NULL;
+ if (event) {
+ spin_lock_irq(&crtc->dev->event_lock);
+ drm_crtc_send_vblank_event(crtc, event);
+ spin_unlock_irq(&crtc->dev->event_lock);
+ }
+ }
}
-/* The mode_config.mutex will be held for this call */
-static void armada_drm_crtc_disable(struct drm_crtc *crtc)
+static void armada_drm_crtc_atomic_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
{
- armada_drm_crtc_dpms(crtc, DRM_MODE_DPMS_OFF);
+ struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+
+ DRM_DEBUG_KMS("[CRTC:%d:%s]\n", crtc->base.id, crtc->name);
+
+ if (!old_state->active) {
+ /*
+ * This modeset is enabling the CRTC after it having
+ * been disabled. Reverse the call to ->disable in
+ * the atomic_disable().
+ */
+ if (dcrtc->variant->enable)
+ dcrtc->variant->enable(dcrtc, &crtc->state->adjusted_mode);
+ }
+ armada_drm_crtc_update(dcrtc, true);
+ drm_crtc_vblank_on(crtc);
- /* Disable our primary plane when we disable the CRTC. */
- crtc->primary->funcs->disable_plane(crtc->primary, NULL);
+ armada_drm_crtc_queue_state_event(crtc);
}
static const struct drm_crtc_helper_funcs armada_crtc_helper_funcs = {
- .dpms = armada_drm_crtc_dpms,
- .prepare = armada_drm_crtc_prepare,
- .commit = armada_drm_crtc_commit,
.mode_fixup = armada_drm_crtc_mode_fixup,
- .mode_set = armada_drm_crtc_mode_set,
- .mode_set_base = armada_drm_crtc_mode_set_base,
- .disable = armada_drm_crtc_disable,
+ .mode_set_nofb = armada_drm_crtc_mode_set_nofb,
+ .atomic_begin = armada_drm_crtc_atomic_begin,
+ .atomic_flush = armada_drm_crtc_atomic_flush,
+ .atomic_disable = armada_drm_crtc_atomic_disable,
+ .atomic_enable = armada_drm_crtc_atomic_enable,
};
static void armada_load_cursor_argb(void __iomem *base, uint32_t *pix,
@@ -883,7 +542,6 @@ static int armada_drm_crtc_cursor_update(struct armada_crtc *dcrtc, bool reload)
if (!dcrtc->cursor_obj || !h || !w) {
spin_lock_irq(&dcrtc->irq_lock);
- armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
dcrtc->cursor_update = false;
armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
spin_unlock_irq(&dcrtc->irq_lock);
@@ -907,7 +565,6 @@ static int armada_drm_crtc_cursor_update(struct armada_crtc *dcrtc, bool reload)
if (dcrtc->cursor_hw_sz != (h << 16 | w)) {
spin_lock_irq(&dcrtc->irq_lock);
- armada_drm_crtc_disable_irq(dcrtc, DUMB_FRAMEDONE_ENA);
dcrtc->cursor_update = false;
armada_updatel(0, CFG_HWC_ENA, dcrtc->base + LCD_SPU_DMA_CTRL0);
spin_unlock_irq(&dcrtc->irq_lock);
@@ -1015,8 +672,8 @@ static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
priv->dcrtc[dcrtc->num] = NULL;
drm_crtc_cleanup(&dcrtc->crtc);
- if (!IS_ERR(dcrtc->clk))
- clk_disable_unprepare(dcrtc->clk);
+ if (dcrtc->variant->disable)
+ dcrtc->variant->disable(dcrtc);
writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ENA);
@@ -1025,361 +682,51 @@ static void armada_drm_crtc_destroy(struct drm_crtc *crtc)
kfree(dcrtc);
}
-/*
- * The mode_config lock is held here, to prevent races between this
- * and a mode_set.
- */
-static int armada_drm_crtc_page_flip(struct drm_crtc *crtc,
- struct drm_framebuffer *fb, struct drm_pending_vblank_event *event, uint32_t page_flip_flags,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_plane_work *work;
- unsigned i;
- int ret;
-
- /* We don't support changing the pixel format */
- if (fb->format != crtc->primary->fb->format)
- return -EINVAL;
-
- work = armada_drm_crtc_alloc_plane_work(dcrtc->crtc.primary);
- if (!work)
- return -ENOMEM;
-
- work->event = event;
- work->old_fb = dcrtc->crtc.primary->fb;
-
- i = armada_drm_crtc_calc_fb(fb, crtc->x, crtc->y, work->regs,
- dcrtc->interlaced);
- armada_reg_queue_end(work->regs, i);
-
- /*
- * Ensure that we hold a reference on the new framebuffer.
- * This has to match the behaviour in mode_set.
- */
- drm_framebuffer_get(fb);
-
- ret = armada_drm_plane_work_queue(dcrtc, work);
- if (ret) {
- /* Undo our reference above */
- drm_framebuffer_put(fb);
- kfree(work);
- return ret;
- }
-
- /*
- * Don't take a reference on the new framebuffer;
- * drm_mode_page_flip_ioctl() has already grabbed a reference and
- * will _not_ drop that reference on successful return from this
- * function. Simply mark this new framebuffer as the current one.
- */
- dcrtc->crtc.primary->fb = fb;
-
- /*
- * Finally, if the display is blanked, we won't receive an
- * interrupt, so complete it now.
- */
- if (dpms_blanked(dcrtc->dpms))
- armada_drm_plane_work_run(dcrtc, dcrtc->crtc.primary);
-
- return 0;
-}
-
-static int
-armada_drm_crtc_set_property(struct drm_crtc *crtc,
- struct drm_property *property, uint64_t val)
-{
- struct armada_private *priv = crtc->dev->dev_private;
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- bool update_csc = false;
-
- if (property == priv->csc_yuv_prop) {
- dcrtc->csc_yuv_mode = val;
- update_csc = true;
- } else if (property == priv->csc_rgb_prop) {
- dcrtc->csc_rgb_mode = val;
- update_csc = true;
- }
-
- if (update_csc) {
- uint32_t val;
-
- val = dcrtc->spu_iopad_ctrl |
- armada_drm_crtc_calculate_csc(dcrtc);
- writel_relaxed(val, dcrtc->base + LCD_SPU_IOPAD_CONTROL);
- }
-
- return 0;
-}
-
/* These are called under the vbl_lock. */
static int armada_drm_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ unsigned long flags;
+ spin_lock_irqsave(&dcrtc->irq_lock, flags);
armada_drm_crtc_enable_irq(dcrtc, VSYNC_IRQ_ENA);
+ spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
return 0;
}
static void armada_drm_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
+ unsigned long flags;
+ spin_lock_irqsave(&dcrtc->irq_lock, flags);
armada_drm_crtc_disable_irq(dcrtc, VSYNC_IRQ_ENA);
+ spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
}
static const struct drm_crtc_funcs armada_crtc_funcs = {
+ .reset = drm_atomic_helper_crtc_reset,
.cursor_set = armada_drm_crtc_cursor_set,
.cursor_move = armada_drm_crtc_cursor_move,
.destroy = armada_drm_crtc_destroy,
- .set_config = drm_crtc_helper_set_config,
- .page_flip = armada_drm_crtc_page_flip,
- .set_property = armada_drm_crtc_set_property,
+ .set_config = drm_atomic_helper_set_config,
+ .page_flip = drm_atomic_helper_page_flip,
+ .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
.enable_vblank = armada_drm_crtc_enable_vblank,
.disable_vblank = armada_drm_crtc_disable_vblank,
};
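A note on the vblank hooks above: they take irq_lock with the irqsave variants because the DRM vblank core calls enable_vblank/disable_vblank with interrupts already disabled (under its own vbl_lock, as the comment says). A minimal sketch of why irqsave is required, with placeholder lock names:

static DEFINE_SPINLOCK(outer_lock);   /* stands in for drm's vbl_lock */
static DEFINE_SPINLOCK(inner_lock);   /* stands in for dcrtc->irq_lock */

static void hook(void)
{
	unsigned long flags;

	/*
	 * spin_lock_irq()/spin_unlock_irq() here would unconditionally
	 * re-enable interrupts on unlock, breaking the caller below,
	 * which still holds outer_lock with interrupts off.
	 */
	spin_lock_irqsave(&inner_lock, flags);
	/* ... write LCD_SPU_IRQ_ENA ... */
	spin_unlock_irqrestore(&inner_lock, flags);
}

static void caller(void)
{
	unsigned long flags;

	spin_lock_irqsave(&outer_lock, flags);	/* interrupts now off */
	hook();					/* must preserve that */
	spin_unlock_irqrestore(&outer_lock, flags);
}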
-static void armada_drm_primary_update_state(struct drm_plane_state *state,
- struct armada_regs *regs)
-{
- struct armada_plane *dplane = drm_to_armada_plane(state->plane);
- struct armada_crtc *dcrtc = drm_to_armada_crtc(state->crtc);
- struct armada_framebuffer *dfb = drm_fb_to_armada_fb(state->fb);
- bool was_disabled;
- unsigned int idx = 0;
- u32 val;
-
- val = CFG_GRA_FMT(dfb->fmt) | CFG_GRA_MOD(dfb->mod);
- if (dfb->fmt > CFG_420)
- val |= CFG_PALETTE_ENA;
- if (state->visible)
- val |= CFG_GRA_ENA;
- if (drm_rect_width(&state->src) >> 16 != drm_rect_width(&state->dst))
- val |= CFG_GRA_HSMOOTH;
-
- was_disabled = !(dplane->state.ctrl0 & CFG_GRA_ENA);
- if (was_disabled)
- armada_reg_queue_mod(regs, idx,
- 0, CFG_PDWN64x66, LCD_SPU_SRAM_PARA1);
-
- dplane->state.ctrl0 = val;
- dplane->state.src_hw = (drm_rect_height(&state->src) & 0xffff0000) |
- drm_rect_width(&state->src) >> 16;
- dplane->state.dst_hw = drm_rect_height(&state->dst) << 16 |
- drm_rect_width(&state->dst);
- dplane->state.dst_yx = state->dst.y1 << 16 | state->dst.x1;
-
- armada_drm_gra_plane_regs(regs + idx, &dfb->fb, &dplane->state,
- state->src.x1 >> 16, state->src.y1 >> 16,
- dcrtc->interlaced);
-
- dplane->state.vsync_update = !was_disabled;
- dplane->state.changed = true;
-}
-
-static int armada_drm_primary_update(struct drm_plane *plane,
- struct drm_crtc *crtc, struct drm_framebuffer *fb,
- int crtc_x, int crtc_y, unsigned int crtc_w, unsigned int crtc_h,
- uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct armada_plane *dplane = drm_to_armada_plane(plane);
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_plane_work *work;
- struct drm_plane_state state = {
- .plane = plane,
- .crtc = crtc,
- .fb = fb,
- .src_x = src_x,
- .src_y = src_y,
- .src_w = src_w,
- .src_h = src_h,
- .crtc_x = crtc_x,
- .crtc_y = crtc_y,
- .crtc_w = crtc_w,
- .crtc_h = crtc_h,
- .rotation = DRM_MODE_ROTATE_0,
- };
- struct drm_crtc_state crtc_state = {
- .crtc = crtc,
- .enable = crtc->enabled,
- .mode = crtc->mode,
- };
- int ret;
-
- ret = drm_atomic_helper_check_plane_state(&state, &crtc_state, 0,
- INT_MAX, true, false);
- if (ret)
- return ret;
-
- work = &dplane->works[dplane->next_work];
- work->fn = armada_drm_crtc_complete_frame_work;
-
- if (plane->fb != fb) {
- /*
- * Take a reference on the new framebuffer - we want to
- * hold on to it while the hardware is displaying it.
- */
- drm_framebuffer_reference(fb);
-
- work->old_fb = plane->fb;
- } else {
- work->old_fb = NULL;
- }
-
- armada_drm_primary_update_state(&state, work->regs);
-
- if (!dplane->state.changed)
- return 0;
-
- /* Wait for pending work to complete */
- if (armada_drm_plane_work_wait(dplane, HZ / 10) == 0)
- armada_drm_plane_work_cancel(dcrtc, dplane);
-
- if (!dplane->state.vsync_update) {
- work->fn(dcrtc, work);
- if (work->old_fb)
- drm_framebuffer_unreference(work->old_fb);
- return 0;
- }
-
- /* Queue it for update on the next interrupt if we are enabled */
- ret = armada_drm_plane_work_queue(dcrtc, work);
- if (ret) {
- work->fn(dcrtc, work);
- if (work->old_fb)
- drm_framebuffer_unreference(work->old_fb);
- }
-
- dplane->next_work = !dplane->next_work;
-
- return 0;
-}
-
-int armada_drm_plane_disable(struct drm_plane *plane,
- struct drm_modeset_acquire_ctx *ctx)
-{
- struct armada_plane *dplane = drm_to_armada_plane(plane);
- struct armada_crtc *dcrtc;
- struct armada_plane_work *work;
- unsigned int idx = 0;
- u32 sram_para1, enable_mask;
-
- if (!plane->crtc)
- return 0;
-
- /*
- * Arrange to power down most RAMs and FIFOs if this is the primary
- * plane, otherwise just the YUV FIFOs for the overlay plane.
- */
- if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
- sram_para1 = CFG_PDWN256x32 | CFG_PDWN256x24 | CFG_PDWN256x8 |
- CFG_PDWN32x32 | CFG_PDWN64x66;
- enable_mask = CFG_GRA_ENA;
- } else {
- sram_para1 = CFG_PDWN16x66 | CFG_PDWN32x66;
- enable_mask = CFG_DMA_ENA;
- }
-
- dplane->state.ctrl0 &= ~enable_mask;
-
- dcrtc = drm_to_armada_crtc(plane->crtc);
-
- /*
- * Try to disable the plane and drop our ref on the framebuffer
- * at the next frame update. If we fail for any reason, disable
- * the plane immediately.
- */
- work = &dplane->works[dplane->next_work];
- work->fn = armada_drm_crtc_complete_disable_work;
- work->cancel = armada_drm_crtc_complete_disable_work;
- work->old_fb = plane->fb;
-
- armada_reg_queue_mod(work->regs, idx,
- 0, enable_mask, LCD_SPU_DMA_CTRL0);
- armada_reg_queue_mod(work->regs, idx,
- sram_para1, 0, LCD_SPU_SRAM_PARA1);
- armada_reg_queue_end(work->regs, idx);
-
- /* Wait for any preceding work to complete, but don't wedge */
- if (WARN_ON(!armada_drm_plane_work_wait(dplane, HZ)))
- armada_drm_plane_work_cancel(dcrtc, dplane);
-
- if (armada_drm_plane_work_queue(dcrtc, work)) {
- work->fn(dcrtc, work);
- if (work->old_fb)
- drm_framebuffer_unreference(work->old_fb);
- }
-
- dplane->next_work = !dplane->next_work;
-
- return 0;
-}
-
-static const struct drm_plane_funcs armada_primary_plane_funcs = {
- .update_plane = armada_drm_primary_update,
- .disable_plane = armada_drm_plane_disable,
- .destroy = drm_primary_helper_destroy,
-};
-
-int armada_drm_plane_init(struct armada_plane *plane)
-{
- unsigned int i;
-
- for (i = 0; i < ARRAY_SIZE(plane->works); i++)
- plane->works[i].plane = &plane->base;
-
- init_waitqueue_head(&plane->frame_wait);
-
- return 0;
-}
-
-static const struct drm_prop_enum_list armada_drm_csc_yuv_enum_list[] = {
- { CSC_AUTO, "Auto" },
- { CSC_YUV_CCIR601, "CCIR601" },
- { CSC_YUV_CCIR709, "CCIR709" },
-};
-
-static const struct drm_prop_enum_list armada_drm_csc_rgb_enum_list[] = {
- { CSC_AUTO, "Auto" },
- { CSC_RGB_COMPUTER, "Computer system" },
- { CSC_RGB_STUDIO, "Studio" },
-};
-
-static int armada_drm_crtc_create_properties(struct drm_device *dev)
-{
- struct armada_private *priv = dev->dev_private;
-
- if (priv->csc_yuv_prop)
- return 0;
-
- priv->csc_yuv_prop = drm_property_create_enum(dev, 0,
- "CSC_YUV", armada_drm_csc_yuv_enum_list,
- ARRAY_SIZE(armada_drm_csc_yuv_enum_list));
- priv->csc_rgb_prop = drm_property_create_enum(dev, 0,
- "CSC_RGB", armada_drm_csc_rgb_enum_list,
- ARRAY_SIZE(armada_drm_csc_rgb_enum_list));
-
- if (!priv->csc_yuv_prop || !priv->csc_rgb_prop)
- return -ENOMEM;
-
- return 0;
-}
-
static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
struct resource *res, int irq, const struct armada_variant *variant,
struct device_node *port)
{
struct armada_private *priv = drm->dev_private;
struct armada_crtc *dcrtc;
- struct armada_plane *primary;
+ struct drm_plane *primary;
void __iomem *base;
int ret;
- ret = armada_drm_crtc_create_properties(drm);
- if (ret)
- return ret;
-
base = devm_ioremap_resource(dev, res);
if (IS_ERR(base))
return PTR_ERR(base);
@@ -1397,8 +744,6 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
dcrtc->base = base;
dcrtc->num = drm->mode_config.num_crtc;
dcrtc->clk = ERR_PTR(-EINVAL);
- dcrtc->csc_yuv_mode = CSC_AUTO;
- dcrtc->csc_rgb_mode = CSC_AUTO;
dcrtc->cfg_dumb_ctrl = DUMB24_RGB888_0;
dcrtc->spu_iopad_ctrl = CFG_VSCALE_LN_EN | CFG_IOPAD_DUMB24;
spin_lock_init(&dcrtc->irq_lock);
@@ -1415,6 +760,7 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
CFG_PDWN64x66, dcrtc->base + LCD_SPU_SRAM_PARA1);
writel_relaxed(0x2032ff81, dcrtc->base + LCD_SPU_DMA_CTRL1);
writel_relaxed(dcrtc->irq_ena, dcrtc->base + LCD_SPU_IRQ_ENA);
+ readl_relaxed(dcrtc->base + LCD_SPU_IRQ_ISR);
writel_relaxed(0, dcrtc->base + LCD_SPU_IRQ_ISR);
ret = devm_request_irq(dev, irq, armada_drm_irq, 0, "armada_drm_crtc",
@@ -1441,39 +787,23 @@ static int armada_drm_crtc_create(struct drm_device *drm, struct device *dev,
goto err_crtc;
}
- ret = armada_drm_plane_init(primary);
- if (ret) {
- kfree(primary);
- goto err_crtc;
- }
-
- ret = drm_universal_plane_init(drm, &primary->base, 0,
- &armada_primary_plane_funcs,
- armada_primary_formats,
- ARRAY_SIZE(armada_primary_formats),
- NULL,
- DRM_PLANE_TYPE_PRIMARY, NULL);
+ ret = armada_drm_primary_plane_init(drm, primary);
if (ret) {
kfree(primary);
goto err_crtc;
}
- ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, &primary->base, NULL,
+ ret = drm_crtc_init_with_planes(drm, &dcrtc->crtc, primary, NULL,
&armada_crtc_funcs, NULL);
if (ret)
goto err_crtc_init;
drm_crtc_helper_add(&dcrtc->crtc, &armada_crtc_helper_funcs);
- drm_object_attach_property(&dcrtc->crtc.base, priv->csc_yuv_prop,
- dcrtc->csc_yuv_mode);
- drm_object_attach_property(&dcrtc->crtc.base, priv->csc_rgb_prop,
- dcrtc->csc_rgb_mode);
-
return armada_overlay_plane_create(drm, 1 << dcrtc->num);
err_crtc_init:
- primary->base.funcs->destroy(&primary->base);
+ primary->funcs->destroy(primary);
err_crtc:
kfree(dcrtc);
diff --git a/drivers/gpu/drm/armada/armada_crtc.h b/drivers/gpu/drm/armada/armada_crtc.h
index 445829b8877a..7ebd337b60af 100644
--- a/drivers/gpu/drm/armada/armada_crtc.h
+++ b/drivers/gpu/drm/armada/armada_crtc.h
@@ -32,49 +32,8 @@ struct armada_regs {
armada_reg_queue_mod(_r, _i, 0, 0, ~0)
struct armada_crtc;
-struct armada_plane;
struct armada_variant;
-struct armada_plane_work {
- void (*fn)(struct armada_crtc *, struct armada_plane_work *);
- void (*cancel)(struct armada_crtc *, struct armada_plane_work *);
- bool need_kfree;
- struct drm_plane *plane;
- struct drm_framebuffer *old_fb;
- struct drm_pending_vblank_event *event;
- struct armada_regs regs[14];
-};
-
-struct armada_plane_state {
- u16 src_x;
- u16 src_y;
- u32 src_hw;
- u32 dst_hw;
- u32 dst_yx;
- u32 ctrl0;
- bool changed;
- bool vsync_update;
-};
-
-struct armada_plane {
- struct drm_plane base;
- wait_queue_head_t frame_wait;
- bool next_work;
- struct armada_plane_work works[2];
- struct armada_plane_work *work;
- struct armada_plane_state state;
-};
-#define drm_to_armada_plane(p) container_of(p, struct armada_plane, base)
-
-int armada_drm_plane_init(struct armada_plane *plane);
-int armada_drm_plane_work_queue(struct armada_crtc *dcrtc,
- struct armada_plane_work *work);
-int armada_drm_plane_work_wait(struct armada_plane *plane, long timeout);
-void armada_drm_plane_work_cancel(struct armada_crtc *dcrtc,
- struct armada_plane *plane);
-void armada_drm_plane_calc_addrs(u32 *addrs, struct drm_framebuffer *fb,
- int x, int y);
-
struct armada_crtc {
struct drm_crtc crtc;
const struct armada_variant *variant;
@@ -89,10 +48,6 @@ struct armada_crtc {
} v[2];
bool interlaced;
bool cursor_update;
- uint8_t csc_yuv_mode;
- uint8_t csc_rgb_mode;
-
- struct drm_plane *plane;
struct armada_gem_object *cursor_obj;
int cursor_x;
@@ -102,21 +57,22 @@ struct armada_crtc {
uint32_t cursor_w;
uint32_t cursor_h;
- int dpms;
uint32_t cfg_dumb_ctrl;
- uint32_t dumb_ctrl;
uint32_t spu_iopad_ctrl;
spinlock_t irq_lock;
uint32_t irq_ena;
+
+ bool update_pending;
+ struct drm_pending_vblank_event *event;
+ struct armada_regs atomic_regs[32];
+ struct armada_regs *regs;
+ unsigned int regs_idx;
};
#define drm_to_armada_crtc(c) container_of(c, struct armada_crtc, crtc)
void armada_drm_crtc_update_regs(struct armada_crtc *, struct armada_regs *);
-int armada_drm_plane_disable(struct drm_plane *plane,
- struct drm_modeset_acquire_ctx *ctx);
-
extern struct platform_driver armada_lcd_platform_driver;
#endif
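The fields added to struct armada_crtc above (atomic_regs, regs, regs_idx, update_pending, event) replace the old per-plane work machinery: during an atomic commit, each plane handler appends its register updates to the CRTC's shared staging buffer, and the CRTC flushes the whole batch once. A condensed sketch of the append pattern the atomic_update hooks in this series follow (the register value is illustrative):

static void example_atomic_update(struct drm_plane *plane,
	struct drm_plane_state *old_state)
{
	struct armada_crtc *dcrtc = drm_to_armada_crtc(plane->state->crtc);
	struct armada_regs *regs = dcrtc->regs + dcrtc->regs_idx;
	unsigned int idx = 0;

	/* Queue only registers whose contents actually changed. */
	armada_reg_queue_set(regs, idx, 0x00200040, LCD_SPU_GRA_HPXL_VLN);

	/*
	 * Advance the shared index; the CRTC terminates the queue and
	 * writes it to the hardware after all planes have run.
	 */
	dcrtc->regs_idx += idx;
}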
diff --git a/drivers/gpu/drm/armada/armada_drm.h b/drivers/gpu/drm/armada/armada_drm.h
index cc4c557c9f66..f09083ff15d3 100644
--- a/drivers/gpu/drm/armada/armada_drm.h
+++ b/drivers/gpu/drm/armada/armada_drm.h
@@ -42,11 +42,12 @@ struct armada_private;
struct armada_variant {
bool has_spu_adv_reg;
- uint32_t spu_adv_reg;
int (*init)(struct armada_crtc *, struct device *);
int (*compute_clock)(struct armada_crtc *,
const struct drm_display_mode *,
uint32_t *);
+ void (*disable)(struct armada_crtc *);
+ void (*enable)(struct armada_crtc *, const struct drm_display_mode *);
};
/* Variant ops */
@@ -54,14 +55,10 @@ extern const struct armada_variant armada510_ops;
struct armada_private {
struct drm_device drm;
- struct work_struct fb_unref_work;
- DECLARE_KFIFO(fb_unref, struct drm_framebuffer *, 8);
struct drm_fb_helper *fbdev;
struct armada_crtc *dcrtc[2];
struct drm_mm linear; /* protected by linear_lock */
struct mutex linear_lock;
- struct drm_property *csc_yuv_prop;
- struct drm_property *csc_rgb_prop;
struct drm_property *colorkey_prop;
struct drm_property *colorkey_min_prop;
struct drm_property *colorkey_max_prop;
@@ -76,13 +73,6 @@ struct armada_private {
#endif
};
-void __armada_drm_queue_unref_work(struct drm_device *,
- struct drm_framebuffer *);
-void armada_drm_queue_unref_work(struct drm_device *,
- struct drm_framebuffer *);
-
-extern const struct drm_mode_config_funcs armada_drm_mode_config_funcs;
-
int armada_fbdev_init(struct drm_device *);
void armada_fbdev_fini(struct drm_device *);
diff --git a/drivers/gpu/drm/armada/armada_drv.c b/drivers/gpu/drm/armada/armada_drv.c
index 4b11b6b52f1d..fa31589b4fc0 100644
--- a/drivers/gpu/drm/armada/armada_drv.c
+++ b/drivers/gpu/drm/armada/armada_drv.c
@@ -9,46 +9,18 @@
#include <linux/component.h>
#include <linux/module.h>
#include <linux/of_graph.h>
+#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
#include <drm/drm_fb_helper.h>
#include <drm/drm_of.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_gem.h"
+#include "armada_fb.h"
#include "armada_hw.h"
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
-static void armada_drm_unref_work(struct work_struct *work)
-{
- struct armada_private *priv =
- container_of(work, struct armada_private, fb_unref_work);
- struct drm_framebuffer *fb;
-
- while (kfifo_get(&priv->fb_unref, &fb))
- drm_framebuffer_put(fb);
-}
-
-/* Must be called with dev->event_lock held */
-void __armada_drm_queue_unref_work(struct drm_device *dev,
- struct drm_framebuffer *fb)
-{
- struct armada_private *priv = dev->dev_private;
-
- WARN_ON(!kfifo_put(&priv->fb_unref, fb));
- schedule_work(&priv->fb_unref_work);
-}
-
-void armada_drm_queue_unref_work(struct drm_device *dev,
- struct drm_framebuffer *fb)
-{
- unsigned long flags;
-
- spin_lock_irqsave(&dev->event_lock, flags);
- __armada_drm_queue_unref_work(dev, fb);
- spin_unlock_irqrestore(&dev->event_lock, flags);
-}
-
static struct drm_ioctl_desc armada_ioctls[] = {
DRM_IOCTL_DEF_DRV(ARMADA_GEM_CREATE, armada_gem_create_ioctl,0),
DRM_IOCTL_DEF_DRV(ARMADA_GEM_MMAP, armada_gem_mmap_ioctl, 0),
@@ -72,11 +44,18 @@ static struct drm_driver armada_drm_driver = {
.desc = "Armada SoC DRM",
.date = "20120730",
.driver_features = DRIVER_GEM | DRIVER_MODESET |
- DRIVER_PRIME,
+ DRIVER_PRIME | DRIVER_ATOMIC,
.ioctls = armada_ioctls,
.fops = &armada_drm_fops,
};
+static const struct drm_mode_config_funcs armada_drm_mode_config_funcs = {
+ .fb_create = armada_fb_create,
+ .output_poll_changed = drm_fb_helper_output_poll_changed,
+ .atomic_check = drm_atomic_helper_check,
+ .atomic_commit = drm_atomic_helper_commit,
+};
+
static int armada_drm_bind(struct device *dev)
{
struct armada_private *priv;
@@ -109,7 +88,7 @@ static int armada_drm_bind(struct device *dev)
/*
* The drm_device structure must be at the start of
- * armada_private for drm_dev_unref() to work correctly.
+ * armada_private for drm_dev_put() to work correctly.
*/
BUILD_BUG_ON(offsetof(struct armada_private, drm) != 0);
@@ -125,9 +104,6 @@ static int armada_drm_bind(struct device *dev)
dev_set_drvdata(dev, &priv->drm);
- INIT_WORK(&priv->fb_unref_work, armada_drm_unref_work);
- INIT_KFIFO(priv->fb_unref);
-
/* Mode setting support */
drm_mode_config_init(&priv->drm);
priv->drm.mode_config.min_width = 320;
@@ -155,6 +131,8 @@ static int armada_drm_bind(struct device *dev)
priv->drm.irq_enabled = true;
+ drm_mode_config_reset(&priv->drm);
+
ret = armada_fbdev_init(&priv->drm);
if (ret)
goto err_comp;
@@ -179,8 +157,7 @@ static int armada_drm_bind(struct device *dev)
err_kms:
drm_mode_config_cleanup(&priv->drm);
drm_mm_takedown(&priv->linear);
- flush_work(&priv->fb_unref_work);
- drm_dev_unref(&priv->drm);
+ drm_dev_put(&priv->drm);
return ret;
}
@@ -198,9 +175,8 @@ static void armada_drm_unbind(struct device *dev)
drm_mode_config_cleanup(&priv->drm);
drm_mm_takedown(&priv->linear);
- flush_work(&priv->fb_unref_work);
- drm_dev_unref(&priv->drm);
+ drm_dev_put(&priv->drm);
}
static int compare_of(struct device *dev, void *data)
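drm_mode_config_reset(), newly called in the bind path above before fbdev init, gives every plane, CRTC and connector a valid initial atomic state by invoking the ->reset hooks introduced elsewhere in this series. A simplified sketch of what that call does (see drm_mode_config_reset() in the DRM core for the full version, which also covers encoders and connectors):

static void sketch_mode_config_reset(struct drm_device *dev)
{
	struct drm_plane *plane;
	struct drm_crtc *crtc;

	drm_for_each_plane(plane, dev)
		if (plane->funcs->reset)
			plane->funcs->reset(plane);	/* e.g. armada_overlay_reset */

	drm_for_each_crtc(crtc, dev)
		if (crtc->funcs->reset)
			crtc->funcs->reset(crtc);	/* drm_atomic_helper_crtc_reset */
}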
diff --git a/drivers/gpu/drm/armada/armada_fb.c b/drivers/gpu/drm/armada/armada_fb.c
index edd15126bde9..6bd638a54579 100644
--- a/drivers/gpu/drm/armada/armada_fb.c
+++ b/drivers/gpu/drm/armada/armada_fb.c
@@ -84,7 +84,7 @@ struct armada_framebuffer *armada_framebuffer_create(struct drm_device *dev,
return dfb;
}
-static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
+struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
struct drm_file *dfile, const struct drm_mode_fb_cmd2 *mode)
{
struct armada_gem_object *obj;
@@ -138,8 +138,3 @@ static struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
DRM_ERROR("failed to initialize framebuffer: %d\n", ret);
return ERR_PTR(ret);
}
-
-const struct drm_mode_config_funcs armada_drm_mode_config_funcs = {
- .fb_create = armada_fb_create,
- .output_poll_changed = drm_fb_helper_output_poll_changed,
-};
diff --git a/drivers/gpu/drm/armada/armada_fb.h b/drivers/gpu/drm/armada/armada_fb.h
index 5c130ff5da77..476daad0a36a 100644
--- a/drivers/gpu/drm/armada/armada_fb.h
+++ b/drivers/gpu/drm/armada/armada_fb.h
@@ -19,5 +19,6 @@ struct armada_framebuffer {
struct armada_framebuffer *armada_framebuffer_create(struct drm_device *,
const struct drm_mode_fb_cmd2 *, struct armada_gem_object *);
-
+struct drm_framebuffer *armada_fb_create(struct drm_device *dev,
+ struct drm_file *dfile, const struct drm_mode_fb_cmd2 *mode);
#endif
diff --git a/drivers/gpu/drm/armada/armada_fbdev.c b/drivers/gpu/drm/armada/armada_fbdev.c
index 2a59db0994b2..8d23700848df 100644
--- a/drivers/gpu/drm/armada/armada_fbdev.c
+++ b/drivers/gpu/drm/armada/armada_fbdev.c
@@ -24,7 +24,7 @@ static /*const*/ struct fb_ops armada_fb_ops = {
.fb_imageblit = drm_fb_helper_cfb_imageblit,
};
-static int armada_fb_create(struct drm_fb_helper *fbh,
+static int armada_fbdev_create(struct drm_fb_helper *fbh,
struct drm_fb_helper_surface_size *sizes)
{
struct drm_device *dev = fbh->dev;
@@ -108,7 +108,7 @@ static int armada_fb_probe(struct drm_fb_helper *fbh,
int ret = 0;
if (!fbh->fb) {
- ret = armada_fb_create(fbh, sizes);
+ ret = armada_fbdev_create(fbh, sizes);
if (ret == 0)
ret = 1;
}
diff --git a/drivers/gpu/drm/armada/armada_gem.c b/drivers/gpu/drm/armada/armada_gem.c
index 3fb37c75c065..892c1d9304bb 100644
--- a/drivers/gpu/drm/armada/armada_gem.c
+++ b/drivers/gpu/drm/armada/armada_gem.c
@@ -13,25 +13,14 @@
#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
-static int armada_gem_vm_fault(struct vm_fault *vmf)
+static vm_fault_t armada_gem_vm_fault(struct vm_fault *vmf)
{
struct drm_gem_object *gobj = vmf->vma->vm_private_data;
struct armada_gem_object *obj = drm_to_armada_gem(gobj);
unsigned long pfn = obj->phys_addr >> PAGE_SHIFT;
- int ret;
pfn += (vmf->address - vmf->vma->vm_start) >> PAGE_SHIFT;
- ret = vm_insert_pfn(vmf->vma, vmf->address, pfn);
-
- switch (ret) {
- case 0:
- case -EBUSY:
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- default:
- return VM_FAULT_SIGBUS;
- }
+ return vmf_insert_pfn(vmf->vma, vmf->address, pfn);
}
const struct vm_operations_struct armada_gem_vm_ops = {
diff --git a/drivers/gpu/drm/armada/armada_hw.h b/drivers/gpu/drm/armada/armada_hw.h
index 27319a8335e2..277580b36758 100644
--- a/drivers/gpu/drm/armada/armada_hw.h
+++ b/drivers/gpu/drm/armada/armada_hw.h
@@ -160,6 +160,7 @@ enum {
CFG_ALPHAM_GRA = 0x1 << 16,
CFG_ALPHAM_CFG = 0x2 << 16,
CFG_ALPHA_MASK = 0xff << 8,
+#define CFG_ALPHA(x) ((x) << 8)
CFG_PIXCMD_MASK = 0xff,
};
@@ -315,4 +316,19 @@ enum {
PWRDN_IRQ_LEVEL = 1 << 0,
};
+static inline u32 armada_rect_hw_fp(struct drm_rect *r)
+{
+ return (drm_rect_height(r) & 0xffff0000) | drm_rect_width(r) >> 16;
+}
+
+static inline u32 armada_rect_hw(struct drm_rect *r)
+{
+ return drm_rect_height(r) << 16 | (drm_rect_width(r) & 0x0000ffff);
+}
+
+static inline u32 armada_rect_yx(struct drm_rect *r)
+{
+ return (r)->y1 << 16 | ((r)->x1 & 0x0000ffff);
+}
+
#endif
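The three armada_rect_* helpers added above pack a struct drm_rect into the hardware's height/width and y/x register layouts; source rectangles use 16.16 fixed point, destination rectangles whole pixels. A standalone worked example with illustrative values:

#include <assert.h>
#include <stdint.h>

/* Re-statement of the helpers above over raw values, for illustration. */
static uint32_t rect_hw_fp(uint32_t w_fp, uint32_t h_fp)
{
	return (h_fp & 0xffff0000) | w_fp >> 16;	/* 16.16 input */
}

static uint32_t rect_hw(uint32_t w, uint32_t h)
{
	return h << 16 | (w & 0x0000ffff);
}

static uint32_t rect_yx(uint32_t x, uint32_t y)
{
	return y << 16 | (x & 0x0000ffff);
}

int main(void)
{
	/* A 64x32 source rect in 16.16 fixed point: height in [31:16]... */
	assert(rect_hw_fp(64 << 16, 32 << 16) == 0x00200040);
	/* ...displayed as 64x32 at (100,50) on the CRTC. */
	assert(rect_hw(64, 32) == 0x00200040);
	assert(rect_yx(100, 50) == 0x00320064);
	return 0;
}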
diff --git a/drivers/gpu/drm/armada/armada_overlay.c b/drivers/gpu/drm/armada/armada_overlay.c
index c391955009d6..eb7dfb65ef47 100644
--- a/drivers/gpu/drm/armada/armada_overlay.c
+++ b/drivers/gpu/drm/armada/armada_overlay.c
@@ -7,346 +7,468 @@
* published by the Free Software Foundation.
*/
#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include <drm/armada_drm.h>
#include "armada_crtc.h"
#include "armada_drm.h"
#include "armada_fb.h"
#include "armada_gem.h"
#include "armada_hw.h"
-#include <drm/armada_drm.h>
#include "armada_ioctlP.h"
+#include "armada_plane.h"
#include "armada_trace.h"
-struct armada_ovl_plane_properties {
- uint32_t colorkey_yr;
- uint32_t colorkey_ug;
- uint32_t colorkey_vb;
-#define K2R(val) (((val) >> 0) & 0xff)
-#define K2G(val) (((val) >> 8) & 0xff)
-#define K2B(val) (((val) >> 16) & 0xff)
- int16_t brightness;
- uint16_t contrast;
- uint16_t saturation;
- uint32_t colorkey_mode;
-};
-
-struct armada_ovl_plane {
- struct armada_plane base;
- struct armada_ovl_plane_properties prop;
+#define DEFAULT_BRIGHTNESS 0
+#define DEFAULT_CONTRAST 0x4000
+#define DEFAULT_SATURATION 0x4000
+#define DEFAULT_ENCODING DRM_COLOR_YCBCR_BT601
+
+struct armada_overlay_state {
+ struct drm_plane_state base;
+ u32 colorkey_yr;
+ u32 colorkey_ug;
+ u32 colorkey_vb;
+ u32 colorkey_mode;
+ u32 colorkey_enable;
+ s16 brightness;
+ u16 contrast;
+ u16 saturation;
};
-#define drm_to_armada_ovl_plane(p) \
- container_of(p, struct armada_ovl_plane, base.base)
-
+#define drm_to_overlay_state(s) \
+ container_of(s, struct armada_overlay_state, base)
-static void
-armada_ovl_update_attr(struct armada_ovl_plane_properties *prop,
- struct armada_crtc *dcrtc)
+static inline u32 armada_spu_contrast(struct drm_plane_state *state)
{
- writel_relaxed(prop->colorkey_yr, dcrtc->base + LCD_SPU_COLORKEY_Y);
- writel_relaxed(prop->colorkey_ug, dcrtc->base + LCD_SPU_COLORKEY_U);
- writel_relaxed(prop->colorkey_vb, dcrtc->base + LCD_SPU_COLORKEY_V);
+ return drm_to_overlay_state(state)->brightness << 16 |
+ drm_to_overlay_state(state)->contrast;
+}
- writel_relaxed(prop->brightness << 16 | prop->contrast,
- dcrtc->base + LCD_SPU_CONTRAST);
+static inline u32 armada_spu_saturation(struct drm_plane_state *state)
+{
/* Docs say 15:0, but it seems to actually be 31:16 on Armada 510 */
- writel_relaxed(prop->saturation << 16,
- dcrtc->base + LCD_SPU_SATURATION);
- writel_relaxed(0x00002000, dcrtc->base + LCD_SPU_CBSH_HUE);
-
- spin_lock_irq(&dcrtc->irq_lock);
- armada_updatel(prop->colorkey_mode | CFG_ALPHAM_GRA,
- CFG_CKMODE_MASK | CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
- dcrtc->base + LCD_SPU_DMA_CTRL1);
+ return drm_to_overlay_state(state)->saturation << 16;
+}
- armada_updatel(ADV_GRACOLORKEY, 0, dcrtc->base + LCD_SPU_ADV_REG);
- spin_unlock_irq(&dcrtc->irq_lock);
+static inline u32 armada_csc(struct drm_plane_state *state)
+{
+ /*
+ * The CFG_CSC_RGB_* settings control the output of the colour space
+ * converter, setting the range of output values it produces. Since
+ * we will be blending with the full-range graphics, we need to
+ * produce full-range RGB output from the conversion.
+ */
+ return CFG_CSC_RGB_COMPUTER |
+ (state->color_encoding == DRM_COLOR_YCBCR_BT709 ?
+ CFG_CSC_YUV_CCIR709 : CFG_CSC_YUV_CCIR601);
}
/* === Plane support === */
-static void armada_ovl_plane_work(struct armada_crtc *dcrtc,
- struct armada_plane_work *work)
+static void armada_drm_overlay_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
{
- unsigned long flags;
+ struct drm_plane_state *state = plane->state;
+ struct armada_crtc *dcrtc;
+ struct armada_regs *regs;
+ unsigned int idx;
+ u32 cfg, cfg_mask, val;
- trace_armada_ovl_plane_work(&dcrtc->crtc, work->plane);
+ DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
- spin_lock_irqsave(&dcrtc->irq_lock, flags);
- armada_drm_crtc_update_regs(dcrtc, work->regs);
- spin_unlock_irqrestore(&dcrtc->irq_lock, flags);
-}
-
-static void armada_ovl_plane_update_state(struct drm_plane_state *state,
- struct armada_regs *regs)
-{
- struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(state->plane);
- struct armada_framebuffer *dfb = drm_fb_to_armada_fb(state->fb);
- const struct drm_format_info *format;
- unsigned int idx = 0;
- bool fb_changed;
- u32 val, ctrl0;
- u16 src_x, src_y;
+ if (!state->fb || WARN_ON(!state->crtc))
+ return;
- ctrl0 = CFG_DMA_FMT(dfb->fmt) | CFG_DMA_MOD(dfb->mod) | CFG_CBSH_ENA;
- if (state->visible)
- ctrl0 |= CFG_DMA_ENA;
- if (drm_rect_width(&state->src) >> 16 != drm_rect_width(&state->dst))
- ctrl0 |= CFG_DMA_HSMOOTH;
+ DRM_DEBUG_KMS("[PLANE:%d:%s] is on [CRTC:%d:%s] with [FB:%d] visible %u->%u\n",
+ plane->base.id, plane->name,
+ state->crtc->base.id, state->crtc->name,
+ state->fb->base.id,
+ old_state->visible, state->visible);
- /*
- * Shifting a YUV packed format image by one pixel causes the U/V
- * planes to swap. Compensate for it by also toggling the UV swap.
- */
- format = dfb->fb.format;
- if (format->num_planes == 1 && state->src.x1 >> 16 & (format->hsub - 1))
- ctrl0 ^= CFG_DMA_MOD(CFG_SWAPUV);
+ dcrtc = drm_to_armada_crtc(state->crtc);
+ regs = dcrtc->regs + dcrtc->regs_idx;
- if (~dplane->base.state.ctrl0 & ctrl0 & CFG_DMA_ENA) {
- /* Power up the Y/U/V FIFOs on ENA 0->1 transitions */
+ idx = 0;
+ if (!old_state->visible && state->visible)
armada_reg_queue_mod(regs, idx,
0, CFG_PDWN16x66 | CFG_PDWN32x66,
LCD_SPU_SRAM_PARA1);
- }
-
- fb_changed = dplane->base.base.fb != &dfb->fb ||
- dplane->base.state.src_x != state->src.x1 >> 16 ||
- dplane->base.state.src_y != state->src.y1 >> 16;
-
- dplane->base.state.vsync_update = fb_changed;
-
+ val = armada_rect_hw_fp(&state->src);
+ if (armada_rect_hw_fp(&old_state->src) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_HPXL_VLN);
+ val = armada_rect_yx(&state->dst);
+ if (armada_rect_yx(&old_state->dst) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_OVSA_HPXL_VLN);
+ val = armada_rect_hw(&state->dst);
+ if (armada_rect_hw(&old_state->dst) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_DZM_HPXL_VLN);
/* FIXME: overlay on an interlaced display */
- if (fb_changed) {
- u32 addrs[3];
-
- dplane->base.state.src_y = src_y = state->src.y1 >> 16;
- dplane->base.state.src_x = src_x = state->src.x1 >> 16;
+ if (old_state->src.x1 != state->src.x1 ||
+ old_state->src.y1 != state->src.y1 ||
+ old_state->fb != state->fb) {
+ const struct drm_format_info *format;
+ u16 src_x, pitches[3];
+ u32 addrs[2][3];
- armada_drm_plane_calc_addrs(addrs, &dfb->fb, src_x, src_y);
+ armada_drm_plane_calc(state, addrs, pitches, false);
- armada_reg_queue_set(regs, idx, addrs[0],
+ armada_reg_queue_set(regs, idx, addrs[0][0],
LCD_SPU_DMA_START_ADDR_Y0);
- armada_reg_queue_set(regs, idx, addrs[1],
+ armada_reg_queue_set(regs, idx, addrs[0][1],
LCD_SPU_DMA_START_ADDR_U0);
- armada_reg_queue_set(regs, idx, addrs[2],
+ armada_reg_queue_set(regs, idx, addrs[0][2],
LCD_SPU_DMA_START_ADDR_V0);
- armada_reg_queue_set(regs, idx, addrs[0],
+ armada_reg_queue_set(regs, idx, addrs[1][0],
LCD_SPU_DMA_START_ADDR_Y1);
- armada_reg_queue_set(regs, idx, addrs[1],
+ armada_reg_queue_set(regs, idx, addrs[1][1],
LCD_SPU_DMA_START_ADDR_U1);
- armada_reg_queue_set(regs, idx, addrs[2],
+ armada_reg_queue_set(regs, idx, addrs[1][2],
LCD_SPU_DMA_START_ADDR_V1);
- val = dfb->fb.pitches[0] << 16 | dfb->fb.pitches[0];
- armada_reg_queue_set(regs, idx, val,
- LCD_SPU_DMA_PITCH_YC);
- val = dfb->fb.pitches[1] << 16 | dfb->fb.pitches[2];
- armada_reg_queue_set(regs, idx, val,
- LCD_SPU_DMA_PITCH_UV);
- }
+ val = pitches[0] << 16 | pitches[0];
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_PITCH_YC);
+ val = pitches[1] << 16 | pitches[2];
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_DMA_PITCH_UV);
- val = (drm_rect_height(&state->src) & 0xffff0000) |
- drm_rect_width(&state->src) >> 16;
- if (dplane->base.state.src_hw != val) {
- dplane->base.state.src_hw = val;
- armada_reg_queue_set(regs, idx, val,
- LCD_SPU_DMA_HPXL_VLN);
- }
+ cfg = CFG_DMA_FMT(drm_fb_to_armada_fb(state->fb)->fmt) |
+ CFG_DMA_MOD(drm_fb_to_armada_fb(state->fb)->mod) |
+ CFG_CBSH_ENA;
+ if (state->visible)
+ cfg |= CFG_DMA_ENA;
- val = drm_rect_height(&state->dst) << 16 | drm_rect_width(&state->dst);
- if (dplane->base.state.dst_hw != val) {
- dplane->base.state.dst_hw = val;
- armada_reg_queue_set(regs, idx, val,
- LCD_SPU_DZM_HPXL_VLN);
+ /*
+ * Shifting a YUV packed format image by one pixel causes the
+ * U/V planes to swap. Compensate for it by also toggling
+ * the UV swap.
+ */
+ format = state->fb->format;
+ src_x = state->src.x1 >> 16;
+ if (format->num_planes == 1 && src_x & (format->hsub - 1))
+ cfg ^= CFG_DMA_MOD(CFG_SWAPUV);
+ cfg_mask = CFG_CBSH_ENA | CFG_DMAFORMAT |
+ CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV |
+ CFG_SWAPYU | CFG_YUV2RGB) |
+ CFG_DMA_FTOGGLE | CFG_DMA_TSTMODE |
+ CFG_DMA_ENA;
+ } else if (old_state->visible != state->visible) {
+ cfg = state->visible ? CFG_DMA_ENA : 0;
+ cfg_mask = CFG_DMA_ENA;
+ } else {
+ cfg = cfg_mask = 0;
}
-
- val = state->dst.y1 << 16 | state->dst.x1;
- if (dplane->base.state.dst_yx != val) {
- dplane->base.state.dst_yx = val;
- armada_reg_queue_set(regs, idx, val,
- LCD_SPU_DMA_OVSA_HPXL_VLN);
+ if (drm_rect_width(&old_state->src) != drm_rect_width(&state->src) ||
+ drm_rect_width(&old_state->dst) != drm_rect_width(&state->dst)) {
+ cfg_mask |= CFG_DMA_HSMOOTH;
+ if (drm_rect_width(&state->src) >> 16 !=
+ drm_rect_width(&state->dst))
+ cfg |= CFG_DMA_HSMOOTH;
}
- if (dplane->base.state.ctrl0 != ctrl0) {
- dplane->base.state.ctrl0 = ctrl0;
- armada_reg_queue_mod(regs, idx, ctrl0,
- CFG_CBSH_ENA | CFG_DMAFORMAT | CFG_DMA_FTOGGLE |
- CFG_DMA_HSMOOTH | CFG_DMA_TSTMODE |
- CFG_DMA_MOD(CFG_SWAPRB | CFG_SWAPUV | CFG_SWAPYU |
- CFG_YUV2RGB) | CFG_DMA_ENA,
- LCD_SPU_DMA_CTRL0);
- dplane->base.state.vsync_update = true;
- }
+ if (cfg_mask)
+ armada_reg_queue_mod(regs, idx, cfg, cfg_mask,
+ LCD_SPU_DMA_CTRL0);
+
+ val = armada_spu_contrast(state);
+ if ((!old_state->visible && state->visible) ||
+ armada_spu_contrast(old_state) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_CONTRAST);
+ val = armada_spu_saturation(state);
+ if ((!old_state->visible && state->visible) ||
+ armada_spu_saturation(old_state) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_SATURATION);
+ if (!old_state->visible && state->visible)
+ armada_reg_queue_set(regs, idx, 0x00002000, LCD_SPU_CBSH_HUE);
+ val = armada_csc(state);
+ if ((!old_state->visible && state->visible) ||
+ armada_csc(old_state) != val)
+ armada_reg_queue_mod(regs, idx, val, CFG_CSC_MASK,
+ LCD_SPU_IOPAD_CONTROL);
+ val = drm_to_overlay_state(state)->colorkey_yr;
+ if ((!old_state->visible && state->visible) ||
+ drm_to_overlay_state(old_state)->colorkey_yr != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_COLORKEY_Y);
+ val = drm_to_overlay_state(state)->colorkey_ug;
+ if ((!old_state->visible && state->visible) ||
+ drm_to_overlay_state(old_state)->colorkey_ug != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_COLORKEY_U);
+ val = drm_to_overlay_state(state)->colorkey_vb;
+ if ((!old_state->visible && state->visible) ||
+ drm_to_overlay_state(old_state)->colorkey_vb != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_COLORKEY_V);
+ val = drm_to_overlay_state(state)->colorkey_mode;
+ if ((!old_state->visible && state->visible) ||
+ drm_to_overlay_state(old_state)->colorkey_mode != val)
+ armada_reg_queue_mod(regs, idx, val, CFG_CKMODE_MASK |
+ CFG_ALPHAM_MASK | CFG_ALPHA_MASK,
+ LCD_SPU_DMA_CTRL1);
+ val = drm_to_overlay_state(state)->colorkey_enable;
+ if (((!old_state->visible && state->visible) ||
+ drm_to_overlay_state(old_state)->colorkey_enable != val) &&
+ dcrtc->variant->has_spu_adv_reg)
+ armada_reg_queue_mod(regs, idx, val, ADV_GRACOLORKEY |
+ ADV_VIDCOLORKEY, LCD_SPU_ADV_REG);
+
+ dcrtc->regs_idx += idx;
+}
+
+static void armada_drm_overlay_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct armada_crtc *dcrtc;
+ struct armada_regs *regs;
+ unsigned int idx = 0;
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
+
+ if (!old_state->crtc)
+ return;
- dplane->base.state.changed = idx != 0;
+ DRM_DEBUG_KMS("[PLANE:%d:%s] was on [CRTC:%d:%s] with [FB:%d]\n",
+ plane->base.id, plane->name,
+ old_state->crtc->base.id, old_state->crtc->name,
+ old_state->fb->base.id);
- armada_reg_queue_end(regs, idx);
+ dcrtc = drm_to_armada_crtc(old_state->crtc);
+ regs = dcrtc->regs + dcrtc->regs_idx;
+
+ /* Disable plane and power down the YUV FIFOs */
+ armada_reg_queue_mod(regs, idx, 0, CFG_DMA_ENA, LCD_SPU_DMA_CTRL0);
+ armada_reg_queue_mod(regs, idx, CFG_PDWN16x66 | CFG_PDWN32x66, 0,
+ LCD_SPU_SRAM_PARA1);
+
+ dcrtc->regs_idx += idx;
}
+static const struct drm_plane_helper_funcs armada_overlay_plane_helper_funcs = {
+ .prepare_fb = armada_drm_plane_prepare_fb,
+ .cleanup_fb = armada_drm_plane_cleanup_fb,
+ .atomic_check = armada_drm_plane_atomic_check,
+ .atomic_update = armada_drm_overlay_plane_atomic_update,
+ .atomic_disable = armada_drm_overlay_plane_atomic_disable,
+};
+
static int
-armada_ovl_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
+armada_overlay_plane_update(struct drm_plane *plane, struct drm_crtc *crtc,
struct drm_framebuffer *fb,
int crtc_x, int crtc_y, unsigned crtc_w, unsigned crtc_h,
uint32_t src_x, uint32_t src_y, uint32_t src_w, uint32_t src_h,
struct drm_modeset_acquire_ctx *ctx)
{
- struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
- struct armada_crtc *dcrtc = drm_to_armada_crtc(crtc);
- struct armada_plane_work *work;
- struct drm_plane_state state = {
- .plane = plane,
- .crtc = crtc,
- .fb = fb,
- .src_x = src_x,
- .src_y = src_y,
- .src_w = src_w,
- .src_h = src_h,
- .crtc_x = crtc_x,
- .crtc_y = crtc_y,
- .crtc_w = crtc_w,
- .crtc_h = crtc_h,
- .rotation = DRM_MODE_ROTATE_0,
- };
- struct drm_crtc_state crtc_state = {
- .crtc = crtc,
- .enable = crtc->enabled,
- .mode = crtc->mode,
- };
- int ret;
+ struct drm_atomic_state *state;
+ struct drm_plane_state *plane_state;
+ int ret = 0;
trace_armada_ovl_plane_update(plane, crtc, fb,
crtc_x, crtc_y, crtc_w, crtc_h,
src_x, src_y, src_w, src_h);
- ret = drm_atomic_helper_check_plane_state(&state, &crtc_state, 0,
- INT_MAX, true, false);
- if (ret)
- return ret;
-
- work = &dplane->base.works[dplane->base.next_work];
-
- if (plane->fb != fb) {
- /*
- * Take a reference on the new framebuffer - we want to
- * hold on to it while the hardware is displaying it.
- */
- drm_framebuffer_reference(fb);
+ state = drm_atomic_state_alloc(plane->dev);
+ if (!state)
+ return -ENOMEM;
- work->old_fb = plane->fb;
- } else {
- work->old_fb = NULL;
+ state->acquire_ctx = ctx;
+ plane_state = drm_atomic_get_plane_state(state, plane);
+ if (IS_ERR(plane_state)) {
+ ret = PTR_ERR(plane_state);
+ goto fail;
}
- armada_ovl_plane_update_state(&state, work->regs);
-
- if (!dplane->base.state.changed)
- return 0;
-
- /* Wait for pending work to complete */
- if (armada_drm_plane_work_wait(&dplane->base, HZ / 25) == 0)
- armada_drm_plane_work_cancel(dcrtc, &dplane->base);
+ ret = drm_atomic_set_crtc_for_plane(plane_state, crtc);
+ if (ret != 0)
+ goto fail;
+
+ drm_atomic_set_fb_for_plane(plane_state, fb);
+ plane_state->crtc_x = crtc_x;
+ plane_state->crtc_y = crtc_y;
+ plane_state->crtc_h = crtc_h;
+ plane_state->crtc_w = crtc_w;
+ plane_state->src_x = src_x;
+ plane_state->src_y = src_y;
+ plane_state->src_h = src_h;
+ plane_state->src_w = src_w;
+
+ ret = drm_atomic_nonblocking_commit(state);
+fail:
+ drm_atomic_state_put(state);
+ return ret;
+}
- /* Just updating the position/size? */
- if (!dplane->base.state.vsync_update) {
- armada_ovl_plane_work(dcrtc, work);
- return 0;
- }
+static void armada_ovl_plane_destroy(struct drm_plane *plane)
+{
+ drm_plane_cleanup(plane);
+ kfree(plane);
+}
- if (!dcrtc->plane) {
- dcrtc->plane = plane;
- armada_ovl_update_attr(&dplane->prop, dcrtc);
+static void armada_overlay_reset(struct drm_plane *plane)
+{
+ struct armada_overlay_state *state;
+
+ if (plane->state)
+ __drm_atomic_helper_plane_destroy_state(plane->state);
+ kfree(plane->state);
+
+ state = kzalloc(sizeof(*state), GFP_KERNEL);
+ if (state) {
+ state->base.plane = plane;
+ state->base.color_encoding = DEFAULT_ENCODING;
+ state->base.color_range = DRM_COLOR_YCBCR_LIMITED_RANGE;
+ state->base.rotation = DRM_MODE_ROTATE_0;
+ state->colorkey_yr = 0xfefefe00;
+ state->colorkey_ug = 0x01010100;
+ state->colorkey_vb = 0x01010100;
+ state->colorkey_mode = CFG_CKMODE(CKMODE_RGB) |
+ CFG_ALPHAM_GRA | CFG_ALPHA(0);
+ state->colorkey_enable = ADV_GRACOLORKEY;
+ state->brightness = DEFAULT_BRIGHTNESS;
+ state->contrast = DEFAULT_CONTRAST;
+ state->saturation = DEFAULT_SATURATION;
}
-
- /* Queue it for update on the next interrupt if we are enabled */
- ret = armada_drm_plane_work_queue(dcrtc, work);
- if (ret)
- DRM_ERROR("failed to queue plane work: %d\n", ret);
-
- dplane->base.next_work = !dplane->base.next_work;
-
- return 0;
+ plane->state = &state->base;
}
-static void armada_ovl_plane_destroy(struct drm_plane *plane)
+struct drm_plane_state *
+armada_overlay_duplicate_state(struct drm_plane *plane)
{
- struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
+ struct armada_overlay_state *state;
- drm_plane_cleanup(plane);
+ if (WARN_ON(!plane->state))
+ return NULL;
- kfree(dplane);
+ state = kmemdup(plane->state, sizeof(*state), GFP_KERNEL);
+ if (state)
+ __drm_atomic_helper_plane_duplicate_state(plane, &state->base);
+ return &state->base;
}
-static int armada_ovl_plane_set_property(struct drm_plane *plane,
- struct drm_property *property, uint64_t val)
+static int armada_overlay_set_property(struct drm_plane *plane,
+ struct drm_plane_state *state, struct drm_property *property,
+ uint64_t val)
{
struct armada_private *priv = plane->dev->dev_private;
- struct armada_ovl_plane *dplane = drm_to_armada_ovl_plane(plane);
- bool update_attr = false;
+#define K2R(val) (((val) >> 0) & 0xff)
+#define K2G(val) (((val) >> 8) & 0xff)
+#define K2B(val) (((val) >> 16) & 0xff)
if (property == priv->colorkey_prop) {
#define CCC(v) ((v) << 24 | (v) << 16 | (v) << 8)
- dplane->prop.colorkey_yr = CCC(K2R(val));
- dplane->prop.colorkey_ug = CCC(K2G(val));
- dplane->prop.colorkey_vb = CCC(K2B(val));
+ drm_to_overlay_state(state)->colorkey_yr = CCC(K2R(val));
+ drm_to_overlay_state(state)->colorkey_ug = CCC(K2G(val));
+ drm_to_overlay_state(state)->colorkey_vb = CCC(K2B(val));
#undef CCC
- update_attr = true;
} else if (property == priv->colorkey_min_prop) {
- dplane->prop.colorkey_yr &= ~0x00ff0000;
- dplane->prop.colorkey_yr |= K2R(val) << 16;
- dplane->prop.colorkey_ug &= ~0x00ff0000;
- dplane->prop.colorkey_ug |= K2G(val) << 16;
- dplane->prop.colorkey_vb &= ~0x00ff0000;
- dplane->prop.colorkey_vb |= K2B(val) << 16;
- update_attr = true;
+ drm_to_overlay_state(state)->colorkey_yr &= ~0x00ff0000;
+ drm_to_overlay_state(state)->colorkey_yr |= K2R(val) << 16;
+ drm_to_overlay_state(state)->colorkey_ug &= ~0x00ff0000;
+ drm_to_overlay_state(state)->colorkey_ug |= K2G(val) << 16;
+ drm_to_overlay_state(state)->colorkey_vb &= ~0x00ff0000;
+ drm_to_overlay_state(state)->colorkey_vb |= K2B(val) << 16;
} else if (property == priv->colorkey_max_prop) {
- dplane->prop.colorkey_yr &= ~0xff000000;
- dplane->prop.colorkey_yr |= K2R(val) << 24;
- dplane->prop.colorkey_ug &= ~0xff000000;
- dplane->prop.colorkey_ug |= K2G(val) << 24;
- dplane->prop.colorkey_vb &= ~0xff000000;
- dplane->prop.colorkey_vb |= K2B(val) << 24;
- update_attr = true;
+ drm_to_overlay_state(state)->colorkey_yr &= ~0xff000000;
+ drm_to_overlay_state(state)->colorkey_yr |= K2R(val) << 24;
+ drm_to_overlay_state(state)->colorkey_ug &= ~0xff000000;
+ drm_to_overlay_state(state)->colorkey_ug |= K2G(val) << 24;
+ drm_to_overlay_state(state)->colorkey_vb &= ~0xff000000;
+ drm_to_overlay_state(state)->colorkey_vb |= K2B(val) << 24;
} else if (property == priv->colorkey_val_prop) {
- dplane->prop.colorkey_yr &= ~0x0000ff00;
- dplane->prop.colorkey_yr |= K2R(val) << 8;
- dplane->prop.colorkey_ug &= ~0x0000ff00;
- dplane->prop.colorkey_ug |= K2G(val) << 8;
- dplane->prop.colorkey_vb &= ~0x0000ff00;
- dplane->prop.colorkey_vb |= K2B(val) << 8;
- update_attr = true;
+ drm_to_overlay_state(state)->colorkey_yr &= ~0x0000ff00;
+ drm_to_overlay_state(state)->colorkey_yr |= K2R(val) << 8;
+ drm_to_overlay_state(state)->colorkey_ug &= ~0x0000ff00;
+ drm_to_overlay_state(state)->colorkey_ug |= K2G(val) << 8;
+ drm_to_overlay_state(state)->colorkey_vb &= ~0x0000ff00;
+ drm_to_overlay_state(state)->colorkey_vb |= K2B(val) << 8;
} else if (property == priv->colorkey_alpha_prop) {
- dplane->prop.colorkey_yr &= ~0x000000ff;
- dplane->prop.colorkey_yr |= K2R(val);
- dplane->prop.colorkey_ug &= ~0x000000ff;
- dplane->prop.colorkey_ug |= K2G(val);
- dplane->prop.colorkey_vb &= ~0x000000ff;
- dplane->prop.colorkey_vb |= K2B(val);
- update_attr = true;
+ drm_to_overlay_state(state)->colorkey_yr &= ~0x000000ff;
+ drm_to_overlay_state(state)->colorkey_yr |= K2R(val);
+ drm_to_overlay_state(state)->colorkey_ug &= ~0x000000ff;
+ drm_to_overlay_state(state)->colorkey_ug |= K2G(val);
+ drm_to_overlay_state(state)->colorkey_vb &= ~0x000000ff;
+ drm_to_overlay_state(state)->colorkey_vb |= K2B(val);
} else if (property == priv->colorkey_mode_prop) {
- dplane->prop.colorkey_mode &= ~CFG_CKMODE_MASK;
- dplane->prop.colorkey_mode |= CFG_CKMODE(val);
- update_attr = true;
+ if (val == CKMODE_DISABLE) {
+ drm_to_overlay_state(state)->colorkey_mode =
+ CFG_CKMODE(CKMODE_DISABLE) |
+ CFG_ALPHAM_CFG | CFG_ALPHA(255);
+ drm_to_overlay_state(state)->colorkey_enable = 0;
+ } else {
+ drm_to_overlay_state(state)->colorkey_mode =
+ CFG_CKMODE(val) |
+ CFG_ALPHAM_GRA | CFG_ALPHA(0);
+ drm_to_overlay_state(state)->colorkey_enable =
+ ADV_GRACOLORKEY;
+ }
} else if (property == priv->brightness_prop) {
- dplane->prop.brightness = val - 256;
- update_attr = true;
+ drm_to_overlay_state(state)->brightness = val - 256;
} else if (property == priv->contrast_prop) {
- dplane->prop.contrast = val;
- update_attr = true;
+ drm_to_overlay_state(state)->contrast = val;
} else if (property == priv->saturation_prop) {
- dplane->prop.saturation = val;
- update_attr = true;
+ drm_to_overlay_state(state)->saturation = val;
+ } else {
+ return -EINVAL;
}
+ return 0;
+}
- if (update_attr && dplane->base.base.crtc)
- armada_ovl_update_attr(&dplane->prop,
- drm_to_armada_crtc(dplane->base.base.crtc));
+static int armada_overlay_get_property(struct drm_plane *plane,
+ const struct drm_plane_state *state, struct drm_property *property,
+ uint64_t *val)
+{
+ struct armada_private *priv = plane->dev->dev_private;
+#define C2K(c,s) (((c) >> (s)) & 0xff)
+#define R2BGR(r,g,b,s) (C2K(r,s) << 0 | C2K(g,s) << 8 | C2K(b,s) << 16)
+ if (property == priv->colorkey_prop) {
+ /* Make a best-effort guess at this property's value */
+ *val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 16);
+ /* If min != max, or min != val, error out */
+ if (*val != R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 24) ||
+ *val != R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 8))
+ return -EINVAL;
+ } else if (property == priv->colorkey_min_prop) {
+ *val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 16);
+ } else if (property == priv->colorkey_max_prop) {
+ *val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 24);
+ } else if (property == priv->colorkey_val_prop) {
+ *val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 8);
+ } else if (property == priv->colorkey_alpha_prop) {
+ *val = R2BGR(drm_to_overlay_state(state)->colorkey_yr,
+ drm_to_overlay_state(state)->colorkey_ug,
+ drm_to_overlay_state(state)->colorkey_vb, 0);
+ } else if (property == priv->colorkey_mode_prop) {
+ *val = (drm_to_overlay_state(state)->colorkey_mode &
+ CFG_CKMODE_MASK) >> ffs(CFG_CKMODE_MASK);
+ } else if (property == priv->brightness_prop) {
+ *val = drm_to_overlay_state(state)->brightness + 256;
+ } else if (property == priv->contrast_prop) {
+ *val = drm_to_overlay_state(state)->contrast;
+ } else if (property == priv->saturation_prop) {
+ *val = drm_to_overlay_state(state)->saturation;
+ } else {
+ return -EINVAL;
+ }
return 0;
}
static const struct drm_plane_funcs armada_ovl_plane_funcs = {
- .update_plane = armada_ovl_plane_update,
- .disable_plane = armada_drm_plane_disable,
+ .update_plane = armada_overlay_plane_update,
+ .disable_plane = drm_atomic_helper_disable_plane,
.destroy = armada_ovl_plane_destroy,
- .set_property = armada_ovl_plane_set_property,
+ .reset = armada_overlay_reset,
+ .atomic_duplicate_state = armada_overlay_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .atomic_set_property = armada_overlay_set_property,
+ .atomic_get_property = armada_overlay_get_property,
};
static const uint32_t armada_ovl_formats[] = {
@@ -419,46 +541,31 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
{
struct armada_private *priv = dev->dev_private;
struct drm_mode_object *mobj;
- struct armada_ovl_plane *dplane;
+ struct drm_plane *overlay;
int ret;
ret = armada_overlay_create_properties(dev);
if (ret)
return ret;
- dplane = kzalloc(sizeof(*dplane), GFP_KERNEL);
- if (!dplane)
+ overlay = kzalloc(sizeof(*overlay), GFP_KERNEL);
+ if (!overlay)
return -ENOMEM;
- ret = armada_drm_plane_init(&dplane->base);
- if (ret) {
- kfree(dplane);
- return ret;
- }
-
- dplane->base.works[0].fn = armada_ovl_plane_work;
- dplane->base.works[1].fn = armada_ovl_plane_work;
+ drm_plane_helper_add(overlay, &armada_overlay_plane_helper_funcs);
- ret = drm_universal_plane_init(dev, &dplane->base.base, crtcs,
+ ret = drm_universal_plane_init(dev, overlay, crtcs,
&armada_ovl_plane_funcs,
armada_ovl_formats,
ARRAY_SIZE(armada_ovl_formats),
NULL,
DRM_PLANE_TYPE_OVERLAY, NULL);
if (ret) {
- kfree(dplane);
+ kfree(overlay);
return ret;
}
- dplane->prop.colorkey_yr = 0xfefefe00;
- dplane->prop.colorkey_ug = 0x01010100;
- dplane->prop.colorkey_vb = 0x01010100;
- dplane->prop.colorkey_mode = CFG_CKMODE(CKMODE_RGB);
- dplane->prop.brightness = 0;
- dplane->prop.contrast = 0x4000;
- dplane->prop.saturation = 0x4000;
-
- mobj = &dplane->base.base.base;
+ mobj = &overlay->base;
drm_object_attach_property(mobj, priv->colorkey_prop,
0x0101fe);
drm_object_attach_property(mobj, priv->colorkey_min_prop,
@@ -471,11 +578,19 @@ int armada_overlay_plane_create(struct drm_device *dev, unsigned long crtcs)
0x000000);
drm_object_attach_property(mobj, priv->colorkey_mode_prop,
CKMODE_RGB);
- drm_object_attach_property(mobj, priv->brightness_prop, 256);
+ drm_object_attach_property(mobj, priv->brightness_prop,
+ 256 + DEFAULT_BRIGHTNESS);
drm_object_attach_property(mobj, priv->contrast_prop,
- dplane->prop.contrast);
+ DEFAULT_CONTRAST);
drm_object_attach_property(mobj, priv->saturation_prop,
- dplane->prop.saturation);
+ DEFAULT_SATURATION);
- return 0;
+ ret = drm_plane_create_color_properties(overlay,
+ BIT(DRM_COLOR_YCBCR_BT601) |
+ BIT(DRM_COLOR_YCBCR_BT709),
+ BIT(DRM_COLOR_YCBCR_LIMITED_RANGE),
+ DEFAULT_ENCODING,
+ DRM_COLOR_YCBCR_LIMITED_RANGE);
+
+ return ret;
}
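armada_overlay_reset() and armada_overlay_duplicate_state() above follow the standard DRM recipe for driver-private plane state: embed struct drm_plane_state as the first member, rebuild it with defaults in ->reset, and kmemdup() plus __drm_atomic_helper_plane_duplicate_state() in ->atomic_duplicate_state. A generic sketch of the recipe with placeholder names:

struct my_plane_state {
	struct drm_plane_state base;	/* must be first for container_of() */
	u32 private_value;
};

#define to_my_state(s) container_of(s, struct my_plane_state, base)

static void my_plane_reset(struct drm_plane *plane)
{
	struct my_plane_state *state;

	if (plane->state)
		__drm_atomic_helper_plane_destroy_state(plane->state);
	kfree(plane->state);

	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state) {
		state->base.plane = plane;
		state->base.rotation = DRM_MODE_ROTATE_0;
		state->private_value = 0;	/* driver defaults go here */
	}
	plane->state = state ? &state->base : NULL;
}

static struct drm_plane_state *my_plane_duplicate(struct drm_plane *plane)
{
	struct my_plane_state *state;

	if (WARN_ON(!plane->state))
		return NULL;

	state = kmemdup(plane->state, sizeof(*state), GFP_KERNEL);
	if (state)
		__drm_atomic_helper_plane_duplicate_state(plane, &state->base);
	return state ? &state->base : NULL;
}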
diff --git a/drivers/gpu/drm/armada/armada_plane.c b/drivers/gpu/drm/armada/armada_plane.c
new file mode 100644
index 000000000000..9f36423dd394
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_plane.c
@@ -0,0 +1,289 @@
+/*
+ * Copyright (C) 2012 Russell King
+ * Rewritten from the dovefb driver, and Armada510 manuals.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 as
+ * published by the Free Software Foundation.
+ */
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_plane_helper.h>
+#include "armada_crtc.h"
+#include "armada_drm.h"
+#include "armada_fb.h"
+#include "armada_gem.h"
+#include "armada_hw.h"
+#include "armada_plane.h"
+#include "armada_trace.h"
+
+static const uint32_t armada_primary_formats[] = {
+ DRM_FORMAT_UYVY,
+ DRM_FORMAT_YUYV,
+ DRM_FORMAT_VYUY,
+ DRM_FORMAT_YVYU,
+ DRM_FORMAT_ARGB8888,
+ DRM_FORMAT_ABGR8888,
+ DRM_FORMAT_XRGB8888,
+ DRM_FORMAT_XBGR8888,
+ DRM_FORMAT_RGB888,
+ DRM_FORMAT_BGR888,
+ DRM_FORMAT_ARGB1555,
+ DRM_FORMAT_ABGR1555,
+ DRM_FORMAT_RGB565,
+ DRM_FORMAT_BGR565,
+};
+
+void armada_drm_plane_calc(struct drm_plane_state *state, u32 addrs[2][3],
+ u16 pitches[3], bool interlaced)
+{
+ struct drm_framebuffer *fb = state->fb;
+ const struct drm_format_info *format = fb->format;
+ unsigned int num_planes = format->num_planes;
+ unsigned int x = state->src.x1 >> 16;
+ unsigned int y = state->src.y1 >> 16;
+ u32 addr = drm_fb_obj(fb)->dev_addr;
+ int i;
+
+ DRM_DEBUG_KMS("pitch %u x %d y %d bpp %d\n",
+ fb->pitches[0], x, y, format->cpp[0] * 8);
+
+ if (num_planes > 3)
+ num_planes = 3;
+
+ addrs[0][0] = addr + fb->offsets[0] + y * fb->pitches[0] +
+ x * format->cpp[0];
+ pitches[0] = fb->pitches[0];
+
+ y /= format->vsub;
+ x /= format->hsub;
+
+ for (i = 1; i < num_planes; i++) {
+ addrs[0][i] = addr + fb->offsets[i] + y * fb->pitches[i] +
+ x * format->cpp[i];
+ pitches[i] = fb->pitches[i];
+ }
+ for (; i < 3; i++) {
+ addrs[0][i] = 0;
+ pitches[i] = 0;
+ }
+ if (interlaced) {
+ for (i = 0; i < 3; i++) {
+ addrs[1][i] = addrs[0][i] + pitches[i];
+ pitches[i] *= 2;
+ }
+ } else {
+ for (i = 0; i < 3; i++)
+ addrs[1][i] = addrs[0][i];
+ }
+}
+
+static unsigned armada_drm_crtc_calc_fb(struct drm_plane_state *state,
+ struct armada_regs *regs, bool interlaced)
+{
+ u16 pitches[3];
+ u32 addrs[2][3];
+ unsigned i = 0;
+
+ armada_drm_plane_calc(state, addrs, pitches, interlaced);
+
+ /* write offset, base, and pitch */
+ armada_reg_queue_set(regs, i, addrs[0][0], LCD_CFG_GRA_START_ADDR0);
+ armada_reg_queue_set(regs, i, addrs[1][0], LCD_CFG_GRA_START_ADDR1);
+ armada_reg_queue_mod(regs, i, pitches[0], 0xffff, LCD_CFG_GRA_PITCH);
+
+ return i;
+}
+
+int armada_drm_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ DRM_DEBUG_KMS("[PLANE:%d:%s] [FB:%d]\n",
+ plane->base.id, plane->name,
+ state->fb ? state->fb->base.id : 0);
+
+ /*
+ * Take a reference on the new framebuffer - we want to
+ * hold on to it while the hardware is displaying it.
+ */
+ if (state->fb)
+ drm_framebuffer_get(state->fb);
+ return 0;
+}
+
+void armada_drm_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ DRM_DEBUG_KMS("[PLANE:%d:%s] [FB:%d]\n",
+ plane->base.id, plane->name,
+ old_state->fb ? old_state->fb->base.id : 0);
+
+ if (old_state->fb)
+ drm_framebuffer_put(old_state->fb);
+}
+
+int armada_drm_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ if (state->fb && !WARN_ON(!state->crtc)) {
+ struct drm_crtc *crtc = state->crtc;
+ struct drm_crtc_state *crtc_state;
+
+ if (state->state)
+ crtc_state = drm_atomic_get_existing_crtc_state(state->state, crtc);
+ else
+ crtc_state = crtc->state;
+ return drm_atomic_helper_check_plane_state(state, crtc_state,
+ 0, INT_MAX,
+ true, false);
+ } else {
+ state->visible = false;
+ }
+ return 0;
+}
+
+static void armada_drm_primary_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_plane_state *state = plane->state;
+ struct armada_crtc *dcrtc;
+ struct armada_regs *regs;
+ u32 cfg, cfg_mask, val;
+ unsigned int idx;
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
+
+ if (!state->fb || WARN_ON(!state->crtc))
+ return;
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] is on [CRTC:%d:%s] with [FB:%d] visible %u->%u\n",
+ plane->base.id, plane->name,
+ state->crtc->base.id, state->crtc->name,
+ state->fb->base.id,
+ old_state->visible, state->visible);
+
+ dcrtc = drm_to_armada_crtc(state->crtc);
+ regs = dcrtc->regs + dcrtc->regs_idx;
+
+ idx = 0;
+ if (!old_state->visible && state->visible) {
+ val = CFG_PDWN64x66;
+ if (drm_fb_to_armada_fb(state->fb)->fmt > CFG_420)
+ val |= CFG_PDWN256x24;
+ armada_reg_queue_mod(regs, idx, 0, val, LCD_SPU_SRAM_PARA1);
+ }
+ val = armada_rect_hw_fp(&state->src);
+ if (armada_rect_hw_fp(&old_state->src) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_GRA_HPXL_VLN);
+ val = armada_rect_yx(&state->dst);
+ if (armada_rect_yx(&old_state->dst) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_GRA_OVSA_HPXL_VLN);
+ val = armada_rect_hw(&state->dst);
+ if (armada_rect_hw(&old_state->dst) != val)
+ armada_reg_queue_set(regs, idx, val, LCD_SPU_GZM_HPXL_VLN);
+ if (old_state->src.x1 != state->src.x1 ||
+ old_state->src.y1 != state->src.y1 ||
+ old_state->fb != state->fb ||
+ state->crtc->state->mode_changed) {
+ idx += armada_drm_crtc_calc_fb(state, regs + idx,
+ dcrtc->interlaced);
+ }
+ if (old_state->fb != state->fb ||
+ state->crtc->state->mode_changed) {
+ cfg = CFG_GRA_FMT(drm_fb_to_armada_fb(state->fb)->fmt) |
+ CFG_GRA_MOD(drm_fb_to_armada_fb(state->fb)->mod);
+ if (drm_fb_to_armada_fb(state->fb)->fmt > CFG_420)
+ cfg |= CFG_PALETTE_ENA;
+ if (state->visible)
+ cfg |= CFG_GRA_ENA;
+ if (dcrtc->interlaced)
+ cfg |= CFG_GRA_FTOGGLE;
+ cfg_mask = CFG_GRAFORMAT |
+ CFG_GRA_MOD(CFG_SWAPRB | CFG_SWAPUV |
+ CFG_SWAPYU | CFG_YUV2RGB) |
+ CFG_PALETTE_ENA | CFG_GRA_FTOGGLE |
+ CFG_GRA_ENA;
+ } else if (old_state->visible != state->visible) {
+ cfg = state->visible ? CFG_GRA_ENA : 0;
+ cfg_mask = CFG_GRA_ENA;
+ } else {
+ cfg = cfg_mask = 0;
+ }
+ if (drm_rect_width(&old_state->src) != drm_rect_width(&state->src) ||
+ drm_rect_width(&old_state->dst) != drm_rect_width(&state->dst)) {
+ cfg_mask |= CFG_GRA_HSMOOTH;
+ if (drm_rect_width(&state->src) >> 16 !=
+ drm_rect_width(&state->dst))
+ cfg |= CFG_GRA_HSMOOTH;
+ }
+
+ if (cfg_mask)
+ armada_reg_queue_mod(regs, idx, cfg, cfg_mask,
+ LCD_SPU_DMA_CTRL0);
+
+ dcrtc->regs_idx += idx;
+}
+
+static void armada_drm_primary_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct armada_crtc *dcrtc;
+ struct armada_regs *regs;
+ unsigned int idx = 0;
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s]\n", plane->base.id, plane->name);
+
+ if (!old_state->crtc)
+ return;
+
+ DRM_DEBUG_KMS("[PLANE:%d:%s] was on [CRTC:%d:%s] with [FB:%d]\n",
+ plane->base.id, plane->name,
+ old_state->crtc->base.id, old_state->crtc->name,
+ old_state->fb->base.id);
+
+ dcrtc = drm_to_armada_crtc(old_state->crtc);
+ regs = dcrtc->regs + dcrtc->regs_idx;
+
+ /* Disable plane and power down most RAMs and FIFOs */
+ armada_reg_queue_mod(regs, idx, 0, CFG_GRA_ENA, LCD_SPU_DMA_CTRL0);
+ armada_reg_queue_mod(regs, idx, CFG_PDWN256x32 | CFG_PDWN256x24 |
+ CFG_PDWN256x8 | CFG_PDWN32x32 | CFG_PDWN64x66,
+ 0, LCD_SPU_SRAM_PARA1);
+
+ dcrtc->regs_idx += idx;
+}
+
+static const struct drm_plane_helper_funcs armada_primary_plane_helper_funcs = {
+ .prepare_fb = armada_drm_plane_prepare_fb,
+ .cleanup_fb = armada_drm_plane_cleanup_fb,
+ .atomic_check = armada_drm_plane_atomic_check,
+ .atomic_update = armada_drm_primary_plane_atomic_update,
+ .atomic_disable = armada_drm_primary_plane_atomic_disable,
+};
+
+static const struct drm_plane_funcs armada_primary_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = drm_primary_helper_destroy,
+ .reset = drm_atomic_helper_plane_reset,
+ .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+};
+
+int armada_drm_primary_plane_init(struct drm_device *drm,
+ struct drm_plane *primary)
+{
+ int ret;
+
+ drm_plane_helper_add(primary, &armada_primary_plane_helper_funcs);
+
+ ret = drm_universal_plane_init(drm, primary, 0,
+ &armada_primary_plane_funcs,
+ armada_primary_formats,
+ ARRAY_SIZE(armada_primary_formats),
+ NULL,
+ DRM_PLANE_TYPE_PRIMARY, NULL);
+
+ return ret;
+}
diff --git a/drivers/gpu/drm/armada/armada_plane.h b/drivers/gpu/drm/armada/armada_plane.h
new file mode 100644
index 000000000000..ff4281ba7fad
--- /dev/null
+++ b/drivers/gpu/drm/armada/armada_plane.h
@@ -0,0 +1,15 @@
+#ifndef ARMADA_PLANE_H
+#define ARMADA_PLANE_H
+
+void armada_drm_plane_calc(struct drm_plane_state *state, u32 addrs[2][3],
+ u16 pitches[3], bool interlaced);
+int armada_drm_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state);
+void armada_drm_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state);
+int armada_drm_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state);
+int armada_drm_primary_plane_init(struct drm_device *drm,
+ struct drm_plane *primary);
+
+#endif
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
index d73281095fac..9e34bce089d0 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_crtc.c
@@ -101,18 +101,34 @@ static void atmel_hlcdc_crtc_mode_set_nofb(struct drm_crtc *c)
(adj->crtc_hdisplay - 1) |
((adj->crtc_vdisplay - 1) << 16));
- cfg = 0;
+ cfg = ATMEL_HLCDC_CLKSEL;
- prate = clk_get_rate(crtc->dc->hlcdc->sys_clk);
+ prate = 2 * clk_get_rate(crtc->dc->hlcdc->sys_clk);
mode_rate = adj->crtc_clock * 1000;
- if ((prate / 2) < mode_rate) {
- prate *= 2;
- cfg |= ATMEL_HLCDC_CLKSEL;
- }
div = DIV_ROUND_UP(prate, mode_rate);
- if (div < 2)
+ if (div < 2) {
div = 2;
+ } else if (ATMEL_HLCDC_CLKDIV(div) & ~ATMEL_HLCDC_CLKDIV_MASK) {
+ /* The divider ended up too big, try a lower base rate. */
+ cfg &= ~ATMEL_HLCDC_CLKSEL;
+ prate /= 2;
+ div = DIV_ROUND_UP(prate, mode_rate);
+ if (ATMEL_HLCDC_CLKDIV(div) & ~ATMEL_HLCDC_CLKDIV_MASK)
+ div = ATMEL_HLCDC_CLKDIV_MASK;
+ } else {
+ int div_low = prate / mode_rate;
+
+ if (div_low >= 2 &&
+ ((prate / div_low - mode_rate) <
+ 10 * (mode_rate - prate / div)))
+ /*
+ * The error is at least 10 times smaller when clocking above
+ * the requested rate than below it, so prefer the higher
+ * frequency.
+ */
+ div = div_low;
+ }
cfg |= ATMEL_HLCDC_CLKDIV(div);
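
For reference, a self-contained sketch of the divider heuristic above, with an assumed 8-bit divider mask (the real width comes from ATMEL_HLCDC_CLKDIV_MASK) and the CLKSEL half-rate fallback simplified to a clamp:

#include <stdio.h>

#define DIV_MASK 0xff	/* assumed field width, for illustration only */

static unsigned int pick_div(unsigned long prate, unsigned long mode_rate)
{
	unsigned long div = (prate + mode_rate - 1) / mode_rate; /* round up */
	unsigned long div_low = prate / mode_rate;		 /* round down */

	if (div < 2)
		return 2;
	if (div > DIV_MASK)
		return DIV_MASK;
	/* rounding up clocks below the requested rate, rounding down above
	 * it; prefer the faster clock when its error is 10x smaller */
	if (div_low >= 2 &&
	    (prate / div_low - mode_rate) < 10 * (mode_rate - prate / div))
		return div_low;
	return div;
}

int main(void)
{
	/* 2 * 132 MHz base, 65 MHz mode: div 5 gives 52.8 MHz, div_low 4
	 * gives 66 MHz; the 1 MHz overshoot wins over the 12.2 MHz drop */
	printf("%u\n", pick_div(264000000UL, 65000000UL)); /* prints 4 */
	return 0;
}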
@@ -226,6 +242,55 @@ static void atmel_hlcdc_crtc_atomic_enable(struct drm_crtc *c,
#define ATMEL_HLCDC_RGB888_OUTPUT BIT(3)
#define ATMEL_HLCDC_OUTPUT_MODE_MASK GENMASK(3, 0)
+static int atmel_hlcdc_connector_output_mode(struct drm_connector_state *state)
+{
+ struct drm_connector *connector = state->connector;
+ struct drm_display_info *info = &connector->display_info;
+ struct drm_encoder *encoder;
+ unsigned int supported_fmts = 0;
+ int j;
+
+ encoder = state->best_encoder;
+ if (!encoder)
+ encoder = connector->encoder;
+
+ switch (atmel_hlcdc_encoder_get_bus_fmt(encoder)) {
+ case 0:
+ break;
+ case MEDIA_BUS_FMT_RGB444_1X12:
+ return ATMEL_HLCDC_RGB444_OUTPUT;
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ return ATMEL_HLCDC_RGB565_OUTPUT;
+ case MEDIA_BUS_FMT_RGB666_1X18:
+ return ATMEL_HLCDC_RGB666_OUTPUT;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ return ATMEL_HLCDC_RGB888_OUTPUT;
+ default:
+ return -EINVAL;
+ }
+
+ for (j = 0; j < info->num_bus_formats; j++) {
+ switch (info->bus_formats[j]) {
+ case MEDIA_BUS_FMT_RGB444_1X12:
+ supported_fmts |= ATMEL_HLCDC_RGB444_OUTPUT;
+ break;
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ supported_fmts |= ATMEL_HLCDC_RGB565_OUTPUT;
+ break;
+ case MEDIA_BUS_FMT_RGB666_1X18:
+ supported_fmts |= ATMEL_HLCDC_RGB666_OUTPUT;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ supported_fmts |= ATMEL_HLCDC_RGB888_OUTPUT;
+ break;
+ default:
+ break;
+ }
+ }
+
+ return supported_fmts;
+}
+
static int atmel_hlcdc_crtc_select_output_mode(struct drm_crtc_state *state)
{
unsigned int output_fmts = ATMEL_HLCDC_OUTPUT_MODE_MASK;
@@ -238,31 +303,12 @@ static int atmel_hlcdc_crtc_select_output_mode(struct drm_crtc_state *state)
crtc = drm_crtc_to_atmel_hlcdc_crtc(state->crtc);
for_each_new_connector_in_state(state->state, connector, cstate, i) {
- struct drm_display_info *info = &connector->display_info;
unsigned int supported_fmts = 0;
- int j;
if (!cstate->crtc)
continue;
- for (j = 0; j < info->num_bus_formats; j++) {
- switch (info->bus_formats[j]) {
- case MEDIA_BUS_FMT_RGB444_1X12:
- supported_fmts |= ATMEL_HLCDC_RGB444_OUTPUT;
- break;
- case MEDIA_BUS_FMT_RGB565_1X16:
- supported_fmts |= ATMEL_HLCDC_RGB565_OUTPUT;
- break;
- case MEDIA_BUS_FMT_RGB666_1X18:
- supported_fmts |= ATMEL_HLCDC_RGB666_OUTPUT;
- break;
- case MEDIA_BUS_FMT_RGB888_1X24:
- supported_fmts |= ATMEL_HLCDC_RGB888_OUTPUT;
- break;
- default:
- break;
- }
- }
+ supported_fmts = atmel_hlcdc_connector_output_mode(cstate);
if (crtc->dc->desc->conflicting_output_formats)
output_fmts &= supported_fmts;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
index 60c937f42114..4cc1e03f0aee 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_dc.h
@@ -441,5 +441,6 @@ void atmel_hlcdc_crtc_irq(struct drm_crtc *c);
int atmel_hlcdc_crtc_create(struct drm_device *dev);
int atmel_hlcdc_create_outputs(struct drm_device *dev);
+int atmel_hlcdc_encoder_get_bus_fmt(struct drm_encoder *encoder);
#endif /* DRM_ATMEL_HLCDC_H */
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
index 8db51fb131db..f73d8a92274e 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_output.c
@@ -27,33 +27,94 @@
#include "atmel_hlcdc_dc.h"
+struct atmel_hlcdc_rgb_output {
+ struct drm_encoder encoder;
+ int bus_fmt;
+};
+
static const struct drm_encoder_funcs atmel_hlcdc_panel_encoder_funcs = {
.destroy = drm_encoder_cleanup,
};
+static struct atmel_hlcdc_rgb_output *
+atmel_hlcdc_encoder_to_rgb_output(struct drm_encoder *encoder)
+{
+ return container_of(encoder, struct atmel_hlcdc_rgb_output, encoder);
+}
+
+int atmel_hlcdc_encoder_get_bus_fmt(struct drm_encoder *encoder)
+{
+ struct atmel_hlcdc_rgb_output *output;
+
+ output = atmel_hlcdc_encoder_to_rgb_output(encoder);
+
+ return output->bus_fmt;
+}
+
+static int atmel_hlcdc_of_bus_fmt(const struct device_node *ep)
+{
+ u32 bus_width;
+ int ret;
+
+ ret = of_property_read_u32(ep, "bus-width", &bus_width);
+ if (ret == -EINVAL)
+ return 0;
+ if (ret)
+ return ret;
+
+ switch (bus_width) {
+ case 12:
+ return MEDIA_BUS_FMT_RGB444_1X12;
+ case 16:
+ return MEDIA_BUS_FMT_RGB565_1X16;
+ case 18:
+ return MEDIA_BUS_FMT_RGB666_1X18;
+ case 24:
+ return MEDIA_BUS_FMT_RGB888_1X24;
+ default:
+ return -EINVAL;
+ }
+}
+
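For example (values grounded in the switch above): an OF graph endpoint carrying bus-width = <18> resolves to MEDIA_BUS_FMT_RGB666_1X18, which atmel_hlcdc_connector_output_mode() then maps to ATMEL_HLCDC_RGB666_OUTPUT; if the property is absent, the function returns 0 and the connector's display_info bus formats decide instead.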
static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
{
- struct drm_encoder *encoder;
+ struct atmel_hlcdc_rgb_output *output;
+ struct device_node *ep;
struct drm_panel *panel;
struct drm_bridge *bridge;
int ret;
+ ep = of_graph_get_endpoint_by_regs(dev->dev->of_node, 0, endpoint);
+ if (!ep)
+ return -ENODEV;
+
ret = drm_of_find_panel_or_bridge(dev->dev->of_node, 0, endpoint,
&panel, &bridge);
- if (ret)
+ if (ret) {
+ of_node_put(ep);
return ret;
+ }
- encoder = devm_kzalloc(dev->dev, sizeof(*encoder), GFP_KERNEL);
- if (!encoder)
+ output = devm_kzalloc(dev->dev, sizeof(*output), GFP_KERNEL);
+ if (!output) {
+ of_node_put(ep);
+ return -ENOMEM;
+ }
+
+ output->bus_fmt = atmel_hlcdc_of_bus_fmt(ep);
+ of_node_put(ep);
+ if (output->bus_fmt < 0) {
+ dev_err(dev->dev, "endpoint %d: invalid bus width\n", endpoint);
return -EINVAL;
+ }
- ret = drm_encoder_init(dev, encoder,
+ ret = drm_encoder_init(dev, &output->encoder,
&atmel_hlcdc_panel_encoder_funcs,
DRM_MODE_ENCODER_NONE, NULL);
if (ret)
return ret;
- encoder->possible_crtcs = 0x1;
+ output->encoder.possible_crtcs = 0x1;
if (panel) {
bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_Unknown);
@@ -62,7 +123,7 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
}
if (bridge) {
- ret = drm_bridge_attach(encoder, bridge, NULL);
+ ret = drm_bridge_attach(&output->encoder, bridge, NULL);
if (!ret)
return 0;
@@ -70,7 +131,7 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
drm_panel_bridge_remove(bridge);
}
- drm_encoder_cleanup(encoder);
+ drm_encoder_cleanup(&output->encoder);
return ret;
}
@@ -78,12 +139,23 @@ static int atmel_hlcdc_attach_endpoint(struct drm_device *dev, int endpoint)
int atmel_hlcdc_create_outputs(struct drm_device *dev)
{
int endpoint, ret = 0;
+ int attached = 0;
- for (endpoint = 0; !ret; endpoint++)
+ /*
+ * Always scan the first four endpoints, even if some return -ENODEV;
+ * past that, keep going only while endpoints keep attaching.
+ */
+ for (endpoint = 0; !ret || endpoint < 4; endpoint++) {
ret = atmel_hlcdc_attach_endpoint(dev, endpoint);
+ if (ret == -ENODEV)
+ continue;
+ if (ret)
+ break;
+ attached++;
+ }
/* At least one device was successfully attached. */
- if (ret == -ENODEV && endpoint)
+ if (ret == -ENODEV && attached)
return 0;
return ret;
diff --git a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
index 04440064b9b7..9330a076e15a 100644
--- a/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
+++ b/drivers/gpu/drm/atmel-hlcdc/atmel_hlcdc_plane.c
@@ -942,10 +942,7 @@ static void atmel_hlcdc_plane_reset(struct drm_plane *p)
"Failed to allocate initial plane state\n");
return;
}
-
- p->state = &state->base;
- p->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
- p->state->plane = p;
+ __drm_atomic_helper_plane_reset(p, &state->base);
}
}
diff --git a/drivers/gpu/drm/bochs/bochs_drv.c b/drivers/gpu/drm/bochs/bochs_drv.c
index 7b20318483e4..c61b40c72b62 100644
--- a/drivers/gpu/drm/bochs/bochs_drv.c
+++ b/drivers/gpu/drm/bochs/bochs_drv.c
@@ -143,22 +143,6 @@ static const struct dev_pm_ops bochs_pm_ops = {
/* ---------------------------------------------------------------------- */
/* pci interface */
-static int bochs_kick_out_firmware_fb(struct pci_dev *pdev)
-{
- struct apertures_struct *ap;
-
- ap = alloc_apertures(1);
- if (!ap)
- return -ENOMEM;
-
- ap->ranges[0].base = pci_resource_start(pdev, 0);
- ap->ranges[0].size = pci_resource_len(pdev, 0);
- drm_fb_helper_remove_conflicting_framebuffers(ap, "bochsdrmfb", false);
- kfree(ap);
-
- return 0;
-}
-
static int bochs_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -171,7 +155,7 @@ static int bochs_pci_probe(struct pci_dev *pdev,
return -ENOMEM;
}
- ret = bochs_kick_out_firmware_fb(pdev);
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "bochsdrmfb");
if (ret)
return ret;
diff --git a/drivers/gpu/drm/bochs/bochs_mm.c b/drivers/gpu/drm/bochs/bochs_mm.c
index 39cd08416773..c9c7097030ca 100644
--- a/drivers/gpu/drm/bochs/bochs_mm.c
+++ b/drivers/gpu/drm/bochs/bochs_mm.c
@@ -430,7 +430,7 @@ static void bochs_bo_unref(struct bochs_bo **bo)
return;
tbo = &((*bo)->bo);
- ttm_bo_unref(&tbo);
+ ttm_bo_put(tbo);
*bo = NULL;
}
diff --git a/drivers/gpu/drm/bridge/Kconfig b/drivers/gpu/drm/bridge/Kconfig
index bf6cad6c9178..9eeb8ef0b174 100644
--- a/drivers/gpu/drm/bridge/Kconfig
+++ b/drivers/gpu/drm/bridge/Kconfig
@@ -112,6 +112,14 @@ config DRM_THINE_THC63LVD1024
---help---
Thine THC63LVD1024 LVDS/parallel converter driver.
+config DRM_TOSHIBA_TC358764
+ tristate "TC358764 DSI/LVDS bridge"
+ depends on DRM && DRM_PANEL
+ depends on OF
+ select DRM_MIPI_DSI
+ help
+ Toshiba TC358764 DSI/LVDS bridge driver.
+
config DRM_TOSHIBA_TC358767
tristate "Toshiba TC358767 eDP bridge"
depends on OF
@@ -128,6 +136,16 @@ config DRM_TI_TFP410
---help---
Texas Instruments TFP410 DVI/HDMI Transmitter driver
+config DRM_TI_SN65DSI86
+ tristate "TI SN65DSI86 DSI to eDP bridge"
+ depends on OF
+ select DRM_KMS_HELPER
+ select REGMAP_I2C
+ select DRM_PANEL
+ select DRM_MIPI_DSI
+ help
+ Texas Instruments SN65DSI86 DSI to eDP Bridge driver
+
source "drivers/gpu/drm/bridge/analogix/Kconfig"
source "drivers/gpu/drm/bridge/adv7511/Kconfig"
diff --git a/drivers/gpu/drm/bridge/Makefile b/drivers/gpu/drm/bridge/Makefile
index 35f88d48ec20..4934fcf5a6f8 100644
--- a/drivers/gpu/drm/bridge/Makefile
+++ b/drivers/gpu/drm/bridge/Makefile
@@ -10,8 +10,10 @@ obj-$(CONFIG_DRM_SIL_SII8620) += sil-sii8620.o
obj-$(CONFIG_DRM_SII902X) += sii902x.o
obj-$(CONFIG_DRM_SII9234) += sii9234.o
obj-$(CONFIG_DRM_THINE_THC63LVD1024) += thc63lvd1024.o
+obj-$(CONFIG_DRM_TOSHIBA_TC358764) += tc358764.o
obj-$(CONFIG_DRM_TOSHIBA_TC358767) += tc358767.o
obj-$(CONFIG_DRM_ANALOGIX_DP) += analogix/
obj-$(CONFIG_DRM_I2C_ADV7511) += adv7511/
+obj-$(CONFIG_DRM_TI_SN65DSI86) += ti-sn65dsi86.o
obj-$(CONFIG_DRM_TI_TFP410) += ti-tfp410.o
obj-y += synopsys/
diff --git a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
index 6437b878724a..85c2d407a52e 100644
--- a/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
+++ b/drivers/gpu/drm/bridge/adv7511/adv7511_drv.c
@@ -429,6 +429,18 @@ static void adv7511_hpd_work(struct work_struct *work)
else
status = connector_status_disconnected;
+ /*
+ * The bridge resets its registers on unplug. So when we get a plug
+ * event and we're already supposed to be powered, cycle the bridge to
+ * restore its state.
+ */
+ if (status == connector_status_connected &&
+ adv7511->connector.status == connector_status_disconnected &&
+ adv7511->powered) {
+ regcache_mark_dirty(adv7511->regmap);
+ adv7511_power_on(adv7511);
+ }
+
if (adv7511->connector.status != status) {
adv7511->connector.status = status;
if (status == connector_status_disconnected)
diff --git a/drivers/gpu/drm/bridge/sil-sii8620.c b/drivers/gpu/drm/bridge/sil-sii8620.c
index 250effa0e6b8..a6e8f4591e63 100644
--- a/drivers/gpu/drm/bridge/sil-sii8620.c
+++ b/drivers/gpu/drm/bridge/sil-sii8620.c
@@ -14,6 +14,7 @@
#include <drm/bridge/mhl.h>
#include <drm/drm_crtc.h>
#include <drm/drm_edid.h>
+#include <drm/drm_encoder.h>
#include <linux/clk.h>
#include <linux/delay.h>
@@ -72,9 +73,7 @@ struct sii8620 {
struct regulator_bulk_data supplies[2];
struct mutex lock; /* context lock, protects fields below */
int error;
- int pixel_clock;
unsigned int use_packed_pixel:1;
- int video_code;
enum sii8620_mode mode;
enum sii8620_sink_type sink_type;
u8 cbus_status;
@@ -82,7 +81,6 @@ struct sii8620 {
u8 xstat[MHL_XDS_SIZE];
u8 devcap[MHL_DCAP_SIZE];
u8 xdevcap[MHL_XDC_SIZE];
- u8 avif[HDMI_INFOFRAME_SIZE(AVI)];
bool feature_complete;
bool devcap_read;
bool sink_detected;
@@ -1017,21 +1015,36 @@ static void sii8620_stop_video(struct sii8620 *ctx)
static void sii8620_set_format(struct sii8620 *ctx)
{
+ u8 out_fmt;
+
if (sii8620_is_mhl3(ctx)) {
sii8620_setbits(ctx, REG_M3_P0CTRL,
BIT_M3_P0CTRL_MHL3_P0_PIXEL_MODE_PACKED,
ctx->use_packed_pixel ? ~0 : 0);
} else {
+ if (ctx->use_packed_pixel) {
+ sii8620_write_seq_static(ctx,
+ REG_VID_MODE, BIT_VID_MODE_M1080P,
+ REG_MHL_TOP_CTL, BIT_MHL_TOP_CTL_MHL_PP_SEL | 1,
+ REG_MHLTX_CTL6, 0x60
+ );
+ } else {
sii8620_write_seq_static(ctx,
REG_VID_MODE, 0,
REG_MHL_TOP_CTL, 1,
REG_MHLTX_CTL6, 0xa0
);
+ }
}
+ if (ctx->use_packed_pixel)
+ out_fmt = VAL_TPI_FORMAT(YCBCR422, FULL);
+ else
+ out_fmt = VAL_TPI_FORMAT(RGB, FULL);
+
sii8620_write_seq(ctx,
REG_TPI_INPUT, VAL_TPI_FORMAT(RGB, FULL),
- REG_TPI_OUTPUT, VAL_TPI_FORMAT(RGB, FULL),
+ REG_TPI_OUTPUT, out_fmt,
);
}
@@ -1082,18 +1095,28 @@ static ssize_t mhl3_infoframe_pack(struct mhl3_infoframe *frame,
return frm_len;
}
-static void sii8620_set_infoframes(struct sii8620 *ctx)
+static void sii8620_set_infoframes(struct sii8620 *ctx,
+ struct drm_display_mode *mode)
{
struct mhl3_infoframe mhl_frm;
union hdmi_infoframe frm;
u8 buf[31];
int ret;
+ ret = drm_hdmi_avi_infoframe_from_display_mode(&frm.avi,
+ mode,
+ true);
+ if (ctx->use_packed_pixel)
+ frm.avi.colorspace = HDMI_COLORSPACE_YUV422;
+
+ if (!ret)
+ ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf));
+ if (ret > 0)
+ sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3);
+
if (!sii8620_is_mhl3(ctx) || !ctx->use_packed_pixel) {
sii8620_write(ctx, REG_TPI_SC,
BIT_TPI_SC_TPI_OUTPUT_MODE_0_HDMI);
- sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, ctx->avif + 3,
- ARRAY_SIZE(ctx->avif) - 3);
sii8620_write(ctx, REG_PKT_FILTER_0,
BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
BIT_PKT_FILTER_0_DROP_MPEG_PKT |
@@ -1102,16 +1125,6 @@ static void sii8620_set_infoframes(struct sii8620 *ctx)
return;
}
- ret = hdmi_avi_infoframe_init(&frm.avi);
- frm.avi.colorspace = HDMI_COLORSPACE_YUV422;
- frm.avi.active_aspect = HDMI_ACTIVE_ASPECT_PICTURE;
- frm.avi.picture_aspect = HDMI_PICTURE_ASPECT_16_9;
- frm.avi.colorimetry = HDMI_COLORIMETRY_ITU_709;
- frm.avi.video_code = ctx->video_code;
- if (!ret)
- ret = hdmi_avi_infoframe_pack(&frm.avi, buf, ARRAY_SIZE(buf));
- if (ret > 0)
- sii8620_write_buf(ctx, REG_TPI_AVI_CHSUM, buf + 3, ret - 3);
sii8620_write(ctx, REG_PKT_FILTER_0,
BIT_PKT_FILTER_0_DROP_CEA_GAMUT_PKT |
BIT_PKT_FILTER_0_DROP_MPEG_PKT |
@@ -1131,6 +1144,9 @@ static void sii8620_set_infoframes(struct sii8620 *ctx)
static void sii8620_start_video(struct sii8620 *ctx)
{
+ struct drm_display_mode *mode =
+ &ctx->bridge.encoder->crtc->state->adjusted_mode;
+
if (!sii8620_is_mhl3(ctx))
sii8620_stop_video(ctx);
@@ -1149,8 +1165,14 @@ static void sii8620_start_video(struct sii8620 *ctx)
sii8620_set_format(ctx);
if (!sii8620_is_mhl3(ctx)) {
- sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
- MHL_DST_LM_CLK_MODE_NORMAL | MHL_DST_LM_PATH_ENABLED);
+ u8 link_mode = MHL_DST_LM_PATH_ENABLED;
+
+ if (ctx->use_packed_pixel)
+ link_mode |= MHL_DST_LM_CLK_MODE_PACKED_PIXEL;
+ else
+ link_mode |= MHL_DST_LM_CLK_MODE_NORMAL;
+
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE), link_mode);
sii8620_set_auto_zone(ctx);
} else {
static const struct {
@@ -1167,7 +1189,7 @@ static void sii8620_start_video(struct sii8620 *ctx)
MHL_XDS_LINK_RATE_6_0_GBPS, 0x40 },
};
u8 p0_ctrl = BIT_M3_P0CTRL_MHL3_P0_PORT_EN;
- int clk = ctx->pixel_clock * (ctx->use_packed_pixel ? 2 : 3);
+ int clk = mode->clock * (ctx->use_packed_pixel ? 2 : 3);
int i;
for (i = 0; i < ARRAY_SIZE(clk_spec) - 1; ++i)
@@ -1196,7 +1218,7 @@ static void sii8620_start_video(struct sii8620 *ctx)
clk_spec[i].link_rate);
}
- sii8620_set_infoframes(ctx);
+ sii8620_set_infoframes(ctx, mode);
}
static void sii8620_disable_hpd(struct sii8620 *ctx)
@@ -1661,14 +1683,18 @@ static void sii8620_status_dcap_ready(struct sii8620 *ctx)
static void sii8620_status_changed_path(struct sii8620 *ctx)
{
- if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED) {
- sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
- MHL_DST_LM_CLK_MODE_NORMAL
- | MHL_DST_LM_PATH_ENABLED);
- } else {
- sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
- MHL_DST_LM_CLK_MODE_NORMAL);
- }
+ u8 link_mode;
+
+ if (ctx->use_packed_pixel)
+ link_mode = MHL_DST_LM_CLK_MODE_PACKED_PIXEL;
+ else
+ link_mode = MHL_DST_LM_CLK_MODE_NORMAL;
+
+ if (ctx->stat[MHL_DST_LINK_MODE] & MHL_DST_LM_PATH_ENABLED)
+ link_mode |= MHL_DST_LM_PATH_ENABLED;
+
+ sii8620_mt_write_stat(ctx, MHL_DST_REG(LINK_MODE),
+ link_mode);
}
static void sii8620_msc_mr_write_stat(struct sii8620 *ctx)
@@ -2242,8 +2268,6 @@ static bool sii8620_mode_fixup(struct drm_bridge *bridge,
mutex_lock(&ctx->lock);
ctx->use_packed_pixel = sii8620_is_packing_required(ctx, adjusted_mode);
- ctx->video_code = drm_match_cea_mode(adjusted_mode);
- ctx->pixel_clock = adjusted_mode->clock;
mutex_unlock(&ctx->lock);
diff --git a/drivers/gpu/drm/bridge/synopsys/Makefile b/drivers/gpu/drm/bridge/synopsys/Makefile
index 5dad97d920be..3e1b1e3d9533 100644
--- a/drivers/gpu/drm/bridge/synopsys/Makefile
+++ b/drivers/gpu/drm/bridge/synopsys/Makefile
@@ -1,5 +1,3 @@
-#ccflags-y := -Iinclude/drm
-
obj-$(CONFIG_DRM_DW_HDMI) += dw-hdmi.o
obj-$(CONFIG_DRM_DW_HDMI_AHB_AUDIO) += dw-hdmi-ahb-audio.o
obj-$(CONFIG_DRM_DW_HDMI_I2S_AUDIO) += dw-hdmi-i2s-audio.o
diff --git a/drivers/gpu/drm/bridge/tc358764.c b/drivers/gpu/drm/bridge/tc358764.c
new file mode 100644
index 000000000000..ee6b98efa9c2
--- /dev/null
+++ b/drivers/gpu/drm/bridge/tc358764.c
@@ -0,0 +1,499 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (C) 2018 Samsung Electronics Co., Ltd
+ *
+ * Authors:
+ * Andrzej Hajda <a.hajda@samsung.com>
+ * Maciej Purski <m.purski@samsung.com>
+ */
+
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_fb_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <drm/drmP.h>
+#include <linux/gpio/consumer.h>
+#include <linux/of_graph.h>
+#include <linux/regulator/consumer.h>
+#include <video/mipi_display.h>
+
+#define FLD_MASK(start, end) (((1 << ((start) - (end) + 1)) - 1) << (end))
+#define FLD_VAL(val, start, end) (((val) << (end)) & FLD_MASK(start, end))
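
A few worked expansions of these bitfield helpers (values chosen arbitrarily; verifiable with C11 static assertions):

_Static_assert(FLD_MASK(8, 0) == 0x000001ff, "bits 8..0 set");
_Static_assert(FLD_MASK(24, 16) == 0x01ff0000, "bits 24..16 set");
_Static_assert(FLD_VAL(0x3, 10, 8) == 0x00000300, "value 3 at bits 10..8");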
+
+/* PPI layer registers */
+#define PPI_STARTPPI 0x0104 /* START control bit */
+#define PPI_LPTXTIMECNT 0x0114 /* LPTX timing signal */
+#define PPI_LANEENABLE 0x0134 /* Enables each lane */
+#define PPI_TX_RX_TA 0x013C /* BTA timing parameters */
+#define PPI_D0S_CLRSIPOCOUNT 0x0164 /* Assertion timer for Lane 0 */
+#define PPI_D1S_CLRSIPOCOUNT 0x0168 /* Assertion timer for Lane 1 */
+#define PPI_D2S_CLRSIPOCOUNT 0x016C /* Assertion timer for Lane 2 */
+#define PPI_D3S_CLRSIPOCOUNT 0x0170 /* Assertion timer for Lane 3 */
+#define PPI_START_FUNCTION 1
+
+/* DSI layer registers */
+#define DSI_STARTDSI 0x0204 /* START control bit of DSI-TX */
+#define DSI_LANEENABLE 0x0210 /* Enables each lane */
+#define DSI_RX_START 1
+
+/* Video path registers */
+#define VP_CTRL 0x0450 /* Video Path Control */
+#define VP_CTRL_MSF(v) FLD_VAL(v, 0, 0) /* Magic square in RGB666 */
+#define VP_CTRL_VTGEN(v) FLD_VAL(v, 4, 4) /* Use chip clock for timing */
+#define VP_CTRL_EVTMODE(v) FLD_VAL(v, 5, 5) /* Event mode */
+#define VP_CTRL_RGB888(v) FLD_VAL(v, 8, 8) /* RGB888 mode */
+#define VP_CTRL_VSDELAY(v) FLD_VAL(v, 31, 20) /* VSYNC delay */
+#define VP_CTRL_HSPOL BIT(17) /* Polarity of HSYNC signal */
+#define VP_CTRL_DEPOL BIT(18) /* Polarity of DE signal */
+#define VP_CTRL_VSPOL BIT(19) /* Polarity of VSYNC signal */
+#define VP_HTIM1 0x0454 /* Horizontal Timing Control 1 */
+#define VP_HTIM1_HBP(v) FLD_VAL(v, 24, 16)
+#define VP_HTIM1_HSYNC(v) FLD_VAL(v, 8, 0)
+#define VP_HTIM2 0x0458 /* Horizontal Timing Control 2 */
+#define VP_HTIM2_HFP(v) FLD_VAL(v, 24, 16)
+#define VP_HTIM2_HACT(v) FLD_VAL(v, 10, 0)
+#define VP_VTIM1 0x045C /* Vertical Timing Control 1 */
+#define VP_VTIM1_VBP(v) FLD_VAL(v, 23, 16)
+#define VP_VTIM1_VSYNC(v) FLD_VAL(v, 7, 0)
+#define VP_VTIM2 0x0460 /* Vertical Timing Control 2 */
+#define VP_VTIM2_VFP(v) FLD_VAL(v, 23, 16)
+#define VP_VTIM2_VACT(v) FLD_VAL(v, 10, 0)
+#define VP_VFUEN 0x0464 /* Video Frame Timing Update Enable */
+
+/* LVDS registers */
+#define LV_MX0003 0x0480 /* Mux input bit 0 to 3 */
+#define LV_MX0407 0x0484 /* Mux input bit 4 to 7 */
+#define LV_MX0811 0x0488 /* Mux input bit 8 to 11 */
+#define LV_MX1215 0x048C /* Mux input bit 12 to 15 */
+#define LV_MX1619 0x0490 /* Mux input bit 16 to 19 */
+#define LV_MX2023 0x0494 /* Mux input bit 20 to 23 */
+#define LV_MX2427 0x0498 /* Mux input bit 24 to 27 */
+#define LV_MX(b0, b1, b2, b3) (FLD_VAL(b0, 4, 0) | FLD_VAL(b1, 12, 8) | \
+ FLD_VAL(b2, 20, 16) | FLD_VAL(b3, 28, 24))
+
+/* Input bit numbers used in mux registers */
+enum {
+ LVI_R0,
+ LVI_R1,
+ LVI_R2,
+ LVI_R3,
+ LVI_R4,
+ LVI_R5,
+ LVI_R6,
+ LVI_R7,
+ LVI_G0,
+ LVI_G1,
+ LVI_G2,
+ LVI_G3,
+ LVI_G4,
+ LVI_G5,
+ LVI_G6,
+ LVI_G7,
+ LVI_B0,
+ LVI_B1,
+ LVI_B2,
+ LVI_B3,
+ LVI_B4,
+ LVI_B5,
+ LVI_B6,
+ LVI_B7,
+ LVI_HS,
+ LVI_VS,
+ LVI_DE,
+ LVI_L0
+};
+
+#define LV_CFG 0x049C /* LVDS Configuration */
+#define LV_PHY0 0x04A0 /* LVDS PHY 0 */
+#define LV_PHY0_RST(v) FLD_VAL(v, 22, 22) /* PHY reset */
+#define LV_PHY0_IS(v) FLD_VAL(v, 15, 14)
+#define LV_PHY0_ND(v) FLD_VAL(v, 4, 0) /* Frequency range select */
+#define LV_PHY0_PRBS_ON(v) FLD_VAL(v, 20, 16) /* Clock/Data Flag pins */
+
+/* System registers */
+#define SYS_RST 0x0504 /* System Reset */
+#define SYS_ID 0x0580 /* System ID */
+
+#define SYS_RST_I2CS BIT(0) /* Reset I2C-Slave controller */
+#define SYS_RST_I2CM BIT(1) /* Reset I2C-Master controller */
+#define SYS_RST_LCD BIT(2) /* Reset LCD controller */
+#define SYS_RST_BM BIT(3) /* Reset Bus Management controller */
+#define SYS_RST_DSIRX BIT(4) /* Reset DSI-RX and App controller */
+#define SYS_RST_REG BIT(5) /* Reset Register module */
+
+#define LPX_PERIOD 2
+#define TTA_SURE 3
+#define TTA_GET 0x20000
+
+/* Lane enable PPI and DSI register bits */
+#define LANEENABLE_CLEN BIT(0)
+#define LANEENABLE_L0EN BIT(1)
+#define LANEENABLE_L1EN BIT(2)
+#define LANEENABLE_L2EN BIT(3)
+#define LANEENABLE_L3EN BIT(4)
+
+/* LVCFG fields */
+#define LV_CFG_LVEN BIT(0)
+#define LV_CFG_LVDLINK BIT(1)
+#define LV_CFG_CLKPOL1 BIT(2)
+#define LV_CFG_CLKPOL2 BIT(3)
+
+static const char * const tc358764_supplies[] = {
+ "vddc", "vddio", "vddlvds"
+};
+
+struct tc358764 {
+ struct device *dev;
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+ struct regulator_bulk_data supplies[ARRAY_SIZE(tc358764_supplies)];
+ struct gpio_desc *gpio_reset;
+ struct drm_panel *panel;
+ int error;
+};
+
+static int tc358764_clear_error(struct tc358764 *ctx)
+{
+ int ret = ctx->error;
+
+ ctx->error = 0;
+ return ret;
+}
+
+static void tc358764_read(struct tc358764 *ctx, u16 addr, u32 *val)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ ssize_t ret;
+
+ if (ctx->error)
+ return;
+
+ cpu_to_le16s(&addr);
+ ret = mipi_dsi_generic_read(dsi, &addr, sizeof(addr), val, sizeof(*val));
+ if (ret >= 0)
+ le32_to_cpus(val);
+ else
+ ctx->error = ret;
+
+ dev_dbg(ctx->dev, "read: addr=%#x, val=%#x\n", addr, *val);
+}
+
+static void tc358764_write(struct tc358764 *ctx, u16 addr, u32 val)
+{
+ struct mipi_dsi_device *dsi = to_mipi_dsi_device(ctx->dev);
+ ssize_t ret;
+ u8 data[6];
+
+ if (ctx->error)
+ return;
+
+ data[0] = addr;
+ data[1] = addr >> 8;
+ data[2] = val;
+ data[3] = val >> 8;
+ data[4] = val >> 16;
+ data[5] = val >> 24;
+
+ ret = mipi_dsi_generic_write(dsi, data, sizeof(data));
+ if (ret < 0)
+ ctx->error = ret;
+}
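
As a worked example of the payload layout built above: writing PPI_START_FUNCTION (0x00000001) to PPI_STARTPPI (register 0x0104) emits the six-byte DSI generic long write

	data[] = { 0x04, 0x01,			/* register address, LE */
		   0x01, 0x00, 0x00, 0x00 };	/* register value, LE */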
+
+static inline struct tc358764 *bridge_to_tc358764(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct tc358764, bridge);
+}
+
+static inline
+struct tc358764 *connector_to_tc358764(struct drm_connector *connector)
+{
+ return container_of(connector, struct tc358764, connector);
+}
+
+static int tc358764_init(struct tc358764 *ctx)
+{
+ u32 v = 0;
+
+ tc358764_read(ctx, SYS_ID, &v);
+ if (ctx->error)
+ return tc358764_clear_error(ctx);
+ dev_info(ctx->dev, "ID: %#x\n", v);
+
+ /* configure PPI counters */
+ tc358764_write(ctx, PPI_TX_RX_TA, TTA_GET | TTA_SURE);
+ tc358764_write(ctx, PPI_LPTXTIMECNT, LPX_PERIOD);
+ tc358764_write(ctx, PPI_D0S_CLRSIPOCOUNT, 5);
+ tc358764_write(ctx, PPI_D1S_CLRSIPOCOUNT, 5);
+ tc358764_write(ctx, PPI_D2S_CLRSIPOCOUNT, 5);
+ tc358764_write(ctx, PPI_D3S_CLRSIPOCOUNT, 5);
+
+ /* enable four data lanes and clock lane */
+ tc358764_write(ctx, PPI_LANEENABLE, LANEENABLE_L3EN | LANEENABLE_L2EN |
+ LANEENABLE_L1EN | LANEENABLE_L0EN | LANEENABLE_CLEN);
+ tc358764_write(ctx, DSI_LANEENABLE, LANEENABLE_L3EN | LANEENABLE_L2EN |
+ LANEENABLE_L1EN | LANEENABLE_L0EN | LANEENABLE_CLEN);
+
+ /* start */
+ tc358764_write(ctx, PPI_STARTPPI, PPI_START_FUNCTION);
+ tc358764_write(ctx, DSI_STARTDSI, DSI_RX_START);
+
+ /* configure video path */
+ tc358764_write(ctx, VP_CTRL, VP_CTRL_VSDELAY(15) | VP_CTRL_RGB888(1) |
+ VP_CTRL_EVTMODE(1) | VP_CTRL_HSPOL | VP_CTRL_VSPOL);
+
+ /* reset PHY */
+ tc358764_write(ctx, LV_PHY0, LV_PHY0_RST(1) |
+ LV_PHY0_PRBS_ON(4) | LV_PHY0_IS(2) | LV_PHY0_ND(6));
+ tc358764_write(ctx, LV_PHY0, LV_PHY0_PRBS_ON(4) | LV_PHY0_IS(2) |
+ LV_PHY0_ND(6));
+
+ /* reset bridge */
+ tc358764_write(ctx, SYS_RST, SYS_RST_LCD);
+
+ /* set bit order */
+ tc358764_write(ctx, LV_MX0003, LV_MX(LVI_R0, LVI_R1, LVI_R2, LVI_R3));
+ tc358764_write(ctx, LV_MX0407, LV_MX(LVI_R4, LVI_R7, LVI_R5, LVI_G0));
+ tc358764_write(ctx, LV_MX0811, LV_MX(LVI_G1, LVI_G2, LVI_G6, LVI_G7));
+ tc358764_write(ctx, LV_MX1215, LV_MX(LVI_G3, LVI_G4, LVI_G5, LVI_B0));
+ tc358764_write(ctx, LV_MX1619, LV_MX(LVI_B6, LVI_B7, LVI_B1, LVI_B2));
+ tc358764_write(ctx, LV_MX2023, LV_MX(LVI_B3, LVI_B4, LVI_B5, LVI_L0));
+ tc358764_write(ctx, LV_MX2427, LV_MX(LVI_HS, LVI_VS, LVI_DE, LVI_R6));
+ tc358764_write(ctx, LV_CFG, LV_CFG_CLKPOL2 | LV_CFG_CLKPOL1 |
+ LV_CFG_LVEN);
+
+ return tc358764_clear_error(ctx);
+}
+
+static void tc358764_reset(struct tc358764 *ctx)
+{
+ gpiod_set_value(ctx->gpio_reset, 1);
+ usleep_range(1000, 2000);
+ gpiod_set_value(ctx->gpio_reset, 0);
+ usleep_range(1000, 2000);
+}
+
+static int tc358764_get_modes(struct drm_connector *connector)
+{
+ struct tc358764 *ctx = connector_to_tc358764(connector);
+
+ return drm_panel_get_modes(ctx->panel);
+}
+
+static const
+struct drm_connector_helper_funcs tc358764_connector_helper_funcs = {
+ .get_modes = tc358764_get_modes,
+};
+
+static const struct drm_connector_funcs tc358764_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static void tc358764_disable(struct drm_bridge *bridge)
+{
+ struct tc358764 *ctx = bridge_to_tc358764(bridge);
+ int ret = drm_panel_disable(bridge_to_tc358764(bridge)->panel);
+
+ if (ret < 0)
+ dev_err(ctx->dev, "error disabling panel (%d)\n", ret);
+}
+
+static void tc358764_post_disable(struct drm_bridge *bridge)
+{
+ struct tc358764 *ctx = bridge_to_tc358764(bridge);
+ int ret;
+
+ ret = drm_panel_unprepare(ctx->panel);
+ if (ret < 0)
+ dev_err(ctx->dev, "error unpreparing panel (%d)\n", ret);
+ tc358764_reset(ctx);
+ usleep_range(10000, 15000);
+ ret = regulator_bulk_disable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0)
+ dev_err(ctx->dev, "error disabling regulators (%d)\n", ret);
+}
+
+static void tc358764_pre_enable(struct drm_bridge *bridge)
+{
+ struct tc358764 *ctx = bridge_to_tc358764(bridge);
+ int ret;
+
+ ret = regulator_bulk_enable(ARRAY_SIZE(ctx->supplies), ctx->supplies);
+ if (ret < 0)
+ dev_err(ctx->dev, "error enabling regulators (%d)\n", ret);
+ usleep_range(10000, 15000);
+ tc358764_reset(ctx);
+ ret = tc358764_init(ctx);
+ if (ret < 0)
+ dev_err(ctx->dev, "error initializing bridge (%d)\n", ret);
+ ret = drm_panel_prepare(ctx->panel);
+ if (ret < 0)
+ dev_err(ctx->dev, "error preparing panel (%d)\n", ret);
+}
+
+static void tc358764_enable(struct drm_bridge *bridge)
+{
+ struct tc358764 *ctx = bridge_to_tc358764(bridge);
+ int ret = drm_panel_enable(ctx->panel);
+
+ if (ret < 0)
+ dev_err(ctx->dev, "error enabling panel (%d)\n", ret);
+}
+
+static int tc358764_attach(struct drm_bridge *bridge)
+{
+ struct tc358764 *ctx = bridge_to_tc358764(bridge);
+ struct drm_device *drm = bridge->dev;
+ int ret;
+
+ ctx->connector.polled = DRM_CONNECTOR_POLL_HPD;
+ ret = drm_connector_init(drm, &ctx->connector,
+ &tc358764_connector_funcs,
+ DRM_MODE_CONNECTOR_LVDS);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(&ctx->connector,
+ &tc358764_connector_helper_funcs);
+ drm_connector_attach_encoder(&ctx->connector, bridge->encoder);
+ drm_panel_attach(ctx->panel, &ctx->connector);
+ ctx->connector.funcs->reset(&ctx->connector);
+ drm_fb_helper_add_one_connector(drm->fb_helper, &ctx->connector);
+ drm_connector_register(&ctx->connector);
+
+ return 0;
+}
+
+static void tc358764_detach(struct drm_bridge *bridge)
+{
+ struct tc358764 *ctx = bridge_to_tc358764(bridge);
+ struct drm_device *drm = bridge->dev;
+
+ drm_connector_unregister(&ctx->connector);
+ drm_fb_helper_remove_one_connector(drm->fb_helper, &ctx->connector);
+ drm_panel_detach(ctx->panel);
+ ctx->panel = NULL;
+ drm_connector_unreference(&ctx->connector);
+}
+
+static const struct drm_bridge_funcs tc358764_bridge_funcs = {
+ .disable = tc358764_disable,
+ .post_disable = tc358764_post_disable,
+ .enable = tc358764_enable,
+ .pre_enable = tc358764_pre_enable,
+ .attach = tc358764_attach,
+ .detach = tc358764_detach,
+};
+
+static int tc358764_parse_dt(struct tc358764 *ctx)
+{
+ struct device *dev = ctx->dev;
+ int ret;
+
+ ctx->gpio_reset = devm_gpiod_get(dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(ctx->gpio_reset)) {
+ dev_err(dev, "no reset GPIO pin provided\n");
+ return PTR_ERR(ctx->gpio_reset);
+ }
+
+ ret = drm_of_find_panel_or_bridge(ctx->dev->of_node, 1, 0, &ctx->panel,
+ NULL);
+ if (ret && ret != -EPROBE_DEFER)
+ dev_err(dev, "cannot find panel (%d)\n", ret);
+
+ return ret;
+}
+
+static int tc358764_configure_regulators(struct tc358764 *ctx)
+{
+ int i, ret;
+
+ for (i = 0; i < ARRAY_SIZE(ctx->supplies); ++i)
+ ctx->supplies[i].supply = tc358764_supplies[i];
+
+ ret = devm_regulator_bulk_get(ctx->dev, ARRAY_SIZE(ctx->supplies),
+ ctx->supplies);
+ if (ret < 0)
+ dev_err(ctx->dev, "failed to get regulators: %d\n", ret);
+
+ return ret;
+}
+
+static int tc358764_probe(struct mipi_dsi_device *dsi)
+{
+ struct device *dev = &dsi->dev;
+ struct tc358764 *ctx;
+ int ret;
+
+ ctx = devm_kzalloc(dev, sizeof(struct tc358764), GFP_KERNEL);
+ if (!ctx)
+ return -ENOMEM;
+
+ mipi_dsi_set_drvdata(dsi, ctx);
+
+ ctx->dev = dev;
+
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_BURST
+ | MIPI_DSI_MODE_VIDEO_AUTO_VERT | MIPI_DSI_MODE_LPM;
+
+ ret = tc358764_parse_dt(ctx);
+ if (ret < 0)
+ return ret;
+
+ ret = tc358764_configure_regulators(ctx);
+ if (ret < 0)
+ return ret;
+
+ ctx->bridge.funcs = &tc358764_bridge_funcs;
+ ctx->bridge.of_node = dev->of_node;
+
+ drm_bridge_add(&ctx->bridge);
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ drm_bridge_remove(&ctx->bridge);
+ dev_err(dev, "failed to attach dsi\n");
+ }
+
+ return ret;
+}
+
+static int tc358764_remove(struct mipi_dsi_device *dsi)
+{
+ struct tc358764 *ctx = mipi_dsi_get_drvdata(dsi);
+
+ mipi_dsi_detach(dsi);
+ drm_bridge_remove(&ctx->bridge);
+
+ return 0;
+}
+
+static const struct of_device_id tc358764_of_match[] = {
+ { .compatible = "toshiba,tc358764" },
+ { }
+};
+MODULE_DEVICE_TABLE(of, tc358764_of_match);
+
+static struct mipi_dsi_driver tc358764_driver = {
+ .probe = tc358764_probe,
+ .remove = tc358764_remove,
+ .driver = {
+ .name = "tc358764",
+ .owner = THIS_MODULE,
+ .of_match_table = tc358764_of_match,
+ },
+};
+module_mipi_dsi_driver(tc358764_driver);
+
+MODULE_AUTHOR("Andrzej Hajda <a.hajda@samsung.com>");
+MODULE_AUTHOR("Maciej Purski <m.purski@samsung.com>");
+MODULE_DESCRIPTION("MIPI-DSI based Driver for TC358764 DSI/LVDS Bridge");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/bridge/ti-sn65dsi86.c b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
new file mode 100644
index 000000000000..f8a931cf3665
--- /dev/null
+++ b/drivers/gpu/drm/bridge/ti-sn65dsi86.c
@@ -0,0 +1,779 @@
+// SPDX-License-Identifier: GPL-2.0
+/*
+ * Copyright (c) 2018, The Linux Foundation. All rights reserved.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_mipi_dsi.h>
+#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
+#include <linux/clk.h>
+#include <linux/gpio/consumer.h>
+#include <linux/i2c.h>
+#include <linux/iopoll.h>
+#include <linux/of_graph.h>
+#include <linux/pm_runtime.h>
+#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
+
+#define SN_DEVICE_REV_REG 0x08
+#define SN_DPPLL_SRC_REG 0x0A
+#define DPPLL_CLK_SRC_DSICLK BIT(0)
+#define REFCLK_FREQ_MASK GENMASK(3, 1)
+#define REFCLK_FREQ(x) ((x) << 1)
+#define DPPLL_SRC_DP_PLL_LOCK BIT(7)
+#define SN_PLL_ENABLE_REG 0x0D
+#define SN_DSI_LANES_REG 0x10
+#define CHA_DSI_LANES_MASK GENMASK(4, 3)
+#define CHA_DSI_LANES(x) ((x) << 3)
+#define SN_DSIA_CLK_FREQ_REG 0x12
+#define SN_CHA_ACTIVE_LINE_LENGTH_LOW_REG 0x20
+#define SN_CHA_VERTICAL_DISPLAY_SIZE_LOW_REG 0x24
+#define SN_CHA_HSYNC_PULSE_WIDTH_LOW_REG 0x2C
+#define SN_CHA_HSYNC_PULSE_WIDTH_HIGH_REG 0x2D
+#define CHA_HSYNC_POLARITY BIT(7)
+#define SN_CHA_VSYNC_PULSE_WIDTH_LOW_REG 0x30
+#define SN_CHA_VSYNC_PULSE_WIDTH_HIGH_REG 0x31
+#define CHA_VSYNC_POLARITY BIT(7)
+#define SN_CHA_HORIZONTAL_BACK_PORCH_REG 0x34
+#define SN_CHA_VERTICAL_BACK_PORCH_REG 0x36
+#define SN_CHA_HORIZONTAL_FRONT_PORCH_REG 0x38
+#define SN_CHA_VERTICAL_FRONT_PORCH_REG 0x3A
+#define SN_ENH_FRAME_REG 0x5A
+#define VSTREAM_ENABLE BIT(3)
+#define SN_DATA_FORMAT_REG 0x5B
+#define SN_HPD_DISABLE_REG 0x5C
+#define HPD_DISABLE BIT(0)
+#define SN_AUX_WDATA_REG(x) (0x64 + (x))
+#define SN_AUX_ADDR_19_16_REG 0x74
+#define SN_AUX_ADDR_15_8_REG 0x75
+#define SN_AUX_ADDR_7_0_REG 0x76
+#define SN_AUX_LENGTH_REG 0x77
+#define SN_AUX_CMD_REG 0x78
+#define AUX_CMD_SEND BIT(1)
+#define AUX_CMD_REQ(x) ((x) << 4)
+#define SN_AUX_RDATA_REG(x) (0x79 + (x))
+#define SN_SSC_CONFIG_REG 0x93
+#define DP_NUM_LANES_MASK GENMASK(5, 4)
+#define DP_NUM_LANES(x) ((x) << 4)
+#define SN_DATARATE_CONFIG_REG 0x94
+#define DP_DATARATE_MASK GENMASK(7, 5)
+#define DP_DATARATE(x) ((x) << 5)
+#define SN_ML_TX_MODE_REG 0x96
+#define ML_TX_MAIN_LINK_OFF 0
+#define ML_TX_NORMAL_MODE BIT(0)
+#define SN_AUX_CMD_STATUS_REG 0xF4
+#define AUX_IRQ_STATUS_AUX_RPLY_TOUT BIT(3)
+#define AUX_IRQ_STATUS_AUX_SHORT BIT(5)
+#define AUX_IRQ_STATUS_NAT_I2C_FAIL BIT(6)
+
+#define MIN_DSI_CLK_FREQ_MHZ 40
+
+/* fudge factor required to account for 8b/10b encoding */
+#define DP_CLK_FUDGE_NUM 10
+#define DP_CLK_FUDGE_DEN 8
+
+/* Matches DP_AUX_MAX_PAYLOAD_BYTES (for now) */
+#define SN_AUX_MAX_PAYLOAD_BYTES 16
+
+#define SN_REGULATOR_SUPPLY_NUM 4
+
+struct ti_sn_bridge {
+ struct device *dev;
+ struct regmap *regmap;
+ struct drm_dp_aux aux;
+ struct drm_bridge bridge;
+ struct drm_connector connector;
+ struct device_node *host_node;
+ struct mipi_dsi_device *dsi;
+ struct clk *refclk;
+ struct drm_panel *panel;
+ struct gpio_desc *enable_gpio;
+ struct regulator_bulk_data supplies[SN_REGULATOR_SUPPLY_NUM];
+};
+
+static const struct regmap_range ti_sn_bridge_volatile_ranges[] = {
+ { .range_min = 0, .range_max = 0xFF },
+};
+
+static const struct regmap_access_table ti_sn_bridge_volatile_table = {
+ .yes_ranges = ti_sn_bridge_volatile_ranges,
+ .n_yes_ranges = ARRAY_SIZE(ti_sn_bridge_volatile_ranges),
+};
+
+static const struct regmap_config ti_sn_bridge_regmap_config = {
+ .reg_bits = 8,
+ .val_bits = 8,
+ .volatile_table = &ti_sn_bridge_volatile_table,
+ .cache_type = REGCACHE_NONE,
+};
+
+static void ti_sn_bridge_write_u16(struct ti_sn_bridge *pdata,
+ unsigned int reg, u16 val)
+{
+ regmap_write(pdata->regmap, reg, val & 0xFF);
+ regmap_write(pdata->regmap, reg + 1, val >> 8);
+}
+
+static int __maybe_unused ti_sn_bridge_resume(struct device *dev)
+{
+ struct ti_sn_bridge *pdata = dev_get_drvdata(dev);
+ int ret;
+
+ ret = regulator_bulk_enable(SN_REGULATOR_SUPPLY_NUM, pdata->supplies);
+ if (ret) {
+ DRM_ERROR("failed to enable supplies %d\n", ret);
+ return ret;
+ }
+
+ gpiod_set_value(pdata->enable_gpio, 1);
+
+ return ret;
+}
+
+static int __maybe_unused ti_sn_bridge_suspend(struct device *dev)
+{
+ struct ti_sn_bridge *pdata = dev_get_drvdata(dev);
+ int ret;
+
+ gpiod_set_value(pdata->enable_gpio, 0);
+
+ ret = regulator_bulk_disable(SN_REGULATOR_SUPPLY_NUM, pdata->supplies);
+ if (ret)
+ DRM_ERROR("failed to disable supplies %d\n", ret);
+
+ return ret;
+}
+
+static const struct dev_pm_ops ti_sn_bridge_pm_ops = {
+ SET_RUNTIME_PM_OPS(ti_sn_bridge_suspend, ti_sn_bridge_resume, NULL)
+};
+
+/* Connector funcs */
+static struct ti_sn_bridge *
+connector_to_ti_sn_bridge(struct drm_connector *connector)
+{
+ return container_of(connector, struct ti_sn_bridge, connector);
+}
+
+static int ti_sn_bridge_connector_get_modes(struct drm_connector *connector)
+{
+ struct ti_sn_bridge *pdata = connector_to_ti_sn_bridge(connector);
+
+ return drm_panel_get_modes(pdata->panel);
+}
+
+static enum drm_mode_status
+ti_sn_bridge_connector_mode_valid(struct drm_connector *connector,
+ struct drm_display_mode *mode)
+{
+ /* maximum supported resolution is 4K at 60 fps */
+ if (mode->clock > 594000)
+ return MODE_CLOCK_HIGH;
+
+ return MODE_OK;
+}
+
+static struct drm_connector_helper_funcs ti_sn_bridge_connector_helper_funcs = {
+ .get_modes = ti_sn_bridge_connector_get_modes,
+ .mode_valid = ti_sn_bridge_connector_mode_valid,
+};
+
+static enum drm_connector_status
+ti_sn_bridge_connector_detect(struct drm_connector *connector, bool force)
+{
+ /*
+ * TODO: if a drm_panel is present, we currently always report the
+ * connector as connected. Support for detecting the device state
+ * in hot-pluggable scenarios still needs to be added.
+ */
+ return connector_status_connected;
+}
+
+static const struct drm_connector_funcs ti_sn_bridge_connector_funcs = {
+ .fill_modes = drm_helper_probe_single_connector_modes,
+ .detect = ti_sn_bridge_connector_detect,
+ .destroy = drm_connector_cleanup,
+ .reset = drm_atomic_helper_connector_reset,
+ .atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
+ .atomic_destroy_state = drm_atomic_helper_connector_destroy_state,
+};
+
+static struct ti_sn_bridge *bridge_to_ti_sn_bridge(struct drm_bridge *bridge)
+{
+ return container_of(bridge, struct ti_sn_bridge, bridge);
+}
+
+static int ti_sn_bridge_parse_regulators(struct ti_sn_bridge *pdata)
+{
+ unsigned int i;
+ const char * const ti_sn_bridge_supply_names[] = {
+ "vcca", "vcc", "vccio", "vpll",
+ };
+
+ for (i = 0; i < SN_REGULATOR_SUPPLY_NUM; i++)
+ pdata->supplies[i].supply = ti_sn_bridge_supply_names[i];
+
+ return devm_regulator_bulk_get(pdata->dev, SN_REGULATOR_SUPPLY_NUM,
+ pdata->supplies);
+}
+
+static int ti_sn_bridge_attach(struct drm_bridge *bridge)
+{
+ int ret, val;
+ struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
+ struct mipi_dsi_host *host;
+ struct mipi_dsi_device *dsi;
+ const struct mipi_dsi_device_info info = { .type = "ti_sn_bridge",
+ .channel = 0,
+ .node = NULL,
+ };
+
+ ret = drm_connector_init(bridge->dev, &pdata->connector,
+ &ti_sn_bridge_connector_funcs,
+ DRM_MODE_CONNECTOR_eDP);
+ if (ret) {
+ DRM_ERROR("Failed to initialize connector with drm\n");
+ return ret;
+ }
+
+ drm_connector_helper_add(&pdata->connector,
+ &ti_sn_bridge_connector_helper_funcs);
+ drm_connector_attach_encoder(&pdata->connector, bridge->encoder);
+
+ /*
+ * TODO: ideally, finding the host resource and registering the dsi
+ * device should happen in the bridge probe. But some existing DSI
+ * host drivers wait for a drm_bridge/drm_panel to be added to the
+ * global bridge/panel list before completing their own probe, so
+ * registering the dsi device there, before we are on the global
+ * bridge list, would deadlock: neither the host probe nor our
+ * bridge probe would ever complete. Keeping the registration here
+ * satisfies most existing host drivers; once they are fixed, the
+ * code below can move to the bridge probe safely.
+ */
+ host = of_find_mipi_dsi_host_by_node(pdata->host_node);
+ if (!host) {
+ DRM_ERROR("failed to find dsi host\n");
+ ret = -ENODEV;
+ goto err_dsi_host;
+ }
+
+ dsi = mipi_dsi_device_register_full(host, &info);
+ if (IS_ERR(dsi)) {
+ DRM_ERROR("failed to create dsi device\n");
+ ret = PTR_ERR(dsi);
+ goto err_dsi_host;
+ }
+
+ /* TODO: setting to 4 lanes always for now */
+ dsi->lanes = 4;
+ dsi->format = MIPI_DSI_FMT_RGB888;
+ dsi->mode_flags = MIPI_DSI_MODE_VIDEO | MIPI_DSI_MODE_VIDEO_SYNC_PULSE |
+ MIPI_DSI_MODE_EOT_PACKET | MIPI_DSI_MODE_VIDEO_HSE;
+
+ /* check if continuous dsi clock is required or not */
+ pm_runtime_get_sync(pdata->dev);
+ regmap_read(pdata->regmap, SN_DPPLL_SRC_REG, &val);
+ pm_runtime_put(pdata->dev);
+ if (!(val & DPPLL_CLK_SRC_DSICLK))
+ dsi->mode_flags |= MIPI_DSI_CLOCK_NON_CONTINUOUS;
+
+ ret = mipi_dsi_attach(dsi);
+ if (ret < 0) {
+ DRM_ERROR("failed to attach dsi to host\n");
+ goto err_dsi_attach;
+ }
+ pdata->dsi = dsi;
+
+ /* attach panel to bridge */
+ drm_panel_attach(pdata->panel, &pdata->connector);
+
+ return 0;
+
+err_dsi_attach:
+ mipi_dsi_device_unregister(dsi);
+err_dsi_host:
+ drm_connector_cleanup(&pdata->connector);
+ return ret;
+}
+
+static void ti_sn_bridge_disable(struct drm_bridge *bridge)
+{
+ struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
+
+ drm_panel_disable(pdata->panel);
+
+ /* disable video stream */
+ regmap_update_bits(pdata->regmap, SN_ENH_FRAME_REG, VSTREAM_ENABLE, 0);
+ /* semi auto link training mode OFF */
+ regmap_write(pdata->regmap, SN_ML_TX_MODE_REG, 0);
+ /* disable DP PLL */
+ regmap_write(pdata->regmap, SN_PLL_ENABLE_REG, 0);
+
+ drm_panel_unprepare(pdata->panel);
+}
+
+static u32 ti_sn_bridge_get_dsi_freq(struct ti_sn_bridge *pdata)
+{
+ u32 bit_rate_khz, clk_freq_khz;
+ struct drm_display_mode *mode =
+ &pdata->bridge.encoder->crtc->state->adjusted_mode;
+
+ bit_rate_khz = mode->clock *
+ mipi_dsi_pixel_format_to_bpp(pdata->dsi->format);
+ clk_freq_khz = bit_rate_khz / (pdata->dsi->lanes * 2);
+
+ return clk_freq_khz;
+}
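
A worked example of this computation, assuming a 1080p60 mode (148500 kHz pixel clock), MIPI_DSI_FMT_RGB888 (24 bpp) and 4 lanes:

	bit_rate_khz = 148500 * 24     = 3564000	/* total DSI bit rate */
	clk_freq_khz = 3564000 / (4*2) = 445500		/* per-lane clock; the
							   divide by 2 reflects
							   the DDR DSI clock */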
+
+/* clk frequencies supported by bridge in Hz in case derived from REFCLK pin */
+static const u32 ti_sn_bridge_refclk_lut[] = {
+ 12000000,
+ 19200000,
+ 26000000,
+ 27000000,
+ 38400000,
+};
+
+/* clk frequencies supported by bridge in Hz in case derived from DACP/N pin */
+static const u32 ti_sn_bridge_dsiclk_lut[] = {
+ 468000000,
+ 384000000,
+ 416000000,
+ 486000000,
+ 460800000,
+};
+
+static void ti_sn_bridge_set_refclk_freq(struct ti_sn_bridge *pdata)
+{
+ int i;
+ u32 refclk_rate;
+ const u32 *refclk_lut;
+ size_t refclk_lut_size;
+
+ if (pdata->refclk) {
+ refclk_rate = clk_get_rate(pdata->refclk);
+ refclk_lut = ti_sn_bridge_refclk_lut;
+ refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_refclk_lut);
+ clk_prepare_enable(pdata->refclk);
+ } else {
+ refclk_rate = ti_sn_bridge_get_dsi_freq(pdata) * 1000;
+ refclk_lut = ti_sn_bridge_dsiclk_lut;
+ refclk_lut_size = ARRAY_SIZE(ti_sn_bridge_dsiclk_lut);
+ }
+
+ /* if no entry matches, i == refclk_lut_size selects the default frequency */
+ for (i = 0; i < refclk_lut_size; i++)
+ if (refclk_lut[i] == refclk_rate)
+ break;
+
+ regmap_update_bits(pdata->regmap, SN_DPPLL_SRC_REG, REFCLK_FREQ_MASK,
+ REFCLK_FREQ(i));
+}
+
+/*
+ * The LUT index corresponds to the register value, and the LUT values
+ * correspond to the DP data rates supported by the bridge, in Mbps.
+ */
+static const unsigned int ti_sn_bridge_dp_rate_lut[] = {
+ 0, 1620, 2160, 2430, 2700, 3240, 4320, 5400
+};
+
+static void ti_sn_bridge_set_dsi_dp_rate(struct ti_sn_bridge *pdata)
+{
+ unsigned int bit_rate_mhz, clk_freq_mhz, dp_rate_mhz;
+ unsigned int val, i;
+ struct drm_display_mode *mode =
+ &pdata->bridge.encoder->crtc->state->adjusted_mode;
+
+ /* set DSIA clk frequency */
+ bit_rate_mhz = (mode->clock / 1000) *
+ mipi_dsi_pixel_format_to_bpp(pdata->dsi->format);
+ clk_freq_mhz = bit_rate_mhz / (pdata->dsi->lanes * 2);
+
+ /* for each increment in val, frequency increases by 5MHz */
+ val = (MIN_DSI_CLK_FREQ_MHZ / 5) +
+ (((clk_freq_mhz - MIN_DSI_CLK_FREQ_MHZ) / 5) & 0xFF);
+ regmap_write(pdata->regmap, SN_DSIA_CLK_FREQ_REG, val);
+
+ /* set DP data rate */
+ dp_rate_mhz = ((bit_rate_mhz / pdata->dsi->lanes) * DP_CLK_FUDGE_NUM) /
+ DP_CLK_FUDGE_DEN;
+ for (i = 0; i < ARRAY_SIZE(ti_sn_bridge_dp_rate_lut) - 1; i++)
+ if (ti_sn_bridge_dp_rate_lut[i] > dp_rate_mhz)
+ break;
+
+ regmap_update_bits(pdata->regmap, SN_DATARATE_CONFIG_REG,
+ DP_DATARATE_MASK, DP_DATARATE(i));
+}
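
Continuing the 1080p60 / RGB888 / 4-lane example, with the integer arithmetic exactly as written above:

	bit_rate_mhz = (148500 / 1000) * 24 = 3552
	clk_freq_mhz = 3552 / (4 * 2)       = 444
	val          = 40/5 + (444 - 40)/5  = 88	/* SN_DSIA_CLK_FREQ_REG */
	dp_rate_mhz  = (3552 / 4) * 10 / 8  = 1110

The first LUT entry above 1110 is 1620 at index 1, so DP_DATARATE(1) selects the 1.62 Gbps link rate; 4 lanes * 1620 Mbps * 8/10 leaves 5184 Mbps of payload bandwidth for the 3552 Mbps stream.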
+
+static void ti_sn_bridge_set_video_timings(struct ti_sn_bridge *pdata)
+{
+ struct drm_display_mode *mode =
+ &pdata->bridge.encoder->crtc->state->adjusted_mode;
+ u8 hsync_polarity = 0, vsync_polarity = 0;
+
+ if (mode->flags & DRM_MODE_FLAG_PHSYNC)
+ hsync_polarity = CHA_HSYNC_POLARITY;
+ if (mode->flags & DRM_MODE_FLAG_PVSYNC)
+ vsync_polarity = CHA_VSYNC_POLARITY;
+
+ ti_sn_bridge_write_u16(pdata, SN_CHA_ACTIVE_LINE_LENGTH_LOW_REG,
+ mode->hdisplay);
+ ti_sn_bridge_write_u16(pdata, SN_CHA_VERTICAL_DISPLAY_SIZE_LOW_REG,
+ mode->vdisplay);
+ regmap_write(pdata->regmap, SN_CHA_HSYNC_PULSE_WIDTH_LOW_REG,
+ (mode->hsync_end - mode->hsync_start) & 0xFF);
+ regmap_write(pdata->regmap, SN_CHA_HSYNC_PULSE_WIDTH_HIGH_REG,
+ (((mode->hsync_end - mode->hsync_start) >> 8) & 0x7F) |
+ hsync_polarity);
+ regmap_write(pdata->regmap, SN_CHA_VSYNC_PULSE_WIDTH_LOW_REG,
+ (mode->vsync_end - mode->vsync_start) & 0xFF);
+ regmap_write(pdata->regmap, SN_CHA_VSYNC_PULSE_WIDTH_HIGH_REG,
+ (((mode->vsync_end - mode->vsync_start) >> 8) & 0x7F) |
+ vsync_polarity);
+
+ regmap_write(pdata->regmap, SN_CHA_HORIZONTAL_BACK_PORCH_REG,
+ (mode->htotal - mode->hsync_end) & 0xFF);
+ regmap_write(pdata->regmap, SN_CHA_VERTICAL_BACK_PORCH_REG,
+ (mode->vtotal - mode->vsync_end) & 0xFF);
+
+ regmap_write(pdata->regmap, SN_CHA_HORIZONTAL_FRONT_PORCH_REG,
+ (mode->hsync_start - mode->hdisplay) & 0xFF);
+ regmap_write(pdata->regmap, SN_CHA_VERTICAL_FRONT_PORCH_REG,
+ (mode->vsync_start - mode->vdisplay) & 0xFF);
+
+ usleep_range(10000, 10500); /* 10ms delay recommended by spec */
+}
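
A worked example of the sync packing, assuming CEA 1080p60 timings (hsync pulse 2052 - 2008 = 44 pixels, positive polarity): the LOW register gets 44 & 0xFF = 0x2C, and the HIGH register gets ((44 >> 8) & 0x7F) | CHA_HSYNC_POLARITY = 0x80.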
+
+static void ti_sn_bridge_enable(struct drm_bridge *bridge)
+{
+ struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
+ unsigned int val;
+ int ret;
+
+ /*
+ * FIXME:
+ * This 70ms was found necessary by experimentation. If it's not
+ * present, link training fails. It seems like it can go anywhere from
+ * pre_enable() up to semi-auto link training initiation below.
+ *
+ * Neither the datasheet for the bridge nor the panel tested mention a
+ * delay of this magnitude in the timing requirements. So for now, add
+ * the mystery delay until someone figures out a better fix.
+ */
+ msleep(70);
+
+ /* DSI_A lane config */
+ val = CHA_DSI_LANES(4 - pdata->dsi->lanes);
+ regmap_update_bits(pdata->regmap, SN_DSI_LANES_REG,
+ CHA_DSI_LANES_MASK, val);
+
+ /* DP lane config */
+ val = DP_NUM_LANES(pdata->dsi->lanes - 1);
+ regmap_update_bits(pdata->regmap, SN_SSC_CONFIG_REG, DP_NUM_LANES_MASK,
+ val);
+
+ /* set dsi/dp clk frequency value */
+ ti_sn_bridge_set_dsi_dp_rate(pdata);
+
+ /* enable DP PLL */
+ regmap_write(pdata->regmap, SN_PLL_ENABLE_REG, 1);
+
+ ret = regmap_read_poll_timeout(pdata->regmap, SN_DPPLL_SRC_REG, val,
+ val & DPPLL_SRC_DP_PLL_LOCK, 1000,
+ 50 * 1000);
+ if (ret) {
+ DRM_ERROR("DP_PLL_LOCK polling failed (%d)\n", ret);
+ return;
+ }
+
+ /*
+ * The SN65DSI86 only supports the ASSR Display Authentication method,
+ * which is enabled by default, so the eDP panel must support it as
+ * well. Enable the method in the panel at DisplayPort address 0x0010A
+ * prior to link training.
+ */
+ drm_dp_dpcd_writeb(&pdata->aux, DP_EDP_CONFIGURATION_SET,
+ DP_ALTERNATE_SCRAMBLER_RESET_ENABLE);
+
+ /* Semi auto link training mode */
+ regmap_write(pdata->regmap, SN_ML_TX_MODE_REG, 0x0A);
+ ret = regmap_read_poll_timeout(pdata->regmap, SN_ML_TX_MODE_REG, val,
+ val == ML_TX_MAIN_LINK_OFF ||
+ val == ML_TX_NORMAL_MODE, 1000,
+ 500 * 1000);
+ if (ret) {
+ DRM_ERROR("Training complete polling failed (%d)\n", ret);
+ return;
+ } else if (val == ML_TX_MAIN_LINK_OFF) {
+ DRM_ERROR("Link training failed, link is off\n");
+ return;
+ }
+
+ /* config video parameters */
+ ti_sn_bridge_set_video_timings(pdata);
+
+ /* enable video stream */
+ regmap_update_bits(pdata->regmap, SN_ENH_FRAME_REG, VSTREAM_ENABLE,
+ VSTREAM_ENABLE);
+
+ drm_panel_enable(pdata->panel);
+}
+
+static void ti_sn_bridge_pre_enable(struct drm_bridge *bridge)
+{
+ struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
+
+ pm_runtime_get_sync(pdata->dev);
+
+ /* configure bridge ref_clk */
+ ti_sn_bridge_set_refclk_freq(pdata);
+
+ /* HPD is not supported when a drm_panel is attached, so disable it */
+ regmap_update_bits(pdata->regmap, SN_HPD_DISABLE_REG, HPD_DISABLE,
+ HPD_DISABLE);
+
+ drm_panel_prepare(pdata->panel);
+}
+
+static void ti_sn_bridge_post_disable(struct drm_bridge *bridge)
+{
+ struct ti_sn_bridge *pdata = bridge_to_ti_sn_bridge(bridge);
+
+ if (pdata->refclk)
+ clk_disable_unprepare(pdata->refclk);
+
+ pm_runtime_put_sync(pdata->dev);
+}
+
+static const struct drm_bridge_funcs ti_sn_bridge_funcs = {
+ .attach = ti_sn_bridge_attach,
+ .pre_enable = ti_sn_bridge_pre_enable,
+ .enable = ti_sn_bridge_enable,
+ .disable = ti_sn_bridge_disable,
+ .post_disable = ti_sn_bridge_post_disable,
+};
+
+static struct ti_sn_bridge *aux_to_ti_sn_bridge(struct drm_dp_aux *aux)
+{
+ return container_of(aux, struct ti_sn_bridge, aux);
+}
+
+static ssize_t ti_sn_aux_transfer(struct drm_dp_aux *aux,
+ struct drm_dp_aux_msg *msg)
+{
+ struct ti_sn_bridge *pdata = aux_to_ti_sn_bridge(aux);
+ u32 request = msg->request & ~DP_AUX_I2C_MOT;
+ u32 request_val = AUX_CMD_REQ(msg->request);
+ u8 *buf = (u8 *)msg->buffer;
+ unsigned int val;
+ int ret, i;
+
+ if (msg->size > SN_AUX_MAX_PAYLOAD_BYTES)
+ return -EINVAL;
+
+ switch (request) {
+ case DP_AUX_NATIVE_WRITE:
+ case DP_AUX_I2C_WRITE:
+ case DP_AUX_NATIVE_READ:
+ case DP_AUX_I2C_READ:
+ regmap_write(pdata->regmap, SN_AUX_CMD_REG, request_val);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ regmap_write(pdata->regmap, SN_AUX_ADDR_19_16_REG,
+ (msg->address >> 16) & 0xF);
+ regmap_write(pdata->regmap, SN_AUX_ADDR_15_8_REG,
+ (msg->address >> 8) & 0xFF);
+ regmap_write(pdata->regmap, SN_AUX_ADDR_7_0_REG, msg->address & 0xFF);
+
+ regmap_write(pdata->regmap, SN_AUX_LENGTH_REG, msg->size);
+
+ if (request == DP_AUX_NATIVE_WRITE || request == DP_AUX_I2C_WRITE) {
+ for (i = 0; i < msg->size; i++)
+ regmap_write(pdata->regmap, SN_AUX_WDATA_REG(i),
+ buf[i]);
+ }
+
+ regmap_write(pdata->regmap, SN_AUX_CMD_REG, request_val | AUX_CMD_SEND);
+
+ ret = regmap_read_poll_timeout(pdata->regmap, SN_AUX_CMD_REG, val,
+ !(val & AUX_CMD_SEND), 200,
+ 50 * 1000);
+ if (ret)
+ return ret;
+
+ ret = regmap_read(pdata->regmap, SN_AUX_CMD_STATUS_REG, &val);
+ if (ret)
+ return ret;
+ else if ((val & AUX_IRQ_STATUS_NAT_I2C_FAIL)
+ || (val & AUX_IRQ_STATUS_AUX_RPLY_TOUT)
+ || (val & AUX_IRQ_STATUS_AUX_SHORT))
+ return -ENXIO;
+
+ if (request == DP_AUX_NATIVE_WRITE || request == DP_AUX_I2C_WRITE)
+ return msg->size;
+
+ for (i = 0; i < msg->size; i++) {
+ ret = regmap_read(pdata->regmap, SN_AUX_RDATA_REG(i), &val);
+ if (ret)
+ return ret;
+
+ WARN_ON(val & ~0xFF);
+ buf[i] = (u8)(val & 0xFF);
+ }
+
+ return msg->size;
+}
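
A usage sketch: once drm_dp_aux_register() runs in probe below, the generic DP helpers route through this hook, e.g.

	u8 sink_count;
	ssize_t ret = drm_dp_dpcd_readb(&pdata->aux, DP_SINK_COUNT,
					&sink_count);
	/* ret == 1 on success; the helper issues a DP_AUX_NATIVE_READ
	 * message, which ti_sn_aux_transfer() turns into the SN_AUX_*
	 * register writes and the polled AUX_CMD_SEND sequence above. */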
+
+static int ti_sn_bridge_parse_dsi_host(struct ti_sn_bridge *pdata)
+{
+ struct device_node *np = pdata->dev->of_node;
+
+ pdata->host_node = of_graph_get_remote_node(np, 0, 0);
+
+ if (!pdata->host_node) {
+ DRM_ERROR("remote dsi host node not found\n");
+ return -ENODEV;
+ }
+
+ return 0;
+}
+
+static int ti_sn_bridge_probe(struct i2c_client *client,
+ const struct i2c_device_id *id)
+{
+ struct ti_sn_bridge *pdata;
+ int ret;
+
+ if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
+ DRM_ERROR("device doesn't support I2C\n");
+ return -ENODEV;
+ }
+
+ pdata = devm_kzalloc(&client->dev, sizeof(struct ti_sn_bridge),
+ GFP_KERNEL);
+ if (!pdata)
+ return -ENOMEM;
+
+ pdata->regmap = devm_regmap_init_i2c(client,
+ &ti_sn_bridge_regmap_config);
+ if (IS_ERR(pdata->regmap)) {
+ DRM_ERROR("regmap i2c init failed\n");
+ return PTR_ERR(pdata->regmap);
+ }
+
+ pdata->dev = &client->dev;
+
+ ret = drm_of_find_panel_or_bridge(pdata->dev->of_node, 1, 0,
+ &pdata->panel, NULL);
+ if (ret) {
+ DRM_ERROR("could not find any panel node\n");
+ return ret;
+ }
+
+ dev_set_drvdata(&client->dev, pdata);
+
+ pdata->enable_gpio = devm_gpiod_get(pdata->dev, "enable",
+ GPIOD_OUT_LOW);
+ if (IS_ERR(pdata->enable_gpio)) {
+ DRM_ERROR("failed to get enable gpio from DT\n");
+ ret = PTR_ERR(pdata->enable_gpio);
+ return ret;
+ }
+
+ ret = ti_sn_bridge_parse_regulators(pdata);
+ if (ret) {
+ DRM_ERROR("failed to parse regulators\n");
+ return ret;
+ }
+
+ pdata->refclk = devm_clk_get(pdata->dev, "refclk");
+ if (IS_ERR(pdata->refclk)) {
+ ret = PTR_ERR(pdata->refclk);
+ if (ret == -EPROBE_DEFER)
+ return ret;
+ DRM_DEBUG_KMS("refclk not found\n");
+ pdata->refclk = NULL;
+ }
+
+ ret = ti_sn_bridge_parse_dsi_host(pdata);
+ if (ret)
+ return ret;
+
+ pm_runtime_enable(pdata->dev);
+
+ i2c_set_clientdata(client, pdata);
+
+ pdata->aux.name = "ti-sn65dsi86-aux";
+ pdata->aux.dev = pdata->dev;
+ pdata->aux.transfer = ti_sn_aux_transfer;
+ drm_dp_aux_register(&pdata->aux);
+
+ pdata->bridge.funcs = &ti_sn_bridge_funcs;
+ pdata->bridge.of_node = client->dev.of_node;
+
+ drm_bridge_add(&pdata->bridge);
+
+ return 0;
+}
+
+static int ti_sn_bridge_remove(struct i2c_client *client)
+{
+ struct ti_sn_bridge *pdata = i2c_get_clientdata(client);
+
+ if (!pdata)
+ return -EINVAL;
+
+ of_node_put(pdata->host_node);
+
+ pm_runtime_disable(pdata->dev);
+
+ if (pdata->dsi) {
+ mipi_dsi_detach(pdata->dsi);
+ mipi_dsi_device_unregister(pdata->dsi);
+ }
+
+ drm_bridge_remove(&pdata->bridge);
+
+ return 0;
+}
+
+static struct i2c_device_id ti_sn_bridge_id[] = {
+ { "ti,sn65dsi86", 0},
+ {},
+};
+MODULE_DEVICE_TABLE(i2c, ti_sn_bridge_id);
+
+static const struct of_device_id ti_sn_bridge_match_table[] = {
+ {.compatible = "ti,sn65dsi86"},
+ {},
+};
+MODULE_DEVICE_TABLE(of, ti_sn_bridge_match_table);
+
+static struct i2c_driver ti_sn_bridge_driver = {
+ .driver = {
+ .name = "ti_sn65dsi86",
+ .of_match_table = ti_sn_bridge_match_table,
+ .pm = &ti_sn_bridge_pm_ops,
+ },
+ .probe = ti_sn_bridge_probe,
+ .remove = ti_sn_bridge_remove,
+ .id_table = ti_sn_bridge_id,
+};
+module_i2c_driver(ti_sn_bridge_driver);
+
+MODULE_AUTHOR("Sandeep Panda <spanda@codeaurora.org>");
+MODULE_DESCRIPTION("sn65dsi86 DSI to eDP bridge driver");
+MODULE_LICENSE("GPL v2");
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.c b/drivers/gpu/drm/cirrus/cirrus_drv.c
index 69c4e352dd78..db40b77c7f7c 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.c
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.c
@@ -16,11 +16,11 @@
#include "cirrus_drv.h"
int cirrus_modeset = -1;
-int cirrus_bpp = 24;
+int cirrus_bpp = 16;
MODULE_PARM_DESC(modeset, "Disable/Enable modesetting");
module_param_named(modeset, cirrus_modeset, int, 0400);
-MODULE_PARM_DESC(bpp, "Max bits-per-pixel (default:24)");
+MODULE_PARM_DESC(bpp, "Max bits-per-pixel (default:16)");
module_param_named(bpp, cirrus_bpp, int, 0400);
/*
@@ -42,33 +42,12 @@ static const struct pci_device_id pciidlist[] = {
};
-static int cirrus_kick_out_firmware_fb(struct pci_dev *pdev)
-{
- struct apertures_struct *ap;
- bool primary = false;
-
- ap = alloc_apertures(1);
- if (!ap)
- return -ENOMEM;
-
- ap->ranges[0].base = pci_resource_start(pdev, 0);
- ap->ranges[0].size = pci_resource_len(pdev, 0);
-
-#ifdef CONFIG_X86
- primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
-#endif
- drm_fb_helper_remove_conflicting_framebuffers(ap, "cirrusdrmfb", primary);
- kfree(ap);
-
- return 0;
-}
-
static int cirrus_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
int ret;
- ret = cirrus_kick_out_firmware_fb(pdev);
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "cirrusdrmfb");
if (ret)
return ret;
diff --git a/drivers/gpu/drm/cirrus/cirrus_drv.h b/drivers/gpu/drm/cirrus/cirrus_drv.h
index ce9db7aab225..a29f87e98d9d 100644
--- a/drivers/gpu/drm/cirrus/cirrus_drv.h
+++ b/drivers/gpu/drm/cirrus/cirrus_drv.h
@@ -146,7 +146,7 @@ struct cirrus_device {
struct cirrus_fbdev {
struct drm_fb_helper helper;
- struct drm_framebuffer gfb;
+ struct drm_framebuffer *gfb;
void *sysram;
int size;
int x1, y1, x2, y2; /* dirty rect */
diff --git a/drivers/gpu/drm/cirrus/cirrus_fbdev.c b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
index b643ac92801c..68ab1821e15b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_fbdev.c
+++ b/drivers/gpu/drm/cirrus/cirrus_fbdev.c
@@ -22,14 +22,14 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
struct drm_gem_object *obj;
struct cirrus_bo *bo;
int src_offset, dst_offset;
- int bpp = afbdev->gfb.format->cpp[0];
+ int bpp = afbdev->gfb->format->cpp[0];
int ret = -EBUSY;
bool unmap = false;
bool store_for_later = false;
int x2, y2;
unsigned long flags;
- obj = afbdev->gfb.obj[0];
+ obj = afbdev->gfb->obj[0];
bo = gem_to_cirrus_bo(obj);
/*
@@ -82,7 +82,7 @@ static void cirrus_dirty_update(struct cirrus_fbdev *afbdev,
}
for (i = y; i < y + height; i++) {
/* assume equal stride for now */
- src_offset = dst_offset = i * afbdev->gfb.pitches[0] + (x * bpp);
+ src_offset = dst_offset = i * afbdev->gfb->pitches[0] + (x * bpp);
memcpy_toio(bo->kmap.virtual + src_offset, afbdev->sysram + src_offset, width * bpp);
}
@@ -192,23 +192,26 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
return -ENOMEM;
info = drm_fb_helper_alloc_fbi(helper);
- if (IS_ERR(info))
- return PTR_ERR(info);
+ if (IS_ERR(info)) {
+ ret = PTR_ERR(info);
+ goto err_vfree;
+ }
info->par = gfbdev;
- ret = cirrus_framebuffer_init(cdev->dev, &gfbdev->gfb, &mode_cmd, gobj);
+ fb = kzalloc(sizeof(*fb), GFP_KERNEL);
+ if (!fb) {
+ ret = -ENOMEM;
+ goto err_drm_gem_object_put_unlocked;
+ }
+
+ ret = cirrus_framebuffer_init(cdev->dev, fb, &mode_cmd, gobj);
if (ret)
- return ret;
+ goto err_kfree;
gfbdev->sysram = sysram;
gfbdev->size = size;
-
- fb = &gfbdev->gfb;
- if (!fb) {
- DRM_INFO("fb is NULL\n");
- return -EINVAL;
- }
+ gfbdev->gfb = fb;
/* setup helper */
gfbdev->helper.fb = fb;
@@ -241,24 +244,27 @@ static int cirrusfb_create(struct drm_fb_helper *helper,
DRM_INFO(" pitch is %d\n", fb->pitches[0]);
return 0;
+
+err_kfree:
+ kfree(fb);
+err_drm_gem_object_put_unlocked:
+ drm_gem_object_put_unlocked(gobj);
+err_vfree:
+ vfree(sysram);
+ return ret;
}
static int cirrus_fbdev_destroy(struct drm_device *dev,
struct cirrus_fbdev *gfbdev)
{
- struct drm_framebuffer *gfb = &gfbdev->gfb;
+ struct drm_framebuffer *gfb = gfbdev->gfb;
drm_fb_helper_unregister_fbi(&gfbdev->helper);
- if (gfb->obj[0]) {
- drm_gem_object_put_unlocked(gfb->obj[0]);
- gfb->obj[0] = NULL;
- }
-
vfree(gfbdev->sysram);
drm_fb_helper_fini(&gfbdev->helper);
- drm_framebuffer_unregister_private(gfb);
- drm_framebuffer_cleanup(gfb);
+ if (gfb)
+ drm_framebuffer_put(gfb);
return 0;
}
@@ -271,7 +277,6 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
{
struct cirrus_fbdev *gfbdev;
int ret;
- int bpp_sel = 24;
/*bpp_sel = 8;*/
gfbdev = kzalloc(sizeof(struct cirrus_fbdev), GFP_KERNEL);
@@ -296,7 +301,7 @@ int cirrus_fbdev_init(struct cirrus_device *cdev)
/* disable all the possible outputs/crtcs before entering KMS mode */
drm_helper_disable_unused_functions(cdev->dev);
- return drm_fb_helper_initial_config(&gfbdev->helper, bpp_sel);
+ return drm_fb_helper_initial_config(&gfbdev->helper, cirrus_bpp);
}
void cirrus_fbdev_fini(struct cirrus_device *cdev)
diff --git a/drivers/gpu/drm/cirrus/cirrus_main.c b/drivers/gpu/drm/cirrus/cirrus_main.c
index 60d54e10a34d..57f8fe6d020b 100644
--- a/drivers/gpu/drm/cirrus/cirrus_main.c
+++ b/drivers/gpu/drm/cirrus/cirrus_main.c
@@ -269,7 +269,7 @@ static void cirrus_bo_unref(struct cirrus_bo **bo)
return;
tbo = &((*bo)->bo);
- ttm_bo_unref(&tbo);
+ ttm_bo_put(tbo);
*bo = NULL;
}
diff --git a/drivers/gpu/drm/cirrus/cirrus_mode.c b/drivers/gpu/drm/cirrus/cirrus_mode.c
index 336bfda40125..ed7dcf212a34 100644
--- a/drivers/gpu/drm/cirrus/cirrus_mode.c
+++ b/drivers/gpu/drm/cirrus/cirrus_mode.c
@@ -127,7 +127,7 @@ static int cirrus_crtc_do_set_base(struct drm_crtc *crtc,
return ret;
}
- if (&cdev->mode_info.gfbdev->gfb == crtc->primary->fb) {
+ if (cdev->mode_info.gfbdev->gfb == crtc->primary->fb) {
/* if pushing console in kmap it */
ret = ttm_bo_kmap(&bo->bo, 0, bo->bo.num_pages, &bo->kmap);
if (ret)
@@ -512,7 +512,7 @@ int cirrus_modeset_init(struct cirrus_device *cdev)
cdev->dev->mode_config.max_height = CIRRUS_MAX_FB_HEIGHT;
cdev->dev->mode_config.fb_base = cdev->mc.vram_base;
- cdev->dev->mode_config.preferred_depth = 24;
+ cdev->dev->mode_config.preferred_depth = cirrus_bpp;
/* don't prefer a shadow on virt GPU */
cdev->dev->mode_config.prefer_shadow = 0;
diff --git a/drivers/gpu/drm/drm_atomic.c b/drivers/gpu/drm/drm_atomic.c
index 3eb061e11e2e..d0478abc01bd 100644
--- a/drivers/gpu/drm/drm_atomic.c
+++ b/drivers/gpu/drm/drm_atomic.c
@@ -895,6 +895,8 @@ static int drm_atomic_plane_set_property(struct drm_plane *plane,
state->src_h = val;
} else if (property == plane->alpha_property) {
state->alpha = val;
+ } else if (property == plane->blend_mode_property) {
+ state->pixel_blend_mode = val;
} else if (property == plane->rotation_property) {
if (!is_power_of_2(val & DRM_MODE_ROTATE_MASK)) {
DRM_DEBUG_ATOMIC("[PLANE:%d:%s] bad rotation bitmask: 0x%llx\n",
@@ -968,6 +970,8 @@ drm_atomic_plane_get_property(struct drm_plane *plane,
*val = state->src_h;
} else if (property == plane->alpha_property) {
*val = state->alpha;
+ } else if (property == plane->blend_mode_property) {
+ *val = state->pixel_blend_mode;
} else if (property == plane->rotation_property) {
*val = state->rotation;
} else if (property == plane->zpos_property) {
diff --git a/drivers/gpu/drm/drm_atomic_helper.c b/drivers/gpu/drm/drm_atomic_helper.c
index 866a2cc72ef6..2c23a48482da 100644
--- a/drivers/gpu/drm/drm_atomic_helper.c
+++ b/drivers/gpu/drm/drm_atomic_helper.c
@@ -1538,8 +1538,9 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
{
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
- struct drm_plane *plane;
- struct drm_plane_state *old_plane_state, *new_plane_state;
+ struct drm_plane *plane = NULL;
+ struct drm_plane_state *old_plane_state = NULL;
+ struct drm_plane_state *new_plane_state = NULL;
const struct drm_plane_helper_funcs *funcs;
int i, n_planes = 0;
@@ -1555,7 +1556,8 @@ int drm_atomic_helper_async_check(struct drm_device *dev,
if (n_planes != 1)
return -EINVAL;
- if (!new_plane_state->crtc)
+ if (!new_plane_state->crtc ||
+ old_plane_state->crtc != new_plane_state->crtc)
return -EINVAL;
funcs = plane->helper_private;
@@ -3553,6 +3555,29 @@ void drm_atomic_helper_crtc_destroy_state(struct drm_crtc *crtc,
EXPORT_SYMBOL(drm_atomic_helper_crtc_destroy_state);
/**
+ * __drm_atomic_helper_plane_reset - resets planes state to default values
+ * @plane: plane object, must not be NULL
+ * @state: atomic plane state, must not be NULL
+ *
+ * Initializes plane state to default. This is useful for drivers that subclass
+ * the plane state.
+ */
+void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ state->plane = plane;
+ state->rotation = DRM_MODE_ROTATE_0;
+
+ /* Reset the alpha value to fully opaque if it matters */
+ if (plane->alpha_property)
+ state->alpha = plane->alpha_property->values[1];
+ state->pixel_blend_mode = DRM_MODE_BLEND_PREMULTI;
+
+ plane->state = state;
+}
+EXPORT_SYMBOL(__drm_atomic_helper_plane_reset);
+
+/**
* drm_atomic_helper_plane_reset - default &drm_plane_funcs.reset hook for planes
* @plane: drm plane
*
@@ -3566,15 +3591,8 @@ void drm_atomic_helper_plane_reset(struct drm_plane *plane)
kfree(plane->state);
plane->state = kzalloc(sizeof(*plane->state), GFP_KERNEL);
-
- if (plane->state) {
- plane->state->plane = plane;
- plane->state->rotation = DRM_MODE_ROTATE_0;
-
- /* Reset the alpha value to fully opaque if it matters */
- if (plane->alpha_property)
- plane->state->alpha = plane->alpha_property->values[1];
- }
+ if (plane->state)
+ __drm_atomic_helper_plane_reset(plane, plane->state);
}
EXPORT_SYMBOL(drm_atomic_helper_plane_reset);
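Drivers that embed drm_plane_state in a larger state struct can now share these defaults instead of open-coding them. A hedged sketch (struct foo_plane_state and foo_plane_reset() are hypothetical; the destroy helper is the existing __drm_atomic_helper_plane_destroy_state()):

struct foo_plane_state {
	struct drm_plane_state base;
	u32 fancy_knob;	/* driver-private extension */
};

static void foo_plane_reset(struct drm_plane *plane)
{
	struct foo_plane_state *state;

	if (plane->state)
		__drm_atomic_helper_plane_destroy_state(plane->state);
	kfree(plane->state);

	/* Allocate the subclass, then let the helper fill in defaults. */
	state = kzalloc(sizeof(*state), GFP_KERNEL);
	if (state)
		__drm_atomic_helper_plane_reset(plane, &state->base);
}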
diff --git a/drivers/gpu/drm/drm_blend.c b/drivers/gpu/drm/drm_blend.c
index a16a74d7e15e..402b62d3f072 100644
--- a/drivers/gpu/drm/drm_blend.c
+++ b/drivers/gpu/drm/drm_blend.c
@@ -107,6 +107,52 @@
* planes. Without this property the primary plane is always below the cursor
* plane, and ordering between all other planes is undefined.
*
+ * pixel blend mode:
+ * Pixel blend mode is set up with drm_plane_create_blend_mode_property().
+ * It adds a blend mode for alpha blending equation selection, describing
+ * how the pixels from the current plane are composited with the
+ * background.
+ *
+ * Three alpha blending equations are defined:
+ *
+ * "None":
+ * Blend formula that ignores the pixel alpha::
+ *
+ * out.rgb = plane_alpha * fg.rgb +
+ * (1 - plane_alpha) * bg.rgb
+ *
+ * "Pre-multiplied":
+ * Blend formula that assumes the pixel color values
+ * have been already pre-multiplied with the alpha
+ * channel values::
+ *
+ * out.rgb = plane_alpha * fg.rgb +
+ * (1 - (plane_alpha * fg.alpha)) * bg.rgb
+ *
+ * "Coverage":
+ * Blend formula that assumes the pixel color values have not
+ * been pre-multiplied and will do so when blending them to the
+ * background color values::
+ *
+ * out.rgb = plane_alpha * fg.alpha * fg.rgb +
+ * (1 - (plane_alpha * fg.alpha)) * bg.rgb
+ *
+ * Using the following symbols:
+ *
+ * "fg.rgb":
+ * Each of the RGB component values from the plane's pixel
+ * "fg.alpha":
+ * Alpha component value from the plane's pixel. If the plane's
+ * pixel format has no alpha component, then this is assumed to be
+ * 1.0. In these cases, this property has no effect, as all three
+ * equations become equivalent.
+ * "bg.rgb":
+ * Each of the RGB component values from the background
+ * "plane_alpha":
+ * Plane alpha value set by the plane "alpha" property. If the
+ * plane does not expose the "alpha" property, then this is
+ * assumed to be 1.0
+ *
* Note that all the property extensions described here apply either to the
* plane or the CRTC (e.g. for the background color, which currently is not
* exposed and assumed to be black).
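To make the three equations concrete, a small worked illustration (plain arithmetic, not kernel code) with plane_alpha = 0.5 and a foreground pixel of rgb = 1.0, alpha = 0.5 over a black background:

/* Hedged illustration: only "Coverage" multiplies fg.rgb by fg.alpha,
 * so it is the one that darkens non-premultiplied content.
 */
float none     = 0.5f * 1.0f + (1.0f - 0.5f) * 0.0f;		/* 0.50 */
float premulti = 0.5f * 1.0f + (1.0f - 0.5f * 0.5f) * 0.0f;	/* 0.50 */
float coverage = 0.5f * 0.5f * 1.0f + (1.0f - 0.25f) * 0.0f;	/* 0.25 */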
@@ -448,3 +494,80 @@ int drm_atomic_normalize_zpos(struct drm_device *dev,
return 0;
}
EXPORT_SYMBOL(drm_atomic_normalize_zpos);
+
+/**
+ * drm_plane_create_blend_mode_property - create a new blend mode property
+ * @plane: drm plane
+ * @supported_modes: bitmask of supported modes, must include
+ * BIT(DRM_MODE_BLEND_PREMULTI). Current DRM assumption is
+ * that alpha is premultiplied, and old userspace can break if
+ * the property defaults to anything else.
+ *
+ * This creates a new property describing the blend mode.
+ *
+ * The property exposed to userspace is an enumeration property (see
+ * drm_property_create_enum()) called "pixel blend mode" and has the
+ * following enumeration values:
+ *
+ * "None":
+ * Blend formula that ignores the pixel alpha.
+ *
+ * "Pre-multiplied":
+ * Blend formula that assumes the pixel color values have been already
+ * pre-multiplied with the alpha channel values.
+ *
+ * "Coverage":
+ * Blend formula that assumes the pixel color values have not been
+ * pre-multiplied and will do so when blending them to the background color
+ * values.
+ *
+ * RETURNS:
+ * Zero for success or -errno
+ */
+int drm_plane_create_blend_mode_property(struct drm_plane *plane,
+ unsigned int supported_modes)
+{
+ struct drm_device *dev = plane->dev;
+ struct drm_property *prop;
+ static const struct drm_prop_enum_list props[] = {
+ { DRM_MODE_BLEND_PIXEL_NONE, "None" },
+ { DRM_MODE_BLEND_PREMULTI, "Pre-multiplied" },
+ { DRM_MODE_BLEND_COVERAGE, "Coverage" },
+ };
+ unsigned int valid_mode_mask = BIT(DRM_MODE_BLEND_PIXEL_NONE) |
+ BIT(DRM_MODE_BLEND_PREMULTI) |
+ BIT(DRM_MODE_BLEND_COVERAGE);
+ int i;
+
+ if (WARN_ON((supported_modes & ~valid_mode_mask) ||
+ ((supported_modes & BIT(DRM_MODE_BLEND_PREMULTI)) == 0)))
+ return -EINVAL;
+
+ prop = drm_property_create(dev, DRM_MODE_PROP_ENUM,
+ "pixel blend mode",
+ hweight32(supported_modes));
+ if (!prop)
+ return -ENOMEM;
+
+ for (i = 0; i < ARRAY_SIZE(props); i++) {
+ int ret;
+
+ if (!(BIT(props[i].type) & supported_modes))
+ continue;
+
+ ret = drm_property_add_enum(prop, props[i].type,
+ props[i].name);
+
+ if (ret) {
+ drm_property_destroy(dev, prop);
+
+ return ret;
+ }
+ }
+
+ drm_object_attach_property(&plane->base, prop, DRM_MODE_BLEND_PREMULTI);
+ plane->blend_mode_property = prop;
+
+ return 0;
+}
+EXPORT_SYMBOL(drm_plane_create_blend_mode_property);
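A usage sketch (hypothetical driver plane-init code) exposing the mandatory pre-multiplied mode plus coverage:

static int foo_plane_init_blend(struct drm_plane *plane)
{
	/* BIT(DRM_MODE_BLEND_PREMULTI) must be in the mask; see above. */
	return drm_plane_create_blend_mode_property(plane,
						    BIT(DRM_MODE_BLEND_PREMULTI) |
						    BIT(DRM_MODE_BLEND_COVERAGE));
}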
diff --git a/drivers/gpu/drm/drm_context.c b/drivers/gpu/drm/drm_context.c
index 3c4000facb36..f973d287696a 100644
--- a/drivers/gpu/drm/drm_context.c
+++ b/drivers/gpu/drm/drm_context.c
@@ -372,7 +372,7 @@ int drm_legacy_addctx(struct drm_device *dev, void *data,
ctx->handle = drm_legacy_ctxbitmap_next(dev);
}
DRM_DEBUG("%d\n", ctx->handle);
- if (ctx->handle == -1) {
+ if (ctx->handle < 0) {
DRM_DEBUG("Not enough free contexts.\n");
/* Should this return -EBUSY instead? */
return -ENOMEM;
diff --git a/drivers/gpu/drm/drm_debugfs_crc.c b/drivers/gpu/drm/drm_debugfs_crc.c
index 99961192bf03..00e743153e94 100644
--- a/drivers/gpu/drm/drm_debugfs_crc.c
+++ b/drivers/gpu/drm/drm_debugfs_crc.c
@@ -68,8 +68,29 @@ static int crc_control_show(struct seq_file *m, void *data)
{
struct drm_crtc *crtc = m->private;
- seq_printf(m, "%s\n", crtc->crc.source);
+ if (crtc->funcs->get_crc_sources) {
+ size_t count;
+ const char *const *sources = crtc->funcs->get_crc_sources(crtc,
+ &count);
+ size_t values_cnt;
+ int i;
+
+ if (count == 0 || !sources)
+ goto out;
+
+ for (i = 0; i < count; i++) {
+ if (!crtc->funcs->verify_crc_source(crtc, sources[i],
+ &values_cnt)) {
+ if (strcmp(sources[i], crtc->crc.source))
+ seq_printf(m, "%s\n", sources[i]);
+ else
+ seq_printf(m, "%s*\n", sources[i]);
+ }
+ }
+ }
+ return 0;
+out:
+ seq_printf(m, "%s*\n", crtc->crc.source);
return 0;
}
@@ -87,6 +108,8 @@ static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
struct drm_crtc *crtc = m->private;
struct drm_crtc_crc *crc = &crtc->crc;
char *source;
+ size_t values_cnt;
+ int ret;
if (len == 0)
return 0;
@@ -104,6 +127,10 @@ static ssize_t crc_control_write(struct file *file, const char __user *ubuf,
if (source[len] == '\n')
source[len] = '\0';
+ ret = crtc->funcs->verify_crc_source(crtc, source, &values_cnt);
+ if (ret)
+ return ret;
+
spin_lock_irq(&crc->lock);
if (crc->opened) {
@@ -168,57 +195,41 @@ static int crtc_crc_open(struct inode *inode, struct file *filep)
return ret;
}
- spin_lock_irq(&crc->lock);
- if (!crc->opened)
- crc->opened = true;
- else
- ret = -EBUSY;
- spin_unlock_irq(&crc->lock);
-
+ ret = crtc->funcs->verify_crc_source(crtc, crc->source, &values_cnt);
if (ret)
return ret;
- ret = crtc->funcs->set_crc_source(crtc, crc->source, &values_cnt);
- if (ret)
- goto err;
-
- if (WARN_ON(values_cnt > DRM_MAX_CRC_NR)) {
- ret = -EINVAL;
- goto err_disable;
- }
+ if (WARN_ON(values_cnt > DRM_MAX_CRC_NR))
+ return -EINVAL;
- if (WARN_ON(values_cnt == 0)) {
- ret = -EINVAL;
- goto err_disable;
- }
+ if (WARN_ON(values_cnt == 0))
+ return -EINVAL;
entries = kcalloc(DRM_CRC_ENTRIES_NR, sizeof(*entries), GFP_KERNEL);
- if (!entries) {
- ret = -ENOMEM;
- goto err_disable;
- }
+ if (!entries)
+ return -ENOMEM;
spin_lock_irq(&crc->lock);
- crc->entries = entries;
- crc->values_cnt = values_cnt;
-
- /*
- * Only return once we got a first frame, so userspace doesn't have to
- * guess when this particular piece of HW will be ready to start
- * generating CRCs.
- */
- ret = wait_event_interruptible_lock_irq(crc->wq,
- crtc_crc_data_count(crc),
- crc->lock);
+ if (!crc->opened) {
+ crc->opened = true;
+ crc->entries = entries;
+ crc->values_cnt = values_cnt;
+ } else {
+ ret = -EBUSY;
+ }
spin_unlock_irq(&crc->lock);
+ if (ret) {
+ kfree(entries);
+ return ret;
+ }
+
+ ret = crtc->funcs->set_crc_source(crtc, crc->source);
if (ret)
- goto err_disable;
+ goto err;
return 0;
-err_disable:
- crtc->funcs->set_crc_source(crtc, NULL, &values_cnt);
err:
spin_lock_irq(&crc->lock);
crtc_crc_cleanup(crc);
@@ -230,9 +241,8 @@ static int crtc_crc_release(struct inode *inode, struct file *filep)
{
struct drm_crtc *crtc = filep->f_inode->i_private;
struct drm_crtc_crc *crc = &crtc->crc;
- size_t values_cnt;
- crtc->funcs->set_crc_source(crtc, NULL, &values_cnt);
+ crtc->funcs->set_crc_source(crtc, NULL);
spin_lock_irq(&crc->lock);
crtc_crc_cleanup(crc);
@@ -338,7 +348,7 @@ int drm_debugfs_crtc_crc_add(struct drm_crtc *crtc)
{
struct dentry *crc_ent, *ent;
- if (!crtc->funcs->set_crc_source)
+ if (!crtc->funcs->set_crc_source || !crtc->funcs->verify_crc_source)
return 0;
crc_ent = debugfs_create_dir("crc", crtc->debugfs_entry);
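After this change a CRTC must implement verify_crc_source() alongside set_crc_source() for the debugfs entries to appear, and may implement get_crc_sources() to feed the listing above. A hedged sketch of minimal hooks for a driver with a single "auto" source (the foo_* names are hypothetical):

static const char * const foo_crc_sources[] = { "auto" };

static const char *const *foo_get_crc_sources(struct drm_crtc *crtc,
					      size_t *count)
{
	*count = ARRAY_SIZE(foo_crc_sources);
	return foo_crc_sources;
}

static int foo_verify_crc_source(struct drm_crtc *crtc, const char *source,
				 size_t *values_cnt)
{
	/* NULL means "disable CRC generation" and is always valid. */
	if (source && strcmp(source, "auto"))
		return -EINVAL;

	*values_cnt = 1;
	return 0;
}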
diff --git a/drivers/gpu/drm/drm_dp_cec.c b/drivers/gpu/drm/drm_dp_cec.c
index ddb1c5adebb9..8a718f85079a 100644
--- a/drivers/gpu/drm/drm_dp_cec.c
+++ b/drivers/gpu/drm/drm_dp_cec.c
@@ -16,7 +16,9 @@
* here. Quite a few active (mini-)DP-to-HDMI or USB-C-to-HDMI adapters
* have a converter chip that supports CEC-Tunneling-over-AUX (usually the
* Parade PS176), but they do not wire up the CEC pin, thus making CEC
- * useless.
+ * useless. Note that MegaChips 2900-based adapters appear to have good
+ * support for CEC tunneling: all adapters tested with this chipset so
+ * far have had the CEC line connected.
*
* Sadly there is no way for this driver to know this. What happens is
* that a /dev/cecX device is created that is isolated and unable to see
@@ -157,7 +159,7 @@ static void drm_dp_cec_adap_status(struct cec_adapter *adap,
if (drm_dp_read_desc(aux, &desc, true))
return;
- seq_printf(file, "OUI: %*pdH\n",
+ seq_printf(file, "OUI: %*phD\n",
(int)sizeof(id->oui), id->oui);
seq_printf(file, "ID: %*pE\n",
(int)strnlen(id->device_id, sizeof(id->device_id)),
@@ -238,6 +240,10 @@ void drm_dp_cec_irq(struct drm_dp_aux *aux)
u8 cec_irq;
int ret;
+ /* No transfer function was set, so not a DP connector */
+ if (!aux->transfer)
+ return;
+
mutex_lock(&aux->cec.lock);
if (!aux->cec.adap)
goto unlock;
@@ -293,6 +299,10 @@ void drm_dp_cec_set_edid(struct drm_dp_aux *aux, const struct edid *edid)
unsigned int num_las = 1;
u8 cap;
+ /* No transfer function was set, so not a DP connector */
+ if (!aux->transfer)
+ return;
+
#ifndef CONFIG_MEDIA_CEC_RC
/*
* CEC_CAP_RC is part of CEC_CAP_DEFAULTS, but it is stripped by
@@ -361,6 +371,10 @@ EXPORT_SYMBOL(drm_dp_cec_set_edid);
*/
void drm_dp_cec_unset_edid(struct drm_dp_aux *aux)
{
+ /* No transfer function was set, so not a DP connector */
+ if (!aux->transfer)
+ return;
+
cancel_delayed_work_sync(&aux->cec.unregister_work);
mutex_lock(&aux->cec.lock);
@@ -404,6 +418,8 @@ void drm_dp_cec_register_connector(struct drm_dp_aux *aux, const char *name,
struct device *parent)
{
WARN_ON(aux->cec.adap);
+ if (WARN_ON(!aux->transfer))
+ return;
aux->cec.name = name;
aux->cec.parent = parent;
INIT_DELAYED_WORK(&aux->cec.unregister_work,
diff --git a/drivers/gpu/drm/drm_dp_helper.c b/drivers/gpu/drm/drm_dp_helper.c
index 0cccbcb2d03e..8c6b9fd89f8a 100644
--- a/drivers/gpu/drm/drm_dp_helper.c
+++ b/drivers/gpu/drm/drm_dp_helper.c
@@ -850,7 +850,8 @@ static int drm_dp_i2c_do_msg(struct drm_dp_aux *aux, struct drm_dp_aux_msg *msg)
return ret;
case DP_AUX_I2C_REPLY_NACK:
- DRM_DEBUG_KMS("I2C nack (result=%d, size=%zu\n", ret, msg->size);
+ DRM_DEBUG_KMS("I2C nack (result=%d, size=%zu)\n",
+ ret, msg->size);
aux->i2c_nack_count++;
return -EREMOTEIO;
diff --git a/drivers/gpu/drm/drm_dp_mst_topology.c b/drivers/gpu/drm/drm_dp_mst_topology.c
index 7780567aa669..5ff1d79b86c4 100644
--- a/drivers/gpu/drm/drm_dp_mst_topology.c
+++ b/drivers/gpu/drm/drm_dp_mst_topology.c
@@ -439,6 +439,7 @@ static bool drm_dp_sideband_parse_remote_dpcd_read(struct drm_dp_sideband_msg_rx
if (idx > raw->curlen)
goto fail_len;
repmsg->u.remote_dpcd_read_ack.num_bytes = raw->msg[idx];
+ idx++;
if (idx > raw->curlen)
goto fail_len;
diff --git a/drivers/gpu/drm/drm_edid.c b/drivers/gpu/drm/drm_edid.c
index 5dc742b27ca0..3c9fc99648b7 100644
--- a/drivers/gpu/drm/drm_edid.c
+++ b/drivers/gpu/drm/drm_edid.c
@@ -116,6 +116,9 @@ static const struct edid_quirk {
/* CPT panel of Asus UX303LA reports 8 bpc, but is a 6 bpc panel */
{ "CPT", 0x17df, EDID_QUIRK_FORCE_6BPC },
+ /* SDC panel of Lenovo B50-80 reports 8 bpc, but is a 6 bpc panel */
+ { "SDC", 0x3652, EDID_QUIRK_FORCE_6BPC },
+
/* Belinea 10 15 55 */
{ "MAX", 1516, EDID_QUIRK_PREFER_LARGE_60 },
{ "MAX", 0x77e, EDID_QUIRK_PREFER_LARGE_60 },
diff --git a/drivers/gpu/drm/drm_fb_cma_helper.c b/drivers/gpu/drm/drm_fb_cma_helper.c
index 9da36a6271d3..47e0e2f6642d 100644
--- a/drivers/gpu/drm/drm_fb_cma_helper.c
+++ b/drivers/gpu/drm/drm_fb_cma_helper.c
@@ -86,14 +86,21 @@ dma_addr_t drm_fb_cma_get_gem_addr(struct drm_framebuffer *fb,
{
struct drm_gem_cma_object *obj;
dma_addr_t paddr;
+ u8 h_div = 1, v_div = 1;
obj = drm_fb_cma_get_gem_obj(fb, plane);
if (!obj)
return 0;
paddr = obj->paddr + fb->offsets[plane];
- paddr += fb->format->cpp[plane] * (state->src_x >> 16);
- paddr += fb->pitches[plane] * (state->src_y >> 16);
+
+ if (plane > 0) {
+ h_div = fb->format->hsub;
+ v_div = fb->format->vsub;
+ }
+
+ paddr += (fb->format->cpp[plane] * (state->src_x >> 16)) / h_div;
+ paddr += (fb->pitches[plane] * (state->src_y >> 16)) / v_div;
return paddr;
}
@@ -219,21 +226,6 @@ void drm_fbdev_cma_hotplug_event(struct drm_fbdev_cma *fbdev_cma)
EXPORT_SYMBOL_GPL(drm_fbdev_cma_hotplug_event);
/**
- * drm_fbdev_cma_set_suspend - wrapper around drm_fb_helper_set_suspend
- * @fbdev_cma: The drm_fbdev_cma struct, may be NULL
- * @state: desired state, zero to resume, non-zero to suspend
- *
- * Calls drm_fb_helper_set_suspend, which is a wrapper around
- * fb_set_suspend implemented by fbdev core.
- */
-void drm_fbdev_cma_set_suspend(struct drm_fbdev_cma *fbdev_cma, bool state)
-{
- if (fbdev_cma)
- drm_fb_helper_set_suspend(&fbdev_cma->fb_helper, state);
-}
-EXPORT_SYMBOL(drm_fbdev_cma_set_suspend);
-
-/**
* drm_fbdev_cma_set_suspend_unlocked - wrapper around
* drm_fb_helper_set_suspend_unlocked
* @fbdev_cma: The drm_fbdev_cma struct, may be NULL
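The new divisors matter for sub-sampled formats: with NV12 (hsub = vsub = 2) the chroma plane advances half as fast as luma in both directions. A hedged illustration (nv12_chroma_offset() is hypothetical):

/* Offset of the CbCr plane for a 64x64 source origin; mirrors the
 * per-plane math in drm_fb_cma_get_gem_addr() above.
 */
static u32 nv12_chroma_offset(struct drm_framebuffer *fb)
{
	unsigned int src_x = 64, src_y = 64;

	return fb->offsets[1] +
	       (fb->format->cpp[1] * src_x) / fb->format->hsub +
	       (fb->pitches[1] * src_y) / fb->format->vsub;
}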
diff --git a/drivers/gpu/drm/drm_gem_cma_helper.c b/drivers/gpu/drm/drm_gem_cma_helper.c
index 80a5115c3846..1d2ced882b66 100644
--- a/drivers/gpu/drm/drm_gem_cma_helper.c
+++ b/drivers/gpu/drm/drm_gem_cma_helper.c
@@ -436,7 +436,7 @@ struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
sgt = kzalloc(sizeof(*sgt), GFP_KERNEL);
if (!sgt)
- return NULL;
+ return ERR_PTR(-ENOMEM);
ret = dma_get_sgtable(obj->dev->dev, sgt, cma_obj->vaddr,
cma_obj->paddr, obj->size);
@@ -447,7 +447,7 @@ struct sg_table *drm_gem_cma_prime_get_sg_table(struct drm_gem_object *obj)
out:
kfree(sgt);
- return NULL;
+ return ERR_PTR(ret);
}
EXPORT_SYMBOL_GPL(drm_gem_cma_prime_get_sg_table);
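Callers must now follow the ERR_PTR() convention instead of checking for NULL. A sketch of the caller-side pattern (foo_export() is hypothetical):

static int foo_export(struct drm_gem_object *obj)
{
	struct sg_table *sgt = drm_gem_cma_prime_get_sg_table(obj);

	if (IS_ERR(sgt))
		return PTR_ERR(sgt);

	/* ... hand sgt to the importer ... */
	return 0;
}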
diff --git a/drivers/gpu/drm/drm_lease.c b/drivers/gpu/drm/drm_lease.c
index 50c73c0a20b9..b54fb78a283c 100644
--- a/drivers/gpu/drm/drm_lease.c
+++ b/drivers/gpu/drm/drm_lease.c
@@ -553,24 +553,13 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
/* Clone the lessor file to create a new file for us */
DRM_DEBUG_LEASE("Allocating lease file\n");
- path_get(&lessor_file->f_path);
- lessee_file = alloc_file(&lessor_file->f_path,
- lessor_file->f_mode,
- fops_get(lessor_file->f_inode->i_fop));
-
+ lessee_file = file_clone_open(lessor_file);
if (IS_ERR(lessee_file)) {
ret = PTR_ERR(lessee_file);
goto out_lessee;
}
- /* Initialize the new file for DRM */
- DRM_DEBUG_LEASE("Initializing the file with %p\n", lessee_file->f_op->open);
- ret = lessee_file->f_op->open(lessee_file->f_inode, lessee_file);
- if (ret)
- goto out_lessee_file;
-
lessee_priv = lessee_file->private_data;
-
/* Change the file to a master one */
drm_master_put(&lessee_priv->master);
lessee_priv->master = lessee;
@@ -588,9 +577,6 @@ int drm_mode_create_lease_ioctl(struct drm_device *dev,
DRM_DEBUG_LEASE("drm_mode_create_lease_ioctl succeeded\n");
return 0;
-out_lessee_file:
- fput(lessee_file);
-
out_lessee:
drm_master_put(&lessee);
diff --git a/drivers/gpu/drm/drm_mipi_dsi.c b/drivers/gpu/drm/drm_mipi_dsi.c
index bc73b7f5b9fc..80b75501f5c6 100644
--- a/drivers/gpu/drm/drm_mipi_dsi.c
+++ b/drivers/gpu/drm/drm_mipi_dsi.c
@@ -392,6 +392,7 @@ bool mipi_dsi_packet_format_is_short(u8 type)
case MIPI_DSI_DCS_SHORT_WRITE:
case MIPI_DSI_DCS_SHORT_WRITE_PARAM:
case MIPI_DSI_DCS_READ:
+ case MIPI_DSI_DCS_COMPRESSION_MODE:
case MIPI_DSI_SET_MAXIMUM_RETURN_PACKET_SIZE:
return true;
}
@@ -410,6 +411,7 @@ EXPORT_SYMBOL(mipi_dsi_packet_format_is_short);
bool mipi_dsi_packet_format_is_long(u8 type)
{
switch (type) {
+ case MIPI_DSI_PPS_LONG_WRITE:
case MIPI_DSI_NULL_PACKET:
case MIPI_DSI_BLANKING_PACKET:
case MIPI_DSI_GENERIC_LONG_WRITE:
diff --git a/drivers/gpu/drm/drm_panel.c b/drivers/gpu/drm/drm_panel.c
index b902361dee6e..0db486d10d1c 100644
--- a/drivers/gpu/drm/drm_panel.c
+++ b/drivers/gpu/drm/drm_panel.c
@@ -152,7 +152,9 @@ EXPORT_SYMBOL(drm_panel_detach);
*
* Return: A pointer to the panel registered for the specified device tree
* node or an ERR_PTR() if no panel matching the device tree node can be found.
+ *
* Possible error codes returned by this function:
+ *
* - EPROBE_DEFER: the panel device has not been probed yet, and the caller
* should retry later
* - ENODEV: the device is not available (status != "okay" or "ok")
diff --git a/drivers/gpu/drm/drm_print.c b/drivers/gpu/drm/drm_print.c
index b25f98f33f6c..0e7fc3e7dfb4 100644
--- a/drivers/gpu/drm/drm_print.c
+++ b/drivers/gpu/drm/drm_print.c
@@ -30,6 +30,100 @@
#include <drm/drmP.h>
#include <drm/drm_print.h>
+void __drm_puts_coredump(struct drm_printer *p, const char *str)
+{
+ struct drm_print_iterator *iterator = p->arg;
+ ssize_t len;
+
+ if (!iterator->remain)
+ return;
+
+ if (iterator->offset < iterator->start) {
+ ssize_t copy;
+
+ len = strlen(str);
+
+ if (iterator->offset + len <= iterator->start) {
+ iterator->offset += len;
+ return;
+ }
+
+ copy = len - (iterator->start - iterator->offset);
+
+ if (copy > iterator->remain)
+ copy = iterator->remain;
+
+ /* Copy out the bit of the string that we need */
+ memcpy(iterator->data,
+ str + (iterator->start - iterator->offset), copy);
+
+ iterator->offset = iterator->start + copy;
+ iterator->remain -= copy;
+ } else {
+ ssize_t pos = iterator->offset - iterator->start;
+
+ len = min_t(ssize_t, strlen(str), iterator->remain);
+
+ memcpy(iterator->data + pos, str, len);
+
+ iterator->offset += len;
+ iterator->remain -= len;
+ }
+}
+EXPORT_SYMBOL(__drm_puts_coredump);
+
+void __drm_printfn_coredump(struct drm_printer *p, struct va_format *vaf)
+{
+ struct drm_print_iterator *iterator = p->arg;
+ size_t len;
+ char *buf;
+
+ if (!iterator->remain)
+ return;
+
+ /* Figure out how big the string will be */
+ len = snprintf(NULL, 0, "%pV", vaf);
+
+ /* This is the easiest path, we've already advanced beyond the offset */
+ if (iterator->offset + len <= iterator->start) {
+ iterator->offset += len;
+ return;
+ }
+
+ /* Then check if we can directly copy into the target buffer */
+ if ((iterator->offset >= iterator->start) && (len < iterator->remain)) {
+ ssize_t pos = iterator->offset - iterator->start;
+
+ snprintf(((char *) iterator->data) + pos,
+ iterator->remain, "%pV", vaf);
+
+ iterator->offset += len;
+ iterator->remain -= len;
+
+ return;
+ }
+
+ /*
+ * Finally, hit the slow path and make a temporary string to copy over
+ * using __drm_puts_coredump()
+ */
+ buf = kmalloc(len + 1, GFP_KERNEL | __GFP_NOWARN | __GFP_NORETRY);
+ if (!buf)
+ return;
+
+ snprintf(buf, len + 1, "%pV", vaf);
+ __drm_puts_coredump(p, (const char *) buf);
+
+ kfree(buf);
+}
+EXPORT_SYMBOL(__drm_printfn_coredump);
+
+void __drm_puts_seq_file(struct drm_printer *p, const char *str)
+{
+ seq_puts(p->arg, str);
+}
+EXPORT_SYMBOL(__drm_puts_seq_file);
+
void __drm_printfn_seq_file(struct drm_printer *p, struct va_format *vaf)
{
seq_printf(p->arg, "%pV", vaf);
@@ -49,6 +143,23 @@ void __drm_printfn_debug(struct drm_printer *p, struct va_format *vaf)
EXPORT_SYMBOL(__drm_printfn_debug);
/**
+ * drm_puts - print a const string to a &drm_printer stream
+ * @p: the &drm printer
+ * @str: const string
+ *
+ * Allow &drm_printer types that have a constant string
+ * option to use it.
+ */
+void drm_puts(struct drm_printer *p, const char *str)
+{
+ if (p->puts)
+ p->puts(p, str);
+ else
+ drm_printf(p, "%s", str);
+}
+EXPORT_SYMBOL(drm_puts);
+
+/**
* drm_printf - print to a &drm_printer stream
* @p: the &drm_printer
* @f: format string
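A hedged sketch of wiring a &drm_printer to the new coredump callbacks (foo_dump() is hypothetical; the iterator and printer fields match those referenced above):

/* Print into a bounded caller-supplied buffer, honouring the requested
 * start offset; returns the number of bytes produced.
 */
static ssize_t foo_dump(char *buf, loff_t offset, size_t count)
{
	struct drm_print_iterator iter = {
		.data = buf,
		.start = offset,
		.remain = count,
	};
	struct drm_printer p = {
		.printfn = __drm_printfn_coredump,
		.puts = __drm_puts_coredump,
		.arg = &iter,
	};

	drm_puts(&p, "---\n");
	drm_printf(&p, "module: %s\n", "foo");

	return count - iter.remain;
}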
diff --git a/drivers/gpu/drm/drm_property.c b/drivers/gpu/drm/drm_property.c
index 1f8031e30f53..cdb10f885a4f 100644
--- a/drivers/gpu/drm/drm_property.c
+++ b/drivers/gpu/drm/drm_property.c
@@ -532,7 +532,7 @@ static void drm_property_free_blob(struct kref *kref)
drm_mode_object_unregister(blob->dev, &blob->base);
- kfree(blob);
+ kvfree(blob);
}
/**
@@ -559,7 +559,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
if (!length || length > ULONG_MAX - sizeof(struct drm_property_blob))
return ERR_PTR(-EINVAL);
- blob = kzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
+ blob = kvzalloc(sizeof(struct drm_property_blob)+length, GFP_KERNEL);
if (!blob)
return ERR_PTR(-ENOMEM);
@@ -576,7 +576,7 @@ drm_property_create_blob(struct drm_device *dev, size_t length,
ret = __drm_mode_object_add(dev, &blob->base, DRM_MODE_OBJECT_BLOB,
true, drm_property_free_blob);
if (ret) {
- kfree(blob);
+ kvfree(blob);
return ERR_PTR(-EINVAL);
}
diff --git a/drivers/gpu/drm/drm_syncobj.c b/drivers/gpu/drm/drm_syncobj.c
index adb3cb27d31e..3a8837c49639 100644
--- a/drivers/gpu/drm/drm_syncobj.c
+++ b/drivers/gpu/drm/drm_syncobj.c
@@ -120,14 +120,6 @@ static int drm_syncobj_fence_get_or_add_callback(struct drm_syncobj *syncobj,
return ret;
}
-/**
- * drm_syncobj_add_callback - adds a callback to syncobj::cb_list
- * @syncobj: Sync object to which to add the callback
- * @cb: Callback to add
- * @func: Func to use when initializing the drm_syncobj_cb struct
- *
- * This adds a callback to be called next time the fence is replaced
- */
void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
struct drm_syncobj_cb *cb,
drm_syncobj_func_t func)
@@ -136,13 +128,7 @@ void drm_syncobj_add_callback(struct drm_syncobj *syncobj,
drm_syncobj_add_callback_locked(syncobj, cb, func);
spin_unlock(&syncobj->lock);
}
-EXPORT_SYMBOL(drm_syncobj_add_callback);
-/**
- * drm_syncobj_add_callback - removes a callback to syncobj::cb_list
- * @syncobj: Sync object from which to remove the callback
- * @cb: Callback to remove
- */
void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
struct drm_syncobj_cb *cb)
{
@@ -150,7 +136,6 @@ void drm_syncobj_remove_callback(struct drm_syncobj *syncobj,
list_del_init(&cb->node);
spin_unlock(&syncobj->lock);
}
-EXPORT_SYMBOL(drm_syncobj_remove_callback);
/**
* drm_syncobj_replace_fence - replace fence in a sync object.
diff --git a/drivers/gpu/drm/drm_vblank.c b/drivers/gpu/drm/drm_vblank.c
index 28cdcf76b6f9..ec2dcfdac8ef 100644
--- a/drivers/gpu/drm/drm_vblank.c
+++ b/drivers/gpu/drm/drm_vblank.c
@@ -873,8 +873,8 @@ static void send_vblank_event(struct drm_device *dev,
* handler by calling drm_crtc_send_vblank_event() and make sure that there's no
* possible race with the hardware committing the atomic update.
*
- * Caller must hold a vblank reference for the event @e, which will be dropped
- * when the next vblank arrives.
+ * Caller must hold a vblank reference for the event @e acquired by a
+ * drm_crtc_vblank_get(), which will be dropped when the next vblank arrives.
*/
void drm_crtc_arm_vblank_event(struct drm_crtc *crtc,
struct drm_pending_vblank_event *e)
@@ -1541,7 +1541,7 @@ int drm_wait_vblank_ioctl(struct drm_device *dev, void *data,
if (vblwait->request.type &
~(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
_DRM_VBLANK_HIGH_CRTC_MASK)) {
- DRM_ERROR("Unsupported type value 0x%x, supported mask 0x%x\n",
+ DRM_DEBUG("Unsupported type value 0x%x, supported mask 0x%x\n",
vblwait->request.type,
(_DRM_VBLANK_TYPES_MASK | _DRM_VBLANK_FLAGS_MASK |
_DRM_VBLANK_HIGH_CRTC_MASK));
diff --git a/drivers/gpu/drm/drm_vma_manager.c b/drivers/gpu/drm/drm_vma_manager.c
index a6b2fe36b025..c5d0d2358301 100644
--- a/drivers/gpu/drm/drm_vma_manager.c
+++ b/drivers/gpu/drm/drm_vma_manager.c
@@ -103,10 +103,7 @@ EXPORT_SYMBOL(drm_vma_offset_manager_init);
*/
void drm_vma_offset_manager_destroy(struct drm_vma_offset_manager *mgr)
{
- /* take the lock to protect against buggy drivers */
- write_lock(&mgr->vm_lock);
drm_mm_takedown(&mgr->vm_addr_space_mm);
- write_unlock(&mgr->vm_lock);
}
EXPORT_SYMBOL(drm_vma_offset_manager_destroy);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.c b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
index 36414ba56b22..9b2720b41571 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.c
@@ -78,8 +78,7 @@ static void etnaviv_postclose(struct drm_device *dev, struct drm_file *file)
gpu->lastctx = NULL;
mutex_unlock(&gpu->lock);
- drm_sched_entity_destroy(&gpu->sched,
- &ctx->sched_entity[i]);
+ drm_sched_entity_destroy(&ctx->sched_entity[i]);
}
}
@@ -631,8 +630,11 @@ static struct platform_driver etnaviv_platform_driver = {
},
};
+static struct platform_device *etnaviv_drm;
+
static int __init etnaviv_init(void)
{
+ struct platform_device *pdev;
int ret;
struct device_node *np;
@@ -644,7 +646,7 @@ static int __init etnaviv_init(void)
ret = platform_driver_register(&etnaviv_platform_driver);
if (ret != 0)
- platform_driver_unregister(&etnaviv_gpu_driver);
+ goto unregister_gpu_driver;
/*
* If the DT contains at least one available GPU device, instantiate
@@ -653,20 +655,33 @@ static int __init etnaviv_init(void)
for_each_compatible_node(np, NULL, "vivante,gc") {
if (!of_device_is_available(np))
continue;
-
- platform_device_register_simple("etnaviv", -1, NULL, 0);
+ pdev = platform_device_register_simple("etnaviv", -1,
+ NULL, 0);
+ if (IS_ERR(pdev)) {
+ ret = PTR_ERR(pdev);
+ of_node_put(np);
+ goto unregister_platform_driver;
+ }
+ etnaviv_drm = pdev;
of_node_put(np);
break;
}
+ return 0;
+
+unregister_platform_driver:
+ platform_driver_unregister(&etnaviv_platform_driver);
+unregister_gpu_driver:
+ platform_driver_unregister(&etnaviv_gpu_driver);
return ret;
}
module_init(etnaviv_init);
static void __exit etnaviv_exit(void)
{
- platform_driver_unregister(&etnaviv_gpu_driver);
+ platform_device_unregister(etnaviv_drm);
platform_driver_unregister(&etnaviv_platform_driver);
+ platform_driver_unregister(&etnaviv_gpu_driver);
}
module_exit(etnaviv_exit);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_drv.h b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
index d36c7bbe66db..8d02d1b7dcf5 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_drv.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_drv.h
@@ -18,6 +18,7 @@
#include <linux/time64.h>
#include <linux/types.h>
#include <linux/sizes.h>
+#include <linux/mm_types.h>
#include <drm/drmP.h>
#include <drm/drm_crtc_helper.h>
@@ -53,7 +54,7 @@ int etnaviv_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int etnaviv_gem_fault(struct vm_fault *vmf);
+vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf);
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset);
struct sg_table *etnaviv_gem_prime_get_sg_table(struct drm_gem_object *obj);
void *etnaviv_gem_prime_vmap(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem.c b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
index 209ef1274b80..1fa74226db91 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem.c
@@ -169,31 +169,30 @@ int etnaviv_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return obj->ops->mmap(obj, vma);
}
-int etnaviv_gem_fault(struct vm_fault *vmf)
+vm_fault_t etnaviv_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
struct etnaviv_gem_object *etnaviv_obj = to_etnaviv_bo(obj);
struct page **pages, *page;
pgoff_t pgoff;
- int ret;
+ int err;
/*
* Make sure we don't parallel update on a fault, nor move or remove
- * something from beneath our feet. Note that vm_insert_page() is
+ * something from beneath our feet. Note that vmf_insert_page() is
* specifically coded to take care of this, so we don't have to.
*/
- ret = mutex_lock_interruptible(&etnaviv_obj->lock);
- if (ret)
- goto out;
-
+ err = mutex_lock_interruptible(&etnaviv_obj->lock);
+ if (err)
+ return VM_FAULT_NOPAGE;
/* make sure we have pages attached now */
pages = etnaviv_gem_get_pages(etnaviv_obj);
mutex_unlock(&etnaviv_obj->lock);
if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
- goto out;
+ err = PTR_ERR(pages);
+ return vmf_error(err);
}
/* We don't use vmf->pgoff since that has the fake offset: */
@@ -204,25 +203,7 @@ int etnaviv_gem_fault(struct vm_fault *vmf)
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
page_to_pfn(page), page_to_pfn(page) << PAGE_SHIFT);
- ret = vm_insert_page(vma, vmf->address, page);
-
-out:
- switch (ret) {
- case -EAGAIN:
- case 0:
- case -ERESTARTSYS:
- case -EINTR:
- case -EBUSY:
- /*
- * EBUSY is ok: this just means that another thread
- * already did the job.
- */
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- default:
- return VM_FAULT_SIGBUS;
- }
+ return vmf_insert_page(vma, vmf->address, page);
}
int etnaviv_gem_mmap_offset(struct drm_gem_object *obj, u64 *offset)
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
index 46ecd3e66ac9..983e67f19e45 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gem_submit.c
@@ -388,9 +388,9 @@ static void submit_cleanup(struct kref *kref)
dma_fence_put(submit->in_fence);
if (submit->out_fence) {
/* first remove from IDR, so fence can not be found anymore */
- mutex_lock(&submit->gpu->fence_idr_lock);
+ mutex_lock(&submit->gpu->fence_lock);
idr_remove(&submit->gpu->fence_idr, submit->out_fence_id);
- mutex_unlock(&submit->gpu->fence_idr_lock);
+ mutex_unlock(&submit->gpu->fence_lock);
dma_fence_put(submit->out_fence);
}
kfree(submit->pmrs);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
index 19b09a59e30e..f225fbc6edd2 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.c
@@ -799,6 +799,7 @@ int etnaviv_gpu_init(struct etnaviv_gpu *gpu)
free_buffer:
etnaviv_cmdbuf_free(&gpu->buffer);
+ gpu->buffer.suballoc = NULL;
destroy_iommu:
etnaviv_iommu_destroy(gpu->mmu);
gpu->mmu = NULL;
@@ -1726,7 +1727,7 @@ static int etnaviv_gpu_platform_probe(struct platform_device *pdev)
gpu->dev = &pdev->dev;
mutex_init(&gpu->lock);
- mutex_init(&gpu->fence_idr_lock);
+ mutex_init(&gpu->fence_lock);
/* Map registers: */
res = platform_get_resource(pdev, IORESOURCE_MEM, 0);
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
index dd430f0f8ff5..9a75a6937268 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
+++ b/drivers/gpu/drm/etnaviv/etnaviv_gpu.h
@@ -118,7 +118,7 @@ struct etnaviv_gpu {
u32 idle_mask;
/* Fencing support */
- struct mutex fence_idr_lock;
+ struct mutex fence_lock;
struct idr fence_idr;
u32 next_fence;
u32 active_fence;
@@ -131,6 +131,9 @@ struct etnaviv_gpu {
struct work_struct sync_point_work;
int sync_point_event;
+ /* hang detection */
+ u32 hangcheck_dma_addr;
+
void __iomem *mmio;
int irq;
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
index 71fbc1f96cb6..f1c88d8ad5ba 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_iommu_v2.c
@@ -119,8 +119,7 @@ static size_t etnaviv_iommuv2_unmap(struct etnaviv_iommu_domain *domain,
static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
{
- u32 *p;
- int ret, i;
+ int ret;
/* allocate scratch page */
etnaviv_domain->base.bad_page_cpu =
@@ -131,9 +130,9 @@ static int etnaviv_iommuv2_init(struct etnaviv_iommuv2_domain *etnaviv_domain)
ret = -ENOMEM;
goto fail_mem;
}
- p = etnaviv_domain->base.bad_page_cpu;
- for (i = 0; i < SZ_4K / 4; i++)
- *p++ = 0xdead55aa;
+
+ memset32(etnaviv_domain->base.bad_page_cpu, 0xdead55aa,
+ SZ_4K / sizeof(u32));
etnaviv_domain->pta_cpu = dma_alloc_wc(etnaviv_domain->base.dev,
SZ_4K, &etnaviv_domain->pta_dma,
diff --git a/drivers/gpu/drm/etnaviv/etnaviv_sched.c b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
index a74eb57af15b..69e9b431bf1f 100644
--- a/drivers/gpu/drm/etnaviv/etnaviv_sched.c
+++ b/drivers/gpu/drm/etnaviv/etnaviv_sched.c
@@ -10,6 +10,7 @@
#include "etnaviv_gem.h"
#include "etnaviv_gpu.h"
#include "etnaviv_sched.h"
+#include "state.xml.h"
static int etnaviv_job_hang_limit = 0;
module_param_named(job_hang_limit, etnaviv_job_hang_limit, int , 0444);
@@ -85,6 +86,29 @@ static void etnaviv_sched_timedout_job(struct drm_sched_job *sched_job)
{
struct etnaviv_gem_submit *submit = to_etnaviv_submit(sched_job);
struct etnaviv_gpu *gpu = submit->gpu;
+ u32 dma_addr;
+ int change;
+
+ /*
+ * If the GPU managed to complete this job's fence, the timeout is
+ * spurious. Bail out.
+ */
+ if (fence_completed(gpu, submit->out_fence->seqno))
+ return;
+
+ /*
+ * If the GPU is still making forward progress on the front-end (which
+ * should never loop) we shift out the timeout to give it a chance to
+ * finish the job.
+ */
+ dma_addr = gpu_read(gpu, VIVS_FE_DMA_ADDRESS);
+ change = dma_addr - gpu->hangcheck_dma_addr;
+ if (change < 0 || change > 16) {
+ gpu->hangcheck_dma_addr = dma_addr;
+ schedule_delayed_work(&sched_job->work_tdr,
+ sched_job->sched->timeout);
+ return;
+ }
/* block scheduler */
kthread_park(gpu->sched.thread);
@@ -116,28 +140,38 @@ static const struct drm_sched_backend_ops etnaviv_sched_ops = {
int etnaviv_sched_push_job(struct drm_sched_entity *sched_entity,
struct etnaviv_gem_submit *submit)
{
- int ret;
+ int ret = 0;
- ret = drm_sched_job_init(&submit->sched_job, &submit->gpu->sched,
- sched_entity, submit->cmdbuf.ctx);
+ /*
+ * Hold the fence lock across the whole operation to avoid jobs being
+ * pushed out of order with regard to their sched fence seqnos as
+ * allocated in drm_sched_job_init.
+ */
+ mutex_lock(&submit->gpu->fence_lock);
+
+ ret = drm_sched_job_init(&submit->sched_job, sched_entity,
+ submit->cmdbuf.ctx);
if (ret)
- return ret;
+ goto out_unlock;
submit->out_fence = dma_fence_get(&submit->sched_job.s_fence->finished);
- mutex_lock(&submit->gpu->fence_idr_lock);
submit->out_fence_id = idr_alloc_cyclic(&submit->gpu->fence_idr,
submit->out_fence, 0,
INT_MAX, GFP_KERNEL);
- mutex_unlock(&submit->gpu->fence_idr_lock);
- if (submit->out_fence_id < 0)
- return -ENOMEM;
+ if (submit->out_fence_id < 0) {
+ ret = -ENOMEM;
+ goto out_unlock;
+ }
/* the scheduler holds on to the job now */
kref_get(&submit->refcount);
drm_sched_entity_push_job(&submit->sched_job, sched_entity);
- return 0;
+out_unlock:
+ mutex_unlock(&submit->gpu->fence_lock);
+
+ return ret;
}
int etnaviv_sched_init(struct etnaviv_gpu *gpu)
diff --git a/drivers/gpu/drm/exynos/Makefile b/drivers/gpu/drm/exynos/Makefile
index 3b323f1e0475..2ad146bbf4f5 100644
--- a/drivers/gpu/drm/exynos/Makefile
+++ b/drivers/gpu/drm/exynos/Makefile
@@ -4,7 +4,7 @@
# Direct Rendering Infrastructure (DRI) in XFree86 4.1.0 and higher.
exynosdrm-y := exynos_drm_drv.o exynos_drm_crtc.o exynos_drm_fb.o \
- exynos_drm_gem.o exynos_drm_core.o exynos_drm_plane.o
+ exynos_drm_gem.o exynos_drm_plane.o
exynosdrm-$(CONFIG_DRM_FBDEV_EMULATION) += exynos_drm_fbdev.o
exynosdrm-$(CONFIG_DRM_EXYNOS_IOMMU) += exynos_drm_iommu.o
diff --git a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
index 82c95c34447f..94529aa82339 100644
--- a/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos5433_drm_decon.c
@@ -265,7 +265,7 @@ static void decon_win_set_pixfmt(struct decon_context *ctx, unsigned int win,
unsigned long val;
val = readl(ctx->addr + DECON_WINCONx(win));
- val &= ~WINCONx_BPPMODE_MASK;
+ val &= WINCONx_ENWIN_F;
switch (fb->format->format) {
case DRM_FORMAT_XRGB1555:
@@ -356,8 +356,8 @@ static void decon_update_plane(struct exynos_drm_crtc *crtc,
writel(val, ctx->addr + DECON_VIDOSDxB(win));
}
- val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
- VIDOSD_Wx_ALPHA_B_F(0x0);
+ val = VIDOSD_Wx_ALPHA_R_F(0xff) | VIDOSD_Wx_ALPHA_G_F(0xff) |
+ VIDOSD_Wx_ALPHA_B_F(0xff);
writel(val, ctx->addr + DECON_VIDOSDxC(win));
val = VIDOSD_Wx_ALPHA_R_F(0x0) | VIDOSD_Wx_ALPHA_G_F(0x0) |
@@ -673,6 +673,8 @@ err:
static const struct dev_pm_ops exynos5433_decon_pm_ops = {
SET_RUNTIME_PM_OPS(exynos5433_decon_suspend, exynos5433_decon_resume,
NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static const struct of_device_id exynos5433_decon_driver_dt_match[] = {
diff --git a/drivers/gpu/drm/exynos/exynos7_drm_decon.c b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
index 3931d5e33fe0..88cbd000eb09 100644
--- a/drivers/gpu/drm/exynos/exynos7_drm_decon.c
+++ b/drivers/gpu/drm/exynos/exynos7_drm_decon.c
@@ -832,6 +832,8 @@ static int exynos7_decon_resume(struct device *dev)
static const struct dev_pm_ops exynos7_decon_pm_ops = {
SET_RUNTIME_PM_OPS(exynos7_decon_suspend, exynos7_decon_resume,
NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
struct platform_driver decon_driver = {
diff --git a/drivers/gpu/drm/exynos/exynos_dp.c b/drivers/gpu/drm/exynos/exynos_dp.c
index af7ab1ceb50f..c8449ae4f4fe 100644
--- a/drivers/gpu/drm/exynos/exynos_dp.c
+++ b/drivers/gpu/drm/exynos/exynos_dp.c
@@ -16,6 +16,7 @@
#include <linux/clk.h>
#include <linux/of_graph.h>
#include <linux/component.h>
+#include <linux/pm_runtime.h>
#include <video/of_display_timing.h>
#include <video/of_videomode.h>
#include <video/videomode.h>
@@ -278,6 +279,8 @@ static int exynos_dp_resume(struct device *dev)
static const struct dev_pm_ops exynos_dp_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_dp_suspend, exynos_dp_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static const struct of_device_id exynos_dp_match[] = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_core.c b/drivers/gpu/drm/exynos/exynos_drm_core.c
deleted file mode 100644
index b0c0621fcdf7..000000000000
--- a/drivers/gpu/drm/exynos/exynos_drm_core.c
+++ /dev/null
@@ -1,119 +0,0 @@
-/* exynos_drm_core.c
- *
- * Copyright (c) 2011 Samsung Electronics Co., Ltd.
- * Author:
- * Inki Dae <inki.dae@samsung.com>
- * Joonyoung Shim <jy0922.shim@samsung.com>
- * Seung-Woo Kim <sw0312.kim@samsung.com>
- *
- * This program is free software; you can redistribute it and/or modify it
- * under the terms of the GNU General Public License as published by the
- * Free Software Foundation; either version 2 of the License, or (at your
- * option) any later version.
- */
-
-#include <drm/drmP.h>
-
-#include "exynos_drm_drv.h"
-#include "exynos_drm_crtc.h"
-
-static LIST_HEAD(exynos_drm_subdrv_list);
-
-int exynos_drm_subdrv_register(struct exynos_drm_subdrv *subdrv)
-{
- if (!subdrv)
- return -EINVAL;
-
- list_add_tail(&subdrv->list, &exynos_drm_subdrv_list);
-
- return 0;
-}
-
-int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *subdrv)
-{
- if (!subdrv)
- return -EINVAL;
-
- list_del(&subdrv->list);
-
- return 0;
-}
-
-int exynos_drm_device_subdrv_probe(struct drm_device *dev)
-{
- struct exynos_drm_subdrv *subdrv, *n;
- int err;
-
- if (!dev)
- return -EINVAL;
-
- list_for_each_entry_safe(subdrv, n, &exynos_drm_subdrv_list, list) {
- if (subdrv->probe) {
- subdrv->drm_dev = dev;
-
- /*
- * this probe callback would be called by sub driver
- * after setting of all resources to this sub driver,
- * such as clock, irq and register map are done.
- */
- err = subdrv->probe(dev, subdrv->dev);
- if (err) {
- DRM_DEBUG("exynos drm subdrv probe failed.\n");
- list_del(&subdrv->list);
- continue;
- }
- }
- }
-
- return 0;
-}
-
-int exynos_drm_device_subdrv_remove(struct drm_device *dev)
-{
- struct exynos_drm_subdrv *subdrv;
-
- if (!dev) {
- WARN(1, "Unexpected drm device unregister!\n");
- return -EINVAL;
- }
-
- list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
- if (subdrv->remove)
- subdrv->remove(dev, subdrv->dev);
- }
-
- return 0;
-}
-
-int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file)
-{
- struct exynos_drm_subdrv *subdrv;
- int ret;
-
- list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
- if (subdrv->open) {
- ret = subdrv->open(dev, subdrv->dev, file);
- if (ret)
- goto err;
- }
- }
-
- return 0;
-
-err:
- list_for_each_entry_continue_reverse(subdrv, &exynos_drm_subdrv_list, list) {
- if (subdrv->close)
- subdrv->close(dev, subdrv->dev, file);
- }
- return ret;
-}
-
-void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file)
-{
- struct exynos_drm_subdrv *subdrv;
-
- list_for_each_entry(subdrv, &exynos_drm_subdrv_list, list) {
- if (subdrv->close)
- subdrv->close(dev, subdrv->dev, file);
- }
-}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.c b/drivers/gpu/drm/exynos/exynos_drm_drv.c
index a81b4a5e24a7..b599f74692e5 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.c
@@ -55,8 +55,7 @@ static int exynos_drm_open(struct drm_device *dev, struct drm_file *file)
return -ENOMEM;
file->driver_priv = file_priv;
-
- ret = exynos_drm_subdrv_open(dev, file);
+ ret = g2d_open(dev, file);
if (ret)
goto err_file_priv_free;
@@ -70,7 +69,7 @@ err_file_priv_free:
static void exynos_drm_postclose(struct drm_device *dev, struct drm_file *file)
{
- exynos_drm_subdrv_close(dev, file);
+ g2d_close(dev, file);
kfree(file->driver_priv);
file->driver_priv = NULL;
}
@@ -147,13 +146,12 @@ static struct drm_driver exynos_drm_driver = {
.minor = DRIVER_MINOR,
};
-#ifdef CONFIG_PM_SLEEP
static int exynos_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct exynos_drm_private *private;
- if (pm_runtime_suspended(dev) || !drm_dev)
+ if (!drm_dev)
return 0;
private = drm_dev->dev_private;
@@ -170,25 +168,23 @@ static int exynos_drm_suspend(struct device *dev)
return 0;
}
-static int exynos_drm_resume(struct device *dev)
+static void exynos_drm_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
struct exynos_drm_private *private;
- if (pm_runtime_suspended(dev) || !drm_dev)
- return 0;
+ if (!drm_dev)
+ return;
private = drm_dev->dev_private;
drm_atomic_helper_resume(drm_dev, private->suspend_state);
exynos_drm_fbdev_resume(drm_dev);
drm_kms_helper_poll_enable(drm_dev);
-
- return 0;
}
-#endif
static const struct dev_pm_ops exynos_drm_pm_ops = {
- SET_SYSTEM_SLEEP_PM_OPS(exynos_drm_suspend, exynos_drm_resume)
+ .prepare = exynos_drm_suspend,
+ .complete = exynos_drm_resume,
};
/* forward declaration */
@@ -240,6 +236,7 @@ static struct exynos_drm_driver_info exynos_drm_drivers[] = {
DRM_COMPONENT_DRIVER | DRM_VIRTUAL_DEVICE
}, {
DRV_PTR(g2d_driver, CONFIG_DRM_EXYNOS_G2D),
+ DRM_COMPONENT_DRIVER
}, {
DRV_PTR(fimc_driver, CONFIG_DRM_EXYNOS_FIMC),
DRM_COMPONENT_DRIVER | DRM_FIMC_DEVICE,
@@ -376,11 +373,6 @@ static int exynos_drm_bind(struct device *dev)
if (ret)
goto err_unbind_all;
- /* Probe non kms sub drivers and virtual display driver. */
- ret = exynos_drm_device_subdrv_probe(drm);
- if (ret)
- goto err_unbind_all;
-
drm_mode_config_reset(drm);
/*
@@ -411,7 +403,6 @@ err_cleanup_fbdev:
exynos_drm_fbdev_fini(drm);
err_cleanup_poll:
drm_kms_helper_poll_fini(drm);
- exynos_drm_device_subdrv_remove(drm);
err_unbind_all:
component_unbind_all(drm->dev, drm);
err_mode_config_cleanup:
@@ -420,7 +411,7 @@ err_mode_config_cleanup:
err_free_private:
kfree(private);
err_free_drm:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -431,8 +422,6 @@ static void exynos_drm_unbind(struct device *dev)
drm_dev_unregister(drm);
- exynos_drm_device_subdrv_remove(drm);
-
exynos_drm_fbdev_fini(drm);
drm_kms_helper_poll_fini(drm);
@@ -444,7 +433,7 @@ static void exynos_drm_unbind(struct device *dev)
drm->dev_private = NULL;
dev_set_drvdata(dev, NULL);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
}
static const struct component_master_ops exynos_drm_ops = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_drv.h b/drivers/gpu/drm/exynos/exynos_drm_drv.h
index 0f6d079a55c9..c737c4bd2c19 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_drv.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_drv.h
@@ -179,17 +179,13 @@ static inline void exynos_drm_pipe_clk_enable(struct exynos_drm_crtc *crtc,
crtc->pipe_clk->enable(crtc->pipe_clk, enable);
}
-struct exynos_drm_g2d_private {
- struct device *dev;
+struct drm_exynos_file_private {
+ /* for g2d api */
struct list_head inuse_cmdlist;
struct list_head event_list;
struct list_head userptr_list;
};
-struct drm_exynos_file_private {
- struct exynos_drm_g2d_private *g2d_priv;
-};
-
/*
* Exynos drm private structure.
*
@@ -201,6 +197,7 @@ struct exynos_drm_private {
struct drm_fb_helper *fb_helper;
struct drm_atomic_state *suspend_state;
+ struct device *g2d_dev;
struct device *dma_dev;
void *mapping;
@@ -217,44 +214,6 @@ static inline struct device *to_dma_dev(struct drm_device *dev)
return priv->dma_dev;
}
-/*
- * Exynos drm sub driver structure.
- *
- * @list: sub driver has its own list object to register to exynos drm driver.
- * @dev: pointer to device object for subdrv device driver.
- * @drm_dev: pointer to drm_device and this pointer would be set
- * when sub driver calls exynos_drm_subdrv_register().
- * @probe: this callback would be called by exynos drm driver after
- * subdrv is registered to it.
- * @remove: this callback is used to release resources created
- * by probe callback.
- * @open: this would be called with drm device file open.
- * @close: this would be called with drm device file close.
- */
-struct exynos_drm_subdrv {
- struct list_head list;
- struct device *dev;
- struct drm_device *drm_dev;
-
- int (*probe)(struct drm_device *drm_dev, struct device *dev);
- void (*remove)(struct drm_device *drm_dev, struct device *dev);
- int (*open)(struct drm_device *drm_dev, struct device *dev,
- struct drm_file *file);
- void (*close)(struct drm_device *drm_dev, struct device *dev,
- struct drm_file *file);
-};
-
- /* This function would be called by non kms drivers such as g2d and ipp. */
-int exynos_drm_subdrv_register(struct exynos_drm_subdrv *drm_subdrv);
-
-/* this function removes subdrv list from exynos drm driver */
-int exynos_drm_subdrv_unregister(struct exynos_drm_subdrv *drm_subdrv);
-
-int exynos_drm_device_subdrv_probe(struct drm_device *dev);
-int exynos_drm_device_subdrv_remove(struct drm_device *dev);
-int exynos_drm_subdrv_open(struct drm_device *dev, struct drm_file *file);
-void exynos_drm_subdrv_close(struct drm_device *dev, struct drm_file *file);
-
#ifdef CONFIG_DRM_EXYNOS_DPI
struct drm_encoder *exynos_dpi_probe(struct device *dev);
int exynos_dpi_remove(struct drm_encoder *encoder);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_dsi.c b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
index a1ed6146a3b5..781b82c2c579 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_dsi.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_dsi.c
@@ -1863,6 +1863,8 @@ err_clk:
static const struct dev_pm_ops exynos_dsi_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_dsi_suspend, exynos_dsi_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
struct platform_driver dsi_driver = {
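The same two-line hunk recurs for fimd, mic, hdmi and mixer below: rather than open-coding system-sleep handlers, the drivers reuse their runtime-PM callbacks via pm_runtime_force_suspend()/pm_runtime_force_resume(), which force the device into (and back out of) runtime suspend across system sleep. Each ops table ends up with the same shape (sketch, foo_* placeholders):

static const struct dev_pm_ops foo_pm_ops = {
        SET_RUNTIME_PM_OPS(foo_runtime_suspend, foo_runtime_resume, NULL)
        SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
                                pm_runtime_force_resume)
};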
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fb.c b/drivers/gpu/drm/exynos/exynos_drm_fb.c
index 7fcc1a7ab1a0..9f52382e19ee 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fb.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fb.c
@@ -101,7 +101,6 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
{
const struct drm_format_info *info = drm_get_format_info(dev, mode_cmd);
struct exynos_drm_gem *exynos_gem[MAX_FB_BUFFER];
- struct drm_gem_object *obj;
struct drm_framebuffer *fb;
int i;
int ret;
@@ -112,15 +111,14 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
unsigned long size = height * mode_cmd->pitches[i] +
mode_cmd->offsets[i];
- obj = drm_gem_object_lookup(file_priv, mode_cmd->handles[i]);
- if (!obj) {
+ exynos_gem[i] = exynos_drm_gem_get(file_priv,
+ mode_cmd->handles[i]);
+ if (!exynos_gem[i]) {
DRM_ERROR("failed to lookup gem object\n");
ret = -ENOENT;
goto err;
}
- exynos_gem[i] = to_exynos_gem(obj);
-
if (size > exynos_gem[i]->size) {
i++;
ret = -EINVAL;
@@ -138,7 +136,7 @@ exynos_user_fb_create(struct drm_device *dev, struct drm_file *file_priv,
err:
while (i--)
- drm_gem_object_unreference_unlocked(&exynos_gem[i]->base);
+ exynos_drm_gem_put(exynos_gem[i]);
return ERR_PTR(ret);
}
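exynos_drm_gem_get()/exynos_drm_gem_put() turn the old handle-based helpers into an object-based pattern: the handle is resolved once, the caller holds the returned object (and its reference) for as long as it needs the buffer, then drops it. Expected usage, roughly:

struct exynos_drm_gem *gem;

gem = exynos_drm_gem_get(file_priv, handle);    /* takes a reference */
if (!gem)
        return -ENOENT;

/* gem->dma_addr and gem->size stay valid while the reference is held */

exynos_drm_gem_put(gem);                        /* drops the reference */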
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimc.c b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
index 6127ef25acd6..e8d0670bb5f8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimc.c
@@ -470,17 +470,18 @@ static void fimc_src_set_transf(struct fimc_context *ctx, unsigned int rotation)
static void fimc_set_window(struct fimc_context *ctx,
struct exynos_drm_ipp_buffer *buf)
{
+ unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
u32 cfg, h1, h2, v1, v2;
/* cropped image */
h1 = buf->rect.x;
- h2 = buf->buf.width - buf->rect.w - buf->rect.x;
+ h2 = real_width - buf->rect.w - buf->rect.x;
v1 = buf->rect.y;
v2 = buf->buf.height - buf->rect.h - buf->rect.y;
DRM_DEBUG_KMS("x[%d]y[%d]w[%d]h[%d]hsize[%d]vsize[%d]\n",
buf->rect.x, buf->rect.y, buf->rect.w, buf->rect.h,
- buf->buf.width, buf->buf.height);
+ real_width, buf->buf.height);
DRM_DEBUG_KMS("h1[%d]h2[%d]v1[%d]v2[%d]\n", h1, h2, v1, v2);
/*
@@ -503,12 +504,13 @@ static void fimc_set_window(struct fimc_context *ctx,
static void fimc_src_set_size(struct fimc_context *ctx,
struct exynos_drm_ipp_buffer *buf)
{
+ unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
u32 cfg;
- DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height);
+ DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height);
/* original size */
- cfg = (EXYNOS_ORGISIZE_HORIZONTAL(buf->buf.width) |
+ cfg = (EXYNOS_ORGISIZE_HORIZONTAL(real_width) |
EXYNOS_ORGISIZE_VERTICAL(buf->buf.height));
fimc_write(ctx, cfg, EXYNOS_ORGISIZE);
@@ -529,7 +531,7 @@ static void fimc_src_set_size(struct fimc_context *ctx,
* for now, we support only ITU601 8 bit mode
*/
cfg = (EXYNOS_CISRCFMT_ITU601_8BIT |
- EXYNOS_CISRCFMT_SOURCEHSIZE(buf->buf.width) |
+ EXYNOS_CISRCFMT_SOURCEHSIZE(real_width) |
EXYNOS_CISRCFMT_SOURCEVSIZE(buf->buf.height));
fimc_write(ctx, cfg, EXYNOS_CISRCFMT);
@@ -842,12 +844,13 @@ static void fimc_set_scaler(struct fimc_context *ctx, struct fimc_scaler *sc)
static void fimc_dst_set_size(struct fimc_context *ctx,
struct exynos_drm_ipp_buffer *buf)
{
+ unsigned int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
u32 cfg, cfg_ext;
- DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", buf->buf.width, buf->buf.height);
+ DRM_DEBUG_KMS("hsize[%d]vsize[%d]\n", real_width, buf->buf.height);
/* original size */
- cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(buf->buf.width) |
+ cfg = (EXYNOS_ORGOSIZE_HORIZONTAL(real_width) |
EXYNOS_ORGOSIZE_VERTICAL(buf->buf.height));
fimc_write(ctx, cfg, EXYNOS_ORGOSIZE);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_fimd.c b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
index 01b1570d0c3a..b7f56935a46b 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_fimd.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_fimd.c
@@ -1192,6 +1192,8 @@ static int exynos_fimd_resume(struct device *dev)
static const struct dev_pm_ops exynos_fimd_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_fimd_suspend, exynos_fimd_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
struct platform_driver fimd_driver = {
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.c b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
index f68ef1b3a28c..f2481a2014bb 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.c
@@ -9,6 +9,7 @@
#include <linux/kernel.h>
#include <linux/clk.h>
+#include <linux/component.h>
#include <linux/err.h>
#include <linux/interrupt.h>
#include <linux/io.h>
@@ -190,7 +191,7 @@ struct g2d_buf_desc {
struct g2d_buf_info {
unsigned int map_nr;
enum g2d_reg_type reg_types[MAX_REG_TYPE_NR];
- unsigned long handles[MAX_REG_TYPE_NR];
+ void *obj[MAX_REG_TYPE_NR];
unsigned int types[MAX_REG_TYPE_NR];
struct g2d_buf_desc descs[MAX_REG_TYPE_NR];
};
@@ -237,7 +238,7 @@ struct g2d_data {
int irq;
struct workqueue_struct *g2d_workq;
struct work_struct runqueue_work;
- struct exynos_drm_subdrv subdrv;
+ struct drm_device *drm_dev;
unsigned long flags;
/* cmdlist */
@@ -268,14 +269,13 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
{
struct device *dev = g2d->dev;
struct g2d_cmdlist_node *node = g2d->cmdlist_node;
- struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
int nr;
int ret;
struct g2d_buf_info *buf_info;
g2d->cmdlist_dma_attrs = DMA_ATTR_WRITE_COMBINE;
- g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(subdrv->drm_dev),
+ g2d->cmdlist_pool_virt = dma_alloc_attrs(to_dma_dev(g2d->drm_dev),
G2D_CMDLIST_POOL_SIZE,
&g2d->cmdlist_pool, GFP_KERNEL,
g2d->cmdlist_dma_attrs);
@@ -308,7 +308,7 @@ static int g2d_init_cmdlist(struct g2d_data *g2d)
return 0;
err:
- dma_free_attrs(to_dma_dev(subdrv->drm_dev), G2D_CMDLIST_POOL_SIZE,
+ dma_free_attrs(to_dma_dev(g2d->drm_dev), G2D_CMDLIST_POOL_SIZE,
g2d->cmdlist_pool_virt,
g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
return ret;
@@ -316,12 +316,10 @@ err:
static void g2d_fini_cmdlist(struct g2d_data *g2d)
{
- struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
-
kfree(g2d->cmdlist_node);
if (g2d->cmdlist_pool_virt && g2d->cmdlist_pool) {
- dma_free_attrs(to_dma_dev(subdrv->drm_dev),
+ dma_free_attrs(to_dma_dev(g2d->drm_dev),
G2D_CMDLIST_POOL_SIZE,
g2d->cmdlist_pool_virt,
g2d->cmdlist_pool, g2d->cmdlist_dma_attrs);
@@ -355,32 +353,31 @@ static void g2d_put_cmdlist(struct g2d_data *g2d, struct g2d_cmdlist_node *node)
mutex_unlock(&g2d->cmdlist_mutex);
}
-static void g2d_add_cmdlist_to_inuse(struct exynos_drm_g2d_private *g2d_priv,
+static void g2d_add_cmdlist_to_inuse(struct drm_exynos_file_private *file_priv,
struct g2d_cmdlist_node *node)
{
struct g2d_cmdlist_node *lnode;
- if (list_empty(&g2d_priv->inuse_cmdlist))
+ if (list_empty(&file_priv->inuse_cmdlist))
goto add_to_list;
/* this links to base address of new cmdlist */
- lnode = list_entry(g2d_priv->inuse_cmdlist.prev,
+ lnode = list_entry(file_priv->inuse_cmdlist.prev,
struct g2d_cmdlist_node, list);
lnode->cmdlist->data[lnode->cmdlist->last] = node->dma_addr;
add_to_list:
- list_add_tail(&node->list, &g2d_priv->inuse_cmdlist);
+ list_add_tail(&node->list, &file_priv->inuse_cmdlist);
if (node->event)
- list_add_tail(&node->event->base.link, &g2d_priv->event_list);
+ list_add_tail(&node->event->base.link, &file_priv->event_list);
}
-static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
- unsigned long obj,
+static void g2d_userptr_put_dma_addr(struct g2d_data *g2d,
+ void *obj,
bool force)
{
- struct g2d_cmdlist_userptr *g2d_userptr =
- (struct g2d_cmdlist_userptr *)obj;
+ struct g2d_cmdlist_userptr *g2d_userptr = obj;
struct page **pages;
if (!obj)
@@ -398,7 +395,7 @@ static void g2d_userptr_put_dma_addr(struct drm_device *drm_dev,
return;
out:
- dma_unmap_sg(to_dma_dev(drm_dev), g2d_userptr->sgt->sgl,
+ dma_unmap_sg(to_dma_dev(g2d->drm_dev), g2d_userptr->sgt->sgl,
g2d_userptr->sgt->nents, DMA_BIDIRECTIONAL);
pages = frame_vector_pages(g2d_userptr->vec);
@@ -419,16 +416,14 @@ out:
kfree(g2d_userptr);
}
-static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
+static dma_addr_t *g2d_userptr_get_dma_addr(struct g2d_data *g2d,
unsigned long userptr,
unsigned long size,
struct drm_file *filp,
- unsigned long *obj)
+ void **obj)
{
struct drm_exynos_file_private *file_priv = filp->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
struct g2d_cmdlist_userptr *g2d_userptr;
- struct g2d_data *g2d;
struct sg_table *sgt;
unsigned long start, end;
unsigned int npages, offset;
@@ -439,10 +434,8 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
return ERR_PTR(-EINVAL);
}
- g2d = dev_get_drvdata(g2d_priv->dev);
-
/* check if userptr already exists in userptr_list. */
- list_for_each_entry(g2d_userptr, &g2d_priv->userptr_list, list) {
+ list_for_each_entry(g2d_userptr, &file_priv->userptr_list, list) {
if (g2d_userptr->userptr == userptr) {
/*
* also check size because there could be same address
@@ -450,7 +443,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
*/
if (g2d_userptr->size == size) {
atomic_inc(&g2d_userptr->refcount);
- *obj = (unsigned long)g2d_userptr;
+ *obj = g2d_userptr;
return &g2d_userptr->dma_addr;
}
@@ -517,7 +510,7 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
g2d_userptr->sgt = sgt;
- if (!dma_map_sg(to_dma_dev(drm_dev), sgt->sgl, sgt->nents,
+ if (!dma_map_sg(to_dma_dev(g2d->drm_dev), sgt->sgl, sgt->nents,
DMA_BIDIRECTIONAL)) {
DRM_ERROR("failed to map sgt with dma region.\n");
ret = -ENOMEM;
@@ -527,14 +520,14 @@ static dma_addr_t *g2d_userptr_get_dma_addr(struct drm_device *drm_dev,
g2d_userptr->dma_addr = sgt->sgl[0].dma_address;
g2d_userptr->userptr = userptr;
- list_add_tail(&g2d_userptr->list, &g2d_priv->userptr_list);
+ list_add_tail(&g2d_userptr->list, &file_priv->userptr_list);
if (g2d->current_pool + (npages << PAGE_SHIFT) < g2d->max_pool) {
g2d->current_pool += npages << PAGE_SHIFT;
g2d_userptr->in_pool = true;
}
- *obj = (unsigned long)g2d_userptr;
+ *obj = g2d_userptr;
return &g2d_userptr->dma_addr;
@@ -556,19 +549,14 @@ err_free:
return ERR_PTR(ret);
}
-static void g2d_userptr_free_all(struct drm_device *drm_dev,
- struct g2d_data *g2d,
- struct drm_file *filp)
+static void g2d_userptr_free_all(struct g2d_data *g2d, struct drm_file *filp)
{
struct drm_exynos_file_private *file_priv = filp->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
struct g2d_cmdlist_userptr *g2d_userptr, *n;
- list_for_each_entry_safe(g2d_userptr, n, &g2d_priv->userptr_list, list)
+ list_for_each_entry_safe(g2d_userptr, n, &file_priv->userptr_list, list)
if (g2d_userptr->in_pool)
- g2d_userptr_put_dma_addr(drm_dev,
- (unsigned long)g2d_userptr,
- true);
+ g2d_userptr_put_dma_addr(g2d, g2d_userptr, true);
g2d->current_pool = 0;
}
@@ -723,26 +711,23 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
buf_desc = &buf_info->descs[reg_type];
if (buf_info->types[reg_type] == BUF_TYPE_GEM) {
- unsigned long size;
+ struct exynos_drm_gem *exynos_gem;
- size = exynos_drm_gem_get_size(drm_dev, handle, file);
- if (!size) {
+ exynos_gem = exynos_drm_gem_get(file, handle);
+ if (!exynos_gem) {
ret = -EFAULT;
goto err;
}
- if (!g2d_check_buf_desc_is_valid(buf_desc, reg_type,
- size)) {
+ if (!g2d_check_buf_desc_is_valid(buf_desc,
+ reg_type, exynos_gem->size)) {
+ exynos_drm_gem_put(exynos_gem);
ret = -EFAULT;
goto err;
}
- addr = exynos_drm_gem_get_dma_addr(drm_dev, handle,
- file);
- if (IS_ERR(addr)) {
- ret = -EFAULT;
- goto err;
- }
+ addr = &exynos_gem->dma_addr;
+ buf_info->obj[reg_type] = exynos_gem;
} else {
struct drm_exynos_g2d_userptr g2d_userptr;
@@ -758,11 +743,11 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
goto err;
}
- addr = g2d_userptr_get_dma_addr(drm_dev,
+ addr = g2d_userptr_get_dma_addr(g2d,
g2d_userptr.userptr,
g2d_userptr.size,
file,
- &handle);
+ &buf_info->obj[reg_type]);
if (IS_ERR(addr)) {
ret = -EFAULT;
goto err;
@@ -771,7 +756,6 @@ static int g2d_map_cmdlist_gem(struct g2d_data *g2d,
cmdlist->data[reg_pos + 1] = *addr;
buf_info->reg_types[i] = reg_type;
- buf_info->handles[reg_type] = handle;
}
return 0;
@@ -785,29 +769,26 @@ static void g2d_unmap_cmdlist_gem(struct g2d_data *g2d,
struct g2d_cmdlist_node *node,
struct drm_file *filp)
{
- struct exynos_drm_subdrv *subdrv = &g2d->subdrv;
struct g2d_buf_info *buf_info = &node->buf_info;
int i;
for (i = 0; i < buf_info->map_nr; i++) {
struct g2d_buf_desc *buf_desc;
enum g2d_reg_type reg_type;
- unsigned long handle;
+ void *obj;
reg_type = buf_info->reg_types[i];
buf_desc = &buf_info->descs[reg_type];
- handle = buf_info->handles[reg_type];
+ obj = buf_info->obj[reg_type];
if (buf_info->types[reg_type] == BUF_TYPE_GEM)
- exynos_drm_gem_put_dma_addr(subdrv->drm_dev, handle,
- filp);
+ exynos_drm_gem_put(obj);
else
- g2d_userptr_put_dma_addr(subdrv->drm_dev, handle,
- false);
+ g2d_userptr_put_dma_addr(g2d, obj, false);
buf_info->reg_types[i] = REG_TYPE_NONE;
- buf_info->handles[reg_type] = 0;
+ buf_info->obj[reg_type] = NULL;
buf_info->types[reg_type] = 0;
memset(buf_desc, 0x00, sizeof(*buf_desc));
}
@@ -922,7 +903,7 @@ static void g2d_runqueue_worker(struct work_struct *work)
static void g2d_finish_event(struct g2d_data *g2d, u32 cmdlist_no)
{
- struct drm_device *drm_dev = g2d->subdrv.drm_dev;
+ struct drm_device *drm_dev = g2d->drm_dev;
struct g2d_runqueue_node *runqueue_node = g2d->runqueue_node;
struct drm_exynos_pending_g2d_event *e;
struct timespec64 now;
@@ -1031,7 +1012,7 @@ out:
mutex_unlock(&g2d->runqueue_mutex);
}
-static int g2d_check_reg_offset(struct device *dev,
+static int g2d_check_reg_offset(struct g2d_data *g2d,
struct g2d_cmdlist_node *node,
int nr, bool for_addr)
{
@@ -1131,7 +1112,7 @@ static int g2d_check_reg_offset(struct device *dev,
return 0;
err:
- dev_err(dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
+ dev_err(g2d->dev, "Bad register offset: 0x%lx\n", cmdlist->data[index]);
return -EINVAL;
}
@@ -1139,23 +1120,8 @@ err:
int exynos_g2d_get_ver_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
- struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
- struct device *dev;
- struct g2d_data *g2d;
struct drm_exynos_g2d_get_ver *ver = data;
- if (!g2d_priv)
- return -ENODEV;
-
- dev = g2d_priv->dev;
- if (!dev)
- return -ENODEV;
-
- g2d = dev_get_drvdata(dev);
- if (!g2d)
- return -EFAULT;
-
ver->major = G2D_HW_MAJOR_VER;
ver->minor = G2D_HW_MINOR_VER;
@@ -1166,9 +1132,8 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
- struct device *dev;
- struct g2d_data *g2d;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+ struct g2d_data *g2d = dev_get_drvdata(priv->g2d_dev);
struct drm_exynos_g2d_set_cmdlist *req = data;
struct drm_exynos_g2d_cmd *cmd;
struct drm_exynos_pending_g2d_event *e;
@@ -1177,17 +1142,6 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
int size;
int ret;
- if (!g2d_priv)
- return -ENODEV;
-
- dev = g2d_priv->dev;
- if (!dev)
- return -ENODEV;
-
- g2d = dev_get_drvdata(dev);
- if (!g2d)
- return -EFAULT;
-
node = g2d_get_cmdlist(g2d);
if (!node)
return -ENOMEM;
@@ -1199,7 +1153,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
*/
if (req->cmd_nr > G2D_CMDLIST_DATA_NUM ||
req->cmd_buf_nr > G2D_CMDLIST_DATA_NUM) {
- dev_err(dev, "number of submitted G2D commands exceeds limit\n");
+ dev_err(g2d->dev, "number of submitted G2D commands exceeds limit\n");
return -EINVAL;
}
@@ -1267,7 +1221,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
*/
size = cmdlist->last + req->cmd_nr * 2 + req->cmd_buf_nr * 2 + 2;
if (size > G2D_CMDLIST_DATA_NUM) {
- dev_err(dev, "cmdlist size is too big\n");
+ dev_err(g2d->dev, "cmdlist size is too big\n");
ret = -EINVAL;
goto err_free_event;
}
@@ -1282,7 +1236,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
}
cmdlist->last += req->cmd_nr * 2;
- ret = g2d_check_reg_offset(dev, node, req->cmd_nr, false);
+ ret = g2d_check_reg_offset(g2d, node, req->cmd_nr, false);
if (ret < 0)
goto err_free_event;
@@ -1301,7 +1255,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
}
cmdlist->last += req->cmd_buf_nr * 2;
- ret = g2d_check_reg_offset(dev, node, req->cmd_buf_nr, true);
+ ret = g2d_check_reg_offset(g2d, node, req->cmd_buf_nr, true);
if (ret < 0)
goto err_free_event;
@@ -1319,7 +1273,7 @@ int exynos_g2d_set_cmdlist_ioctl(struct drm_device *drm_dev, void *data,
/* tail */
cmdlist->data[cmdlist->last] = 0;
- g2d_add_cmdlist_to_inuse(g2d_priv, node);
+ g2d_add_cmdlist_to_inuse(file_priv, node);
return 0;
@@ -1337,25 +1291,13 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
- struct device *dev;
- struct g2d_data *g2d;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+ struct g2d_data *g2d = dev_get_drvdata(priv->g2d_dev);
struct drm_exynos_g2d_exec *req = data;
struct g2d_runqueue_node *runqueue_node;
struct list_head *run_cmdlist;
struct list_head *event_list;
- if (!g2d_priv)
- return -ENODEV;
-
- dev = g2d_priv->dev;
- if (!dev)
- return -ENODEV;
-
- g2d = dev_get_drvdata(dev);
- if (!g2d)
- return -EFAULT;
-
runqueue_node = kmem_cache_alloc(g2d->runqueue_slab, GFP_KERNEL);
if (!runqueue_node)
return -ENOMEM;
@@ -1367,11 +1309,11 @@ int exynos_g2d_exec_ioctl(struct drm_device *drm_dev, void *data,
init_completion(&runqueue_node->complete);
runqueue_node->async = req->async;
- list_splice_init(&g2d_priv->inuse_cmdlist, run_cmdlist);
- list_splice_init(&g2d_priv->event_list, event_list);
+ list_splice_init(&file_priv->inuse_cmdlist, run_cmdlist);
+ list_splice_init(&file_priv->event_list, event_list);
if (list_empty(run_cmdlist)) {
- dev_err(dev, "there is no inuse cmdlist\n");
+ dev_err(g2d->dev, "there is no inuse cmdlist\n");
kmem_cache_free(g2d->runqueue_slab, runqueue_node);
return -EPERM;
}
@@ -1395,71 +1337,28 @@ out:
return 0;
}
-static int g2d_subdrv_probe(struct drm_device *drm_dev, struct device *dev)
-{
- struct g2d_data *g2d;
- int ret;
-
- g2d = dev_get_drvdata(dev);
- if (!g2d)
- return -EFAULT;
-
- /* allocate dma-aware cmdlist buffer. */
- ret = g2d_init_cmdlist(g2d);
- if (ret < 0) {
- dev_err(dev, "cmdlist init failed\n");
- return ret;
- }
-
- ret = drm_iommu_attach_device(drm_dev, dev);
- if (ret < 0) {
- dev_err(dev, "failed to enable iommu.\n");
- g2d_fini_cmdlist(g2d);
- }
-
- return ret;
-
-}
-
-static void g2d_subdrv_remove(struct drm_device *drm_dev, struct device *dev)
-{
- drm_iommu_detach_device(drm_dev, dev);
-}
-
-static int g2d_open(struct drm_device *drm_dev, struct device *dev,
- struct drm_file *file)
+int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv;
-
- g2d_priv = kzalloc(sizeof(*g2d_priv), GFP_KERNEL);
- if (!g2d_priv)
- return -ENOMEM;
- g2d_priv->dev = dev;
- file_priv->g2d_priv = g2d_priv;
-
- INIT_LIST_HEAD(&g2d_priv->inuse_cmdlist);
- INIT_LIST_HEAD(&g2d_priv->event_list);
- INIT_LIST_HEAD(&g2d_priv->userptr_list);
+ INIT_LIST_HEAD(&file_priv->inuse_cmdlist);
+ INIT_LIST_HEAD(&file_priv->event_list);
+ INIT_LIST_HEAD(&file_priv->userptr_list);
return 0;
}
-static void g2d_close(struct drm_device *drm_dev, struct device *dev,
- struct drm_file *file)
+void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
{
struct drm_exynos_file_private *file_priv = file->driver_priv;
- struct exynos_drm_g2d_private *g2d_priv = file_priv->g2d_priv;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
struct g2d_data *g2d;
struct g2d_cmdlist_node *node, *n;
- if (!dev)
+ if (!priv->g2d_dev)
return;
- g2d = dev_get_drvdata(dev);
- if (!g2d)
- return;
+ g2d = dev_get_drvdata(priv->g2d_dev);
/* Remove the runqueue nodes that belong to us. */
mutex_lock(&g2d->runqueue_mutex);
@@ -1480,24 +1379,70 @@ static void g2d_close(struct drm_device *drm_dev, struct device *dev,
* Properly unmap these buffers here.
*/
mutex_lock(&g2d->cmdlist_mutex);
- list_for_each_entry_safe(node, n, &g2d_priv->inuse_cmdlist, list) {
+ list_for_each_entry_safe(node, n, &file_priv->inuse_cmdlist, list) {
g2d_unmap_cmdlist_gem(g2d, node, file);
list_move_tail(&node->list, &g2d->free_cmdlist);
}
mutex_unlock(&g2d->cmdlist_mutex);
/* release all g2d_userptr in pool. */
- g2d_userptr_free_all(drm_dev, g2d, file);
+ g2d_userptr_free_all(g2d, file);
+}
+
+static int g2d_bind(struct device *dev, struct device *master, void *data)
+{
+ struct g2d_data *g2d = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+ int ret;
+
+ g2d->drm_dev = drm_dev;
- kfree(file_priv->g2d_priv);
+ /* allocate dma-aware cmdlist buffer. */
+ ret = g2d_init_cmdlist(g2d);
+ if (ret < 0) {
+ dev_err(dev, "cmdlist init failed\n");
+ return ret;
+ }
+
+ ret = drm_iommu_attach_device(drm_dev, dev);
+ if (ret < 0) {
+ dev_err(dev, "failed to enable iommu.\n");
+ g2d_fini_cmdlist(g2d);
+ return ret;
+ }
+ priv->g2d_dev = dev;
+
+ dev_info(dev, "The Exynos G2D (ver %d.%d) successfully registered.\n",
+ G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);
+ return 0;
+}
+
+static void g2d_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct g2d_data *g2d = dev_get_drvdata(dev);
+ struct drm_device *drm_dev = data;
+ struct exynos_drm_private *priv = drm_dev->dev_private;
+
+ /* Suspend operation and wait for engine idle. */
+ set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
+ g2d_wait_finish(g2d, NULL);
+ priv->g2d_dev = NULL;
+
+ cancel_work_sync(&g2d->runqueue_work);
+ drm_iommu_detach_device(g2d->drm_dev, dev);
}
+static const struct component_ops g2d_component_ops = {
+ .bind = g2d_bind,
+ .unbind = g2d_unbind,
+};
+
static int g2d_probe(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
struct resource *res;
struct g2d_data *g2d;
- struct exynos_drm_subdrv *subdrv;
int ret;
g2d = devm_kzalloc(dev, sizeof(*g2d), GFP_KERNEL);
@@ -1564,22 +1509,12 @@ static int g2d_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, g2d);
- subdrv = &g2d->subdrv;
- subdrv->dev = dev;
- subdrv->probe = g2d_subdrv_probe;
- subdrv->remove = g2d_subdrv_remove;
- subdrv->open = g2d_open;
- subdrv->close = g2d_close;
-
- ret = exynos_drm_subdrv_register(subdrv);
+ ret = component_add(dev, &g2d_component_ops);
if (ret < 0) {
dev_err(dev, "failed to register drm g2d device\n");
goto err_put_clk;
}
- dev_info(dev, "The Exynos G2D (ver %d.%d) successfully probed.\n",
- G2D_HW_MAJOR_VER, G2D_HW_MINOR_VER);
-
return 0;
err_put_clk:
@@ -1595,12 +1530,7 @@ static int g2d_remove(struct platform_device *pdev)
{
struct g2d_data *g2d = platform_get_drvdata(pdev);
- /* Suspend operation and wait for engine idle. */
- set_bit(G2D_BIT_SUSPEND_RUNQUEUE, &g2d->flags);
- g2d_wait_finish(g2d, NULL);
-
- cancel_work_sync(&g2d->runqueue_work);
- exynos_drm_subdrv_unregister(&g2d->subdrv);
+ component_del(&pdev->dev, &g2d_component_ops);
/* There should be no locking needed here. */
g2d_remove_runqueue_nodes(g2d, NULL);
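This converts g2d from the exynos-private subdrv mechanism to the kernel's generic component framework: the platform driver registers a component at probe time, and the DRM master binds/unbinds all components from exynos_drm_bind()/exynos_drm_unbind(). The skeleton of the pattern (sketch, foo_* placeholders):

static int foo_bind(struct device *dev, struct device *master, void *data)
{
        struct drm_device *drm_dev = data;      /* supplied by the master */

        /* attach to the DRM device, claim resources */
        return 0;
}

static void foo_unbind(struct device *dev, struct device *master, void *data)
{
        /* quiesce the hardware and detach */
}

static const struct component_ops foo_component_ops = {
        .bind   = foo_bind,
        .unbind = foo_unbind,
};

static int foo_probe(struct platform_device *pdev)
{
        return component_add(&pdev->dev, &foo_component_ops);
}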
diff --git a/drivers/gpu/drm/exynos/exynos_drm_g2d.h b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
index 1a9c7ca8c15b..287b2ed8f178 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_g2d.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_g2d.h
@@ -14,6 +14,9 @@ extern int exynos_g2d_set_cmdlist_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
extern int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
+
+extern int g2d_open(struct drm_device *drm_dev, struct drm_file *file);
+extern void g2d_close(struct drm_device *drm_dev, struct drm_file *file);
#else
static inline int exynos_g2d_get_ver_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv)
@@ -33,4 +36,12 @@ static inline int exynos_g2d_exec_ioctl(struct drm_device *dev, void *data,
{
return -ENODEV;
}
+
+static inline int g2d_open(struct drm_device *drm_dev, struct drm_file *file)
+{
+ return 0;
+}
+
+static inline void g2d_close(struct drm_device *drm_dev, struct drm_file *file)
+{ }
#endif
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.c b/drivers/gpu/drm/exynos/exynos_drm_gem.c
index 6e1494fa71b4..34ace85feb68 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.c
@@ -143,7 +143,7 @@ static int exynos_drm_gem_handle_create(struct drm_gem_object *obj,
DRM_DEBUG_KMS("gem handle = 0x%x\n", *handle);
/* drop reference from allocate - handle holds it now. */
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return 0;
}
@@ -171,26 +171,6 @@ void exynos_drm_gem_destroy(struct exynos_drm_gem *exynos_gem)
kfree(exynos_gem);
}
-unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *file_priv)
-{
- struct exynos_drm_gem *exynos_gem;
- struct drm_gem_object *obj;
-
- obj = drm_gem_object_lookup(file_priv, gem_handle);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object.\n");
- return 0;
- }
-
- exynos_gem = to_exynos_gem(obj);
-
- drm_gem_object_unreference_unlocked(obj);
-
- return exynos_gem->size;
-}
-
static struct exynos_drm_gem *exynos_drm_gem_init(struct drm_device *dev,
unsigned long size)
{
@@ -299,43 +279,15 @@ int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
&args->offset);
}
-dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *filp)
-{
- struct exynos_drm_gem *exynos_gem;
- struct drm_gem_object *obj;
-
- obj = drm_gem_object_lookup(filp, gem_handle);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object.\n");
- return ERR_PTR(-EINVAL);
- }
-
- exynos_gem = to_exynos_gem(obj);
-
- return &exynos_gem->dma_addr;
-}
-
-void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *filp)
+struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
+ unsigned int gem_handle)
{
struct drm_gem_object *obj;
obj = drm_gem_object_lookup(filp, gem_handle);
- if (!obj) {
- DRM_ERROR("failed to lookup gem object.\n");
- return;
- }
-
- drm_gem_object_unreference_unlocked(obj);
-
- /*
- * decrease obj->refcount one more time because we has already
- * increased it at exynos_drm_gem_get_dma_addr().
- */
- drm_gem_object_unreference_unlocked(obj);
+ if (!obj)
+ return NULL;
+ return to_exynos_gem(obj);
}
static int exynos_drm_gem_mmap_buffer(struct exynos_drm_gem *exynos_gem,
@@ -383,7 +335,7 @@ int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
args->flags = exynos_gem->flags;
args->size = exynos_gem->size;
- drm_gem_object_unreference_unlocked(obj);
+ drm_gem_object_put_unlocked(obj);
return 0;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gem.h b/drivers/gpu/drm/exynos/exynos_drm_gem.h
index 9057d7f1d6ed..d46a62c30812 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gem.h
+++ b/drivers/gpu/drm/exynos/exynos_drm_gem.h
@@ -77,32 +77,26 @@ int exynos_drm_gem_map_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
/*
- * get dma address from gem handle and this function could be used for
+ * get exynos drm object from gem handle, this function could be used for
* other drivers such as 2d/3d acceleration drivers.
* with this function call, gem object reference count would be increased.
*/
-dma_addr_t *exynos_drm_gem_get_dma_addr(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *filp);
+struct exynos_drm_gem *exynos_drm_gem_get(struct drm_file *filp,
+ unsigned int gem_handle);
/*
- * put dma address from gem handle and this function could be used for
- * other drivers such as 2d/3d acceleration drivers.
- * with this function call, gem object reference count would be decreased.
+ * put exynos drm object acquired from exynos_drm_gem_get(),
+ * gem object reference count would be decreased.
*/
-void exynos_drm_gem_put_dma_addr(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *filp);
+static inline void exynos_drm_gem_put(struct exynos_drm_gem *exynos_gem)
+{
+ drm_gem_object_put_unlocked(&exynos_gem->base);
+}
/* get buffer information to memory region allocated by gem. */
int exynos_drm_gem_get_ioctl(struct drm_device *dev, void *data,
struct drm_file *file_priv);
-/* get buffer size to gem handle. */
-unsigned long exynos_drm_gem_get_size(struct drm_device *dev,
- unsigned int gem_handle,
- struct drm_file *file_priv);
-
/* free gem object. */
void exynos_drm_gem_free_object(struct drm_gem_object *obj);
diff --git a/drivers/gpu/drm/exynos/exynos_drm_gsc.c b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
index 35ac66730563..7ba414b52faa 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_gsc.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_gsc.c
@@ -492,21 +492,25 @@ static void gsc_src_set_fmt(struct gsc_context *ctx, u32 fmt)
GSC_IN_CHROMA_ORDER_CRCB);
break;
case DRM_FORMAT_NV21:
+ cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_2P);
+ break;
case DRM_FORMAT_NV61:
- cfg |= (GSC_IN_CHROMA_ORDER_CRCB |
- GSC_IN_YUV420_2P);
+ cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV422_2P);
break;
case DRM_FORMAT_YUV422:
cfg |= GSC_IN_YUV422_3P;
break;
case DRM_FORMAT_YUV420:
+ cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_3P);
+ break;
case DRM_FORMAT_YVU420:
- cfg |= GSC_IN_YUV420_3P;
+ cfg |= (GSC_IN_CHROMA_ORDER_CRCB | GSC_IN_YUV420_3P);
break;
case DRM_FORMAT_NV12:
+ cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV420_2P);
+ break;
case DRM_FORMAT_NV16:
- cfg |= (GSC_IN_CHROMA_ORDER_CBCR |
- GSC_IN_YUV420_2P);
+ cfg |= (GSC_IN_CHROMA_ORDER_CBCR | GSC_IN_YUV422_2P);
break;
}
@@ -523,30 +527,30 @@ static void gsc_src_set_transf(struct gsc_context *ctx, unsigned int rotation)
switch (degree) {
case DRM_MODE_ROTATE_0:
- if (rotation & DRM_MODE_REFLECT_Y)
- cfg |= GSC_IN_ROT_XFLIP;
if (rotation & DRM_MODE_REFLECT_X)
+ cfg |= GSC_IN_ROT_XFLIP;
+ if (rotation & DRM_MODE_REFLECT_Y)
cfg |= GSC_IN_ROT_YFLIP;
break;
case DRM_MODE_ROTATE_90:
cfg |= GSC_IN_ROT_90;
- if (rotation & DRM_MODE_REFLECT_Y)
- cfg |= GSC_IN_ROT_XFLIP;
if (rotation & DRM_MODE_REFLECT_X)
+ cfg |= GSC_IN_ROT_XFLIP;
+ if (rotation & DRM_MODE_REFLECT_Y)
cfg |= GSC_IN_ROT_YFLIP;
break;
case DRM_MODE_ROTATE_180:
cfg |= GSC_IN_ROT_180;
- if (rotation & DRM_MODE_REFLECT_Y)
- cfg &= ~GSC_IN_ROT_XFLIP;
if (rotation & DRM_MODE_REFLECT_X)
+ cfg &= ~GSC_IN_ROT_XFLIP;
+ if (rotation & DRM_MODE_REFLECT_Y)
cfg &= ~GSC_IN_ROT_YFLIP;
break;
case DRM_MODE_ROTATE_270:
cfg |= GSC_IN_ROT_270;
- if (rotation & DRM_MODE_REFLECT_Y)
- cfg &= ~GSC_IN_ROT_XFLIP;
if (rotation & DRM_MODE_REFLECT_X)
+ cfg &= ~GSC_IN_ROT_XFLIP;
+ if (rotation & DRM_MODE_REFLECT_Y)
cfg &= ~GSC_IN_ROT_YFLIP;
break;
}
@@ -577,7 +581,7 @@ static void gsc_src_set_size(struct gsc_context *ctx,
cfg &= ~(GSC_SRCIMG_HEIGHT_MASK |
GSC_SRCIMG_WIDTH_MASK);
- cfg |= (GSC_SRCIMG_WIDTH(buf->buf.width) |
+ cfg |= (GSC_SRCIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) |
GSC_SRCIMG_HEIGHT(buf->buf.height));
gsc_write(cfg, GSC_SRCIMG_SIZE);
@@ -672,18 +676,25 @@ static void gsc_dst_set_fmt(struct gsc_context *ctx, u32 fmt)
GSC_OUT_CHROMA_ORDER_CRCB);
break;
case DRM_FORMAT_NV21:
- case DRM_FORMAT_NV61:
cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_2P);
break;
+ case DRM_FORMAT_NV61:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV422_2P);
+ break;
case DRM_FORMAT_YUV422:
+ cfg |= GSC_OUT_YUV422_3P;
+ break;
case DRM_FORMAT_YUV420:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_3P);
+ break;
case DRM_FORMAT_YVU420:
- cfg |= GSC_OUT_YUV420_3P;
+ cfg |= (GSC_OUT_CHROMA_ORDER_CRCB | GSC_OUT_YUV420_3P);
break;
case DRM_FORMAT_NV12:
+ cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV420_2P);
+ break;
case DRM_FORMAT_NV16:
- cfg |= (GSC_OUT_CHROMA_ORDER_CBCR |
- GSC_OUT_YUV420_2P);
+ cfg |= (GSC_OUT_CHROMA_ORDER_CBCR | GSC_OUT_YUV422_2P);
break;
}
@@ -868,7 +879,7 @@ static void gsc_dst_set_size(struct gsc_context *ctx,
/* original size */
cfg = gsc_read(GSC_DSTIMG_SIZE);
cfg &= ~(GSC_DSTIMG_HEIGHT_MASK | GSC_DSTIMG_WIDTH_MASK);
- cfg |= GSC_DSTIMG_WIDTH(buf->buf.width) |
+ cfg |= GSC_DSTIMG_WIDTH(buf->buf.pitch[0] / buf->format->cpp[0]) |
GSC_DSTIMG_HEIGHT(buf->buf.height);
gsc_write(cfg, GSC_DSTIMG_SIZE);
@@ -1341,7 +1352,7 @@ static const struct drm_exynos_ipp_limit gsc_5420_limits[] = {
};
static const struct drm_exynos_ipp_limit gsc_5433_limits[] = {
- { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 2 }, .v = { 16, 8191, 2 }) },
+ { IPP_SIZE_LIMIT(BUFFER, .h = { 32, 8191, 16 }, .v = { 16, 8191, 2 }) },
{ IPP_SIZE_LIMIT(AREA, .h = { 16, 4800, 1 }, .v = { 8, 3344, 1 }) },
{ IPP_SIZE_LIMIT(ROTATED, .h = { 32, 2047 }, .v = { 8, 8191 }) },
{ IPP_SCALE_LIMIT(.h = { (1 << 16) / 16, (1 << 16) * 8 },
diff --git a/drivers/gpu/drm/exynos/exynos_drm_ipp.c b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
index 26374e58c557..23226a0212e8 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_ipp.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_ipp.c
@@ -345,39 +345,18 @@ static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf,
int ret = 0;
int i;
- /* basic checks */
- if (buf->buf.width == 0 || buf->buf.height == 0)
- return -EINVAL;
- buf->format = drm_format_info(buf->buf.fourcc);
- for (i = 0; i < buf->format->num_planes; i++) {
- unsigned int width = (i == 0) ? buf->buf.width :
- DIV_ROUND_UP(buf->buf.width, buf->format->hsub);
-
- if (buf->buf.pitch[i] == 0)
- buf->buf.pitch[i] = width * buf->format->cpp[i];
- if (buf->buf.pitch[i] < width * buf->format->cpp[i])
- return -EINVAL;
- if (!buf->buf.gem_id[i])
- return -ENOENT;
- }
-
- /* pitch for additional planes must match */
- if (buf->format->num_planes > 2 &&
- buf->buf.pitch[1] != buf->buf.pitch[2])
- return -EINVAL;
-
/* get GEM buffers and check their size */
for (i = 0; i < buf->format->num_planes; i++) {
unsigned int height = (i == 0) ? buf->buf.height :
DIV_ROUND_UP(buf->buf.height, buf->format->vsub);
unsigned long size = height * buf->buf.pitch[i];
- struct drm_gem_object *obj = drm_gem_object_lookup(filp,
+ struct exynos_drm_gem *gem = exynos_drm_gem_get(filp,
buf->buf.gem_id[i]);
- if (!obj) {
+ if (!gem) {
ret = -ENOENT;
goto gem_free;
}
- buf->exynos_gem[i] = to_exynos_gem(obj);
+ buf->exynos_gem[i] = gem;
if (size + buf->buf.offset[i] > buf->exynos_gem[i]->size) {
i++;
@@ -391,7 +370,7 @@ static int exynos_drm_ipp_task_setup_buffer(struct exynos_drm_ipp_buffer *buf,
return 0;
gem_free:
while (i--) {
- drm_gem_object_put_unlocked(&buf->exynos_gem[i]->base);
+ exynos_drm_gem_put(buf->exynos_gem[i]);
buf->exynos_gem[i] = NULL;
}
return ret;
@@ -404,7 +383,7 @@ static void exynos_drm_ipp_task_release_buf(struct exynos_drm_ipp_buffer *buf)
if (!buf->exynos_gem[0])
return;
for (i = 0; i < buf->format->num_planes; i++)
- drm_gem_object_put_unlocked(&buf->exynos_gem[i]->base);
+ exynos_drm_gem_put(buf->exynos_gem[i]);
}
static void exynos_drm_ipp_task_free(struct exynos_drm_ipp *ipp,
@@ -428,7 +407,7 @@ enum drm_ipp_size_id {
IPP_LIMIT_BUFFER, IPP_LIMIT_AREA, IPP_LIMIT_ROTATED, IPP_LIMIT_MAX
};
-static const enum drm_ipp_size_id limit_id_fallback[IPP_LIMIT_MAX][4] = {
+static const enum drm_exynos_ipp_limit_type limit_id_fallback[IPP_LIMIT_MAX][4] = {
[IPP_LIMIT_BUFFER] = { DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
[IPP_LIMIT_AREA] = { DRM_EXYNOS_IPP_LIMIT_SIZE_AREA,
DRM_EXYNOS_IPP_LIMIT_SIZE_BUFFER },
@@ -495,12 +474,13 @@ static int exynos_drm_ipp_check_size_limits(struct exynos_drm_ipp_buffer *buf,
enum drm_ipp_size_id id = rotate ? IPP_LIMIT_ROTATED : IPP_LIMIT_AREA;
struct drm_ipp_limit l;
struct drm_exynos_ipp_limit_val *lh = &l.h, *lv = &l.v;
+ int real_width = buf->buf.pitch[0] / buf->format->cpp[0];
if (!limits)
return 0;
__get_size_limit(limits, num_limits, IPP_LIMIT_BUFFER, &l);
- if (!__size_limit_check(buf->buf.width, &l.h) ||
+ if (!__size_limit_check(real_width, &l.h) ||
!__size_limit_check(buf->buf.height, &l.v))
return -EINVAL;
@@ -560,10 +540,62 @@ static int exynos_drm_ipp_check_scale_limits(
return 0;
}
+static int exynos_drm_ipp_check_format(struct exynos_drm_ipp_task *task,
+ struct exynos_drm_ipp_buffer *buf,
+ struct exynos_drm_ipp_buffer *src,
+ struct exynos_drm_ipp_buffer *dst,
+ bool rotate, bool swap)
+{
+ const struct exynos_drm_ipp_formats *fmt;
+ int ret, i;
+
+ fmt = __ipp_format_get(task->ipp, buf->buf.fourcc, buf->buf.modifier,
+ buf == src ? DRM_EXYNOS_IPP_FORMAT_SOURCE :
+ DRM_EXYNOS_IPP_FORMAT_DESTINATION);
+ if (!fmt) {
+ DRM_DEBUG_DRIVER("Task %pK: %s format not supported\n", task,
+ buf == src ? "src" : "dst");
+ return -EINVAL;
+ }
+
+ /* basic checks */
+ if (buf->buf.width == 0 || buf->buf.height == 0)
+ return -EINVAL;
+
+ buf->format = drm_format_info(buf->buf.fourcc);
+ for (i = 0; i < buf->format->num_planes; i++) {
+ unsigned int width = (i == 0) ? buf->buf.width :
+ DIV_ROUND_UP(buf->buf.width, buf->format->hsub);
+
+ if (buf->buf.pitch[i] == 0)
+ buf->buf.pitch[i] = width * buf->format->cpp[i];
+ if (buf->buf.pitch[i] < width * buf->format->cpp[i])
+ return -EINVAL;
+ if (!buf->buf.gem_id[i])
+ return -ENOENT;
+ }
+
+ /* pitch for additional planes must match */
+ if (buf->format->num_planes > 2 &&
+ buf->buf.pitch[1] != buf->buf.pitch[2])
+ return -EINVAL;
+
+ /* check driver limits */
+ ret = exynos_drm_ipp_check_size_limits(buf, fmt->limits,
+ fmt->num_limits,
+ rotate,
+ buf == dst ? swap : false);
+ if (ret)
+ return ret;
+ ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
+ fmt->limits,
+ fmt->num_limits, swap);
+ return ret;
+}
+
static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
{
struct exynos_drm_ipp *ipp = task->ipp;
- const struct exynos_drm_ipp_formats *src_fmt, *dst_fmt;
struct exynos_drm_ipp_buffer *src = &task->src, *dst = &task->dst;
unsigned int rotation = task->transform.rotation;
int ret = 0;
@@ -607,37 +639,11 @@ static int exynos_drm_ipp_task_check(struct exynos_drm_ipp_task *task)
return -EINVAL;
}
- src_fmt = __ipp_format_get(ipp, src->buf.fourcc, src->buf.modifier,
- DRM_EXYNOS_IPP_FORMAT_SOURCE);
- if (!src_fmt) {
- DRM_DEBUG_DRIVER("Task %pK: src format not supported\n", task);
- return -EINVAL;
- }
- ret = exynos_drm_ipp_check_size_limits(src, src_fmt->limits,
- src_fmt->num_limits,
- rotate, false);
- if (ret)
- return ret;
- ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
- src_fmt->limits,
- src_fmt->num_limits, swap);
+ ret = exynos_drm_ipp_check_format(task, src, src, dst, rotate, swap);
if (ret)
return ret;
- dst_fmt = __ipp_format_get(ipp, dst->buf.fourcc, dst->buf.modifier,
- DRM_EXYNOS_IPP_FORMAT_DESTINATION);
- if (!dst_fmt) {
- DRM_DEBUG_DRIVER("Task %pK: dst format not supported\n", task);
- return -EINVAL;
- }
- ret = exynos_drm_ipp_check_size_limits(dst, dst_fmt->limits,
- dst_fmt->num_limits,
- false, swap);
- if (ret)
- return ret;
- ret = exynos_drm_ipp_check_scale_limits(&src->rect, &dst->rect,
- dst_fmt->limits,
- dst_fmt->num_limits, swap);
+ ret = exynos_drm_ipp_check_format(task, dst, src, dst, false, swap);
if (ret)
return ret;
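The pitch-defaulting logic that moved into exynos_drm_ipp_check_format() derives a per-plane minimum from the format's cpp and horizontal subsampling. For a hypothetical 1920x1080 NV12 buffer (hsub = 2, cpp = {1, 2}):

unsigned int p0 = 1920 * 1;                     /* luma pitch:   1920 bytes */
unsigned int p1 = DIV_ROUND_UP(1920, 2) * 2;    /* chroma pitch: 1920 bytes */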
diff --git a/drivers/gpu/drm/exynos/exynos_drm_mic.c b/drivers/gpu/drm/exynos/exynos_drm_mic.c
index 2174814273e2..2fd299a58297 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_mic.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_mic.c
@@ -367,6 +367,8 @@ static int exynos_mic_resume(struct device *dev)
static const struct dev_pm_ops exynos_mic_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_mic_suspend, exynos_mic_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
static int exynos_mic_probe(struct platform_device *pdev)
diff --git a/drivers/gpu/drm/exynos/exynos_drm_plane.c b/drivers/gpu/drm/exynos/exynos_drm_plane.c
index eb9915da7dec..dba29aec59b4 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_plane.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_plane.c
@@ -132,7 +132,7 @@ static void exynos_drm_plane_reset(struct drm_plane *plane)
if (plane->state) {
exynos_state = to_exynos_plane_state(plane->state);
if (exynos_state->base.fb)
- drm_framebuffer_unreference(exynos_state->base.fb);
+ drm_framebuffer_put(exynos_state->base.fb);
kfree(exynos_state);
plane->state = NULL;
}
diff --git a/drivers/gpu/drm/exynos/exynos_drm_rotator.c b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
index 1a76dd3d52e1..a820a68429b9 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_rotator.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_rotator.c
@@ -168,9 +168,9 @@ static void rotator_dst_set_transf(struct rot_context *rot,
val &= ~ROT_CONTROL_FLIP_MASK;
if (rotation & DRM_MODE_REFLECT_X)
- val |= ROT_CONTROL_FLIP_HORIZONTAL;
- if (rotation & DRM_MODE_REFLECT_Y)
val |= ROT_CONTROL_FLIP_VERTICAL;
+ if (rotation & DRM_MODE_REFLECT_Y)
+ val |= ROT_CONTROL_FLIP_HORIZONTAL;
val &= ~ROT_CONTROL_ROT_MASK;
diff --git a/drivers/gpu/drm/exynos/exynos_drm_scaler.c b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
index 91d4382343d0..0ddb6eec7b11 100644
--- a/drivers/gpu/drm/exynos/exynos_drm_scaler.c
+++ b/drivers/gpu/drm/exynos/exynos_drm_scaler.c
@@ -30,6 +30,7 @@
#define scaler_write(cfg, offset) writel(cfg, scaler->regs + (offset))
#define SCALER_MAX_CLK 4
#define SCALER_AUTOSUSPEND_DELAY 2000
+#define SCALER_RESET_WAIT_RETRIES 100
struct scaler_data {
const char *clk_name[SCALER_MAX_CLK];
@@ -51,9 +52,9 @@ struct scaler_context {
static u32 scaler_get_format(u32 drm_fmt)
{
switch (drm_fmt) {
- case DRM_FORMAT_NV21:
- return SCALER_YUV420_2P_UV;
case DRM_FORMAT_NV12:
+ return SCALER_YUV420_2P_UV;
+ case DRM_FORMAT_NV21:
return SCALER_YUV420_2P_VU;
case DRM_FORMAT_YUV420:
return SCALER_YUV420_3P;
@@ -63,15 +64,15 @@ static u32 scaler_get_format(u32 drm_fmt)
return SCALER_YUV422_1P_UYVY;
case DRM_FORMAT_YVYU:
return SCALER_YUV422_1P_YVYU;
- case DRM_FORMAT_NV61:
- return SCALER_YUV422_2P_UV;
case DRM_FORMAT_NV16:
+ return SCALER_YUV422_2P_UV;
+ case DRM_FORMAT_NV61:
return SCALER_YUV422_2P_VU;
case DRM_FORMAT_YUV422:
return SCALER_YUV422_3P;
- case DRM_FORMAT_NV42:
- return SCALER_YUV444_2P_UV;
case DRM_FORMAT_NV24:
+ return SCALER_YUV444_2P_UV;
+ case DRM_FORMAT_NV42:
return SCALER_YUV444_2P_VU;
case DRM_FORMAT_YUV444:
return SCALER_YUV444_3P;
@@ -100,6 +101,23 @@ static u32 scaler_get_format(u32 drm_fmt)
return 0;
}
+static inline int scaler_reset(struct scaler_context *scaler)
+{
+ int retry = SCALER_RESET_WAIT_RETRIES;
+
+ scaler_write(SCALER_CFG_SOFT_RESET, SCALER_CFG);
+ do {
+ cpu_relax();
+ } while (--retry > 1 &&
+ scaler_read(SCALER_CFG) & SCALER_CFG_SOFT_RESET);
+ do {
+ cpu_relax();
+ scaler_write(1, SCALER_INT_EN);
+ } while (--retry > 0 && scaler_read(SCALER_INT_EN) != 1);
+
+ return retry ? 0 : -EIO;
+}
+
static inline void scaler_enable_int(struct scaler_context *scaler)
{
u32 val;
@@ -354,9 +372,13 @@ static int scaler_commit(struct exynos_drm_ipp *ipp,
u32 dst_fmt = scaler_get_format(task->dst.buf.fourcc);
struct drm_exynos_ipp_task_rect *dst_pos = &task->dst.rect;
- scaler->task = task;
-
pm_runtime_get_sync(scaler->dev);
+ if (scaler_reset(scaler)) {
+ pm_runtime_put(scaler->dev);
+ return -EIO;
+ }
+
+ scaler->task = task;
scaler_set_src_fmt(scaler, src_fmt);
scaler_set_src_base(scaler, &task->src);
@@ -394,7 +416,11 @@ static inline void scaler_disable_int(struct scaler_context *scaler)
static inline u32 scaler_get_int_status(struct scaler_context *scaler)
{
- return scaler_read(SCALER_INT_STATUS);
+ u32 val = scaler_read(SCALER_INT_STATUS);
+
+ scaler_write(val, SCALER_INT_STATUS);
+
+ return val;
}
static inline int scaler_task_done(u32 val)
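scaler_reset() bounds both busy-wait loops with a shared retry counter that is decremented on every pass, and fails the commit with -EIO rather than hanging if the engine never leaves reset. Reduced to its core, the idiom is (sketch; regs, REG_CFG and RESET_BIT are placeholders):

int retry = 100;

writel(RESET_BIT, regs + REG_CFG);
while (--retry > 0 && (readl(regs + REG_CFG) & RESET_BIT))
        cpu_relax();            /* poll the self-clearing reset bit */

if (!retry)
        return -EIO;            /* the hardware never came out of reset */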
diff --git a/drivers/gpu/drm/exynos/exynos_hdmi.c b/drivers/gpu/drm/exynos/exynos_hdmi.c
index 3a11c719a580..2092a650df7d 100644
--- a/drivers/gpu/drm/exynos/exynos_hdmi.c
+++ b/drivers/gpu/drm/exynos/exynos_hdmi.c
@@ -2093,6 +2093,8 @@ static int __maybe_unused exynos_hdmi_resume(struct device *dev)
static const struct dev_pm_ops exynos_hdmi_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_hdmi_suspend, exynos_hdmi_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
struct platform_driver hdmi_driver = {
diff --git a/drivers/gpu/drm/exynos/exynos_mixer.c b/drivers/gpu/drm/exynos/exynos_mixer.c
index 272c79f5f5bf..ffbf4a950f69 100644
--- a/drivers/gpu/drm/exynos/exynos_mixer.c
+++ b/drivers/gpu/drm/exynos/exynos_mixer.c
@@ -837,8 +837,6 @@ static int mixer_initialize(struct mixer_context *mixer_ctx,
struct drm_device *drm_dev)
{
int ret;
- struct exynos_drm_private *priv;
- priv = drm_dev->dev_private;
mixer_ctx->drm_dev = drm_dev;
@@ -1271,6 +1269,8 @@ static int __maybe_unused exynos_mixer_resume(struct device *dev)
static const struct dev_pm_ops exynos_mixer_pm_ops = {
SET_RUNTIME_PM_OPS(exynos_mixer_suspend, exynos_mixer_resume, NULL)
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend,
+ pm_runtime_force_resume)
};
struct platform_driver mixer_driver = {
diff --git a/drivers/gpu/drm/exynos/regs-gsc.h b/drivers/gpu/drm/exynos/regs-gsc.h
index 4704a993cbb7..16b39734115c 100644
--- a/drivers/gpu/drm/exynos/regs-gsc.h
+++ b/drivers/gpu/drm/exynos/regs-gsc.h
@@ -138,6 +138,7 @@
#define GSC_OUT_YUV420_3P (3 << 4)
#define GSC_OUT_YUV422_1P (4 << 4)
#define GSC_OUT_YUV422_2P (5 << 4)
+#define GSC_OUT_YUV422_3P (6 << 4)
#define GSC_OUT_YUV444 (7 << 4)
#define GSC_OUT_TILE_TYPE_MASK (1 << 2)
#define GSC_OUT_TILE_C_16x8 (0 << 2)
diff --git a/drivers/gpu/drm/gma500/framebuffer.c b/drivers/gpu/drm/gma500/framebuffer.c
index 2f00a37684a2..adefae58b5fc 100644
--- a/drivers/gpu/drm/gma500/framebuffer.c
+++ b/drivers/gpu/drm/gma500/framebuffer.c
@@ -108,7 +108,7 @@ static int psbfb_pan(struct fb_var_screeninfo *var, struct fb_info *info)
return 0;
}
-static int psbfb_vm_fault(struct vm_fault *vmf)
+static vm_fault_t psbfb_vm_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct psb_framebuffer *psbfb = vma->vm_private_data;
@@ -118,7 +118,7 @@ static int psbfb_vm_fault(struct vm_fault *vmf)
int page_num;
int i;
unsigned long address;
- int ret;
+ vm_fault_t ret = VM_FAULT_SIGBUS;
unsigned long pfn;
unsigned long phys_addr = (unsigned long)dev_priv->stolen_base +
gtt->offset;
@@ -131,18 +131,14 @@ static int psbfb_vm_fault(struct vm_fault *vmf)
for (i = 0; i < page_num; i++) {
pfn = (phys_addr >> PAGE_SHIFT);
- ret = vm_insert_mixed(vma, address,
+ ret = vmf_insert_mixed(vma, address,
__pfn_to_pfn_t(pfn, PFN_DEV));
- if (unlikely((ret == -EBUSY) || (ret != 0 && i > 0)))
+ if (unlikely(ret & VM_FAULT_ERROR))
break;
- else if (unlikely(ret != 0)) {
- ret = (ret == -ENOMEM) ? VM_FAULT_OOM : VM_FAULT_SIGBUS;
- return ret;
- }
address += PAGE_SIZE;
phys_addr += PAGE_SIZE;
}
- return VM_FAULT_NOPAGE;
+ return ret;
}
static void psbfb_vm_open(struct vm_area_struct *vma)
diff --git a/drivers/gpu/drm/gma500/gem.c b/drivers/gpu/drm/gma500/gem.c
index 913bf4c256fa..576f1b272f23 100644
--- a/drivers/gpu/drm/gma500/gem.c
+++ b/drivers/gpu/drm/gma500/gem.c
@@ -134,12 +134,13 @@ int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
* vma->vm_private_data points to the GEM object that is backing this
* mapping.
*/
-int psb_gem_fault(struct vm_fault *vmf)
+vm_fault_t psb_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj;
struct gtt_range *r;
- int ret;
+ int err;
+ vm_fault_t ret;
unsigned long pfn;
pgoff_t page_offset;
struct drm_device *dev;
@@ -158,9 +159,10 @@ int psb_gem_fault(struct vm_fault *vmf)
/* For now the mmap pins the object and it stays pinned. As things
stand that will do us no harm */
if (r->mmapping == 0) {
- ret = psb_gtt_pin(r);
- if (ret < 0) {
- dev_err(dev->dev, "gma500: pin failed: %d\n", ret);
+ err = psb_gtt_pin(r);
+ if (err < 0) {
+ dev_err(dev->dev, "gma500: pin failed: %d\n", err);
+ ret = vmf_error(err);
goto fail;
}
r->mmapping = 1;
@@ -175,18 +177,9 @@ int psb_gem_fault(struct vm_fault *vmf)
pfn = (dev_priv->stolen_base + r->offset) >> PAGE_SHIFT;
else
pfn = page_to_pfn(r->pages[page_offset]);
- ret = vm_insert_pfn(vma, vmf->address, pfn);
-
+ ret = vmf_insert_pfn(vma, vmf->address, pfn);
fail:
mutex_unlock(&dev_priv->mmap_mutex);
- switch (ret) {
- case 0:
- case -ERESTARTSYS:
- case -EINTR:
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- default:
- return VM_FAULT_SIGBUS;
- }
+
+ return ret;
}
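The gma500 fault handlers are converted to the vm_fault_t API: vmf_insert_pfn()/vmf_insert_mixed() already return VM_FAULT_* codes directly, and -errno results from helpers such as psb_gtt_pin() are translated with vmf_error() instead of the hand-rolled switch:

err = psb_gtt_pin(r);           /* 0 on success, -errno on failure */
if (err < 0)
        return vmf_error(err);  /* -ENOMEM -> VM_FAULT_OOM, else VM_FAULT_SIGBUS */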
diff --git a/drivers/gpu/drm/gma500/psb_drv.h b/drivers/gpu/drm/gma500/psb_drv.h
index e8300f509023..941b238bdcc9 100644
--- a/drivers/gpu/drm/gma500/psb_drv.h
+++ b/drivers/gpu/drm/gma500/psb_drv.h
@@ -21,9 +21,9 @@
#define _PSB_DRV_H_
#include <linux/kref.h>
+#include <linux/mm_types.h>
#include <drm/drmP.h>
-#include <drm/drm_global.h>
#include <drm/gma_drm.h>
#include "psb_reg.h"
#include "psb_intel_drv.h"
@@ -749,7 +749,7 @@ extern int psb_gem_get_aperture(struct drm_device *dev, void *data,
struct drm_file *file);
extern int psb_gem_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
-extern int psb_gem_fault(struct vm_fault *vmf);
+extern vm_fault_t psb_gem_fault(struct vm_fault *vmf);
/* psb_device.c */
extern const struct psb_ops psb_chip_ops;
diff --git a/drivers/gpu/drm/i2c/tda9950.c b/drivers/gpu/drm/i2c/tda9950.c
index 3f7396caad48..5d2f0d548469 100644
--- a/drivers/gpu/drm/i2c/tda9950.c
+++ b/drivers/gpu/drm/i2c/tda9950.c
@@ -76,9 +76,12 @@ struct tda9950_priv {
static int tda9950_write_range(struct i2c_client *client, u8 addr, u8 *p, int cnt)
{
struct i2c_msg msg;
- u8 buf[cnt + 1];
+ u8 buf[CEC_MAX_MSG_SIZE + 3];
int ret;
+ if (WARN_ON(cnt > sizeof(buf) - 1))
+ return -EINVAL;
+
buf[0] = addr;
memcpy(buf + 1, p, cnt);
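The variable-length array is replaced with a fixed worst-case buffer plus an explicit bound check, in line with the kernel-wide VLA removal (VLAs defeat static stack-depth analysis). The shape of the fix:

u8 buf[CEC_MAX_MSG_SIZE + 3];           /* worst case the callers can pass */

if (WARN_ON(cnt > sizeof(buf) - 1))     /* refuse oversized writes... */
        return -EINVAL;                 /* ...rather than overrun the stack */
buf[0] = addr;
memcpy(buf + 1, p, cnt);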
diff --git a/drivers/gpu/drm/i2c/tda998x_drv.c b/drivers/gpu/drm/i2c/tda998x_drv.c
index eecdc327b9f8..a7c39f39793f 100644
--- a/drivers/gpu/drm/i2c/tda998x_drv.c
+++ b/drivers/gpu/drm/i2c/tda998x_drv.c
@@ -69,6 +69,7 @@ struct tda998x_priv {
bool edid_delay_active;
struct drm_encoder encoder;
+ struct drm_bridge bridge;
struct drm_connector connector;
struct tda998x_audio_port audio_port[2];
@@ -79,9 +80,10 @@ struct tda998x_priv {
#define conn_to_tda998x_priv(x) \
container_of(x, struct tda998x_priv, connector)
-
#define enc_to_tda998x_priv(x) \
container_of(x, struct tda998x_priv, encoder)
+#define bridge_to_tda998x_priv(x) \
+ container_of(x, struct tda998x_priv, bridge)
/* The TDA9988 series of devices use a paged register scheme.. to simplify
* things we encode the page # in upper bits of the register #. To read/
@@ -762,7 +764,7 @@ static void tda998x_detect_work(struct work_struct *work)
{
struct tda998x_priv *priv =
container_of(work, struct tda998x_priv, detect_work);
- struct drm_device *dev = priv->encoder.dev;
+ struct drm_device *dev = priv->connector.dev;
if (dev)
drm_kms_helper_hotplug_event(dev);
@@ -1104,29 +1106,6 @@ static int tda998x_audio_codec_init(struct tda998x_priv *priv,
/* DRM connector functions */
-static int tda998x_connector_fill_modes(struct drm_connector *connector,
- uint32_t maxX, uint32_t maxY)
-{
- struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
- int ret;
-
- mutex_lock(&priv->audio_mutex);
- ret = drm_helper_probe_single_connector_modes(connector, maxX, maxY);
-
- if (connector->edid_blob_ptr) {
- struct edid *edid = (void *)connector->edid_blob_ptr->data;
-
- cec_notifier_set_phys_addr_from_edid(priv->cec_notify, edid);
-
- priv->sink_has_audio = drm_detect_monitor_audio(edid);
- } else {
- priv->sink_has_audio = false;
- }
- mutex_unlock(&priv->audio_mutex);
-
- return ret;
-}
-
static enum drm_connector_status
tda998x_connector_detect(struct drm_connector *connector, bool force)
{
@@ -1145,7 +1124,7 @@ static void tda998x_connector_destroy(struct drm_connector *connector)
static const struct drm_connector_funcs tda998x_connector_funcs = {
.dpms = drm_helper_connector_dpms,
.reset = drm_atomic_helper_connector_reset,
- .fill_modes = tda998x_connector_fill_modes,
+ .fill_modes = drm_helper_probe_single_connector_modes,
.detect = tda998x_connector_detect,
.destroy = tda998x_connector_destroy,
.atomic_duplicate_state = drm_atomic_helper_connector_duplicate_state,
@@ -1244,40 +1223,29 @@ static int tda998x_connector_get_modes(struct drm_connector *connector)
}
drm_connector_update_edid_property(connector, edid);
+ cec_notifier_set_phys_addr_from_edid(priv->cec_notify, edid);
+
+ mutex_lock(&priv->audio_mutex);
n = drm_add_edid_modes(connector, edid);
+ priv->sink_has_audio = drm_detect_monitor_audio(edid);
+ mutex_unlock(&priv->audio_mutex);
kfree(edid);
return n;
}
-static enum drm_mode_status tda998x_connector_mode_valid(struct drm_connector *connector,
- struct drm_display_mode *mode)
-{
- /* TDA19988 dotclock can go up to 165MHz */
- struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
-
- if (mode->clock > ((priv->rev == TDA19988) ? 165000 : 150000))
- return MODE_CLOCK_HIGH;
- if (mode->htotal >= BIT(13))
- return MODE_BAD_HVALUE;
- if (mode->vtotal >= BIT(11))
- return MODE_BAD_VVALUE;
- return MODE_OK;
-}
-
static struct drm_encoder *
tda998x_connector_best_encoder(struct drm_connector *connector)
{
struct tda998x_priv *priv = conn_to_tda998x_priv(connector);
- return &priv->encoder;
+ return priv->bridge.encoder;
}
static
const struct drm_connector_helper_funcs tda998x_connector_helper_funcs = {
.get_modes = tda998x_connector_get_modes,
- .mode_valid = tda998x_connector_mode_valid,
.best_encoder = tda998x_connector_best_encoder,
};
@@ -1301,25 +1269,48 @@ static int tda998x_connector_init(struct tda998x_priv *priv,
if (ret)
return ret;
- drm_connector_attach_encoder(&priv->connector, &priv->encoder);
+ drm_connector_attach_encoder(&priv->connector,
+ priv->bridge.encoder);
return 0;
}
-/* DRM encoder functions */
+/* DRM bridge functions */
-static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
+static int tda998x_bridge_attach(struct drm_bridge *bridge)
{
- struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
- bool on;
+ struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
- /* we only care about on or off: */
- on = mode == DRM_MODE_DPMS_ON;
+ return tda998x_connector_init(priv, bridge->dev);
+}
- if (on == priv->is_on)
- return;
+static void tda998x_bridge_detach(struct drm_bridge *bridge)
+{
+ struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
- if (on) {
+ drm_connector_cleanup(&priv->connector);
+}
+
+static enum drm_mode_status tda998x_bridge_mode_valid(struct drm_bridge *bridge,
+ const struct drm_display_mode *mode)
+{
+ /* TDA19988 dotclock can go up to 165MHz */
+ struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
+
+ if (mode->clock > ((priv->rev == TDA19988) ? 165000 : 150000))
+ return MODE_CLOCK_HIGH;
+ if (mode->htotal >= BIT(13))
+ return MODE_BAD_HVALUE;
+ if (mode->vtotal >= BIT(11))
+ return MODE_BAD_VVALUE;
+ return MODE_OK;
+}
+
+static void tda998x_bridge_enable(struct drm_bridge *bridge)
+{
+ struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
+
+ if (!priv->is_on) {
/* enable video ports, audio will be enabled later */
reg_write(priv, REG_ENA_VP_0, 0xff);
reg_write(priv, REG_ENA_VP_1, 0xff);
@@ -1330,7 +1321,14 @@ static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
reg_write(priv, REG_VIP_CNTRL_2, priv->vip_cntrl_2);
priv->is_on = true;
- } else {
+ }
+}
+
+static void tda998x_bridge_disable(struct drm_bridge *bridge)
+{
+ struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
+
+ if (priv->is_on) {
/* disable video ports */
reg_write(priv, REG_ENA_VP_0, 0x00);
reg_write(priv, REG_ENA_VP_1, 0x00);
@@ -1340,12 +1338,12 @@ static void tda998x_encoder_dpms(struct drm_encoder *encoder, int mode)
}
}
-static void
-tda998x_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+static void tda998x_bridge_mode_set(struct drm_bridge *bridge,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
- struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
+ struct tda998x_priv *priv = bridge_to_tda998x_priv(bridge);
+ unsigned long tmds_clock;
u16 ref_pix, ref_line, n_pix, n_line;
u16 hs_pix_s, hs_pix_e;
u16 vs1_pix_s, vs1_pix_e, vs1_line_s, vs1_line_e;
@@ -1416,12 +1414,19 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
(mode->vsync_end - mode->vsync_start)/2;
}
- div = 148500 / mode->clock;
- if (div != 0) {
- div--;
- if (div > 3)
- div = 3;
- }
+ tmds_clock = mode->clock;
+
+ /*
+ * The divisor is a power of 2. The TDA9983B datasheet gives
+ * this as ranges of Msample/s, which is 10x the TMDS clock:
+ * 0 - 800 to 1500 Msample/s
+ * 1 - 400 to 800 Msample/s
+ * 2 - 200 to 400 Msample/s
+ * 3 - same as 2 above
+ */
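+ /* e.g. 1080p at a 148500 kHz TMDS clock: 80000 >> 0 <= 148500, so div = 0 */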
+ for (div = 0; div < 3; div++)
+ if (80000 >> div <= tmds_clock)
+ break;
mutex_lock(&priv->audio_mutex);
@@ -1552,26 +1557,14 @@ tda998x_encoder_mode_set(struct drm_encoder *encoder,
mutex_unlock(&priv->audio_mutex);
}
-static void tda998x_destroy(struct tda998x_priv *priv)
-{
- /* disable all IRQs and free the IRQ handler */
- cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
- reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
-
- if (priv->audio_pdev)
- platform_device_unregister(priv->audio_pdev);
-
- if (priv->hdmi->irq)
- free_irq(priv->hdmi->irq, priv);
-
- del_timer_sync(&priv->edid_delay_timer);
- cancel_work_sync(&priv->detect_work);
-
- i2c_unregister_device(priv->cec);
-
- if (priv->cec_notify)
- cec_notifier_put(priv->cec_notify);
-}
+static const struct drm_bridge_funcs tda998x_bridge_funcs = {
+ .attach = tda998x_bridge_attach,
+ .detach = tda998x_bridge_detach,
+ .mode_valid = tda998x_bridge_mode_valid,
+ .disable = tda998x_bridge_disable,
+ .mode_set = tda998x_bridge_mode_set,
+ .enable = tda998x_bridge_enable,
+};
/* I2C driver functions */
@@ -1617,16 +1610,69 @@ static int tda998x_get_audio_ports(struct tda998x_priv *priv,
return 0;
}
-static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
+static void tda998x_set_config(struct tda998x_priv *priv,
+ const struct tda998x_encoder_params *p)
{
+ priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
+ (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
+ VIP_CNTRL_0_SWAP_B(p->swap_b) |
+ (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
+ priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
+ (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
+ VIP_CNTRL_1_SWAP_D(p->swap_d) |
+ (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
+ priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
+ (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
+ VIP_CNTRL_2_SWAP_F(p->swap_f) |
+ (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
+
+ priv->audio_params = p->audio_params;
+}
+
+static void tda998x_destroy(struct device *dev)
+{
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
+
+ drm_bridge_remove(&priv->bridge);
+
+ /* disable all IRQs and free the IRQ handler */
+ cec_write(priv, REG_CEC_RXSHPDINTENA, 0);
+ reg_clear(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
+
+ if (priv->audio_pdev)
+ platform_device_unregister(priv->audio_pdev);
+
+ if (priv->hdmi->irq)
+ free_irq(priv->hdmi->irq, priv);
+
+ del_timer_sync(&priv->edid_delay_timer);
+ cancel_work_sync(&priv->detect_work);
+
+ i2c_unregister_device(priv->cec);
+
+ if (priv->cec_notify)
+ cec_notifier_put(priv->cec_notify);
+}
+
+static int tda998x_create(struct device *dev)
+{
+ struct i2c_client *client = to_i2c_client(dev);
struct device_node *np = client->dev.of_node;
struct i2c_board_info cec_info;
+ struct tda998x_priv *priv;
u32 video;
int rev_lo, rev_hi, ret;
+ priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
+ if (!priv)
+ return -ENOMEM;
+
+ dev_set_drvdata(dev, priv);
+
mutex_init(&priv->mutex); /* protect the page access */
mutex_init(&priv->audio_mutex); /* protect access from audio thread */
mutex_init(&priv->edid_mutex);
+ INIT_LIST_HEAD(&priv->bridge.list);
init_waitqueue_head(&priv->edid_delay_waitq);
timer_setup(&priv->edid_delay_timer, tda998x_edid_delay_done, 0);
INIT_WORK(&priv->detect_work, tda998x_detect_work);
@@ -1649,13 +1695,13 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
/* read version: */
rev_lo = reg_read(priv, REG_VERSION_LSB);
if (rev_lo < 0) {
- dev_err(&client->dev, "failed to read version: %d\n", rev_lo);
+ dev_err(dev, "failed to read version: %d\n", rev_lo);
return rev_lo;
}
rev_hi = reg_read(priv, REG_VERSION_MSB);
if (rev_hi < 0) {
- dev_err(&client->dev, "failed to read version: %d\n", rev_hi);
+ dev_err(dev, "failed to read version: %d\n", rev_hi);
return rev_hi;
}
@@ -1666,20 +1712,19 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
switch (priv->rev) {
case TDA9989N2:
- dev_info(&client->dev, "found TDA9989 n2");
+ dev_info(dev, "found TDA9989 n2");
break;
case TDA19989:
- dev_info(&client->dev, "found TDA19989");
+ dev_info(dev, "found TDA19989");
break;
case TDA19989N2:
- dev_info(&client->dev, "found TDA19989 n2");
+ dev_info(dev, "found TDA19989 n2");
break;
case TDA19988:
- dev_info(&client->dev, "found TDA19988");
+ dev_info(dev, "found TDA19988");
break;
default:
- dev_err(&client->dev, "found unsupported device: %04x\n",
- priv->rev);
+ dev_err(dev, "found unsupported device: %04x\n", priv->rev);
return -ENXIO;
}
@@ -1722,8 +1767,7 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
tda998x_irq_thread, irq_flags,
"tda998x", priv);
if (ret) {
- dev_err(&client->dev,
- "failed to request IRQ#%u: %d\n",
+ dev_err(dev, "failed to request IRQ#%u: %d\n",
client->irq, ret);
goto err_irq;
}
@@ -1732,13 +1776,13 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
cec_write(priv, REG_CEC_RXSHPDINTENA, CEC_RXSHPDLEV_HPD);
}
- priv->cec_notify = cec_notifier_get(&client->dev);
+ priv->cec_notify = cec_notifier_get(dev);
if (!priv->cec_notify) {
ret = -ENOMEM;
goto fail;
}
- priv->cec_glue.parent = &client->dev;
+ priv->cec_glue.parent = dev;
priv->cec_glue.data = priv;
priv->cec_glue.init = tda998x_cec_hook_init;
priv->cec_glue.exit = tda998x_cec_hook_exit;
@@ -1768,61 +1812,44 @@ static int tda998x_create(struct i2c_client *client, struct tda998x_priv *priv)
/* enable EDID read irq: */
reg_set(priv, REG_INT_FLAGS_2, INT_FLAGS_2_EDID_BLK_RD);
- if (!np)
- return 0; /* non-DT */
+ if (np) {
+ /* get the device tree parameters */
+ ret = of_property_read_u32(np, "video-ports", &video);
+ if (ret == 0) {
+ priv->vip_cntrl_0 = video >> 16;
+ priv->vip_cntrl_1 = video >> 8;
+ priv->vip_cntrl_2 = video;
+ }
+
+ ret = tda998x_get_audio_ports(priv, np);
+ if (ret)
+ goto fail;
- /* get the device tree parameters */
- ret = of_property_read_u32(np, "video-ports", &video);
- if (ret == 0) {
- priv->vip_cntrl_0 = video >> 16;
- priv->vip_cntrl_1 = video >> 8;
- priv->vip_cntrl_2 = video;
+ if (priv->audio_port[0].format != AFMT_UNUSED)
+ tda998x_audio_codec_init(priv, &client->dev);
+ } else if (dev->platform_data) {
+ tda998x_set_config(priv, dev->platform_data);
}
- ret = tda998x_get_audio_ports(priv, np);
- if (ret)
- goto fail;
+ priv->bridge.funcs = &tda998x_bridge_funcs;
+#ifdef CONFIG_OF
+ priv->bridge.of_node = dev->of_node;
+#endif
- if (priv->audio_port[0].format != AFMT_UNUSED)
- tda998x_audio_codec_init(priv, &client->dev);
+ drm_bridge_add(&priv->bridge);
return 0;
fail:
- /* if encoder_init fails, the encoder slave is never registered,
- * so cleanup here:
- */
- i2c_unregister_device(priv->cec);
- if (priv->cec_notify)
- cec_notifier_put(priv->cec_notify);
- if (client->irq)
- free_irq(client->irq, priv);
+ tda998x_destroy(dev);
err_irq:
return ret;
}
-static void tda998x_encoder_prepare(struct drm_encoder *encoder)
-{
- tda998x_encoder_dpms(encoder, DRM_MODE_DPMS_OFF);
-}
-
-static void tda998x_encoder_commit(struct drm_encoder *encoder)
-{
- tda998x_encoder_dpms(encoder, DRM_MODE_DPMS_ON);
-}
-
-static const struct drm_encoder_helper_funcs tda998x_encoder_helper_funcs = {
- .dpms = tda998x_encoder_dpms,
- .prepare = tda998x_encoder_prepare,
- .commit = tda998x_encoder_commit,
- .mode_set = tda998x_encoder_mode_set,
-};
+/* DRM encoder functions */
static void tda998x_encoder_destroy(struct drm_encoder *encoder)
{
- struct tda998x_priv *priv = enc_to_tda998x_priv(encoder);
-
- tda998x_destroy(priv);
drm_encoder_cleanup(encoder);
}
@@ -1830,40 +1857,12 @@ static const struct drm_encoder_funcs tda998x_encoder_funcs = {
.destroy = tda998x_encoder_destroy,
};
-static void tda998x_set_config(struct tda998x_priv *priv,
- const struct tda998x_encoder_params *p)
-{
- priv->vip_cntrl_0 = VIP_CNTRL_0_SWAP_A(p->swap_a) |
- (p->mirr_a ? VIP_CNTRL_0_MIRR_A : 0) |
- VIP_CNTRL_0_SWAP_B(p->swap_b) |
- (p->mirr_b ? VIP_CNTRL_0_MIRR_B : 0);
- priv->vip_cntrl_1 = VIP_CNTRL_1_SWAP_C(p->swap_c) |
- (p->mirr_c ? VIP_CNTRL_1_MIRR_C : 0) |
- VIP_CNTRL_1_SWAP_D(p->swap_d) |
- (p->mirr_d ? VIP_CNTRL_1_MIRR_D : 0);
- priv->vip_cntrl_2 = VIP_CNTRL_2_SWAP_E(p->swap_e) |
- (p->mirr_e ? VIP_CNTRL_2_MIRR_E : 0) |
- VIP_CNTRL_2_SWAP_F(p->swap_f) |
- (p->mirr_f ? VIP_CNTRL_2_MIRR_F : 0);
-
- priv->audio_params = p->audio_params;
-}
-
-static int tda998x_bind(struct device *dev, struct device *master, void *data)
+static int tda998x_encoder_init(struct device *dev, struct drm_device *drm)
{
- struct tda998x_encoder_params *params = dev->platform_data;
- struct i2c_client *client = to_i2c_client(dev);
- struct drm_device *drm = data;
- struct tda998x_priv *priv;
+ struct tda998x_priv *priv = dev_get_drvdata(dev);
u32 crtcs = 0;
int ret;
- priv = devm_kzalloc(dev, sizeof(*priv), GFP_KERNEL);
- if (!priv)
- return -ENOMEM;
-
- dev_set_drvdata(dev, priv);
-
if (dev->of_node)
crtcs = drm_of_find_possible_crtcs(drm, dev->of_node);
@@ -1875,40 +1874,36 @@ static int tda998x_bind(struct device *dev, struct device *master, void *data)
priv->encoder.possible_crtcs = crtcs;
- ret = tda998x_create(client, priv);
- if (ret)
- return ret;
-
- if (!dev->of_node && params)
- tda998x_set_config(priv, params);
-
- drm_encoder_helper_add(&priv->encoder, &tda998x_encoder_helper_funcs);
ret = drm_encoder_init(drm, &priv->encoder, &tda998x_encoder_funcs,
DRM_MODE_ENCODER_TMDS, NULL);
if (ret)
goto err_encoder;
- ret = tda998x_connector_init(priv, drm);
+ ret = drm_bridge_attach(&priv->encoder, &priv->bridge, NULL);
if (ret)
- goto err_connector;
+ goto err_bridge;
return 0;
-err_connector:
+err_bridge:
drm_encoder_cleanup(&priv->encoder);
err_encoder:
- tda998x_destroy(priv);
return ret;
}
+static int tda998x_bind(struct device *dev, struct device *master, void *data)
+{
+ struct drm_device *drm = data;
+
+ return tda998x_encoder_init(dev, drm);
+}
+
static void tda998x_unbind(struct device *dev, struct device *master,
void *data)
{
struct tda998x_priv *priv = dev_get_drvdata(dev);
- drm_connector_cleanup(&priv->connector);
drm_encoder_cleanup(&priv->encoder);
- tda998x_destroy(priv);
}
static const struct component_ops tda998x_ops = {
@@ -1919,16 +1914,27 @@ static const struct component_ops tda998x_ops = {
static int
tda998x_probe(struct i2c_client *client, const struct i2c_device_id *id)
{
+ int ret;
+
if (!i2c_check_functionality(client->adapter, I2C_FUNC_I2C)) {
dev_warn(&client->dev, "adapter does not support I2C\n");
return -EIO;
}
- return component_add(&client->dev, &tda998x_ops);
+
+ ret = tda998x_create(&client->dev);
+ if (ret)
+ return ret;
+
+ ret = component_add(&client->dev, &tda998x_ops);
+ if (ret)
+ tda998x_destroy(&client->dev);
+ return ret;
}
static int tda998x_remove(struct i2c_client *client)
{
component_del(&client->dev, &tda998x_ops);
+ tda998x_destroy(&client->dev);
return 0;
}
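With the bridge now registered at probe time via drm_bridge_add(), a host
display driver could also pick the TDA998x up as a plain bridge instead of
going through the component glue above. A minimal sketch, assuming the host
driver holds the TDA998x device node (tda998x_np and host are hypothetical
names; the drm_* calls are the ones used in this patch):

	/* sketch: attach an already-registered bridge to a host encoder */
	struct drm_bridge *bridge = of_drm_find_bridge(tda998x_np); /* hypothetical node */
	if (!bridge)
		return -EPROBE_DEFER;	/* bridge driver not probed yet */
	return drm_bridge_attach(&host->encoder, bridge, NULL);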
diff --git a/drivers/gpu/drm/i915/Kconfig b/drivers/gpu/drm/i915/Kconfig
index dfd95889f4b7..33a458b7f1fc 100644
--- a/drivers/gpu/drm/i915/Kconfig
+++ b/drivers/gpu/drm/i915/Kconfig
@@ -23,6 +23,8 @@ config DRM_I915
select SYNC_FILE
select IOSF_MBI
select CRC32
+ select SND_HDA_I915 if SND_HDA_CORE
+ select CEC_CORE if CEC_NOTIFIER
help
Choose this option if you have a system that has "Intel Graphics
Media Accelerator" or "HD Graphics" integrated graphics,
diff --git a/drivers/gpu/drm/i915/gvt/aperture_gm.c b/drivers/gpu/drm/i915/gvt/aperture_gm.c
index 380eeb2a0e83..fe754022e356 100644
--- a/drivers/gpu/drm/i915/gvt/aperture_gm.c
+++ b/drivers/gpu/drm/i915/gvt/aperture_gm.c
@@ -131,7 +131,7 @@ void intel_vgpu_write_fence(struct intel_vgpu *vgpu,
assert_rpm_wakelock_held(dev_priv);
- if (WARN_ON(fence > vgpu_fence_sz(vgpu)))
+ if (WARN_ON(fence >= vgpu_fence_sz(vgpu)))
return;
reg = vgpu->fence.regs[fence];
diff --git a/drivers/gpu/drm/i915/gvt/cmd_parser.c b/drivers/gpu/drm/i915/gvt/cmd_parser.c
index 865d80919827..77edbfcb0f75 100644
--- a/drivers/gpu/drm/i915/gvt/cmd_parser.c
+++ b/drivers/gpu/drm/i915/gvt/cmd_parser.c
@@ -863,6 +863,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
{
struct intel_vgpu *vgpu = s->vgpu;
struct intel_gvt *gvt = vgpu->gvt;
+ u32 ctx_sr_ctl;
if (offset + 4 > gvt->device_info.mmio_size) {
gvt_vgpu_err("%s access to (%x) outside of MMIO range\n",
@@ -873,7 +874,7 @@ static int cmd_reg_handler(struct parser_exec_state *s,
if (!intel_gvt_mmio_is_cmd_access(gvt, offset)) {
gvt_vgpu_err("%s access to non-render register (%x)\n",
cmd, offset);
- return 0;
+ return -EBADRQC;
}
if (is_shadowed_mmio(offset)) {
@@ -895,6 +896,28 @@ static int cmd_reg_handler(struct parser_exec_state *s,
patch_value(s, cmd_ptr(s, index), VGT_PVINFO_PAGE);
}
+ /* TODO
+ * Right now we only scan the LRI command, and only on KBL in an
+ * inhibit context. That is good enough to support initializing
+ * MMIO by LRI commands in a vGPU inhibit context on KBL.
+ */
+ if (IS_KABYLAKE(s->vgpu->gvt->dev_priv) &&
+ intel_gvt_mmio_is_in_ctx(gvt, offset) &&
+ !strncmp(cmd, "lri", 3)) {
+ intel_gvt_hypervisor_read_gpa(s->vgpu,
+ s->workload->ring_context_gpa + 12, &ctx_sr_ctl, 4);
+ /* check inhibit context */
+ if (ctx_sr_ctl & 1) {
+ u32 data = cmd_val(s, index + 1);
+
+ if (intel_gvt_mmio_has_mode_mask(s->vgpu->gvt, offset))
+ intel_vgpu_mask_mmio_write(vgpu,
+ offset, &data, 4);
+ else
+ vgpu_vreg(vgpu, offset) = data;
+ }
+ }
+
/* TODO: Update the global mask if this MMIO is a masked-MMIO */
intel_gvt_mmio_set_cmd_accessed(gvt, offset);
return 0;
diff --git a/drivers/gpu/drm/i915/gvt/display.c b/drivers/gpu/drm/i915/gvt/display.c
index 379fc81da863..df1e14145747 100644
--- a/drivers/gpu/drm/i915/gvt/display.c
+++ b/drivers/gpu/drm/i915/gvt/display.c
@@ -219,7 +219,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
- (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
(PORT_B << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
@@ -239,7 +239,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
- (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
(PORT_C << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
@@ -259,7 +259,7 @@ static void emulate_monitor_status_change(struct intel_vgpu *vgpu)
~(TRANS_DDI_BPC_MASK | TRANS_DDI_MODE_SELECT_MASK |
TRANS_DDI_PORT_MASK);
vgpu_vreg_t(vgpu, TRANS_DDI_FUNC_CTL(TRANSCODER_A)) |=
- (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DP_SST |
+ (TRANS_DDI_BPC_8 | TRANS_DDI_MODE_SELECT_DVI |
(PORT_D << TRANS_DDI_PORT_SHIFT) |
TRANS_DDI_FUNC_ENABLE);
if (IS_BROADWELL(dev_priv)) {
diff --git a/drivers/gpu/drm/i915/gvt/gtt.c b/drivers/gpu/drm/i915/gvt/gtt.c
index bbfb168a9665..2402395a068d 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.c
+++ b/drivers/gpu/drm/i915/gvt/gtt.c
@@ -1905,6 +1905,7 @@ static struct intel_vgpu_mm *intel_vgpu_create_ggtt_mm(struct intel_vgpu *vgpu)
vgpu_free_mm(mm);
return ERR_PTR(-ENOMEM);
}
+ mm->ggtt_mm.last_partial_off = -1UL;
return mm;
}
@@ -1929,6 +1930,7 @@ void _intel_vgpu_mm_release(struct kref *mm_ref)
invalidate_ppgtt_mm(mm);
} else {
vfree(mm->ggtt_mm.virtual_ggtt);
+ mm->ggtt_mm.last_partial_off = -1UL;
}
vgpu_free_mm(mm);
@@ -2181,6 +2183,62 @@ static int emulate_ggtt_mmio_write(struct intel_vgpu *vgpu, unsigned int off,
memcpy((void *)&e.val64 + (off & (info->gtt_entry_size - 1)), p_data,
bytes);
+ /* If the ggtt entry size is 8 bytes and it is split into two
+ * 4-byte writes, we assume the two writes are consecutive.
+ * Otherwise, we abort and report an error.
+ */
+ if (bytes < info->gtt_entry_size) {
+ if (ggtt_mm->ggtt_mm.last_partial_off == -1UL) {
+ /* latch the first half of the partial write */
+ ggtt_mm->ggtt_mm.last_partial_off = off;
+ ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+ return 0;
+ } else if ((g_gtt_index ==
+ (ggtt_mm->ggtt_mm.last_partial_off >>
+ info->gtt_entry_size_shift)) &&
+ (off != ggtt_mm->ggtt_mm.last_partial_off)) {
+ /* the second half of the partial write */
+
+ int last_off = ggtt_mm->ggtt_mm.last_partial_off &
+ (info->gtt_entry_size - 1);
+
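+ /* merge in the 4 bytes latched from the first partial write */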
+ memcpy((void *)&e.val64 + last_off,
+ (void *)&ggtt_mm->ggtt_mm.last_partial_data +
+ last_off, bytes);
+
+ ggtt_mm->ggtt_mm.last_partial_off = -1UL;
+ } else {
+ int last_offset;
+
+ gvt_vgpu_err("failed to populate guest ggtt entry: abnormal ggtt entry write sequence, last_partial_off=%lx, offset=%x, bytes=%d, ggtt entry size=%d\n",
+ ggtt_mm->ggtt_mm.last_partial_off, off,
+ bytes, info->gtt_entry_size);
+
+ /* point the host ggtt entry at the scratch page and
+ * mark the virtual ggtt entry not present for the
+ * offset of the last partial write
+ */
+ last_offset = ggtt_mm->ggtt_mm.last_partial_off &
+ (~(info->gtt_entry_size - 1));
+
+ ggtt_get_host_entry(ggtt_mm, &m, last_offset);
+ ggtt_invalidate_pte(vgpu, &m);
+ ops->set_pfn(&m, gvt->gtt.scratch_mfn);
+ ops->clear_present(&m);
+ ggtt_set_host_entry(ggtt_mm, &m, last_offset);
+ ggtt_invalidate(gvt->dev_priv);
+
+ ggtt_get_guest_entry(ggtt_mm, &e, last_offset);
+ ops->clear_present(&e);
+ ggtt_set_guest_entry(ggtt_mm, &e, last_offset);
+
+ ggtt_mm->ggtt_mm.last_partial_off = off;
+ ggtt_mm->ggtt_mm.last_partial_data = e.val64;
+
+ return 0;
+ }
+ }
+
if (ops->test_present(&e)) {
gfn = ops->get_pfn(&e);
m = e;
diff --git a/drivers/gpu/drm/i915/gvt/gtt.h b/drivers/gpu/drm/i915/gvt/gtt.h
index b7bf68cc8418..7a9b36176efb 100644
--- a/drivers/gpu/drm/i915/gvt/gtt.h
+++ b/drivers/gpu/drm/i915/gvt/gtt.h
@@ -157,6 +157,8 @@ struct intel_vgpu_mm {
} ppgtt_mm;
struct {
void *virtual_ggtt;
+ unsigned long last_partial_off;
+ u64 last_partial_data;
} ggtt_mm;
};
};
diff --git a/drivers/gpu/drm/i915/gvt/gvt.c b/drivers/gpu/drm/i915/gvt/gvt.c
index e14416d97e73..6ef5a7fc70df 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.c
+++ b/drivers/gpu/drm/i915/gvt/gvt.c
@@ -176,6 +176,7 @@ static const struct intel_gvt_ops intel_gvt_ops = {
.emulate_mmio_write = intel_vgpu_emulate_mmio_write,
.vgpu_create = intel_gvt_create_vgpu,
.vgpu_destroy = intel_gvt_destroy_vgpu,
+ .vgpu_release = intel_gvt_release_vgpu,
.vgpu_reset = intel_gvt_reset_vgpu,
.vgpu_activate = intel_gvt_activate_vgpu,
.vgpu_deactivate = intel_gvt_deactivate_vgpu,
@@ -314,6 +315,11 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
if (WARN_ON(!gvt))
return;
+ intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
+ intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
+ intel_gvt_cleanup_vgpu_type_groups(gvt);
+ intel_gvt_clean_vgpu_types(gvt);
+
intel_gvt_debugfs_clean(gvt);
clean_service_thread(gvt);
intel_gvt_clean_cmd_parser(gvt);
@@ -321,17 +327,10 @@ void intel_gvt_clean_device(struct drm_i915_private *dev_priv)
intel_gvt_clean_workload_scheduler(gvt);
intel_gvt_clean_gtt(gvt);
intel_gvt_clean_irq(gvt);
- intel_gvt_clean_mmio_info(gvt);
intel_gvt_free_firmware(gvt);
-
- intel_gvt_hypervisor_host_exit(&dev_priv->drm.pdev->dev, gvt);
- intel_gvt_cleanup_vgpu_type_groups(gvt);
- intel_gvt_clean_vgpu_types(gvt);
-
+ intel_gvt_clean_mmio_info(gvt);
idr_destroy(&gvt->vgpu_idr);
- intel_gvt_destroy_idle_vgpu(gvt->idle_vgpu);
-
kfree(dev_priv->gvt);
dev_priv->gvt = NULL;
}
diff --git a/drivers/gpu/drm/i915/gvt/gvt.h b/drivers/gpu/drm/i915/gvt/gvt.h
index de2a3a2580be..31f6cdbe5c42 100644
--- a/drivers/gpu/drm/i915/gvt/gvt.h
+++ b/drivers/gpu/drm/i915/gvt/gvt.h
@@ -274,6 +274,8 @@ struct intel_gvt_mmio {
#define F_CMD_ACCESSED (1 << 5)
/* This reg could be accessed by unaligned address */
#define F_UNALIGN (1 << 6)
+/* This reg is saved/restored in context */
+#define F_IN_CTX (1 << 7)
struct gvt_mmio_block *mmio_block;
unsigned int num_mmio_block;
@@ -484,6 +486,7 @@ void intel_gvt_destroy_idle_vgpu(struct intel_vgpu *vgpu);
struct intel_vgpu *intel_gvt_create_vgpu(struct intel_gvt *gvt,
struct intel_vgpu_type *type);
void intel_gvt_destroy_vgpu(struct intel_vgpu *vgpu);
+void intel_gvt_release_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_reset_vgpu_locked(struct intel_vgpu *vgpu, bool dmlr,
unsigned int engine_mask);
void intel_gvt_reset_vgpu(struct intel_vgpu *vgpu);
@@ -561,7 +564,8 @@ struct intel_gvt_ops {
unsigned int);
struct intel_vgpu *(*vgpu_create)(struct intel_gvt *,
struct intel_vgpu_type *);
- void (*vgpu_destroy)(struct intel_vgpu *);
+ void (*vgpu_destroy)(struct intel_vgpu *vgpu);
+ void (*vgpu_release)(struct intel_vgpu *vgpu);
void (*vgpu_reset)(struct intel_vgpu *);
void (*vgpu_activate)(struct intel_vgpu *);
void (*vgpu_deactivate)(struct intel_vgpu *);
@@ -655,6 +659,33 @@ static inline bool intel_gvt_mmio_has_mode_mask(
return gvt->mmio.mmio_attribute[offset >> 2] & F_MODE_MASK;
}
+/**
+ * intel_gvt_mmio_is_in_ctx - check if an MMIO has the in-context mask
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ * Returns:
+ * True if the MMIO has an in-context mask, false otherwise.
+ *
+ */
+static inline bool intel_gvt_mmio_is_in_ctx(
+ struct intel_gvt *gvt, unsigned int offset)
+{
+ return gvt->mmio.mmio_attribute[offset >> 2] & F_IN_CTX;
+}
+
+/**
+ * intel_gvt_mmio_set_in_ctx - mark an MMIO as saved/restored in context
+ * @gvt: a GVT device
+ * @offset: register offset
+ *
+ */
+static inline void intel_gvt_mmio_set_in_ctx(
+ struct intel_gvt *gvt, unsigned int offset)
+{
+ gvt->mmio.mmio_attribute[offset >> 2] |= F_IN_CTX;
+}
+
int intel_gvt_debugfs_add_vgpu(struct intel_vgpu *vgpu);
void intel_gvt_debugfs_remove_vgpu(struct intel_vgpu *vgpu);
int intel_gvt_debugfs_init(struct intel_gvt *gvt);
diff --git a/drivers/gpu/drm/i915/gvt/handlers.c b/drivers/gpu/drm/i915/gvt/handlers.c
index ff4435432dc9..d26258786e3f 100644
--- a/drivers/gpu/drm/i915/gvt/handlers.c
+++ b/drivers/gpu/drm/i915/gvt/handlers.c
@@ -3377,6 +3377,30 @@ int intel_vgpu_default_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
}
/**
+ * intel_vgpu_mask_mmio_write - write mask register
+ * @vgpu: a vGPU
+ * @offset: access offset
+ * @p_data: write data buffer
+ * @bytes: access data length
+ *
+ * Returns:
+ * Zero on success, negative error code if failed.
+ */
+int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes)
+{
+ u32 mask, old_vreg;
+
+ old_vreg = vgpu_vreg(vgpu, offset);
+ write_vreg(vgpu, offset, p_data, bytes);
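+ /* masked register: the high 16 bits select which low 16 bits to update */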
+ mask = vgpu_vreg(vgpu, offset) >> 16;
+ vgpu_vreg(vgpu, offset) = (old_vreg & ~mask) |
+ (vgpu_vreg(vgpu, offset) & mask);
+
+ return 0;
+}
+
+/**
* intel_gvt_in_force_nonpriv_whitelist - if a mmio is in whitelist to be
* force-nopriv register
*
diff --git a/drivers/gpu/drm/i915/gvt/kvmgt.c b/drivers/gpu/drm/i915/gvt/kvmgt.c
index a22d539b9d4e..71751be329e3 100644
--- a/drivers/gpu/drm/i915/gvt/kvmgt.c
+++ b/drivers/gpu/drm/i915/gvt/kvmgt.c
@@ -43,6 +43,8 @@
#include <linux/mdev.h>
#include <linux/debugfs.h>
+#include <linux/nospec.h>
+
#include "i915_drv.h"
#include "gvt.h"
@@ -187,14 +189,14 @@ static int gvt_dma_map_page(struct intel_vgpu *vgpu, unsigned long gfn,
/* Setup DMA mapping. */
*dma_addr = dma_map_page(dev, page, 0, size, PCI_DMA_BIDIRECTIONAL);
- ret = dma_mapping_error(dev, *dma_addr);
- if (ret) {
+ if (dma_mapping_error(dev, *dma_addr)) {
gvt_vgpu_err("DMA mapping failed for pfn 0x%lx, ret %d\n",
page_to_pfn(page), ret);
gvt_unpin_guest_page(vgpu, gfn, size);
+ return -ENOMEM;
}
- return ret;
+ return 0;
}
static void gvt_dma_unmap_page(struct intel_vgpu *vgpu, unsigned long gfn,
@@ -666,7 +668,7 @@ static void __intel_vgpu_release(struct intel_vgpu *vgpu)
if (atomic_cmpxchg(&vgpu->vdev.released, 0, 1))
return;
- intel_gvt_ops->vgpu_deactivate(vgpu);
+ intel_gvt_ops->vgpu_release(vgpu);
ret = vfio_unregister_notifier(mdev_dev(vgpu->vdev.mdev), VFIO_IOMMU_NOTIFY,
&vgpu->vdev.iommu_notifier);
@@ -1139,7 +1141,8 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
} else if (cmd == VFIO_DEVICE_GET_REGION_INFO) {
struct vfio_region_info info;
struct vfio_info_cap caps = { .buf = NULL, .size = 0 };
- int i, ret;
+ unsigned int i;
+ int ret;
struct vfio_region_info_cap_sparse_mmap *sparse = NULL;
size_t size;
int nr_areas = 1;
@@ -1224,6 +1227,10 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
if (info.index >= VFIO_PCI_NUM_REGIONS +
vgpu->vdev.num_regions)
return -EINVAL;
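+ /* clamp info.index against speculation (Spectre v1) before
+ * using it to index the region array
+ */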
+ info.index =
+ array_index_nospec(info.index,
+ VFIO_PCI_NUM_REGIONS +
+ vgpu->vdev.num_regions);
i = info.index - VFIO_PCI_NUM_REGIONS;
@@ -1250,11 +1257,13 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
&sparse->header, sizeof(*sparse) +
(sparse->nr_areas *
sizeof(*sparse->areas)));
- kfree(sparse);
- if (ret)
+ if (ret) {
+ kfree(sparse);
return ret;
+ }
break;
default:
+ kfree(sparse);
return -EINVAL;
}
}
@@ -1270,6 +1279,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
sizeof(info), caps.buf,
caps.size)) {
kfree(caps.buf);
+ kfree(sparse);
return -EFAULT;
}
info.cap_offset = sizeof(info);
@@ -1278,6 +1288,7 @@ static long intel_vgpu_ioctl(struct mdev_device *mdev, unsigned int cmd,
kfree(caps.buf);
}
+ kfree(sparse);
return copy_to_user((void __user *)arg, &info, minsz) ?
-EFAULT : 0;
} else if (cmd == VFIO_DEVICE_GET_IRQ_INFO) {
@@ -1615,7 +1626,6 @@ static int kvmgt_guest_init(struct mdev_device *mdev)
kvmgt_protect_table_init(info);
gvt_cache_init(vgpu);
- mutex_init(&vgpu->dmabuf_lock);
init_completion(&vgpu->vblank_done);
info->track_node.track_write = kvmgt_page_track_write;
diff --git a/drivers/gpu/drm/i915/gvt/mmio.h b/drivers/gpu/drm/i915/gvt/mmio.h
index e474188b46d2..1ffc69eba30e 100644
--- a/drivers/gpu/drm/i915/gvt/mmio.h
+++ b/drivers/gpu/drm/i915/gvt/mmio.h
@@ -99,4 +99,6 @@ bool intel_gvt_in_force_nonpriv_whitelist(struct intel_gvt *gvt,
int intel_vgpu_mmio_reg_rw(struct intel_vgpu *vgpu, unsigned int offset,
void *pdata, unsigned int bytes, bool is_read);
+int intel_vgpu_mask_mmio_write(struct intel_vgpu *vgpu, unsigned int offset,
+ void *p_data, unsigned int bytes);
#endif
diff --git a/drivers/gpu/drm/i915/gvt/mmio_context.c b/drivers/gpu/drm/i915/gvt/mmio_context.c
index d20f2c9bda82..7e702c6a32af 100644
--- a/drivers/gpu/drm/i915/gvt/mmio_context.c
+++ b/drivers/gpu/drm/i915/gvt/mmio_context.c
@@ -574,7 +574,9 @@ void intel_gvt_init_engine_mmio_context(struct intel_gvt *gvt)
for (mmio = gvt->engine_mmio_list.mmio;
i915_mmio_reg_valid(mmio->reg); mmio++) {
- if (mmio->in_context)
+ if (mmio->in_context) {
gvt->engine_mmio_list.ctx_mmio_count[mmio->ring_id]++;
+ intel_gvt_mmio_set_in_ctx(gvt, mmio->reg.reg);
+ }
}
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.c b/drivers/gpu/drm/i915/gvt/scheduler.c
index 1a88c1972416..ea34003d6dd2 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.c
+++ b/drivers/gpu/drm/i915/gvt/scheduler.c
@@ -786,7 +786,8 @@ static void update_guest_context(struct intel_vgpu_workload *workload)
kunmap(page);
}
-static void clean_workloads(struct intel_vgpu *vgpu, unsigned long engine_mask)
+void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
+ unsigned long engine_mask)
{
struct intel_vgpu_submission *s = &vgpu->submission;
struct drm_i915_private *dev_priv = vgpu->gvt->dev_priv;
@@ -881,7 +882,7 @@ static void complete_current_workload(struct intel_gvt *gvt, int ring_id)
* cleaned up during the resetting process later, so doing
* the workload clean up here doesn't have any impact.
**/
- clean_workloads(vgpu, ENGINE_MASK(ring_id));
+ intel_vgpu_clean_workloads(vgpu, ENGINE_MASK(ring_id));
}
workload->complete(workload);
@@ -1083,7 +1084,7 @@ void intel_vgpu_reset_submission(struct intel_vgpu *vgpu,
if (!s->active)
return;
- clean_workloads(vgpu, engine_mask);
+ intel_vgpu_clean_workloads(vgpu, engine_mask);
s->ops->reset(vgpu, engine_mask);
}
diff --git a/drivers/gpu/drm/i915/gvt/scheduler.h b/drivers/gpu/drm/i915/gvt/scheduler.h
index 21eddab4a9cd..ca5529d0e48e 100644
--- a/drivers/gpu/drm/i915/gvt/scheduler.h
+++ b/drivers/gpu/drm/i915/gvt/scheduler.h
@@ -158,4 +158,7 @@ intel_vgpu_create_workload(struct intel_vgpu *vgpu, int ring_id,
void intel_vgpu_destroy_workload(struct intel_vgpu_workload *workload);
+void intel_vgpu_clean_workloads(struct intel_vgpu *vgpu,
+ unsigned long engine_mask);
+
#endif
diff --git a/drivers/gpu/drm/i915/gvt/vgpu.c b/drivers/gpu/drm/i915/gvt/vgpu.c
index f6fa916517c3..a4e8e3cf74fd 100644
--- a/drivers/gpu/drm/i915/gvt/vgpu.c
+++ b/drivers/gpu/drm/i915/gvt/vgpu.c
@@ -222,7 +222,7 @@ void intel_gvt_activate_vgpu(struct intel_vgpu *vgpu)
* @vgpu: virtual GPU
*
* This function is called when user wants to deactivate a virtual GPU.
- * All virtual GPU runtime information will be destroyed.
+ * The virtual GPU will be stopped.
*
*/
void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
@@ -238,12 +238,30 @@ void intel_gvt_deactivate_vgpu(struct intel_vgpu *vgpu)
}
intel_vgpu_stop_schedule(vgpu);
- intel_vgpu_dmabuf_cleanup(vgpu);
mutex_unlock(&vgpu->vgpu_lock);
}
/**
+ * intel_gvt_release_vgpu - release a virtual GPU
+ * @vgpu: virtual GPU
+ *
+ * This function is called when the user wants to release a virtual GPU.
+ * The virtual GPU will be stopped and all runtime information will be
+ * destroyed.
+ *
+ */
+void intel_gvt_release_vgpu(struct intel_vgpu *vgpu)
+{
+ intel_gvt_deactivate_vgpu(vgpu);
+
+ mutex_lock(&vgpu->vgpu_lock);
+ intel_vgpu_clean_workloads(vgpu, ALL_ENGINES);
+ intel_vgpu_dmabuf_cleanup(vgpu);
+ mutex_unlock(&vgpu->vgpu_lock);
+}
+
+/**
* intel_gvt_destroy_vgpu - destroy a virtual GPU
* @vgpu: virtual GPU
*
@@ -361,6 +379,7 @@ static struct intel_vgpu *__intel_gvt_create_vgpu(struct intel_gvt *gvt,
vgpu->gvt = gvt;
vgpu->sched_ctl.weight = param->weight;
mutex_init(&vgpu->vgpu_lock);
+ mutex_init(&vgpu->dmabuf_lock);
INIT_LIST_HEAD(&vgpu->dmabuf_obj_list_head);
INIT_RADIX_TREE(&vgpu->page_track_tree, GFP_KERNEL);
idr_init(&vgpu->object_idr);
diff --git a/drivers/gpu/drm/i915/i915_gem_clflush.c b/drivers/gpu/drm/i915/i915_gem_clflush.c
index f5c570d35b2a..8e74c23cbd91 100644
--- a/drivers/gpu/drm/i915/i915_gem_clflush.c
+++ b/drivers/gpu/drm/i915/i915_gem_clflush.c
@@ -45,11 +45,6 @@ static const char *i915_clflush_get_timeline_name(struct dma_fence *fence)
return "clflush";
}
-static bool i915_clflush_enable_signaling(struct dma_fence *fence)
-{
- return true;
-}
-
static void i915_clflush_release(struct dma_fence *fence)
{
struct clflush *clflush = container_of(fence, typeof(*clflush), dma);
@@ -63,8 +58,6 @@ static void i915_clflush_release(struct dma_fence *fence)
static const struct dma_fence_ops i915_clflush_ops = {
.get_driver_name = i915_clflush_get_driver_name,
.get_timeline_name = i915_clflush_get_timeline_name,
- .enable_signaling = i915_clflush_enable_signaling,
- .wait = dma_fence_default_wait,
.release = i915_clflush_release,
};
diff --git a/drivers/gpu/drm/i915/i915_gem_userptr.c b/drivers/gpu/drm/i915/i915_gem_userptr.c
index dcd6e230d16a..2c9b284036d1 100644
--- a/drivers/gpu/drm/i915/i915_gem_userptr.c
+++ b/drivers/gpu/drm/i915/i915_gem_userptr.c
@@ -112,10 +112,11 @@ static void del_object(struct i915_mmu_object *mo)
mo->attached = false;
}
-static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
+static int i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
struct mm_struct *mm,
unsigned long start,
- unsigned long end)
+ unsigned long end,
+ bool blockable)
{
struct i915_mmu_notifier *mn =
container_of(_mn, struct i915_mmu_notifier, mn);
@@ -124,7 +125,7 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
LIST_HEAD(cancelled);
if (RB_EMPTY_ROOT(&mn->objects.rb_root))
- return;
+ return 0;
/* interval ranges are inclusive, but invalidate range is exclusive */
end--;
@@ -132,6 +133,10 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
spin_lock(&mn->lock);
it = interval_tree_iter_first(&mn->objects, start, end);
while (it) {
+ if (!blockable) {
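+ /* non-blocking caller (e.g. the OOM reaper) must not wait here */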
+ spin_unlock(&mn->lock);
+ return -EAGAIN;
+ }
/* The mmu_object is released late when destroying the
* GEM object so it is entirely possible to gain a
* reference on an object in the process of being freed
@@ -154,6 +159,8 @@ static void i915_gem_userptr_mn_invalidate_range_start(struct mmu_notifier *_mn,
if (!list_empty(&cancelled))
flush_workqueue(mn->wq);
+
+ return 0;
}
static const struct mmu_notifier_ops i915_gem_userptr_notifier = {
diff --git a/drivers/gpu/drm/i915/i915_gpu_error.c b/drivers/gpu/drm/i915/i915_gpu_error.c
index 8c81cf3aa182..f7f2aa71d8d9 100644
--- a/drivers/gpu/drm/i915/i915_gpu_error.c
+++ b/drivers/gpu/drm/i915/i915_gpu_error.c
@@ -31,6 +31,7 @@
#include <linux/stop_machine.h>
#include <linux/zlib.h>
#include <drm/drm_print.h>
+#include <linux/ascii85.h>
#include "i915_gpu_error.h"
#include "i915_drv.h"
@@ -517,35 +518,12 @@ void i915_error_printf(struct drm_i915_error_state_buf *e, const char *f, ...)
va_end(args);
}
-static int
-ascii85_encode_len(int len)
-{
- return DIV_ROUND_UP(len, 4);
-}
-
-static bool
-ascii85_encode(u32 in, char *out)
-{
- int i;
-
- if (in == 0)
- return false;
-
- out[5] = '\0';
- for (i = 5; i--; ) {
- out[i] = '!' + in % 85;
- in /= 85;
- }
-
- return true;
-}
-
static void print_error_obj(struct drm_i915_error_state_buf *m,
struct intel_engine_cs *engine,
const char *name,
struct drm_i915_error_object *obj)
{
- char out[6];
+ char out[ASCII85_BUFSZ];
int page;
if (!obj)
@@ -567,12 +545,8 @@ static void print_error_obj(struct drm_i915_error_state_buf *m,
len -= obj->unused;
len = ascii85_encode_len(len);
- for (i = 0; i < len; i++) {
- if (ascii85_encode(obj->pages[page][i], out))
- err_puts(m, out);
- else
- err_puts(m, "z");
- }
+ for (i = 0; i < len; i++)
+ err_puts(m, ascii85_encode(obj->pages[page][i], out));
}
err_puts(m, "\n");
}
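For reference, <linux/ascii85.h> keeps the encoding the removed helpers
implemented: a zero word is emitted as the single character 'z', and any
other 32-bit word becomes five base-85 digits, each '!' plus the digit,
most significant first; ASCII85_BUFSZ covers those five characters plus
the NUL terminator.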
diff --git a/drivers/gpu/drm/i915/i915_pmu.c b/drivers/gpu/drm/i915/i915_pmu.c
index c39541ed2219..d6c8f8fdfda5 100644
--- a/drivers/gpu/drm/i915/i915_pmu.c
+++ b/drivers/gpu/drm/i915/i915_pmu.c
@@ -4,6 +4,7 @@
* Copyright © 2017-2018 Intel Corporation
*/
+#include <linux/irq.h>
#include "i915_pmu.h"
#include "intel_ringbuffer.h"
#include "i915_drv.h"
diff --git a/drivers/gpu/drm/i915/i915_utils.h b/drivers/gpu/drm/i915/i915_utils.h
index 00165ad55fb3..395dd2511568 100644
--- a/drivers/gpu/drm/i915/i915_utils.h
+++ b/drivers/gpu/drm/i915/i915_utils.h
@@ -43,7 +43,7 @@
#define MISSING_CASE(x) WARN(1, "Missing case (%s == %ld)\n", \
__stringify(x), (long)(x))
-#if GCC_VERSION >= 70000
+#if defined(GCC_VERSION) && GCC_VERSION >= 70000
#define add_overflows(A, B) \
__builtin_add_overflow_p((A), (B), (typeof((A) + (B)))0)
#else
diff --git a/drivers/gpu/drm/i915/intel_audio.c b/drivers/gpu/drm/i915/intel_audio.c
index f02cb211d3e7..769f3f586661 100644
--- a/drivers/gpu/drm/i915/intel_audio.c
+++ b/drivers/gpu/drm/i915/intel_audio.c
@@ -649,11 +649,12 @@ void intel_audio_codec_enable(struct intel_encoder *encoder,
dev_priv->av_enc_map[pipe] = encoder;
mutex_unlock(&dev_priv->av_mutex);
- if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
+ if (acomp && acomp->base.audio_ops &&
+ acomp->base.audio_ops->pin_eld_notify) {
/* audio drivers expect pipe = -1 to indicate Non-MST cases */
if (!intel_crtc_has_type(crtc_state, INTEL_OUTPUT_DP_MST))
pipe = -1;
- acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+ acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr,
(int) port, (int) pipe);
}
@@ -691,11 +692,12 @@ void intel_audio_codec_disable(struct intel_encoder *encoder,
dev_priv->av_enc_map[pipe] = NULL;
mutex_unlock(&dev_priv->av_mutex);
- if (acomp && acomp->audio_ops && acomp->audio_ops->pin_eld_notify) {
+ if (acomp && acomp->base.audio_ops &&
+ acomp->base.audio_ops->pin_eld_notify) {
/* audio drivers expect pipe = -1 to indicate Non-MST cases */
if (!intel_crtc_has_type(old_crtc_state, INTEL_OUTPUT_DP_MST))
pipe = -1;
- acomp->audio_ops->pin_eld_notify(acomp->audio_ops->audio_ptr,
+ acomp->base.audio_ops->pin_eld_notify(acomp->base.audio_ops->audio_ptr,
(int) port, (int) pipe);
}
@@ -890,7 +892,7 @@ static int i915_audio_component_get_eld(struct device *kdev, int port,
return ret;
}
-static const struct i915_audio_component_ops i915_audio_component_ops = {
+static const struct drm_audio_component_ops i915_audio_component_ops = {
.owner = THIS_MODULE,
.get_power = i915_audio_component_get_power,
.put_power = i915_audio_component_put_power,
@@ -907,12 +909,12 @@ static int i915_audio_component_bind(struct device *i915_kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
int i;
- if (WARN_ON(acomp->ops || acomp->dev))
+ if (WARN_ON(acomp->base.ops || acomp->base.dev))
return -EEXIST;
drm_modeset_lock_all(&dev_priv->drm);
- acomp->ops = &i915_audio_component_ops;
- acomp->dev = i915_kdev;
+ acomp->base.ops = &i915_audio_component_ops;
+ acomp->base.dev = i915_kdev;
BUILD_BUG_ON(MAX_PORTS != I915_MAX_PORTS);
for (i = 0; i < ARRAY_SIZE(acomp->aud_sample_rate); i++)
acomp->aud_sample_rate[i] = 0;
@@ -929,8 +931,8 @@ static void i915_audio_component_unbind(struct device *i915_kdev,
struct drm_i915_private *dev_priv = kdev_to_i915(i915_kdev);
drm_modeset_lock_all(&dev_priv->drm);
- acomp->ops = NULL;
- acomp->dev = NULL;
+ acomp->base.ops = NULL;
+ acomp->base.dev = NULL;
dev_priv->audio_component = NULL;
drm_modeset_unlock_all(&dev_priv->drm);
}
diff --git a/drivers/gpu/drm/i915/intel_display.c b/drivers/gpu/drm/i915/intel_display.c
index 1c7321dadd84..1bd14c61dab5 100644
--- a/drivers/gpu/drm/i915/intel_display.c
+++ b/drivers/gpu/drm/i915/intel_display.c
@@ -12902,6 +12902,8 @@ static const struct drm_crtc_funcs intel_crtc_funcs = {
.atomic_duplicate_state = intel_crtc_duplicate_state,
.atomic_destroy_state = intel_crtc_destroy_state,
.set_crc_source = intel_crtc_set_crc_source,
+ .verify_crc_source = intel_crtc_verify_crc_source,
+ .get_crc_sources = intel_crtc_get_crc_sources,
};
struct wait_rps_boost {
diff --git a/drivers/gpu/drm/i915/intel_display.h b/drivers/gpu/drm/i915/intel_display.h
index 43f080c6538d..e20e6a36a748 100644
--- a/drivers/gpu/drm/i915/intel_display.h
+++ b/drivers/gpu/drm/i915/intel_display.h
@@ -142,6 +142,30 @@ enum port {
#define port_name(p) ((p) + 'A')
+/*
+ * Port identifiers referenced by other drivers.
+ * Expected to remain stable over time.
+ */
+static inline const char *port_identifier(enum port port)
+{
+ switch (port) {
+ case PORT_A:
+ return "Port A";
+ case PORT_B:
+ return "Port B";
+ case PORT_C:
+ return "Port C";
+ case PORT_D:
+ return "Port D";
+ case PORT_E:
+ return "Port E";
+ case PORT_F:
+ return "Port F";
+ default:
+ return "<invalid>";
+ }
+}
+
enum tc_port {
PORT_TC_NONE = -1,
diff --git a/drivers/gpu/drm/i915/intel_drv.h b/drivers/gpu/drm/i915/intel_drv.h
index 9d0ca1715d81..f5731215210a 100644
--- a/drivers/gpu/drm/i915/intel_drv.h
+++ b/drivers/gpu/drm/i915/intel_drv.h
@@ -39,6 +39,7 @@
#include <drm/drm_dp_mst_helper.h>
#include <drm/drm_rect.h>
#include <drm/drm_atomic.h>
+#include <media/cec-notifier.h>
/**
* __wait_for - magic wait macro
@@ -1015,6 +1016,7 @@ struct intel_hdmi {
bool has_audio;
bool rgb_quant_range_selectable;
struct intel_connector *attached_connector;
+ struct cec_notifier *cec_notifier;
};
struct intel_dp_mst_encoder;
@@ -2193,12 +2195,17 @@ void lspcon_wait_pcon_mode(struct intel_lspcon *lspcon);
/* intel_pipe_crc.c */
#ifdef CONFIG_DEBUG_FS
-int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
- size_t *values_cnt);
+int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name);
+int intel_crtc_verify_crc_source(struct drm_crtc *crtc,
+ const char *source_name, size_t *values_cnt);
+const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
+ size_t *count);
void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc);
void intel_crtc_enable_pipe_crc(struct intel_crtc *crtc);
#else
#define intel_crtc_set_crc_source NULL
+#define intel_crtc_verify_crc_source NULL
+#define intel_crtc_get_crc_sources NULL
static inline void intel_crtc_disable_pipe_crc(struct intel_crtc *crtc)
{
}
diff --git a/drivers/gpu/drm/i915/intel_hdmi.c b/drivers/gpu/drm/i915/intel_hdmi.c
index 02faa2cf4a85..a2dab0b6bde6 100644
--- a/drivers/gpu/drm/i915/intel_hdmi.c
+++ b/drivers/gpu/drm/i915/intel_hdmi.c
@@ -1903,6 +1903,8 @@ intel_hdmi_set_edid(struct drm_connector *connector)
connected = true;
}
+ cec_notifier_set_phys_addr_from_edid(intel_hdmi->cec_notifier, edid);
+
return connected;
}
@@ -1931,6 +1933,9 @@ intel_hdmi_detect(struct drm_connector *connector, bool force)
out:
intel_display_power_put(dev_priv, POWER_DOMAIN_GMBUS);
+ if (status != connector_status_connected)
+ cec_notifier_phys_addr_invalidate(intel_hdmi->cec_notifier);
+
return status;
}
@@ -2071,6 +2076,8 @@ static void chv_hdmi_pre_enable(struct intel_encoder *encoder,
static void intel_hdmi_destroy(struct drm_connector *connector)
{
+ if (intel_attached_hdmi(connector)->cec_notifier)
+ cec_notifier_put(intel_attached_hdmi(connector)->cec_notifier);
kfree(to_intel_connector(connector)->detect_edid);
drm_connector_cleanup(connector);
kfree(connector);
@@ -2391,6 +2398,11 @@ void intel_hdmi_init_connector(struct intel_digital_port *intel_dig_port,
u32 temp = I915_READ(PEG_BAND_GAP_DATA);
I915_WRITE(PEG_BAND_GAP_DATA, (temp & ~0xf) | 0xd);
}
+
+ intel_hdmi->cec_notifier = cec_notifier_get_conn(dev->dev,
+ port_identifier(port));
+ if (!intel_hdmi->cec_notifier)
+ DRM_DEBUG_KMS("CEC notifier get failed\n");
}
void intel_hdmi_init(struct drm_i915_private *dev_priv,
diff --git a/drivers/gpu/drm/i915/intel_lpe_audio.c b/drivers/gpu/drm/i915/intel_lpe_audio.c
index 430732720e65..cdf19553ffac 100644
--- a/drivers/gpu/drm/i915/intel_lpe_audio.c
+++ b/drivers/gpu/drm/i915/intel_lpe_audio.c
@@ -62,6 +62,7 @@
#include <linux/acpi.h>
#include <linux/device.h>
+#include <linux/irq.h>
#include <linux/pci.h>
#include <linux/pm_runtime.h>
diff --git a/drivers/gpu/drm/i915/intel_pipe_crc.c b/drivers/gpu/drm/i915/intel_pipe_crc.c
index 849e1b69ba73..f3c9010e332a 100644
--- a/drivers/gpu/drm/i915/intel_pipe_crc.c
+++ b/drivers/gpu/drm/i915/intel_pipe_crc.c
@@ -468,8 +468,122 @@ void intel_display_crc_init(struct drm_i915_private *dev_priv)
}
}
-int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
- size_t *values_cnt)
+static int i8xx_crc_source_valid(struct drm_i915_private *dev_priv,
+ const enum intel_pipe_crc_source source)
+{
+ switch (source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int i9xx_crc_source_valid(struct drm_i915_private *dev_priv,
+ const enum intel_pipe_crc_source source)
+{
+ switch (source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ case INTEL_PIPE_CRC_SOURCE_TV:
+ case INTEL_PIPE_CRC_SOURCE_DP_B:
+ case INTEL_PIPE_CRC_SOURCE_DP_C:
+ case INTEL_PIPE_CRC_SOURCE_DP_D:
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int vlv_crc_source_valid(struct drm_i915_private *dev_priv,
+ const enum intel_pipe_crc_source source)
+{
+ switch (source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ case INTEL_PIPE_CRC_SOURCE_DP_B:
+ case INTEL_PIPE_CRC_SOURCE_DP_C:
+ case INTEL_PIPE_CRC_SOURCE_DP_D:
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ilk_crc_source_valid(struct drm_i915_private *dev_priv,
+ const enum intel_pipe_crc_source source)
+{
+ switch (source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ case INTEL_PIPE_CRC_SOURCE_PLANE1:
+ case INTEL_PIPE_CRC_SOURCE_PLANE2:
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int ivb_crc_source_valid(struct drm_i915_private *dev_priv,
+ const enum intel_pipe_crc_source source)
+{
+ switch (source) {
+ case INTEL_PIPE_CRC_SOURCE_PIPE:
+ case INTEL_PIPE_CRC_SOURCE_PLANE1:
+ case INTEL_PIPE_CRC_SOURCE_PLANE2:
+ case INTEL_PIPE_CRC_SOURCE_PF:
+ case INTEL_PIPE_CRC_SOURCE_NONE:
+ return 0;
+ default:
+ return -EINVAL;
+ }
+}
+
+static int
+intel_is_valid_crc_source(struct drm_i915_private *dev_priv,
+ const enum intel_pipe_crc_source source)
+{
+ if (IS_GEN2(dev_priv))
+ return i8xx_crc_source_valid(dev_priv, source);
+ else if (INTEL_GEN(dev_priv) < 5)
+ return i9xx_crc_source_valid(dev_priv, source);
+ else if (IS_VALLEYVIEW(dev_priv) || IS_CHERRYVIEW(dev_priv))
+ return vlv_crc_source_valid(dev_priv, source);
+ else if (IS_GEN5(dev_priv) || IS_GEN6(dev_priv))
+ return ilk_crc_source_valid(dev_priv, source);
+ else
+ return ivb_crc_source_valid(dev_priv, source);
+}
+
+const char *const *intel_crtc_get_crc_sources(struct drm_crtc *crtc,
+ size_t *count)
+{
+ *count = ARRAY_SIZE(pipe_crc_sources);
+ return pipe_crc_sources;
+}
+
+int intel_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
+ size_t *values_cnt)
+{
+ struct drm_i915_private *dev_priv = to_i915(crtc->dev);
+ enum intel_pipe_crc_source source;
+
+ if (display_crc_ctl_parse_source(source_name, &source) < 0) {
+ DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
+ return -EINVAL;
+ }
+
+ if (source == INTEL_PIPE_CRC_SOURCE_AUTO ||
+ intel_is_valid_crc_source(dev_priv, source) == 0) {
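+ /* i915 hardware reports five CRC residue values per frame */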
+ *values_cnt = 5;
+ return 0;
+ }
+
+ return -EINVAL;
+}
+
+int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name)
{
struct drm_i915_private *dev_priv = to_i915(crtc->dev);
struct intel_pipe_crc *pipe_crc = &dev_priv->pipe_crc[crtc->index];
@@ -508,7 +622,6 @@ int intel_crtc_set_crc_source(struct drm_crtc *crtc, const char *source_name,
}
pipe_crc->skipped = 0;
- *values_cnt = 5;
out:
intel_display_power_put(dev_priv, power_domain);
diff --git a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
index 570e325af93e..cdbc8f134e5e 100644
--- a/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
+++ b/drivers/gpu/drm/i915/selftests/i915_sw_fence.c
@@ -611,17 +611,9 @@ static const char *mock_name(struct dma_fence *fence)
return "mock";
}
-static bool mock_enable_signaling(struct dma_fence *fence)
-{
- return true;
-}
-
static const struct dma_fence_ops mock_fence_ops = {
.get_driver_name = mock_name,
.get_timeline_name = mock_name,
- .enable_signaling = mock_enable_signaling,
- .wait = dma_fence_default_wait,
- .release = dma_fence_free,
};
static DEFINE_SPINLOCK(mock_fence_lock);
diff --git a/drivers/gpu/drm/imx/imx-drm-core.c b/drivers/gpu/drm/imx/imx-drm-core.c
index 1d053bbefc02..5ea0c82f9957 100644
--- a/drivers/gpu/drm/imx/imx-drm-core.c
+++ b/drivers/gpu/drm/imx/imx-drm-core.c
@@ -35,12 +35,6 @@
#define MAX_CRTC 4
-struct imx_drm_device {
- struct drm_device *drm;
- unsigned int pipes;
- struct drm_atomic_state *state;
-};
-
#if IS_ENABLED(CONFIG_DRM_FBDEV_EMULATION)
static int legacyfb_depth = 16;
module_param(legacyfb_depth, int, 0444);
@@ -219,22 +213,12 @@ static int compare_of(struct device *dev, void *data)
static int imx_drm_bind(struct device *dev)
{
struct drm_device *drm;
- struct imx_drm_device *imxdrm;
int ret;
drm = drm_dev_alloc(&imx_drm_driver, dev);
if (IS_ERR(drm))
return PTR_ERR(drm);
- imxdrm = devm_kzalloc(dev, sizeof(*imxdrm), GFP_KERNEL);
- if (!imxdrm) {
- ret = -ENOMEM;
- goto err_unref;
- }
-
- imxdrm->drm = drm;
- drm->dev_private = imxdrm;
-
/*
* enable drm irq mode.
* - with irq_enabled = true, we can use the vblank feature.
@@ -306,8 +290,7 @@ err_unbind:
component_unbind_all(drm->dev, drm);
err_kms:
drm_mode_config_cleanup(drm);
-err_unref:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -327,7 +310,7 @@ static void imx_drm_unbind(struct device *dev)
component_unbind_all(drm->dev, drm);
dev_set_drvdata(dev, NULL);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
}
static const struct component_master_ops imx_drm_ops = {
@@ -355,37 +338,15 @@ static int imx_drm_platform_remove(struct platform_device *pdev)
static int imx_drm_suspend(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct imx_drm_device *imxdrm;
-
- /* The drm_dev is NULL before .load hook is called */
- if (drm_dev == NULL)
- return 0;
-
- drm_kms_helper_poll_disable(drm_dev);
- imxdrm = drm_dev->dev_private;
- imxdrm->state = drm_atomic_helper_suspend(drm_dev);
- if (IS_ERR(imxdrm->state)) {
- drm_kms_helper_poll_enable(drm_dev);
- return PTR_ERR(imxdrm->state);
- }
-
- return 0;
+ return drm_mode_config_helper_suspend(drm_dev);
}
static int imx_drm_resume(struct device *dev)
{
struct drm_device *drm_dev = dev_get_drvdata(dev);
- struct imx_drm_device *imx_drm;
- if (drm_dev == NULL)
- return 0;
-
- imx_drm = drm_dev->dev_private;
- drm_atomic_helper_resume(drm_dev, imx_drm->state);
- drm_kms_helper_poll_enable(drm_dev);
-
- return 0;
+ return drm_mode_config_helper_resume(drm_dev);
}
#endif
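For reference, drm_mode_config_helper_suspend()/_resume() perform the same
sequence the removed code open-coded (disable polling, save the atomic state
via drm_atomic_helper_suspend(), restore it on resume) and keep the saved
state inside struct drm_mode_config itself, which is why the private
imx_drm_device wrapper can be dropped entirely.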
diff --git a/drivers/gpu/drm/imx/imx-drm.h b/drivers/gpu/drm/imx/imx-drm.h
index 15c2bec47a04..ab9c6f706eb3 100644
--- a/drivers/gpu/drm/imx/imx-drm.h
+++ b/drivers/gpu/drm/imx/imx-drm.h
@@ -10,7 +10,6 @@ struct drm_display_mode;
struct drm_encoder;
struct drm_framebuffer;
struct drm_plane;
-struct imx_drm_crtc;
struct platform_device;
struct imx_crtc_state {
diff --git a/drivers/gpu/drm/imx/imx-ldb.c b/drivers/gpu/drm/imx/imx-ldb.c
index 7312beb6f1fc..3bd0f8a18e74 100644
--- a/drivers/gpu/drm/imx/imx-ldb.c
+++ b/drivers/gpu/drm/imx/imx-ldb.c
@@ -611,6 +611,9 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
return PTR_ERR(imx_ldb->regmap);
}
+ /* disable LDB by resetting the control register to POR default */
+ regmap_write(imx_ldb->regmap, IOMUXC_GPR2, 0);
+
imx_ldb->dev = dev;
if (of_id)
@@ -651,14 +654,14 @@ static int imx_ldb_bind(struct device *dev, struct device *master, void *data)
if (ret || i < 0 || i > 1)
return -EINVAL;
+ if (!of_device_is_available(child))
+ continue;
+
if (dual && i > 0) {
dev_warn(dev, "dual-channel mode, ignoring second output\n");
continue;
}
- if (!of_device_is_available(child))
- continue;
-
channel = &imx_ldb->channel[i];
channel->ldb = imx_ldb;
channel->chno = i;
diff --git a/drivers/gpu/drm/imx/ipuv3-crtc.c b/drivers/gpu/drm/imx/ipuv3-crtc.c
index 21d002859ae0..7d4b710b837a 100644
--- a/drivers/gpu/drm/imx/ipuv3-crtc.c
+++ b/drivers/gpu/drm/imx/ipuv3-crtc.c
@@ -35,7 +35,6 @@
struct ipu_crtc {
struct device *dev;
struct drm_crtc base;
- struct imx_drm_crtc *imx_crtc;
/* plane[0] is the full plane, plane[1] is the partial plane */
struct ipu_plane *plane[2];
diff --git a/drivers/gpu/drm/imx/ipuv3-plane.c b/drivers/gpu/drm/imx/ipuv3-plane.c
index 203f247d4854..40605fdf0e33 100644
--- a/drivers/gpu/drm/imx/ipuv3-plane.c
+++ b/drivers/gpu/drm/imx/ipuv3-plane.c
@@ -281,16 +281,13 @@ static void ipu_plane_state_reset(struct drm_plane *plane)
ipu_state = to_ipu_plane_state(plane->state);
__drm_atomic_helper_plane_destroy_state(plane->state);
kfree(ipu_state);
+ plane->state = NULL;
}
ipu_state = kzalloc(sizeof(*ipu_state), GFP_KERNEL);
- if (ipu_state) {
- ipu_state->base.plane = plane;
- ipu_state->base.rotation = DRM_MODE_ROTATE_0;
- }
-
- plane->state = &ipu_state->base;
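+ /* the helper sets state->plane, the default rotation and plane->state */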
+ if (ipu_state)
+ __drm_atomic_helper_plane_reset(plane, &ipu_state->base);
}
static struct drm_plane_state *
diff --git a/drivers/gpu/drm/mediatek/mtk_cec.c b/drivers/gpu/drm/mediatek/mtk_cec.c
index 7a3eb8c17ef9..5ce84d0dbf81 100644
--- a/drivers/gpu/drm/mediatek/mtk_cec.c
+++ b/drivers/gpu/drm/mediatek/mtk_cec.c
@@ -15,6 +15,7 @@
#include <linux/delay.h>
#include <linux/io.h>
#include <linux/interrupt.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include "mtk_cec.h"
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
index 978782a77629..28d191192945 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_ovl.c
@@ -132,6 +132,11 @@ static void mtk_ovl_config(struct mtk_ddp_comp *comp, unsigned int w,
writel(0x0, comp->regs + DISP_REG_OVL_RST);
}
+static unsigned int mtk_ovl_layer_nr(struct mtk_ddp_comp *comp)
+{
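+ /* the OVL engine blends up to four overlay layers */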
+ return 4;
+}
+
static void mtk_ovl_layer_on(struct mtk_ddp_comp *comp, unsigned int idx)
{
unsigned int reg;
@@ -157,6 +162,11 @@ static void mtk_ovl_layer_off(struct mtk_ddp_comp *comp, unsigned int idx)
static unsigned int ovl_fmt_convert(struct mtk_disp_ovl *ovl, unsigned int fmt)
{
+ /* The "MEM_MODE_INPUT_FORMAT_XXX" return values in this switch
+ * are defined in the MediaTek hardware data sheet.
+ * The letter order in XXX bears no relation to the data
+ * arrangement in memory.
+ */
switch (fmt) {
default:
case DRM_FORMAT_RGB565:
@@ -221,6 +231,7 @@ static const struct mtk_ddp_comp_funcs mtk_disp_ovl_funcs = {
.stop = mtk_ovl_stop,
.enable_vblank = mtk_ovl_enable_vblank,
.disable_vblank = mtk_ovl_disable_vblank,
+ .layer_nr = mtk_ovl_layer_nr,
.layer_on = mtk_ovl_layer_on,
.layer_off = mtk_ovl_layer_off,
.layer_config = mtk_ovl_layer_config,
diff --git a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
index 585943c81e1f..b0a5cffe345a 100644
--- a/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
+++ b/drivers/gpu/drm/mediatek/mtk_disp_rdma.c
@@ -31,14 +31,31 @@
#define RDMA_REG_UPDATE_INT BIT(0)
#define DISP_REG_RDMA_GLOBAL_CON 0x0010
#define RDMA_ENGINE_EN BIT(0)
+#define RDMA_MODE_MEMORY BIT(1)
#define DISP_REG_RDMA_SIZE_CON_0 0x0014
+#define RDMA_MATRIX_ENABLE BIT(17)
+#define RDMA_MATRIX_INT_MTX_SEL GENMASK(23, 20)
+#define RDMA_MATRIX_INT_MTX_BT601_to_RGB (6 << 20)
#define DISP_REG_RDMA_SIZE_CON_1 0x0018
#define DISP_REG_RDMA_TARGET_LINE 0x001c
+#define DISP_RDMA_MEM_CON 0x0024
+#define MEM_MODE_INPUT_FORMAT_RGB565 (0x000 << 4)
+#define MEM_MODE_INPUT_FORMAT_RGB888 (0x001 << 4)
+#define MEM_MODE_INPUT_FORMAT_RGBA8888 (0x002 << 4)
+#define MEM_MODE_INPUT_FORMAT_ARGB8888 (0x003 << 4)
+#define MEM_MODE_INPUT_FORMAT_UYVY (0x004 << 4)
+#define MEM_MODE_INPUT_FORMAT_YUYV (0x005 << 4)
+#define MEM_MODE_INPUT_SWAP BIT(8)
+#define DISP_RDMA_MEM_SRC_PITCH 0x002c
+#define DISP_RDMA_MEM_GMC_SETTING_0 0x0030
#define DISP_REG_RDMA_FIFO_CON 0x0040
#define RDMA_FIFO_UNDERFLOW_EN BIT(31)
#define RDMA_FIFO_PSEUDO_SIZE(bytes) (((bytes) / 16) << 16)
#define RDMA_OUTPUT_VALID_FIFO_THRESHOLD(bytes) ((bytes) / 16)
#define RDMA_FIFO_SIZE(rdma) ((rdma)->data->fifo_size)
+#define DISP_RDMA_MEM_START_ADDR 0x0f00
+
+#define RDMA_MEM_GMC 0x40402020
struct mtk_disp_rdma_data {
unsigned int fifo_size;
@@ -138,12 +155,87 @@ static void mtk_rdma_config(struct mtk_ddp_comp *comp, unsigned int width,
writel(reg, comp->regs + DISP_REG_RDMA_FIFO_CON);
}
+static unsigned int rdma_fmt_convert(struct mtk_disp_rdma *rdma,
+ unsigned int fmt)
+{
+ /* The "MEM_MODE_INPUT_FORMAT_XXX" return values in this switch
+ * are defined in the MediaTek hardware data sheet.
+ * The letter order in XXX bears no relation to the data
+ * arrangement in memory.
+ */
+ switch (fmt) {
+ default:
+ case DRM_FORMAT_RGB565:
+ return MEM_MODE_INPUT_FORMAT_RGB565;
+ case DRM_FORMAT_BGR565:
+ return MEM_MODE_INPUT_FORMAT_RGB565 | MEM_MODE_INPUT_SWAP;
+ case DRM_FORMAT_RGB888:
+ return MEM_MODE_INPUT_FORMAT_RGB888;
+ case DRM_FORMAT_BGR888:
+ return MEM_MODE_INPUT_FORMAT_RGB888 | MEM_MODE_INPUT_SWAP;
+ case DRM_FORMAT_RGBX8888:
+ case DRM_FORMAT_RGBA8888:
+ return MEM_MODE_INPUT_FORMAT_ARGB8888;
+ case DRM_FORMAT_BGRX8888:
+ case DRM_FORMAT_BGRA8888:
+ return MEM_MODE_INPUT_FORMAT_ARGB8888 | MEM_MODE_INPUT_SWAP;
+ case DRM_FORMAT_XRGB8888:
+ case DRM_FORMAT_ARGB8888:
+ return MEM_MODE_INPUT_FORMAT_RGBA8888;
+ case DRM_FORMAT_XBGR8888:
+ case DRM_FORMAT_ABGR8888:
+ return MEM_MODE_INPUT_FORMAT_RGBA8888 | MEM_MODE_INPUT_SWAP;
+ case DRM_FORMAT_UYVY:
+ return MEM_MODE_INPUT_FORMAT_UYVY;
+ case DRM_FORMAT_YUYV:
+ return MEM_MODE_INPUT_FORMAT_YUYV;
+ }
+}
+
+static unsigned int mtk_rdma_layer_nr(struct mtk_ddp_comp *comp)
+{
+ return 1;
+}
+
+static void mtk_rdma_layer_config(struct mtk_ddp_comp *comp, unsigned int idx,
+ struct mtk_plane_state *state)
+{
+ struct mtk_disp_rdma *rdma = comp_to_rdma(comp);
+ struct mtk_plane_pending_state *pending = &state->pending;
+ unsigned int addr = pending->addr;
+ unsigned int pitch = pending->pitch & 0xffff;
+ unsigned int fmt = pending->format;
+ unsigned int con;
+
+ con = rdma_fmt_convert(rdma, fmt);
+ writel_relaxed(con, comp->regs + DISP_RDMA_MEM_CON);
+
+ if (fmt == DRM_FORMAT_UYVY || fmt == DRM_FORMAT_YUYV) {
+ rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
+ RDMA_MATRIX_ENABLE, RDMA_MATRIX_ENABLE);
+ rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
+ RDMA_MATRIX_INT_MTX_SEL,
+ RDMA_MATRIX_INT_MTX_BT601_to_RGB);
+ } else {
+ rdma_update_bits(comp, DISP_REG_RDMA_SIZE_CON_0,
+ RDMA_MATRIX_ENABLE, 0);
+ }
+
+ writel_relaxed(addr, comp->regs + DISP_RDMA_MEM_START_ADDR);
+ writel_relaxed(pitch, comp->regs + DISP_RDMA_MEM_SRC_PITCH);
+ writel(RDMA_MEM_GMC, comp->regs + DISP_RDMA_MEM_GMC_SETTING_0);
+ rdma_update_bits(comp, DISP_REG_RDMA_GLOBAL_CON,
+ RDMA_MODE_MEMORY, RDMA_MODE_MEMORY);
+}
+
static const struct mtk_ddp_comp_funcs mtk_disp_rdma_funcs = {
.config = mtk_rdma_config,
.start = mtk_rdma_start,
.stop = mtk_rdma_stop,
.enable_vblank = mtk_rdma_enable_vblank,
.disable_vblank = mtk_rdma_disable_vblank,
+ .layer_nr = mtk_rdma_layer_nr,
+ .layer_config = mtk_rdma_layer_config,
};
static int mtk_disp_rdma_bind(struct device *dev, struct device *master,
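
The rdma_fmt_convert() hunk above encodes one hardware quirk worth spelling out: the RDMA engine only knows the RGB-ordered memory formats, so every BGR-ordered DRM fourcc maps to the matching RGB code plus MEM_MODE_INPUT_SWAP, and the hardware's alphabetical names do not track the DRM names (DRM_FORMAT_ARGB8888 lands on MEM_MODE_INPUT_FORMAT_RGBA8888, for instance). YUV sources additionally enable the BT.601 CSC matrix via DISP_REG_RDMA_SIZE_CON_0. A minimal standalone sketch of the mapping (userspace C, not kernel code; fourcc_code() mirrors drm_fourcc.h, register values copied from the hunk):

	#include <stdio.h>
	#include <stdint.h>

	#define fourcc_code(a, b, c, d) ((uint32_t)(a) | ((uint32_t)(b) << 8) | \
					 ((uint32_t)(c) << 16) | ((uint32_t)(d) << 24))
	#define DRM_FORMAT_RGB565	fourcc_code('R', 'G', '1', '6')
	#define DRM_FORMAT_BGR565	fourcc_code('B', 'G', '1', '6')

	#define MEM_MODE_INPUT_FORMAT_RGB565	(0x000 << 4)
	#define MEM_MODE_INPUT_SWAP		(1u << 8)

	static uint32_t fmt_to_mem_con(uint32_t fmt)
	{
		/* BGR reuses the RGB format code plus the byte-swap bit */
		if (fmt == DRM_FORMAT_BGR565)
			return MEM_MODE_INPUT_FORMAT_RGB565 | MEM_MODE_INPUT_SWAP;
		return MEM_MODE_INPUT_FORMAT_RGB565;
	}

	int main(void)
	{
		printf("RGB565 -> DISP_RDMA_MEM_CON 0x%03x\n",
		       fmt_to_mem_con(DRM_FORMAT_RGB565));	/* 0x000 */
		printf("BGR565 -> DISP_RDMA_MEM_CON 0x%03x\n",
		       fmt_to_mem_con(DRM_FORMAT_BGR565));	/* 0x100 */
		return 0;
	}
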
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
index 2d6aa150a9ff..0b976dfd04df 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.c
@@ -45,7 +45,8 @@ struct mtk_drm_crtc {
bool pending_needs_vblank;
struct drm_pending_vblank_event *event;
- struct drm_plane planes[OVL_LAYER_NR];
+ struct drm_plane *planes;
+ unsigned int layer_nr;
bool pending_planes;
void __iomem *config_regs;
@@ -171,9 +172,9 @@ static void mtk_drm_crtc_mode_set_nofb(struct drm_crtc *crtc)
static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
- mtk_ddp_comp_enable_vblank(ovl, &mtk_crtc->base);
+ mtk_ddp_comp_enable_vblank(comp, &mtk_crtc->base);
return 0;
}
@@ -181,9 +182,9 @@ static int mtk_drm_crtc_enable_vblank(struct drm_crtc *crtc)
static void mtk_drm_crtc_disable_vblank(struct drm_crtc *crtc)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
- mtk_ddp_comp_disable_vblank(ovl);
+ mtk_ddp_comp_disable_vblank(comp);
}
static int mtk_crtc_ddp_clk_enable(struct mtk_drm_crtc *mtk_crtc)
@@ -286,7 +287,7 @@ static int mtk_crtc_ddp_hw_init(struct mtk_drm_crtc *mtk_crtc)
}
/* Initially configure all planes */
- for (i = 0; i < OVL_LAYER_NR; i++) {
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
@@ -334,7 +335,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_crtc_state *state = to_mtk_crtc_state(mtk_crtc->base.state);
- struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
unsigned int i;
/*
@@ -343,7 +344,7 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
* queue update module registers on vblank.
*/
if (state->pending_config) {
- mtk_ddp_comp_config(ovl, state->pending_width,
+ mtk_ddp_comp_config(comp, state->pending_width,
state->pending_height,
state->pending_vrefresh, 0);
@@ -351,14 +352,14 @@ static void mtk_crtc_ddp_config(struct drm_crtc *crtc)
}
if (mtk_crtc->pending_planes) {
- for (i = 0; i < OVL_LAYER_NR; i++) {
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
plane_state = to_mtk_plane_state(plane->state);
if (plane_state->pending.config) {
- mtk_ddp_comp_layer_config(ovl, i, plane_state);
+ mtk_ddp_comp_layer_config(comp, i, plane_state);
plane_state->pending.config = false;
}
}
@@ -370,12 +371,12 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
int ret;
DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
- ret = mtk_smi_larb_get(ovl->larb_dev);
+ ret = mtk_smi_larb_get(comp->larb_dev);
if (ret) {
DRM_ERROR("Failed to get larb: %d\n", ret);
return;
@@ -383,7 +384,7 @@ static void mtk_drm_crtc_atomic_enable(struct drm_crtc *crtc,
ret = mtk_crtc_ddp_hw_init(mtk_crtc);
if (ret) {
- mtk_smi_larb_put(ovl->larb_dev);
+ mtk_smi_larb_put(comp->larb_dev);
return;
}
@@ -395,7 +396,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
- struct mtk_ddp_comp *ovl = mtk_crtc->ddp_comp[0];
+ struct mtk_ddp_comp *comp = mtk_crtc->ddp_comp[0];
int i;
DRM_DEBUG_DRIVER("%s %d\n", __func__, crtc->base.id);
@@ -403,7 +404,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
return;
/* Set all pending plane state to disabled */
- for (i = 0; i < OVL_LAYER_NR; i++) {
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
@@ -418,7 +419,7 @@ static void mtk_drm_crtc_atomic_disable(struct drm_crtc *crtc,
drm_crtc_vblank_off(crtc);
mtk_crtc_ddp_hw_fini(mtk_crtc);
- mtk_smi_larb_put(ovl->larb_dev);
+ mtk_smi_larb_put(comp->larb_dev);
mtk_crtc->enabled = false;
}
@@ -450,7 +451,7 @@ static void mtk_drm_crtc_atomic_flush(struct drm_crtc *crtc,
if (mtk_crtc->event)
mtk_crtc->pending_needs_vblank = true;
- for (i = 0; i < OVL_LAYER_NR; i++) {
+ for (i = 0; i < mtk_crtc->layer_nr; i++) {
struct drm_plane *plane = &mtk_crtc->planes[i];
struct mtk_plane_state *plane_state;
@@ -516,7 +517,7 @@ err_cleanup_crtc:
return ret;
}
-void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl)
+void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp)
{
struct mtk_drm_crtc *mtk_crtc = to_mtk_crtc(crtc);
struct mtk_drm_private *priv = crtc->dev->dev_private;
@@ -598,7 +599,12 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
mtk_crtc->ddp_comp[i] = comp;
}
- for (zpos = 0; zpos < OVL_LAYER_NR; zpos++) {
+ mtk_crtc->layer_nr = mtk_ddp_comp_layer_nr(mtk_crtc->ddp_comp[0]);
+ mtk_crtc->planes = devm_kcalloc(dev, mtk_crtc->layer_nr,
+ sizeof(struct drm_plane),
+ GFP_KERNEL);
+ if (!mtk_crtc->planes)
+ return -ENOMEM;
+
+ for (zpos = 0; zpos < mtk_crtc->layer_nr; zpos++) {
type = (zpos == 0) ? DRM_PLANE_TYPE_PRIMARY :
(zpos == 1) ? DRM_PLANE_TYPE_CURSOR :
DRM_PLANE_TYPE_OVERLAY;
@@ -609,7 +615,8 @@ int mtk_drm_crtc_create(struct drm_device *drm_dev,
}
ret = mtk_drm_crtc_init(drm_dev, mtk_crtc, &mtk_crtc->planes[0],
- &mtk_crtc->planes[1], pipe);
+ mtk_crtc->layer_nr > 1 ? &mtk_crtc->planes[1] :
+ NULL, pipe);
if (ret < 0)
goto unprepare;
drm_mode_crtc_set_gamma_size(&mtk_crtc->base, MTK_LUT_SIZE);
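
With OVL_LAYER_NR gone, the plane count now comes from the first component in the pipeline, and the only fixed policy left is the zpos-to-plane-type mapping in the loop above. A sketch of that policy (plain C, enum names illustrative):

	/* Layer 0 becomes the primary plane, layer 1 the cursor, the rest
	 * overlays.  An RDMA-driven pipeline reports a single layer, so it
	 * gets a primary plane only -- hence the "layer_nr > 1" guard when
	 * passing a cursor plane to mtk_drm_crtc_init(). */
	enum mtk_plane_kind { MTK_PRIMARY, MTK_CURSOR, MTK_OVERLAY };

	static enum mtk_plane_kind mtk_zpos_to_kind(unsigned int zpos)
	{
		return (zpos == 0) ? MTK_PRIMARY :
		       (zpos == 1) ? MTK_CURSOR : MTK_OVERLAY;
	}
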
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
index 9d9410c67ae9..091adb2087eb 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_crtc.h
@@ -18,13 +18,12 @@
#include "mtk_drm_ddp_comp.h"
#include "mtk_drm_plane.h"
-#define OVL_LAYER_NR 4
#define MTK_LUT_SIZE 512
#define MTK_MAX_BPC 10
#define MTK_MIN_BPC 3
void mtk_drm_crtc_commit(struct drm_crtc *crtc);
-void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *ovl);
+void mtk_crtc_ddp_irq(struct drm_crtc *crtc, struct mtk_ddp_comp *comp);
int mtk_drm_crtc_create(struct drm_device *drm_dev,
const enum mtk_ddp_comp_id *path,
unsigned int path_len);
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
index 87e4191c250e..546b3e3b300b 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp.c
@@ -106,6 +106,8 @@
#define OVL1_MOUT_EN_COLOR1 0x1
#define GAMMA_MOUT_EN_RDMA1 0x1
#define RDMA0_SOUT_DPI0 0x2
+#define RDMA0_SOUT_DPI1 0x3
+#define RDMA0_SOUT_DSI1 0x1
#define RDMA0_SOUT_DSI2 0x4
#define RDMA0_SOUT_DSI3 0x5
#define RDMA1_SOUT_DPI0 0x2
@@ -122,6 +124,8 @@
#define DPI0_SEL_IN_RDMA2 0x3
#define DPI1_SEL_IN_RDMA1 (0x1 << 8)
#define DPI1_SEL_IN_RDMA2 (0x3 << 8)
+#define DSI0_SEL_IN_RDMA1 0x1
+#define DSI0_SEL_IN_RDMA2 0x4
#define DSI1_SEL_IN_RDMA1 0x1
#define DSI1_SEL_IN_RDMA2 0x4
#define DSI2_SEL_IN_RDMA1 (0x1 << 16)
@@ -224,6 +228,12 @@ static unsigned int mtk_ddp_mout_en(enum mtk_ddp_comp_id cur,
} else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI0) {
*addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
value = RDMA0_SOUT_DPI0;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DPI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DPI1;
+ } else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
+ value = RDMA0_SOUT_DSI1;
} else if (cur == DDP_COMPONENT_RDMA0 && next == DDP_COMPONENT_DSI2) {
*addr = DISP_REG_CONFIG_DISP_RDMA0_SOUT_EN;
value = RDMA0_SOUT_DSI2;
@@ -282,6 +292,9 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DPI1) {
*addr = DISP_REG_CONFIG_DPI_SEL_IN;
value = DPI1_SEL_IN_RDMA1;
+ } else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI0) {
+ *addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI0_SEL_IN_RDMA1;
} else if (cur == DDP_COMPONENT_RDMA1 && next == DDP_COMPONENT_DSI1) {
*addr = DISP_REG_CONFIG_DSIO_SEL_IN;
value = DSI1_SEL_IN_RDMA1;
@@ -297,8 +310,11 @@ static unsigned int mtk_ddp_sel_in(enum mtk_ddp_comp_id cur,
} else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DPI1) {
*addr = DISP_REG_CONFIG_DPI_SEL_IN;
value = DPI1_SEL_IN_RDMA2;
- } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI0) {
*addr = DISP_REG_CONFIG_DSIE_SEL_IN;
+ value = DSI0_SEL_IN_RDMA2;
+ } else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI1) {
+ *addr = DISP_REG_CONFIG_DSIO_SEL_IN;
value = DSI1_SEL_IN_RDMA2;
} else if (cur == DDP_COMPONENT_RDMA2 && next == DDP_COMPONENT_DSI2) {
*addr = DISP_REG_CONFIG_DSIE_SEL_IN;
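
Note the DSIE/DSIO distinction the last hunk fixes: RDMA2 -> DSI1 selects through DISP_REG_CONFIG_DSIO_SEL_IN, not DSIE as before, while RDMA2 -> DSI0 takes the DSIE register. A hypothetical table-driven rendering of the touched branches (the struct and its name are illustrative; the addresses and values are the ones from the hunks):

	struct sel_in_route {
		unsigned int cur, next;		/* mtk_ddp_comp_id pair */
		unsigned int addr, value;	/* selector register, value */
	};

	static const struct sel_in_route new_routes[] = {
		{ DDP_COMPONENT_RDMA1, DDP_COMPONENT_DSI0,
		  DISP_REG_CONFIG_DSIE_SEL_IN, DSI0_SEL_IN_RDMA1 },
		{ DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI0,
		  DISP_REG_CONFIG_DSIE_SEL_IN, DSI0_SEL_IN_RDMA2 },
		{ DDP_COMPONENT_RDMA2, DDP_COMPONENT_DSI1,
		  DISP_REG_CONFIG_DSIO_SEL_IN, DSI1_SEL_IN_RDMA2 },
	};
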
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
index 7413ffeb3c9d..8399229e6ad2 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
+++ b/drivers/gpu/drm/mediatek/mtk_drm_ddp_comp.h
@@ -78,6 +78,7 @@ struct mtk_ddp_comp_funcs {
void (*stop)(struct mtk_ddp_comp *comp);
void (*enable_vblank)(struct mtk_ddp_comp *comp, struct drm_crtc *crtc);
void (*disable_vblank)(struct mtk_ddp_comp *comp);
+ unsigned int (*layer_nr)(struct mtk_ddp_comp *comp);
void (*layer_on)(struct mtk_ddp_comp *comp, unsigned int idx);
void (*layer_off)(struct mtk_ddp_comp *comp, unsigned int idx);
void (*layer_config)(struct mtk_ddp_comp *comp, unsigned int idx,
@@ -128,6 +129,14 @@ static inline void mtk_ddp_comp_disable_vblank(struct mtk_ddp_comp *comp)
comp->funcs->disable_vblank(comp);
}
+static inline unsigned int mtk_ddp_comp_layer_nr(struct mtk_ddp_comp *comp)
+{
+ if (comp->funcs && comp->funcs->layer_nr)
+ return comp->funcs->layer_nr(comp);
+
+ return 0;
+}
+
static inline void mtk_ddp_comp_layer_on(struct mtk_ddp_comp *comp,
unsigned int idx)
{
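
mtk_ddp_comp_layer_nr() follows the header's optional-op convention: both the funcs table and the individual hook are tested, so a component that implements no layer ops simply reports zero layers. The general shape of that wrapper, as a sketch (the fallback parameter is a hypothetical generalisation, not in the patch):

	static inline unsigned int comp_layer_nr_or(struct mtk_ddp_comp *comp,
						    unsigned int fallback)
	{
		if (comp->funcs && comp->funcs->layer_nr)
			return comp->funcs->layer_nr(comp);

		return fallback;
	}
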
diff --git a/drivers/gpu/drm/mediatek/mtk_drm_drv.c b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
index 39721119713b..47ec604289b7 100644
--- a/drivers/gpu/drm/mediatek/mtk_drm_drv.c
+++ b/drivers/gpu/drm/mediatek/mtk_drm_drv.c
@@ -381,7 +381,7 @@ static int mtk_drm_bind(struct device *dev)
err_deinit:
mtk_drm_kms_deinit(drm);
err_free:
- drm_dev_unref(drm);
+ drm_dev_put(drm);
return ret;
}
@@ -390,7 +390,7 @@ static void mtk_drm_unbind(struct device *dev)
struct mtk_drm_private *private = dev_get_drvdata(dev);
drm_dev_unregister(private->drm);
- drm_dev_unref(private->drm);
+ drm_dev_put(private->drm);
private->drm = NULL;
}
@@ -564,7 +564,7 @@ static int mtk_drm_remove(struct platform_device *pdev)
drm_dev_unregister(drm);
mtk_drm_kms_deinit(drm);
- drm_dev_unref(drm);
+ drm_dev_put(drm);
component_master_del(&pdev->dev, &mtk_drm_ops);
pm_runtime_disable(&pdev->dev);
@@ -580,29 +580,24 @@ static int mtk_drm_sys_suspend(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
struct drm_device *drm = private->drm;
+ int ret;
- drm_kms_helper_poll_disable(drm);
-
- private->suspend_state = drm_atomic_helper_suspend(drm);
- if (IS_ERR(private->suspend_state)) {
- drm_kms_helper_poll_enable(drm);
- return PTR_ERR(private->suspend_state);
- }
-
+ ret = drm_mode_config_helper_suspend(drm);
DRM_DEBUG_DRIVER("mtk_drm_sys_suspend\n");
- return 0;
+
+ return ret;
}
static int mtk_drm_sys_resume(struct device *dev)
{
struct mtk_drm_private *private = dev_get_drvdata(dev);
struct drm_device *drm = private->drm;
+ int ret;
- drm_atomic_helper_resume(drm, private->suspend_state);
- drm_kms_helper_poll_enable(drm);
-
+ ret = drm_mode_config_helper_resume(drm);
DRM_DEBUG_DRIVER("mtk_drm_sys_resume\n");
- return 0;
+
+ return ret;
}
#endif
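
drm_mode_config_helper_suspend()/_resume() absorb exactly the sequence the driver used to open-code, including the rollback on failure, and keep the suspended state in dev->mode_config.suspend_state, so private->suspend_state is no longer needed. Roughly what the suspend helper does internally (a simplified sketch; the real code in drivers/gpu/drm/drm_modeset_helper.c also handles fbdev emulation):

	int drm_mode_config_helper_suspend(struct drm_device *dev)
	{
		struct drm_atomic_state *state;

		drm_kms_helper_poll_disable(dev);
		state = drm_atomic_helper_suspend(dev);
		if (IS_ERR(state)) {
			drm_kms_helper_poll_enable(dev);
			return PTR_ERR(state);
		}
		dev->mode_config.suspend_state = state;

		return 0;
	}
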
diff --git a/drivers/gpu/drm/mgag200/mgag200_drv.c b/drivers/gpu/drm/mgag200/mgag200_drv.c
index 74cdde2ee474..ac6af4bd9df6 100644
--- a/drivers/gpu/drm/mgag200/mgag200_drv.c
+++ b/drivers/gpu/drm/mgag200/mgag200_drv.c
@@ -42,29 +42,10 @@ static const struct pci_device_id pciidlist[] = {
MODULE_DEVICE_TABLE(pci, pciidlist);
-static void mgag200_kick_out_firmware_fb(struct pci_dev *pdev)
-{
- struct apertures_struct *ap;
- bool primary = false;
-
- ap = alloc_apertures(1);
- if (!ap)
- return;
-
- ap->ranges[0].base = pci_resource_start(pdev, 0);
- ap->ranges[0].size = pci_resource_len(pdev, 0);
-
-#ifdef CONFIG_X86
- primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
-#endif
- drm_fb_helper_remove_conflicting_framebuffers(ap, "mgag200drmfb", primary);
- kfree(ap);
-}
-
static int mga_pci_probe(struct pci_dev *pdev, const struct pci_device_id *ent)
{
- mgag200_kick_out_firmware_fb(pdev);
+ drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "mgag200drmfb");
return drm_get_pci_dev(pdev, ent, &driver);
}
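
drm_fb_helper_remove_conflicting_pci_framebuffers() is the generic replacement for the aperture boilerplate deleted above; it derives the aperture from the given PCI BAR and performs the shadow-ROM primary detection itself. A simplified sketch of what the removed code becomes inside the helper (an approximation for illustration, not the exact DRM core implementation):

	static int remove_conflicting_pci_fb(struct pci_dev *pdev, int bar,
					     const char *name)
	{
		struct apertures_struct *ap;
		bool primary = false;

		ap = alloc_apertures(1);
		if (!ap)
			return -ENOMEM;

		ap->ranges[0].base = pci_resource_start(pdev, bar);
		ap->ranges[0].size = pci_resource_len(pdev, bar);
	#ifdef CONFIG_X86
		primary = pdev->resource[PCI_ROM_RESOURCE].flags &
			  IORESOURCE_ROM_SHADOW;
	#endif
		drm_fb_helper_remove_conflicting_framebuffers(ap, name, primary);
		kfree(ap);

		return 0;
	}
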
diff --git a/drivers/gpu/drm/mgag200/mgag200_main.c b/drivers/gpu/drm/mgag200/mgag200_main.c
index 780f983b0294..79d54103d470 100644
--- a/drivers/gpu/drm/mgag200/mgag200_main.c
+++ b/drivers/gpu/drm/mgag200/mgag200_main.c
@@ -124,20 +124,11 @@ static int mga_probe_vram(struct mga_device *mdev, void __iomem *mem)
static int mga_vram_init(struct mga_device *mdev)
{
void __iomem *mem;
- struct apertures_struct *aper = alloc_apertures(1);
- if (!aper)
- return -ENOMEM;
/* BAR 0 is VRAM */
mdev->mc.vram_base = pci_resource_start(mdev->dev->pdev, 0);
mdev->mc.vram_window = pci_resource_len(mdev->dev->pdev, 0);
- aper->ranges[0].base = mdev->mc.vram_base;
- aper->ranges[0].size = mdev->mc.vram_window;
-
- drm_fb_helper_remove_conflicting_framebuffers(aper, "mgafb", true);
- kfree(aper);
-
if (!devm_request_mem_region(mdev->dev->dev, mdev->mc.vram_base, mdev->mc.vram_window,
"mgadrmfb_vram")) {
DRM_ERROR("can't reserve VRAM\n");
diff --git a/drivers/gpu/drm/msm/Kconfig b/drivers/gpu/drm/msm/Kconfig
index 38cbde971b48..843a9d40c05e 100644
--- a/drivers/gpu/drm/msm/Kconfig
+++ b/drivers/gpu/drm/msm/Kconfig
@@ -12,6 +12,7 @@ config DRM_MSM
select SHMEM
select TMPFS
select QCOM_SCM
+ select WANT_DEV_COREDUMP
select SND_SOC_HDMI_CODEC if SND_SOC
select SYNC_FILE
select PM_OPP
diff --git a/drivers/gpu/drm/msm/Makefile b/drivers/gpu/drm/msm/Makefile
index cd40c050b2d7..261fa79d456d 100644
--- a/drivers/gpu/drm/msm/Makefile
+++ b/drivers/gpu/drm/msm/Makefile
@@ -1,5 +1,6 @@
# SPDX-License-Identifier: GPL-2.0
ccflags-y := -Idrivers/gpu/drm/msm
+ccflags-y += -Idrivers/gpu/drm/msm/disp/dpu1
ccflags-$(CONFIG_DRM_MSM_DSI) += -Idrivers/gpu/drm/msm/dsi
msm-y := \
@@ -10,6 +11,9 @@ msm-y := \
adreno/a5xx_gpu.o \
adreno/a5xx_power.o \
adreno/a5xx_preempt.o \
+ adreno/a6xx_gpu.o \
+ adreno/a6xx_gmu.o \
+ adreno/a6xx_hfi.o \
hdmi/hdmi.o \
hdmi/hdmi_audio.o \
hdmi/hdmi_bridge.o \
@@ -45,6 +49,33 @@ msm-y := \
disp/mdp5/mdp5_mixer.o \
disp/mdp5/mdp5_plane.o \
disp/mdp5/mdp5_smp.o \
+ disp/dpu1/dpu_core_irq.o \
+ disp/dpu1/dpu_core_perf.o \
+ disp/dpu1/dpu_crtc.o \
+ disp/dpu1/dpu_encoder.o \
+ disp/dpu1/dpu_encoder_phys_cmd.o \
+ disp/dpu1/dpu_encoder_phys_vid.o \
+ disp/dpu1/dpu_formats.o \
+ disp/dpu1/dpu_hw_blk.o \
+ disp/dpu1/dpu_hw_catalog.o \
+ disp/dpu1/dpu_hw_cdm.o \
+ disp/dpu1/dpu_hw_ctl.o \
+ disp/dpu1/dpu_hw_interrupts.o \
+ disp/dpu1/dpu_hw_intf.o \
+ disp/dpu1/dpu_hw_lm.o \
+ disp/dpu1/dpu_hw_pingpong.o \
+ disp/dpu1/dpu_hw_sspp.o \
+ disp/dpu1/dpu_hw_top.o \
+ disp/dpu1/dpu_hw_util.o \
+ disp/dpu1/dpu_hw_vbif.o \
+ disp/dpu1/dpu_io_util.o \
+ disp/dpu1/dpu_irq.o \
+ disp/dpu1/dpu_kms.o \
+ disp/dpu1/dpu_mdss.o \
+ disp/dpu1/dpu_plane.o \
+ disp/dpu1/dpu_power_handle.o \
+ disp/dpu1/dpu_rm.o \
+ disp/dpu1/dpu_vbif.o \
msm_atomic.o \
msm_debugfs.o \
msm_drv.o \
@@ -62,7 +93,8 @@ msm-y := \
msm_ringbuffer.o \
msm_submitqueue.o
-msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o
+msm-$(CONFIG_DEBUG_FS) += adreno/a5xx_debugfs.o \
+ disp/dpu1/dpu_dbg.o
msm-$(CONFIG_DRM_FBDEV_EMULATION) += msm_fbdev.o
msm-$(CONFIG_COMMON_CLK) += disp/mdp4/mdp4_lvds_pll.o
diff --git a/drivers/gpu/drm/msm/adreno/a2xx.xml.h b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
index 644374c7b3e0..4bff0a740c7d 100644
--- a/drivers/gpu/drm/msm/adreno/a2xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a2xx.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -84,13 +86,12 @@ enum a2xx_sq_surfaceformat {
FMT_5_5_5_1 = 13,
FMT_8_8_8_8_A = 14,
FMT_4_4_4_4 = 15,
- FMT_10_11_11 = 16,
- FMT_11_11_10 = 17,
+ FMT_8_8_8 = 16,
FMT_DXT1 = 18,
FMT_DXT2_3 = 19,
FMT_DXT4_5 = 20,
+ FMT_10_10_10_2 = 21,
FMT_24_8 = 22,
- FMT_24_8_FLOAT = 23,
FMT_16 = 24,
FMT_16_16 = 25,
FMT_16_16_16_16 = 26,
@@ -106,29 +107,23 @@ enum a2xx_sq_surfaceformat {
FMT_32_FLOAT = 36,
FMT_32_32_FLOAT = 37,
FMT_32_32_32_32_FLOAT = 38,
- FMT_32_AS_8 = 39,
- FMT_32_AS_8_8 = 40,
- FMT_16_MPEG = 41,
- FMT_16_16_MPEG = 42,
- FMT_8_INTERLACED = 43,
- FMT_32_AS_8_INTERLACED = 44,
- FMT_32_AS_8_8_INTERLACED = 45,
- FMT_16_INTERLACED = 46,
- FMT_16_MPEG_INTERLACED = 47,
- FMT_16_16_MPEG_INTERLACED = 48,
+ FMT_ATI_TC_RGB = 39,
+ FMT_ATI_TC_RGBA = 40,
+ FMT_ATI_TC_555_565_RGB = 41,
+ FMT_ATI_TC_555_565_RGBA = 42,
+ FMT_ATI_TC_RGBA_INTERP = 43,
+ FMT_ATI_TC_555_565_RGBA_INTERP = 44,
+ FMT_ETC1_RGBA_INTERP = 46,
+ FMT_ETC1_RGB = 47,
+ FMT_ETC1_RGBA = 48,
FMT_DXN = 49,
- FMT_8_8_8_8_AS_16_16_16_16 = 50,
- FMT_DXT1_AS_16_16_16_16 = 51,
- FMT_DXT2_3_AS_16_16_16_16 = 52,
- FMT_DXT4_5_AS_16_16_16_16 = 53,
+ FMT_2_3_3 = 51,
FMT_2_10_10_10_AS_16_16_16_16 = 54,
- FMT_10_11_11_AS_16_16_16_16 = 55,
- FMT_11_11_10_AS_16_16_16_16 = 56,
+ FMT_10_10_10_2_AS_16_16_16_16 = 55,
FMT_32_32_32_FLOAT = 57,
FMT_DXT3A = 58,
FMT_DXT5A = 59,
FMT_CTX1 = 60,
- FMT_DXT3A_AS_1_1_1_1 = 61,
};
enum a2xx_sq_ps_vtx_mode {
diff --git a/drivers/gpu/drm/msm/adreno/a3xx.xml.h b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
index 663a73216926..645a19aef399 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a3xx.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
index 3ebbeb3a9b68..669c2d4b070d 100644
--- a/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a3xx_gpu.c
@@ -411,15 +411,6 @@ static const unsigned int a3xx_registers[] = {
~0 /* sentinel */
};
-#ifdef CONFIG_DEBUG_FS
-static void a3xx_show(struct msm_gpu *gpu, struct seq_file *m)
-{
- seq_printf(m, "status: %08x\n",
- gpu_read(gpu, REG_A3XX_RBBM_STATUS));
- adreno_show(gpu, m);
-}
-#endif
-
/* would be nice to not have to duplicate the _show() stuff with printk(): */
static void a3xx_dump(struct msm_gpu *gpu)
{
@@ -427,6 +418,21 @@ static void a3xx_dump(struct msm_gpu *gpu)
gpu_read(gpu, REG_A3XX_RBBM_STATUS));
adreno_dump(gpu);
}
+
+static struct msm_gpu_state *a3xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu_state_get(gpu, state);
+
+ state->rbbm_status = gpu_read(gpu, REG_A3XX_RBBM_STATUS);
+
+ return state;
+}
+
/* Register offset defines for A3XX */
static const unsigned int a3xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_AXXX_CP_RB_BASE),
@@ -450,9 +456,11 @@ static const struct adreno_gpu_funcs funcs = {
.active_ring = adreno_active_ring,
.irq = a3xx_irq,
.destroy = a3xx_destroy,
-#ifdef CONFIG_DEBUG_FS
- .show = a3xx_show,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
#endif
+ .gpu_state_get = a3xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
},
};
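
The per-chip show() callbacks collapse into the shared adreno_show() because the chip-specific work now lives in a capture step: gpu_state_get() snapshots the registers once, and the same msm_gpu_state can then be printed to debugfs or handed to devcoredump, which is why the guard widens to CONFIG_DEV_COREDUMP and the Kconfig change selects WANT_DEV_COREDUMP. A hypothetical consumer, to show the split (the local function name is illustrative; the funcs hooks are the ones wired up above):

	/* Illustrative consumer: one snapshot, any drm_printer sink. */
	static void dump_gpu(struct msm_gpu *gpu, struct drm_printer *p)
	{
		struct msm_gpu_state *state = gpu->funcs->gpu_state_get(gpu);

		if (IS_ERR(state))
			return;

		gpu->funcs->show(gpu, state, p);
		gpu->funcs->gpu_state_put(state);
	}
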
diff --git a/drivers/gpu/drm/msm/adreno/a4xx.xml.h b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
index 1a14f4a40b9c..19565e87aa7b 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a4xx.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -263,12 +265,6 @@ enum a4xx_depth_format {
DEPTH4_32 = 3,
};
-enum a4xx_tess_spacing {
- EQUAL_SPACING = 0,
- ODD_SPACING = 2,
- EVEN_SPACING = 3,
-};
-
enum a4xx_ccu_perfcounter_select {
CCU_BUSY_CYCLES = 0,
CCU_RB_DEPTH_RETURN_STALL = 2,
@@ -3544,12 +3540,13 @@ static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
#define A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
static inline uint32_t A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
{
return ((val) << A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_VS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
}
+#define A4XX_HLSQ_VS_CONTROL_REG_SSBO_ENABLE 0x00008000
#define A4XX_HLSQ_VS_CONTROL_REG_ENABLED 0x00010000
#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
#define A4XX_HLSQ_VS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
@@ -3571,12 +3568,13 @@ static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
#define A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
static inline uint32_t A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
{
return ((val) << A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_FS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
}
+#define A4XX_HLSQ_FS_CONTROL_REG_SSBO_ENABLE 0x00008000
#define A4XX_HLSQ_FS_CONTROL_REG_ENABLED 0x00010000
#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
#define A4XX_HLSQ_FS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
@@ -3598,12 +3596,13 @@ static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
#define A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
static inline uint32_t A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
{
return ((val) << A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_HS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
}
+#define A4XX_HLSQ_HS_CONTROL_REG_SSBO_ENABLE 0x00008000
#define A4XX_HLSQ_HS_CONTROL_REG_ENABLED 0x00010000
#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
#define A4XX_HLSQ_HS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
@@ -3625,12 +3624,13 @@ static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
#define A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
static inline uint32_t A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
{
return ((val) << A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_DS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
}
+#define A4XX_HLSQ_DS_CONTROL_REG_SSBO_ENABLE 0x00008000
#define A4XX_HLSQ_DS_CONTROL_REG_ENABLED 0x00010000
#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
#define A4XX_HLSQ_DS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
@@ -3652,12 +3652,13 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH(uint32_t val)
{
return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTLENGTH__MASK;
}
-#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x0000ff00
+#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
#define A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
{
return ((val) << A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
}
+#define A4XX_HLSQ_GS_CONTROL_REG_SSBO_ENABLE 0x00008000
#define A4XX_HLSQ_GS_CONTROL_REG_ENABLED 0x00010000
#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
#define A4XX_HLSQ_GS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
@@ -3672,23 +3673,103 @@ static inline uint32_t A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH(uint32_t val)
return ((val) << A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_GS_CONTROL_REG_INSTRLENGTH__MASK;
}
-#define REG_A4XX_HLSQ_CS_CONTROL 0x000023ca
+#define REG_A4XX_HLSQ_CS_CONTROL_REG 0x000023ca
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__MASK 0x000000ff
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_CONSTLENGTH__MASK;
+}
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__MASK 0x00007f00
+#define A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT 8
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_CONSTOBJECTOFFSET__MASK;
+}
+#define A4XX_HLSQ_CS_CONTROL_REG_SSBO_ENABLE 0x00008000
+#define A4XX_HLSQ_CS_CONTROL_REG_ENABLED 0x00010000
+#define A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__MASK 0x00fe0000
+#define A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__SHIFT 17
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_SHADEROBJOFFSET__MASK;
+}
+#define A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__MASK 0xff000000
+#define A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__SHIFT 24
+static inline uint32_t A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__SHIFT) & A4XX_HLSQ_CS_CONTROL_REG_INSTRLENGTH__MASK;
+}
#define REG_A4XX_HLSQ_CL_NDRANGE_0 0x000023cd
+#define A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__MASK 0x00000003
+#define A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_KERNELDIM__MASK;
+}
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__MASK 0x00000ffc
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__SHIFT 2
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEX__MASK;
+}
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__MASK 0x003ff000
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__SHIFT 12
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEY__MASK;
+}
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__MASK 0xffc00000
+#define A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__SHIFT) & A4XX_HLSQ_CL_NDRANGE_0_LOCALSIZEZ__MASK;
+}
#define REG_A4XX_HLSQ_CL_NDRANGE_1 0x000023ce
+#define A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__MASK 0xffffffff
+#define A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_1_SIZE_X(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__SHIFT) & A4XX_HLSQ_CL_NDRANGE_1_SIZE_X__MASK;
+}
#define REG_A4XX_HLSQ_CL_NDRANGE_2 0x000023cf
#define REG_A4XX_HLSQ_CL_NDRANGE_3 0x000023d0
+#define A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__MASK 0xffffffff
+#define A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__SHIFT) & A4XX_HLSQ_CL_NDRANGE_3_SIZE_Y__MASK;
+}
#define REG_A4XX_HLSQ_CL_NDRANGE_4 0x000023d1
#define REG_A4XX_HLSQ_CL_NDRANGE_5 0x000023d2
+#define A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__MASK 0xffffffff
+#define A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__SHIFT) & A4XX_HLSQ_CL_NDRANGE_5_SIZE_Z__MASK;
+}
#define REG_A4XX_HLSQ_CL_NDRANGE_6 0x000023d3
#define REG_A4XX_HLSQ_CL_CONTROL_0 0x000023d4
+#define A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__MASK 0x000000ff
+#define A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__SHIFT 0
+static inline uint32_t A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__SHIFT) & A4XX_HLSQ_CL_CONTROL_0_WGIDCONSTID__MASK;
+}
+#define A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__MASK 0xff000000
+#define A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__SHIFT 24
+static inline uint32_t A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID(uint32_t val)
+{
+ return ((val) << A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__SHIFT) & A4XX_HLSQ_CL_CONTROL_0_LOCALIDREGID__MASK;
+}
#define REG_A4XX_HLSQ_CL_CONTROL_1 0x000023d5
@@ -4087,5 +4168,71 @@ static inline uint32_t A4XX_TEX_CONST_4_BASE(uint32_t val)
#define REG_A4XX_TEX_CONST_7 0x00000007
+#define REG_A4XX_SSBO_0_0 0x00000000
+#define A4XX_SSBO_0_0_BASE__MASK 0xffffffe0
+#define A4XX_SSBO_0_0_BASE__SHIFT 5
+static inline uint32_t A4XX_SSBO_0_0_BASE(uint32_t val)
+{
+ return ((val >> 5) << A4XX_SSBO_0_0_BASE__SHIFT) & A4XX_SSBO_0_0_BASE__MASK;
+}
+
+#define REG_A4XX_SSBO_0_1 0x00000001
+#define A4XX_SSBO_0_1_PITCH__MASK 0x003fffff
+#define A4XX_SSBO_0_1_PITCH__SHIFT 0
+static inline uint32_t A4XX_SSBO_0_1_PITCH(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_0_1_PITCH__SHIFT) & A4XX_SSBO_0_1_PITCH__MASK;
+}
+
+#define REG_A4XX_SSBO_0_2 0x00000002
+#define A4XX_SSBO_0_2_ARRAY_PITCH__MASK 0x03fff000
+#define A4XX_SSBO_0_2_ARRAY_PITCH__SHIFT 12
+static inline uint32_t A4XX_SSBO_0_2_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 12) << A4XX_SSBO_0_2_ARRAY_PITCH__SHIFT) & A4XX_SSBO_0_2_ARRAY_PITCH__MASK;
+}
+
+#define REG_A4XX_SSBO_0_3 0x00000003
+#define A4XX_SSBO_0_3_CPP__MASK 0x0000003f
+#define A4XX_SSBO_0_3_CPP__SHIFT 0
+static inline uint32_t A4XX_SSBO_0_3_CPP(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_0_3_CPP__SHIFT) & A4XX_SSBO_0_3_CPP__MASK;
+}
+
+#define REG_A4XX_SSBO_1_0 0x00000000
+#define A4XX_SSBO_1_0_CPP__MASK 0x0000001f
+#define A4XX_SSBO_1_0_CPP__SHIFT 0
+static inline uint32_t A4XX_SSBO_1_0_CPP(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_1_0_CPP__SHIFT) & A4XX_SSBO_1_0_CPP__MASK;
+}
+#define A4XX_SSBO_1_0_FMT__MASK 0x0000ff00
+#define A4XX_SSBO_1_0_FMT__SHIFT 8
+static inline uint32_t A4XX_SSBO_1_0_FMT(enum a4xx_color_fmt val)
+{
+ return ((val) << A4XX_SSBO_1_0_FMT__SHIFT) & A4XX_SSBO_1_0_FMT__MASK;
+}
+#define A4XX_SSBO_1_0_WIDTH__MASK 0xffff0000
+#define A4XX_SSBO_1_0_WIDTH__SHIFT 16
+static inline uint32_t A4XX_SSBO_1_0_WIDTH(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_1_0_WIDTH__SHIFT) & A4XX_SSBO_1_0_WIDTH__MASK;
+}
+
+#define REG_A4XX_SSBO_1_1 0x00000001
+#define A4XX_SSBO_1_1_HEIGHT__MASK 0x0000ffff
+#define A4XX_SSBO_1_1_HEIGHT__SHIFT 0
+static inline uint32_t A4XX_SSBO_1_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_1_1_HEIGHT__SHIFT) & A4XX_SSBO_1_1_HEIGHT__MASK;
+}
+#define A4XX_SSBO_1_1_DEPTH__MASK 0xffff0000
+#define A4XX_SSBO_1_1_DEPTH__SHIFT 16
+static inline uint32_t A4XX_SSBO_1_1_DEPTH(uint32_t val)
+{
+ return ((val) << A4XX_SSBO_1_1_DEPTH__SHIFT) & A4XX_SSBO_1_1_DEPTH__MASK;
+}
+
#endif /* A4XX_XML */
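
All of the new register fields above follow the same generated pattern: a __MASK/__SHIFT pair plus an inline packer, so fields compose into a register word with OR. Note that A4XX_SSBO_0_0_BASE and A4XX_SSBO_0_2_ARRAY_PITCH pre-shift the value right before packing, because the hardware stores only the aligned upper bits (a 32-byte-aligned base drops its low five bits). A self-contained userspace check of that round-trip (mask and shift copied from the defines above):

	#include <assert.h>
	#include <stdint.h>

	#define A4XX_SSBO_0_0_BASE__MASK  0xffffffe0u
	#define A4XX_SSBO_0_0_BASE__SHIFT 5

	static uint32_t ssbo_0_0_base(uint32_t val)
	{
		/* low five bits are not stored: base must be 32-byte aligned */
		return ((val >> 5) << A4XX_SSBO_0_0_BASE__SHIFT) &
		       A4XX_SSBO_0_0_BASE__MASK;
	}

	int main(void)
	{
		assert(ssbo_0_0_base(0x1000) == 0x1000);	/* aligned: exact */
		assert(ssbo_0_0_base(0x1003) == 0x1000);	/* low bits dropped */
		return 0;
	}
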
diff --git a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
index 16d3d596638e..7c4e6dc1ed59 100644
--- a/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a4xx_gpu.c
@@ -455,15 +455,19 @@ static const unsigned int a4xx_registers[] = {
~0 /* sentinel */
};
-#ifdef CONFIG_DEBUG_FS
-static void a4xx_show(struct msm_gpu *gpu, struct seq_file *m)
+static struct msm_gpu_state *a4xx_gpu_state_get(struct msm_gpu *gpu)
{
- seq_printf(m, "status: %08x\n",
- gpu_read(gpu, REG_A4XX_RBBM_STATUS));
- adreno_show(gpu, m);
+ struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);
+
+ if (!state)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu_state_get(gpu, state);
+ state->rbbm_status = gpu_read(gpu, REG_A4XX_RBBM_STATUS);
+
+ return state;
}
-#endif
/* Register offset defines for A4XX, in order of enum adreno_regs */
static const unsigned int a4xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
@@ -538,9 +542,11 @@ static const struct adreno_gpu_funcs funcs = {
.active_ring = adreno_active_ring,
.irq = a4xx_irq,
.destroy = a4xx_destroy,
-#ifdef CONFIG_DEBUG_FS
- .show = a4xx_show,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = adreno_show,
#endif
+ .gpu_state_get = a4xx_gpu_state_get,
+ .gpu_state_put = adreno_gpu_state_put,
},
.get_timestamp = a4xx_get_timestamp,
};
diff --git a/drivers/gpu/drm/msm/adreno/a5xx.xml.h b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
index e0e6711f4f78..182d37ff3794 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx.xml.h
+++ b/drivers/gpu/drm/msm/adreno/a5xx.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -119,6 +121,11 @@ enum a5xx_vtx_fmt {
VFMT5_8_8_8_8_SNORM = 50,
VFMT5_8_8_8_8_UINT = 51,
VFMT5_8_8_8_8_SINT = 52,
+ VFMT5_10_10_10_2_UNORM = 54,
+ VFMT5_10_10_10_2_SNORM = 57,
+ VFMT5_10_10_10_2_UINT = 58,
+ VFMT5_10_10_10_2_SINT = 59,
+ VFMT5_11_11_10_FLOAT = 66,
VFMT5_16_16_UNORM = 67,
VFMT5_16_16_SNORM = 68,
VFMT5_16_16_FLOAT = 69,
@@ -204,14 +211,45 @@ enum a5xx_tex_fmt {
TFMT5_32_32_FLOAT = 103,
TFMT5_32_32_UINT = 104,
TFMT5_32_32_SINT = 105,
+ TFMT5_32_32_32_UINT = 114,
+ TFMT5_32_32_32_SINT = 115,
+ TFMT5_32_32_32_FLOAT = 116,
TFMT5_32_32_32_32_FLOAT = 130,
TFMT5_32_32_32_32_UINT = 131,
TFMT5_32_32_32_32_SINT = 132,
TFMT5_X8Z24_UNORM = 160,
+ TFMT5_ETC2_RG11_UNORM = 171,
+ TFMT5_ETC2_RG11_SNORM = 172,
+ TFMT5_ETC2_R11_UNORM = 173,
+ TFMT5_ETC2_R11_SNORM = 174,
+ TFMT5_ETC1 = 175,
+ TFMT5_ETC2_RGB8 = 176,
+ TFMT5_ETC2_RGBA8 = 177,
+ TFMT5_ETC2_RGB8A1 = 178,
+ TFMT5_DXT1 = 179,
+ TFMT5_DXT3 = 180,
+ TFMT5_DXT5 = 181,
TFMT5_RGTC1_UNORM = 183,
TFMT5_RGTC1_SNORM = 184,
TFMT5_RGTC2_UNORM = 187,
TFMT5_RGTC2_SNORM = 188,
+ TFMT5_BPTC_UFLOAT = 190,
+ TFMT5_BPTC_FLOAT = 191,
+ TFMT5_BPTC = 192,
+ TFMT5_ASTC_4x4 = 193,
+ TFMT5_ASTC_5x4 = 194,
+ TFMT5_ASTC_5x5 = 195,
+ TFMT5_ASTC_6x5 = 196,
+ TFMT5_ASTC_6x6 = 197,
+ TFMT5_ASTC_8x5 = 198,
+ TFMT5_ASTC_8x6 = 199,
+ TFMT5_ASTC_8x8 = 200,
+ TFMT5_ASTC_10x5 = 201,
+ TFMT5_ASTC_10x6 = 202,
+ TFMT5_ASTC_10x8 = 203,
+ TFMT5_ASTC_10x10 = 204,
+ TFMT5_ASTC_12x10 = 205,
+ TFMT5_ASTC_12x12 = 206,
};
enum a5xx_tex_fetchsize {
@@ -239,7 +277,7 @@ enum a5xx_blit_buf {
BLIT_MRT6 = 6,
BLIT_MRT7 = 7,
BLIT_ZS = 8,
- BLIT_Z32 = 9,
+ BLIT_S = 9,
};
enum a5xx_cp_perfcounter_select {
@@ -899,6 +937,12 @@ enum a5xx_tex_type {
#define REG_A5XX_CP_DRAW_STATE_DATA 0x0000080c
+#define REG_A5XX_CP_ME_NRT_ADDR_LO 0x0000080d
+
+#define REG_A5XX_CP_ME_NRT_ADDR_HI 0x0000080e
+
+#define REG_A5XX_CP_ME_NRT_DATA 0x00000810
+
#define REG_A5XX_CP_CRASH_SCRIPT_BASE_LO 0x00000817
#define REG_A5XX_CP_CRASH_SCRIPT_BASE_HI 0x00000818
@@ -2072,9 +2116,17 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
#define REG_A5XX_PC_MODE_CNTL 0x00000d02
-#define REG_A5XX_UNKNOWN_0D08 0x00000d08
+#define REG_A5XX_PC_INDEX_BUF_LO 0x00000d04
+
+#define REG_A5XX_PC_INDEX_BUF_HI 0x00000d05
+
+#define REG_A5XX_PC_START_INDEX 0x00000d06
-#define REG_A5XX_UNKNOWN_0D09 0x00000d09
+#define REG_A5XX_PC_MAX_INDEX 0x00000d07
+
+#define REG_A5XX_PC_TESSFACTOR_ADDR_LO 0x00000d08
+
+#define REG_A5XX_PC_TESSFACTOR_ADDR_HI 0x00000d09
#define REG_A5XX_PC_PERFCTR_PC_SEL_0 0x00000d10
@@ -2327,6 +2379,14 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
#define REG_A5XX_VBIF_PERF_CNT_EN3 0x000030c3
+#define REG_A5XX_VBIF_PERF_CNT_CLR0 0x000030c8
+
+#define REG_A5XX_VBIF_PERF_CNT_CLR1 0x000030c9
+
+#define REG_A5XX_VBIF_PERF_CNT_CLR2 0x000030ca
+
+#define REG_A5XX_VBIF_PERF_CNT_CLR3 0x000030cb
+
#define REG_A5XX_VBIF_PERF_CNT_SEL0 0x000030d0
#define REG_A5XX_VBIF_PERF_CNT_SEL1 0x000030d1
@@ -2590,6 +2650,7 @@ static inline uint32_t A5XX_VSC_RESOLVE_CNTL_Y(uint32_t val)
#define REG_A5XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x0000c557
#define REG_A5XX_GRAS_CL_CNTL 0x0000e000
+#define A5XX_GRAS_CL_CNTL_ZERO_GB_SCALE_Z 0x00000040
#define REG_A5XX_UNKNOWN_E001 0x0000e001
@@ -2700,7 +2761,7 @@ static inline uint32_t A5XX_GRAS_SU_POINT_SIZE(float val)
return ((((int32_t)(val * 16.0))) << A5XX_GRAS_SU_POINT_SIZE__SHIFT) & A5XX_GRAS_SU_POINT_SIZE__MASK;
}
-#define REG_A5XX_UNKNOWN_E093 0x0000e093
+#define REG_A5XX_GRAS_SU_LAYERED 0x0000e093
#define REG_A5XX_GRAS_SU_DEPTH_PLANE_CNTL 0x0000e094
#define A5XX_GRAS_SU_DEPTH_PLANE_CNTL_FRAG_WRITES_Z 0x00000001
@@ -2936,7 +2997,9 @@ static inline uint32_t A5XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val
#define A5XX_RB_RENDER_CONTROL0_WCOORD 0x00000200
#define REG_A5XX_RB_RENDER_CONTROL1 0x0000e145
+#define A5XX_RB_RENDER_CONTROL1_SAMPLEMASK 0x00000001
#define A5XX_RB_RENDER_CONTROL1_FACENESS 0x00000002
+#define A5XX_RB_RENDER_CONTROL1_SAMPLEID 0x00000004
#define REG_A5XX_RB_FS_OUTPUT_CNTL 0x0000e146
#define A5XX_RB_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
@@ -3002,6 +3065,13 @@ static inline uint32_t REG_A5XX_RB_MRT(uint32_t i0) { return 0x0000e150 + 0x7*i0
static inline uint32_t REG_A5XX_RB_MRT_CONTROL(uint32_t i0) { return 0x0000e150 + 0x7*i0; }
#define A5XX_RB_MRT_CONTROL_BLEND 0x00000001
#define A5XX_RB_MRT_CONTROL_BLEND2 0x00000002
+#define A5XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000004
+#define A5XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000078
+#define A5XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 3
+static inline uint32_t A5XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
+{
+ return ((val) << A5XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A5XX_RB_MRT_CONTROL_ROP_CODE__MASK;
+}
#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x00000780
#define A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 7
static inline uint32_t A5XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
@@ -3060,6 +3130,12 @@ static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a5xx_tile_mode
{
return ((val) << A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
}
+#define A5XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK 0x00001800
+#define A5XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT 11
+static inline uint32_t A5XX_RB_MRT_BUF_INFO_DITHER_MODE(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A5XX_RB_MRT_BUF_INFO_DITHER_MODE__SHIFT) & A5XX_RB_MRT_BUF_INFO_DITHER_MODE__MASK;
+}
#define A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00006000
#define A5XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 13
static inline uint32_t A5XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
@@ -3223,6 +3299,7 @@ static inline uint32_t A5XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
return ((val) << A5XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A5XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
}
#define A5XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
+#define A5XX_RB_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
#define A5XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
static inline uint32_t A5XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
@@ -3369,7 +3446,25 @@ static inline uint32_t A5XX_RB_STENCILREFMASK_STENCILWRITEMASK(uint32_t val)
return ((val) << A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_STENCILWRITEMASK__MASK;
}
-#define REG_A5XX_UNKNOWN_E1C7 0x0000e1c7
+#define REG_A5XX_RB_STENCILREFMASK_BF 0x0000e1c7
+#define A5XX_RB_STENCILREFMASK_BF_STENCILREF__MASK 0x000000ff
+#define A5XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT 0
+static inline uint32_t A5XX_RB_STENCILREFMASK_BF_STENCILREF(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_BF_STENCILREF__SHIFT) & A5XX_RB_STENCILREFMASK_BF_STENCILREF__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK 0x0000ff00
+#define A5XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT 8
+static inline uint32_t A5XX_RB_STENCILREFMASK_BF_STENCILMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_BF_STENCILMASK__SHIFT) & A5XX_RB_STENCILREFMASK_BF_STENCILMASK__MASK;
+}
+#define A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK 0x00ff0000
+#define A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT 16
+static inline uint32_t A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK(uint32_t val)
+{
+ return ((val) << A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__SHIFT) & A5XX_RB_STENCILREFMASK_BF_STENCILWRITEMASK__MASK;
+}
#define REG_A5XX_RB_WINDOW_OFFSET 0x0000e1d0
#define A5XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
@@ -3428,6 +3523,7 @@ static inline uint32_t A5XX_RB_RESOLVE_CNTL_2_Y(uint32_t val)
}
#define REG_A5XX_RB_RESOLVE_CNTL_3 0x0000e213
+#define A5XX_RB_RESOLVE_CNTL_3_TILED 0x00000001
#define REG_A5XX_RB_BLIT_DST_LO 0x0000e214
@@ -3459,6 +3555,7 @@ static inline uint32_t A5XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val)
#define REG_A5XX_RB_CLEAR_CNTL 0x0000e21c
#define A5XX_RB_CLEAR_CNTL_FAST_CLEAR 0x00000002
+#define A5XX_RB_CLEAR_CNTL_MSAA_RESOLVE 0x00000004
#define A5XX_RB_CLEAR_CNTL_MASK__MASK 0x000000f0
#define A5XX_RB_CLEAR_CNTL_MASK__SHIFT 4
static inline uint32_t A5XX_RB_CLEAR_CNTL_MASK(uint32_t val)
@@ -3627,22 +3724,69 @@ static inline uint32_t A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC(uint32_t val)
{
return ((val) << A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__SHIFT) & A5XX_PC_PRIMITIVE_CNTL_STRIDE_IN_VPC__MASK;
}
+#define A5XX_PC_PRIMITIVE_CNTL_PRIMITIVE_RESTART 0x00000100
+#define A5XX_PC_PRIMITIVE_CNTL_COUNT_PRIMITIVES 0x00000200
#define A5XX_PC_PRIMITIVE_CNTL_PROVOKING_VTX_LAST 0x00000400
#define REG_A5XX_PC_PRIM_VTX_CNTL 0x0000e385
#define A5XX_PC_PRIM_VTX_CNTL_PSIZE 0x00000800
#define REG_A5XX_PC_RASTER_CNTL 0x0000e388
+#define A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__MASK 0x00000007
+#define A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__SHIFT 0
+static inline uint32_t A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__SHIFT) & A5XX_PC_RASTER_CNTL_POLYMODE_FRONT_PTYPE__MASK;
+}
+#define A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__MASK 0x00000038
+#define A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__SHIFT 3
+static inline uint32_t A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__SHIFT) & A5XX_PC_RASTER_CNTL_POLYMODE_BACK_PTYPE__MASK;
+}
+#define A5XX_PC_RASTER_CNTL_POLYMODE_ENABLE 0x00000040
#define REG_A5XX_UNKNOWN_E389 0x0000e389
#define REG_A5XX_PC_RESTART_INDEX 0x0000e38c
-#define REG_A5XX_UNKNOWN_E38D 0x0000e38d
+#define REG_A5XX_PC_GS_LAYERED 0x0000e38d
#define REG_A5XX_PC_GS_PARAM 0x0000e38e
+#define A5XX_PC_GS_PARAM_MAX_VERTICES__MASK 0x000003ff
+#define A5XX_PC_GS_PARAM_MAX_VERTICES__SHIFT 0
+static inline uint32_t A5XX_PC_GS_PARAM_MAX_VERTICES(uint32_t val)
+{
+ return ((val) << A5XX_PC_GS_PARAM_MAX_VERTICES__SHIFT) & A5XX_PC_GS_PARAM_MAX_VERTICES__MASK;
+}
+#define A5XX_PC_GS_PARAM_INVOCATIONS__MASK 0x0000f800
+#define A5XX_PC_GS_PARAM_INVOCATIONS__SHIFT 11
+static inline uint32_t A5XX_PC_GS_PARAM_INVOCATIONS(uint32_t val)
+{
+ return ((val) << A5XX_PC_GS_PARAM_INVOCATIONS__SHIFT) & A5XX_PC_GS_PARAM_INVOCATIONS__MASK;
+}
+#define A5XX_PC_GS_PARAM_PRIMTYPE__MASK 0x01800000
+#define A5XX_PC_GS_PARAM_PRIMTYPE__SHIFT 23
+static inline uint32_t A5XX_PC_GS_PARAM_PRIMTYPE(enum adreno_pa_su_sc_draw val)
+{
+ return ((val) << A5XX_PC_GS_PARAM_PRIMTYPE__SHIFT) & A5XX_PC_GS_PARAM_PRIMTYPE__MASK;
+}
#define REG_A5XX_PC_HS_PARAM 0x0000e38f
+#define A5XX_PC_HS_PARAM_VERTICES_OUT__MASK 0x0000003f
+#define A5XX_PC_HS_PARAM_VERTICES_OUT__SHIFT 0
+static inline uint32_t A5XX_PC_HS_PARAM_VERTICES_OUT(uint32_t val)
+{
+ return ((val) << A5XX_PC_HS_PARAM_VERTICES_OUT__SHIFT) & A5XX_PC_HS_PARAM_VERTICES_OUT__MASK;
+}
+#define A5XX_PC_HS_PARAM_SPACING__MASK 0x00600000
+#define A5XX_PC_HS_PARAM_SPACING__SHIFT 21
+static inline uint32_t A5XX_PC_HS_PARAM_SPACING(enum a4xx_tess_spacing val)
+{
+ return ((val) << A5XX_PC_HS_PARAM_SPACING__SHIFT) & A5XX_PC_HS_PARAM_SPACING__MASK;
+}
+#define A5XX_PC_HS_PARAM_CW 0x00800000
+#define A5XX_PC_HS_PARAM_CONNECTED 0x01000000
#define REG_A5XX_PC_POWER_CNTL 0x0000e3b0
@@ -3667,10 +3811,40 @@ static inline uint32_t A5XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
{
return ((val) << A5XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A5XX_VFD_CONTROL_1_REGID4INST__MASK;
}
+#define A5XX_VFD_CONTROL_1_REGID4PRIMID__MASK 0x00ff0000
+#define A5XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT 16
+static inline uint32_t A5XX_VFD_CONTROL_1_REGID4PRIMID(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT) & A5XX_VFD_CONTROL_1_REGID4PRIMID__MASK;
+}
#define REG_A5XX_VFD_CONTROL_2 0x0000e402
+#define A5XX_VFD_CONTROL_2_REGID_PATCHID__MASK 0x000000ff
+#define A5XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT 0
+static inline uint32_t A5XX_VFD_CONTROL_2_REGID_PATCHID(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT) & A5XX_VFD_CONTROL_2_REGID_PATCHID__MASK;
+}
#define REG_A5XX_VFD_CONTROL_3 0x0000e403
+#define A5XX_VFD_CONTROL_3_REGID_PATCHID__MASK 0x0000ff00
+#define A5XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT 8
+static inline uint32_t A5XX_VFD_CONTROL_3_REGID_PATCHID(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT) & A5XX_VFD_CONTROL_3_REGID_PATCHID__MASK;
+}
+#define A5XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000
+#define A5XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16
+static inline uint32_t A5XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A5XX_VFD_CONTROL_3_REGID_TESSX__MASK;
+}
+#define A5XX_VFD_CONTROL_3_REGID_TESSY__MASK 0xff000000
+#define A5XX_VFD_CONTROL_3_REGID_TESSY__SHIFT 24
+static inline uint32_t A5XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
+{
+ return ((val) << A5XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A5XX_VFD_CONTROL_3_REGID_TESSY__MASK;
+}
#define REG_A5XX_VFD_CONTROL_4 0x0000e404
@@ -3700,12 +3874,18 @@ static inline uint32_t A5XX_VFD_DECODE_INSTR_IDX(uint32_t val)
return ((val) << A5XX_VFD_DECODE_INSTR_IDX__SHIFT) & A5XX_VFD_DECODE_INSTR_IDX__MASK;
}
#define A5XX_VFD_DECODE_INSTR_INSTANCED 0x00020000
-#define A5XX_VFD_DECODE_INSTR_FORMAT__MASK 0x3ff00000
+#define A5XX_VFD_DECODE_INSTR_FORMAT__MASK 0x0ff00000
#define A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20
static inline uint32_t A5XX_VFD_DECODE_INSTR_FORMAT(enum a5xx_vtx_fmt val)
{
return ((val) << A5XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A5XX_VFD_DECODE_INSTR_FORMAT__MASK;
}
+#define A5XX_VFD_DECODE_INSTR_SWAP__MASK 0x30000000
+#define A5XX_VFD_DECODE_INSTR_SWAP__SHIFT 28
+static inline uint32_t A5XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A5XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A5XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
#define A5XX_VFD_DECODE_INSTR_UNK30 0x40000000
#define A5XX_VFD_DECODE_INSTR_FLOAT 0x80000000
@@ -3960,6 +4140,7 @@ static inline uint32_t A5XX_SP_FS_CTRL_REG0_BRANCHSTACK(uint32_t val)
#define REG_A5XX_SP_BLEND_CNTL 0x0000e5c9
#define A5XX_SP_BLEND_CNTL_ENABLED 0x00000001
#define A5XX_SP_BLEND_CNTL_UNK8 0x00000100
+#define A5XX_SP_BLEND_CNTL_ALPHA_TO_COVERAGE 0x00000400
#define REG_A5XX_SP_FS_OUTPUT_CNTL 0x0000e5ca
#define A5XX_SP_FS_OUTPUT_CNTL_MRT__MASK 0x0000000f
@@ -4001,16 +4182,12 @@ static inline uint32_t A5XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a5xx_color_fmt val)
{
return ((val) << A5XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A5XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK;
}
+#define A5XX_SP_FS_MRT_REG_COLOR_SINT 0x00000100
+#define A5XX_SP_FS_MRT_REG_COLOR_UINT 0x00000200
#define A5XX_SP_FS_MRT_REG_COLOR_SRGB 0x00000400
#define REG_A5XX_UNKNOWN_E5DB 0x0000e5db
-#define REG_A5XX_UNKNOWN_E5F2 0x0000e5f2
-
-#define REG_A5XX_SP_CS_OBJ_START_LO 0x0000e5f3
-
-#define REG_A5XX_SP_CS_OBJ_START_HI 0x0000e5f4
-
#define REG_A5XX_SP_CS_CTRL_REG0 0x0000e5f0
#define A5XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00000008
#define A5XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 3
@@ -4039,7 +4216,39 @@ static inline uint32_t A5XX_SP_CS_CTRL_REG0_BRANCHSTACK(uint32_t val)
return ((val) << A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK;
}
-#define REG_A5XX_UNKNOWN_E600 0x0000e600
+#define REG_A5XX_UNKNOWN_E5F2 0x0000e5f2
+
+#define REG_A5XX_SP_CS_OBJ_START_LO 0x0000e5f3
+
+#define REG_A5XX_SP_CS_OBJ_START_HI 0x0000e5f4
+
+#define REG_A5XX_SP_HS_CTRL_REG0 0x0000e600
+#define A5XX_SP_HS_CTRL_REG0_THREADSIZE__MASK 0x00000008
+#define A5XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT 3
+static inline uint32_t A5XX_SP_HS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_HS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_HS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_HS_CTRL_REG0_PIXLODENABLE 0x00100000
+#define A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
+#define A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT 25
+static inline uint32_t A5XX_SP_HS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK;
+}
#define REG_A5XX_UNKNOWN_E602 0x0000e602
@@ -4047,13 +4256,67 @@ static inline uint32_t A5XX_SP_CS_CTRL_REG0_BRANCHSTACK(uint32_t val)
#define REG_A5XX_SP_HS_OBJ_START_HI 0x0000e604
+#define REG_A5XX_SP_DS_CTRL_REG0 0x0000e610
+#define A5XX_SP_DS_CTRL_REG0_THREADSIZE__MASK 0x00000008
+#define A5XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT 3
+static inline uint32_t A5XX_SP_DS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_DS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_DS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_DS_CTRL_REG0_PIXLODENABLE 0x00100000
+#define A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
+#define A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT 25
+static inline uint32_t A5XX_SP_DS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+
#define REG_A5XX_UNKNOWN_E62B 0x0000e62b
#define REG_A5XX_SP_DS_OBJ_START_LO 0x0000e62c
#define REG_A5XX_SP_DS_OBJ_START_HI 0x0000e62d
-#define REG_A5XX_UNKNOWN_E640 0x0000e640
+#define REG_A5XX_SP_GS_CTRL_REG0 0x0000e640
+#define A5XX_SP_GS_CTRL_REG0_THREADSIZE__MASK 0x00000008
+#define A5XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT 3
+static inline uint32_t A5XX_SP_GS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A5XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT) & A5XX_SP_GS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x000003f0
+#define A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 4
+static inline uint32_t A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A5XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x0000fc00
+#define A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 10
+static inline uint32_t A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A5XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A5XX_SP_GS_CTRL_REG0_VARYING 0x00010000
+#define A5XX_SP_GS_CTRL_REG0_PIXLODENABLE 0x00100000
+#define A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK 0xfe000000
+#define A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT 25
+static inline uint32_t A5XX_SP_GS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT) & A5XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK;
+}
#define REG_A5XX_UNKNOWN_E65B 0x0000e65b
@@ -4173,6 +4436,18 @@ static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
{
return ((val) << A5XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
}
+#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK 0x0000ff00
+#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT 8
+static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_SAMPLEID(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK;
+}
+#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK 0x00ff0000
+#define A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT 16
+static inline uint32_t A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A5XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK;
+}
#define REG_A5XX_HLSQ_CONTROL_3_REG 0x0000e787
#define A5XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK 0x000000ff
@@ -4375,34 +4650,52 @@ static inline uint32_t A5XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(uint32_t val)
}
#define REG_A5XX_HLSQ_CS_NDRANGE_1 0x0000e7b1
-#define A5XX_HLSQ_CS_NDRANGE_1_SIZE_X__MASK 0xffffffff
-#define A5XX_HLSQ_CS_NDRANGE_1_SIZE_X__SHIFT 0
-static inline uint32_t A5XX_HLSQ_CS_NDRANGE_1_SIZE_X(uint32_t val)
+#define A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(uint32_t val)
{
- return ((val) << A5XX_HLSQ_CS_NDRANGE_1_SIZE_X__SHIFT) & A5XX_HLSQ_CS_NDRANGE_1_SIZE_X__MASK;
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT) & A5XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK;
}
#define REG_A5XX_HLSQ_CS_NDRANGE_2 0x0000e7b2
+#define A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT) & A5XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK;
+}
#define REG_A5XX_HLSQ_CS_NDRANGE_3 0x0000e7b3
-#define A5XX_HLSQ_CS_NDRANGE_3_SIZE_Y__MASK 0xffffffff
-#define A5XX_HLSQ_CS_NDRANGE_3_SIZE_Y__SHIFT 0
-static inline uint32_t A5XX_HLSQ_CS_NDRANGE_3_SIZE_Y(uint32_t val)
+#define A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(uint32_t val)
{
- return ((val) << A5XX_HLSQ_CS_NDRANGE_3_SIZE_Y__SHIFT) & A5XX_HLSQ_CS_NDRANGE_3_SIZE_Y__MASK;
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT) & A5XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK;
}
#define REG_A5XX_HLSQ_CS_NDRANGE_4 0x0000e7b4
+#define A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT) & A5XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK;
+}
#define REG_A5XX_HLSQ_CS_NDRANGE_5 0x0000e7b5
-#define A5XX_HLSQ_CS_NDRANGE_5_SIZE_Z__MASK 0xffffffff
-#define A5XX_HLSQ_CS_NDRANGE_5_SIZE_Z__SHIFT 0
-static inline uint32_t A5XX_HLSQ_CS_NDRANGE_5_SIZE_Z(uint32_t val)
+#define A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(uint32_t val)
{
- return ((val) << A5XX_HLSQ_CS_NDRANGE_5_SIZE_Z__SHIFT) & A5XX_HLSQ_CS_NDRANGE_5_SIZE_Z__MASK;
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT) & A5XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK;
}
#define REG_A5XX_HLSQ_CS_NDRANGE_6 0x0000e7b6
+#define A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK 0xffffffff
+#define A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT 0
+static inline uint32_t A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z(uint32_t val)
+{
+ return ((val) << A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT) & A5XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK;
+}
#define REG_A5XX_HLSQ_CS_CNTL_0 0x0000e7b7
#define A5XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK 0x000000ff
@@ -4468,6 +4761,8 @@ static inline uint32_t A5XX_HLSQ_CS_CNTL_0_LOCALIDREGID(uint32_t val)
#define REG_A5XX_HLSQ_CS_INSTRLEN 0x0000e7dd
+#define REG_A5XX_RB_2D_BLIT_CNTL 0x00002100
+
#define REG_A5XX_RB_2D_SRC_SOLID_DW0 0x00002101
#define REG_A5XX_RB_2D_SRC_SOLID_DW1 0x00002102
@@ -4483,12 +4778,19 @@ static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
{
return ((val) << A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_2D_SRC_INFO_COLOR_FORMAT__MASK;
}
+#define A5XX_RB_2D_SRC_INFO_TILE_MODE__MASK 0x00000300
+#define A5XX_RB_2D_SRC_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_RB_2D_SRC_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_RB_2D_SRC_INFO_TILE_MODE__SHIFT) & A5XX_RB_2D_SRC_INFO_TILE_MODE__MASK;
+}
#define A5XX_RB_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
#define A5XX_RB_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
static inline uint32_t A5XX_RB_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
{
return ((val) << A5XX_RB_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_SRC_INFO_COLOR_SWAP__MASK;
}
+#define A5XX_RB_2D_SRC_INFO_FLAGS 0x00001000
#define REG_A5XX_RB_2D_SRC_LO 0x00002108
@@ -4515,12 +4817,19 @@ static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_FORMAT(enum a5xx_color_fmt val)
{
return ((val) << A5XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK;
}
+#define A5XX_RB_2D_DST_INFO_TILE_MODE__MASK 0x00000300
+#define A5XX_RB_2D_DST_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_RB_2D_DST_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_RB_2D_DST_INFO_TILE_MODE__SHIFT) & A5XX_RB_2D_DST_INFO_TILE_MODE__MASK;
+}
#define A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00
#define A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT 10
static inline uint32_t A5XX_RB_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
{
return ((val) << A5XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_RB_2D_DST_INFO_COLOR_SWAP__MASK;
}
+#define A5XX_RB_2D_DST_INFO_FLAGS 0x00001000
#define REG_A5XX_RB_2D_DST_LO 0x00002111
@@ -4548,6 +4857,8 @@ static inline uint32_t A5XX_RB_2D_DST_SIZE_ARRAY_PITCH(uint32_t val)
#define REG_A5XX_RB_2D_DST_FLAGS_HI 0x00002144
+#define REG_A5XX_GRAS_2D_BLIT_CNTL 0x00002180
+
#define REG_A5XX_GRAS_2D_SRC_INFO 0x00002181
#define A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
#define A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
@@ -4555,12 +4866,19 @@ static inline uint32_t A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT(enum a5xx_color_fmt va
{
return ((val) << A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A5XX_GRAS_2D_SRC_INFO_COLOR_FORMAT__MASK;
}
+#define A5XX_GRAS_2D_SRC_INFO_TILE_MODE__MASK 0x00000300
+#define A5XX_GRAS_2D_SRC_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_GRAS_2D_SRC_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_GRAS_2D_SRC_INFO_TILE_MODE__SHIFT) & A5XX_GRAS_2D_SRC_INFO_TILE_MODE__MASK;
+}
#define A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
#define A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
static inline uint32_t A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
{
return ((val) << A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_SRC_INFO_COLOR_SWAP__MASK;
}
+#define A5XX_GRAS_2D_SRC_INFO_FLAGS 0x00001000
#define REG_A5XX_GRAS_2D_DST_INFO 0x00002182
#define A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
@@ -4569,12 +4887,19 @@ static inline uint32_t A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT(enum a5xx_color_fmt va
{
return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_FORMAT__MASK;
}
+#define A5XX_GRAS_2D_DST_INFO_TILE_MODE__MASK 0x00000300
+#define A5XX_GRAS_2D_DST_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A5XX_GRAS_2D_DST_INFO_TILE_MODE(enum a5xx_tile_mode val)
+{
+ return ((val) << A5XX_GRAS_2D_DST_INFO_TILE_MODE__SHIFT) & A5XX_GRAS_2D_DST_INFO_TILE_MODE__MASK;
+}
#define A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00
#define A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT 10
static inline uint32_t A5XX_GRAS_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
{
return ((val) << A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__SHIFT) & A5XX_GRAS_2D_DST_INFO_COLOR_SWAP__MASK;
}
+#define A5XX_GRAS_2D_DST_INFO_FLAGS 0x00001000
#define REG_A5XX_UNKNOWN_2100 0x00002100
@@ -4698,6 +5023,12 @@ static inline uint32_t A5XX_TEX_CONST_0_MIPLVLS(uint32_t val)
{
return ((val) << A5XX_TEX_CONST_0_MIPLVLS__SHIFT) & A5XX_TEX_CONST_0_MIPLVLS__MASK;
}
+#define A5XX_TEX_CONST_0_SAMPLES__MASK 0x00300000
+#define A5XX_TEX_CONST_0_SAMPLES__SHIFT 20
+static inline uint32_t A5XX_TEX_CONST_0_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A5XX_TEX_CONST_0_SAMPLES__SHIFT) & A5XX_TEX_CONST_0_SAMPLES__MASK;
+}
#define A5XX_TEX_CONST_0_FMT__MASK 0x3fc00000
#define A5XX_TEX_CONST_0_FMT__SHIFT 22
static inline uint32_t A5XX_TEX_CONST_0_FMT(enum a5xx_tex_fmt val)
@@ -4788,5 +5119,81 @@ static inline uint32_t A5XX_TEX_CONST_5_DEPTH(uint32_t val)
#define REG_A5XX_TEX_CONST_11 0x0000000b
+#define REG_A5XX_SSBO_0_0 0x00000000
+#define A5XX_SSBO_0_0_BASE_LO__MASK 0xffffffe0
+#define A5XX_SSBO_0_0_BASE_LO__SHIFT 5
+static inline uint32_t A5XX_SSBO_0_0_BASE_LO(uint32_t val)
+{
+ return ((val >> 5) << A5XX_SSBO_0_0_BASE_LO__SHIFT) & A5XX_SSBO_0_0_BASE_LO__MASK;
+}
+
+#define REG_A5XX_SSBO_0_1 0x00000001
+#define A5XX_SSBO_0_1_PITCH__MASK 0x003fffff
+#define A5XX_SSBO_0_1_PITCH__SHIFT 0
+static inline uint32_t A5XX_SSBO_0_1_PITCH(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_0_1_PITCH__SHIFT) & A5XX_SSBO_0_1_PITCH__MASK;
+}
+
+#define REG_A5XX_SSBO_0_2 0x00000002
+#define A5XX_SSBO_0_2_ARRAY_PITCH__MASK 0x03fff000
+#define A5XX_SSBO_0_2_ARRAY_PITCH__SHIFT 12
+static inline uint32_t A5XX_SSBO_0_2_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 12) << A5XX_SSBO_0_2_ARRAY_PITCH__SHIFT) & A5XX_SSBO_0_2_ARRAY_PITCH__MASK;
+}
+
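Note that the BASE_LO and ARRAY_PITCH helpers, unlike the others, shift val right before shifting it back into place: they assume an already-aligned input (32-byte for BASE_LO, 4 KiB for ARRAY_PITCH) and merely strip the low bits. A quick stand-alone check of that behavior (plain C, illustrative addresses):

#include <assert.h>
#include <stdint.h>

static uint32_t ssbo_base_lo(uint32_t addr)
{
	return ((addr >> 5) << 5) & 0xffffffe0;	/* keeps bits [31:5] */
}

int main(void)
{
	assert(ssbo_base_lo(0x1000) == 0x1000);	/* aligned address preserved */
	assert(ssbo_base_lo(0x1003) == 0x1000);	/* low bits dropped */
	return 0;
}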
+#define REG_A5XX_SSBO_0_3 0x00000003
+#define A5XX_SSBO_0_3_CPP__MASK 0x0000003f
+#define A5XX_SSBO_0_3_CPP__SHIFT 0
+static inline uint32_t A5XX_SSBO_0_3_CPP(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_0_3_CPP__SHIFT) & A5XX_SSBO_0_3_CPP__MASK;
+}
+
+#define REG_A5XX_SSBO_1_0 0x00000000
+#define A5XX_SSBO_1_0_FMT__MASK 0x0000ff00
+#define A5XX_SSBO_1_0_FMT__SHIFT 8
+static inline uint32_t A5XX_SSBO_1_0_FMT(enum a5xx_tex_fmt val)
+{
+ return ((val) << A5XX_SSBO_1_0_FMT__SHIFT) & A5XX_SSBO_1_0_FMT__MASK;
+}
+#define A5XX_SSBO_1_0_WIDTH__MASK 0xffff0000
+#define A5XX_SSBO_1_0_WIDTH__SHIFT 16
+static inline uint32_t A5XX_SSBO_1_0_WIDTH(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_1_0_WIDTH__SHIFT) & A5XX_SSBO_1_0_WIDTH__MASK;
+}
+
+#define REG_A5XX_SSBO_1_1 0x00000001
+#define A5XX_SSBO_1_1_HEIGHT__MASK 0x0000ffff
+#define A5XX_SSBO_1_1_HEIGHT__SHIFT 0
+static inline uint32_t A5XX_SSBO_1_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_1_1_HEIGHT__SHIFT) & A5XX_SSBO_1_1_HEIGHT__MASK;
+}
+#define A5XX_SSBO_1_1_DEPTH__MASK 0xffff0000
+#define A5XX_SSBO_1_1_DEPTH__SHIFT 16
+static inline uint32_t A5XX_SSBO_1_1_DEPTH(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_1_1_DEPTH__SHIFT) & A5XX_SSBO_1_1_DEPTH__MASK;
+}
+
+#define REG_A5XX_SSBO_2_0 0x00000000
+#define A5XX_SSBO_2_0_BASE_LO__MASK 0xffffffff
+#define A5XX_SSBO_2_0_BASE_LO__SHIFT 0
+static inline uint32_t A5XX_SSBO_2_0_BASE_LO(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_2_0_BASE_LO__SHIFT) & A5XX_SSBO_2_0_BASE_LO__MASK;
+}
+
+#define REG_A5XX_SSBO_2_1 0x00000001
+#define A5XX_SSBO_2_1_BASE_HI__MASK 0xffffffff
+#define A5XX_SSBO_2_1_BASE_HI__SHIFT 0
+static inline uint32_t A5XX_SSBO_2_1_BASE_HI(uint32_t val)
+{
+ return ((val) << A5XX_SSBO_2_1_BASE_HI__SHIFT) & A5XX_SSBO_2_1_BASE_HI__MASK;
+}
+
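The SSBO_2_0/SSBO_2_1 pair simply carries the two halves of a 64-bit GPU address; splitting works like this (illustrative value, not from the patch):

	uint64_t iova = 0x100001000ull;	/* hypothetical GPU address */
	uint32_t dw0 = A5XX_SSBO_2_0_BASE_LO((uint32_t)iova);
	uint32_t dw1 = A5XX_SSBO_2_1_BASE_HI((uint32_t)(iova >> 32));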
#endif /* A5XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
index d39400e5bc42..ab1d9308c311 100644
--- a/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/a5xx_gpu.c
@@ -11,6 +11,7 @@
*
*/
+#include <linux/kernel.h>
#include <linux/types.h>
#include <linux/cpumask.h>
#include <linux/qcom_scm.h>
@@ -19,6 +20,8 @@
#include <linux/soc/qcom/mdt_loader.h>
#include <linux/pm_opp.h>
#include <linux/nvmem-consumer.h>
+#include <linux/iopoll.h>
+#include <linux/slab.h>
#include "msm_gem.h"
#include "msm_mmu.h"
#include "a5xx_gpu.h"
@@ -91,12 +94,13 @@ static int zap_shader_load_mdt(struct msm_gpu *gpu, const char *fwname)
ret = qcom_mdt_load(dev, fw, fwname, GPU_PAS_ID,
mem_region, mem_phys, mem_size, NULL);
} else {
- char newname[strlen("qcom/") + strlen(fwname) + 1];
+ char *newname;
- sprintf(newname, "qcom/%s", fwname);
+ newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
+ if (!newname) {
+ ret = -ENOMEM;
+ goto out;
+ }
ret = qcom_mdt_load(dev, fw, newname, GPU_PAS_ID,
mem_region, mem_phys, mem_size, NULL);
+ kfree(newname);
}
if (ret)
goto out;
@@ -1123,8 +1127,9 @@ static const u32 a5xx_registers[] = {
0xE800, 0xE806, 0xE810, 0xE89A, 0xE8A0, 0xE8A4, 0xE8AA, 0xE8EB,
0xE900, 0xE905, 0xEB80, 0xEB8F, 0xEBB0, 0xEBB0, 0xEC00, 0xEC05,
0xEC08, 0xECE9, 0xECF0, 0xECF0, 0xEA80, 0xEA80, 0xEA82, 0xEAA3,
- 0xEAA5, 0xEAC2, 0xA800, 0xA8FF, 0xAC60, 0xAC60, 0xB000, 0xB97F,
- 0xB9A0, 0xB9BF, ~0
+ 0xEAA5, 0xEAC2, 0xA800, 0xA800, 0xA820, 0xA828, 0xA840, 0xA87D,
+ 0xA880, 0xA88D, 0xA890, 0xA8A3, 0xA8D0, 0xA8D8, 0xA8E0, 0xA8F5,
+ 0xAC60, 0xAC60, ~0,
};
static void a5xx_dump(struct msm_gpu *gpu)
@@ -1195,19 +1200,231 @@ static int a5xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
return 0;
}
-#ifdef CONFIG_DEBUG_FS
-static void a5xx_show(struct msm_gpu *gpu, struct seq_file *m)
+struct a5xx_crashdumper {
+ void *ptr;
+ struct drm_gem_object *bo;
+ u64 iova;
+};
+
+struct a5xx_gpu_state {
+ struct msm_gpu_state base;
+ u32 *hlsqregs;
+};
+
+#define gpu_poll_timeout(gpu, addr, val, cond, interval, timeout) \
+ readl_poll_timeout((gpu)->mmio + ((addr) << 2), val, cond, \
+ interval, timeout)
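+/*
+ * Note: register offsets in this file are dword indices, so the << 2 above
+ * converts them to the byte offset that readl_poll_timeout() expects.
+ */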
+
+static int a5xx_crashdumper_init(struct msm_gpu *gpu,
+ struct a5xx_crashdumper *dumper)
{
- seq_printf(m, "status: %08x\n",
- gpu_read(gpu, REG_A5XX_RBBM_STATUS));
+ dumper->ptr = msm_gem_kernel_new_locked(gpu->dev,
+ SZ_1M, MSM_BO_UNCACHED, gpu->aspace,
+ &dumper->bo, &dumper->iova);
- /*
- * Temporarily disable hardware clock gating before going into
- * adreno_show to avoid issues while reading the registers
- */
+ if (IS_ERR(dumper->ptr))
+ return PTR_ERR(dumper->ptr);
+
+ return 0;
+}
+
+static void a5xx_crashdumper_free(struct msm_gpu *gpu,
+ struct a5xx_crashdumper *dumper)
+{
+ msm_gem_put_iova(dumper->bo, gpu->aspace);
+ msm_gem_put_vaddr(dumper->bo);
+
+ drm_gem_object_unreference(dumper->bo);
+}
+
+static int a5xx_crashdumper_run(struct msm_gpu *gpu,
+ struct a5xx_crashdumper *dumper)
+{
+ u32 val;
+
+ if (IS_ERR_OR_NULL(dumper->ptr))
+ return -EINVAL;
+
+ gpu_write64(gpu, REG_A5XX_CP_CRASH_SCRIPT_BASE_LO,
+ REG_A5XX_CP_CRASH_SCRIPT_BASE_HI, dumper->iova);
+
+ gpu_write(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, 1);
+
+ return gpu_poll_timeout(gpu, REG_A5XX_CP_CRASH_DUMP_CNTL, val,
+ val & 0x04, 100, 10000);
+}
+
+/*
+ * This is the list of registers that need to be read through the HLSQ
+ * aperture by the crashdumper, since they are not normally accessible from
+ * the CPU on a secure platform.
+ */
+static const struct {
+ u32 type;
+ u32 regoffset;
+ u32 count;
+} a5xx_hlsq_aperture_regs[] = {
+ { 0x35, 0xe00, 0x32 }, /* HLSQ non-context */
+ { 0x31, 0x2080, 0x1 }, /* HLSQ 2D context 0 */
+ { 0x33, 0x2480, 0x1 }, /* HLSQ 2D context 1 */
+ { 0x32, 0xe780, 0x62 }, /* HLSQ 3D context 0 */
+ { 0x34, 0xef80, 0x62 }, /* HLSQ 3D context 1 */
+ { 0x3f, 0x0ec0, 0x40 }, /* SP non-context */
+ { 0x3d, 0x2040, 0x1 }, /* SP 2D context 0 */
+ { 0x3b, 0x2440, 0x1 }, /* SP 2D context 1 */
+ { 0x3e, 0xe580, 0x170 }, /* SP 3D context 0 */
+ { 0x3c, 0xed80, 0x170 }, /* SP 3D context 1 */
+ { 0x3a, 0x0f00, 0x1c }, /* TP non-context */
+ { 0x38, 0x2000, 0xa }, /* TP 2D context 0 */
+ { 0x36, 0x2400, 0xa }, /* TP 2D context 1 */
+ { 0x39, 0xe700, 0x80 }, /* TP 3D context 0 */
+ { 0x37, 0xef00, 0x80 }, /* TP 3D context 1 */
+};
+
+static void a5xx_gpu_state_get_hlsq_regs(struct msm_gpu *gpu,
+ struct a5xx_gpu_state *a5xx_state)
+{
+ struct a5xx_crashdumper dumper = { 0 };
+ u32 offset, count = 0;
+ u64 *ptr;
+ int i;
+
+ if (a5xx_crashdumper_init(gpu, &dumper))
+ return;
+
+ /* The script will be written at offset 0 */
+ ptr = dumper.ptr;
+
+ /* Start writing the data at offset 256k */
+ offset = dumper.iova + (256 * SZ_1K);
+
+ /* Count how many additional registers to get from the HLSQ aperture */
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++)
+ count += a5xx_hlsq_aperture_regs[i].count;
+
+ a5xx_state->hlsqregs = kcalloc(count, sizeof(u32), GFP_KERNEL);
+ if (!a5xx_state->hlsqregs)
+ return;
+
+ /* Build the crashdump script */
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
+ u32 type = a5xx_hlsq_aperture_regs[i].type;
+ u32 c = a5xx_hlsq_aperture_regs[i].count;
+
+ /* Write the register to select the desired bank */
+ *ptr++ = ((u64) type << 8);
+ *ptr++ = (((u64) REG_A5XX_HLSQ_DBG_READ_SEL) << 44) |
+ (1 << 21) | 1;
+
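+ /* Then stream 'c' dwords from the aperture into the buffer at
+ * 'offset': bits [63:44] of the second qword select the source
+ * register and the low bits give the dword count
+ */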
+ *ptr++ = offset;
+ *ptr++ = (((u64) REG_A5XX_HLSQ_DBG_AHB_READ_APERTURE) << 44)
+ | c;
+
+ offset += c * sizeof(u32);
+ }
+
+ /* Write two zeros to close off the script */
+ *ptr++ = 0;
+ *ptr++ = 0;
+
+ if (a5xx_crashdumper_run(gpu, &dumper)) {
+ kfree(a5xx_state->hlsqregs);
+ a5xx_crashdumper_free(gpu, &dumper);
+ return;
+ }
+
+ /* Copy the data from the crashdumper to the state */
+ memcpy(a5xx_state->hlsqregs, dumper.ptr + (256 * SZ_1K),
+ count * sizeof(u32));
+
+ a5xx_crashdumper_free(gpu, &dumper);
+}
+
+static struct msm_gpu_state *a5xx_gpu_state_get(struct msm_gpu *gpu)
+{
+ struct a5xx_gpu_state *a5xx_state = kzalloc(sizeof(*a5xx_state),
+ GFP_KERNEL);
+
+ if (!a5xx_state)
+ return ERR_PTR(-ENOMEM);
+
+ /* Temporarily disable hardware clock gating before reading the hw */
a5xx_set_hwcg(gpu, false);
- adreno_show(gpu, m);
+
+ /* First get the generic state from the adreno core */
+ adreno_gpu_state_get(gpu, &(a5xx_state->base));
+
+ a5xx_state->base.rbbm_status = gpu_read(gpu, REG_A5XX_RBBM_STATUS);
+
+ /* Get the HLSQ regs with the help of the crashdumper */
+ a5xx_gpu_state_get_hlsq_regs(gpu, a5xx_state);
+
a5xx_set_hwcg(gpu, true);
+
+ return &a5xx_state->base;
+}
+
+static void a5xx_gpu_state_destroy(struct kref *kref)
+{
+ struct msm_gpu_state *state = container_of(kref,
+ struct msm_gpu_state, ref);
+ struct a5xx_gpu_state *a5xx_state = container_of(state,
+ struct a5xx_gpu_state, base);
+
+ kfree(a5xx_state->hlsqregs);
+
+ adreno_gpu_state_destroy(state);
+ kfree(a5xx_state);
+}
+
+int a5xx_gpu_state_put(struct msm_gpu_state *state)
+{
+ if (IS_ERR_OR_NULL(state))
+ return 1;
+
+ return kref_put(&state->ref, a5xx_gpu_state_destroy);
+}
+
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+void a5xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p)
+{
+ int i, j;
+ u32 pos = 0;
+ struct a5xx_gpu_state *a5xx_state;
+
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ a5xx_state = container_of(state, struct a5xx_gpu_state, base);
+
+ adreno_show(gpu, state, p);
+
+ /* Dump the additional a5xx HLSQ registers */
+ if (!a5xx_state->hlsqregs)
+ return;
+
+ drm_printf(p, "registers-hlsq:\n");
+
+ for (i = 0; i < ARRAY_SIZE(a5xx_hlsq_aperture_regs); i++) {
+ u32 o = a5xx_hlsq_aperture_regs[i].regoffset;
+ u32 c = a5xx_hlsq_aperture_regs[i].count;
+
+ for (j = 0; j < c; j++, pos++, o++) {
+ /*
+ * To keep the crashdump simple we pull the entire range
+ * for each register type but not all of the registers
+ * in the range are valid. Fortunately invalid registers
+ * stick out like a sore thumb with a value of
+ * 0xdeadbeef
+ */
+ if (a5xx_state->hlsqregs[pos] == 0xdeadbeef)
+ continue;
+
+ drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
+ o << 2, a5xx_state->hlsqregs[pos]);
+ }
+ }
}
#endif
@@ -1239,11 +1456,15 @@ static const struct adreno_gpu_funcs funcs = {
.active_ring = a5xx_active_ring,
.irq = a5xx_irq,
.destroy = a5xx_destroy,
-#ifdef CONFIG_DEBUG_FS
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
.show = a5xx_show,
+#endif
+#if defined(CONFIG_DEBUG_FS)
.debugfs_init = a5xx_debugfs_init,
#endif
.gpu_busy = a5xx_gpu_busy,
+ .gpu_state_get = a5xx_gpu_state_get,
+ .gpu_state_put = a5xx_gpu_state_put,
},
.get_timestamp = a5xx_get_timestamp,
};
diff --git a/drivers/gpu/drm/msm/adreno/a6xx.xml.h b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
new file mode 100644
index 000000000000..87eab51f7000
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx.xml.h
@@ -0,0 +1,4562 @@
+#ifndef A6XX_XML
+#define A6XX_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+enum a6xx_color_fmt {
+ RB6_A8_UNORM = 2,
+ RB6_R8_UNORM = 3,
+ RB6_R8_SNORM = 4,
+ RB6_R8_UINT = 5,
+ RB6_R8_SINT = 6,
+ RB6_R4G4B4A4_UNORM = 8,
+ RB6_R5G5B5A1_UNORM = 10,
+ RB6_R5G6B5_UNORM = 14,
+ RB6_R8G8_UNORM = 15,
+ RB6_R8G8_SNORM = 16,
+ RB6_R8G8_UINT = 17,
+ RB6_R8G8_SINT = 18,
+ RB6_R16_UNORM = 21,
+ RB6_R16_SNORM = 22,
+ RB6_R16_FLOAT = 23,
+ RB6_R16_UINT = 24,
+ RB6_R16_SINT = 25,
+ RB6_R8G8B8A8_UNORM = 48,
+ RB6_R8G8B8_UNORM = 49,
+ RB6_R8G8B8A8_SNORM = 50,
+ RB6_R8G8B8A8_UINT = 51,
+ RB6_R8G8B8A8_SINT = 52,
+ RB6_R10G10B10A2_UNORM = 55,
+ RB6_R10G10B10A2_UINT = 58,
+ RB6_R11G11B10_FLOAT = 66,
+ RB6_R16G16_UNORM = 67,
+ RB6_R16G16_SNORM = 68,
+ RB6_R16G16_FLOAT = 69,
+ RB6_R16G16_UINT = 70,
+ RB6_R16G16_SINT = 71,
+ RB6_R32_FLOAT = 74,
+ RB6_R32_UINT = 75,
+ RB6_R32_SINT = 76,
+ RB6_R16G16B16A16_UNORM = 96,
+ RB6_R16G16B16A16_SNORM = 97,
+ RB6_R16G16B16A16_FLOAT = 98,
+ RB6_R16G16B16A16_UINT = 99,
+ RB6_R16G16B16A16_SINT = 100,
+ RB6_R32G32_FLOAT = 103,
+ RB6_R32G32_UINT = 104,
+ RB6_R32G32_SINT = 105,
+ RB6_R32G32B32A32_FLOAT = 130,
+ RB6_R32G32B32A32_UINT = 131,
+ RB6_R32G32B32A32_SINT = 132,
+ RB6_X8Z24_UNORM = 160,
+};
+
+enum a6xx_tile_mode {
+ TILE6_LINEAR = 0,
+ TILE6_2 = 2,
+ TILE6_3 = 3,
+};
+
+enum a6xx_vtx_fmt {
+ VFMT6_8_UNORM = 3,
+ VFMT6_8_SNORM = 4,
+ VFMT6_8_UINT = 5,
+ VFMT6_8_SINT = 6,
+ VFMT6_8_8_UNORM = 15,
+ VFMT6_8_8_SNORM = 16,
+ VFMT6_8_8_UINT = 17,
+ VFMT6_8_8_SINT = 18,
+ VFMT6_16_UNORM = 21,
+ VFMT6_16_SNORM = 22,
+ VFMT6_16_FLOAT = 23,
+ VFMT6_16_UINT = 24,
+ VFMT6_16_SINT = 25,
+ VFMT6_8_8_8_UNORM = 33,
+ VFMT6_8_8_8_SNORM = 34,
+ VFMT6_8_8_8_UINT = 35,
+ VFMT6_8_8_8_SINT = 36,
+ VFMT6_8_8_8_8_UNORM = 48,
+ VFMT6_8_8_8_8_SNORM = 50,
+ VFMT6_8_8_8_8_UINT = 51,
+ VFMT6_8_8_8_8_SINT = 52,
+ VFMT6_10_10_10_2_UNORM = 54,
+ VFMT6_10_10_10_2_SNORM = 57,
+ VFMT6_10_10_10_2_UINT = 58,
+ VFMT6_10_10_10_2_SINT = 59,
+ VFMT6_11_11_10_FLOAT = 66,
+ VFMT6_16_16_UNORM = 67,
+ VFMT6_16_16_SNORM = 68,
+ VFMT6_16_16_FLOAT = 69,
+ VFMT6_16_16_UINT = 70,
+ VFMT6_16_16_SINT = 71,
+ VFMT6_32_UNORM = 72,
+ VFMT6_32_SNORM = 73,
+ VFMT6_32_FLOAT = 74,
+ VFMT6_32_UINT = 75,
+ VFMT6_32_SINT = 76,
+ VFMT6_32_FIXED = 77,
+ VFMT6_16_16_16_UNORM = 88,
+ VFMT6_16_16_16_SNORM = 89,
+ VFMT6_16_16_16_FLOAT = 90,
+ VFMT6_16_16_16_UINT = 91,
+ VFMT6_16_16_16_SINT = 92,
+ VFMT6_16_16_16_16_UNORM = 96,
+ VFMT6_16_16_16_16_SNORM = 97,
+ VFMT6_16_16_16_16_FLOAT = 98,
+ VFMT6_16_16_16_16_UINT = 99,
+ VFMT6_16_16_16_16_SINT = 100,
+ VFMT6_32_32_UNORM = 101,
+ VFMT6_32_32_SNORM = 102,
+ VFMT6_32_32_FLOAT = 103,
+ VFMT6_32_32_UINT = 104,
+ VFMT6_32_32_SINT = 105,
+ VFMT6_32_32_FIXED = 106,
+ VFMT6_32_32_32_UNORM = 112,
+ VFMT6_32_32_32_SNORM = 113,
+ VFMT6_32_32_32_UINT = 114,
+ VFMT6_32_32_32_SINT = 115,
+ VFMT6_32_32_32_FLOAT = 116,
+ VFMT6_32_32_32_FIXED = 117,
+ VFMT6_32_32_32_32_UNORM = 128,
+ VFMT6_32_32_32_32_SNORM = 129,
+ VFMT6_32_32_32_32_FLOAT = 130,
+ VFMT6_32_32_32_32_UINT = 131,
+ VFMT6_32_32_32_32_SINT = 132,
+ VFMT6_32_32_32_32_FIXED = 133,
+};
+
+enum a6xx_tex_fmt {
+ TFMT6_A8_UNORM = 2,
+ TFMT6_8_UNORM = 3,
+ TFMT6_8_SNORM = 4,
+ TFMT6_8_UINT = 5,
+ TFMT6_8_SINT = 6,
+ TFMT6_4_4_4_4_UNORM = 8,
+ TFMT6_5_5_5_1_UNORM = 10,
+ TFMT6_5_6_5_UNORM = 14,
+ TFMT6_8_8_UNORM = 15,
+ TFMT6_8_8_SNORM = 16,
+ TFMT6_8_8_UINT = 17,
+ TFMT6_8_8_SINT = 18,
+ TFMT6_L8_A8_UNORM = 19,
+ TFMT6_16_UNORM = 21,
+ TFMT6_16_SNORM = 22,
+ TFMT6_16_FLOAT = 23,
+ TFMT6_16_UINT = 24,
+ TFMT6_16_SINT = 25,
+ TFMT6_8_8_8_8_UNORM = 48,
+ TFMT6_8_8_8_UNORM = 49,
+ TFMT6_8_8_8_8_SNORM = 50,
+ TFMT6_8_8_8_8_UINT = 51,
+ TFMT6_8_8_8_8_SINT = 52,
+ TFMT6_9_9_9_E5_FLOAT = 53,
+ TFMT6_10_10_10_2_UNORM = 54,
+ TFMT6_10_10_10_2_UINT = 58,
+ TFMT6_11_11_10_FLOAT = 66,
+ TFMT6_16_16_UNORM = 67,
+ TFMT6_16_16_SNORM = 68,
+ TFMT6_16_16_FLOAT = 69,
+ TFMT6_16_16_UINT = 70,
+ TFMT6_16_16_SINT = 71,
+ TFMT6_32_FLOAT = 74,
+ TFMT6_32_UINT = 75,
+ TFMT6_32_SINT = 76,
+ TFMT6_16_16_16_16_UNORM = 96,
+ TFMT6_16_16_16_16_SNORM = 97,
+ TFMT6_16_16_16_16_FLOAT = 98,
+ TFMT6_16_16_16_16_UINT = 99,
+ TFMT6_16_16_16_16_SINT = 100,
+ TFMT6_32_32_FLOAT = 103,
+ TFMT6_32_32_UINT = 104,
+ TFMT6_32_32_SINT = 105,
+ TFMT6_32_32_32_UINT = 114,
+ TFMT6_32_32_32_SINT = 115,
+ TFMT6_32_32_32_FLOAT = 116,
+ TFMT6_32_32_32_32_FLOAT = 130,
+ TFMT6_32_32_32_32_UINT = 131,
+ TFMT6_32_32_32_32_SINT = 132,
+ TFMT6_X8Z24_UNORM = 160,
+ TFMT6_ETC2_RG11_UNORM = 171,
+ TFMT6_ETC2_RG11_SNORM = 172,
+ TFMT6_ETC2_R11_UNORM = 173,
+ TFMT6_ETC2_R11_SNORM = 174,
+ TFMT6_ETC1 = 175,
+ TFMT6_ETC2_RGB8 = 176,
+ TFMT6_ETC2_RGBA8 = 177,
+ TFMT6_ETC2_RGB8A1 = 178,
+ TFMT6_DXT1 = 179,
+ TFMT6_DXT3 = 180,
+ TFMT6_DXT5 = 181,
+ TFMT6_RGTC1_UNORM = 183,
+ TFMT6_RGTC1_SNORM = 184,
+ TFMT6_RGTC2_UNORM = 187,
+ TFMT6_RGTC2_SNORM = 188,
+ TFMT6_BPTC_UFLOAT = 190,
+ TFMT6_BPTC_FLOAT = 191,
+ TFMT6_BPTC = 192,
+ TFMT6_ASTC_4x4 = 193,
+ TFMT6_ASTC_5x4 = 194,
+ TFMT6_ASTC_5x5 = 195,
+ TFMT6_ASTC_6x5 = 196,
+ TFMT6_ASTC_6x6 = 197,
+ TFMT6_ASTC_8x5 = 198,
+ TFMT6_ASTC_8x6 = 199,
+ TFMT6_ASTC_8x8 = 200,
+ TFMT6_ASTC_10x5 = 201,
+ TFMT6_ASTC_10x6 = 202,
+ TFMT6_ASTC_10x8 = 203,
+ TFMT6_ASTC_10x10 = 204,
+ TFMT6_ASTC_12x10 = 205,
+ TFMT6_ASTC_12x12 = 206,
+};
+
+enum a6xx_tex_fetchsize {
+ TFETCH6_1_BYTE = 0,
+ TFETCH6_2_BYTE = 1,
+ TFETCH6_4_BYTE = 2,
+ TFETCH6_8_BYTE = 3,
+ TFETCH6_16_BYTE = 4,
+};
+
+enum a6xx_depth_format {
+ DEPTH6_NONE = 0,
+ DEPTH6_16 = 1,
+ DEPTH6_24_8 = 2,
+ DEPTH6_32 = 4,
+};
+
+enum a6xx_cp_perfcounter_select {
+ PERF_CP_ALWAYS_COUNT = 0,
+};
+
+enum a6xx_tex_filter {
+ A6XX_TEX_NEAREST = 0,
+ A6XX_TEX_LINEAR = 1,
+ A6XX_TEX_ANISO = 2,
+};
+
+enum a6xx_tex_clamp {
+ A6XX_TEX_REPEAT = 0,
+ A6XX_TEX_CLAMP_TO_EDGE = 1,
+ A6XX_TEX_MIRROR_REPEAT = 2,
+ A6XX_TEX_CLAMP_TO_BORDER = 3,
+ A6XX_TEX_MIRROR_CLAMP = 4,
+};
+
+enum a6xx_tex_aniso {
+ A6XX_TEX_ANISO_1 = 0,
+ A6XX_TEX_ANISO_2 = 1,
+ A6XX_TEX_ANISO_4 = 2,
+ A6XX_TEX_ANISO_8 = 3,
+ A6XX_TEX_ANISO_16 = 4,
+};
+
+enum a6xx_tex_swiz {
+ A6XX_TEX_X = 0,
+ A6XX_TEX_Y = 1,
+ A6XX_TEX_Z = 2,
+ A6XX_TEX_W = 3,
+ A6XX_TEX_ZERO = 4,
+ A6XX_TEX_ONE = 5,
+};
+
+enum a6xx_tex_type {
+ A6XX_TEX_1D = 0,
+ A6XX_TEX_2D = 1,
+ A6XX_TEX_CUBE = 2,
+ A6XX_TEX_3D = 3,
+};
+
+#define A6XX_RBBM_INT_0_MASK_RBBM_GPU_IDLE 0x00000001
+#define A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR 0x00000002
+#define A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW 0x00000040
+#define A6XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR 0x00000080
+#define A6XX_RBBM_INT_0_MASK_CP_SW 0x00000100
+#define A6XX_RBBM_INT_0_MASK_CP_HW_ERROR 0x00000200
+#define A6XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_DEPTH_TS 0x00000400
+#define A6XX_RBBM_INT_0_MASK_CP_CCU_FLUSH_COLOR_TS 0x00000800
+#define A6XX_RBBM_INT_0_MASK_CP_CCU_RESOLVE_TS 0x00001000
+#define A6XX_RBBM_INT_0_MASK_CP_IB2 0x00002000
+#define A6XX_RBBM_INT_0_MASK_CP_IB1 0x00004000
+#define A6XX_RBBM_INT_0_MASK_CP_RB 0x00008000
+#define A6XX_RBBM_INT_0_MASK_CP_RB_DONE_TS 0x00020000
+#define A6XX_RBBM_INT_0_MASK_CP_WT_DONE_TS 0x00040000
+#define A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS 0x00100000
+#define A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW 0x00400000
+#define A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT 0x00800000
+#define A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS 0x01000000
+#define A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR 0x02000000
+#define A6XX_RBBM_INT_0_MASK_DEBBUS_INTR_0 0x04000000
+#define A6XX_RBBM_INT_0_MASK_DEBBUS_INTR_1 0x08000000
+#define A6XX_RBBM_INT_0_MASK_ISDB_CPU_IRQ 0x40000000
+#define A6XX_RBBM_INT_0_MASK_ISDB_UNDER_DEBUG 0x80000000
+#define A6XX_CP_INT_CP_OPCODE_ERROR 0x00000001
+#define A6XX_CP_INT_CP_UCODE_ERROR 0x00000002
+#define A6XX_CP_INT_CP_HW_FAULT_ERROR 0x00000004
+#define A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR 0x00000010
+#define A6XX_CP_INT_CP_AHB_ERROR 0x00000020
+#define A6XX_CP_INT_CP_VSD_PARITY_ERROR 0x00000040
+#define A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR 0x00000080
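The interrupt bits above are typically OR'd into a single unmask value written to REG_A6XX_RBBM_INT_0_MASK. A sketch of a plausible error-focused mask (purely illustrative, not the driver's actual selection):

#define A6XX_INT_MASK_EXAMPLE ( \
	A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
	A6XX_RBBM_INT_0_MASK_RBBM_GPC_ERROR | \
	A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
	A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
	A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT)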
+#define REG_A6XX_CP_RB_BASE 0x00000800
+
+#define REG_A6XX_CP_RB_BASE_HI 0x00000801
+
+#define REG_A6XX_CP_RB_CNTL 0x00000802
+
+#define REG_A6XX_CP_RB_RPTR_ADDR_LO 0x00000804
+
+#define REG_A6XX_CP_RB_RPTR_ADDR_HI 0x00000805
+
+#define REG_A6XX_CP_RB_RPTR 0x00000806
+
+#define REG_A6XX_CP_RB_WPTR 0x00000807
+
+#define REG_A6XX_CP_SQE_CNTL 0x00000808
+
+#define REG_A6XX_CP_HW_FAULT 0x00000821
+
+#define REG_A6XX_CP_INTERRUPT_STATUS 0x00000823
+
+#define REG_A6XX_CP_PROTECT_STATUS 0x00000824
+
+#define REG_A6XX_CP_SQE_INSTR_BASE_LO 0x00000830
+
+#define REG_A6XX_CP_SQE_INSTR_BASE_HI 0x00000831
+
+#define REG_A6XX_CP_MISC_CNTL 0x00000840
+
+#define REG_A6XX_CP_ROQ_THRESHOLDS_1 0x000008c1
+
+#define REG_A6XX_CP_ROQ_THRESHOLDS_2 0x000008c2
+
+#define REG_A6XX_CP_MEM_POOL_SIZE 0x000008c3
+
+#define REG_A6XX_CP_CHICKEN_DBG 0x00000841
+
+#define REG_A6XX_CP_ADDR_MODE_CNTL 0x00000842
+
+#define REG_A6XX_CP_DBG_ECO_CNTL 0x00000843
+
+#define REG_A6XX_CP_PROTECT_CNTL 0x0000084f
+
+static inline uint32_t REG_A6XX_CP_SCRATCH(uint32_t i0) { return 0x00000883 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_CP_SCRATCH_REG(uint32_t i0) { return 0x00000883 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_CP_PROTECT(uint32_t i0) { return 0x00000850 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_CP_PROTECT_REG(uint32_t i0) { return 0x00000850 + 0x1*i0; }
+#define A6XX_CP_PROTECT_REG_BASE_ADDR__MASK 0x0003ffff
+#define A6XX_CP_PROTECT_REG_BASE_ADDR__SHIFT 0
+static inline uint32_t A6XX_CP_PROTECT_REG_BASE_ADDR(uint32_t val)
+{
+ return ((val) << A6XX_CP_PROTECT_REG_BASE_ADDR__SHIFT) & A6XX_CP_PROTECT_REG_BASE_ADDR__MASK;
+}
+#define A6XX_CP_PROTECT_REG_MASK_LEN__MASK 0x7ffc0000
+#define A6XX_CP_PROTECT_REG_MASK_LEN__SHIFT 18
+static inline uint32_t A6XX_CP_PROTECT_REG_MASK_LEN(uint32_t val)
+{
+ return ((val) << A6XX_CP_PROTECT_REG_MASK_LEN__SHIFT) & A6XX_CP_PROTECT_REG_MASK_LEN__MASK;
+}
+#define A6XX_CP_PROTECT_REG_READ 0x80000000
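A CP protect-table entry combines a base register, a length field and the read-allow bit; a sketch, under the assumption that MASK_LEN takes a raw length value (the exact length encoding is not documented here):

	/* Hypothetical entry: guard 0x100 dwords starting at register 0x8000,
	 * leaving reads permitted. (The MASK_LEN encoding is an assumption.)
	 */
	uint32_t entry = A6XX_CP_PROTECT_REG_BASE_ADDR(0x8000) |
			 A6XX_CP_PROTECT_REG_MASK_LEN(0x100) |
			 A6XX_CP_PROTECT_REG_READ;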
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_CNTL 0x000008a0
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_LO 0x000008a1
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_SMMU_INFO_HI 0x000008a2
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_LO 0x000008a3
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_NON_SECURE_RESTORE_ADDR_HI 0x000008a4
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_LO 0x000008a5
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_PRIV_SECURE_RESTORE_ADDR_HI 0x000008a6
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_LO 0x000008a7
+
+#define REG_A6XX_CP_CONTEXT_SWITCH_NON_PRIV_RESTORE_ADDR_HI 0x000008a8
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_0 0x000008d0
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_1 0x000008d1
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_2 0x000008d2
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_3 0x000008d3
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_4 0x000008d4
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_5 0x000008d5
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_6 0x000008d6
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_7 0x000008d7
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_8 0x000008d8
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_9 0x000008d9
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_10 0x000008da
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_11 0x000008db
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_12 0x000008dc
+
+#define REG_A6XX_CP_PERFCTR_CP_SEL_13 0x000008dd
+
+#define REG_A6XX_CP_CRASH_SCRIPT_BASE_LO 0x00000900
+
+#define REG_A6XX_CP_CRASH_SCRIPT_BASE_HI 0x00000901
+
+#define REG_A6XX_CP_CRASH_DUMP_CNTL 0x00000902
+
+#define REG_A6XX_CP_CRASH_DUMP_STATUS 0x00000903
+
+#define REG_A6XX_CP_SQE_STAT_ADDR 0x00000908
+
+#define REG_A6XX_CP_SQE_STAT_DATA 0x00000909
+
+#define REG_A6XX_CP_DRAW_STATE_ADDR 0x0000090a
+
+#define REG_A6XX_CP_DRAW_STATE_DATA 0x0000090b
+
+#define REG_A6XX_CP_ROQ_DBG_ADDR 0x0000090c
+
+#define REG_A6XX_CP_ROQ_DBG_DATA 0x0000090d
+
+#define REG_A6XX_CP_MEM_POOL_DBG_ADDR 0x0000090e
+
+#define REG_A6XX_CP_MEM_POOL_DBG_DATA 0x0000090f
+
+#define REG_A6XX_CP_SQE_UCODE_DBG_ADDR 0x00000910
+
+#define REG_A6XX_CP_SQE_UCODE_DBG_DATA 0x00000911
+
+#define REG_A6XX_CP_IB1_BASE 0x00000928
+
+#define REG_A6XX_CP_IB1_BASE_HI 0x00000929
+
+#define REG_A6XX_CP_IB1_REM_SIZE 0x0000092a
+
+#define REG_A6XX_CP_IB2_BASE 0x0000092b
+
+#define REG_A6XX_CP_IB2_BASE_HI 0x0000092c
+
+#define REG_A6XX_CP_IB2_REM_SIZE 0x0000092d
+
+#define REG_A6XX_CP_ALWAYS_ON_COUNTER_LO 0x00000980
+
+#define REG_A6XX_CP_ALWAYS_ON_COUNTER_HI 0x00000981
+
+#define REG_A6XX_CP_AHB_CNTL 0x0000098d
+
+#define REG_A6XX_CP_APERTURE_CNTL_HOST 0x00000a00
+
+#define REG_A6XX_CP_APERTURE_CNTL_CD 0x00000a03
+
+#define REG_A6XX_VSC_ADDR_MODE_CNTL 0x00000c01
+
+#define REG_A6XX_RBBM_INT_0_STATUS 0x00000201
+
+#define REG_A6XX_RBBM_STATUS 0x00000210
+#define A6XX_RBBM_STATUS_GPU_BUSY_IGN_AHB 0x00800000
+#define A6XX_RBBM_STATUS_GPU_BUSY_IGN_AHB_CP 0x00400000
+#define A6XX_RBBM_STATUS_HLSQ_BUSY 0x00200000
+#define A6XX_RBBM_STATUS_VSC_BUSY 0x00100000
+#define A6XX_RBBM_STATUS_TPL1_BUSY 0x00080000
+#define A6XX_RBBM_STATUS_SP_BUSY 0x00040000
+#define A6XX_RBBM_STATUS_UCHE_BUSY 0x00020000
+#define A6XX_RBBM_STATUS_VPC_BUSY 0x00010000
+#define A6XX_RBBM_STATUS_VFD_BUSY 0x00008000
+#define A6XX_RBBM_STATUS_TESS_BUSY 0x00004000
+#define A6XX_RBBM_STATUS_PC_VSD_BUSY 0x00002000
+#define A6XX_RBBM_STATUS_PC_DCALL_BUSY 0x00001000
+#define A6XX_RBBM_STATUS_COM_DCOM_BUSY 0x00000800
+#define A6XX_RBBM_STATUS_LRZ_BUSY 0x00000400
+#define A6XX_RBBM_STATUS_A2D_BUSY 0x00000200
+#define A6XX_RBBM_STATUS_CCU_BUSY 0x00000100
+#define A6XX_RBBM_STATUS_RB_BUSY 0x00000080
+#define A6XX_RBBM_STATUS_RAS_BUSY 0x00000040
+#define A6XX_RBBM_STATUS_TSE_BUSY 0x00000020
+#define A6XX_RBBM_STATUS_VBIF_BUSY 0x00000010
+#define A6XX_RBBM_STATUS_GFX_DBGC_BUSY 0x00000008
+#define A6XX_RBBM_STATUS_CP_BUSY 0x00000004
+#define A6XX_RBBM_STATUS_CP_AHB_BUSY_CP_MASTER 0x00000002
+#define A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER 0x00000001
+
+#define REG_A6XX_RBBM_STATUS3 0x00000213
+
+#define REG_A6XX_RBBM_VBIF_GX_RESET_STATUS 0x00000215
+
+#define REG_A6XX_RBBM_PERFCTR_CP_0_LO 0x00000400
+
+#define REG_A6XX_RBBM_PERFCTR_CP_0_HI 0x00000401
+
+#define REG_A6XX_RBBM_PERFCTR_CP_1_LO 0x00000402
+
+#define REG_A6XX_RBBM_PERFCTR_CP_1_HI 0x00000403
+
+#define REG_A6XX_RBBM_PERFCTR_CP_2_LO 0x00000404
+
+#define REG_A6XX_RBBM_PERFCTR_CP_2_HI 0x00000405
+
+#define REG_A6XX_RBBM_PERFCTR_CP_3_LO 0x00000406
+
+#define REG_A6XX_RBBM_PERFCTR_CP_3_HI 0x00000407
+
+#define REG_A6XX_RBBM_PERFCTR_CP_4_LO 0x00000408
+
+#define REG_A6XX_RBBM_PERFCTR_CP_4_HI 0x00000409
+
+#define REG_A6XX_RBBM_PERFCTR_CP_5_LO 0x0000040a
+
+#define REG_A6XX_RBBM_PERFCTR_CP_5_HI 0x0000040b
+
+#define REG_A6XX_RBBM_PERFCTR_CP_6_LO 0x0000040c
+
+#define REG_A6XX_RBBM_PERFCTR_CP_6_HI 0x0000040d
+
+#define REG_A6XX_RBBM_PERFCTR_CP_7_LO 0x0000040e
+
+#define REG_A6XX_RBBM_PERFCTR_CP_7_HI 0x0000040f
+
+#define REG_A6XX_RBBM_PERFCTR_CP_8_LO 0x00000410
+
+#define REG_A6XX_RBBM_PERFCTR_CP_8_HI 0x00000411
+
+#define REG_A6XX_RBBM_PERFCTR_CP_9_LO 0x00000412
+
+#define REG_A6XX_RBBM_PERFCTR_CP_9_HI 0x00000413
+
+#define REG_A6XX_RBBM_PERFCTR_CP_10_LO 0x00000414
+
+#define REG_A6XX_RBBM_PERFCTR_CP_10_HI 0x00000415
+
+#define REG_A6XX_RBBM_PERFCTR_CP_11_LO 0x00000416
+
+#define REG_A6XX_RBBM_PERFCTR_CP_11_HI 0x00000417
+
+#define REG_A6XX_RBBM_PERFCTR_CP_12_LO 0x00000418
+
+#define REG_A6XX_RBBM_PERFCTR_CP_12_HI 0x00000419
+
+#define REG_A6XX_RBBM_PERFCTR_CP_13_LO 0x0000041a
+
+#define REG_A6XX_RBBM_PERFCTR_CP_13_HI 0x0000041b
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_0_LO 0x0000041c
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_0_HI 0x0000041d
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_1_LO 0x0000041e
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_1_HI 0x0000041f
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_2_LO 0x00000420
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_2_HI 0x00000421
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_3_LO 0x00000422
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_3_HI 0x00000423
+
+#define REG_A6XX_RBBM_PERFCTR_PC_0_LO 0x00000424
+
+#define REG_A6XX_RBBM_PERFCTR_PC_0_HI 0x00000425
+
+#define REG_A6XX_RBBM_PERFCTR_PC_1_LO 0x00000426
+
+#define REG_A6XX_RBBM_PERFCTR_PC_1_HI 0x00000427
+
+#define REG_A6XX_RBBM_PERFCTR_PC_2_LO 0x00000428
+
+#define REG_A6XX_RBBM_PERFCTR_PC_2_HI 0x00000429
+
+#define REG_A6XX_RBBM_PERFCTR_PC_3_LO 0x0000042a
+
+#define REG_A6XX_RBBM_PERFCTR_PC_3_HI 0x0000042b
+
+#define REG_A6XX_RBBM_PERFCTR_PC_4_LO 0x0000042c
+
+#define REG_A6XX_RBBM_PERFCTR_PC_4_HI 0x0000042d
+
+#define REG_A6XX_RBBM_PERFCTR_PC_5_LO 0x0000042e
+
+#define REG_A6XX_RBBM_PERFCTR_PC_5_HI 0x0000042f
+
+#define REG_A6XX_RBBM_PERFCTR_PC_6_LO 0x00000430
+
+#define REG_A6XX_RBBM_PERFCTR_PC_6_HI 0x00000431
+
+#define REG_A6XX_RBBM_PERFCTR_PC_7_LO 0x00000432
+
+#define REG_A6XX_RBBM_PERFCTR_PC_7_HI 0x00000433
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_0_LO 0x00000434
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_0_HI 0x00000435
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_1_LO 0x00000436
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_1_HI 0x00000437
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_2_LO 0x00000438
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_2_HI 0x00000439
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_3_LO 0x0000043a
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_3_HI 0x0000043b
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_4_LO 0x0000043c
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_4_HI 0x0000043d
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_5_LO 0x0000043e
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_5_HI 0x0000043f
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_6_LO 0x00000440
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_6_HI 0x00000441
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_7_LO 0x00000442
+
+#define REG_A6XX_RBBM_PERFCTR_VFD_7_HI 0x00000443
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_0_LO 0x00000444
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_0_HI 0x00000445
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_1_LO 0x00000446
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_1_HI 0x00000447
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_2_LO 0x00000448
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_2_HI 0x00000449
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_3_LO 0x0000044a
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_3_HI 0x0000044b
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_4_LO 0x0000044c
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_4_HI 0x0000044d
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_5_LO 0x0000044e
+
+#define REG_A6XX_RBBM_PERFCTR_HLSQ_5_HI 0x0000044f
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_0_LO 0x00000450
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_0_HI 0x00000451
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_1_LO 0x00000452
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_1_HI 0x00000453
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_2_LO 0x00000454
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_2_HI 0x00000455
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_3_LO 0x00000456
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_3_HI 0x00000457
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_4_LO 0x00000458
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_4_HI 0x00000459
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_5_LO 0x0000045a
+
+#define REG_A6XX_RBBM_PERFCTR_VPC_5_HI 0x0000045b
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_0_LO 0x0000045c
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_0_HI 0x0000045d
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_1_LO 0x0000045e
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_1_HI 0x0000045f
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_2_LO 0x00000460
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_2_HI 0x00000461
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_3_LO 0x00000462
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_3_HI 0x00000463
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_4_LO 0x00000464
+
+#define REG_A6XX_RBBM_PERFCTR_CCU_4_HI 0x00000465
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_0_LO 0x00000466
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_0_HI 0x00000467
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_1_LO 0x00000468
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_1_HI 0x00000469
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_2_LO 0x0000046a
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_2_HI 0x0000046b
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_3_LO 0x0000046c
+
+#define REG_A6XX_RBBM_PERFCTR_TSE_3_HI 0x0000046d
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_0_LO 0x0000046e
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_0_HI 0x0000046f
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_1_LO 0x00000470
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_1_HI 0x00000471
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_2_LO 0x00000472
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_2_HI 0x00000473
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_3_LO 0x00000474
+
+#define REG_A6XX_RBBM_PERFCTR_RAS_3_HI 0x00000475
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_0_LO 0x00000476
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_0_HI 0x00000477
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_1_LO 0x00000478
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_1_HI 0x00000479
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_2_LO 0x0000047a
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_2_HI 0x0000047b
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_3_LO 0x0000047c
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_3_HI 0x0000047d
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_4_LO 0x0000047e
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_4_HI 0x0000047f
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_5_LO 0x00000480
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_5_HI 0x00000481
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_6_LO 0x00000482
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_6_HI 0x00000483
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_7_LO 0x00000484
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_7_HI 0x00000485
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_8_LO 0x00000486
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_8_HI 0x00000487
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_9_LO 0x00000488
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_9_HI 0x00000489
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_10_LO 0x0000048a
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_10_HI 0x0000048b
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_11_LO 0x0000048c
+
+#define REG_A6XX_RBBM_PERFCTR_UCHE_11_HI 0x0000048d
+
+#define REG_A6XX_RBBM_PERFCTR_TP_0_LO 0x0000048e
+
+#define REG_A6XX_RBBM_PERFCTR_TP_0_HI 0x0000048f
+
+#define REG_A6XX_RBBM_PERFCTR_TP_1_LO 0x00000490
+
+#define REG_A6XX_RBBM_PERFCTR_TP_1_HI 0x00000491
+
+#define REG_A6XX_RBBM_PERFCTR_TP_2_LO 0x00000492
+
+#define REG_A6XX_RBBM_PERFCTR_TP_2_HI 0x00000493
+
+#define REG_A6XX_RBBM_PERFCTR_TP_3_LO 0x00000494
+
+#define REG_A6XX_RBBM_PERFCTR_TP_3_HI 0x00000495
+
+#define REG_A6XX_RBBM_PERFCTR_TP_4_LO 0x00000496
+
+#define REG_A6XX_RBBM_PERFCTR_TP_4_HI 0x00000497
+
+#define REG_A6XX_RBBM_PERFCTR_TP_5_LO 0x00000498
+
+#define REG_A6XX_RBBM_PERFCTR_TP_5_HI 0x00000499
+
+#define REG_A6XX_RBBM_PERFCTR_TP_6_LO 0x0000049a
+
+#define REG_A6XX_RBBM_PERFCTR_TP_6_HI 0x0000049b
+
+#define REG_A6XX_RBBM_PERFCTR_TP_7_LO 0x0000049c
+
+#define REG_A6XX_RBBM_PERFCTR_TP_7_HI 0x0000049d
+
+#define REG_A6XX_RBBM_PERFCTR_TP_8_LO 0x0000049e
+
+#define REG_A6XX_RBBM_PERFCTR_TP_8_HI 0x0000049f
+
+#define REG_A6XX_RBBM_PERFCTR_TP_9_LO 0x000004a0
+
+#define REG_A6XX_RBBM_PERFCTR_TP_9_HI 0x000004a1
+
+#define REG_A6XX_RBBM_PERFCTR_TP_10_LO 0x000004a2
+
+#define REG_A6XX_RBBM_PERFCTR_TP_10_HI 0x000004a3
+
+#define REG_A6XX_RBBM_PERFCTR_TP_11_LO 0x000004a4
+
+#define REG_A6XX_RBBM_PERFCTR_TP_11_HI 0x000004a5
+
+#define REG_A6XX_RBBM_PERFCTR_SP_0_LO 0x000004a6
+
+#define REG_A6XX_RBBM_PERFCTR_SP_0_HI 0x000004a7
+
+#define REG_A6XX_RBBM_PERFCTR_SP_1_LO 0x000004a8
+
+#define REG_A6XX_RBBM_PERFCTR_SP_1_HI 0x000004a9
+
+#define REG_A6XX_RBBM_PERFCTR_SP_2_LO 0x000004aa
+
+#define REG_A6XX_RBBM_PERFCTR_SP_2_HI 0x000004ab
+
+#define REG_A6XX_RBBM_PERFCTR_SP_3_LO 0x000004ac
+
+#define REG_A6XX_RBBM_PERFCTR_SP_3_HI 0x000004ad
+
+#define REG_A6XX_RBBM_PERFCTR_SP_4_LO 0x000004ae
+
+#define REG_A6XX_RBBM_PERFCTR_SP_4_HI 0x000004af
+
+#define REG_A6XX_RBBM_PERFCTR_SP_5_LO 0x000004b0
+
+#define REG_A6XX_RBBM_PERFCTR_SP_5_HI 0x000004b1
+
+#define REG_A6XX_RBBM_PERFCTR_SP_6_LO 0x000004b2
+
+#define REG_A6XX_RBBM_PERFCTR_SP_6_HI 0x000004b3
+
+#define REG_A6XX_RBBM_PERFCTR_SP_7_LO 0x000004b4
+
+#define REG_A6XX_RBBM_PERFCTR_SP_7_HI 0x000004b5
+
+#define REG_A6XX_RBBM_PERFCTR_SP_8_LO 0x000004b6
+
+#define REG_A6XX_RBBM_PERFCTR_SP_8_HI 0x000004b7
+
+#define REG_A6XX_RBBM_PERFCTR_SP_9_LO 0x000004b8
+
+#define REG_A6XX_RBBM_PERFCTR_SP_9_HI 0x000004b9
+
+#define REG_A6XX_RBBM_PERFCTR_SP_10_LO 0x000004ba
+
+#define REG_A6XX_RBBM_PERFCTR_SP_10_HI 0x000004bb
+
+#define REG_A6XX_RBBM_PERFCTR_SP_11_LO 0x000004bc
+
+#define REG_A6XX_RBBM_PERFCTR_SP_11_HI 0x000004bd
+
+#define REG_A6XX_RBBM_PERFCTR_SP_12_LO 0x000004be
+
+#define REG_A6XX_RBBM_PERFCTR_SP_12_HI 0x000004bf
+
+#define REG_A6XX_RBBM_PERFCTR_SP_13_LO 0x000004c0
+
+#define REG_A6XX_RBBM_PERFCTR_SP_13_HI 0x000004c1
+
+#define REG_A6XX_RBBM_PERFCTR_SP_14_LO 0x000004c2
+
+#define REG_A6XX_RBBM_PERFCTR_SP_14_HI 0x000004c3
+
+#define REG_A6XX_RBBM_PERFCTR_SP_15_LO 0x000004c4
+
+#define REG_A6XX_RBBM_PERFCTR_SP_15_HI 0x000004c5
+
+#define REG_A6XX_RBBM_PERFCTR_SP_16_LO 0x000004c6
+
+#define REG_A6XX_RBBM_PERFCTR_SP_16_HI 0x000004c7
+
+#define REG_A6XX_RBBM_PERFCTR_SP_17_LO 0x000004c8
+
+#define REG_A6XX_RBBM_PERFCTR_SP_17_HI 0x000004c9
+
+#define REG_A6XX_RBBM_PERFCTR_SP_18_LO 0x000004ca
+
+#define REG_A6XX_RBBM_PERFCTR_SP_18_HI 0x000004cb
+
+#define REG_A6XX_RBBM_PERFCTR_SP_19_LO 0x000004cc
+
+#define REG_A6XX_RBBM_PERFCTR_SP_19_HI 0x000004cd
+
+#define REG_A6XX_RBBM_PERFCTR_SP_20_LO 0x000004ce
+
+#define REG_A6XX_RBBM_PERFCTR_SP_20_HI 0x000004cf
+
+#define REG_A6XX_RBBM_PERFCTR_SP_21_LO 0x000004d0
+
+#define REG_A6XX_RBBM_PERFCTR_SP_21_HI 0x000004d1
+
+#define REG_A6XX_RBBM_PERFCTR_SP_22_LO 0x000004d2
+
+#define REG_A6XX_RBBM_PERFCTR_SP_22_HI 0x000004d3
+
+#define REG_A6XX_RBBM_PERFCTR_SP_23_LO 0x000004d4
+
+#define REG_A6XX_RBBM_PERFCTR_SP_23_HI 0x000004d5
+
+#define REG_A6XX_RBBM_PERFCTR_RB_0_LO 0x000004d6
+
+#define REG_A6XX_RBBM_PERFCTR_RB_0_HI 0x000004d7
+
+#define REG_A6XX_RBBM_PERFCTR_RB_1_LO 0x000004d8
+
+#define REG_A6XX_RBBM_PERFCTR_RB_1_HI 0x000004d9
+
+#define REG_A6XX_RBBM_PERFCTR_RB_2_LO 0x000004da
+
+#define REG_A6XX_RBBM_PERFCTR_RB_2_HI 0x000004db
+
+#define REG_A6XX_RBBM_PERFCTR_RB_3_LO 0x000004dc
+
+#define REG_A6XX_RBBM_PERFCTR_RB_3_HI 0x000004dd
+
+#define REG_A6XX_RBBM_PERFCTR_RB_4_LO 0x000004de
+
+#define REG_A6XX_RBBM_PERFCTR_RB_4_HI 0x000004df
+
+#define REG_A6XX_RBBM_PERFCTR_RB_5_LO 0x000004e0
+
+#define REG_A6XX_RBBM_PERFCTR_RB_5_HI 0x000004e1
+
+#define REG_A6XX_RBBM_PERFCTR_RB_6_LO 0x000004e2
+
+#define REG_A6XX_RBBM_PERFCTR_RB_6_HI 0x000004e3
+
+#define REG_A6XX_RBBM_PERFCTR_RB_7_LO 0x000004e4
+
+#define REG_A6XX_RBBM_PERFCTR_RB_7_HI 0x000004e5
+
+#define REG_A6XX_RBBM_PERFCTR_VSC_0_LO 0x000004e6
+
+#define REG_A6XX_RBBM_PERFCTR_VSC_0_HI 0x000004e7
+
+#define REG_A6XX_RBBM_PERFCTR_VSC_1_LO 0x000004e8
+
+#define REG_A6XX_RBBM_PERFCTR_VSC_1_HI 0x000004e9
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_0_LO 0x000004ea
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_0_HI 0x000004eb
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_1_LO 0x000004ec
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_1_HI 0x000004ed
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_2_LO 0x000004ee
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_2_HI 0x000004ef
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_3_LO 0x000004f0
+
+#define REG_A6XX_RBBM_PERFCTR_LRZ_3_HI 0x000004f1
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_0_LO 0x000004f2
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_0_HI 0x000004f3
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_1_LO 0x000004f4
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_1_HI 0x000004f5
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_2_LO 0x000004f6
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_2_HI 0x000004f7
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_3_LO 0x000004f8
+
+#define REG_A6XX_RBBM_PERFCTR_CMP_3_HI 0x000004f9
+
+#define REG_A6XX_RBBM_PERFCTR_CNTL 0x00000500
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD0 0x00000501
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD1 0x00000502
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD2 0x00000503
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_CMD3 0x00000504
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_VALUE_LO 0x00000505
+
+#define REG_A6XX_RBBM_PERFCTR_LOAD_VALUE_HI 0x00000506
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_SEL_0 0x00000507
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_SEL_1 0x00000508
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_SEL_2 0x00000509
+
+#define REG_A6XX_RBBM_PERFCTR_RBBM_SEL_3 0x0000050a
+
+#define REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED 0x0000050b
+
+#define REG_A6XX_RBBM_ISDB_CNT 0x00000533
+
+#define REG_A6XX_RBBM_SECVID_TRUST_CNTL 0x0000f400
+
+#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO 0x0000f800
+
+#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI 0x0000f801
+
+#define REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE 0x0000f802
+
+#define REG_A6XX_RBBM_SECVID_TSB_CNTL 0x0000f803
+
+#define REG_A6XX_RBBM_SECVID_TSB_ADDR_MODE_CNTL 0x0000f810
+
+#define REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL 0x00000010
+
+#define REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL 0x0000001f
+
+#define REG_A6XX_RBBM_INT_CLEAR_CMD 0x00000037
+
+#define REG_A6XX_RBBM_INT_0_MASK 0x00000038
+
+#define REG_A6XX_RBBM_SP_HYST_CNT 0x00000042
+
+#define REG_A6XX_RBBM_SW_RESET_CMD 0x00000043
+
+#define REG_A6XX_RBBM_RAC_THRESHOLD_CNT 0x00000044
+
+#define REG_A6XX_RBBM_BLOCK_SW_RESET_CMD 0x00000045
+
+#define REG_A6XX_RBBM_BLOCK_SW_RESET_CMD2 0x00000046
+
+#define REG_A6XX_RBBM_CLOCK_CNTL 0x000000ae
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP0 0x000000b0
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP1 0x000000b1
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP2 0x000000b2
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_SP3 0x000000b3
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP0 0x000000b4
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP1 0x000000b5
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP2 0x000000b6
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_SP3 0x000000b7
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP0 0x000000b8
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP1 0x000000b9
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP2 0x000000ba
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_SP3 0x000000bb
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP0 0x000000bc
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP1 0x000000bd
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP2 0x000000be
+
+#define REG_A6XX_RBBM_CLOCK_HYST_SP3 0x000000bf
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP0 0x000000c0
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP1 0x000000c1
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP2 0x000000c2
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TP3 0x000000c3
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP0 0x000000c4
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP1 0x000000c5
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP2 0x000000c6
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_TP3 0x000000c7
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP0 0x000000c8
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP1 0x000000c9
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP2 0x000000ca
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_TP3 0x000000cb
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP0 0x000000cc
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP1 0x000000cd
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP2 0x000000ce
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_TP3 0x000000cf
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP0 0x000000d0
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP1 0x000000d1
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP2 0x000000d2
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TP3 0x000000d3
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP0 0x000000d4
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP1 0x000000d5
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP2 0x000000d6
+
+#define REG_A6XX_RBBM_CLOCK_DELAY2_TP3 0x000000d7
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP0 0x000000d8
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP1 0x000000d9
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP2 0x000000da
+
+#define REG_A6XX_RBBM_CLOCK_DELAY3_TP3 0x000000db
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP0 0x000000dc
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP1 0x000000dd
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP2 0x000000de
+
+#define REG_A6XX_RBBM_CLOCK_DELAY4_TP3 0x000000df
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP0 0x000000e0
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP1 0x000000e1
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP2 0x000000e2
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TP3 0x000000e3
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP0 0x000000e4
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP1 0x000000e5
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP2 0x000000e6
+
+#define REG_A6XX_RBBM_CLOCK_HYST2_TP3 0x000000e7
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP0 0x000000e8
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP1 0x000000e9
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP2 0x000000ea
+
+#define REG_A6XX_RBBM_CLOCK_HYST3_TP3 0x000000eb
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP0 0x000000ec
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP1 0x000000ed
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP2 0x000000ee
+
+#define REG_A6XX_RBBM_CLOCK_HYST4_TP3 0x000000ef
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB0 0x000000f0
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB1 0x000000f1
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB2 0x000000f2
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RB3 0x000000f3
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB0 0x000000f4
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB1 0x000000f5
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB2 0x000000f6
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RB3 0x000000f7
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU0 0x000000f8
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU1 0x000000f9
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU2 0x000000fa
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_CCU3 0x000000fb
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0 0x00000100
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1 0x00000101
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2 0x00000102
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3 0x00000103
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_RAC 0x00000104
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_RAC 0x00000105
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_RAC 0x00000106
+
+#define REG_A6XX_RBBM_CLOCK_HYST_RAC 0x00000107
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM 0x00000108
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM 0x00000109
+
+#define REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM 0x0000010a
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_UCHE 0x0000010b
+
+#define REG_A6XX_RBBM_CLOCK_CNTL2_UCHE 0x0000010c
+
+#define REG_A6XX_RBBM_CLOCK_CNTL3_UCHE 0x0000010d
+
+#define REG_A6XX_RBBM_CLOCK_CNTL4_UCHE 0x0000010e
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_UCHE 0x0000010f
+
+#define REG_A6XX_RBBM_CLOCK_HYST_UCHE 0x00000110
+
+#define REG_A6XX_RBBM_CLOCK_MODE_VFD 0x00000111
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_VFD 0x00000112
+
+#define REG_A6XX_RBBM_CLOCK_HYST_VFD 0x00000113
+
+#define REG_A6XX_RBBM_CLOCK_MODE_GPC 0x00000114
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_GPC 0x00000115
+
+#define REG_A6XX_RBBM_CLOCK_HYST_GPC 0x00000116
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2 0x00000117
+
+#define REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX 0x00000118
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX 0x00000119
+
+#define REG_A6XX_RBBM_CLOCK_HYST_GMU_GX 0x0000011a
+
+#define REG_A6XX_RBBM_CLOCK_MODE_HLSQ 0x0000011b
+
+#define REG_A6XX_RBBM_CLOCK_DELAY_HLSQ 0x0000011c
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_A 0x00000600
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_B 0x00000601
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_C 0x00000602
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_SEL_D 0x00000603
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK 0x000000ff
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT) & A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK 0x0000ff00
+#define A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT 8
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT) & A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK;
+}
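+
+/*
+ * Editorial note: every field in this generated header follows the same
+ * pattern seen above: a __MASK/__SHIFT pair plus an inline packing helper
+ * that shifts the value into place and masks off overflow. A minimal
+ * illustrative sketch of composing a register value (gpu_write() as in the
+ * msm driver is assumed; the index/block values are placeholders, not taken
+ * from this file):
+ *
+ *	gpu_write(gpu, REG_A6XX_DBGC_CFG_DBGBUS_SEL_D,
+ *		  A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(0) |
+ *		  A6XX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(blk));
+ */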
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_CNTLT 0x00000604
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK 0x0000003f
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK 0x00007000
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT 12
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK 0xf0000000
+#define A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT 28
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_CNTLM 0x00000605
+#define A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000
+#define A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A6XX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_0 0x00000608
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_1 0x00000609
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_2 0x0000060a
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_IVTL_3 0x0000060b
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_0 0x0000060c
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_1 0x0000060d
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_2 0x0000060e
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_MASKL_3 0x0000060f
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_0 0x00000610
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK 0x0000000f
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK 0x000000f0
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT 4
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK 0x00000f00
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT 8
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK 0x0000f000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT 12
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK 0x000f0000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT 16
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK 0x00f00000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT 20
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK 0x0f000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT 24
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK 0xf0000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT 28
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_BYTEL_1 0x00000611
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK 0x0000000f
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT 0
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK 0x000000f0
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT 4
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK 0x00000f00
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT 8
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK 0x0000f000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT 12
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK 0x000f0000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT 16
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK 0x00f00000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT 20
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK 0x0f000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT 24
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK;
+}
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK 0xf0000000
+#define A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT 28
+static inline uint32_t A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
+{
+ return ((val) << A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT) & A6XX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK;
+}
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF1 0x0000062f
+
+#define REG_A6XX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00000630
+
+#define REG_A6XX_VSC_PERFCTR_VSC_SEL_0 0x00000cd8
+
+#define REG_A6XX_VSC_PERFCTR_VSC_SEL_1 0x00000cd9
+
+#define REG_A6XX_GRAS_ADDR_MODE_CNTL 0x00008601
+
+#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_0 0x00008610
+
+#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_1 0x00008611
+
+#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_2 0x00008612
+
+#define REG_A6XX_GRAS_PERFCTR_TSE_SEL_3 0x00008613
+
+#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_0 0x00008614
+
+#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_1 0x00008615
+
+#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_2 0x00008616
+
+#define REG_A6XX_GRAS_PERFCTR_RAS_SEL_3 0x00008617
+
+#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_0 0x00008618
+
+#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_1 0x00008619
+
+#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_2 0x0000861a
+
+#define REG_A6XX_GRAS_PERFCTR_LRZ_SEL_3 0x0000861b
+
+#define REG_A6XX_RB_ADDR_MODE_CNTL 0x00008e05
+
+#define REG_A6XX_RB_NC_MODE_CNTL 0x00008e08
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_0 0x00008e10
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_1 0x00008e11
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_2 0x00008e12
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_3 0x00008e13
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_4 0x00008e14
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_5 0x00008e15
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_6 0x00008e16
+
+#define REG_A6XX_RB_PERFCTR_RB_SEL_7 0x00008e17
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_0 0x00008e18
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_1 0x00008e19
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_2 0x00008e1a
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_3 0x00008e1b
+
+#define REG_A6XX_RB_PERFCTR_CCU_SEL_4 0x00008e1c
+
+#define REG_A6XX_RB_PERFCTR_CMP_SEL_0 0x00008e2c
+
+#define REG_A6XX_RB_PERFCTR_CMP_SEL_1 0x00008e2d
+
+#define REG_A6XX_RB_PERFCTR_CMP_SEL_2 0x00008e2e
+
+#define REG_A6XX_RB_PERFCTR_CMP_SEL_3 0x00008e2f
+
+#define REG_A6XX_RB_RB_SUB_BLOCK_SEL_CNTL_CD 0x00008e3d
+
+#define REG_A6XX_RB_CONTEXT_SWITCH_GMEM_SAVE_RESTORE 0x00008e50
+
+#define REG_A6XX_PC_DBG_ECO_CNTL 0x00009e00
+
+#define REG_A6XX_PC_ADDR_MODE_CNTL 0x00009e01
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_0 0x00009e34
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_1 0x00009e35
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_2 0x00009e36
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_3 0x00009e37
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_4 0x00009e38
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_5 0x00009e39
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_6 0x00009e3a
+
+#define REG_A6XX_PC_PERFCTR_PC_SEL_7 0x00009e3b
+
+#define REG_A6XX_HLSQ_ADDR_MODE_CNTL 0x0000be05
+
+#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_0 0x0000be10
+
+#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_1 0x0000be11
+
+#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_2 0x0000be12
+
+#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_3 0x0000be13
+
+#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_4 0x0000be14
+
+#define REG_A6XX_HLSQ_PERFCTR_HLSQ_SEL_5 0x0000be15
+
+#define REG_A6XX_HLSQ_DBG_AHB_READ_APERTURE 0x0000c800
+
+#define REG_A6XX_HLSQ_DBG_READ_SEL 0x0000d000
+
+#define REG_A6XX_VFD_ADDR_MODE_CNTL 0x0000a601
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_0 0x0000a610
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_1 0x0000a611
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_2 0x0000a612
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_3 0x0000a613
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_4 0x0000a614
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_5 0x0000a615
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_6 0x0000a616
+
+#define REG_A6XX_VFD_PERFCTR_VFD_SEL_7 0x0000a617
+
+#define REG_A6XX_VPC_ADDR_MODE_CNTL 0x00009601
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_0 0x00009604
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_1 0x00009605
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_2 0x00009606
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_3 0x00009607
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_4 0x00009608
+
+#define REG_A6XX_VPC_PERFCTR_VPC_SEL_5 0x00009609
+
+#define REG_A6XX_UCHE_ADDR_MODE_CNTL 0x00000e00
+
+#define REG_A6XX_UCHE_MODE_CNTL 0x00000e01
+
+#define REG_A6XX_UCHE_WRITE_RANGE_MAX_LO 0x00000e05
+
+#define REG_A6XX_UCHE_WRITE_RANGE_MAX_HI 0x00000e06
+
+#define REG_A6XX_UCHE_WRITE_THRU_BASE_LO 0x00000e07
+
+#define REG_A6XX_UCHE_WRITE_THRU_BASE_HI 0x00000e08
+
+#define REG_A6XX_UCHE_TRAP_BASE_LO 0x00000e09
+
+#define REG_A6XX_UCHE_TRAP_BASE_HI 0x00000e0a
+
+#define REG_A6XX_UCHE_GMEM_RANGE_MIN_LO 0x00000e0b
+
+#define REG_A6XX_UCHE_GMEM_RANGE_MIN_HI 0x00000e0c
+
+#define REG_A6XX_UCHE_GMEM_RANGE_MAX_LO 0x00000e0d
+
+#define REG_A6XX_UCHE_GMEM_RANGE_MAX_HI 0x00000e0e
+
+#define REG_A6XX_UCHE_CACHE_WAYS 0x00000e17
+
+#define REG_A6XX_UCHE_FILTER_CNTL 0x00000e18
+
+#define REG_A6XX_UCHE_CLIENT_PF 0x00000e19
+#define A6XX_UCHE_CLIENT_PF_PERFSEL__MASK 0x000000ff
+#define A6XX_UCHE_CLIENT_PF_PERFSEL__SHIFT 0
+static inline uint32_t A6XX_UCHE_CLIENT_PF_PERFSEL(uint32_t val)
+{
+ return ((val) << A6XX_UCHE_CLIENT_PF_PERFSEL__SHIFT) & A6XX_UCHE_CLIENT_PF_PERFSEL__MASK;
+}
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_0 0x00000e1c
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_1 0x00000e1d
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_2 0x00000e1e
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_3 0x00000e1f
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_4 0x00000e20
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_5 0x00000e21
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_6 0x00000e22
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_7 0x00000e23
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_8 0x00000e24
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_9 0x00000e25
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_10 0x00000e26
+
+#define REG_A6XX_UCHE_PERFCTR_UCHE_SEL_11 0x00000e27
+
+#define REG_A6XX_SP_ADDR_MODE_CNTL 0x0000ae01
+
+#define REG_A6XX_SP_NC_MODE_CNTL 0x0000ae02
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_0 0x0000ae10
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_1 0x0000ae11
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_2 0x0000ae12
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_3 0x0000ae13
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_4 0x0000ae14
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_5 0x0000ae15
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_6 0x0000ae16
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_7 0x0000ae17
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_8 0x0000ae18
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_9 0x0000ae19
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_10 0x0000ae1a
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_11 0x0000ae1b
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_12 0x0000ae1c
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_13 0x0000ae1d
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_14 0x0000ae1e
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_15 0x0000ae1f
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_16 0x0000ae20
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_17 0x0000ae21
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_18 0x0000ae22
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_19 0x0000ae23
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_20 0x0000ae24
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_21 0x0000ae25
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_22 0x0000ae26
+
+#define REG_A6XX_SP_PERFCTR_SP_SEL_23 0x0000ae27
+
+#define REG_A6XX_TPL1_ADDR_MODE_CNTL 0x0000b601
+
+#define REG_A6XX_TPL1_NC_MODE_CNTL 0x0000b604
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_0 0x0000b610
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_1 0x0000b611
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_2 0x0000b612
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_3 0x0000b613
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_4 0x0000b614
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_5 0x0000b615
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_6 0x0000b616
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_7 0x0000b617
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_8 0x0000b618
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_9 0x0000b619
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_10 0x0000b61a
+
+#define REG_A6XX_TPL1_PERFCTR_TP_SEL_11 0x0000b61b
+
+#define REG_A6XX_VBIF_VERSION 0x00003000
+
+#define REG_A6XX_VBIF_GATE_OFF_WRREQ_EN 0x0000302a
+
+#define REG_A6XX_VBIF_XIN_HALT_CTRL0 0x00003080
+
+#define REG_A6XX_VBIF_XIN_HALT_CTRL1 0x00003081
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL0 0x000030d0
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL1 0x000030d1
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL2 0x000030d2
+
+#define REG_A6XX_VBIF_PERF_CNT_SEL3 0x000030d3
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW0 0x000030d8
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW1 0x000030d9
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW2 0x000030da
+
+#define REG_A6XX_VBIF_PERF_CNT_LOW3 0x000030db
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH0 0x000030e0
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH1 0x000030e1
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH2 0x000030e2
+
+#define REG_A6XX_VBIF_PERF_CNT_HIGH3 0x000030e3
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_EN0 0x00003100
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_EN1 0x00003101
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_EN2 0x00003102
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_LOW0 0x00003110
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_LOW1 0x00003111
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_LOW2 0x00003112
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH0 0x00003118
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH1 0x00003119
+
+#define REG_A6XX_VBIF_PERF_PWR_CNT_HIGH2 0x0000311a
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_A 0x00018400
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_B 0x00018401
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_C 0x00018402
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_SEL_D 0x00018403
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK 0x000000ff
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_INDEX__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK 0x0000ff00
+#define A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT 8
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_SEL_D_PING_BLK_SEL__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLT 0x00018404
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK 0x0000003f
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_TRACEEN__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK 0x00007000
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT 12
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_GRANU__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK 0xf0000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT 28
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLT_SEGT__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_CNTLM 0x00018405
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK 0x0f000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT 24
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_CNTLM_ENABLE__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_0 0x00018408
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_1 0x00018409
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_2 0x0001840a
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_IVTL_3 0x0001840b
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_0 0x0001840c
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_1 0x0001840d
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_2 0x0001840e
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_MASKL_3 0x0001840f
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0 0x00018410
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK 0x0000000f
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL0__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK 0x000000f0
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT 4
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL1__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK 0x00000f00
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT 8
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL2__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK 0x0000f000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT 12
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL3__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK 0x000f0000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT 16
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL4__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK 0x00f00000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT 20
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL5__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK 0x0f000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT 24
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL6__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK 0xf0000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT 28
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_0_BYTEL7__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1 0x00018411
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK 0x0000000f
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT 0
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL8__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK 0x000000f0
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT 4
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL9__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK 0x00000f00
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT 8
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL10__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK 0x0000f000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT 12
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL11__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK 0x000f0000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT 16
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL12__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK 0x00f00000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT 20
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL13__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK 0x0f000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT 24
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL14__MASK;
+}
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK 0xf0000000
+#define A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT 28
+static inline uint32_t A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15(uint32_t val)
+{
+ return ((val) << A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__SHIFT) & A6XX_CX_DBGC_CFG_DBGBUS_BYTEL_1_BYTEL15__MASK;
+}
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF1 0x0001842f
+
+#define REG_A6XX_CX_DBGC_CFG_DBGBUS_TRACE_BUF2 0x00018430
+
+#define REG_A6XX_PDC_GPU_ENABLE_PDC 0x00021140
+
+#define REG_A6XX_PDC_GPU_SEQ_START_ADDR 0x00021148
+
+#define REG_A6XX_PDC_GPU_TCS0_CONTROL 0x00021540
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD_ENABLE_BANK 0x00021541
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD_WAIT_FOR_CMPL_BANK 0x00021542
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD0_MSGID 0x00021543
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD0_ADDR 0x00021544
+
+#define REG_A6XX_PDC_GPU_TCS0_CMD0_DATA 0x00021545
+
+#define REG_A6XX_PDC_GPU_TCS1_CONTROL 0x00021572
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK 0x00021573
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK 0x00021574
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID 0x00021575
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR 0x00021576
+
+#define REG_A6XX_PDC_GPU_TCS1_CMD0_DATA 0x00021577
+
+#define REG_A6XX_PDC_GPU_TCS2_CONTROL 0x000215a4
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD_ENABLE_BANK 0x000215a5
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD_WAIT_FOR_CMPL_BANK 0x000215a6
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD0_MSGID 0x000215a7
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD0_ADDR 0x000215a8
+
+#define REG_A6XX_PDC_GPU_TCS2_CMD0_DATA 0x000215a9
+
+#define REG_A6XX_PDC_GPU_TCS3_CONTROL 0x000215d6
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK 0x000215d7
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK 0x000215d8
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID 0x000215d9
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR 0x000215da
+
+#define REG_A6XX_PDC_GPU_TCS3_CMD0_DATA 0x000215db
+
+#define REG_A6XX_PDC_GPU_SEQ_MEM_0 0x000a0000
+
+#define REG_A6XX_X1_WINDOW_OFFSET 0x000088d4
+#define A6XX_X1_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_X1_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A6XX_X1_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_X1_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A6XX_X1_WINDOW_OFFSET_X__SHIFT) & A6XX_X1_WINDOW_OFFSET_X__MASK;
+}
+#define A6XX_X1_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A6XX_X1_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_X1_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A6XX_X1_WINDOW_OFFSET_Y__SHIFT) & A6XX_X1_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A6XX_X2_WINDOW_OFFSET 0x0000b4d1
+#define A6XX_X2_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_X2_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A6XX_X2_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_X2_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A6XX_X2_WINDOW_OFFSET_X__SHIFT) & A6XX_X2_WINDOW_OFFSET_X__MASK;
+}
+#define A6XX_X2_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A6XX_X2_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_X2_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A6XX_X2_WINDOW_OFFSET_Y__SHIFT) & A6XX_X2_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A6XX_X3_WINDOW_OFFSET 0x0000b307
+#define A6XX_X3_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_X3_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A6XX_X3_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_X3_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A6XX_X3_WINDOW_OFFSET_X__SHIFT) & A6XX_X3_WINDOW_OFFSET_X__MASK;
+}
+#define A6XX_X3_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A6XX_X3_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_X3_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A6XX_X3_WINDOW_OFFSET_Y__SHIFT) & A6XX_X3_WINDOW_OFFSET_Y__MASK;
+}
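+
+/*
+ * Editorial note: the X1/X2/X3 prefixes above appear to mark three copies
+ * of the same WINDOW_OFFSET layout at offsets whose owning blocks had not
+ * yet been identified when this header was generated. That reading is an
+ * assumption based on the naming, in the spirit of the UNKNOWN_* registers
+ * used elsewhere in this file.
+ */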
+
+#define REG_A6XX_X1_BIN_SIZE 0x000080a1
+#define A6XX_X1_BIN_SIZE_WIDTH__MASK 0x000000ff
+#define A6XX_X1_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A6XX_X1_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_X1_BIN_SIZE_WIDTH__SHIFT) & A6XX_X1_BIN_SIZE_WIDTH__MASK;
+}
+#define A6XX_X1_BIN_SIZE_HEIGHT__MASK 0x0001ff00
+#define A6XX_X1_BIN_SIZE_HEIGHT__SHIFT 8
+static inline uint32_t A6XX_X1_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 4) << A6XX_X1_BIN_SIZE_HEIGHT__SHIFT) & A6XX_X1_BIN_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A6XX_X2_BIN_SIZE 0x00008800
+#define A6XX_X2_BIN_SIZE_WIDTH__MASK 0x000000ff
+#define A6XX_X2_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A6XX_X2_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_X2_BIN_SIZE_WIDTH__SHIFT) & A6XX_X2_BIN_SIZE_WIDTH__MASK;
+}
+#define A6XX_X2_BIN_SIZE_HEIGHT__MASK 0x0001ff00
+#define A6XX_X2_BIN_SIZE_HEIGHT__SHIFT 8
+static inline uint32_t A6XX_X2_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 4) << A6XX_X2_BIN_SIZE_HEIGHT__SHIFT) & A6XX_X2_BIN_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A6XX_X3_BIN_SIZE 0x000088d3
+#define A6XX_X3_BIN_SIZE_WIDTH__MASK 0x000000ff
+#define A6XX_X3_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A6XX_X3_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_X3_BIN_SIZE_WIDTH__SHIFT) & A6XX_X3_BIN_SIZE_WIDTH__MASK;
+}
+#define A6XX_X3_BIN_SIZE_HEIGHT__MASK 0x0001ff00
+#define A6XX_X3_BIN_SIZE_HEIGHT__SHIFT 8
+static inline uint32_t A6XX_X3_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 4) << A6XX_X3_BIN_SIZE_HEIGHT__SHIFT) & A6XX_X3_BIN_SIZE_HEIGHT__MASK;
+}
+
+#define REG_A6XX_VSC_BIN_SIZE 0x00000c02
+#define A6XX_VSC_BIN_SIZE_WIDTH__MASK 0x000000ff
+#define A6XX_VSC_BIN_SIZE_WIDTH__SHIFT 0
+static inline uint32_t A6XX_VSC_BIN_SIZE_WIDTH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_VSC_BIN_SIZE_WIDTH__SHIFT) & A6XX_VSC_BIN_SIZE_WIDTH__MASK;
+}
+#define A6XX_VSC_BIN_SIZE_HEIGHT__MASK 0x0001ff00
+#define A6XX_VSC_BIN_SIZE_HEIGHT__SHIFT 8
+static inline uint32_t A6XX_VSC_BIN_SIZE_HEIGHT(uint32_t val)
+{
+ return ((val >> 4) << A6XX_VSC_BIN_SIZE_HEIGHT__SHIFT) & A6XX_VSC_BIN_SIZE_HEIGHT__MASK;
+}
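+
+/*
+ * Editorial note: the (val >> 5) and (val >> 4) in the BIN_SIZE helpers
+ * above mean the width field is programmed in units of 32 pixels and the
+ * height field in units of 16 pixels, so callers pass sizes in pixels.
+ * Illustrative sketch (the 96x64 bin size is a placeholder value):
+ *
+ *	val = A6XX_VSC_BIN_SIZE_WIDTH(96) |	// (96 >> 5) = 3
+ *	      A6XX_VSC_BIN_SIZE_HEIGHT(64);	// (64 >> 4) = 4, placed at bit 8
+ */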
+
+#define REG_A6XX_VSC_SIZE_ADDRESS_LO 0x00000c03
+
+#define REG_A6XX_VSC_SIZE_ADDRESS_HI 0x00000c04
+
+#define REG_A6XX_VSC_BIN_COUNT 0x00000c06
+#define A6XX_VSC_BIN_COUNT_NX__MASK 0x000007fe
+#define A6XX_VSC_BIN_COUNT_NX__SHIFT 1
+static inline uint32_t A6XX_VSC_BIN_COUNT_NX(uint32_t val)
+{
+ return ((val) << A6XX_VSC_BIN_COUNT_NX__SHIFT) & A6XX_VSC_BIN_COUNT_NX__MASK;
+}
+#define A6XX_VSC_BIN_COUNT_NY__MASK 0x001ff800
+#define A6XX_VSC_BIN_COUNT_NY__SHIFT 11
+static inline uint32_t A6XX_VSC_BIN_COUNT_NY(uint32_t val)
+{
+ return ((val) << A6XX_VSC_BIN_COUNT_NY__SHIFT) & A6XX_VSC_BIN_COUNT_NY__MASK;
+}
+
+static inline uint32_t REG_A6XX_VSC_PIPE_CONFIG(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_PIPE_CONFIG_REG(uint32_t i0) { return 0x00000c10 + 0x1*i0; }
+#define A6XX_VSC_PIPE_CONFIG_REG_X__MASK 0x000003ff
+#define A6XX_VSC_PIPE_CONFIG_REG_X__SHIFT 0
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_X(uint32_t val)
+{
+ return ((val) << A6XX_VSC_PIPE_CONFIG_REG_X__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_X__MASK;
+}
+#define A6XX_VSC_PIPE_CONFIG_REG_Y__MASK 0x000ffc00
+#define A6XX_VSC_PIPE_CONFIG_REG_Y__SHIFT 10
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_Y(uint32_t val)
+{
+ return ((val) << A6XX_VSC_PIPE_CONFIG_REG_Y__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_Y__MASK;
+}
+#define A6XX_VSC_PIPE_CONFIG_REG_W__MASK 0x03f00000
+#define A6XX_VSC_PIPE_CONFIG_REG_W__SHIFT 20
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_W(uint32_t val)
+{
+ return ((val) << A6XX_VSC_PIPE_CONFIG_REG_W__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_W__MASK;
+}
+#define A6XX_VSC_PIPE_CONFIG_REG_H__MASK 0xfc000000
+#define A6XX_VSC_PIPE_CONFIG_REG_H__SHIFT 26
+static inline uint32_t A6XX_VSC_PIPE_CONFIG_REG_H(uint32_t val)
+{
+ return ((val) << A6XX_VSC_PIPE_CONFIG_REG_H__SHIFT) & A6XX_VSC_PIPE_CONFIG_REG_H__MASK;
+}
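+
+/*
+ * Editorial note: REG_A6XX_VSC_PIPE_CONFIG_REG(i0) is the generated
+ * accessor for an array of per-pipe registers starting at 0x0c10, one
+ * dword per pipe. A minimal sketch of programming the visibility-stream
+ * pipes (npipes and the x/y/w/h geometry arrays are placeholders, not
+ * defined in this file):
+ *
+ *	for (i = 0; i < npipes; i++)
+ *		gpu_write(gpu, REG_A6XX_VSC_PIPE_CONFIG_REG(i),
+ *			  A6XX_VSC_PIPE_CONFIG_REG_X(x[i]) |
+ *			  A6XX_VSC_PIPE_CONFIG_REG_Y(y[i]) |
+ *			  A6XX_VSC_PIPE_CONFIG_REG_W(w[i]) |
+ *			  A6XX_VSC_PIPE_CONFIG_REG_H(h[i]));
+ */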
+
+#define REG_A6XX_VSC_XXX_ADDRESS_LO 0x00000c30
+
+#define REG_A6XX_VSC_XXX_ADDRESS_HI 0x00000c31
+
+#define REG_A6XX_VSC_XXX_PITCH 0x00000c32
+
+#define REG_A6XX_VSC_PIPE_DATA_ADDRESS_LO 0x00000c34
+
+#define REG_A6XX_VSC_PIPE_DATA_ADDRESS_HI 0x00000c35
+
+#define REG_A6XX_VSC_PIPE_DATA_PITCH 0x00000c36
+
+static inline uint32_t REG_A6XX_VSC_SIZE(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VSC_SIZE_REG(uint32_t i0) { return 0x00000c78 + 0x1*i0; }
+
+#define REG_A6XX_UCHE_UNKNOWN_0E12 0x00000e12
+
+#define REG_A6XX_GRAS_UNKNOWN_8001 0x00008001
+
+#define REG_A6XX_GRAS_UNKNOWN_8004 0x00008004
+
+#define REG_A6XX_GRAS_CNTL 0x00008005
+#define A6XX_GRAS_CNTL_VARYING 0x00000001
+#define A6XX_GRAS_CNTL_XCOORD 0x00000040
+#define A6XX_GRAS_CNTL_YCOORD 0x00000080
+#define A6XX_GRAS_CNTL_ZCOORD 0x00000100
+#define A6XX_GRAS_CNTL_WCOORD 0x00000200
+
+#define REG_A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ 0x00008006
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK 0x000003ff
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__SHIFT) & A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_HORZ__MASK;
+}
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK 0x000ffc00
+#define A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT 10
+static inline uint32_t A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__SHIFT) & A6XX_GRAS_CL_GUARDBAND_CLIP_ADJ_VERT__MASK;
+}
+
+#define REG_A6XX_GRAS_CL_VPORT_XOFFSET_0 0x00008010
+#define A6XX_GRAS_CL_VPORT_XOFFSET_0__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_XOFFSET_0(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_XOFFSET_0__SHIFT) & A6XX_GRAS_CL_VPORT_XOFFSET_0__MASK;
+}
+
+#define REG_A6XX_GRAS_CL_VPORT_XSCALE_0 0x00008011
+#define A6XX_GRAS_CL_VPORT_XSCALE_0__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_XSCALE_0__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_XSCALE_0(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_XSCALE_0__SHIFT) & A6XX_GRAS_CL_VPORT_XSCALE_0__MASK;
+}
+
+#define REG_A6XX_GRAS_CL_VPORT_YOFFSET_0 0x00008012
+#define A6XX_GRAS_CL_VPORT_YOFFSET_0__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_YOFFSET_0(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_YOFFSET_0__SHIFT) & A6XX_GRAS_CL_VPORT_YOFFSET_0__MASK;
+}
+
+#define REG_A6XX_GRAS_CL_VPORT_YSCALE_0 0x00008013
+#define A6XX_GRAS_CL_VPORT_YSCALE_0__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_YSCALE_0__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_YSCALE_0(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_YSCALE_0__SHIFT) & A6XX_GRAS_CL_VPORT_YSCALE_0__MASK;
+}
+
+#define REG_A6XX_GRAS_CL_VPORT_ZOFFSET_0 0x00008014
+#define A6XX_GRAS_CL_VPORT_ZOFFSET_0__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_ZOFFSET_0(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_ZOFFSET_0__SHIFT) & A6XX_GRAS_CL_VPORT_ZOFFSET_0__MASK;
+}
+
+#define REG_A6XX_GRAS_CL_VPORT_ZSCALE_0 0x00008015
+#define A6XX_GRAS_CL_VPORT_ZSCALE_0__MASK 0xffffffff
+#define A6XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT 0
+static inline uint32_t A6XX_GRAS_CL_VPORT_ZSCALE_0(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_CL_VPORT_ZSCALE_0__SHIFT) & A6XX_GRAS_CL_VPORT_ZSCALE_0__MASK;
+}
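+
+/*
+ * Editorial note: the viewport registers above hold full 32-bit IEEE
+ * floats; fui() (a float-to-bits helper provided alongside these generated
+ * headers in the msm driver) reinterprets the float's bit pattern as a
+ * uint32_t rather than converting its value. Illustrative sketch with a
+ * placeholder viewport scale:
+ *
+ *	gpu_write(gpu, REG_A6XX_GRAS_CL_VPORT_XSCALE_0,
+ *		  A6XX_GRAS_CL_VPORT_XSCALE_0(width / 2.0f));
+ */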
+
+#define REG_A6XX_GRAS_SU_CNTL 0x00008090
+#define A6XX_GRAS_SU_CNTL_CULL_FRONT 0x00000001
+#define A6XX_GRAS_SU_CNTL_CULL_BACK 0x00000002
+#define A6XX_GRAS_SU_CNTL_FRONT_CW 0x00000004
+#define A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK 0x000007f8
+#define A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT 3
+static inline uint32_t A6XX_GRAS_SU_CNTL_LINEHALFWIDTH(float val)
+{
+ return ((((int32_t)(val * 4.0))) << A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__SHIFT) & A6XX_GRAS_SU_CNTL_LINEHALFWIDTH__MASK;
+}
+#define A6XX_GRAS_SU_CNTL_POLY_OFFSET 0x00000800
+#define A6XX_GRAS_SU_CNTL_MSAA_ENABLE 0x00002000
+
+#define REG_A6XX_GRAS_SU_POINT_MINMAX 0x00008091
+#define A6XX_GRAS_SU_POINT_MINMAX_MIN__MASK 0x0000ffff
+#define A6XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POINT_MINMAX_MIN(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_MINMAX_MIN__SHIFT) & A6XX_GRAS_SU_POINT_MINMAX_MIN__MASK;
+}
+#define A6XX_GRAS_SU_POINT_MINMAX_MAX__MASK 0xffff0000
+#define A6XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT 16
+static inline uint32_t A6XX_GRAS_SU_POINT_MINMAX_MAX(float val)
+{
+ return ((((uint32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_MINMAX_MAX__SHIFT) & A6XX_GRAS_SU_POINT_MINMAX_MAX__MASK;
+}
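+
+/*
+ * Editorial note: several GRAS_SU fields take fixed-point values. The
+ * (val * 16.0) above encodes point sizes with 4 fractional bits, and
+ * LINEHALFWIDTH's (val * 4.0) uses 2 fractional bits. For example,
+ * A6XX_GRAS_SU_POINT_MINMAX_MAX(1.5f) yields 24 (0x18) in the upper
+ * halfword.
+ */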
+
+#define REG_A6XX_GRAS_SU_POINT_SIZE 0x00008092
+#define A6XX_GRAS_SU_POINT_SIZE__MASK 0xffffffff
+#define A6XX_GRAS_SU_POINT_SIZE__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POINT_SIZE(float val)
+{
+ return ((((int32_t)(val * 16.0))) << A6XX_GRAS_SU_POINT_SIZE__SHIFT) & A6XX_GRAS_SU_POINT_SIZE__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_POLY_OFFSET_SCALE 0x00008095
+#define A6XX_GRAS_SU_POLY_OFFSET_SCALE__MASK 0xffffffff
+#define A6XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POLY_OFFSET_SCALE(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_SU_POLY_OFFSET_SCALE__SHIFT) & A6XX_GRAS_SU_POLY_OFFSET_SCALE__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_POLY_OFFSET_OFFSET 0x00008096
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK 0xffffffff
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POLY_OFFSET_OFFSET(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_SU_POLY_OFFSET_OFFSET__SHIFT) & A6XX_GRAS_SU_POLY_OFFSET_OFFSET__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP 0x00008097
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK 0xffffffff
+#define A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP(float val)
+{
+ return ((fui(val)) << A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__SHIFT) & A6XX_GRAS_SU_POLY_OFFSET_OFFSET_CLAMP__MASK;
+}
+
+#define REG_A6XX_GRAS_SU_DEPTH_BUFFER_INFO 0x00008098
+#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_depth_format val)
+{
+ return ((val) << A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A6XX_GRAS_SU_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A6XX_GRAS_UNKNOWN_8099 0x00008099
+
+#define REG_A6XX_GRAS_UNKNOWN_809B 0x0000809b
+
+#define REG_A6XX_GRAS_RAS_MSAA_CNTL 0x000080a2
+#define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_GRAS_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A6XX_GRAS_DEST_MSAA_CNTL 0x000080a3
+#define A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_GRAS_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_GRAS_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A6XX_GRAS_UNKNOWN_80A4 0x000080a4
+
+#define REG_A6XX_GRAS_UNKNOWN_80A5 0x000080a5
+
+#define REG_A6XX_GRAS_UNKNOWN_80A6 0x000080a6
+
+#define REG_A6XX_GRAS_UNKNOWN_80AF 0x000080af
+
+#define REG_A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0 0x000080b0
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_X__MASK;
+}
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0 0x000080b1
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_X__MASK;
+}
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__SHIFT) & A6XX_GRAS_SC_SCREEN_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0 0x000080d0
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK 0x00007fff
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_X__MASK;
+}
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK 0x7fff0000
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_TL_0_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0 0x000080d1
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK 0x00007fff
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_X__MASK;
+}
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK 0x7fff0000
+#define A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__SHIFT) & A6XX_GRAS_SC_VIEWPORT_SCISSOR_BR_0_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_SC_WINDOW_SCISSOR_TL 0x000080f0
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK 0x00007fff
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X__MASK;
+}
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_SC_WINDOW_SCISSOR_BR 0x000080f1
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK 0x00007fff
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X__MASK;
+}
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__SHIFT) & A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y__MASK;
+}
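+
+/*
+ * Editorial note: the scissor registers pack an x/y pair per dword, one
+ * dword for the top-left corner and one for the bottom-right. A minimal
+ * sketch (x1/y1/x2/y2 are placeholder coordinates):
+ *
+ *	gpu_write(gpu, REG_A6XX_GRAS_SC_WINDOW_SCISSOR_TL,
+ *		  A6XX_GRAS_SC_WINDOW_SCISSOR_TL_X(x1) |
+ *		  A6XX_GRAS_SC_WINDOW_SCISSOR_TL_Y(y1));
+ *	gpu_write(gpu, REG_A6XX_GRAS_SC_WINDOW_SCISSOR_BR,
+ *		  A6XX_GRAS_SC_WINDOW_SCISSOR_BR_X(x2) |
+ *		  A6XX_GRAS_SC_WINDOW_SCISSOR_BR_Y(y2));
+ */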
+
+#define REG_A6XX_GRAS_LRZ_CNTL 0x00008100
+#define A6XX_GRAS_LRZ_CNTL_ENABLE 0x00000001
+#define A6XX_GRAS_LRZ_CNTL_LRZ_WRITE 0x00000002
+#define A6XX_GRAS_LRZ_CNTL_GREATER 0x00000004
+
+#define REG_A6XX_GRAS_2D_BLIT_INFO 0x00008102
+#define A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__SHIFT) & A6XX_GRAS_2D_BLIT_INFO_COLOR_FORMAT__MASK;
+}
+
+#define REG_A6XX_GRAS_LRZ_BUFFER_BASE_LO 0x00008103
+
+#define REG_A6XX_GRAS_LRZ_BUFFER_BASE_HI 0x00008104
+
+#define REG_A6XX_GRAS_LRZ_BUFFER_PITCH 0x00008105
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK 0x000007ff
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__SHIFT 0
+static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_PITCH__MASK;
+}
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK 0x003ff800
+#define A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11
+static inline uint32_t A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_GRAS_LRZ_BUFFER_PITCH_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_LO 0x00008106
+
+#define REG_A6XX_GRAS_LRZ_FAST_CLEAR_BUFFER_BASE_HI 0x00008107
+
+#define REG_A6XX_GRAS_2D_BLIT_CNTL 0x00008400
+
+#define REG_A6XX_GRAS_2D_SRC_TL_X 0x00008401
+#define A6XX_GRAS_2D_SRC_TL_X_X__MASK 0x00ffff00
+#define A6XX_GRAS_2D_SRC_TL_X_X__SHIFT 8
+static inline uint32_t A6XX_GRAS_2D_SRC_TL_X_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_SRC_TL_X_X__SHIFT) & A6XX_GRAS_2D_SRC_TL_X_X__MASK;
+}
+
+#define REG_A6XX_GRAS_2D_SRC_BR_X 0x00008402
+#define A6XX_GRAS_2D_SRC_BR_X_X__MASK 0x00ffff00
+#define A6XX_GRAS_2D_SRC_BR_X_X__SHIFT 8
+static inline uint32_t A6XX_GRAS_2D_SRC_BR_X_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_SRC_BR_X_X__SHIFT) & A6XX_GRAS_2D_SRC_BR_X_X__MASK;
+}
+
+#define REG_A6XX_GRAS_2D_SRC_TL_Y 0x00008403
+#define A6XX_GRAS_2D_SRC_TL_Y_Y__MASK 0x00ffff00
+#define A6XX_GRAS_2D_SRC_TL_Y_Y__SHIFT 8
+static inline uint32_t A6XX_GRAS_2D_SRC_TL_Y_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_SRC_TL_Y_Y__SHIFT) & A6XX_GRAS_2D_SRC_TL_Y_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_2D_SRC_BR_Y 0x00008404
+#define A6XX_GRAS_2D_SRC_BR_Y_Y__MASK 0x00ffff00
+#define A6XX_GRAS_2D_SRC_BR_Y_Y__SHIFT 8
+static inline uint32_t A6XX_GRAS_2D_SRC_BR_Y_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_SRC_BR_Y_Y__SHIFT) & A6XX_GRAS_2D_SRC_BR_Y_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_2D_DST_TL 0x00008405
+#define A6XX_GRAS_2D_DST_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_2D_DST_TL_X__MASK 0x00007fff
+#define A6XX_GRAS_2D_DST_TL_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_2D_DST_TL_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_DST_TL_X__SHIFT) & A6XX_GRAS_2D_DST_TL_X__MASK;
+}
+#define A6XX_GRAS_2D_DST_TL_Y__MASK 0x7fff0000
+#define A6XX_GRAS_2D_DST_TL_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_2D_DST_TL_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_DST_TL_Y__SHIFT) & A6XX_GRAS_2D_DST_TL_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_2D_DST_BR 0x00008406
+#define A6XX_GRAS_2D_DST_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_2D_DST_BR_X__MASK 0x00007fff
+#define A6XX_GRAS_2D_DST_BR_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_2D_DST_BR_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_DST_BR_X__SHIFT) & A6XX_GRAS_2D_DST_BR_X__MASK;
+}
+#define A6XX_GRAS_2D_DST_BR_Y__MASK 0x7fff0000
+#define A6XX_GRAS_2D_DST_BR_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_2D_DST_BR_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_2D_DST_BR_Y__SHIFT) & A6XX_GRAS_2D_DST_BR_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_RESOLVE_CNTL_1 0x0000840a
+#define A6XX_GRAS_RESOLVE_CNTL_1_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_RESOLVE_CNTL_1_X__MASK 0x00007fff
+#define A6XX_GRAS_RESOLVE_CNTL_1_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_RESOLVE_CNTL_1_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_RESOLVE_CNTL_1_X__SHIFT) & A6XX_GRAS_RESOLVE_CNTL_1_X__MASK;
+}
+#define A6XX_GRAS_RESOLVE_CNTL_1_Y__MASK 0x7fff0000
+#define A6XX_GRAS_RESOLVE_CNTL_1_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_RESOLVE_CNTL_1_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_RESOLVE_CNTL_1_Y__SHIFT) & A6XX_GRAS_RESOLVE_CNTL_1_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_RESOLVE_CNTL_2 0x0000840b
+#define A6XX_GRAS_RESOLVE_CNTL_2_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_GRAS_RESOLVE_CNTL_2_X__MASK 0x00007fff
+#define A6XX_GRAS_RESOLVE_CNTL_2_X__SHIFT 0
+static inline uint32_t A6XX_GRAS_RESOLVE_CNTL_2_X(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_RESOLVE_CNTL_2_X__SHIFT) & A6XX_GRAS_RESOLVE_CNTL_2_X__MASK;
+}
+#define A6XX_GRAS_RESOLVE_CNTL_2_Y__MASK 0x7fff0000
+#define A6XX_GRAS_RESOLVE_CNTL_2_Y__SHIFT 16
+static inline uint32_t A6XX_GRAS_RESOLVE_CNTL_2_Y(uint32_t val)
+{
+ return ((val) << A6XX_GRAS_RESOLVE_CNTL_2_Y__SHIFT) & A6XX_GRAS_RESOLVE_CNTL_2_Y__MASK;
+}
+
+#define REG_A6XX_GRAS_UNKNOWN_8600 0x00008600
+
+#define REG_A6XX_RB_RAS_MSAA_CNTL 0x00008802
+#define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_RB_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_RB_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A6XX_RB_DEST_MSAA_CNTL 0x00008803
+#define A6XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_RB_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_RB_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_RB_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_RB_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A6XX_RB_UNKNOWN_8804 0x00008804
+
+#define REG_A6XX_RB_UNKNOWN_8805 0x00008805
+
+#define REG_A6XX_RB_UNKNOWN_8806 0x00008806
+
+#define REG_A6XX_RB_RENDER_CONTROL0 0x00008809
+#define A6XX_RB_RENDER_CONTROL0_VARYING 0x00000001
+#define A6XX_RB_RENDER_CONTROL0_XCOORD 0x00000040
+#define A6XX_RB_RENDER_CONTROL0_YCOORD 0x00000080
+#define A6XX_RB_RENDER_CONTROL0_ZCOORD 0x00000100
+#define A6XX_RB_RENDER_CONTROL0_WCOORD 0x00000200
+#define A6XX_RB_RENDER_CONTROL0_UNK10 0x00000400
+
+#define REG_A6XX_RB_RENDER_CONTROL1 0x0000880a
+#define A6XX_RB_RENDER_CONTROL1_SAMPLEMASK 0x00000001
+#define A6XX_RB_RENDER_CONTROL1_FACENESS 0x00000002
+#define A6XX_RB_RENDER_CONTROL1_SAMPLEID 0x00000008
+
+#define REG_A6XX_RB_FS_OUTPUT_CNTL0 0x0000880b
+#define A6XX_RB_FS_OUTPUT_CNTL0_FRAG_WRITES_Z 0x00000002
+
+#define REG_A6XX_RB_FS_OUTPUT_CNTL1 0x0000880c
+#define A6XX_RB_FS_OUTPUT_CNTL1_MRT__MASK 0x0000000f
+#define A6XX_RB_FS_OUTPUT_CNTL1_MRT__SHIFT 0
+static inline uint32_t A6XX_RB_FS_OUTPUT_CNTL1_MRT(uint32_t val)
+{
+ return ((val) << A6XX_RB_FS_OUTPUT_CNTL1_MRT__SHIFT) & A6XX_RB_FS_OUTPUT_CNTL1_MRT__MASK;
+}
+
+#define REG_A6XX_RB_RENDER_COMPONENTS 0x0000880d
+#define A6XX_RB_RENDER_COMPONENTS_RT0__MASK 0x0000000f
+#define A6XX_RB_RENDER_COMPONENTS_RT0__SHIFT 0
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT0(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT0__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT0__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT1__MASK 0x000000f0
+#define A6XX_RB_RENDER_COMPONENTS_RT1__SHIFT 4
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT1(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT1__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT1__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT2__MASK 0x00000f00
+#define A6XX_RB_RENDER_COMPONENTS_RT2__SHIFT 8
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT2(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT2__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT2__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT3__MASK 0x0000f000
+#define A6XX_RB_RENDER_COMPONENTS_RT3__SHIFT 12
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT3(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT3__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT3__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT4__MASK 0x000f0000
+#define A6XX_RB_RENDER_COMPONENTS_RT4__SHIFT 16
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT4(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT4__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT4__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT5__MASK 0x00f00000
+#define A6XX_RB_RENDER_COMPONENTS_RT5__SHIFT 20
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT5(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT5__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT5__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT6__MASK 0x0f000000
+#define A6XX_RB_RENDER_COMPONENTS_RT6__SHIFT 24
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT6(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT6__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT6__MASK;
+}
+#define A6XX_RB_RENDER_COMPONENTS_RT7__MASK 0xf0000000
+#define A6XX_RB_RENDER_COMPONENTS_RT7__SHIFT 28
+static inline uint32_t A6XX_RB_RENDER_COMPONENTS_RT7(uint32_t val)
+{
+ return ((val) << A6XX_RB_RENDER_COMPONENTS_RT7__SHIFT) & A6XX_RB_RENDER_COMPONENTS_RT7__MASK;
+}
+
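+/*
+ * Example: RB_RENDER_COMPONENTS packs one 4-bit RGBA write mask per render
+ * target.  A sketch for two MRTs with all four components enabled (0xf is an
+ * illustrative writemask value, not a named constant):
+ *
+ *   uint32_t components = A6XX_RB_RENDER_COMPONENTS_RT0(0xf) |
+ *                         A6XX_RB_RENDER_COMPONENTS_RT1(0xf);
+ */
+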
+#define REG_A6XX_RB_DITHER_CNTL 0x0000880e
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__MASK 0x00000003
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__SHIFT 0
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT0__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__MASK 0x0000000c
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__SHIFT 2
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT1__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__MASK 0x00000030
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__SHIFT 4
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT2__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__MASK 0x000000c0
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__SHIFT 6
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT3__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__MASK 0x00000300
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__SHIFT 8
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT4__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__MASK 0x00000c00
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__SHIFT 10
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT5__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__MASK 0x00003000
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__SHIFT 12
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT6__MASK;
+}
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__MASK 0x0000c000
+#define A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__SHIFT 14
+static inline uint32_t A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7(enum adreno_rb_dither_mode val)
+{
+ return ((val) << A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__SHIFT) & A6XX_RB_DITHER_CNTL_DITHER_MODE_MRT7__MASK;
+}
+
+#define REG_A6XX_RB_SRGB_CNTL 0x0000880f
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT0 0x00000001
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT1 0x00000002
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT2 0x00000004
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT3 0x00000008
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT4 0x00000010
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT5 0x00000020
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT6 0x00000040
+#define A6XX_RB_SRGB_CNTL_SRGB_MRT7 0x00000080
+
+#define REG_A6XX_RB_UNKNOWN_8818 0x00008818
+
+#define REG_A6XX_RB_UNKNOWN_8819 0x00008819
+
+#define REG_A6XX_RB_UNKNOWN_881A 0x0000881a
+
+#define REG_A6XX_RB_UNKNOWN_881B 0x0000881b
+
+#define REG_A6XX_RB_UNKNOWN_881C 0x0000881c
+
+#define REG_A6XX_RB_UNKNOWN_881D 0x0000881d
+
+#define REG_A6XX_RB_UNKNOWN_881E 0x0000881e
+
+static inline uint32_t REG_A6XX_RB_MRT(uint32_t i0) { return 0x00008820 + 0x8*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_CONTROL(uint32_t i0) { return 0x00008820 + 0x8*i0; }
+#define A6XX_RB_MRT_CONTROL_BLEND 0x00000001
+#define A6XX_RB_MRT_CONTROL_BLEND2 0x00000002
+#define A6XX_RB_MRT_CONTROL_ROP_ENABLE 0x00000004
+#define A6XX_RB_MRT_CONTROL_ROP_CODE__MASK 0x00000078
+#define A6XX_RB_MRT_CONTROL_ROP_CODE__SHIFT 3
+static inline uint32_t A6XX_RB_MRT_CONTROL_ROP_CODE(enum a3xx_rop_code val)
+{
+ return ((val) << A6XX_RB_MRT_CONTROL_ROP_CODE__SHIFT) & A6XX_RB_MRT_CONTROL_ROP_CODE__MASK;
+}
+#define A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK 0x00000780
+#define A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT 7
+static inline uint32_t A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE(uint32_t val)
+{
+ return ((val) << A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__SHIFT) & A6XX_RB_MRT_CONTROL_COMPONENT_ENABLE__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_MRT_BLEND_CONTROL(uint32_t i0) { return 0x00008821 + 0x8*i0; }
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK 0x0000001f
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK 0x000000e0
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT 5
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK 0x00001f00
+#define A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT 8
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK 0x001f0000
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT 16
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK 0x00e00000
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT 21
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(enum a3xx_rb_blend_opcode val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE__MASK;
+}
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK 0x1f000000
+#define A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT 24
+static inline uint32_t A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(enum adreno_rb_blend_factor val)
+{
+ return ((val) << A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__SHIFT) & A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR__MASK;
+}
+
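+/*
+ * Example: classic post-multiplied alpha blending on one MRT.  The enum
+ * values are assumed to come from the shared adreno_rb_blend_factor and
+ * a3xx_rb_blend_opcode definitions used elsewhere by this header family:
+ *
+ *   uint32_t blend = A6XX_RB_MRT_BLEND_CONTROL_RGB_SRC_FACTOR(FACTOR_SRC_ALPHA) |
+ *                    A6XX_RB_MRT_BLEND_CONTROL_RGB_BLEND_OPCODE(BLEND_DST_PLUS_SRC) |
+ *                    A6XX_RB_MRT_BLEND_CONTROL_RGB_DEST_FACTOR(FACTOR_ONE_MINUS_SRC_ALPHA) |
+ *                    A6XX_RB_MRT_BLEND_CONTROL_ALPHA_SRC_FACTOR(FACTOR_ONE) |
+ *                    A6XX_RB_MRT_BLEND_CONTROL_ALPHA_BLEND_OPCODE(BLEND_DST_PLUS_SRC) |
+ *                    A6XX_RB_MRT_BLEND_CONTROL_ALPHA_DEST_FACTOR(FACTOR_ONE_MINUS_SRC_ALPHA);
+ */
+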
+static inline uint32_t REG_A6XX_RB_MRT_BUF_INFO(uint32_t i0) { return 0x00008822 + 0x8*i0; }
+#define A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_FORMAT__MASK;
+}
+#define A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK 0x00000300
+#define A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT 8
+static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_TILE_MODE__MASK;
+}
+#define A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK 0x00006000
+#define A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT 13
+static inline uint32_t A6XX_RB_MRT_BUF_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_MRT_BUF_INFO_COLOR_SWAP__MASK;
+}
+#define A6XX_RB_MRT_BUF_INFO_COLOR_SRGB 0x00008000
+
+static inline uint32_t REG_A6XX_RB_MRT_PITCH(uint32_t i0) { return 0x00008823 + 0x8*i0; }
+#define A6XX_RB_MRT_PITCH__MASK 0xffffffff
+#define A6XX_RB_MRT_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_MRT_PITCH__SHIFT) & A6XX_RB_MRT_PITCH__MASK;
+}
+
+static inline uint32_t REG_A6XX_RB_MRT_ARRAY_PITCH(uint32_t i0) { return 0x00008824 + 0x8*i0; }
+#define A6XX_RB_MRT_ARRAY_PITCH__MASK 0xffffffff
+#define A6XX_RB_MRT_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_MRT_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_ARRAY_PITCH__MASK;
+}
+
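+/*
+ * Note: the two pitch helpers above take a byte pitch and encode it in
+ * 64-byte units (the `val >> 6` in the packing functions), so callers are
+ * expected to pass a 64-byte-aligned pitch.  Illustrative use only:
+ *
+ *   uint32_t pitch = A6XX_RB_MRT_PITCH(width * cpp);   /- must be 64B aligned
+ */
+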
+static inline uint32_t REG_A6XX_RB_MRT_BASE_LO(uint32_t i0) { return 0x00008825 + 0x8*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_BASE_HI(uint32_t i0) { return 0x00008826 + 0x8*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_BASE_GMEM(uint32_t i0) { return 0x00008827 + 0x8*i0; }
+
+#define REG_A6XX_RB_BLEND_RED_F32 0x00008860
+#define A6XX_RB_BLEND_RED_F32__MASK 0xffffffff
+#define A6XX_RB_BLEND_RED_F32__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_RED_F32(float val)
+{
+ return ((fui(val)) << A6XX_RB_BLEND_RED_F32__SHIFT) & A6XX_RB_BLEND_RED_F32__MASK;
+}
+
+#define REG_A6XX_RB_BLEND_GREEN_F32 0x00008861
+#define A6XX_RB_BLEND_GREEN_F32__MASK 0xffffffff
+#define A6XX_RB_BLEND_GREEN_F32__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_GREEN_F32(float val)
+{
+ return ((fui(val)) << A6XX_RB_BLEND_GREEN_F32__SHIFT) & A6XX_RB_BLEND_GREEN_F32__MASK;
+}
+
+#define REG_A6XX_RB_BLEND_BLUE_F32 0x00008862
+#define A6XX_RB_BLEND_BLUE_F32__MASK 0xffffffff
+#define A6XX_RB_BLEND_BLUE_F32__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_BLUE_F32(float val)
+{
+ return ((fui(val)) << A6XX_RB_BLEND_BLUE_F32__SHIFT) & A6XX_RB_BLEND_BLUE_F32__MASK;
+}
+
+#define REG_A6XX_RB_BLEND_ALPHA_F32 0x00008863
+#define A6XX_RB_BLEND_ALPHA_F32__MASK 0xffffffff
+#define A6XX_RB_BLEND_ALPHA_F32__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_ALPHA_F32(float val)
+{
+ return ((fui(val)) << A6XX_RB_BLEND_ALPHA_F32__SHIFT) & A6XX_RB_BLEND_ALPHA_F32__MASK;
+}
+
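+/*
+ * Example: the blend-constant registers take the raw IEEE-754 bit pattern
+ * of a float; fui() used above is assumed to be the usual float-to-uint32
+ * bit cast provided alongside these headers:
+ *
+ *   uint32_t red = A6XX_RB_BLEND_RED_F32(0.5f);
+ */
+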
+#define REG_A6XX_RB_ALPHA_CONTROL 0x00008864
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK 0x000000ff
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT 0
+static inline uint32_t A6XX_RB_ALPHA_CONTROL_ALPHA_REF(uint32_t val)
+{
+ return ((val) << A6XX_RB_ALPHA_CONTROL_ALPHA_REF__SHIFT) & A6XX_RB_ALPHA_CONTROL_ALPHA_REF__MASK;
+}
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_TEST 0x00000100
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK 0x00000e00
+#define A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT 9
+static inline uint32_t A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__SHIFT) & A6XX_RB_ALPHA_CONTROL_ALPHA_TEST_FUNC__MASK;
+}
+
+#define REG_A6XX_RB_BLEND_CNTL 0x00008865
+#define A6XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK 0x000000ff
+#define A6XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT 0
+static inline uint32_t A6XX_RB_BLEND_CNTL_ENABLE_BLEND(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLEND_CNTL_ENABLE_BLEND__SHIFT) & A6XX_RB_BLEND_CNTL_ENABLE_BLEND__MASK;
+}
+#define A6XX_RB_BLEND_CNTL_INDEPENDENT_BLEND 0x00000100
+#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK 0xffff0000
+#define A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT 16
+static inline uint32_t A6XX_RB_BLEND_CNTL_SAMPLE_MASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLEND_CNTL_SAMPLE_MASK__SHIFT) & A6XX_RB_BLEND_CNTL_SAMPLE_MASK__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_CNTL 0x00008871
+#define A6XX_RB_DEPTH_CNTL_Z_ENABLE 0x00000001
+#define A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE 0x00000002
+#define A6XX_RB_DEPTH_CNTL_ZFUNC__MASK 0x0000001c
+#define A6XX_RB_DEPTH_CNTL_ZFUNC__SHIFT 2
+static inline uint32_t A6XX_RB_DEPTH_CNTL_ZFUNC(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_RB_DEPTH_CNTL_ZFUNC__SHIFT) & A6XX_RB_DEPTH_CNTL_ZFUNC__MASK;
+}
+#define A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE 0x00000040
+
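+/*
+ * Example: standard less-than depth testing with depth writes enabled.
+ * FUNC_LESS is assumed from the shared adreno_compare_func enum:
+ *
+ *   uint32_t depth = A6XX_RB_DEPTH_CNTL_Z_ENABLE |
+ *                    A6XX_RB_DEPTH_CNTL_Z_WRITE_ENABLE |
+ *                    A6XX_RB_DEPTH_CNTL_ZFUNC(FUNC_LESS) |
+ *                    A6XX_RB_DEPTH_CNTL_Z_TEST_ENABLE;
+ */
+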
+#define REG_A6XX_RB_DEPTH_BUFFER_INFO 0x00008872
+#define A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK 0x00000007
+#define A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT(enum a6xx_depth_format val)
+{
+ return ((val) << A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__SHIFT) & A6XX_RB_DEPTH_BUFFER_INFO_DEPTH_FORMAT__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_BUFFER_PITCH 0x00008873
+#define A6XX_RB_DEPTH_BUFFER_PITCH__MASK 0xffffffff
+#define A6XX_RB_DEPTH_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_DEPTH_BUFFER_PITCH__SHIFT) & A6XX_RB_DEPTH_BUFFER_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH 0x00008874
+#define A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_RB_DEPTH_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_DEPTH_BUFFER_BASE_LO 0x00008875
+
+#define REG_A6XX_RB_DEPTH_BUFFER_BASE_HI 0x00008876
+
+#define REG_A6XX_RB_DEPTH_BUFFER_BASE_GMEM 0x00008877
+
+#define REG_A6XX_RB_UNKNOWN_8878 0x00008878
+
+#define REG_A6XX_RB_UNKNOWN_8879 0x00008879
+
+#define REG_A6XX_RB_STENCIL_CONTROL 0x00008880
+#define A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE 0x00000001
+#define A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE_BF 0x00000002
+#define A6XX_RB_STENCIL_CONTROL_STENCIL_READ 0x00000004
+#define A6XX_RB_STENCIL_CONTROL_FUNC__MASK 0x00000700
+#define A6XX_RB_STENCIL_CONTROL_FUNC__SHIFT 8
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_FUNC__SHIFT) & A6XX_RB_STENCIL_CONTROL_FUNC__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_FAIL__MASK 0x00003800
+#define A6XX_RB_STENCIL_CONTROL_FAIL__SHIFT 11
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_FAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_FAIL__SHIFT) & A6XX_RB_STENCIL_CONTROL_FAIL__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_ZPASS__MASK 0x0001c000
+#define A6XX_RB_STENCIL_CONTROL_ZPASS__SHIFT 14
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZPASS(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_ZPASS__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZPASS__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_ZFAIL__MASK 0x000e0000
+#define A6XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT 17
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZFAIL(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_ZFAIL__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZFAIL__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_FUNC_BF__MASK 0x00700000
+#define A6XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT 20
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_FUNC_BF(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_FUNC_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_FUNC_BF__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_FAIL_BF__MASK 0x03800000
+#define A6XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT 23
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_FAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_FAIL_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_FAIL_BF__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK 0x1c000000
+#define A6XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT 26
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZPASS_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_ZPASS_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZPASS_BF__MASK;
+}
+#define A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK 0xe0000000
+#define A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT 29
+static inline uint32_t A6XX_RB_STENCIL_CONTROL_ZFAIL_BF(enum adreno_stencil_op val)
+{
+ return ((val) << A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__SHIFT) & A6XX_RB_STENCIL_CONTROL_ZFAIL_BF__MASK;
+}
+
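+/*
+ * Example: "always pass, keep everything" stencil state for both faces.
+ * FUNC_ALWAYS and STENCIL_KEEP are assumed from the shared adreno enums:
+ *
+ *   uint32_t sc = A6XX_RB_STENCIL_CONTROL_STENCIL_ENABLE |
+ *                 A6XX_RB_STENCIL_CONTROL_FUNC(FUNC_ALWAYS) |
+ *                 A6XX_RB_STENCIL_CONTROL_FAIL(STENCIL_KEEP) |
+ *                 A6XX_RB_STENCIL_CONTROL_ZPASS(STENCIL_KEEP) |
+ *                 A6XX_RB_STENCIL_CONTROL_ZFAIL(STENCIL_KEEP) |
+ *                 A6XX_RB_STENCIL_CONTROL_FUNC_BF(FUNC_ALWAYS) |
+ *                 A6XX_RB_STENCIL_CONTROL_FAIL_BF(STENCIL_KEEP) |
+ *                 A6XX_RB_STENCIL_CONTROL_ZPASS_BF(STENCIL_KEEP) |
+ *                 A6XX_RB_STENCIL_CONTROL_ZFAIL_BF(STENCIL_KEEP);
+ */
+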
+#define REG_A6XX_RB_STENCIL_INFO 0x00008881
+#define A6XX_RB_STENCIL_INFO_SEPARATE_STENCIL 0x00000001
+
+#define REG_A6XX_RB_STENCIL_BUFFER_PITCH 0x00008882
+#define A6XX_RB_STENCIL_BUFFER_PITCH__MASK 0xffffffff
+#define A6XX_RB_STENCIL_BUFFER_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_STENCIL_BUFFER_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_STENCIL_BUFFER_PITCH__SHIFT) & A6XX_RB_STENCIL_BUFFER_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH 0x00008883
+#define A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__MASK 0xffffffff
+#define A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__SHIFT) & A6XX_RB_STENCIL_BUFFER_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_STENCIL_BUFFER_BASE_LO 0x00008884
+
+#define REG_A6XX_RB_STENCIL_BUFFER_BASE_HI 0x00008885
+
+#define REG_A6XX_RB_STENCIL_BUFFER_BASE_GMEM 0x00008886
+
+#define REG_A6XX_RB_STENCILREF 0x00008887
+#define A6XX_RB_STENCILREF_REF__MASK 0x000000ff
+#define A6XX_RB_STENCILREF_REF__SHIFT 0
+static inline uint32_t A6XX_RB_STENCILREF_REF(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILREF_REF__SHIFT) & A6XX_RB_STENCILREF_REF__MASK;
+}
+
+#define REG_A6XX_RB_STENCILMASK 0x00008888
+#define A6XX_RB_STENCILMASK_MASK__MASK 0x000000ff
+#define A6XX_RB_STENCILMASK_MASK__SHIFT 0
+static inline uint32_t A6XX_RB_STENCILMASK_MASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILMASK_MASK__SHIFT) & A6XX_RB_STENCILMASK_MASK__MASK;
+}
+
+#define REG_A6XX_RB_STENCILWRMASK 0x00008889
+#define A6XX_RB_STENCILWRMASK_WRMASK__MASK 0x000000ff
+#define A6XX_RB_STENCILWRMASK_WRMASK__SHIFT 0
+static inline uint32_t A6XX_RB_STENCILWRMASK_WRMASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_STENCILWRMASK_WRMASK__SHIFT) & A6XX_RB_STENCILWRMASK_WRMASK__MASK;
+}
+
+#define REG_A6XX_RB_WINDOW_OFFSET 0x00008890
+#define A6XX_RB_WINDOW_OFFSET_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_RB_WINDOW_OFFSET_X__MASK 0x00007fff
+#define A6XX_RB_WINDOW_OFFSET_X__SHIFT 0
+static inline uint32_t A6XX_RB_WINDOW_OFFSET_X(uint32_t val)
+{
+ return ((val) << A6XX_RB_WINDOW_OFFSET_X__SHIFT) & A6XX_RB_WINDOW_OFFSET_X__MASK;
+}
+#define A6XX_RB_WINDOW_OFFSET_Y__MASK 0x7fff0000
+#define A6XX_RB_WINDOW_OFFSET_Y__SHIFT 16
+static inline uint32_t A6XX_RB_WINDOW_OFFSET_Y(uint32_t val)
+{
+ return ((val) << A6XX_RB_WINDOW_OFFSET_Y__SHIFT) & A6XX_RB_WINDOW_OFFSET_Y__MASK;
+}
+
+#define REG_A6XX_RB_SAMPLE_COUNT_CONTROL 0x00008891
+#define A6XX_RB_SAMPLE_COUNT_CONTROL_COPY 0x00000002
+
+#define REG_A6XX_RB_UNKNOWN_88D0 0x000088d0
+
+#define REG_A6XX_RB_BLIT_SCISSOR_TL 0x000088d1
+#define A6XX_RB_BLIT_SCISSOR_TL_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_RB_BLIT_SCISSOR_TL_X__MASK 0x00007fff
+#define A6XX_RB_BLIT_SCISSOR_TL_X__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_SCISSOR_TL_X(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_SCISSOR_TL_X__SHIFT) & A6XX_RB_BLIT_SCISSOR_TL_X__MASK;
+}
+#define A6XX_RB_BLIT_SCISSOR_TL_Y__MASK 0x7fff0000
+#define A6XX_RB_BLIT_SCISSOR_TL_Y__SHIFT 16
+static inline uint32_t A6XX_RB_BLIT_SCISSOR_TL_Y(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_SCISSOR_TL_Y__SHIFT) & A6XX_RB_BLIT_SCISSOR_TL_Y__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_SCISSOR_BR 0x000088d2
+#define A6XX_RB_BLIT_SCISSOR_BR_WINDOW_OFFSET_DISABLE 0x80000000
+#define A6XX_RB_BLIT_SCISSOR_BR_X__MASK 0x00007fff
+#define A6XX_RB_BLIT_SCISSOR_BR_X__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_X(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_SCISSOR_BR_X__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_X__MASK;
+}
+#define A6XX_RB_BLIT_SCISSOR_BR_Y__MASK 0x7fff0000
+#define A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT 16
+static inline uint32_t A6XX_RB_BLIT_SCISSOR_BR_Y(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_SCISSOR_BR_Y__SHIFT) & A6XX_RB_BLIT_SCISSOR_BR_Y__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_BASE_GMEM 0x000088d6
+
+#define REG_A6XX_RB_BLIT_DST_INFO 0x000088d7
+#define A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK 0x00000003
+#define A6XX_RB_BLIT_DST_INFO_TILE_MODE__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_RB_BLIT_DST_INFO_TILE_MODE__SHIFT) & A6XX_RB_BLIT_DST_INFO_TILE_MODE__MASK;
+}
+#define A6XX_RB_BLIT_DST_INFO_FLAGS 0x00000004
+#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK 0x00007f80
+#define A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT 7
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_BLIT_DST_INFO_COLOR_FORMAT__MASK;
+}
+#define A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__MASK 0x00000060
+#define A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__SHIFT 5
+static inline uint32_t A6XX_RB_BLIT_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_BLIT_DST_INFO_COLOR_SWAP__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_DST_LO 0x000088d8
+
+#define REG_A6XX_RB_BLIT_DST_HI 0x000088d9
+
+#define REG_A6XX_RB_BLIT_DST_PITCH 0x000088da
+#define A6XX_RB_BLIT_DST_PITCH__MASK 0xffffffff
+#define A6XX_RB_BLIT_DST_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_DST_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_BLIT_DST_PITCH__SHIFT) & A6XX_RB_BLIT_DST_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_DST_ARRAY_PITCH 0x000088db
+#define A6XX_RB_BLIT_DST_ARRAY_PITCH__MASK 0xffffffff
+#define A6XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_BLIT_DST_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_BLIT_DST_ARRAY_PITCH__SHIFT) & A6XX_RB_BLIT_DST_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_BLIT_FLAG_DST_LO 0x000088dc
+
+#define REG_A6XX_RB_BLIT_FLAG_DST_HI 0x000088dd
+
+#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW0 0x000088df
+
+#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW1 0x000088e0
+
+#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW2 0x000088e1
+
+#define REG_A6XX_RB_BLIT_CLEAR_COLOR_DW3 0x000088e2
+
+#define REG_A6XX_RB_BLIT_INFO 0x000088e3
+#define A6XX_RB_BLIT_INFO_UNK0 0x00000001
+#define A6XX_RB_BLIT_INFO_FAST_CLEAR 0x00000002
+#define A6XX_RB_BLIT_INFO_INTEGER 0x00000004
+#define A6XX_RB_BLIT_INFO_UNK3 0x00000008
+#define A6XX_RB_BLIT_INFO_MASK__MASK 0x000000f0
+#define A6XX_RB_BLIT_INFO_MASK__SHIFT 4
+static inline uint32_t A6XX_RB_BLIT_INFO_MASK(uint32_t val)
+{
+ return ((val) << A6XX_RB_BLIT_INFO_MASK__SHIFT) & A6XX_RB_BLIT_INFO_MASK__MASK;
+}
+
+#define REG_A6XX_RB_UNKNOWN_88F0 0x000088f0
+
+#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_LO 0x00008900
+
+#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_BASE_HI 0x00008901
+
+#define REG_A6XX_RB_DEPTH_FLAG_BUFFER_PITCH 0x00008902
+
+static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER(uint32_t i0) { return 0x00008903 + 0x3*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR_LO(uint32_t i0) { return 0x00008903 + 0x3*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_ADDR_HI(uint32_t i0) { return 0x00008904 + 0x3*i0; }
+
+static inline uint32_t REG_A6XX_RB_MRT_FLAG_BUFFER_PITCH(uint32_t i0) { return 0x00008905 + 0x3*i0; }
+#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__MASK 0x000007ff
+#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_PITCH__MASK;
+}
+#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK 0x003ff800
+#define A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT 11
+static inline uint32_t A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 5) << A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__SHIFT) & A6XX_RB_MRT_FLAG_BUFFER_PITCH_ARRAY_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_SAMPLE_COUNT_ADDR_LO 0x00008927
+
+#define REG_A6XX_RB_SAMPLE_COUNT_ADDR_HI 0x00008928
+
+#define REG_A6XX_RB_2D_BLIT_CNTL 0x00008c00
+#define A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK 0x0000ff00
+#define A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT 8
+static inline uint32_t A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_BLIT_CNTL_COLOR_FORMAT__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_INFO 0x00008c17
+#define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_RB_2D_DST_INFO_COLOR_FORMAT__SHIFT) & A6XX_RB_2D_DST_INFO_COLOR_FORMAT__MASK;
+}
+#define A6XX_RB_2D_DST_INFO_TILE_MODE__MASK 0x00000300
+#define A6XX_RB_2D_DST_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A6XX_RB_2D_DST_INFO_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_RB_2D_DST_INFO_TILE_MODE__SHIFT) & A6XX_RB_2D_DST_INFO_TILE_MODE__MASK;
+}
+#define A6XX_RB_2D_DST_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A6XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A6XX_RB_2D_DST_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_RB_2D_DST_INFO_COLOR_SWAP__SHIFT) & A6XX_RB_2D_DST_INFO_COLOR_SWAP__MASK;
+}
+#define A6XX_RB_2D_DST_INFO_FLAGS 0x00001000
+
+#define REG_A6XX_RB_2D_DST_LO 0x00008c18
+
+#define REG_A6XX_RB_2D_DST_HI 0x00008c19
+
+#define REG_A6XX_RB_2D_DST_SIZE 0x00008c1a
+#define A6XX_RB_2D_DST_SIZE_PITCH__MASK 0x0000ffff
+#define A6XX_RB_2D_DST_SIZE_PITCH__SHIFT 0
+static inline uint32_t A6XX_RB_2D_DST_SIZE_PITCH(uint32_t val)
+{
+ return ((val >> 6) << A6XX_RB_2D_DST_SIZE_PITCH__SHIFT) & A6XX_RB_2D_DST_SIZE_PITCH__MASK;
+}
+
+#define REG_A6XX_RB_2D_DST_FLAGS_LO 0x00008c20
+
+#define REG_A6XX_RB_2D_DST_FLAGS_HI 0x00008c21
+
+#define REG_A6XX_RB_2D_SRC_SOLID_C0 0x00008c2c
+
+#define REG_A6XX_RB_2D_SRC_SOLID_C1 0x00008c2d
+
+#define REG_A6XX_RB_2D_SRC_SOLID_C2 0x00008c2e
+
+#define REG_A6XX_RB_2D_SRC_SOLID_C3 0x00008c2f
+
+#define REG_A6XX_RB_UNKNOWN_8E01 0x00008e01
+
+#define REG_A6XX_RB_CCU_CNTL 0x00008e07
+
+#define REG_A6XX_VPC_UNKNOWN_9101 0x00009101
+
+#define REG_A6XX_VPC_GS_SIV_CNTL 0x00009104
+
+#define REG_A6XX_VPC_UNKNOWN_9108 0x00009108
+
+static inline uint32_t REG_A6XX_VPC_VARYING_INTERP(uint32_t i0) { return 0x00009200 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VPC_VARYING_INTERP_MODE(uint32_t i0) { return 0x00009200 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VPC_VARYING_PS_REPL(uint32_t i0) { return 0x00009208 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VPC_VARYING_PS_REPL_MODE(uint32_t i0) { return 0x00009208 + 0x1*i0; }
+
+#define REG_A6XX_VPC_UNKNOWN_9210 0x00009210
+
+#define REG_A6XX_VPC_UNKNOWN_9211 0x00009211
+
+static inline uint32_t REG_A6XX_VPC_VAR(uint32_t i0) { return 0x00009212 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VPC_VAR_DISABLE(uint32_t i0) { return 0x00009212 + 0x1*i0; }
+
+#define REG_A6XX_VPC_SO_CNTL 0x00009216
+#define A6XX_VPC_SO_CNTL_ENABLE 0x00010000
+
+#define REG_A6XX_VPC_SO_PROG 0x00009217
+#define A6XX_VPC_SO_PROG_A_BUF__MASK 0x00000003
+#define A6XX_VPC_SO_PROG_A_BUF__SHIFT 0
+static inline uint32_t A6XX_VPC_SO_PROG_A_BUF(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_PROG_A_BUF__SHIFT) & A6XX_VPC_SO_PROG_A_BUF__MASK;
+}
+#define A6XX_VPC_SO_PROG_A_OFF__MASK 0x000007fc
+#define A6XX_VPC_SO_PROG_A_OFF__SHIFT 2
+static inline uint32_t A6XX_VPC_SO_PROG_A_OFF(uint32_t val)
+{
+ return ((val >> 2) << A6XX_VPC_SO_PROG_A_OFF__SHIFT) & A6XX_VPC_SO_PROG_A_OFF__MASK;
+}
+#define A6XX_VPC_SO_PROG_A_EN 0x00000800
+#define A6XX_VPC_SO_PROG_B_BUF__MASK 0x00003000
+#define A6XX_VPC_SO_PROG_B_BUF__SHIFT 12
+static inline uint32_t A6XX_VPC_SO_PROG_B_BUF(uint32_t val)
+{
+ return ((val) << A6XX_VPC_SO_PROG_B_BUF__SHIFT) & A6XX_VPC_SO_PROG_B_BUF__MASK;
+}
+#define A6XX_VPC_SO_PROG_B_OFF__MASK 0x007fc000
+#define A6XX_VPC_SO_PROG_B_OFF__SHIFT 14
+static inline uint32_t A6XX_VPC_SO_PROG_B_OFF(uint32_t val)
+{
+ return ((val >> 2) << A6XX_VPC_SO_PROG_B_OFF__SHIFT) & A6XX_VPC_SO_PROG_B_OFF__MASK;
+}
+#define A6XX_VPC_SO_PROG_B_EN 0x00800000
+
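+/*
+ * Example: each VPC_SO_PROG write describes up to two stream-out copies
+ * (A and B).  The offset helpers take a byte offset and encode it in dwords
+ * (the `val >> 2` above), so offsets must be 4-byte aligned.  A sketch for a
+ * single enabled copy into buffer 0 at byte offset 16 (values illustrative):
+ *
+ *   uint32_t prog = A6XX_VPC_SO_PROG_A_BUF(0) |
+ *                   A6XX_VPC_SO_PROG_A_OFF(16) |
+ *                   A6XX_VPC_SO_PROG_A_EN;
+ */
+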
+static inline uint32_t REG_A6XX_VPC_SO(uint32_t i0) { return 0x0000921a + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_BUFFER_BASE_LO(uint32_t i0) { return 0x0000921a + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_BUFFER_BASE_HI(uint32_t i0) { return 0x0000921b + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_BUFFER_SIZE(uint32_t i0) { return 0x0000921c + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_NCOMP(uint32_t i0) { return 0x0000921d + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_BUFFER_OFFSET(uint32_t i0) { return 0x0000921e + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_FLUSH_BASE_LO(uint32_t i0) { return 0x0000921f + 0x7*i0; }
+
+static inline uint32_t REG_A6XX_VPC_SO_FLUSH_BASE_HI(uint32_t i0) { return 0x00009220 + 0x7*i0; }
+
+#define REG_A6XX_VPC_UNKNOWN_9236 0x00009236
+
+#define REG_A6XX_VPC_UNKNOWN_9300 0x00009300
+
+#define REG_A6XX_VPC_PACK 0x00009301
+#define A6XX_VPC_PACK_STRIDE_IN_VPC__MASK 0x000000ff
+#define A6XX_VPC_PACK_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_VPC_PACK_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_PACK_STRIDE_IN_VPC__SHIFT) & A6XX_VPC_PACK_STRIDE_IN_VPC__MASK;
+}
+#define A6XX_VPC_PACK_NUMNONPOSVAR__MASK 0x0000ff00
+#define A6XX_VPC_PACK_NUMNONPOSVAR__SHIFT 8
+static inline uint32_t A6XX_VPC_PACK_NUMNONPOSVAR(uint32_t val)
+{
+ return ((val) << A6XX_VPC_PACK_NUMNONPOSVAR__SHIFT) & A6XX_VPC_PACK_NUMNONPOSVAR__MASK;
+}
+#define A6XX_VPC_PACK_PSIZELOC__MASK 0x00ff0000
+#define A6XX_VPC_PACK_PSIZELOC__SHIFT 16
+static inline uint32_t A6XX_VPC_PACK_PSIZELOC(uint32_t val)
+{
+ return ((val) << A6XX_VPC_PACK_PSIZELOC__SHIFT) & A6XX_VPC_PACK_PSIZELOC__MASK;
+}
+
+#define REG_A6XX_VPC_CNTL_0 0x00009304
+#define A6XX_VPC_CNTL_0_NUMNONPOSVAR__MASK 0x000000ff
+#define A6XX_VPC_CNTL_0_NUMNONPOSVAR__SHIFT 0
+static inline uint32_t A6XX_VPC_CNTL_0_NUMNONPOSVAR(uint32_t val)
+{
+ return ((val) << A6XX_VPC_CNTL_0_NUMNONPOSVAR__SHIFT) & A6XX_VPC_CNTL_0_NUMNONPOSVAR__MASK;
+}
+#define A6XX_VPC_CNTL_0_VARYING 0x00010000
+
+#define REG_A6XX_VPC_SO_BUF_CNTL 0x00009305
+#define A6XX_VPC_SO_BUF_CNTL_BUF0 0x00000001
+#define A6XX_VPC_SO_BUF_CNTL_BUF1 0x00000008
+#define A6XX_VPC_SO_BUF_CNTL_BUF2 0x00000040
+#define A6XX_VPC_SO_BUF_CNTL_BUF3 0x00000200
+#define A6XX_VPC_SO_BUF_CNTL_ENABLE 0x00008000
+
+#define REG_A6XX_VPC_UNKNOWN_9600 0x00009600
+
+#define REG_A6XX_VPC_UNKNOWN_9602 0x00009602
+
+#define REG_A6XX_PC_UNKNOWN_9801 0x00009801
+
+#define REG_A6XX_PC_RESTART_INDEX 0x00009803
+
+#define REG_A6XX_PC_MODE_CNTL 0x00009804
+
+#define REG_A6XX_PC_UNKNOWN_9805 0x00009805
+
+#define REG_A6XX_PC_UNKNOWN_9981 0x00009981
+
+#define REG_A6XX_PC_PRIMITIVE_CNTL_0 0x00009b00
+#define A6XX_PC_PRIMITIVE_CNTL_0_PRIMITIVE_RESTART 0x00000001
+#define A6XX_PC_PRIMITIVE_CNTL_0_PROVOKING_VTX_LAST 0x00000002
+
+#define REG_A6XX_PC_PRIMITIVE_CNTL_1 0x00009b01
+#define A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__MASK 0x0000007f
+#define A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__SHIFT 0
+static inline uint32_t A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC(uint32_t val)
+{
+ return ((val) << A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__SHIFT) & A6XX_PC_PRIMITIVE_CNTL_1_STRIDE_IN_VPC__MASK;
+}
+
+#define REG_A6XX_PC_UNKNOWN_9B06 0x00009b06
+
+#define REG_A6XX_PC_UNKNOWN_9B07 0x00009b07
+
+#define REG_A6XX_PC_TESSFACTOR_ADDR_LO 0x00009e08
+
+#define REG_A6XX_PC_TESSFACTOR_ADDR_HI 0x00009e09
+
+#define REG_A6XX_PC_UNKNOWN_9E72 0x00009e72
+
+#define REG_A6XX_VFD_CONTROL_0 0x0000a000
+#define A6XX_VFD_CONTROL_0_VTXCNT__MASK 0x0000003f
+#define A6XX_VFD_CONTROL_0_VTXCNT__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_0_VTXCNT(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_0_VTXCNT__SHIFT) & A6XX_VFD_CONTROL_0_VTXCNT__MASK;
+}
+
+#define REG_A6XX_VFD_CONTROL_1 0x0000a001
+#define A6XX_VFD_CONTROL_1_REGID4VTX__MASK 0x000000ff
+#define A6XX_VFD_CONTROL_1_REGID4VTX__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_1_REGID4VTX(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_1_REGID4VTX__SHIFT) & A6XX_VFD_CONTROL_1_REGID4VTX__MASK;
+}
+#define A6XX_VFD_CONTROL_1_REGID4INST__MASK 0x0000ff00
+#define A6XX_VFD_CONTROL_1_REGID4INST__SHIFT 8
+static inline uint32_t A6XX_VFD_CONTROL_1_REGID4INST(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_1_REGID4INST__SHIFT) & A6XX_VFD_CONTROL_1_REGID4INST__MASK;
+}
+#define A6XX_VFD_CONTROL_1_REGID4PRIMID__MASK 0x00ff0000
+#define A6XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT 16
+static inline uint32_t A6XX_VFD_CONTROL_1_REGID4PRIMID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_1_REGID4PRIMID__SHIFT) & A6XX_VFD_CONTROL_1_REGID4PRIMID__MASK;
+}
+
+#define REG_A6XX_VFD_CONTROL_2 0x0000a002
+#define A6XX_VFD_CONTROL_2_REGID_PATCHID__MASK 0x000000ff
+#define A6XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT 0
+static inline uint32_t A6XX_VFD_CONTROL_2_REGID_PATCHID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_2_REGID_PATCHID__SHIFT) & A6XX_VFD_CONTROL_2_REGID_PATCHID__MASK;
+}
+
+#define REG_A6XX_VFD_CONTROL_3 0x0000a003
+#define A6XX_VFD_CONTROL_3_REGID_PATCHID__MASK 0x0000ff00
+#define A6XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT 8
+static inline uint32_t A6XX_VFD_CONTROL_3_REGID_PATCHID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_3_REGID_PATCHID__SHIFT) & A6XX_VFD_CONTROL_3_REGID_PATCHID__MASK;
+}
+#define A6XX_VFD_CONTROL_3_REGID_TESSX__MASK 0x00ff0000
+#define A6XX_VFD_CONTROL_3_REGID_TESSX__SHIFT 16
+static inline uint32_t A6XX_VFD_CONTROL_3_REGID_TESSX(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_3_REGID_TESSX__SHIFT) & A6XX_VFD_CONTROL_3_REGID_TESSX__MASK;
+}
+#define A6XX_VFD_CONTROL_3_REGID_TESSY__MASK 0xff000000
+#define A6XX_VFD_CONTROL_3_REGID_TESSY__SHIFT 24
+static inline uint32_t A6XX_VFD_CONTROL_3_REGID_TESSY(uint32_t val)
+{
+ return ((val) << A6XX_VFD_CONTROL_3_REGID_TESSY__SHIFT) & A6XX_VFD_CONTROL_3_REGID_TESSY__MASK;
+}
+
+#define REG_A6XX_VFD_CONTROL_4 0x0000a004
+
+#define REG_A6XX_VFD_CONTROL_5 0x0000a005
+
+#define REG_A6XX_VFD_CONTROL_6 0x0000a006
+
+#define REG_A6XX_VFD_MODE_CNTL 0x0000a007
+#define A6XX_VFD_MODE_CNTL_BINNING_PASS 0x00000001
+
+#define REG_A6XX_VFD_UNKNOWN_A008 0x0000a008
+
+#define REG_A6XX_VFD_INDEX_OFFSET 0x0000a00e
+
+#define REG_A6XX_VFD_INSTANCE_START_OFFSET 0x0000a00f
+
+static inline uint32_t REG_A6XX_VFD_FETCH(uint32_t i0) { return 0x0000a010 + 0x4*i0; }
+
+static inline uint32_t REG_A6XX_VFD_FETCH_BASE_LO(uint32_t i0) { return 0x0000a010 + 0x4*i0; }
+
+static inline uint32_t REG_A6XX_VFD_FETCH_BASE_HI(uint32_t i0) { return 0x0000a011 + 0x4*i0; }
+
+static inline uint32_t REG_A6XX_VFD_FETCH_SIZE(uint32_t i0) { return 0x0000a012 + 0x4*i0; }
+
+static inline uint32_t REG_A6XX_VFD_FETCH_STRIDE(uint32_t i0) { return 0x0000a013 + 0x4*i0; }
+
+static inline uint32_t REG_A6XX_VFD_DECODE(uint32_t i0) { return 0x0000a090 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_VFD_DECODE_INSTR(uint32_t i0) { return 0x0000a090 + 0x2*i0; }
+#define A6XX_VFD_DECODE_INSTR_IDX__MASK 0x0000001f
+#define A6XX_VFD_DECODE_INSTR_IDX__SHIFT 0
+static inline uint32_t A6XX_VFD_DECODE_INSTR_IDX(uint32_t val)
+{
+ return ((val) << A6XX_VFD_DECODE_INSTR_IDX__SHIFT) & A6XX_VFD_DECODE_INSTR_IDX__MASK;
+}
+#define A6XX_VFD_DECODE_INSTR_INSTANCED 0x00020000
+#define A6XX_VFD_DECODE_INSTR_FORMAT__MASK 0x0ff00000
+#define A6XX_VFD_DECODE_INSTR_FORMAT__SHIFT 20
+static inline uint32_t A6XX_VFD_DECODE_INSTR_FORMAT(enum a6xx_vtx_fmt val)
+{
+ return ((val) << A6XX_VFD_DECODE_INSTR_FORMAT__SHIFT) & A6XX_VFD_DECODE_INSTR_FORMAT__MASK;
+}
+#define A6XX_VFD_DECODE_INSTR_SWAP__MASK 0x30000000
+#define A6XX_VFD_DECODE_INSTR_SWAP__SHIFT 28
+static inline uint32_t A6XX_VFD_DECODE_INSTR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_VFD_DECODE_INSTR_SWAP__SHIFT) & A6XX_VFD_DECODE_INSTR_SWAP__MASK;
+}
+#define A6XX_VFD_DECODE_INSTR_UNK30 0x40000000
+#define A6XX_VFD_DECODE_INSTR_FLOAT 0x80000000
+
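+/*
+ * Example: decoding a per-vertex vec4 float attribute from fetch slot 0.
+ * VFMT6_32_32_32_32_FLOAT and XYZW are assumed enumerators of a6xx_vtx_fmt
+ * and a3xx_color_swap respectively, used here purely for illustration:
+ *
+ *   uint32_t instr = A6XX_VFD_DECODE_INSTR_IDX(0) |
+ *                    A6XX_VFD_DECODE_INSTR_FORMAT(VFMT6_32_32_32_32_FLOAT) |
+ *                    A6XX_VFD_DECODE_INSTR_SWAP(XYZW) |
+ *                    A6XX_VFD_DECODE_INSTR_FLOAT;
+ */
+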
+static inline uint32_t REG_A6XX_VFD_DECODE_STEP_RATE(uint32_t i0) { return 0x0000a091 + 0x2*i0; }
+
+static inline uint32_t REG_A6XX_VFD_DEST_CNTL(uint32_t i0) { return 0x0000a0d0 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_VFD_DEST_CNTL_INSTR(uint32_t i0) { return 0x0000a0d0 + 0x1*i0; }
+#define A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK 0x0000000f
+#define A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT 0
+static inline uint32_t A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK(uint32_t val)
+{
+ return ((val) << A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__SHIFT) & A6XX_VFD_DEST_CNTL_INSTR_WRITEMASK__MASK;
+}
+#define A6XX_VFD_DEST_CNTL_INSTR_REGID__MASK 0x00000ff0
+#define A6XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT 4
+static inline uint32_t A6XX_VFD_DEST_CNTL_INSTR_REGID(uint32_t val)
+{
+ return ((val) << A6XX_VFD_DEST_CNTL_INSTR_REGID__SHIFT) & A6XX_VFD_DEST_CNTL_INSTR_REGID__MASK;
+}
+
+#define REG_A6XX_SP_UNKNOWN_A0F8 0x0000a0f8
+
+#define REG_A6XX_SP_PRIMITIVE_CNTL 0x0000a802
+#define A6XX_SP_PRIMITIVE_CNTL_VSOUT__MASK 0x0000001f
+#define A6XX_SP_PRIMITIVE_CNTL_VSOUT__SHIFT 0
+static inline uint32_t A6XX_SP_PRIMITIVE_CNTL_VSOUT(uint32_t val)
+{
+ return ((val) << A6XX_SP_PRIMITIVE_CNTL_VSOUT__SHIFT) & A6XX_SP_PRIMITIVE_CNTL_VSOUT__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_VS_OUT(uint32_t i0) { return 0x0000a803 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_VS_OUT_REG(uint32_t i0) { return 0x0000a803 + 0x1*i0; }
+#define A6XX_SP_VS_OUT_REG_A_REGID__MASK 0x000000ff
+#define A6XX_SP_VS_OUT_REG_A_REGID__SHIFT 0
+static inline uint32_t A6XX_SP_VS_OUT_REG_A_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_OUT_REG_A_REGID__SHIFT) & A6XX_SP_VS_OUT_REG_A_REGID__MASK;
+}
+#define A6XX_SP_VS_OUT_REG_A_COMPMASK__MASK 0x00000f00
+#define A6XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT 8
+static inline uint32_t A6XX_SP_VS_OUT_REG_A_COMPMASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_OUT_REG_A_COMPMASK__SHIFT) & A6XX_SP_VS_OUT_REG_A_COMPMASK__MASK;
+}
+#define A6XX_SP_VS_OUT_REG_B_REGID__MASK 0x00ff0000
+#define A6XX_SP_VS_OUT_REG_B_REGID__SHIFT 16
+static inline uint32_t A6XX_SP_VS_OUT_REG_B_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_OUT_REG_B_REGID__SHIFT) & A6XX_SP_VS_OUT_REG_B_REGID__MASK;
+}
+#define A6XX_SP_VS_OUT_REG_B_COMPMASK__MASK 0x0f000000
+#define A6XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT 24
+static inline uint32_t A6XX_SP_VS_OUT_REG_B_COMPMASK(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_OUT_REG_B_COMPMASK__SHIFT) & A6XX_SP_VS_OUT_REG_B_COMPMASK__MASK;
+}
+
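+/*
+ * Example: each SP_VS_OUT_REG packs two varying outputs (A and B), each as a
+ * shader register id plus a 4-bit component writemask (values illustrative):
+ *
+ *   uint32_t out = A6XX_SP_VS_OUT_REG_A_REGID(regid_a) |
+ *                  A6XX_SP_VS_OUT_REG_A_COMPMASK(0xf) |
+ *                  A6XX_SP_VS_OUT_REG_B_REGID(regid_b) |
+ *                  A6XX_SP_VS_OUT_REG_B_COMPMASK(0xf);
+ */
+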
+static inline uint32_t REG_A6XX_SP_VS_VPC_DST(uint32_t i0) { return 0x0000a813 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_VS_VPC_DST_REG(uint32_t i0) { return 0x0000a813 + 0x1*i0; }
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK 0x000000ff
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT 0
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC0(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC0__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC0__MASK;
+}
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK 0x0000ff00
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT 8
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC1(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC1__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC1__MASK;
+}
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK 0x00ff0000
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT 16
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC2(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC2__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC2__MASK;
+}
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK 0xff000000
+#define A6XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT 24
+static inline uint32_t A6XX_SP_VS_VPC_DST_REG_OUTLOC3(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_VPC_DST_REG_OUTLOC3__SHIFT) & A6XX_SP_VS_VPC_DST_REG_OUTLOC3__MASK;
+}
+
+#define REG_A6XX_SP_VS_CTRL_REG0 0x0000a800
+#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_VS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_VS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_VS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_VS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A6XX_SP_VS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_VS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_VS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_VS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_VS_CTRL_REG0_MERGEDREGS 0x80000000
+
+#define REG_A6XX_SP_VS_OBJ_START_LO 0x0000a81c
+
+#define REG_A6XX_SP_VS_OBJ_START_HI 0x0000a81d
+
+#define REG_A6XX_SP_VS_TEX_COUNT 0x0000a822
+
+#define REG_A6XX_SP_VS_CONFIG 0x0000a823
+#define A6XX_SP_VS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_VS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_VS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_VS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CONFIG_NTEX__SHIFT) & A6XX_SP_VS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_VS_CONFIG_NSAMP__MASK 0x01fe0000
+#define A6XX_SP_VS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_VS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_VS_CONFIG_NSAMP__SHIFT) & A6XX_SP_VS_CONFIG_NSAMP__MASK;
+}
+
+#define REG_A6XX_SP_VS_INSTRLEN 0x0000a824
+
+#define REG_A6XX_SP_HS_CTRL_REG0 0x0000a830
+#define A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_HS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_HS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_HS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+#define A6XX_SP_HS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_HS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A6XX_SP_HS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_HS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_HS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_HS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_HS_CTRL_REG0_MERGEDREGS 0x80000000
+
+#define REG_A6XX_SP_HS_UNKNOWN_A831 0x0000a831
+
+#define REG_A6XX_SP_HS_OBJ_START_LO 0x0000a834
+
+#define REG_A6XX_SP_HS_OBJ_START_HI 0x0000a835
+
+#define REG_A6XX_SP_HS_TEX_COUNT 0x0000a83a
+
+#define REG_A6XX_SP_HS_CONFIG 0x0000a83b
+#define A6XX_SP_HS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_HS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_HS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_HS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CONFIG_NTEX__SHIFT) & A6XX_SP_HS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_HS_CONFIG_NSAMP__MASK 0x01fe0000
+#define A6XX_SP_HS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_HS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_HS_CONFIG_NSAMP__SHIFT) & A6XX_SP_HS_CONFIG_NSAMP__MASK;
+}
+
+#define REG_A6XX_SP_HS_INSTRLEN 0x0000a83c
+
+#define REG_A6XX_SP_DS_CTRL_REG0 0x0000a840
+#define A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_DS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_DS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_DS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+#define A6XX_SP_DS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_DS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A6XX_SP_DS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_DS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_DS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_DS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_DS_CTRL_REG0_MERGEDREGS 0x80000000
+
+#define REG_A6XX_SP_DS_OBJ_START_LO 0x0000a85c
+
+#define REG_A6XX_SP_DS_OBJ_START_HI 0x0000a85d
+
+#define REG_A6XX_SP_DS_TEX_COUNT 0x0000a862
+
+#define REG_A6XX_SP_DS_CONFIG 0x0000a863
+#define A6XX_SP_DS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_DS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_DS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_DS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CONFIG_NTEX__SHIFT) & A6XX_SP_DS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_DS_CONFIG_NSAMP__MASK 0x01fe0000
+#define A6XX_SP_DS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_DS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_DS_CONFIG_NSAMP__SHIFT) & A6XX_SP_DS_CONFIG_NSAMP__MASK;
+}
+
+#define REG_A6XX_SP_DS_INSTRLEN 0x0000a864
+
+#define REG_A6XX_SP_GS_CTRL_REG0 0x0000a870
+#define A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_GS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_GS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_GS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+#define A6XX_SP_GS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_GS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A6XX_SP_GS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_GS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_GS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_GS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_GS_CTRL_REG0_MERGEDREGS 0x80000000
+
+#define REG_A6XX_SP_GS_UNKNOWN_A871 0x0000a871
+
+#define REG_A6XX_SP_GS_OBJ_START_LO 0x0000a88d
+
+#define REG_A6XX_SP_GS_OBJ_START_HI 0x0000a88e
+
+#define REG_A6XX_SP_GS_TEX_COUNT 0x0000a893
+
+#define REG_A6XX_SP_GS_CONFIG 0x0000a894
+#define A6XX_SP_GS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_GS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_GS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_GS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CONFIG_NTEX__SHIFT) & A6XX_SP_GS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_GS_CONFIG_NSAMP__MASK 0x01fe0000
+#define A6XX_SP_GS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_GS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_GS_CONFIG_NSAMP__SHIFT) & A6XX_SP_GS_CONFIG_NSAMP__MASK;
+}
+
+#define REG_A6XX_SP_GS_INSTRLEN 0x0000a895
+
+#define REG_A6XX_SP_VS_TEX_SAMP_LO 0x0000a8a0
+
+#define REG_A6XX_SP_VS_TEX_SAMP_HI 0x0000a8a1
+
+#define REG_A6XX_SP_HS_TEX_SAMP_LO 0x0000a8a2
+
+#define REG_A6XX_SP_HS_TEX_SAMP_HI 0x0000a8a3
+
+#define REG_A6XX_SP_DS_TEX_SAMP_LO 0x0000a8a4
+
+#define REG_A6XX_SP_DS_TEX_SAMP_HI 0x0000a8a5
+
+#define REG_A6XX_SP_GS_TEX_SAMP_LO 0x0000a8a6
+
+#define REG_A6XX_SP_GS_TEX_SAMP_HI 0x0000a8a7
+
+#define REG_A6XX_SP_VS_TEX_CONST_LO 0x0000a8a8
+
+#define REG_A6XX_SP_VS_TEX_CONST_HI 0x0000a8a9
+
+#define REG_A6XX_SP_HS_TEX_CONST_LO 0x0000a8aa
+
+#define REG_A6XX_SP_HS_TEX_CONST_HI 0x0000a8ab
+
+#define REG_A6XX_SP_DS_TEX_CONST_LO 0x0000a8ac
+
+#define REG_A6XX_SP_DS_TEX_CONST_HI 0x0000a8ad
+
+#define REG_A6XX_SP_GS_TEX_CONST_LO 0x0000a8ae
+
+#define REG_A6XX_SP_GS_TEX_CONST_HI 0x0000a8af
+
+#define REG_A6XX_SP_FS_CTRL_REG0 0x0000a980
+#define A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_FS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_FS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_FS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_FS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A6XX_SP_FS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_FS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_FS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_FS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_FS_CTRL_REG0_MERGEDREGS 0x80000000
+
+#define REG_A6XX_SP_FS_OBJ_START_LO 0x0000a983
+
+#define REG_A6XX_SP_FS_OBJ_START_HI 0x0000a984
+
+#define REG_A6XX_SP_BLEND_CNTL 0x0000a989
+#define A6XX_SP_BLEND_CNTL_ENABLED 0x00000001
+#define A6XX_SP_BLEND_CNTL_UNK8 0x00000100
+
+#define REG_A6XX_SP_SRGB_CNTL 0x0000a98a
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT0 0x00000001
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT1 0x00000002
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT2 0x00000004
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT3 0x00000008
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT4 0x00000010
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT5 0x00000020
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT6 0x00000040
+#define A6XX_SP_SRGB_CNTL_SRGB_MRT7 0x00000080
+
+#define REG_A6XX_SP_FS_RENDER_COMPONENTS 0x0000a98b
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT0__MASK 0x0000000f
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT0__SHIFT 0
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT0(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT0__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT0__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT1__MASK 0x000000f0
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT1__SHIFT 4
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT1(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT1__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT1__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT2__MASK 0x00000f00
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT2__SHIFT 8
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT2(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT2__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT2__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT3__MASK 0x0000f000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT3__SHIFT 12
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT3(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT3__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT3__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT4__MASK 0x000f0000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT4__SHIFT 16
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT4(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT4__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT4__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT5__MASK 0x00f00000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT5__SHIFT 20
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT5(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT5__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT5__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT6__MASK 0x0f000000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT6__SHIFT 24
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT6(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT6__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT6__MASK;
+}
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT7__MASK 0xf0000000
+#define A6XX_SP_FS_RENDER_COMPONENTS_RT7__SHIFT 28
+static inline uint32_t A6XX_SP_FS_RENDER_COMPONENTS_RT7(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_RENDER_COMPONENTS_RT7__SHIFT) & A6XX_SP_FS_RENDER_COMPONENTS_RT7__MASK;
+}
+
+#define REG_A6XX_SP_FS_OUTPUT_CNTL0 0x0000a98c
+#define A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__MASK 0x0000ff00
+#define A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__SHIFT 8
+static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL0_DEPTH_REGID__MASK;
+}
+
+#define REG_A6XX_SP_FS_OUTPUT_CNTL1 0x0000a98d
+#define A6XX_SP_FS_OUTPUT_CNTL1_MRT__MASK 0x0000000f
+#define A6XX_SP_FS_OUTPUT_CNTL1_MRT__SHIFT 0
+static inline uint32_t A6XX_SP_FS_OUTPUT_CNTL1_MRT(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OUTPUT_CNTL1_MRT__SHIFT) & A6XX_SP_FS_OUTPUT_CNTL1_MRT__MASK;
+}
+
+static inline uint32_t REG_A6XX_SP_FS_MRT(uint32_t i0) { return 0x0000a996 + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_FS_MRT_REG(uint32_t i0) { return 0x0000a996 + 0x1*i0; }
+#define A6XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_SP_FS_MRT_REG_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_SP_FS_MRT_REG_COLOR_FORMAT__SHIFT) & A6XX_SP_FS_MRT_REG_COLOR_FORMAT__MASK;
+}
+#define A6XX_SP_FS_MRT_REG_COLOR_SINT 0x00000100
+#define A6XX_SP_FS_MRT_REG_COLOR_UINT 0x00000200
+#define A6XX_SP_FS_MRT_REG_COLOR_SRGB 0x00000400
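+
+/*
+ * Usage sketch (illustrative, not part of the generated header): the
+ * __SHIFT/__MASK helpers pack a field value into its bit position, so a
+ * full register value is built by OR-ing helpers and flag bits together.
+ * Here 'fmt' is an assumed enum a6xx_color_fmt value and gpu_write() stands
+ * in for the driver's register accessor:
+ *
+ *   gpu_write(gpu, REG_A6XX_SP_FS_MRT_REG(0),
+ *             A6XX_SP_FS_MRT_REG_COLOR_FORMAT(fmt) |
+ *             A6XX_SP_FS_MRT_REG_COLOR_SRGB);
+ */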
+
+#define REG_A6XX_SP_FS_TEX_COUNT 0x0000a9a7
+
+#define REG_A6XX_SP_UNKNOWN_A9A8 0x0000a9a8
+
+#define REG_A6XX_SP_FS_TEX_SAMP_LO 0x0000a9e0
+
+#define REG_A6XX_SP_FS_TEX_SAMP_HI 0x0000a9e1
+
+#define REG_A6XX_SP_CS_TEX_SAMP_LO 0x0000a9e2
+
+#define REG_A6XX_SP_CS_TEX_SAMP_HI 0x0000a9e3
+
+#define REG_A6XX_SP_FS_TEX_CONST_LO 0x0000a9e4
+
+#define REG_A6XX_SP_FS_TEX_CONST_HI 0x0000a9e5
+
+#define REG_A6XX_SP_CS_TEX_CONST_LO 0x0000a9e6
+
+#define REG_A6XX_SP_CS_TEX_CONST_HI 0x0000a9e7
+
+static inline uint32_t REG_A6XX_SP_FS_OUTPUT(uint32_t i0) { return 0x0000a98e + 0x1*i0; }
+
+static inline uint32_t REG_A6XX_SP_FS_OUTPUT_REG(uint32_t i0) { return 0x0000a98e + 0x1*i0; }
+#define A6XX_SP_FS_OUTPUT_REG_REGID__MASK 0x000000ff
+#define A6XX_SP_FS_OUTPUT_REG_REGID__SHIFT 0
+static inline uint32_t A6XX_SP_FS_OUTPUT_REG_REGID(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_OUTPUT_REG_REGID__SHIFT) & A6XX_SP_FS_OUTPUT_REG_REGID__MASK;
+}
+#define A6XX_SP_FS_OUTPUT_REG_HALF_PRECISION 0x00000100
+
+#define REG_A6XX_SP_CS_CTRL_REG0 0x0000a9b0
+#define A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK 0x0000007e
+#define A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT 1
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__SHIFT) & A6XX_SP_CS_CTRL_REG0_HALFREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK 0x00001f80
+#define A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT 7
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__SHIFT) & A6XX_SP_CS_CTRL_REG0_FULLREGFOOTPRINT__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK 0x000fc000
+#define A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT 14
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_BRANCHSTACK(uint32_t val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__SHIFT) & A6XX_SP_CS_CTRL_REG0_BRANCHSTACK__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK 0x00100000
+#define A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT 20
+static inline uint32_t A6XX_SP_CS_CTRL_REG0_THREADSIZE(enum a3xx_threadsize val)
+{
+ return ((val) << A6XX_SP_CS_CTRL_REG0_THREADSIZE__SHIFT) & A6XX_SP_CS_CTRL_REG0_THREADSIZE__MASK;
+}
+#define A6XX_SP_CS_CTRL_REG0_VARYING 0x00400000
+#define A6XX_SP_CS_CTRL_REG0_PIXLODENABLE 0x04000000
+#define A6XX_SP_CS_CTRL_REG0_MERGEDREGS 0x80000000
+
+#define REG_A6XX_SP_CS_OBJ_START_LO 0x0000a9b4
+
+#define REG_A6XX_SP_CS_OBJ_START_HI 0x0000a9b5
+
+#define REG_A6XX_SP_CS_INSTRLEN 0x0000a9bc
+
+#define REG_A6XX_SP_UNKNOWN_AB00 0x0000ab00
+
+#define REG_A6XX_SP_FS_CONFIG 0x0000ab04
+#define A6XX_SP_FS_CONFIG_ENABLED 0x00000100
+#define A6XX_SP_FS_CONFIG_NTEX__MASK 0x0001fe00
+#define A6XX_SP_FS_CONFIG_NTEX__SHIFT 9
+static inline uint32_t A6XX_SP_FS_CONFIG_NTEX(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CONFIG_NTEX__SHIFT) & A6XX_SP_FS_CONFIG_NTEX__MASK;
+}
+#define A6XX_SP_FS_CONFIG_NSAMP__MASK 0x01fe0000
+#define A6XX_SP_FS_CONFIG_NSAMP__SHIFT 17
+static inline uint32_t A6XX_SP_FS_CONFIG_NSAMP(uint32_t val)
+{
+ return ((val) << A6XX_SP_FS_CONFIG_NSAMP__SHIFT) & A6XX_SP_FS_CONFIG_NSAMP__MASK;
+}
+
+#define REG_A6XX_SP_FS_INSTRLEN 0x0000ab05
+
+#define REG_A6XX_SP_UNKNOWN_AE00 0x0000ae00
+
+#define REG_A6XX_SP_UNKNOWN_AE04 0x0000ae04
+
+#define REG_A6XX_SP_UNKNOWN_AE0F 0x0000ae0f
+
+#define REG_A6XX_SP_UNKNOWN_B182 0x0000b182
+
+#define REG_A6XX_SP_TP_RAS_MSAA_CNTL 0x0000b300
+#define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_SP_TP_RAS_MSAA_CNTL_SAMPLES__MASK;
+}
+
+#define REG_A6XX_SP_TP_DEST_MSAA_CNTL 0x0000b301
+#define A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__MASK 0x00000003
+#define A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT 0
+static inline uint32_t A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES(enum a3xx_msaa_samples val)
+{
+ return ((val) << A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__SHIFT) & A6XX_SP_TP_DEST_MSAA_CNTL_SAMPLES__MASK;
+}
+#define A6XX_SP_TP_DEST_MSAA_CNTL_MSAA_DISABLE 0x00000004
+
+#define REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR_LO 0x0000b302
+
+#define REG_A6XX_SP_TP_BORDER_COLOR_BASE_ADDR_HI 0x0000b303
+
+#define REG_A6XX_SP_TP_UNKNOWN_B304 0x0000b304
+
+#define REG_A6XX_SP_PS_2D_SRC_INFO 0x0000b4c0
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK 0x000000ff
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT 0
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT(enum a6xx_color_fmt val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_FORMAT__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__MASK 0x00000300
+#define A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__SHIFT 8
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_TILE_MODE__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK 0x00000c00
+#define A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT 10
+static inline uint32_t A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__SHIFT) & A6XX_SP_PS_2D_SRC_INFO_COLOR_SWAP__MASK;
+}
+#define A6XX_SP_PS_2D_SRC_INFO_FLAGS 0x00001000
+
+#define REG_A6XX_SP_PS_2D_SRC_LO 0x0000b4c2
+
+#define REG_A6XX_SP_PS_2D_SRC_HI 0x0000b4c3
+
+#define REG_A6XX_SP_PS_2D_SRC_FLAGS_LO 0x0000b4ca
+
+#define REG_A6XX_SP_PS_2D_SRC_FLAGS_HI 0x0000b4cb
+
+#define REG_A6XX_SP_UNKNOWN_B600 0x0000b600
+
+#define REG_A6XX_SP_UNKNOWN_B605 0x0000b605
+
+#define REG_A6XX_HLSQ_VS_CNTL 0x0000b800
+#define A6XX_HLSQ_VS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_VS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_VS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_VS_CNTL_CONSTLEN__MASK;
+}
+
+#define REG_A6XX_HLSQ_HS_CNTL 0x0000b801
+#define A6XX_HLSQ_HS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_HS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_HS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_HS_CNTL_CONSTLEN__MASK;
+}
+
+#define REG_A6XX_HLSQ_DS_CNTL 0x0000b802
+#define A6XX_HLSQ_DS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_DS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_DS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_DS_CNTL_CONSTLEN__MASK;
+}
+
+#define REG_A6XX_HLSQ_GS_CNTL 0x0000b803
+#define A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_GS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_GS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_GS_CNTL_CONSTLEN__MASK;
+}
+
+#define REG_A6XX_HLSQ_CONTROL_1_REG 0x0000b982
+
+#define REG_A6XX_HLSQ_CONTROL_2_REG 0x0000b983
+#define A6XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK 0x000000ff
+#define A6XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_FACEREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_2_REG_FACEREGID__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_FACEREGID__MASK;
+}
+#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK 0x0000ff00
+#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT 8
+static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_SAMPLEID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_SAMPLEID__MASK;
+}
+#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK 0x00ff0000
+#define A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT 16
+static inline uint32_t A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__SHIFT) & A6XX_HLSQ_CONTROL_2_REG_SAMPLEMASK__MASK;
+}
+
+#define REG_A6XX_HLSQ_CONTROL_3_REG 0x0000b984
+#define A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK 0x000000ff
+#define A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__SHIFT) & A6XX_HLSQ_CONTROL_3_REG_FRAGCOORDXYREGID__MASK;
+}
+
+#define REG_A6XX_HLSQ_CONTROL_4_REG 0x0000b985
+#define A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK 0x00ff0000
+#define A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT 16
+static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_XYCOORDREGID__MASK;
+}
+#define A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK 0xff000000
+#define A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT 24
+static inline uint32_t A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__SHIFT) & A6XX_HLSQ_CONTROL_4_REG_ZWCOORDREGID__MASK;
+}
+
+#define REG_A6XX_HLSQ_CONTROL_5_REG 0x0000b986
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_0 0x0000b990
+#define A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK 0x00000003
+#define A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_KERNELDIM__MASK;
+}
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK 0x00000ffc
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT 2
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEX__MASK;
+}
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK 0x003ff000
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT 12
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEY__MASK;
+}
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK 0xffc00000
+#define A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__SHIFT) & A6XX_HLSQ_CS_NDRANGE_0_LOCALSIZEZ__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_1 0x0000b991
+#define A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__SHIFT) & A6XX_HLSQ_CS_NDRANGE_1_GLOBALSIZE_X__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_2 0x0000b992
+#define A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__SHIFT) & A6XX_HLSQ_CS_NDRANGE_2_GLOBALOFF_X__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_3 0x0000b993
+#define A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__SHIFT) & A6XX_HLSQ_CS_NDRANGE_3_GLOBALSIZE_Y__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_4 0x0000b994
+#define A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__SHIFT) & A6XX_HLSQ_CS_NDRANGE_4_GLOBALOFF_Y__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_5 0x0000b995
+#define A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__SHIFT) & A6XX_HLSQ_CS_NDRANGE_5_GLOBALSIZE_Z__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_NDRANGE_6 0x0000b996
+#define A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK 0xffffffff
+#define A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__SHIFT) & A6XX_HLSQ_CS_NDRANGE_6_GLOBALOFF_Z__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_CNTL_0 0x0000b997
+#define A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK 0x000000ff
+#define A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT 0
+static inline uint32_t A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_WGIDCONSTID__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_0_UNK0__MASK 0x0000ff00
+#define A6XX_HLSQ_CS_CNTL_0_UNK0__SHIFT 8
+static inline uint32_t A6XX_HLSQ_CS_CNTL_0_UNK0(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_0_UNK0__SHIFT) & A6XX_HLSQ_CS_CNTL_0_UNK0__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_0_UNK1__MASK 0x00ff0000
+#define A6XX_HLSQ_CS_CNTL_0_UNK1__SHIFT 16
+static inline uint32_t A6XX_HLSQ_CS_CNTL_0_UNK1(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_0_UNK1__SHIFT) & A6XX_HLSQ_CS_CNTL_0_UNK1__MASK;
+}
+#define A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK 0xff000000
+#define A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT 24
+static inline uint32_t A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID(uint32_t val)
+{
+ return ((val) << A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__SHIFT) & A6XX_HLSQ_CS_CNTL_0_LOCALIDREGID__MASK;
+}
+
+#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_X 0x0000b999
+
+#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_Y 0x0000b99a
+
+#define REG_A6XX_HLSQ_CS_KERNEL_GROUP_Z 0x0000b99b
+
+#define REG_A6XX_HLSQ_UPDATE_CNTL 0x0000bb08
+
+#define REG_A6XX_HLSQ_FS_CNTL 0x0000bb10
+#define A6XX_HLSQ_FS_CNTL_CONSTLEN__MASK 0x000000ff
+#define A6XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT 0
+static inline uint32_t A6XX_HLSQ_FS_CNTL_CONSTLEN(uint32_t val)
+{
+ return ((val >> 2) << A6XX_HLSQ_FS_CNTL_CONSTLEN__SHIFT) & A6XX_HLSQ_FS_CNTL_CONSTLEN__MASK;
+}
+
+#define REG_A6XX_HLSQ_UNKNOWN_BB11 0x0000bb11
+
+#define REG_A6XX_HLSQ_UNKNOWN_BE00 0x0000be00
+
+#define REG_A6XX_HLSQ_UNKNOWN_BE01 0x0000be01
+
+#define REG_A6XX_HLSQ_UNKNOWN_BE04 0x0000be04
+
+#define REG_A6XX_TEX_SAMP_0 0x00000000
+#define A6XX_TEX_SAMP_0_MIPFILTER_LINEAR_NEAR 0x00000001
+#define A6XX_TEX_SAMP_0_XY_MAG__MASK 0x00000006
+#define A6XX_TEX_SAMP_0_XY_MAG__SHIFT 1
+static inline uint32_t A6XX_TEX_SAMP_0_XY_MAG(enum a6xx_tex_filter val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_XY_MAG__SHIFT) & A6XX_TEX_SAMP_0_XY_MAG__MASK;
+}
+#define A6XX_TEX_SAMP_0_XY_MIN__MASK 0x00000018
+#define A6XX_TEX_SAMP_0_XY_MIN__SHIFT 3
+static inline uint32_t A6XX_TEX_SAMP_0_XY_MIN(enum a6xx_tex_filter val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_XY_MIN__SHIFT) & A6XX_TEX_SAMP_0_XY_MIN__MASK;
+}
+#define A6XX_TEX_SAMP_0_WRAP_S__MASK 0x000000e0
+#define A6XX_TEX_SAMP_0_WRAP_S__SHIFT 5
+static inline uint32_t A6XX_TEX_SAMP_0_WRAP_S(enum a6xx_tex_clamp val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_WRAP_S__SHIFT) & A6XX_TEX_SAMP_0_WRAP_S__MASK;
+}
+#define A6XX_TEX_SAMP_0_WRAP_T__MASK 0x00000700
+#define A6XX_TEX_SAMP_0_WRAP_T__SHIFT 8
+static inline uint32_t A6XX_TEX_SAMP_0_WRAP_T(enum a6xx_tex_clamp val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_WRAP_T__SHIFT) & A6XX_TEX_SAMP_0_WRAP_T__MASK;
+}
+#define A6XX_TEX_SAMP_0_WRAP_R__MASK 0x00003800
+#define A6XX_TEX_SAMP_0_WRAP_R__SHIFT 11
+static inline uint32_t A6XX_TEX_SAMP_0_WRAP_R(enum a6xx_tex_clamp val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_WRAP_R__SHIFT) & A6XX_TEX_SAMP_0_WRAP_R__MASK;
+}
+#define A6XX_TEX_SAMP_0_ANISO__MASK 0x0001c000
+#define A6XX_TEX_SAMP_0_ANISO__SHIFT 14
+static inline uint32_t A6XX_TEX_SAMP_0_ANISO(enum a6xx_tex_aniso val)
+{
+ return ((val) << A6XX_TEX_SAMP_0_ANISO__SHIFT) & A6XX_TEX_SAMP_0_ANISO__MASK;
+}
+#define A6XX_TEX_SAMP_0_LOD_BIAS__MASK 0xfff80000
+#define A6XX_TEX_SAMP_0_LOD_BIAS__SHIFT 19
+static inline uint32_t A6XX_TEX_SAMP_0_LOD_BIAS(float val)
+{
+ return ((((int32_t)(val * 256.0))) << A6XX_TEX_SAMP_0_LOD_BIAS__SHIFT) & A6XX_TEX_SAMP_0_LOD_BIAS__MASK;
+}
+
+#define REG_A6XX_TEX_SAMP_1 0x00000001
+#define A6XX_TEX_SAMP_1_COMPARE_FUNC__MASK 0x0000000e
+#define A6XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT 1
+static inline uint32_t A6XX_TEX_SAMP_1_COMPARE_FUNC(enum adreno_compare_func val)
+{
+ return ((val) << A6XX_TEX_SAMP_1_COMPARE_FUNC__SHIFT) & A6XX_TEX_SAMP_1_COMPARE_FUNC__MASK;
+}
+#define A6XX_TEX_SAMP_1_CUBEMAPSEAMLESSFILTOFF 0x00000010
+#define A6XX_TEX_SAMP_1_UNNORM_COORDS 0x00000020
+#define A6XX_TEX_SAMP_1_MIPFILTER_LINEAR_FAR 0x00000040
+#define A6XX_TEX_SAMP_1_MAX_LOD__MASK 0x000fff00
+#define A6XX_TEX_SAMP_1_MAX_LOD__SHIFT 8
+static inline uint32_t A6XX_TEX_SAMP_1_MAX_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A6XX_TEX_SAMP_1_MAX_LOD__SHIFT) & A6XX_TEX_SAMP_1_MAX_LOD__MASK;
+}
+#define A6XX_TEX_SAMP_1_MIN_LOD__MASK 0xfff00000
+#define A6XX_TEX_SAMP_1_MIN_LOD__SHIFT 20
+static inline uint32_t A6XX_TEX_SAMP_1_MIN_LOD(float val)
+{
+ return ((((uint32_t)(val * 256.0))) << A6XX_TEX_SAMP_1_MIN_LOD__SHIFT) & A6XX_TEX_SAMP_1_MIN_LOD__MASK;
+}
+
+#define REG_A6XX_TEX_SAMP_2 0x00000002
+#define A6XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK 0xfffffff0
+#define A6XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT 4
+static inline uint32_t A6XX_TEX_SAMP_2_BCOLOR_OFFSET(uint32_t val)
+{
+ return ((val) << A6XX_TEX_SAMP_2_BCOLOR_OFFSET__SHIFT) & A6XX_TEX_SAMP_2_BCOLOR_OFFSET__MASK;
+}
+
+#define REG_A6XX_TEX_SAMP_3 0x00000003
+
+#define REG_A6XX_TEX_CONST_0 0x00000000
+#define A6XX_TEX_CONST_0_TILE_MODE__MASK 0x00000003
+#define A6XX_TEX_CONST_0_TILE_MODE__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_0_TILE_MODE(enum a6xx_tile_mode val)
+{
+ return ((val) << A6XX_TEX_CONST_0_TILE_MODE__SHIFT) & A6XX_TEX_CONST_0_TILE_MODE__MASK;
+}
+#define A6XX_TEX_CONST_0_SRGB 0x00000004
+#define A6XX_TEX_CONST_0_SWIZ_X__MASK 0x00000070
+#define A6XX_TEX_CONST_0_SWIZ_X__SHIFT 4
+static inline uint32_t A6XX_TEX_CONST_0_SWIZ_X(enum a6xx_tex_swiz val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWIZ_X__SHIFT) & A6XX_TEX_CONST_0_SWIZ_X__MASK;
+}
+#define A6XX_TEX_CONST_0_SWIZ_Y__MASK 0x00000380
+#define A6XX_TEX_CONST_0_SWIZ_Y__SHIFT 7
+static inline uint32_t A6XX_TEX_CONST_0_SWIZ_Y(enum a6xx_tex_swiz val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWIZ_Y__SHIFT) & A6XX_TEX_CONST_0_SWIZ_Y__MASK;
+}
+#define A6XX_TEX_CONST_0_SWIZ_Z__MASK 0x00001c00
+#define A6XX_TEX_CONST_0_SWIZ_Z__SHIFT 10
+static inline uint32_t A6XX_TEX_CONST_0_SWIZ_Z(enum a6xx_tex_swiz val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWIZ_Z__SHIFT) & A6XX_TEX_CONST_0_SWIZ_Z__MASK;
+}
+#define A6XX_TEX_CONST_0_SWIZ_W__MASK 0x0000e000
+#define A6XX_TEX_CONST_0_SWIZ_W__SHIFT 13
+static inline uint32_t A6XX_TEX_CONST_0_SWIZ_W(enum a6xx_tex_swiz val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWIZ_W__SHIFT) & A6XX_TEX_CONST_0_SWIZ_W__MASK;
+}
+#define A6XX_TEX_CONST_0_MIPLVLS__MASK 0x000f0000
+#define A6XX_TEX_CONST_0_MIPLVLS__SHIFT 16
+static inline uint32_t A6XX_TEX_CONST_0_MIPLVLS(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_0_MIPLVLS__SHIFT) & A6XX_TEX_CONST_0_MIPLVLS__MASK;
+}
+#define A6XX_TEX_CONST_0_FMT__MASK 0x3fc00000
+#define A6XX_TEX_CONST_0_FMT__SHIFT 22
+static inline uint32_t A6XX_TEX_CONST_0_FMT(enum a6xx_tex_fmt val)
+{
+ return ((val) << A6XX_TEX_CONST_0_FMT__SHIFT) & A6XX_TEX_CONST_0_FMT__MASK;
+}
+#define A6XX_TEX_CONST_0_SWAP__MASK 0xc0000000
+#define A6XX_TEX_CONST_0_SWAP__SHIFT 30
+static inline uint32_t A6XX_TEX_CONST_0_SWAP(enum a3xx_color_swap val)
+{
+ return ((val) << A6XX_TEX_CONST_0_SWAP__SHIFT) & A6XX_TEX_CONST_0_SWAP__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_1 0x00000001
+#define A6XX_TEX_CONST_1_WIDTH__MASK 0x00007fff
+#define A6XX_TEX_CONST_1_WIDTH__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_1_WIDTH(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_1_WIDTH__SHIFT) & A6XX_TEX_CONST_1_WIDTH__MASK;
+}
+#define A6XX_TEX_CONST_1_HEIGHT__MASK 0x3fff8000
+#define A6XX_TEX_CONST_1_HEIGHT__SHIFT 15
+static inline uint32_t A6XX_TEX_CONST_1_HEIGHT(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_1_HEIGHT__SHIFT) & A6XX_TEX_CONST_1_HEIGHT__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_2 0x00000002
+#define A6XX_TEX_CONST_2_FETCHSIZE__MASK 0x0000000f
+#define A6XX_TEX_CONST_2_FETCHSIZE__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_2_FETCHSIZE(enum a6xx_tex_fetchsize val)
+{
+ return ((val) << A6XX_TEX_CONST_2_FETCHSIZE__SHIFT) & A6XX_TEX_CONST_2_FETCHSIZE__MASK;
+}
+#define A6XX_TEX_CONST_2_PITCH__MASK 0x1fffff80
+#define A6XX_TEX_CONST_2_PITCH__SHIFT 7
+static inline uint32_t A6XX_TEX_CONST_2_PITCH(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_2_PITCH__SHIFT) & A6XX_TEX_CONST_2_PITCH__MASK;
+}
+#define A6XX_TEX_CONST_2_TYPE__MASK 0x60000000
+#define A6XX_TEX_CONST_2_TYPE__SHIFT 29
+static inline uint32_t A6XX_TEX_CONST_2_TYPE(enum a6xx_tex_type val)
+{
+ return ((val) << A6XX_TEX_CONST_2_TYPE__SHIFT) & A6XX_TEX_CONST_2_TYPE__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_3 0x00000003
+#define A6XX_TEX_CONST_3_ARRAY_PITCH__MASK 0x00003fff
+#define A6XX_TEX_CONST_3_ARRAY_PITCH__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_3_ARRAY_PITCH(uint32_t val)
+{
+ return ((val >> 12) << A6XX_TEX_CONST_3_ARRAY_PITCH__SHIFT) & A6XX_TEX_CONST_3_ARRAY_PITCH__MASK;
+}
+#define A6XX_TEX_CONST_3_FLAG 0x10000000
+
+#define REG_A6XX_TEX_CONST_4 0x00000004
+#define A6XX_TEX_CONST_4_BASE_LO__MASK 0xffffffe0
+#define A6XX_TEX_CONST_4_BASE_LO__SHIFT 5
+static inline uint32_t A6XX_TEX_CONST_4_BASE_LO(uint32_t val)
+{
+ return ((val >> 5) << A6XX_TEX_CONST_4_BASE_LO__SHIFT) & A6XX_TEX_CONST_4_BASE_LO__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_5 0x00000005
+#define A6XX_TEX_CONST_5_BASE_HI__MASK 0x0001ffff
+#define A6XX_TEX_CONST_5_BASE_HI__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_5_BASE_HI(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_5_BASE_HI__SHIFT) & A6XX_TEX_CONST_5_BASE_HI__MASK;
+}
+#define A6XX_TEX_CONST_5_DEPTH__MASK 0x3ffe0000
+#define A6XX_TEX_CONST_5_DEPTH__SHIFT 17
+static inline uint32_t A6XX_TEX_CONST_5_DEPTH(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_5_DEPTH__SHIFT) & A6XX_TEX_CONST_5_DEPTH__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_6 0x00000006
+
+#define REG_A6XX_TEX_CONST_7 0x00000007
+#define A6XX_TEX_CONST_7_FLAG_LO__MASK 0xffffffe0
+#define A6XX_TEX_CONST_7_FLAG_LO__SHIFT 5
+static inline uint32_t A6XX_TEX_CONST_7_FLAG_LO(uint32_t val)
+{
+ return ((val >> 5) << A6XX_TEX_CONST_7_FLAG_LO__SHIFT) & A6XX_TEX_CONST_7_FLAG_LO__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_8 0x00000008
+#define A6XX_TEX_CONST_8_BASE_HI__MASK 0x0001ffff
+#define A6XX_TEX_CONST_8_BASE_HI__SHIFT 0
+static inline uint32_t A6XX_TEX_CONST_8_BASE_HI(uint32_t val)
+{
+ return ((val) << A6XX_TEX_CONST_8_BASE_HI__SHIFT) & A6XX_TEX_CONST_8_BASE_HI__MASK;
+}
+
+#define REG_A6XX_TEX_CONST_9 0x00000009
+
+#define REG_A6XX_TEX_CONST_10 0x0000000a
+
+#define REG_A6XX_TEX_CONST_11 0x0000000b
+
+#define REG_A6XX_TEX_CONST_12 0x0000000c
+
+#define REG_A6XX_TEX_CONST_13 0x0000000d
+
+#define REG_A6XX_TEX_CONST_14 0x0000000e
+
+#define REG_A6XX_TEX_CONST_15 0x0000000f
+
+
+#endif /* A6XX_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.c b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
new file mode 100644
index 000000000000..bbb8126ec5c5
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.c
@@ -0,0 +1,1207 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
+
+#include <linux/clk.h>
+#include <linux/iopoll.h>
+#include <linux/pm_opp.h>
+#include <soc/qcom/cmd-db.h>
+
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.xml.h"
+
+static irqreturn_t a6xx_gmu_irq(int irq, void *data)
+{
+ struct a6xx_gmu *gmu = data;
+ u32 status;
+
+ status = gmu_read(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS);
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, status);
+
+ if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE) {
+ dev_err_ratelimited(gmu->dev, "GMU watchdog expired\n");
+
+ /* Temporary until we can recover safely */
+ BUG();
+ }
+
+ if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR)
+ dev_err_ratelimited(gmu->dev, "GMU AHB bus error\n");
+
+ if (status & A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
+ dev_err_ratelimited(gmu->dev, "GMU fence error: 0x%x\n",
+ gmu_read(gmu, REG_A6XX_GMU_AHB_FENCE_STATUS));
+
+ return IRQ_HANDLED;
+}
+
+static irqreturn_t a6xx_hfi_irq(int irq, void *data)
+{
+ struct a6xx_gmu *gmu = data;
+ u32 status;
+
+ status = gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO);
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, status);
+
+ if (status & A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ)
+ tasklet_schedule(&gmu->hfi_tasklet);
+
+ if (status & A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT) {
+ dev_err_ratelimited(gmu->dev, "GMU firmware fault\n");
+
+ /* Temporary until we can recover safely */
+ BUG();
+ }
+
+ return IRQ_HANDLED;
+}
+
+/* Check to see if the GX rail is still powered */
+static bool a6xx_gmu_gx_is_on(struct a6xx_gmu *gmu)
+{
+ u32 val = gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS);
+
+ return !(val &
+ (A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF |
+ A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF));
+}
+
+static int a6xx_gmu_set_freq(struct a6xx_gmu *gmu, int index)
+{
+ gmu_write(gmu, REG_A6XX_GMU_DCVS_ACK_OPTION, 0);
+
+ gmu_write(gmu, REG_A6XX_GMU_DCVS_PERF_SETTING,
+ ((3 & 0xf) << 28) | index);
+
+ /*
+ * Send an invalid index as a vote for the bus bandwidth and let the
+ * firmware decide on the right vote
+ */
+ gmu_write(gmu, REG_A6XX_GMU_DCVS_BW_SETTING, 0xff);
+
+ /* Set and clear the OOB for DCVS to trigger the GMU */
+ a6xx_gmu_set_oob(gmu, GMU_OOB_DCVS_SET);
+ a6xx_gmu_clear_oob(gmu, GMU_OOB_DCVS_SET);
+
+ return gmu_read(gmu, REG_A6XX_GMU_DCVS_RETURN);
+}
+
+static bool a6xx_gmu_check_idle_level(struct a6xx_gmu *gmu)
+{
+ u32 val;
+ int local = gmu->idle_level;
+
+ /* SPTP and IFPC both report as IFPC */
+ if (gmu->idle_level == GMU_IDLE_STATE_SPTP)
+ local = GMU_IDLE_STATE_IFPC;
+
+ val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
+
+ if (val == local) {
+ if (gmu->idle_level != GMU_IDLE_STATE_IFPC ||
+ !a6xx_gmu_gx_is_on(gmu))
+ return true;
+ }
+
+ return false;
+}
+
+/* Wait for the GMU to get to its most idle state */
+int a6xx_gmu_wait_for_idle(struct a6xx_gpu *a6xx_gpu)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+
+ return spin_until(a6xx_gmu_check_idle_level(gmu));
+}
+
+static int a6xx_gmu_start(struct a6xx_gmu *gmu)
+{
+ int ret;
+ u32 val;
+
+ gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 1);
+ gmu_write(gmu, REG_A6XX_GMU_CM3_SYSRESET, 0);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, val,
+ val == 0xbabeface, 100, 10000);
+
+ if (ret)
+ dev_err(gmu->dev, "GMU firmware initialization timed out\n");
+
+ return ret;
+}
+
+static int a6xx_gmu_hfi_start(struct a6xx_gmu *gmu)
+{
+ u32 val;
+ int ret;
+
+ gmu_rmw(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
+ A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ, 0);
+
+ gmu_write(gmu, REG_A6XX_GMU_HFI_CTRL_INIT, 1);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_HFI_CTRL_STATUS, val,
+ val & 1, 100, 10000);
+ if (ret)
+ dev_err(gmu->dev, "Unable to start the HFI queues\n");
+
+ return ret;
+}
+
+/* Trigger an OOB (out of band) request to the GMU */
+int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
+{
+ int ret;
+ u32 val;
+ int request, ack;
+ const char *name;
+
+ switch (state) {
+ case GMU_OOB_GPU_SET:
+ request = GMU_OOB_GPU_SET_REQUEST;
+ ack = GMU_OOB_GPU_SET_ACK;
+ name = "GPU_SET";
+ break;
+ case GMU_OOB_BOOT_SLUMBER:
+ request = GMU_OOB_BOOT_SLUMBER_REQUEST;
+ ack = GMU_OOB_BOOT_SLUMBER_ACK;
+ name = "BOOT_SLUMBER";
+ break;
+ case GMU_OOB_DCVS_SET:
+ request = GMU_OOB_DCVS_REQUEST;
+ ack = GMU_OOB_DCVS_ACK;
+ name = "GPU_DCVS";
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ /* Trigger the requested OOB operation */
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);
+
+ /* Wait for the acknowledge interrupt */
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
+ val & (1 << ack), 100, 10000);
+
+ if (ret)
+ dev_err(gmu->dev,
+ "Timeout waiting for GMU OOB set %s: 0x%x\n",
+ name,
+ gmu_read(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO));
+
+ /* Clear the acknowledge interrupt */
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);
+
+ return ret;
+}
+
+/* Clear a pending OOB state in the GMU */
+void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state)
+{
+ switch (state) {
+ case GMU_OOB_GPU_SET:
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
+ 1 << GMU_OOB_GPU_SET_CLEAR);
+ break;
+ case GMU_OOB_BOOT_SLUMBER:
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
+ 1 << GMU_OOB_BOOT_SLUMBER_CLEAR);
+ break;
+ case GMU_OOB_DCVS_SET:
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET,
+ 1 << GMU_OOB_DCVS_CLEAR);
+ break;
+ }
+}
+
+/* Enable CPU control of SPTP power collapse */
+static int a6xx_sptprac_enable(struct a6xx_gmu *gmu)
+{
+ int ret;
+ u32 val;
+
+ gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778000);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
+ (val & 0x38) == 0x28, 1, 100);
+
+ if (ret) {
+ dev_err(gmu->dev, "Unable to power on SPTPRAC: 0x%x\n",
+ gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
+ }
+
+ return ret;
+}
+
+/* Disable CPU control of SPTP power collapse */
+static void a6xx_sptprac_disable(struct a6xx_gmu *gmu)
+{
+ u32 val;
+ int ret;
+
+ /* Make sure retention is on */
+ gmu_rmw(gmu, REG_A6XX_GPU_CC_GX_GDSCR, 0, (1 << 11));
+
+ gmu_write(gmu, REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL, 0x778001);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS, val,
+ (val & 0x04), 100, 10000);
+
+ if (ret)
+ dev_err(gmu->dev, "failed to power off SPTPRAC: 0x%x\n",
+ gmu_read(gmu, REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS));
+}
+
+/* Let the GMU know we are starting a boot sequence */
+static int a6xx_gmu_gfx_rail_on(struct a6xx_gmu *gmu)
+{
+ u32 vote;
+
+ /* Let the GMU know we are getting ready for boot */
+ gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 0);
+
+ /* Choose the "default" power level as the highest available */
+ vote = gmu->gx_arc_votes[gmu->nr_gpu_freqs - 1];
+
+ gmu_write(gmu, REG_A6XX_GMU_GX_VOTE_IDX, vote & 0xff);
+ gmu_write(gmu, REG_A6XX_GMU_MX_VOTE_IDX, (vote >> 8) & 0xff);
+
+ /* Let the GMU know the boot sequence has started */
+ return a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+}
+
+/* Let the GMU know that we are about to go into slumber */
+static int a6xx_gmu_notify_slumber(struct a6xx_gmu *gmu)
+{
+ int ret;
+
+ /* Disable the power counter so the GMU isn't busy */
+ gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 0);
+
+ /* Disable SPTP_PC if the CPU is responsible for it */
+ if (gmu->idle_level < GMU_IDLE_STATE_SPTP)
+ a6xx_sptprac_disable(gmu);
+
+ /* Tell the GMU to get ready to slumber */
+ gmu_write(gmu, REG_A6XX_GMU_BOOT_SLUMBER_OPTION, 1);
+
+ ret = a6xx_gmu_set_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+ a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+
+ if (!ret) {
+ /* Check to see if the GMU really did slumber */
+ if (gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE)
+ != 0x0f) {
+ dev_err(gmu->dev, "The GMU did not go into slumber\n");
+ ret = -ETIMEDOUT;
+ }
+ }
+
+ /* Put fence into allow mode */
+ gmu_write(gmu, REG_A6XX_GMU_AO_AHB_FENCE_CTRL, 0);
+ return ret;
+}
+
+static int a6xx_rpmh_start(struct a6xx_gmu *gmu)
+{
+ int ret;
+ u32 val;
+
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1 << 1);
+ /* Wait for the register to finish posting */
+ wmb();
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_RSCC_CONTROL_ACK, val,
+ val & (1 << 1), 100, 10000);
+ if (ret) {
+ dev_err(gmu->dev, "Unable to power on the GPU RSC\n");
+ return ret;
+ }
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_RSCC_SEQ_BUSY_DRV0, val,
+ !val, 100, 10000);
+
+ if (!ret) {
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
+
+ /* Re-enable the power counter */
+ gmu_write(gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE, 1);
+ return 0;
+ }
+
+ dev_err(gmu->dev, "GPU RSC sequence stuck while waking up the GPU\n");
+ return ret;
+}
+
+static void a6xx_rpmh_stop(struct a6xx_gmu *gmu)
+{
+ int ret;
+ u32 val;
+
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 1);
+
+ ret = gmu_poll_timeout(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0,
+ val, val & (1 << 16), 100, 10000);
+ if (ret)
+ dev_err(gmu->dev, "Unable to power off the GPU RSC\n");
+
+ gmu_write(gmu, REG_A6XX_GMU_RSCC_CONTROL_REQ, 0);
+}
+
+static void a6xx_gmu_rpmh_init(struct a6xx_gmu *gmu)
+{
+ /* Disable SDE clock gating */
+ gmu_write(gmu, REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0, BIT(24));
+
+ /* Setup RSC PDC handshake for sleep and wakeup */
+ gmu_write(gmu, REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0, 1);
+ gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA, 0);
+ gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR, 0);
+ gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 2, 0);
+ gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 2, 0);
+ gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA + 4, 0x80000000);
+ gmu_write(gmu, REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR + 4, 0);
+ gmu_write(gmu, REG_A6XX_RSCC_OVERRIDE_START_ADDR, 0);
+ gmu_write(gmu, REG_A6XX_RSCC_PDC_SEQ_START_ADDR, 0x4520);
+ gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_LO, 0x4510);
+ gmu_write(gmu, REG_A6XX_RSCC_PDC_MATCH_VALUE_HI, 0x4514);
+
+ /* Load RSC sequencer uCode for sleep and wakeup */
+ gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0, 0xa7a506a0);
+ gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 1, 0xa1e6a6e7);
+ gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 2, 0xa2e081e1);
+ gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 3, 0xe9a982e2);
+ gmu_write(gmu, REG_A6XX_RSCC_SEQ_MEM_0_DRV0 + 4, 0x0020e8a8);
+
+ /* Load PDC sequencer uCode for power up and power down sequence */
+ pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0, 0xfebea1e1);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 1, 0xa5a4a3a2);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 2, 0x8382a6e0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 3, 0xbce3e284);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_MEM_0 + 4, 0x002081fc);
+
+ /* Set TCS commands used by PDC sequence for low power modes */
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD_ENABLE_BANK, 7);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD_WAIT_FOR_CMPL_BANK, 0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CONTROL, 0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID, 0x10108);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR, 0x30010);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA, 1);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 4, 0x10108);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 4, 0x30000);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 4, 0x0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_MSGID + 8, 0x10108);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_ADDR + 8, 0x30080);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS1_CMD0_DATA + 8, 0x0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD_ENABLE_BANK, 7);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD_WAIT_FOR_CMPL_BANK, 0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CONTROL, 0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID, 0x10108);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR, 0x30010);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA, 2);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 4, 0x10108);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 4, 0x30000);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 4, 0x3);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_MSGID + 8, 0x10108);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_ADDR + 8, 0x30080);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_TCS3_CMD0_DATA + 8, 0x3);
+
+ /* Setup GPU PDC */
+ pdc_write(gmu, REG_A6XX_PDC_GPU_SEQ_START_ADDR, 0);
+ pdc_write(gmu, REG_A6XX_PDC_GPU_ENABLE_PDC, 0x80000001);
+
+ /* ensure no writes happen before the uCode is fully written */
+ wmb();
+}
+
+/*
+ * The lowest 16 bits of this value are the number of XO clock cycles for main
+ * hysteresis, which is set at 0x1680 cycles (300 us). The upper 16 bits are
+ * for the shorter hysteresis that happens after main - this is 0xa (0.5 us)
+ */
+
+#define GMU_PWR_COL_HYST 0x000a1680
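+
+/*
+ * Worked out (assuming the usual 19.2 MHz always-on XO clock): the low half
+ * 0x1680 = 5760 cycles, and 5760 / 19.2 MHz = 300 us; the high half 0xa = 10
+ * cycles ~= 0.5 us.
+ */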
+
+/* Set up the idle state for the GMU */
+static void a6xx_gmu_power_config(struct a6xx_gmu *gmu)
+{
+ /* Disable GMU WB/RB buffer */
+ gmu_write(gmu, REG_A6XX_GMU_SYS_BUS_CONFIG, 0x1);
+
+ gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0x9c40400);
+
+ switch (gmu->idle_level) {
+ case GMU_IDLE_STATE_IFPC:
+ gmu_write(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST,
+ GMU_PWR_COL_HYST);
+ gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
+ A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
+ A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE);
+ /* Fall through */
+ case GMU_IDLE_STATE_SPTP:
+ gmu_write(gmu, REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST,
+ GMU_PWR_COL_HYST);
+ gmu_rmw(gmu, REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL, 0,
+ A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE |
+ A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE);
+ }
+
+ /* Enable RPMh GPU client */
+ gmu_rmw(gmu, REG_A6XX_GMU_RPMH_CTRL, 0,
+ A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE |
+ A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE);
+}
+
+static int a6xx_gmu_fw_start(struct a6xx_gmu *gmu, unsigned int state)
+{
+ static bool rpmh_init;
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ int i, ret;
+ u32 chipid;
+ u32 *image;
+
+ if (state == GMU_WARM_BOOT) {
+ ret = a6xx_rpmh_start(gmu);
+ if (ret)
+ return ret;
+ } else {
+ if (WARN(!adreno_gpu->fw[ADRENO_FW_GMU],
+ "GMU firmware is not loaded\n"))
+ return -ENOENT;
+
+ /* Sanity check the size of the firmware that was loaded */
+ if (adreno_gpu->fw[ADRENO_FW_GMU]->size > 0x8000) {
+ dev_err(gmu->dev,
+ "GMU firmware is bigger than the available region\n");
+ return -EINVAL;
+ }
+
+ /* Turn on register retention */
+ gmu_write(gmu, REG_A6XX_GMU_GENERAL_7, 1);
+
+ /* We only need to load the RPMh microcode once */
+ if (!rpmh_init) {
+ a6xx_gmu_rpmh_init(gmu);
+ rpmh_init = true;
+ } else if (state != GMU_RESET) {
+ ret = a6xx_rpmh_start(gmu);
+ if (ret)
+ return ret;
+ }
+
+ image = (u32 *) adreno_gpu->fw[ADRENO_FW_GMU]->data;
+
+ for (i = 0; i < adreno_gpu->fw[ADRENO_FW_GMU]->size >> 2; i++)
+ gmu_write(gmu, REG_A6XX_GMU_CM3_ITCM_START + i,
+ image[i]);
+ }
+
+ gmu_write(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT, 0);
+ gmu_write(gmu, REG_A6XX_GMU_CM3_BOOT_CONFIG, 0x02);
+
+ /* Write the iova of the HFI table */
+ gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_ADDR, gmu->hfi->iova);
+ gmu_write(gmu, REG_A6XX_GMU_HFI_QTBL_INFO, 1);
+
+ gmu_write(gmu, REG_A6XX_GMU_AHB_FENCE_RANGE_0,
+ (1 << 31) | (0xa << 18) | (0xa0));
+
+ chipid = adreno_gpu->rev.core << 24;
+ chipid |= adreno_gpu->rev.major << 16;
+ chipid |= adreno_gpu->rev.minor << 12;
+ chipid |= adreno_gpu->rev.patchid << 8;
+
+ gmu_write(gmu, REG_A6XX_GMU_HFI_SFR_ADDR, chipid);
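+
+ /*
+ * For example (rev values assumed): an A630 at rev 6.3.0.0 packs to
+ * (6 << 24) | (3 << 16) | (0 << 12) | (0 << 8) = 0x06030000.
+ */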
+
+ /* Set up the lowest idle level on the GMU */
+ a6xx_gmu_power_config(gmu);
+
+ ret = a6xx_gmu_start(gmu);
+ if (ret)
+ return ret;
+
+ ret = a6xx_gmu_gfx_rail_on(gmu);
+ if (ret)
+ return ret;
+
+ /* Enable SPTP_PC if the CPU is responsible for it */
+ if (gmu->idle_level < GMU_IDLE_STATE_SPTP) {
+ ret = a6xx_sptprac_enable(gmu);
+ if (ret)
+ return ret;
+ }
+
+ ret = a6xx_gmu_hfi_start(gmu);
+ if (ret)
+ return ret;
+
+ /* FIXME: Do we need this wmb() here? */
+ wmb();
+
+ return 0;
+}
+
+#define A6XX_HFI_IRQ_MASK \
+ (A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ | \
+ A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT)
+
+#define A6XX_GMU_IRQ_MASK \
+ (A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE | \
+ A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR | \
+ A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR)
+
+static void a6xx_gmu_irq_enable(struct a6xx_gmu *gmu)
+{
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR, ~0);
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, ~0);
+
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK,
+ ~A6XX_GMU_IRQ_MASK);
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK,
+ ~A6XX_HFI_IRQ_MASK);
+
+ enable_irq(gmu->gmu_irq);
+ enable_irq(gmu->hfi_irq);
+}
+
+static void a6xx_gmu_irq_disable(struct a6xx_gmu *gmu)
+{
+ disable_irq(gmu->gmu_irq);
+ disable_irq(gmu->hfi_irq);
+
+ gmu_write(gmu, REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK, ~0);
+ gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_MASK, ~0);
+}
+
+int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ int ret;
+ u32 val;
+
+ /* Flush all the queues */
+ a6xx_hfi_stop(gmu);
+
+ /* Stop the interrupts */
+ a6xx_gmu_irq_disable(gmu);
+
+ /* Force off SPTP in case the GMU is managing it */
+ a6xx_sptprac_disable(gmu);
+
+ /* Make sure there are no outstanding RPMh votes */
+ gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS0_DRV0_STATUS, val,
+ (val & 1), 100, 10000);
+ gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS1_DRV0_STATUS, val,
+ (val & 1), 100, 10000);
+ gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS2_DRV0_STATUS, val,
+ (val & 1), 100, 10000);
+ gmu_poll_timeout(gmu, REG_A6XX_RSCC_TCS3_DRV0_STATUS, val,
+ (val & 1), 100, 1000);
+
+ /* Force off the GX GDSC */
+ regulator_force_disable(gmu->gx);
+
+ /* Disable the resources */
+ clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
+ pm_runtime_put_sync(gmu->dev);
+
+ /* Re-enable the resources */
+ pm_runtime_get_sync(gmu->dev);
+
+ /* Use a known rate to bring up the GMU */
+ clk_set_rate(gmu->core_clk, 200000000);
+ ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
+ if (ret)
+ goto out;
+
+ a6xx_gmu_irq_enable(gmu);
+
+ ret = a6xx_gmu_fw_start(gmu, GMU_RESET);
+ if (!ret)
+ ret = a6xx_hfi_start(gmu, GMU_COLD_BOOT);
+
+ /* Set the GPU back to the highest power frequency */
+ a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
+
+out:
+ if (ret)
+ a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+
+ return ret;
+}
+
+int a6xx_gmu_resume(struct a6xx_gpu *a6xx_gpu)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ int status, ret;
+
+ if (WARN(!gmu->mmio, "The GMU is not set up yet\n"))
+ return 0;
+
+ /* Turn on the resources */
+ pm_runtime_get_sync(gmu->dev);
+
+ /* Use a known rate to bring up the GMU */
+ clk_set_rate(gmu->core_clk, 200000000);
+ ret = clk_bulk_prepare_enable(gmu->nr_clocks, gmu->clocks);
+ if (ret)
+ goto out;
+
+ a6xx_gmu_irq_enable(gmu);
+
+ /* Check to see if we are doing a cold or warm boot */
+ status = gmu_read(gmu, REG_A6XX_GMU_GENERAL_7) == 1 ?
+ GMU_WARM_BOOT : GMU_COLD_BOOT;
+
+ ret = a6xx_gmu_fw_start(gmu, status);
+ if (ret)
+ goto out;
+
+ ret = a6xx_hfi_start(gmu, status);
+
+ /* Set the GPU to the highest power frequency */
+ a6xx_gmu_set_freq(gmu, gmu->nr_gpu_freqs - 1);
+
+out:
+ /* Make sure to turn off the boot OOB request on error */
+ if (ret)
+ a6xx_gmu_clear_oob(gmu, GMU_OOB_BOOT_SLUMBER);
+
+ return ret;
+}
+
+bool a6xx_gmu_isidle(struct a6xx_gmu *gmu)
+{
+ u32 reg;
+
+ if (!gmu->mmio)
+ return true;
+
+ reg = gmu_read(gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS);
+
+ if (reg & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB)
+ return false;
+
+ return true;
+}
+
+int a6xx_gmu_stop(struct a6xx_gpu *a6xx_gpu)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ u32 val;
+
+ /*
+ * The GMU may still be in slumber if the GPU never started, so check
+ * and skip putting it back into slumber if so
+ */
+ val = gmu_read(gmu, REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE);
+
+ if (val != 0xf) {
+ int ret = a6xx_gmu_wait_for_idle(a6xx_gpu);
+
+ /* Temporary until we can recover safely */
+ BUG_ON(ret);
+
+ /* tell the GMU we want to slumber */
+ a6xx_gmu_notify_slumber(gmu);
+
+ ret = gmu_poll_timeout(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS, val,
+ !(val & A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB),
+ 100, 10000);
+
+ /*
+ * Let the user know we failed to slumber but don't worry too
+ * much because we are powering down anyway
+ */
+
+ if (ret)
+ dev_err(gmu->dev,
+ "Unable to slumber GMU: status = 0%x/0%x\n",
+ gmu_read(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS),
+ gmu_read(gmu,
+ REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2));
+ }
+
+ /* Turn off HFI */
+ a6xx_hfi_stop(gmu);
+
+ /* Stop the interrupts and mask the hardware */
+ a6xx_gmu_irq_disable(gmu);
+
+ /* Tell RPMh to power off the GPU */
+ a6xx_rpmh_stop(gmu);
+
+ clk_bulk_disable_unprepare(gmu->nr_clocks, gmu->clocks);
+
+ pm_runtime_put_sync(gmu->dev);
+
+ return 0;
+}
+
+static void a6xx_gmu_memory_free(struct a6xx_gmu *gmu, struct a6xx_gmu_bo *bo)
+{
+ int count, i;
+ u64 iova;
+
+ if (IS_ERR_OR_NULL(bo))
+ return;
+
+ count = bo->size >> PAGE_SHIFT;
+ iova = bo->iova;
+
+ for (i = 0; i < count; i++, iova += PAGE_SIZE) {
+ iommu_unmap(gmu->domain, iova, PAGE_SIZE);
+ __free_pages(bo->pages[i], 0);
+ }
+
+ kfree(bo->pages);
+ kfree(bo);
+}
+
+static struct a6xx_gmu_bo *a6xx_gmu_memory_alloc(struct a6xx_gmu *gmu,
+ size_t size)
+{
+ struct a6xx_gmu_bo *bo;
+ int ret, count, i;
+
+ bo = kzalloc(sizeof(*bo), GFP_KERNEL);
+ if (!bo)
+ return ERR_PTR(-ENOMEM);
+
+ bo->size = PAGE_ALIGN(size);
+
+ count = bo->size >> PAGE_SHIFT;
+
+ bo->pages = kcalloc(count, sizeof(struct page *), GFP_KERNEL);
+ if (!bo->pages) {
+ kfree(bo);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ for (i = 0; i < count; i++) {
+ bo->pages[i] = alloc_page(GFP_KERNEL);
+ if (!bo->pages[i])
+ goto err;
+ }
+
+ bo->iova = gmu->uncached_iova_base;
+
+ for (i = 0; i < count; i++) {
+ ret = iommu_map(gmu->domain,
+ bo->iova + (PAGE_SIZE * i),
+ page_to_phys(bo->pages[i]), PAGE_SIZE,
+ IOMMU_READ | IOMMU_WRITE);
+
+ if (ret) {
+ dev_err(gmu->dev, "Unable to map GMU buffer object\n");
+
+ for (i = i - 1 ; i >= 0; i--)
+ iommu_unmap(gmu->domain,
+ bo->iova + (PAGE_SIZE * i),
+ PAGE_SIZE);
+
+ goto err;
+ }
+ }
+
+ bo->virt = vmap(bo->pages, count, VM_IOREMAP,
+ pgprot_writecombine(PAGE_KERNEL));
+ if (!bo->virt)
+ goto err;
+
+ /* Align future IOVA addresses on 1MB boundaries */
+ gmu->uncached_iova_base += ALIGN(size, SZ_1M);
+
+ return bo;
+
+err:
+ for (i = 0; i < count; i++) {
+ if (bo->pages[i])
+ __free_pages(bo->pages[i], 0);
+ }
+
+ kfree(bo->pages);
+ kfree(bo);
+
+ return ERR_PTR(-ENOMEM);
+}
+
+static int a6xx_gmu_memory_probe(struct a6xx_gmu *gmu)
+{
+ int ret;
+
+ /*
+ * The GMU address space is hardcoded to treat the range
+ * 0x60000000 - 0x80000000 as un-cached memory. All buffers shared
+ * between the GMU and the CPU will live in this space
+ */
+ gmu->uncached_iova_base = 0x60000000;
+
+ gmu->domain = iommu_domain_alloc(&platform_bus_type);
+ if (!gmu->domain)
+ return -ENODEV;
+
+ ret = iommu_attach_device(gmu->domain, gmu->dev);
+
+ if (ret) {
+ iommu_domain_free(gmu->domain);
+ gmu->domain = NULL;
+ }
+
+ return ret;
+}
+
+/* Get the list of RPMh voltage levels from cmd-db */
+static int a6xx_gmu_rpmh_arc_cmds(const char *id, void *vals, int size)
+{
+ u32 len = cmd_db_read_aux_data_len(id);
+
+ if (!len)
+ return 0;
+
+ if (WARN_ON(len > size))
+ return -EINVAL;
+
+ cmd_db_read_aux_data(id, vals, len);
+
+ /*
+ * The data comes back as an array of unsigned shorts so adjust the
+ * count accordingly
+ */
+ return len >> 1;
+}
+
+/* Return the 'arc-level' for the given frequency */
+static u32 a6xx_gmu_get_arc_level(struct device *dev, unsigned long freq)
+{
+ struct dev_pm_opp *opp;
+ struct device_node *np;
+ u32 val = 0;
+
+ if (!freq)
+ return 0;
+
+ opp = dev_pm_opp_find_freq_exact(dev, freq, true);
+ if (IS_ERR(opp))
+ return 0;
+
+ np = dev_pm_opp_get_of_node(opp);
+
+ if (np) {
+ of_property_read_u32(np, "qcom,level", &val);
+ of_node_put(np);
+ }
+
+ dev_pm_opp_put(opp);
+
+ return val;
+}
+
+static int a6xx_gmu_rpmh_arc_votes_init(struct device *dev, u32 *votes,
+ unsigned long *freqs, int freqs_count,
+ u16 *pri, int pri_count,
+ u16 *sec, int sec_count)
+{
+ int i, j;
+
+ /* Construct a vote for each frequency */
+ for (i = 0; i < freqs_count; i++) {
+ u8 pindex = 0, sindex = 0;
+ u32 level = a6xx_gmu_get_arc_level(dev, freqs[i]);
+
+ /* Get the primary index that matches the arc level */
+ for (j = 0; j < pri_count; j++) {
+ if (pri[j] >= level) {
+ pindex = j;
+ break;
+ }
+ }
+
+ if (j == pri_count) {
+ dev_err(dev,
+ "Level %u not found in in the RPMh list\n",
+ level);
+ dev_err(dev, "Available levels:\n");
+ for (j = 0; j < pri_count; j++)
+ dev_err(dev, " %u\n", pri[j]);
+
+ return -EINVAL;
+ }
+
+ /*
+ * Look for a level in the secondary list that matches. If
+ * nothing fits, use the maximum non zero vote
+ */
+
+ for (j = 0; j < sec_count; j++) {
+ if (sec[j] >= level) {
+ sindex = j;
+ break;
+ } else if (sec[j]) {
+ sindex = j;
+ }
+ }
+
+ /* Construct the vote */
+ votes[i] = ((pri[pindex] & 0xffff) << 16) |
+ (sindex << 8) | pindex;
+ }
+
+ return 0;
+}
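+
+/*
+ * Example of the resulting encoding (values assumed for illustration): if
+ * pri[2] == 0x80 is the first level >= the requested arc level and the
+ * secondary match is sindex == 1, the vote is
+ * ((0x80 & 0xffff) << 16) | (1 << 8) | 2 == 0x00800102 - the arc level in
+ * the top half, the secondary and primary table indices in the low bytes.
+ */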
+
+/*
+ * The GMU votes with the RPMh for itself and on behalf of the GPU but we need
+ * to construct the list of votes on the CPU and send it over. Query the RPMh
+ * voltage levels and build the votes
+ */
+
+static int a6xx_gmu_rpmh_votes_init(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ u16 gx[16], cx[16], mx[16];
+ u32 gxcount, cxcount, mxcount;
+ int ret;
+
+ /* Get the list of available voltage levels for each component */
+ gxcount = a6xx_gmu_rpmh_arc_cmds("gfx.lvl", gx, sizeof(gx));
+ cxcount = a6xx_gmu_rpmh_arc_cmds("cx.lvl", cx, sizeof(cx));
+ mxcount = a6xx_gmu_rpmh_arc_cmds("mx.lvl", mx, sizeof(mx));
+
+ /* Build the GX votes */
+ ret = a6xx_gmu_rpmh_arc_votes_init(&gpu->pdev->dev, gmu->gx_arc_votes,
+ gmu->gpu_freqs, gmu->nr_gpu_freqs,
+ gx, gxcount, mx, mxcount);
+
+ /* Build the CX votes */
+ ret |= a6xx_gmu_rpmh_arc_votes_init(gmu->dev, gmu->cx_arc_votes,
+ gmu->gmu_freqs, gmu->nr_gmu_freqs,
+ cx, cxcount, mx, mxcount);
+
+ return ret;
+}
+
+static int a6xx_gmu_build_freq_table(struct device *dev, unsigned long *freqs,
+ u32 size)
+{
+ int count = dev_pm_opp_get_opp_count(dev);
+ struct dev_pm_opp *opp;
+ int i, index = 0;
+ unsigned long freq = 1;
+
+ /*
+ * The OPP table doesn't contain the "off" frequency level so we need to
+ * add 1 to the table size to account for it
+ */
+
+ if (WARN(count + 1 > size,
+ "The GMU frequency table is being truncated\n"))
+ count = size - 1;
+
+ /* Set the "off" frequency */
+ freqs[index++] = 0;
+
+ for (i = 0; i < count; i++) {
+ opp = dev_pm_opp_find_freq_ceil(dev, &freq);
+ if (IS_ERR(opp))
+ break;
+
+ dev_pm_opp_put(opp);
+ freqs[index++] = freq++;
+ }
+
+ return index;
+}
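+
+/*
+ * For example (frequencies assumed): an OPP table of { 100 MHz, 200 MHz }
+ * yields freqs = { 0, 100000000, 200000000 } and a return value of 3.
+ */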
+
+static int a6xx_gmu_pwrlevels_probe(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gpu *a6xx_gpu = container_of(gmu, struct a6xx_gpu, gmu);
+ struct adreno_gpu *adreno_gpu = &a6xx_gpu->base;
+ struct msm_gpu *gpu = &adreno_gpu->base;
+
+ int ret = 0;
+
+ /*
+ * The GMU handles its own frequency switching so build a list of
+ * available frequencies to send during initialization
+ */
+ ret = dev_pm_opp_of_add_table(gmu->dev);
+ if (ret) {
+ dev_err(gmu->dev, "Unable to set the OPP table for the GMU\n");
+ return ret;
+ }
+
+ gmu->nr_gmu_freqs = a6xx_gmu_build_freq_table(gmu->dev,
+ gmu->gmu_freqs, ARRAY_SIZE(gmu->gmu_freqs));
+
+ /*
+ * The GMU also handles GPU frequency switching so build a list
+ * from the GPU OPP table
+ */
+ gmu->nr_gpu_freqs = a6xx_gmu_build_freq_table(&gpu->pdev->dev,
+ gmu->gpu_freqs, ARRAY_SIZE(gmu->gpu_freqs));
+
+ /* Build the list of RPMh votes that we'll send to the GMU */
+ return a6xx_gmu_rpmh_votes_init(gmu);
+}
+
+static int a6xx_gmu_clocks_probe(struct a6xx_gmu *gmu)
+{
+ int ret = msm_clk_bulk_get(gmu->dev, &gmu->clocks);
+
+ if (ret < 1)
+ return ret;
+
+ gmu->nr_clocks = ret;
+
+ gmu->core_clk = msm_clk_bulk_get_clock(gmu->clocks,
+ gmu->nr_clocks, "gmu");
+
+ return 0;
+}
+
+static void __iomem *a6xx_gmu_get_mmio(struct platform_device *pdev,
+ const char *name)
+{
+ void __iomem *ret;
+ struct resource *res = platform_get_resource_byname(pdev,
+ IORESOURCE_MEM, name);
+
+ if (!res) {
+ dev_err(&pdev->dev, "Unable to find the %s registers\n", name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ ret = devm_ioremap(&pdev->dev, res->start, resource_size(res));
+ if (!ret) {
+ dev_err(&pdev->dev, "Unable to map the %s registers\n", name);
+ return ERR_PTR(-EINVAL);
+ }
+
+ return ret;
+}
+
+static int a6xx_gmu_get_irq(struct a6xx_gmu *gmu, struct platform_device *pdev,
+ const char *name, irq_handler_t handler)
+{
+ int irq, ret;
+
+ irq = platform_get_irq_byname(pdev, name);
+
+ ret = devm_request_irq(&pdev->dev, irq, handler, IRQF_TRIGGER_HIGH,
+ name, gmu);
+ if (ret) {
+ dev_err(&pdev->dev, "Unable to get interrupt %s\n", name);
+ return ret;
+ }
+
+ disable_irq(irq);
+
+ return irq;
+}
+
+void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+
+ if (IS_ERR_OR_NULL(gmu->mmio))
+ return;
+
+ pm_runtime_disable(gmu->dev);
+ a6xx_gmu_stop(a6xx_gpu);
+
+ a6xx_gmu_irq_disable(gmu);
+ a6xx_gmu_memory_free(gmu, gmu->hfi);
+
+ iommu_detach_device(gmu->domain, gmu->dev);
+
+ iommu_domain_free(gmu->domain);
+}
+
+int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node)
+{
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ struct platform_device *pdev = of_find_device_by_node(node);
+ int ret;
+
+ if (!pdev)
+ return -ENODEV;
+
+ gmu->dev = &pdev->dev;
+
+ of_dma_configure(gmu->dev, node, false);
+
+ /* For now, don't do anything fancy until we get our feet under us */
+ gmu->idle_level = GMU_IDLE_STATE_ACTIVE;
+
+ pm_runtime_enable(gmu->dev);
+ gmu->gx = devm_regulator_get(gmu->dev, "vdd");
+
+ /* Get the list of clocks */
+ ret = a6xx_gmu_clocks_probe(gmu);
+ if (ret)
+ return ret;
+
+ /* Set up the IOMMU context bank */
+ ret = a6xx_gmu_memory_probe(gmu);
+ if (ret)
+ return ret;
+
+ /* Allocate memory for the HFI queues */
+ gmu->hfi = a6xx_gmu_memory_alloc(gmu, SZ_16K);
+ if (IS_ERR(gmu->hfi))
+ goto err;
+
+ /* Allocate memory for the GMU debug region */
+ gmu->debug = a6xx_gmu_memory_alloc(gmu, SZ_16K);
+ if (IS_ERR(gmu->debug))
+ goto err;
+
+ /* Map the GMU registers */
+ gmu->mmio = a6xx_gmu_get_mmio(pdev, "gmu");
+
+ /* Map the GPU power domain controller registers */
+ gmu->pdc_mmio = a6xx_gmu_get_mmio(pdev, "gmu_pdc");
+
+ if (IS_ERR(gmu->mmio) || IS_ERR(gmu->pdc_mmio))
+ goto err;
+
+ /* Get the HFI and GMU interrupts */
+ gmu->hfi_irq = a6xx_gmu_get_irq(gmu, pdev, "hfi", a6xx_hfi_irq);
+ gmu->gmu_irq = a6xx_gmu_get_irq(gmu, pdev, "gmu", a6xx_gmu_irq);
+
+ if (gmu->hfi_irq < 0 || gmu->gmu_irq < 0)
+ goto err;
+
+ /* Set up a tasklet to handle GMU HFI responses */
+ tasklet_init(&gmu->hfi_tasklet, a6xx_hfi_task, (unsigned long) gmu);
+
+ /* Get the power levels for the GMU and GPU */
+ a6xx_gmu_pwrlevels_probe(gmu);
+
+ /* Set up the HFI queues */
+ a6xx_hfi_init(gmu);
+
+ return 0;
+err:
+ a6xx_gmu_memory_free(gmu, gmu->hfi);
+
+ if (gmu->domain) {
+ iommu_detach_device(gmu->domain, gmu->dev);
+
+ iommu_domain_free(gmu->domain);
+ }
+
+ return -ENODEV;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
new file mode 100644
index 000000000000..d9a386c18799
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.h
@@ -0,0 +1,162 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved. */
+
+#ifndef _A6XX_GMU_H_
+#define _A6XX_GMU_H_
+
+#include <linux/interrupt.h>
+#include "msm_drv.h"
+#include "a6xx_hfi.h"
+
+struct a6xx_gmu_bo {
+ void *virt;
+ size_t size;
+ u64 iova;
+ struct page **pages;
+};
+
+/*
+ * These define the different GMU wake up options - these define how both the
+ * CPU and the GMU bring up the hardware
+ */
+
+/* The GMU has already been booted and the retention registers are active */
+#define GMU_WARM_BOOT 0
+
+/* The GMU is coming up for the first time or back from a power collapse */
+#define GMU_COLD_BOOT 1
+
+/* The GMU is being soft reset after a fault */
+#define GMU_RESET 2
+
+/*
+ * These define the level of control that the GMU has - the higher the number
+ * the more things that the GMU hardware controls on its own.
+ */
+
+/* The GMU does not do any idle state management */
+#define GMU_IDLE_STATE_ACTIVE 0
+
+/* The GMU manages SPTP power collapse */
+#define GMU_IDLE_STATE_SPTP 2
+
+/* The GMU does automatic IFPC (intra-frame power collapse) */
+#define GMU_IDLE_STATE_IFPC 3
+
+struct a6xx_gmu {
+ struct device *dev;
+
+	void __iomem *mmio;
+	void __iomem *pdc_mmio;
+
+ int hfi_irq;
+ int gmu_irq;
+
+ struct regulator *gx;
+
+ struct iommu_domain *domain;
+ u64 uncached_iova_base;
+
+ int idle_level;
+
+ struct a6xx_gmu_bo *hfi;
+ struct a6xx_gmu_bo *debug;
+
+ int nr_clocks;
+ struct clk_bulk_data *clocks;
+ struct clk *core_clk;
+
+ int nr_gpu_freqs;
+ unsigned long gpu_freqs[16];
+ u32 gx_arc_votes[16];
+
+ int nr_gmu_freqs;
+ unsigned long gmu_freqs[4];
+ u32 cx_arc_votes[4];
+
+ struct a6xx_hfi_queue queues[2];
+
+ struct tasklet_struct hfi_tasklet;
+};
+
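+/*
+ * Register offsets in the generated a6xx_gmu.xml.h header are dword indices,
+ * hence the << 2 to form a byte offset into the mapped region.
+ */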
+static inline u32 gmu_read(struct a6xx_gmu *gmu, u32 offset)
+{
+ return msm_readl(gmu->mmio + (offset << 2));
+}
+
+static inline void gmu_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
+{
+	msm_writel(value, gmu->mmio + (offset << 2));
+}
+
+static inline void pdc_write(struct a6xx_gmu *gmu, u32 offset, u32 value)
+{
+	msm_writel(value, gmu->pdc_mmio + (offset << 2));
+}
+
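+/* Read/modify/write: clear the bits in @mask, then OR in @or */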
+static inline void gmu_rmw(struct a6xx_gmu *gmu, u32 reg, u32 mask, u32 or)
+{
+ u32 val = gmu_read(gmu, reg);
+
+ val &= ~mask;
+
+ gmu_write(gmu, reg, val | or);
+}
+
+#define gmu_poll_timeout(gmu, addr, val, cond, interval, timeout) \
+ readl_poll_timeout((gmu)->mmio + ((addr) << 2), val, cond, \
+ interval, timeout)
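+
+/*
+ * A hypothetical use of the poll helper: wait up to 5ms, polling every 100us,
+ * for the GMU firmware to post a nonzero init result:
+ *
+ *	u32 val;
+ *	int ret = gmu_poll_timeout(gmu, REG_A6XX_GMU_CM3_FW_INIT_RESULT,
+ *		val, val != 0, 100, 5000);
+ */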
+
+/*
+ * These are the available OOB (out of band) requests to the GMU where "out of
+ * band" means that the CPU talks to the GMU directly and not through HFI.
+ * Normally this works by writing an ITCM/DTCM register and then triggering an
+ * interrupt (the "request" bit) and waiting for an acknowledgment (the "ack"
+ * bit). The state is cleared by writing the "clear" bit to the GMU interrupt.
+ *
+ * These are used to force the GMU/GPU to stay on during a critical sequence or
+ * for hardware workarounds.
+ */
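+
+/*
+ * As a sketch (the real sequence lives in a6xx_gmu_set_oob()), a request/ack
+ * handshake built from the bit numbers below might look like:
+ *
+ *	gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 1 << request);
+ *	gmu_poll_timeout(gmu, REG_A6XX_GMU_GMU2HOST_INTR_INFO, val,
+ *		val & (1 << ack), 100, 1000000);
+ *	gmu_write(gmu, REG_A6XX_GMU_GMU2HOST_INTR_CLR, 1 << ack);
+ */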
+
+enum a6xx_gmu_oob_state {
+ GMU_OOB_BOOT_SLUMBER = 0,
+ GMU_OOB_GPU_SET,
+ GMU_OOB_DCVS_SET,
+};
+
+/*
+ * These are the interrupt / ack bits for each OOB request that are set
+ * in a6xx_gmu_set_oob() and a6xx_gmu_clear_oob()
+ */
+
+/*
+ * Let the GMU know that a boot or slumber operation has started. The value in
+ * REG_A6XX_GMU_BOOT_SLUMBER_OPTION lets the GMU know which operation we are
+ * doing
+ */
+#define GMU_OOB_BOOT_SLUMBER_REQUEST 22
+#define GMU_OOB_BOOT_SLUMBER_ACK 30
+#define GMU_OOB_BOOT_SLUMBER_CLEAR 30
+
+/*
+ * Set a new power level for the GPU when the CPU is doing frequency scaling
+ */
+#define GMU_OOB_DCVS_REQUEST 23
+#define GMU_OOB_DCVS_ACK 31
+#define GMU_OOB_DCVS_CLEAR 31
+
+/*
+ * Let the GMU know to not turn off any GPU registers while the CPU is in a
+ * critical section
+ */
+#define GMU_OOB_GPU_SET_REQUEST 16
+#define GMU_OOB_GPU_SET_ACK 24
+#define GMU_OOB_GPU_SET_CLEAR 24
+
+void a6xx_hfi_init(struct a6xx_gmu *gmu);
+int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state);
+void a6xx_hfi_stop(struct a6xx_gmu *gmu);
+
+void a6xx_hfi_task(unsigned long data);
+
+#endif
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
new file mode 100644
index 000000000000..ef68098d2adc
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gmu.xml.h
@@ -0,0 +1,382 @@
+#ifndef A6XX_GMU_XML
+#define A6XX_GMU_XML
+
+/* Autogenerated file, DO NOT EDIT manually!
+
+This file was generated by the rules-ng-ng headergen tool in this git repository:
+http://github.com/freedreno/envytools/
+git clone https://github.com/freedreno/envytools.git
+
+The rules-ng-ng source files this header was generated from are:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
+- Rob Clark <robdclark@gmail.com> (robclark)
+- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
+
+Permission is hereby granted, free of charge, to any person obtaining
+a copy of this software and associated documentation files (the
+"Software"), to deal in the Software without restriction, including
+without limitation the rights to use, copy, modify, merge, publish,
+distribute, sublicense, and/or sell copies of the Software, and to
+permit persons to whom the Software is furnished to do so, subject to
+the following conditions:
+
+The above copyright notice and this permission notice (including the
+next paragraph) shall be included in all copies or substantial
+portions of the Software.
+
+THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
+EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
+MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
+IN NO EVENT SHALL THE COPYRIGHT OWNER(S) AND/OR ITS SUPPLIERS BE
+LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
+OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
+WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
+*/
+
+
+#define A6XX_GMU_GPU_IDLE_STATUS_BUSY_IGN_AHB 0x00800000
+#define A6XX_GMU_GPU_IDLE_STATUS_CX_GX_CPU_BUSY_IGN_AHB 0x40000000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_SET_MASK 0x00400000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CHECK_MASK 0x40000000
+#define A6XX_GMU_OOB_BOOT_SLUMBER_CLEAR_MASK 0x40000000
+#define A6XX_GMU_OOB_DCVS_SET_MASK 0x00800000
+#define A6XX_GMU_OOB_DCVS_CHECK_MASK 0x80000000
+#define A6XX_GMU_OOB_DCVS_CLEAR_MASK 0x80000000
+#define A6XX_GMU_OOB_GPU_SET_MASK 0x00040000
+#define A6XX_GMU_OOB_GPU_CHECK_MASK 0x04000000
+#define A6XX_GMU_OOB_GPU_CLEAR_MASK 0x04000000
+#define A6XX_GMU_OOB_PERFCNTR_SET_MASK 0x00020000
+#define A6XX_GMU_OOB_PERFCNTR_CHECK_MASK 0x02000000
+#define A6XX_GMU_OOB_PERFCNTR_CLEAR_MASK 0x02000000
+#define A6XX_HFI_IRQ_MSGQ_MASK 0x00000001
+#define A6XX_HFI_IRQ_DSGQ_MASK 0x00000002
+#define A6XX_HFI_IRQ_BLOCKED_MSG_MASK 0x00000004
+#define A6XX_HFI_IRQ_CM3_FAULT_MASK 0x00800000
+#define A6XX_HFI_IRQ_GMU_ERR_MASK__MASK 0x007f0000
+#define A6XX_HFI_IRQ_GMU_ERR_MASK__SHIFT 16
+static inline uint32_t A6XX_HFI_IRQ_GMU_ERR_MASK(uint32_t val)
+{
+ return ((val) << A6XX_HFI_IRQ_GMU_ERR_MASK__SHIFT) & A6XX_HFI_IRQ_GMU_ERR_MASK__MASK;
+}
+#define A6XX_HFI_IRQ_OOB_MASK__MASK 0xff000000
+#define A6XX_HFI_IRQ_OOB_MASK__SHIFT 24
+static inline uint32_t A6XX_HFI_IRQ_OOB_MASK(uint32_t val)
+{
+ return ((val) << A6XX_HFI_IRQ_OOB_MASK__SHIFT) & A6XX_HFI_IRQ_OOB_MASK__MASK;
+}
+#define A6XX_HFI_H2F_IRQ_MASK_BIT 0x00000001
+#define REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL 0x00000080
+
+#define REG_A6XX_GMU_GX_SPTPRAC_POWER_CONTROL 0x00000081
+
+#define REG_A6XX_GMU_CM3_ITCM_START 0x00000c00
+
+#define REG_A6XX_GMU_CM3_DTCM_START 0x00001c00
+
+#define REG_A6XX_GMU_NMI_CONTROL_STATUS 0x000023f0
+
+#define REG_A6XX_GMU_BOOT_SLUMBER_OPTION 0x000023f8
+
+#define REG_A6XX_GMU_GX_VOTE_IDX 0x000023f9
+
+#define REG_A6XX_GMU_MX_VOTE_IDX 0x000023fa
+
+#define REG_A6XX_GMU_DCVS_ACK_OPTION 0x000023fc
+
+#define REG_A6XX_GMU_DCVS_PERF_SETTING 0x000023fd
+
+#define REG_A6XX_GMU_DCVS_BW_SETTING 0x000023fe
+
+#define REG_A6XX_GMU_DCVS_RETURN 0x000023ff
+
+#define REG_A6XX_GMU_SYS_BUS_CONFIG 0x00004c0f
+
+#define REG_A6XX_GMU_CM3_SYSRESET 0x00005000
+
+#define REG_A6XX_GMU_CM3_BOOT_CONFIG 0x00005001
+
+#define REG_A6XX_GMU_CM3_FW_BUSY 0x0000501a
+
+#define REG_A6XX_GMU_CM3_FW_INIT_RESULT 0x0000501c
+
+#define REG_A6XX_GMU_CM3_CFG 0x0000502d
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE 0x00005040
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0 0x00005041
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_1 0x00005042
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_L 0x00005044
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_0_H 0x00005045
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_L 0x00005046
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_1_H 0x00005047
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_L 0x00005048
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_2_H 0x00005049
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_L 0x0000504a
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_3_H 0x0000504b
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_L 0x0000504c
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_4_H 0x0000504d
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_L 0x0000504e
+
+#define REG_A6XX_GMU_CX_GMU_POWER_COUNTER_XOCLK_5_H 0x0000504f
+
+#define REG_A6XX_GMU_PWR_COL_INTER_FRAME_CTRL 0x000050c0
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_IFPC_ENABLE 0x00000001
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_HM_POWER_COLLAPSE_ENABLE 0x00000002
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_SPTPRAC_POWER_CONTROL_ENABLE 0x00000004
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__MASK 0x00003c00
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__SHIFT 10
+static inline uint32_t A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS(uint32_t val)
+{
+ return ((val) << A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__SHIFT) & A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_NUM_PASS_SKIPS__MASK;
+}
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__MASK 0xffffc000
+#define A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__SHIFT 14
+static inline uint32_t A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH(uint32_t val)
+{
+ return ((val) << A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__SHIFT) & A6XX_GMU_PWR_COL_INTER_FRAME_CTRL_MIN_PASS_LENGTH__MASK;
+}
+
+#define REG_A6XX_GMU_PWR_COL_INTER_FRAME_HYST 0x000050c1
+
+#define REG_A6XX_GMU_PWR_COL_SPTPRAC_HYST 0x000050c2
+
+#define REG_A6XX_GMU_SPTPRAC_PWR_CLK_STATUS 0x000050d0
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_OFF 0x00000001
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWERING_ON 0x00000002
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_ON 0x00000004
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SPTPRAC_GDSC_POWER_OFF 0x00000008
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_SP_CLOCK_OFF 0x00000010
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GMU_UP_POWER_STATE 0x00000020
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_GDSC_POWER_OFF 0x00000040
+#define A6XX_GMU_SPTPRAC_PWR_CLK_STATUS_GX_HM_CLK_OFF 0x00000080
+
+#define REG_A6XX_GMU_GPU_NAP_CTRL 0x000050e4
+#define A6XX_GMU_GPU_NAP_CTRL_HW_NAP_ENABLE 0x00000001
+#define A6XX_GMU_GPU_NAP_CTRL_SID__MASK 0x000001f0
+#define A6XX_GMU_GPU_NAP_CTRL_SID__SHIFT 4
+static inline uint32_t A6XX_GMU_GPU_NAP_CTRL_SID(uint32_t val)
+{
+ return ((val) << A6XX_GMU_GPU_NAP_CTRL_SID__SHIFT) & A6XX_GMU_GPU_NAP_CTRL_SID__MASK;
+}
+
+#define REG_A6XX_GMU_RPMH_CTRL 0x000050e8
+#define A6XX_GMU_RPMH_CTRL_RPMH_INTERFACE_ENABLE 0x00000001
+#define A6XX_GMU_RPMH_CTRL_LLC_VOTE_ENABLE 0x00000010
+#define A6XX_GMU_RPMH_CTRL_DDR_VOTE_ENABLE 0x00000100
+#define A6XX_GMU_RPMH_CTRL_MX_VOTE_ENABLE 0x00000200
+#define A6XX_GMU_RPMH_CTRL_CX_VOTE_ENABLE 0x00000400
+#define A6XX_GMU_RPMH_CTRL_GFX_VOTE_ENABLE 0x00000800
+#define A6XX_GMU_RPMH_CTRL_DDR_MIN_VOTE_ENABLE 0x00001000
+#define A6XX_GMU_RPMH_CTRL_MX_MIN_VOTE_ENABLE 0x00002000
+#define A6XX_GMU_RPMH_CTRL_CX_MIN_VOTE_ENABLE 0x00004000
+#define A6XX_GMU_RPMH_CTRL_GFX_MIN_VOTE_ENABLE 0x00008000
+
+#define REG_A6XX_GMU_RPMH_HYST_CTRL 0x000050e9
+
+#define REG_A6XX_GPU_GMU_CX_GMU_RPMH_POWER_STATE 0x000050ec
+
+#define REG_A6XX_GMU_BOOT_KMD_LM_HANDSHAKE 0x000051f0
+
+#define REG_A6XX_GMU_LLM_GLM_SLEEP_CTRL 0x00005157
+
+#define REG_A6XX_GMU_LLM_GLM_SLEEP_STATUS 0x00005158
+
+#define REG_A6XX_GMU_ALWAYS_ON_COUNTER_L 0x00005088
+
+#define REG_A6XX_GMU_ALWAYS_ON_COUNTER_H 0x00005089
+
+#define REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE 0x000050c3
+
+#define REG_A6XX_GMU_HFI_CTRL_STATUS 0x00005180
+
+#define REG_A6XX_GMU_HFI_VERSION_INFO 0x00005181
+
+#define REG_A6XX_GMU_HFI_SFR_ADDR 0x00005182
+
+#define REG_A6XX_GMU_HFI_MMAP_ADDR 0x00005183
+
+#define REG_A6XX_GMU_HFI_QTBL_INFO 0x00005184
+
+#define REG_A6XX_GMU_HFI_QTBL_ADDR 0x00005185
+
+#define REG_A6XX_GMU_HFI_CTRL_INIT 0x00005186
+
+#define REG_A6XX_GMU_GMU2HOST_INTR_SET 0x00005190
+
+#define REG_A6XX_GMU_GMU2HOST_INTR_CLR 0x00005191
+
+#define REG_A6XX_GMU_GMU2HOST_INTR_INFO 0x00005192
+#define A6XX_GMU_GMU2HOST_INTR_INFO_MSGQ 0x00000001
+#define A6XX_GMU_GMU2HOST_INTR_INFO_CM3_FAULT 0x00800000
+
+#define REG_A6XX_GMU_GMU2HOST_INTR_MASK 0x00005193
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_SET 0x00005194
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_CLR 0x00005195
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_RAW_INFO 0x00005196
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_EN_0 0x00005197
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_EN_1 0x00005198
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_EN_2 0x00005199
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_EN_3 0x0000519a
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_0 0x0000519b
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_1 0x0000519c
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_2 0x0000519d
+
+#define REG_A6XX_GMU_HOST2GMU_INTR_INFO_3 0x0000519e
+
+#define REG_A6XX_GMU_GENERAL_1 0x000051c6
+
+#define REG_A6XX_GMU_GENERAL_7 0x000051cc
+
+#define REG_A6XX_GMU_ISENSE_CTRL 0x0000515d
+
+#define REG_A6XX_GPU_CS_ENABLE_REG 0x00008920
+
+#define REG_A6XX_GPU_GMU_CX_GMU_ISENSE_CTRL 0x0000515d
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_CONTROL3 0x00008578
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_CONTROL2 0x00008558
+
+#define REG_A6XX_GPU_CS_A_SENSOR_CTRL_0 0x00008580
+
+#define REG_A6XX_GPU_CS_A_SENSOR_CTRL_2 0x00027ada
+
+#define REG_A6XX_GPU_CS_SENSOR_GENERAL_STATUS 0x0000881a
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_CONTROL1 0x00008957
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_0 0x0000881d
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_2 0x0000881f
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_STATUS1_4 0x00008821
+
+#define REG_A6XX_GPU_CS_AMP_CALIBRATION_DONE 0x00008965
+
+#define REG_A6XX_GPU_CS_AMP_PERIOD_CTRL 0x0000896d
+
+#define REG_A6XX_GPU_GMU_CX_GMU_PWR_THRESHOLD 0x0000514d
+
+#define REG_A6XX_GMU_AO_INTERRUPT_EN 0x00009303
+
+#define REG_A6XX_GMU_AO_HOST_INTERRUPT_CLR 0x00009304
+
+#define REG_A6XX_GMU_AO_HOST_INTERRUPT_STATUS 0x00009305
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_WDOG_BITE 0x00000001
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_RSCC_COMP 0x00000002
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_VDROOP 0x00000004
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_FENCE_ERR 0x00000008
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_DBD_WAKEUP 0x00000010
+#define A6XX_GMU_AO_HOST_INTERRUPT_STATUS_HOST_AHB_BUS_ERROR 0x00000020
+
+#define REG_A6XX_GMU_AO_HOST_INTERRUPT_MASK 0x00009306
+
+#define REG_A6XX_GPU_GMU_AO_GMU_CGC_MODE_CNTL 0x00009309
+
+#define REG_A6XX_GPU_GMU_AO_GMU_CGC_DELAY_CNTL 0x0000930a
+
+#define REG_A6XX_GPU_GMU_AO_GMU_CGC_HYST_CNTL 0x0000930b
+
+#define REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS 0x0000930c
+#define A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS_GPUBUSYIGNAHB 0x00800000
+
+#define REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_STATUS2 0x0000930d
+
+#define REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK 0x0000930e
+
+#define REG_A6XX_GMU_AO_AHB_FENCE_CTRL 0x00009310
+
+#define REG_A6XX_GMU_AHB_FENCE_STATUS 0x00009313
+
+#define REG_A6XX_GMU_RBBM_INT_UNMASKED_STATUS 0x00009315
+
+#define REG_A6XX_GMU_AO_SPARE_CNTL 0x00009316
+
+#define REG_A6XX_GPU_RSCC_RSC_STATUS0_DRV0 0x00008c04
+
+#define REG_A6XX_GMU_RSCC_CONTROL_REQ 0x00009307
+
+#define REG_A6XX_GMU_RSCC_CONTROL_ACK 0x00009308
+
+#define REG_A6XX_GMU_AHB_FENCE_RANGE_0 0x00009311
+
+#define REG_A6XX_GMU_AHB_FENCE_RANGE_1 0x00009312
+
+#define REG_A6XX_GPU_CC_GX_GDSCR 0x00009c03
+
+#define REG_A6XX_GPU_CC_GX_DOMAIN_MISC 0x00009d42
+
+#define REG_A6XX_RSCC_PDC_SEQ_START_ADDR 0x00008c08
+
+#define REG_A6XX_RSCC_PDC_MATCH_VALUE_LO 0x00008c09
+
+#define REG_A6XX_RSCC_PDC_MATCH_VALUE_HI 0x00008c0a
+
+#define REG_A6XX_RSCC_PDC_SLAVE_ID_DRV0 0x00008c0b
+
+#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_ADDR 0x00008c0d
+
+#define REG_A6XX_RSCC_HIDDEN_TCS_CMD0_DATA 0x00008c0e
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_L_DRV0 0x00008c82
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT0_TIMESTAMP_H_DRV0 0x00008c83
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_EN_DRV0 0x00008c89
+
+#define REG_A6XX_RSCC_TIMESTAMP_UNIT1_OUTPUT_DRV0 0x00008c8c
+
+#define REG_A6XX_RSCC_OVERRIDE_START_ADDR 0x00008d00
+
+#define REG_A6XX_RSCC_SEQ_BUSY_DRV0 0x00008d01
+
+#define REG_A6XX_RSCC_SEQ_MEM_0_DRV0 0x00008d80
+
+#define REG_A6XX_RSCC_TCS0_DRV0_STATUS 0x00008f46
+
+#define REG_A6XX_RSCC_TCS1_DRV0_STATUS 0x000090ae
+
+#define REG_A6XX_RSCC_TCS2_DRV0_STATUS 0x00009216
+
+#define REG_A6XX_RSCC_TCS3_DRV0_STATUS 0x0000937e
+
+
+#endif /* A6XX_GMU_XML */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.c b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
new file mode 100644
index 000000000000..c629f742a1d1
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.c
@@ -0,0 +1,818 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
+
+#include "msm_gem.h"
+#include "msm_mmu.h"
+#include "a6xx_gpu.h"
+#include "a6xx_gmu.xml.h"
+
+static inline bool _a6xx_check_idle(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ /* Check that the GMU is idle */
+ if (!a6xx_gmu_isidle(&a6xx_gpu->gmu))
+ return false;
+
+	/* Check that the CX master is idle */
+ if (gpu_read(gpu, REG_A6XX_RBBM_STATUS) &
+ ~A6XX_RBBM_STATUS_CP_AHB_BUSY_CX_MASTER)
+ return false;
+
+ return !(gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS) &
+ A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT);
+}
+
+bool a6xx_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ /* wait for CP to drain ringbuffer: */
+ if (!adreno_idle(gpu, ring))
+ return false;
+
+ if (spin_until(_a6xx_check_idle(gpu))) {
+ DRM_ERROR("%s: %ps: timeout waiting for GPU to idle: status %8.8X irq %8.8X rptr/wptr %d/%d\n",
+ gpu->name, __builtin_return_address(0),
+ gpu_read(gpu, REG_A6XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS),
+ gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A6XX_CP_RB_WPTR));
+ return false;
+ }
+
+ return true;
+}
+
+static void a6xx_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
+{
+ uint32_t wptr;
+ unsigned long flags;
+
+ spin_lock_irqsave(&ring->lock, flags);
+
+ /* Copy the shadow to the actual register */
+ ring->cur = ring->next;
+
+ /* Make sure to wrap wptr if we need to */
+ wptr = get_wptr(ring);
+
+ spin_unlock_irqrestore(&ring->lock, flags);
+
+ /* Make sure everything is posted before making a decision */
+ mb();
+
+ gpu_write(gpu, REG_A6XX_CP_RB_WPTR, wptr);
+}
+
+static void a6xx_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
+ struct msm_file_private *ctx)
+{
+ struct msm_drm_private *priv = gpu->dev->dev_private;
+ struct msm_ringbuffer *ring = submit->ring;
+ unsigned int i;
+
+ /* Invalidate CCU depth and color */
+ OUT_PKT7(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, PC_CCU_INVALIDATE_DEPTH);
+
+ OUT_PKT7(ring, CP_EVENT_WRITE, 1);
+ OUT_RING(ring, PC_CCU_INVALIDATE_COLOR);
+
+ /* Submit the commands */
+ for (i = 0; i < submit->nr_cmds; i++) {
+ switch (submit->cmd[i].type) {
+ case MSM_SUBMIT_CMD_IB_TARGET_BUF:
+ break;
+ case MSM_SUBMIT_CMD_CTX_RESTORE_BUF:
+ if (priv->lastctx == ctx)
+ break;
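+			/* else fall through to submit the IB */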
+ case MSM_SUBMIT_CMD_BUF:
+ OUT_PKT7(ring, CP_INDIRECT_BUFFER_PFE, 3);
+ OUT_RING(ring, lower_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, upper_32_bits(submit->cmd[i].iova));
+ OUT_RING(ring, submit->cmd[i].size);
+ break;
+ }
+ }
+
+ /* Write the fence to the scratch register */
+ OUT_PKT4(ring, REG_A6XX_CP_SCRATCH_REG(2), 1);
+ OUT_RING(ring, submit->seqno);
+
+ /*
+ * Execute a CACHE_FLUSH_TS event. This will ensure that the
+	 * timestamp is written to memory and then triggers the interrupt
+ */
+ OUT_PKT7(ring, CP_EVENT_WRITE, 4);
+ OUT_RING(ring, CACHE_FLUSH_TS | (1 << 31));
+ OUT_RING(ring, lower_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, upper_32_bits(rbmemptr(ring, fence)));
+ OUT_RING(ring, submit->seqno);
+
+ a6xx_flush(gpu, ring);
+}
+
+static const struct {
+ u32 offset;
+ u32 value;
+} a6xx_hwcg[] = {
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP2, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_SP3, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP0, 0x02022220},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP1, 0x02022220},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP2, 0x02022220},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_SP3, 0x02022220},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP0, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP1, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP2, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_DELAY_SP3, 0x00000080},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP0, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP1, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP2, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_HYST_SP3, 0x0000f3cf},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP0, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP1, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP2, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TP3, 0x02222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP2, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_TP3, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP2, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_TP3, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP0, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP1, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP2, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_TP3, 0x00022222},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP1, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP2, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST_TP3, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP1, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP2, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST2_TP3, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP0, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP1, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP2, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST3_TP3, 0x77777777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP0, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP1, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP2, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_HYST4_TP3, 0x00077777},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP1, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP2, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TP3, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP1, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP2, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY2_TP3, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP0, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP1, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP2, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY3_TP3, 0x11111111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP0, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP1, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP2, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_DELAY4_TP3, 0x00011111},
+ {REG_A6XX_RBBM_CLOCK_CNTL_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL3_UCHE, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL4_UCHE, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_HYST_UCHE, 0x00000004},
+ {REG_A6XX_RBBM_CLOCK_DELAY_UCHE, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB0, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB1, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB2, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RB3, 0x22222222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB0, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB1, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB2, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RB3, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU0, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU1, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU2, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_CNTL_CCU3, 0x00002220},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU0, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU1, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU2, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_HYST_RB_CCU3, 0x00040f00},
+ {REG_A6XX_RBBM_CLOCK_CNTL_RAC, 0x05022022},
+ {REG_A6XX_RBBM_CLOCK_CNTL2_RAC, 0x00005555},
+ {REG_A6XX_RBBM_CLOCK_DELAY_RAC, 0x00000011},
+ {REG_A6XX_RBBM_CLOCK_HYST_RAC, 0x00445044},
+ {REG_A6XX_RBBM_CLOCK_CNTL_TSE_RAS_RBBM, 0x04222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_GPC, 0x00222222},
+ {REG_A6XX_RBBM_CLOCK_MODE_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_HYST_TSE_RAS_RBBM, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_HYST_GPC, 0x04104004},
+ {REG_A6XX_RBBM_CLOCK_HYST_VFD, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ, 0x00000000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_TSE_RAS_RBBM, 0x00004000},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GPC, 0x00000200},
+ {REG_A6XX_RBBM_CLOCK_DELAY_VFD, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_HLSQ_2, 0x00000002},
+ {REG_A6XX_RBBM_CLOCK_MODE_HLSQ, 0x00002222},
+ {REG_A6XX_RBBM_CLOCK_CNTL_GMU_GX, 0x00000222},
+ {REG_A6XX_RBBM_CLOCK_DELAY_GMU_GX, 0x00000111},
+ {REG_A6XX_RBBM_CLOCK_HYST_GMU_GX, 0x00000555}
+};
+
+static void a6xx_set_hwcg(struct msm_gpu *gpu, bool state)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct a6xx_gmu *gmu = &a6xx_gpu->gmu;
+ unsigned int i;
+ u32 val;
+
+ val = gpu_read(gpu, REG_A6XX_RBBM_CLOCK_CNTL);
+
+ /* Don't re-program the registers if they are already correct */
+ if ((!state && !val) || (state && (val == 0x8aa8aa02)))
+ return;
+
+ /* Disable SP clock before programming HWCG registers */
+ gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 1, 0);
+
+ for (i = 0; i < ARRAY_SIZE(a6xx_hwcg); i++)
+ gpu_write(gpu, a6xx_hwcg[i].offset,
+ state ? a6xx_hwcg[i].value : 0);
+
+ /* Enable SP clock */
+ gmu_rmw(gmu, REG_A6XX_GPU_GMU_GX_SPTPRAC_CLOCK_CONTROL, 0, 1);
+
+ gpu_write(gpu, REG_A6XX_RBBM_CLOCK_CNTL, state ? 0x8aa8aa02 : 0);
+}
+
+static int a6xx_cp_init(struct msm_gpu *gpu)
+{
+ struct msm_ringbuffer *ring = gpu->rb[0];
+
+ OUT_PKT7(ring, CP_ME_INIT, 8);
+
+ OUT_RING(ring, 0x0000002f);
+
+ /* Enable multiple hardware contexts */
+ OUT_RING(ring, 0x00000003);
+
+ /* Enable error detection */
+ OUT_RING(ring, 0x20000000);
+
+ /* Don't enable header dump */
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ /* No workarounds enabled */
+ OUT_RING(ring, 0x00000000);
+
+ /* Pad rest of the cmds with 0's */
+ OUT_RING(ring, 0x00000000);
+ OUT_RING(ring, 0x00000000);
+
+ a6xx_flush(gpu, ring);
+ return a6xx_idle(gpu, ring) ? 0 : -EINVAL;
+}
+
+static int a6xx_ucode_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ if (!a6xx_gpu->sqe_bo) {
+ a6xx_gpu->sqe_bo = adreno_fw_create_bo(gpu,
+ adreno_gpu->fw[ADRENO_FW_SQE], &a6xx_gpu->sqe_iova);
+
+ if (IS_ERR(a6xx_gpu->sqe_bo)) {
+ int ret = PTR_ERR(a6xx_gpu->sqe_bo);
+
+ a6xx_gpu->sqe_bo = NULL;
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "Could not allocate SQE ucode: %d\n", ret);
+
+ return ret;
+ }
+ }
+
+ gpu_write64(gpu, REG_A6XX_CP_SQE_INSTR_BASE_LO,
+ REG_A6XX_CP_SQE_INSTR_BASE_HI, a6xx_gpu->sqe_iova);
+
+ return 0;
+}
+
+#define A6XX_INT_MASK (A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR | \
+ A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW | \
+ A6XX_RBBM_INT_0_MASK_CP_HW_ERROR | \
+ A6XX_RBBM_INT_0_MASK_CP_IB2 | \
+ A6XX_RBBM_INT_0_MASK_CP_IB1 | \
+ A6XX_RBBM_INT_0_MASK_CP_RB | \
+ A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS | \
+ A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW | \
+ A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT | \
+ A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS | \
+ A6XX_RBBM_INT_0_MASK_UCHE_TRAP_INTR)
+
+static int a6xx_hw_init(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int ret;
+
+ /* Make sure the GMU keeps the GPU on while we set it up */
+ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_CNTL, 0);
+
+ /*
+	 * Disable the trusted memory range - we don't actually support secure
+ * memory rendering at this point in time and we don't want to block off
+ * part of the virtual memory space.
+ */
+ gpu_write64(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_LO,
+ REG_A6XX_RBBM_SECVID_TSB_TRUSTED_BASE_HI, 0x00000000);
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TSB_TRUSTED_SIZE, 0x00000000);
+
+ /* enable hardware clockgating */
+ a6xx_set_hwcg(gpu, true);
+
+ /* VBIF start */
+ gpu_write(gpu, REG_A6XX_VBIF_GATE_OFF_WRREQ_EN, 0x00000009);
+ gpu_write(gpu, REG_A6XX_RBBM_VBIF_CLIENT_QOS_CNTL, 0x3);
+
+ /* Make all blocks contribute to the GPU BUSY perf counter */
+ gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_GPU_BUSY_MASKED, 0xffffffff);
+
+ /* Disable L2 bypass in the UCHE */
+ gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_LO, 0xffffffc0);
+ gpu_write(gpu, REG_A6XX_UCHE_WRITE_RANGE_MAX_HI, 0x0001ffff);
+ gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_LO, 0xfffff000);
+ gpu_write(gpu, REG_A6XX_UCHE_TRAP_BASE_HI, 0x0001ffff);
+ gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_LO, 0xfffff000);
+ gpu_write(gpu, REG_A6XX_UCHE_WRITE_THRU_BASE_HI, 0x0001ffff);
+
+ /* Set the GMEM VA range [0x100000:0x100000 + gpu->gmem - 1] */
+ gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MIN_LO,
+ REG_A6XX_UCHE_GMEM_RANGE_MIN_HI, 0x00100000);
+
+ gpu_write64(gpu, REG_A6XX_UCHE_GMEM_RANGE_MAX_LO,
+ REG_A6XX_UCHE_GMEM_RANGE_MAX_HI,
+ 0x00100000 + adreno_gpu->gmem - 1);
+
+ gpu_write(gpu, REG_A6XX_UCHE_FILTER_CNTL, 0x804);
+ gpu_write(gpu, REG_A6XX_UCHE_CACHE_WAYS, 0x4);
+
+ gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_2, 0x010000c0);
+ gpu_write(gpu, REG_A6XX_CP_ROQ_THRESHOLDS_1, 0x8040362c);
+
+ /* Setting the mem pool size */
+ gpu_write(gpu, REG_A6XX_CP_MEM_POOL_SIZE, 128);
+
+ /* Setting the primFifo thresholds default values */
+ gpu_write(gpu, REG_A6XX_PC_DBG_ECO_CNTL, (0x300 << 11));
+
+ /* Set the AHB default slave response to "ERROR" */
+ gpu_write(gpu, REG_A6XX_CP_AHB_CNTL, 0x1);
+
+ /* Turn on performance counters */
+ gpu_write(gpu, REG_A6XX_RBBM_PERFCTR_CNTL, 0x1);
+
+ /* Select CP0 to always count cycles */
+ gpu_write(gpu, REG_A6XX_CP_PERFCTR_CP_SEL_0, PERF_CP_ALWAYS_COUNT);
+
+ /* FIXME: not sure if this should live here or in a6xx_gmu.c */
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GPU_GMU_AO_GPU_CX_BUSY_MASK,
+ 0xff000000);
+ gmu_rmw(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_SELECT_0,
+ 0xff, 0x20);
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_CX_GMU_POWER_COUNTER_ENABLE,
+ 0x01);
+
+ gpu_write(gpu, REG_A6XX_RB_NC_MODE_CNTL, 2 << 1);
+ gpu_write(gpu, REG_A6XX_TPL1_NC_MODE_CNTL, 2 << 1);
+ gpu_write(gpu, REG_A6XX_SP_NC_MODE_CNTL, 2 << 1);
+ gpu_write(gpu, REG_A6XX_UCHE_MODE_CNTL, 2 << 21);
+
+ /* Enable fault detection */
+ gpu_write(gpu, REG_A6XX_RBBM_INTERFACE_HANG_INT_CNTL,
+ (1 << 30) | 0x1fffff);
+
+ gpu_write(gpu, REG_A6XX_UCHE_CLIENT_PF, 1);
+
+ /* Protect registers from the CP */
+ gpu_write(gpu, REG_A6XX_CP_PROTECT_CNTL, 0x00000003);
+
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(0),
+ A6XX_PROTECT_RDONLY(0x600, 0x51));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(1), A6XX_PROTECT_RW(0xae50, 0x2));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(2), A6XX_PROTECT_RW(0x9624, 0x13));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(3), A6XX_PROTECT_RW(0x8630, 0x8));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(4), A6XX_PROTECT_RW(0x9e70, 0x1));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(5), A6XX_PROTECT_RW(0x9e78, 0x187));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(6), A6XX_PROTECT_RW(0xf000, 0x810));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(7),
+ A6XX_PROTECT_RDONLY(0xfc00, 0x3));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(8), A6XX_PROTECT_RW(0x50e, 0x0));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(9), A6XX_PROTECT_RDONLY(0x50f, 0x0));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(10), A6XX_PROTECT_RW(0x510, 0x0));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(11),
+ A6XX_PROTECT_RDONLY(0x0, 0x4f9));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(12),
+ A6XX_PROTECT_RDONLY(0x501, 0xa));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(13),
+ A6XX_PROTECT_RDONLY(0x511, 0x44));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(14), A6XX_PROTECT_RW(0xe00, 0xe));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(15), A6XX_PROTECT_RW(0x8e00, 0x0));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(16), A6XX_PROTECT_RW(0x8e50, 0xf));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(17), A6XX_PROTECT_RW(0xbe02, 0x0));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(18),
+ A6XX_PROTECT_RW(0xbe20, 0x11f3));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(19), A6XX_PROTECT_RW(0x800, 0x82));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(20), A6XX_PROTECT_RW(0x8a0, 0x8));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(21), A6XX_PROTECT_RW(0x8ab, 0x19));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(22), A6XX_PROTECT_RW(0x900, 0x4d));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(23), A6XX_PROTECT_RW(0x98d, 0x76));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(24),
+ A6XX_PROTECT_RDONLY(0x8d0, 0x23));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(25),
+ A6XX_PROTECT_RDONLY(0x980, 0x4));
+ gpu_write(gpu, REG_A6XX_CP_PROTECT(26), A6XX_PROTECT_RW(0xa630, 0x0));
+
+ /* Enable interrupts */
+ gpu_write(gpu, REG_A6XX_RBBM_INT_0_MASK, A6XX_INT_MASK);
+
+ ret = adreno_hw_init(gpu);
+ if (ret)
+ goto out;
+
+ ret = a6xx_ucode_init(gpu);
+ if (ret)
+ goto out;
+
+ /* Always come up on rb 0 */
+ a6xx_gpu->cur_ring = gpu->rb[0];
+
+	/* Enable the SQE to start the CP engine */
+ gpu_write(gpu, REG_A6XX_CP_SQE_CNTL, 1);
+
+ ret = a6xx_cp_init(gpu);
+ if (ret)
+ goto out;
+
+ gpu_write(gpu, REG_A6XX_RBBM_SECVID_TRUST_CNTL, 0x0);
+
+out:
+ /*
+ * Tell the GMU that we are done touching the GPU and it can start power
+ * management
+ */
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+
+ /* Take the GMU out of its special boot mode */
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_BOOT_SLUMBER);
+
+ return ret;
+}
+
+static void a6xx_dump(struct msm_gpu *gpu)
+{
+ dev_info(&gpu->pdev->dev, "status: %08x\n",
+ gpu_read(gpu, REG_A6XX_RBBM_STATUS));
+ adreno_dump(gpu);
+}
+
+#define VBIF_RESET_ACK_TIMEOUT 100
+#define VBIF_RESET_ACK_MASK 0x00f0
+
+static void a6xx_recover(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int i;
+
+ adreno_dump_info(gpu);
+
+ for (i = 0; i < 8; i++)
+ dev_info(&gpu->pdev->dev, "CP_SCRATCH_REG%d: %u\n", i,
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(i)));
+
+ if (hang_debug)
+ a6xx_dump(gpu);
+
+ /*
+ * Turn off keep alive that might have been enabled by the hang
+ * interrupt
+ */
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 0);
+
+ gpu->funcs->pm_suspend(gpu);
+ gpu->funcs->pm_resume(gpu);
+
+ msm_gpu_hw_init(gpu);
+}
+
+static int a6xx_fault_handler(void *arg, unsigned long iova, int flags)
+{
+ struct msm_gpu *gpu = arg;
+
+ pr_warn_ratelimited("*** gpu fault: iova=%08lx, flags=%d (%u,%u,%u,%u)\n",
+ iova, flags,
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(4)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(5)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(6)),
+ gpu_read(gpu, REG_A6XX_CP_SCRATCH_REG(7)));
+
+ return -EFAULT;
+}
+
+static void a6xx_cp_hw_err_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A6XX_CP_INTERRUPT_STATUS);
+
+ if (status & A6XX_CP_INT_CP_OPCODE_ERROR) {
+ u32 val;
+
+ gpu_write(gpu, REG_A6XX_CP_SQE_STAT_ADDR, 1);
+ val = gpu_read(gpu, REG_A6XX_CP_SQE_STAT_DATA);
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP | opcode error | possible opcode=0x%8.8X\n",
+ val);
+ }
+
+ if (status & A6XX_CP_INT_CP_UCODE_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP ucode error interrupt\n");
+
+ if (status & A6XX_CP_INT_CP_HW_FAULT_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP | HW fault | status=0x%8.8X\n",
+ gpu_read(gpu, REG_A6XX_CP_HW_FAULT));
+
+ if (status & A6XX_CP_INT_CP_REGISTER_PROTECTION_ERROR) {
+ u32 val = gpu_read(gpu, REG_A6XX_CP_PROTECT_STATUS);
+
+ dev_err_ratelimited(&gpu->pdev->dev,
+ "CP | protected mode error | %s | addr=0x%8.8X | status=0x%8.8X\n",
+ val & (1 << 20) ? "READ" : "WRITE",
+ (val & 0x3ffff), val);
+ }
+
+ if (status & A6XX_CP_INT_CP_AHB_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP AHB error interrupt\n");
+
+ if (status & A6XX_CP_INT_CP_VSD_PARITY_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP VSD decoder parity error\n");
+
+ if (status & A6XX_CP_INT_CP_ILLEGAL_INSTR_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP illegal instruction error\n");
+}
+
+static void a6xx_fault_detect_irq(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ struct drm_device *dev = gpu->dev;
+ struct msm_drm_private *priv = dev->dev_private;
+ struct msm_ringbuffer *ring = gpu->funcs->active_ring(gpu);
+
+ /*
+ * Force the GPU to stay on until after we finish
+ * collecting information
+ */
+ gmu_write(&a6xx_gpu->gmu, REG_A6XX_GMU_GMU_PWR_COL_KEEPALIVE, 1);
+
+ DRM_DEV_ERROR(&gpu->pdev->dev,
+ "gpu fault ring %d fence %x status %8.8X rb %4.4x/%4.4x ib1 %16.16llX/%4.4x ib2 %16.16llX/%4.4x\n",
+ ring ? ring->id : -1, ring ? ring->seqno : 0,
+ gpu_read(gpu, REG_A6XX_RBBM_STATUS),
+ gpu_read(gpu, REG_A6XX_CP_RB_RPTR),
+ gpu_read(gpu, REG_A6XX_CP_RB_WPTR),
+ gpu_read64(gpu, REG_A6XX_CP_IB1_BASE, REG_A6XX_CP_IB1_BASE_HI),
+ gpu_read(gpu, REG_A6XX_CP_IB1_REM_SIZE),
+ gpu_read64(gpu, REG_A6XX_CP_IB2_BASE, REG_A6XX_CP_IB2_BASE_HI),
+ gpu_read(gpu, REG_A6XX_CP_IB2_REM_SIZE));
+
+ /* Turn off the hangcheck timer to keep it from bothering us */
+ del_timer(&gpu->hangcheck_timer);
+
+ queue_work(priv->wq, &gpu->recover_work);
+}
+
+static irqreturn_t a6xx_irq(struct msm_gpu *gpu)
+{
+ u32 status = gpu_read(gpu, REG_A6XX_RBBM_INT_0_STATUS);
+
+ gpu_write(gpu, REG_A6XX_RBBM_INT_CLEAR_CMD, status);
+
+ if (status & A6XX_RBBM_INT_0_MASK_RBBM_HANG_DETECT)
+ a6xx_fault_detect_irq(gpu);
+
+ if (status & A6XX_RBBM_INT_0_MASK_CP_AHB_ERROR)
+ dev_err_ratelimited(&gpu->pdev->dev, "CP | AHB bus error\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_CP_HW_ERROR)
+ a6xx_cp_hw_err_irq(gpu);
+
+ if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_ASYNCFIFO_OVERFLOW)
+ dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB ASYNC overflow\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_RBBM_ATB_BUS_OVERFLOW)
+ dev_err_ratelimited(&gpu->pdev->dev, "RBBM | ATB bus overflow\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_UCHE_OOB_ACCESS)
+ dev_err_ratelimited(&gpu->pdev->dev, "UCHE | Out of bounds access\n");
+
+ if (status & A6XX_RBBM_INT_0_MASK_CP_CACHE_FLUSH_TS)
+ msm_gpu_retire(gpu);
+
+ return IRQ_HANDLED;
+}
+
+static const u32 a6xx_register_offsets[REG_ADRENO_REGISTER_MAX] = {
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE, REG_A6XX_CP_RB_BASE),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_BASE_HI, REG_A6XX_CP_RB_BASE_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR,
+ REG_A6XX_CP_RB_RPTR_ADDR_LO),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR_ADDR_HI,
+ REG_A6XX_CP_RB_RPTR_ADDR_HI),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_RPTR, REG_A6XX_CP_RB_RPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_WPTR, REG_A6XX_CP_RB_WPTR),
+ REG_ADRENO_DEFINE(REG_ADRENO_CP_RB_CNTL, REG_A6XX_CP_RB_CNTL),
+};
+
+static const u32 a6xx_registers[] = {
+ 0x0000, 0x0002, 0x0010, 0x0010, 0x0012, 0x0012, 0x0018, 0x001b,
+ 0x001e, 0x0032, 0x0038, 0x003c, 0x0042, 0x0042, 0x0044, 0x0044,
+ 0x0047, 0x0047, 0x0056, 0x0056, 0x00ad, 0x00ae, 0x00b0, 0x00fb,
+ 0x0100, 0x011d, 0x0200, 0x020d, 0x0210, 0x0213, 0x0218, 0x023d,
+ 0x0400, 0x04f9, 0x0500, 0x0500, 0x0505, 0x050b, 0x050e, 0x0511,
+ 0x0533, 0x0533, 0x0540, 0x0555, 0x0800, 0x0808, 0x0810, 0x0813,
+ 0x0820, 0x0821, 0x0823, 0x0827, 0x0830, 0x0833, 0x0840, 0x0843,
+ 0x084f, 0x086f, 0x0880, 0x088a, 0x08a0, 0x08ab, 0x08c0, 0x08c4,
+ 0x08d0, 0x08dd, 0x08f0, 0x08f3, 0x0900, 0x0903, 0x0908, 0x0911,
+ 0x0928, 0x093e, 0x0942, 0x094d, 0x0980, 0x0984, 0x098d, 0x0996,
+ 0x0998, 0x099e, 0x09a0, 0x09a6, 0x09a8, 0x09ae, 0x09b0, 0x09b1,
+ 0x09c2, 0x09c8, 0x0a00, 0x0a03, 0x0c00, 0x0c04, 0x0c06, 0x0c06,
+ 0x0c10, 0x0cd9, 0x0e00, 0x0e0e, 0x0e10, 0x0e13, 0x0e17, 0x0e19,
+ 0x0e1c, 0x0e2b, 0x0e30, 0x0e32, 0x0e38, 0x0e39, 0x8600, 0x8601,
+ 0x8610, 0x861b, 0x8620, 0x8620, 0x8628, 0x862b, 0x8630, 0x8637,
+ 0x8e01, 0x8e01, 0x8e04, 0x8e05, 0x8e07, 0x8e08, 0x8e0c, 0x8e0c,
+ 0x8e10, 0x8e1c, 0x8e20, 0x8e25, 0x8e28, 0x8e28, 0x8e2c, 0x8e2f,
+ 0x8e3b, 0x8e3e, 0x8e40, 0x8e43, 0x8e50, 0x8e5e, 0x8e70, 0x8e77,
+ 0x9600, 0x9604, 0x9624, 0x9637, 0x9e00, 0x9e01, 0x9e03, 0x9e0e,
+ 0x9e11, 0x9e16, 0x9e19, 0x9e19, 0x9e1c, 0x9e1c, 0x9e20, 0x9e23,
+ 0x9e30, 0x9e31, 0x9e34, 0x9e34, 0x9e70, 0x9e72, 0x9e78, 0x9e79,
+ 0x9e80, 0x9fff, 0xa600, 0xa601, 0xa603, 0xa603, 0xa60a, 0xa60a,
+ 0xa610, 0xa617, 0xa630, 0xa630,
+ ~0
+};
+
+static int a6xx_pm_resume(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+ int ret;
+
+ ret = a6xx_gmu_resume(a6xx_gpu);
+
+ gpu->needs_hw_init = true;
+
+ return ret;
+}
+
+static int a6xx_pm_suspend(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ /*
+ * Make sure the GMU is idle before continuing (because some transitions
+	 * may use VBIF)
+ */
+ a6xx_gmu_wait_for_idle(a6xx_gpu);
+
+ /* Clear the VBIF pipe before shutting down */
+ /* FIXME: This accesses the GPU - do we need to make sure it is on? */
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0xf);
+ spin_until((gpu_read(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL1) & 0xf) == 0xf);
+ gpu_write(gpu, REG_A6XX_VBIF_XIN_HALT_CTRL0, 0);
+
+ return a6xx_gmu_stop(a6xx_gpu);
+}
+
+static int a6xx_get_timestamp(struct msm_gpu *gpu, uint64_t *value)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ /* Force the GPU power on so we can read this register */
+ a6xx_gmu_set_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+
+ *value = gpu_read64(gpu, REG_A6XX_RBBM_PERFCTR_CP_0_LO,
+ REG_A6XX_RBBM_PERFCTR_CP_0_HI);
+
+ a6xx_gmu_clear_oob(&a6xx_gpu->gmu, GMU_OOB_GPU_SET);
+ return 0;
+}
+
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+static void a6xx_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p)
+{
+ adreno_show(gpu, state, p);
+}
+#endif
+
+static struct msm_ringbuffer *a6xx_active_ring(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ return a6xx_gpu->cur_ring;
+}
+
+static void a6xx_destroy(struct msm_gpu *gpu)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ struct a6xx_gpu *a6xx_gpu = to_a6xx_gpu(adreno_gpu);
+
+ if (a6xx_gpu->sqe_bo) {
+ if (a6xx_gpu->sqe_iova)
+ msm_gem_put_iova(a6xx_gpu->sqe_bo, gpu->aspace);
+ drm_gem_object_unreference_unlocked(a6xx_gpu->sqe_bo);
+ }
+
+ a6xx_gmu_remove(a6xx_gpu);
+
+ adreno_gpu_cleanup(adreno_gpu);
+ kfree(a6xx_gpu);
+}
+
+static const struct adreno_gpu_funcs funcs = {
+ .base = {
+ .get_param = adreno_get_param,
+ .hw_init = a6xx_hw_init,
+ .pm_suspend = a6xx_pm_suspend,
+ .pm_resume = a6xx_pm_resume,
+ .recover = a6xx_recover,
+ .submit = a6xx_submit,
+ .flush = a6xx_flush,
+ .active_ring = a6xx_active_ring,
+ .irq = a6xx_irq,
+ .destroy = a6xx_destroy,
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+ .show = a6xx_show,
+#endif
+ },
+ .get_timestamp = a6xx_get_timestamp,
+};
+
+struct msm_gpu *a6xx_gpu_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct platform_device *pdev = priv->gpu_pdev;
+ struct device_node *node;
+ struct a6xx_gpu *a6xx_gpu;
+ struct adreno_gpu *adreno_gpu;
+ struct msm_gpu *gpu;
+ int ret;
+
+ a6xx_gpu = kzalloc(sizeof(*a6xx_gpu), GFP_KERNEL);
+ if (!a6xx_gpu)
+ return ERR_PTR(-ENOMEM);
+
+ adreno_gpu = &a6xx_gpu->base;
+ gpu = &adreno_gpu->base;
+
+ adreno_gpu->registers = a6xx_registers;
+ adreno_gpu->reg_offsets = a6xx_register_offsets;
+
+ ret = adreno_gpu_init(dev, pdev, adreno_gpu, &funcs, 1);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
+ /* Check if there is a GMU phandle and set it up */
+ node = of_parse_phandle(pdev->dev.of_node, "gmu", 0);
+
+ /* FIXME: How do we gracefully handle this? */
+ BUG_ON(!node);
+
+ ret = a6xx_gmu_probe(a6xx_gpu, node);
+ if (ret) {
+ a6xx_destroy(&(a6xx_gpu->base.base));
+ return ERR_PTR(ret);
+ }
+
+ if (gpu->aspace)
+ msm_mmu_set_fault_handler(gpu->aspace->mmu, gpu,
+ a6xx_fault_handler);
+
+ return gpu;
+}
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_gpu.h b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
new file mode 100644
index 000000000000..dd69e5b0e692
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_gpu.h
@@ -0,0 +1,60 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved. */
+
+#ifndef __A6XX_GPU_H__
+#define __A6XX_GPU_H__
+
+#include "adreno_gpu.h"
+#include "a6xx.xml.h"
+
+#include "a6xx_gmu.h"
+
+extern bool hang_debug;
+
+struct a6xx_gpu {
+ struct adreno_gpu base;
+
+ struct drm_gem_object *sqe_bo;
+ uint64_t sqe_iova;
+
+ struct msm_ringbuffer *cur_ring;
+
+ struct a6xx_gmu gmu;
+};
+
+#define to_a6xx_gpu(x) container_of(x, struct a6xx_gpu, base)
+
+/*
+ * Given a register and a count, return a value to program into
+ * REG_CP_PROTECT_REG(n) - this will block both reads and writes for _len
+ * registers starting at _reg.
+ */
+#define A6XX_PROTECT_RW(_reg, _len) \
+ ((1 << 31) | \
+ (((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
+
+/*
+ * Same as above, but allow reads over the range. For areas of mixed use (such
+ * as performance counters) this allows us to protect a much larger range with a
+ * single register
+ */
+#define A6XX_PROTECT_RDONLY(_reg, _len) \
+ ((((_len) & 0x3FFF) << 18) | ((_reg) & 0x3FFFF))
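+
+/*
+ * Decoding one concrete entry from a6xx_hw_init() as an example:
+ * A6XX_PROTECT_RW(0xae50, 0x2) sets bit 31 (block writes as well as reads),
+ * puts the length (0x2) in the field starting at bit 18 and the base
+ * register (0xae50) in the low 18 bits.
+ */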
+
+int a6xx_gmu_resume(struct a6xx_gpu *gpu);
+int a6xx_gmu_stop(struct a6xx_gpu *gpu);
+
+int a6xx_gmu_wait_for_idle(struct a6xx_gpu *gpu);
+
+int a6xx_gmu_reset(struct a6xx_gpu *a6xx_gpu);
+bool a6xx_gmu_isidle(struct a6xx_gmu *gmu);
+
+int a6xx_gmu_set_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
+void a6xx_gmu_clear_oob(struct a6xx_gmu *gmu, enum a6xx_gmu_oob_state state);
+
+int a6xx_gmu_probe(struct a6xx_gpu *a6xx_gpu, struct device_node *node);
+void a6xx_gmu_remove(struct a6xx_gpu *a6xx_gpu);
+
+#endif /* __A6XX_GPU_H__ */
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.c b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
new file mode 100644
index 000000000000..f19ef4cb6ea4
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.c
@@ -0,0 +1,435 @@
+// SPDX-License-Identifier: GPL-2.0
+/* Copyright (c) 2017-2018 The Linux Foundation. All rights reserved. */
+
+#include <linux/completion.h>
+#include <linux/circ_buf.h>
+#include <linux/list.h>
+
+#include "a6xx_gmu.h"
+#include "a6xx_gmu.xml.h"
+
+#define HFI_MSG_ID(val) [val] = #val
+
+static const char * const a6xx_hfi_msg_id[] = {
+ HFI_MSG_ID(HFI_H2F_MSG_INIT),
+ HFI_MSG_ID(HFI_H2F_MSG_FW_VERSION),
+ HFI_MSG_ID(HFI_H2F_MSG_BW_TABLE),
+ HFI_MSG_ID(HFI_H2F_MSG_PERF_TABLE),
+ HFI_MSG_ID(HFI_H2F_MSG_TEST),
+};
+
+static int a6xx_hfi_queue_read(struct a6xx_hfi_queue *queue, u32 *data,
+ u32 dwords)
+{
+ struct a6xx_hfi_queue_header *header = queue->header;
+ u32 i, hdr, index = header->read_index;
+
+ if (header->read_index == header->write_index) {
+ header->rx_request = 1;
+ return 0;
+ }
+
+ hdr = queue->data[index];
+
+ /*
+ * If we are to assume that the GMU firmware is in fact a rational actor
+ * and is programmed to not send us a larger response than we expect
+ * then we can also assume that if the header size is unexpectedly large
+ * that it is due to memory corruption and/or hardware failure. In this
+	 * case the only reasonable course of action is to BUG() so the failure
+	 * is caught immediately.
+ */
+
+ BUG_ON(HFI_HEADER_SIZE(hdr) > dwords);
+
+ for (i = 0; i < HFI_HEADER_SIZE(hdr); i++) {
+ data[i] = queue->data[index];
+ index = (index + 1) % header->size;
+ }
+
+ header->read_index = index;
+ return HFI_HEADER_SIZE(hdr);
+}
+
+static int a6xx_hfi_queue_write(struct a6xx_gmu *gmu,
+ struct a6xx_hfi_queue *queue, u32 *data, u32 dwords)
+{
+ struct a6xx_hfi_queue_header *header = queue->header;
+ u32 i, space, index = header->write_index;
+
+ spin_lock(&queue->lock);
+
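+	/*
+	 * CIRC_SPACE() returns how many dwords fit without overrunning the
+	 * unread portion of the ring
+	 */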
+ space = CIRC_SPACE(header->write_index, header->read_index,
+ header->size);
+ if (space < dwords) {
+ header->dropped++;
+ spin_unlock(&queue->lock);
+ return -ENOSPC;
+ }
+
+ for (i = 0; i < dwords; i++) {
+ queue->data[index] = data[i];
+ index = (index + 1) % header->size;
+ }
+
+ header->write_index = index;
+ spin_unlock(&queue->lock);
+
+ gmu_write(gmu, REG_A6XX_GMU_HOST2GMU_INTR_SET, 0x01);
+ return 0;
+}
+
+struct a6xx_hfi_response {
+ u32 id;
+ u32 seqnum;
+ struct list_head node;
+ struct completion complete;
+
+ u32 error;
+ u32 payload[16];
+};
+
+/*
+ * Incoming HFI ack messages can come in out of order so we need to store all
+ * the pending messages on a list until they are handled.
+ */
+static spinlock_t hfi_ack_lock = __SPIN_LOCK_UNLOCKED(message_lock);
+static LIST_HEAD(hfi_ack_list);
+
+static void a6xx_hfi_handle_ack(struct a6xx_gmu *gmu,
+ struct a6xx_hfi_msg_response *msg)
+{
+ struct a6xx_hfi_response *resp;
+ u32 id, seqnum;
+
+ /* msg->ret_header contains the header of the message being acked */
+ id = HFI_HEADER_ID(msg->ret_header);
+ seqnum = HFI_HEADER_SEQNUM(msg->ret_header);
+
+ spin_lock(&hfi_ack_lock);
+ list_for_each_entry(resp, &hfi_ack_list, node) {
+ if (resp->id == id && resp->seqnum == seqnum) {
+ resp->error = msg->error;
+ memcpy(resp->payload, msg->payload,
+ sizeof(resp->payload));
+
+ complete(&resp->complete);
+ spin_unlock(&hfi_ack_lock);
+ return;
+ }
+ }
+ spin_unlock(&hfi_ack_lock);
+
+ dev_err(gmu->dev, "Nobody was waiting for HFI message %d\n", seqnum);
+}
+
+static void a6xx_hfi_handle_error(struct a6xx_gmu *gmu,
+ struct a6xx_hfi_msg_response *msg)
+{
+ struct a6xx_hfi_msg_error *error = (struct a6xx_hfi_msg_error *) msg;
+
+ dev_err(gmu->dev, "GMU firmware error %d\n", error->code);
+}
+
+void a6xx_hfi_task(unsigned long data)
+{
+ struct a6xx_gmu *gmu = (struct a6xx_gmu *) data;
+ struct a6xx_hfi_queue *queue = &gmu->queues[HFI_RESPONSE_QUEUE];
+ struct a6xx_hfi_msg_response resp;
+
+ for (;;) {
+ u32 id;
+ int ret = a6xx_hfi_queue_read(queue, (u32 *) &resp,
+ sizeof(resp) >> 2);
+
+		/* Returns the number of dwords copied or negative on error */
+ if (ret <= 0) {
+ if (ret < 0)
+ dev_err(gmu->dev,
+ "Unable to read the HFI message queue\n");
+ break;
+ }
+
+ id = HFI_HEADER_ID(resp.header);
+
+ if (id == HFI_F2H_MSG_ACK)
+ a6xx_hfi_handle_ack(gmu, &resp);
+ else if (id == HFI_F2H_MSG_ERROR)
+ a6xx_hfi_handle_error(gmu, &resp);
+ }
+}
+
+static int a6xx_hfi_send_msg(struct a6xx_gmu *gmu, int id,
+ void *data, u32 size, u32 *payload, u32 payload_size)
+{
+ struct a6xx_hfi_queue *queue = &gmu->queues[HFI_COMMAND_QUEUE];
+ struct a6xx_hfi_response resp = { 0 };
+ int ret, dwords = size >> 2;
+ u32 seqnum;
+
+ seqnum = atomic_inc_return(&queue->seqnum) % 0xfff;
+
+ /* First dword of the message is the message header - fill it in */
+ *((u32 *) data) = (seqnum << 20) | (HFI_MSG_CMD << 16) |
+ (dwords << 8) | id;
+
+ init_completion(&resp.complete);
+ resp.id = id;
+ resp.seqnum = seqnum;
+
+ spin_lock_bh(&hfi_ack_lock);
+ list_add_tail(&resp.node, &hfi_ack_list);
+ spin_unlock_bh(&hfi_ack_lock);
+
+ ret = a6xx_hfi_queue_write(gmu, queue, data, dwords);
+ if (ret) {
+ dev_err(gmu->dev, "Unable to send message %s id %d\n",
+ a6xx_hfi_msg_id[id], seqnum);
+ goto out;
+ }
+
+ /* Wait up to 5 seconds for the response */
+ ret = wait_for_completion_timeout(&resp.complete,
+ msecs_to_jiffies(5000));
+ if (!ret) {
+ dev_err(gmu->dev,
+ "Message %s id %d timed out waiting for response\n",
+ a6xx_hfi_msg_id[id], seqnum);
+ ret = -ETIMEDOUT;
+	} else {
+		ret = 0;
+	}
+
+out:
+ spin_lock_bh(&hfi_ack_lock);
+ list_del(&resp.node);
+ spin_unlock_bh(&hfi_ack_lock);
+
+ if (ret)
+ return ret;
+
+ if (resp.error) {
+ dev_err(gmu->dev, "Message %s id %d returned error %d\n",
+ a6xx_hfi_msg_id[id], seqnum, resp.error);
+ return -EINVAL;
+ }
+
+ if (payload && payload_size) {
+ int copy = min_t(u32, payload_size, sizeof(resp.payload));
+
+ memcpy(payload, resp.payload, copy);
+ }
+
+ return 0;
+}
+
+static int a6xx_hfi_send_gmu_init(struct a6xx_gmu *gmu, int boot_state)
+{
+ struct a6xx_hfi_msg_gmu_init_cmd msg = { 0 };
+
+ msg.dbg_buffer_addr = (u32) gmu->debug->iova;
+ msg.dbg_buffer_size = (u32) gmu->debug->size;
+ msg.boot_state = boot_state;
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_INIT, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+static int a6xx_hfi_get_fw_version(struct a6xx_gmu *gmu, u32 *version)
+{
+ struct a6xx_hfi_msg_fw_version msg = { 0 };
+
+ /* Currently supporting version 1.1 */
+ msg.supported_version = (1 << 28) | (1 << 16);
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_FW_VERSION, &msg, sizeof(msg),
+ version, sizeof(*version));
+}
+
+static int a6xx_hfi_send_perf_table(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_perf_table msg = { 0 };
+ int i;
+
+ msg.num_gpu_levels = gmu->nr_gpu_freqs;
+ msg.num_gmu_levels = gmu->nr_gmu_freqs;
+
+ for (i = 0; i < gmu->nr_gpu_freqs; i++) {
+ msg.gx_votes[i].vote = gmu->gx_arc_votes[i];
+ msg.gx_votes[i].freq = gmu->gpu_freqs[i] / 1000;
+ }
+
+ for (i = 0; i < gmu->nr_gmu_freqs; i++) {
+ msg.cx_votes[i].vote = gmu->cx_arc_votes[i];
+ msg.cx_votes[i].freq = gmu->gmu_freqs[i] / 1000;
+ }
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_PERF_TABLE, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+static int a6xx_hfi_send_bw_table(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_bw_table msg = { 0 };
+
+ /*
+ * The sdm845 GMU doesn't do bus frequency scaling on its own but it
+ * does need at least one entry in the list because it might be accessed
+ * when the GMU is shutting down. Send a single "off" entry.
+ */
+
+ msg.bw_level_num = 1;
+
+ msg.ddr_cmds_num = 3;
+ msg.ddr_wait_bitmask = 0x07;
+
+ msg.ddr_cmds_addrs[0] = 0x50000;
+ msg.ddr_cmds_addrs[1] = 0x5005c;
+ msg.ddr_cmds_addrs[2] = 0x5000c;
+
+ msg.ddr_cmds_data[0][0] = 0x40000000;
+ msg.ddr_cmds_data[0][1] = 0x40000000;
+ msg.ddr_cmds_data[0][2] = 0x40000000;
+
+ /*
+	 * These are the CX (CNOC) votes. These are used by the GMU, but the
+	 * values for the sdm845 GMU are known and fixed so we can hard code
+	 * them.
+ */
+
+ msg.cnoc_cmds_num = 3;
+ msg.cnoc_wait_bitmask = 0x05;
+
+ msg.cnoc_cmds_addrs[0] = 0x50034;
+ msg.cnoc_cmds_addrs[1] = 0x5007c;
+ msg.cnoc_cmds_addrs[2] = 0x5004c;
+
+ msg.cnoc_cmds_data[0][0] = 0x40000000;
+ msg.cnoc_cmds_data[0][1] = 0x00000000;
+ msg.cnoc_cmds_data[0][2] = 0x40000000;
+
+ msg.cnoc_cmds_data[1][0] = 0x60000001;
+ msg.cnoc_cmds_data[1][1] = 0x20000001;
+ msg.cnoc_cmds_data[1][2] = 0x60000001;
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_BW_TABLE, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+static int a6xx_hfi_send_test(struct a6xx_gmu *gmu)
+{
+ struct a6xx_hfi_msg_test msg = { 0 };
+
+ return a6xx_hfi_send_msg(gmu, HFI_H2F_MSG_TEST, &msg, sizeof(msg),
+ NULL, 0);
+}
+
+int a6xx_hfi_start(struct a6xx_gmu *gmu, int boot_state)
+{
+ int ret;
+
+ ret = a6xx_hfi_send_gmu_init(gmu, boot_state);
+ if (ret)
+ return ret;
+
+ ret = a6xx_hfi_get_fw_version(gmu, NULL);
+ if (ret)
+ return ret;
+
+ /*
+	 * We have to exchange version numbers per the sequence, but at this
+	 * point the kernel driver doesn't need to know the exact version of
+	 * the GMU firmware
+ */
+
+ ret = a6xx_hfi_send_perf_table(gmu);
+ if (ret)
+ return ret;
+
+ ret = a6xx_hfi_send_bw_table(gmu);
+ if (ret)
+ return ret;
+
+ /*
+ * Let the GMU know that there won't be any more HFI messages until next
+ * boot
+ */
+ a6xx_hfi_send_test(gmu);
+
+ return 0;
+}
+
+void a6xx_hfi_stop(struct a6xx_gmu *gmu)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(gmu->queues); i++) {
+ struct a6xx_hfi_queue *queue = &gmu->queues[i];
+
+ if (!queue->header)
+ continue;
+
+ if (queue->header->read_index != queue->header->write_index)
+ dev_err(gmu->dev, "HFI queue %d is not empty\n", i);
+
+ queue->header->read_index = 0;
+ queue->header->write_index = 0;
+ }
+}
+
+static void a6xx_hfi_queue_init(struct a6xx_hfi_queue *queue,
+ struct a6xx_hfi_queue_header *header, void *virt, u64 iova,
+ u32 id)
+{
+ spin_lock_init(&queue->lock);
+ queue->header = header;
+ queue->data = virt;
+ atomic_set(&queue->seqnum, 0);
+
+ /* Set up the shared memory header */
+ header->iova = iova;
+ header->type = 10 << 8 | id;
+ header->status = 1;
+ header->size = SZ_4K >> 2;
+ header->msg_size = 0;
+ header->dropped = 0;
+ header->rx_watermark = 1;
+ header->tx_watermark = 1;
+ header->rx_request = 1;
+ header->tx_request = 0;
+ header->read_index = 0;
+ header->write_index = 0;
+}
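The type word written above packs two fields: the constant 10 (which appears to be the HFI queue type expected by the GMU firmware) in bits [15:8] and the queue id in the low byte. A minimal sketch with hypothetical helper macros (the macro names are not from the driver):

	#include <stdint.h>
	#include <stdio.h>

	/* hypothetical helpers mirroring "header->type = 10 << 8 | id" */
	#define HFI_QUEUE_TYPE(id)	((10 << 8) | (id))
	#define HFI_QUEUE_ID(type)	((type) & 0xff)

	int main(void)
	{
		uint32_t type = HFI_QUEUE_TYPE(4);	/* the GMU response queue */

		printf("type=0x%04x id=%u\n", type, HFI_QUEUE_ID(type));
		return 0;
	}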
+
+void a6xx_hfi_init(struct a6xx_gmu *gmu)
+{
+ struct a6xx_gmu_bo *hfi = gmu->hfi;
+ struct a6xx_hfi_queue_table_header *table = hfi->virt;
+ struct a6xx_hfi_queue_header *headers = hfi->virt + sizeof(*table);
+ u64 offset;
+ int table_size;
+
+ /*
+ * The table size is the size of the table header plus all of the queue
+ * headers
+ */
+ table_size = sizeof(*table);
+ table_size += (ARRAY_SIZE(gmu->queues) *
+ sizeof(struct a6xx_hfi_queue_header));
+
+ table->version = 0;
+ table->size = table_size;
+ /* First queue header is located immediately after the table header */
+ table->qhdr0_offset = sizeof(*table) >> 2;
+ table->qhdr_size = sizeof(struct a6xx_hfi_queue_header) >> 2;
+ table->num_queues = ARRAY_SIZE(gmu->queues);
+ table->active_queues = ARRAY_SIZE(gmu->queues);
+
+ /* Command queue */
+ offset = SZ_4K;
+ a6xx_hfi_queue_init(&gmu->queues[0], &headers[0], hfi->virt + offset,
+ hfi->iova + offset, 0);
+
+ /* GMU response queue */
+ offset += SZ_4K;
+ a6xx_hfi_queue_init(&gmu->queues[1], &headers[1], hfi->virt + offset,
+ hfi->iova + offset, 4);
+}
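Putting a6xx_hfi_queue_init() and a6xx_hfi_init() together, the HFI buffer layout works out as below; this is derived from the offsets in the code above, not from separate documentation:

	/*
	 * 0x0000: queue table header, immediately followed by the per-queue
	 *         headers (table->qhdr0_offset is in dwords)
	 * 0x1000: command queue data, SZ_4K (queue index 0, id 0)
	 * 0x2000: GMU response queue data, SZ_4K (queue index 1, id 4)
	 */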
diff --git a/drivers/gpu/drm/msm/adreno/a6xx_hfi.h b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
new file mode 100644
index 000000000000..60d1319fa44f
--- /dev/null
+++ b/drivers/gpu/drm/msm/adreno/a6xx_hfi.h
@@ -0,0 +1,127 @@
+/* SPDX-License-Identifier: GPL-2.0 */
+/* Copyright (c) 2017 The Linux Foundation. All rights reserved. */
+
+#ifndef _A6XX_HFI_H_
+#define _A6XX_HFI_H_
+
+struct a6xx_hfi_queue_table_header {
+ u32 version;
+ u32 size; /* Size of the queue table in dwords */
+ u32 qhdr0_offset; /* Offset of the first queue header */
+ u32 qhdr_size; /* Size of the queue headers */
+ u32 num_queues; /* Number of total queues */
+ u32 active_queues; /* Number of active queues */
+};
+
+struct a6xx_hfi_queue_header {
+ u32 status;
+ u32 iova;
+ u32 type;
+ u32 size;
+ u32 msg_size;
+ u32 dropped;
+ u32 rx_watermark;
+ u32 tx_watermark;
+ u32 rx_request;
+ u32 tx_request;
+ u32 read_index;
+ u32 write_index;
+};
+
+struct a6xx_hfi_queue {
+ struct a6xx_hfi_queue_header *header;
+ spinlock_t lock;
+ u32 *data;
+ atomic_t seqnum;
+};
+
+/* This is the outgoing queue to the GMU */
+#define HFI_COMMAND_QUEUE 0
+
+/* This is the incoming response queue from the GMU */
+#define HFI_RESPONSE_QUEUE 1
+
+#define HFI_HEADER_ID(msg) ((msg) & 0xff)
+#define HFI_HEADER_SIZE(msg) (((msg) >> 8) & 0xff)
+#define HFI_HEADER_SEQNUM(msg) (((msg) >> 20) & 0xfff)
+
+/* FIXME: Do we need this or can we use ARRAY_SIZE? */
+#define HFI_RESPONSE_PAYLOAD_SIZE 16
+
+/* HFI message types */
+
+#define HFI_MSG_CMD 0
+#define HFI_MSG_ACK 2
+
+#define HFI_F2H_MSG_ACK 126
+
+struct a6xx_hfi_msg_response {
+ u32 header;
+ u32 ret_header;
+ u32 error;
+ u32 payload[HFI_RESPONSE_PAYLOAD_SIZE];
+};
+
+#define HFI_F2H_MSG_ERROR 100
+
+struct a6xx_hfi_msg_error {
+ u32 header;
+ u32 code;
+ u32 payload[2];
+};
+
+#define HFI_H2F_MSG_INIT 0
+
+struct a6xx_hfi_msg_gmu_init_cmd {
+ u32 header;
+ u32 seg_id;
+ u32 dbg_buffer_addr;
+ u32 dbg_buffer_size;
+ u32 boot_state;
+};
+
+#define HFI_H2F_MSG_FW_VERSION 1
+
+struct a6xx_hfi_msg_fw_version {
+ u32 header;
+ u32 supported_version;
+};
+
+#define HFI_H2F_MSG_PERF_TABLE 4
+
+struct perf_level {
+ u32 vote;
+ u32 freq;
+};
+
+struct a6xx_hfi_msg_perf_table {
+ u32 header;
+ u32 num_gpu_levels;
+ u32 num_gmu_levels;
+
+ struct perf_level gx_votes[16];
+ struct perf_level cx_votes[4];
+};
+
+#define HFI_H2F_MSG_BW_TABLE 3
+
+struct a6xx_hfi_msg_bw_table {
+ u32 header;
+ u32 bw_level_num;
+ u32 cnoc_cmds_num;
+ u32 ddr_cmds_num;
+ u32 cnoc_wait_bitmask;
+ u32 ddr_wait_bitmask;
+ u32 cnoc_cmds_addrs[6];
+ u32 cnoc_cmds_data[2][6];
+ u32 ddr_cmds_addrs[8];
+ u32 ddr_cmds_data[16][8];
+};
+
+#define HFI_H2F_MSG_TEST 5
+
+struct a6xx_hfi_msg_test {
+ u32 header;
+};
+
+#endif
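The HFI_HEADER_* macros above imply the on-wire header layout: message id in bits [7:0], size in dwords in [15:8], and a sequence number in [31:20]. A hedged, standalone sketch of a matching encode helper (the send path that actually builds the header word is not part of this hunk, so treat the inverse as an assumption):

	#include <stdint.h>
	#include <stdio.h>

	#define HFI_HEADER_ID(msg)	((msg) & 0xff)
	#define HFI_HEADER_SIZE(msg)	(((msg) >> 8) & 0xff)
	#define HFI_HEADER_SEQNUM(msg)	(((msg) >> 20) & 0xfff)

	/* assumed inverse of the decode macros above, for illustration only */
	static uint32_t hfi_header(uint32_t id, uint32_t size, uint32_t seqnum)
	{
		return (id & 0xff) | ((size & 0xff) << 8) | ((seqnum & 0xfff) << 20);
	}

	int main(void)
	{
		uint32_t hdr = hfi_header(4, 43, 7);

		printf("id=%u size=%u seqnum=%u\n", HFI_HEADER_ID(hdr),
		       HFI_HEADER_SIZE(hdr), HFI_HEADER_SEQNUM(hdr));
		return 0;
	}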
diff --git a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
index b634cf71352b..5dace1350810 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_common.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -44,6 +46,14 @@ WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
*/
+enum chip {
+ A2XX = 0,
+ A3XX = 0,
+ A4XX = 0,
+ A5XX = 0,
+ A6XX = 0,
+};
+
enum adreno_pa_su_sc_draw {
PC_DRAW_POINTS = 0,
PC_DRAW_LINES = 1,
@@ -181,6 +191,12 @@ enum a3xx_rb_blend_opcode {
BLEND_MAX_DST_SRC = 4,
};
+enum a4xx_tess_spacing {
+ EQUAL_SPACING = 0,
+ ODD_SPACING = 2,
+ EVEN_SPACING = 3,
+};
+
#define REG_AXXX_CP_RB_BASE 0x000001c0
#define REG_AXXX_CP_RB_CNTL 0x000001c1
diff --git a/drivers/gpu/drm/msm/adreno/adreno_device.c b/drivers/gpu/drm/msm/adreno/adreno_device.c
index 0ae5ace65462..7d3e9a129ac7 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_device.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_device.c
@@ -35,6 +35,7 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_PFP] = "a300_pfp.fw",
},
.gmem = SZ_256K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(3, 0, 6, 0),
@@ -45,6 +46,7 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_PFP] = "a300_pfp.fw",
},
.gmem = SZ_128K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(3, 2, ANY_ID, ANY_ID),
@@ -55,6 +57,7 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_PFP] = "a300_pfp.fw",
},
.gmem = SZ_512K,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(3, 3, 0, ANY_ID),
@@ -65,6 +68,7 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_PFP] = "a330_pfp.fw",
},
.gmem = SZ_1M,
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a3xx_gpu_init,
}, {
.rev = ADRENO_REV(4, 2, 0, ANY_ID),
@@ -75,6 +79,7 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_PFP] = "a420_pfp.fw",
},
.gmem = (SZ_1M + SZ_512K),
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
.rev = ADRENO_REV(4, 3, 0, ANY_ID),
@@ -85,6 +90,7 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_PFP] = "a420_pfp.fw",
},
.gmem = (SZ_1M + SZ_512K),
+ .inactive_period = DRM_MSM_INACTIVE_PERIOD,
.init = a4xx_gpu_init,
}, {
.rev = ADRENO_REV(5, 3, 0, 2),
@@ -96,10 +102,25 @@ static const struct adreno_info gpulist[] = {
[ADRENO_FW_GPMU] = "a530v3_gpmu.fw2",
},
.gmem = SZ_1M,
+ /*
+ * Increase the inactive period to 250ms to avoid bouncing
+ * the GDSC, which appears to make it grumpy
+ */
+ .inactive_period = 250,
.quirks = ADRENO_QUIRK_TWO_PASS_USE_WFI |
ADRENO_QUIRK_FAULT_DETECT_MASK,
.init = a5xx_gpu_init,
.zapfw = "a530_zap.mdt",
+ }, {
+ .rev = ADRENO_REV(6, 3, 0, ANY_ID),
+ .revn = 630,
+ .name = "A630",
+ .fw = {
+ [ADRENO_FW_SQE] = "a630_sqe.fw",
+ [ADRENO_FW_GMU] = "a630_gmu.bin",
+ },
+ .gmem = SZ_1M,
+ .init = a6xx_gpu_init,
},
};
@@ -116,6 +137,8 @@ MODULE_FIRMWARE("qcom/a530_zap.mdt");
MODULE_FIRMWARE("qcom/a530_zap.b00");
MODULE_FIRMWARE("qcom/a530_zap.b01");
MODULE_FIRMWARE("qcom/a530_zap.b02");
+MODULE_FIRMWARE("qcom/a630_sqe.fw");
+MODULE_FIRMWARE("qcom/a630_gmu.bin");
static inline bool _rev_match(uint8_t entry, uint8_t id)
{
@@ -144,6 +167,7 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
struct msm_drm_private *priv = dev->dev_private;
struct platform_device *pdev = priv->gpu_pdev;
struct msm_gpu *gpu = NULL;
+ struct adreno_gpu *adreno_gpu;
int ret;
if (pdev)
@@ -154,11 +178,31 @@ struct msm_gpu *adreno_load_gpu(struct drm_device *dev)
return NULL;
}
- pm_runtime_get_sync(&pdev->dev);
+ adreno_gpu = to_adreno_gpu(gpu);
+
+ /*
+ * The most common reason for HW init to fail is that the firmware isn't
+ * loaded yet. Try loading it first and don't bother continuing
+ * otherwise
+ */
+
+ ret = adreno_load_fw(adreno_gpu);
+ if (ret)
+ return NULL;
+
+ /* Make sure pm runtime is active and reset any previous errors */
+ pm_runtime_set_active(&pdev->dev);
+
+ ret = pm_runtime_get_sync(&pdev->dev);
+ if (ret < 0) {
+ dev_err(dev->dev, "Couldn't power up the GPU: %d\n", ret);
+ return NULL;
+ }
+
mutex_lock(&dev->struct_mutex);
ret = msm_gpu_hw_init(gpu);
mutex_unlock(&dev->struct_mutex);
- pm_runtime_put_sync(&pdev->dev);
+ pm_runtime_put_autosuspend(&pdev->dev);
if (ret) {
dev_err(dev->dev, "gpu hw init failed: %d\n", ret);
return NULL;
@@ -316,6 +360,7 @@ static int adreno_suspend(struct device *dev)
#endif
static const struct dev_pm_ops adreno_pm_ops = {
+ SET_SYSTEM_SLEEP_PM_OPS(pm_runtime_force_suspend, pm_runtime_force_resume)
SET_RUNTIME_PM_OPS(adreno_suspend, adreno_resume, NULL)
};
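The switch from pm_runtime_put_sync() to pm_runtime_put_autosuspend() means the GPU stays powered for the per-chip inactive_period after the last user drops its reference, instead of suspending immediately. A minimal sketch of the idiom, assuming a device that has already called pm_runtime_use_autosuspend() and pm_runtime_enable():

	#include <linux/pm_runtime.h>

	static int do_gpu_work(struct device *dev)
	{
		int ret;

		ret = pm_runtime_get_sync(dev);	/* power up or bump the refcount */
		if (ret < 0) {
			pm_runtime_put_noidle(dev);	/* balance the failed get */
			return ret;
		}

		/* ... touch the hardware ... */

		pm_runtime_mark_last_busy(dev);
		pm_runtime_put_autosuspend(dev);	/* idle after inactive_period ms */
		return 0;
	}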
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.c b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
index 17d0506d058c..da1363a0c54d 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.c
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.c
@@ -17,7 +17,10 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/ascii85.h>
+#include <linux/kernel.h>
#include <linux/pm_opp.h>
+#include <linux/slab.h>
#include "adreno_gpu.h"
#include "msm_gem.h"
#include "msm_mmu.h"
@@ -70,10 +73,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
{
struct drm_device *drm = adreno_gpu->base.dev;
const struct firmware *fw = NULL;
- char newname[strlen("qcom/") + strlen(fwname) + 1];
+ char *newname;
int ret;
- sprintf(newname, "qcom/%s", fwname);
+ newname = kasprintf(GFP_KERNEL, "qcom/%s", fwname);
+ if (!newname)
+ return ERR_PTR(-ENOMEM);
/*
* Try first to load from qcom/$fwfile using a direct load (to avoid
@@ -87,11 +92,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
dev_info(drm->dev, "loaded %s from new location\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_NEW;
- return fw;
+ goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
dev_err(drm->dev, "failed to load %s: %d\n",
newname, ret);
- return ERR_PTR(ret);
+ fw = ERR_PTR(ret);
+ goto out;
}
}
@@ -106,11 +112,12 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
dev_info(drm->dev, "loaded %s from legacy location\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_LEGACY;
- return fw;
+ goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
dev_err(drm->dev, "failed to load %s: %d\n",
fwname, ret);
- return ERR_PTR(ret);
+ fw = ERR_PTR(ret);
+ goto out;
}
}
@@ -126,19 +133,23 @@ adreno_request_fw(struct adreno_gpu *adreno_gpu, const char *fwname)
dev_info(drm->dev, "loaded %s with helper\n",
newname);
adreno_gpu->fwloc = FW_LOCATION_HELPER;
- return fw;
+ goto out;
} else if (adreno_gpu->fwloc != FW_LOCATION_UNKNOWN) {
dev_err(drm->dev, "failed to load %s: %d\n",
newname, ret);
- return ERR_PTR(ret);
+ fw = ERR_PTR(ret);
+ goto out;
}
}
dev_err(drm->dev, "failed to load %s\n", fwname);
- return ERR_PTR(-ENOENT);
+ fw = ERR_PTR(-ENOENT);
+out:
+ kfree(newname);
+ return fw;
}
-static int adreno_load_fw(struct adreno_gpu *adreno_gpu)
+int adreno_load_fw(struct adreno_gpu *adreno_gpu)
{
int i;
@@ -368,40 +379,185 @@ bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring)
return false;
}
-#ifdef CONFIG_DEBUG_FS
-void adreno_show(struct msm_gpu *gpu, struct seq_file *m)
+int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state)
{
struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int i, count = 0;
+
+ kref_init(&state->ref);
+
+ ktime_get_real_ts64(&state->time);
+
+ for (i = 0; i < gpu->nr_rings; i++) {
+ int size = 0, j;
+
+ state->ring[i].fence = gpu->rb[i]->memptrs->fence;
+ state->ring[i].iova = gpu->rb[i]->iova;
+ state->ring[i].seqno = gpu->rb[i]->seqno;
+ state->ring[i].rptr = get_rptr(adreno_gpu, gpu->rb[i]);
+ state->ring[i].wptr = get_wptr(gpu->rb[i]);
+
+ /* Copy at least 'wptr' dwords of the data */
+ size = state->ring[i].wptr;
+
+ /* After wptr, find the last non-zero dword to save space */
+ for (j = state->ring[i].wptr; j < MSM_GPU_RINGBUFFER_SZ >> 2; j++)
+ if (gpu->rb[i]->start[j])
+ size = j + 1;
+
+ if (size) {
+ state->ring[i].data = kmalloc(size << 2, GFP_KERNEL);
+ if (state->ring[i].data) {
+ memcpy(state->ring[i].data, gpu->rb[i]->start, size << 2);
+ state->ring[i].data_size = size << 2;
+ }
+ }
+ }
+
+ /* Count the number of registers */
+ for (i = 0; adreno_gpu->registers[i] != ~0; i += 2)
+ count += adreno_gpu->registers[i + 1] -
+ adreno_gpu->registers[i] + 1;
+
+ state->registers = kcalloc(count * 2, sizeof(u32), GFP_KERNEL);
+ if (state->registers) {
+ int pos = 0;
+
+ for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
+ u32 start = adreno_gpu->registers[i];
+ u32 end = adreno_gpu->registers[i + 1];
+ u32 addr;
+
+ for (addr = start; addr <= end; addr++) {
+ state->registers[pos++] = addr;
+ state->registers[pos++] = gpu_read(gpu, addr);
+ }
+ }
+
+ state->nr_registers = count;
+ }
+
+ return 0;
+}
+
+void adreno_gpu_state_destroy(struct msm_gpu_state *state)
+{
int i;
- seq_printf(m, "revision: %d (%d.%d.%d.%d)\n",
+ for (i = 0; i < ARRAY_SIZE(state->ring); i++)
+ kfree(state->ring[i].data);
+
+ for (i = 0; state->bos && i < state->nr_bos; i++)
+ kvfree(state->bos[i].data);
+
+ kfree(state->bos);
+ kfree(state->comm);
+ kfree(state->cmd);
+ kfree(state->registers);
+}
+
+static void adreno_gpu_state_kref_destroy(struct kref *kref)
+{
+ struct msm_gpu_state *state = container_of(kref,
+ struct msm_gpu_state, ref);
+
+ adreno_gpu_state_destroy(state);
+ kfree(state);
+}
+
+int adreno_gpu_state_put(struct msm_gpu_state *state)
+{
+ if (IS_ERR_OR_NULL(state))
+ return 1;
+
+ return kref_put(&state->ref, adreno_gpu_state_kref_destroy);
+}
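GPU crash states are reference counted: adreno_gpu_state_get() initializes the kref to one, extra holders take references with kref_get(), and the state is freed on the last adreno_gpu_state_put(). A sketch of the expected lifecycle; note the state must be heap-allocated because the kref destructor above kfree()s it:

	/* illustrative only; "state" must come from kzalloc() */
	static void capture_and_release(struct msm_gpu *gpu)
	{
		struct msm_gpu_state *state = kzalloc(sizeof(*state), GFP_KERNEL);

		if (!state)
			return;

		adreno_gpu_state_get(gpu, state);	/* kref starts at 1 */

		/* ... hand the state to a printer or devcoredump ... */

		adreno_gpu_state_put(state);		/* frees on the last put */
	}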
+
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+
+static void adreno_show_object(struct drm_printer *p, u32 *ptr, int len)
+{
+ char out[ASCII85_BUFSZ];
+ long l, datalen, i;
+
+ if (!ptr || !len)
+ return;
+
+ /*
+ * Only dump the non-zero part of the buffer - rarely will any data
+ * completely fill the entire allocated size of the buffer
+ */
+ for (datalen = 0, i = 0; i < len >> 2; i++) {
+ if (ptr[i])
+ datalen = (i << 2) + 1;
+ }
+
+ /* Skip printing the object if it is empty */
+ if (datalen == 0)
+ return;
+
+ l = ascii85_encode_len(datalen);
+
+ drm_puts(p, " data: !!ascii85 |\n");
+ drm_puts(p, " ");
+
+ for (i = 0; i < l; i++)
+ drm_puts(p, ascii85_encode(ptr[i], out));
+
+ drm_puts(p, "\n");
+}
+
+void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p)
+{
+ struct adreno_gpu *adreno_gpu = to_adreno_gpu(gpu);
+ int i;
+
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ drm_printf(p, "revision: %d (%d.%d.%d.%d)\n",
adreno_gpu->info->revn, adreno_gpu->rev.core,
adreno_gpu->rev.major, adreno_gpu->rev.minor,
adreno_gpu->rev.patchid);
- for (i = 0; i < gpu->nr_rings; i++) {
- struct msm_ringbuffer *ring = gpu->rb[i];
+ drm_printf(p, "rbbm-status: 0x%08x\n", state->rbbm_status);
- seq_printf(m, "rb %d: fence: %d/%d\n", i,
- ring->memptrs->fence, ring->seqno);
+ drm_puts(p, "ringbuffer:\n");
- seq_printf(m, " rptr: %d\n",
- get_rptr(adreno_gpu, ring));
- seq_printf(m, "rb wptr: %d\n", get_wptr(ring));
+ for (i = 0; i < gpu->nr_rings; i++) {
+ drm_printf(p, " - id: %d\n", i);
+ drm_printf(p, " iova: 0x%016llx\n", state->ring[i].iova);
+ drm_printf(p, " last-fence: %d\n", state->ring[i].seqno);
+ drm_printf(p, " retired-fence: %d\n", state->ring[i].fence);
+ drm_printf(p, " rptr: %d\n", state->ring[i].rptr);
+ drm_printf(p, " wptr: %d\n", state->ring[i].wptr);
+ drm_printf(p, " size: %d\n", MSM_GPU_RINGBUFFER_SZ);
+
+ adreno_show_object(p, state->ring[i].data,
+ state->ring[i].data_size);
}
- /* dump these out in a form that can be parsed by demsm: */
- seq_printf(m, "IO:region %s 00000000 00020000\n", gpu->name);
- for (i = 0; adreno_gpu->registers[i] != ~0; i += 2) {
- uint32_t start = adreno_gpu->registers[i];
- uint32_t end = adreno_gpu->registers[i+1];
- uint32_t addr;
+ if (state->bos) {
+ drm_puts(p, "bos:\n");
- for (addr = start; addr <= end; addr++) {
- uint32_t val = gpu_read(gpu, addr);
- seq_printf(m, "IO:R %08x %08x\n", addr<<2, val);
+ for (i = 0; i < state->nr_bos; i++) {
+ drm_printf(p, " - iova: 0x%016llx\n",
+ state->bos[i].iova);
+ drm_printf(p, " size: %zd\n", state->bos[i].size);
+
+ adreno_show_object(p, state->bos[i].data,
+ state->bos[i].size);
}
}
+
+ drm_puts(p, "registers:\n");
+
+ for (i = 0; i < state->nr_registers; i++) {
+ drm_printf(p, " - { offset: 0x%04x, value: 0x%08x }\n",
+ state->registers[i * 2] << 2,
+ state->registers[(i * 2) + 1]);
+ }
}
#endif
@@ -565,7 +721,8 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
adreno_get_pwrlevels(&pdev->dev, gpu);
- pm_runtime_set_autosuspend_delay(&pdev->dev, DRM_MSM_INACTIVE_PERIOD);
+ pm_runtime_set_autosuspend_delay(&pdev->dev,
+ adreno_gpu->info->inactive_period);
pm_runtime_use_autosuspend(&pdev->dev);
pm_runtime_enable(&pdev->dev);
diff --git a/drivers/gpu/drm/msm/adreno/adreno_gpu.h b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
index d6b0e7b813f4..de6e6ee42fba 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_gpu.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_gpu.h
@@ -50,7 +50,9 @@ enum adreno_regs {
enum {
ADRENO_FW_PM4 = 0,
+ ADRENO_FW_SQE = 0, /* a6xx */
ADRENO_FW_PFP = 1,
+ ADRENO_FW_GMU = 1, /* a6xx */
ADRENO_FW_GPMU = 2,
ADRENO_FW_MAX,
};
@@ -84,6 +86,7 @@ struct adreno_info {
enum adreno_quirks quirks;
struct msm_gpu *(*init)(struct drm_device *dev);
const char *zapfw;
+ u32 inactive_period;
};
const struct adreno_info *adreno_info(struct adreno_rev rev);
@@ -214,8 +217,9 @@ void adreno_submit(struct msm_gpu *gpu, struct msm_gem_submit *submit,
struct msm_file_private *ctx);
void adreno_flush(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
bool adreno_idle(struct msm_gpu *gpu, struct msm_ringbuffer *ring);
-#ifdef CONFIG_DEBUG_FS
-void adreno_show(struct msm_gpu *gpu, struct seq_file *m);
+#if defined(CONFIG_DEBUG_FS) || defined(CONFIG_DEV_COREDUMP)
+void adreno_show(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p);
#endif
void adreno_dump_info(struct msm_gpu *gpu);
void adreno_dump(struct msm_gpu *gpu);
@@ -226,7 +230,12 @@ int adreno_gpu_init(struct drm_device *drm, struct platform_device *pdev,
struct adreno_gpu *gpu, const struct adreno_gpu_funcs *funcs,
int nr_rings);
void adreno_gpu_cleanup(struct adreno_gpu *gpu);
+int adreno_load_fw(struct adreno_gpu *adreno_gpu);
+void adreno_gpu_state_destroy(struct msm_gpu_state *state);
+
+int adreno_gpu_state_get(struct msm_gpu *gpu, struct msm_gpu_state *state);
+int adreno_gpu_state_put(struct msm_gpu_state *state);
/* ringbuffer helpers (the parts that are adreno specific) */
@@ -328,6 +337,7 @@ static inline void adreno_gpu_write(struct adreno_gpu *gpu,
struct msm_gpu *a3xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a4xx_gpu_init(struct drm_device *dev);
struct msm_gpu *a5xx_gpu_init(struct drm_device *dev);
+struct msm_gpu *a6xx_gpu_init(struct drm_device *dev);
static inline void adreno_gpu_write64(struct adreno_gpu *gpu,
enum adreno_regs lo, enum adreno_regs hi, u64 data)
diff --git a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
index fb605a3534cf..03a91e10b310 100644
--- a/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
+++ b/drivers/gpu/drm/msm/adreno/adreno_pm4.xml.h
@@ -8,17 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/adreno.xml ( 431 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a2xx.xml ( 37162 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_common.xml ( 13324 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/adreno_pm4.xml ( 31866 bytes, from 2017-06-06 18:26:14)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a4xx.xml ( 111898 bytes, from 2017-06-06 18:23:59)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/a5xx.xml ( 139480 bytes, from 2017-06-16 12:44:39)
-- /home/robclark/src/freedreno/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/adreno.xml ( 501 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a2xx.xml ( 36805 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_common.xml ( 13634 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/adreno_pm4.xml ( 42393 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a3xx.xml ( 83840 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a4xx.xml ( 112086 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/a5xx.xml ( 147240 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx.xml ( 101627 bytes, from 2018-08-06 18:45:45)
+- /home/robclark/src/envytools/rnndb/adreno/a6xx_gmu.xml ( 10431 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/adreno/ocmem.xml ( 1773 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
@@ -71,7 +73,8 @@ enum vgt_event_type {
FLUSH_SO_1 = 18,
FLUSH_SO_2 = 19,
FLUSH_SO_3 = 20,
- UNK_19 = 25,
+ PC_CCU_INVALIDATE_DEPTH = 24,
+ PC_CCU_INVALIDATE_COLOR = 25,
UNK_1C = 28,
UNK_1D = 29,
BLIT = 30,
@@ -199,9 +202,12 @@ enum adreno_pm4_type3_packets {
CP_WAIT_MEM_WRITES = 18,
CP_COND_REG_EXEC = 71,
CP_MEM_TO_REG = 66,
+ CP_EXEC_CS_INDIRECT = 65,
CP_EXEC_CS = 51,
CP_PERFCOUNTER_ACTION = 80,
CP_SMMU_TABLE_UPDATE = 83,
+ CP_SET_MARKER = 101,
+ CP_SET_PSEUDO_REG = 86,
CP_CONTEXT_REG_BUNCH = 92,
CP_YIELD_ENABLE = 28,
CP_SKIP_IB2_ENABLE_GLOBAL = 29,
@@ -215,7 +221,10 @@ enum adreno_pm4_type3_packets {
CP_COMPUTE_CHECKPOINT = 110,
CP_MEM_TO_MEM = 115,
CP_BLIT = 44,
- CP_UNK_39 = 57,
+ CP_REG_TEST = 57,
+ CP_SET_MODE = 99,
+ CP_LOAD_STATE6_GEOM = 50,
+ CP_LOAD_STATE6_FRAG = 52,
IN_IB_PREFETCH_END = 23,
IN_SUBBLK_PREFETCH = 31,
IN_INSTR_PREFETCH = 32,
@@ -224,6 +233,11 @@ enum adreno_pm4_type3_packets {
IN_INCR_UPDT_STATE = 85,
IN_INCR_UPDT_CONST = 86,
IN_INCR_UPDT_INSTR = 87,
+ PKT4 = 4,
+ CP_UNK_A6XX_14 = 20,
+ CP_UNK_A6XX_36 = 54,
+ CP_UNK_A6XX_55 = 85,
+ UNK_A6XX_6D = 109,
};
enum adreno_state_block {
@@ -278,6 +292,33 @@ enum a4xx_state_src {
SS4_INDIRECT = 2,
};
+enum a6xx_state_block {
+ SB6_VS_TEX = 0,
+ SB6_HS_TEX = 1,
+ SB6_DS_TEX = 2,
+ SB6_GS_TEX = 3,
+ SB6_FS_TEX = 4,
+ SB6_CS_TEX = 5,
+ SB6_VS_SHADER = 8,
+ SB6_HS_SHADER = 9,
+ SB6_DS_SHADER = 10,
+ SB6_GS_SHADER = 11,
+ SB6_FS_SHADER = 12,
+ SB6_CS_SHADER = 13,
+ SB6_SSBO = 14,
+ SB6_CS_SSBO = 15,
+};
+
+enum a6xx_state_type {
+ ST6_SHADER = 0,
+ ST6_CONSTANTS = 1,
+};
+
+enum a6xx_state_src {
+ SS6_DIRECT = 0,
+ SS6_INDIRECT = 2,
+};
+
enum a4xx_index_size {
INDEX4_SIZE_8_BIT = 0,
INDEX4_SIZE_16_BIT = 1,
@@ -300,6 +341,7 @@ enum render_mode_cmd {
GMEM = 3,
BLIT2D = 5,
BLIT2DSCALE = 7,
+ END2D = 8,
};
enum cp_blit_cmd {
@@ -308,6 +350,22 @@ enum cp_blit_cmd {
BLIT_OP_SCALE = 3,
};
+enum a6xx_render_mode {
+ RM6_BYPASS = 1,
+ RM6_BINNING = 2,
+ RM6_GMEM = 4,
+ RM6_BLIT2D = 5,
+ RM6_RESOLVE = 6,
+};
+
+enum pseudo_reg {
+ SMMU_INFO = 0,
+ NON_SECURE_SAVE_ADDR = 1,
+ SECURE_SAVE_ADDR = 2,
+ NON_PRIV_SAVE_ADDR = 3,
+ COUNTER = 4,
+};
+
#define REG_CP_LOAD_STATE_0 0x00000000
#define CP_LOAD_STATE_0_DST_OFF__MASK 0x0000ffff
#define CP_LOAD_STATE_0_DST_OFF__SHIFT 0
@@ -349,7 +407,7 @@ static inline uint32_t CP_LOAD_STATE_1_EXT_SRC_ADDR(uint32_t val)
}
#define REG_CP_LOAD_STATE4_0 0x00000000
-#define CP_LOAD_STATE4_0_DST_OFF__MASK 0x0000ffff
+#define CP_LOAD_STATE4_0_DST_OFF__MASK 0x00003fff
#define CP_LOAD_STATE4_0_DST_OFF__SHIFT 0
static inline uint32_t CP_LOAD_STATE4_0_DST_OFF(uint32_t val)
{
@@ -396,6 +454,54 @@ static inline uint32_t CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI(uint32_t val)
return ((val) << CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__SHIFT) & CP_LOAD_STATE4_2_EXT_SRC_ADDR_HI__MASK;
}
+#define REG_CP_LOAD_STATE6_0 0x00000000
+#define CP_LOAD_STATE6_0_DST_OFF__MASK 0x00003fff
+#define CP_LOAD_STATE6_0_DST_OFF__SHIFT 0
+static inline uint32_t CP_LOAD_STATE6_0_DST_OFF(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE6_0_DST_OFF__SHIFT) & CP_LOAD_STATE6_0_DST_OFF__MASK;
+}
+#define CP_LOAD_STATE6_0_STATE_TYPE__MASK 0x00004000
+#define CP_LOAD_STATE6_0_STATE_TYPE__SHIFT 14
+static inline uint32_t CP_LOAD_STATE6_0_STATE_TYPE(enum a6xx_state_type val)
+{
+ return ((val) << CP_LOAD_STATE6_0_STATE_TYPE__SHIFT) & CP_LOAD_STATE6_0_STATE_TYPE__MASK;
+}
+#define CP_LOAD_STATE6_0_STATE_SRC__MASK 0x00030000
+#define CP_LOAD_STATE6_0_STATE_SRC__SHIFT 16
+static inline uint32_t CP_LOAD_STATE6_0_STATE_SRC(enum a6xx_state_src val)
+{
+ return ((val) << CP_LOAD_STATE6_0_STATE_SRC__SHIFT) & CP_LOAD_STATE6_0_STATE_SRC__MASK;
+}
+#define CP_LOAD_STATE6_0_STATE_BLOCK__MASK 0x003c0000
+#define CP_LOAD_STATE6_0_STATE_BLOCK__SHIFT 18
+static inline uint32_t CP_LOAD_STATE6_0_STATE_BLOCK(enum a6xx_state_block val)
+{
+ return ((val) << CP_LOAD_STATE6_0_STATE_BLOCK__SHIFT) & CP_LOAD_STATE6_0_STATE_BLOCK__MASK;
+}
+#define CP_LOAD_STATE6_0_NUM_UNIT__MASK 0xffc00000
+#define CP_LOAD_STATE6_0_NUM_UNIT__SHIFT 22
+static inline uint32_t CP_LOAD_STATE6_0_NUM_UNIT(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE6_0_NUM_UNIT__SHIFT) & CP_LOAD_STATE6_0_NUM_UNIT__MASK;
+}
+
+#define REG_CP_LOAD_STATE6_1 0x00000001
+#define CP_LOAD_STATE6_1_EXT_SRC_ADDR__MASK 0xfffffffc
+#define CP_LOAD_STATE6_1_EXT_SRC_ADDR__SHIFT 2
+static inline uint32_t CP_LOAD_STATE6_1_EXT_SRC_ADDR(uint32_t val)
+{
+ return ((val >> 2) << CP_LOAD_STATE6_1_EXT_SRC_ADDR__SHIFT) & CP_LOAD_STATE6_1_EXT_SRC_ADDR__MASK;
+}
+
+#define REG_CP_LOAD_STATE6_2 0x00000002
+#define CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__MASK 0xffffffff
+#define CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__SHIFT 0
+static inline uint32_t CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI(uint32_t val)
+{
+ return ((val) << CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__SHIFT) & CP_LOAD_STATE6_2_EXT_SRC_ADDR_HI__MASK;
+}
+
#define REG_CP_DRAW_INDX_0 0x00000000
#define CP_DRAW_INDX_0_VIZ_QUERY__MASK 0xffffffff
#define CP_DRAW_INDX_0_VIZ_QUERY__SHIFT 0
@@ -580,6 +686,153 @@ static inline uint32_t CP_DRAW_INDX_OFFSET_5_INDX_SIZE(uint32_t val)
return ((val) << CP_DRAW_INDX_OFFSET_5_INDX_SIZE__SHIFT) & CP_DRAW_INDX_OFFSET_5_INDX_SIZE__MASK;
}
+#define REG_A4XX_CP_DRAW_INDIRECT_0 0x00000000
+#define A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__MASK 0x0000003f
+#define A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_PRIM_TYPE__MASK;
+}
+#define A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__MASK 0x000000c0
+#define A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_SOURCE_SELECT__MASK;
+}
+#define A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__MASK 0x00000300
+#define A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__SHIFT 8
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_VIS_CULL__MASK;
+}
+#define A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__MASK 0x00000c00
+#define A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__SHIFT 10
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE(enum a4xx_index_size val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_INDEX_SIZE__MASK;
+}
+#define A4XX_CP_DRAW_INDIRECT_0_TESS_MODE__MASK 0x01f00000
+#define A4XX_CP_DRAW_INDIRECT_0_TESS_MODE__SHIFT 20
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_0_TESS_MODE(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_0_TESS_MODE__SHIFT) & A4XX_CP_DRAW_INDIRECT_0_TESS_MODE__MASK;
+}
+
+#define REG_A4XX_CP_DRAW_INDIRECT_1 0x00000001
+#define A4XX_CP_DRAW_INDIRECT_1_INDIRECT__MASK 0xffffffff
+#define A4XX_CP_DRAW_INDIRECT_1_INDIRECT__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDIRECT_1_INDIRECT(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDIRECT_1_INDIRECT__SHIFT) & A4XX_CP_DRAW_INDIRECT_1_INDIRECT__MASK;
+}
+
+
+#define REG_A5XX_CP_DRAW_INDIRECT_2 0x00000002
+#define A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__SHIFT) & A5XX_CP_DRAW_INDIRECT_2_INDIRECT_HI__MASK;
+}
+
+#define REG_A4XX_CP_DRAW_INDX_INDIRECT_0 0x00000000
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__MASK 0x0000003f
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE(enum pc_di_primtype val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_PRIM_TYPE__MASK;
+}
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__MASK 0x000000c0
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__SHIFT 6
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT(enum pc_di_src_sel val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_SOURCE_SELECT__MASK;
+}
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__MASK 0x00000300
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__SHIFT 8
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL(enum pc_di_vis_cull_mode val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_VIS_CULL__MASK;
+}
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__MASK 0x00000c00
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__SHIFT 10
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE(enum a4xx_index_size val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_INDEX_SIZE__MASK;
+}
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE__MASK 0x01f00000
+#define A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE__SHIFT 20
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_0_TESS_MODE__MASK;
+}
+
+
+#define REG_A4XX_CP_DRAW_INDX_INDIRECT_1 0x00000001
+#define A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__MASK 0xffffffff
+#define A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE__MASK;
+}
+
+#define REG_A4XX_CP_DRAW_INDX_INDIRECT_2 0x00000002
+#define A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__MASK 0xffffffff
+#define A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_2_INDX_SIZE__MASK;
+}
+
+#define REG_A4XX_CP_DRAW_INDX_INDIRECT_3 0x00000003
+#define A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__MASK 0xffffffff
+#define A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__SHIFT 0
+static inline uint32_t A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT(uint32_t val)
+{
+ return ((val) << A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__SHIFT) & A4XX_CP_DRAW_INDX_INDIRECT_3_INDIRECT__MASK;
+}
+
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_1 0x00000001
+#define A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_1_INDX_BASE_LO__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_2 0x00000002
+#define A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_2_INDX_BASE_HI__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_3 0x00000003
+#define A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_3_MAX_INDICES__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_4 0x00000004
+#define A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_4_INDIRECT_LO__MASK;
+}
+
+#define REG_A5XX_CP_DRAW_INDX_INDIRECT_5 0x00000005
+#define A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__MASK 0xffffffff
+#define A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__SHIFT 0
+static inline uint32_t A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI(uint32_t val)
+{
+ return ((val) << A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__SHIFT) & A5XX_CP_DRAW_INDX_INDIRECT_5_INDIRECT_HI__MASK;
+}
+
static inline uint32_t REG_CP_SET_DRAW_STATE_(uint32_t i0) { return 0x00000000 + 0x3*i0; }
static inline uint32_t REG_CP_SET_DRAW_STATE__0(uint32_t i0) { return 0x00000000 + 0x3*i0; }
@@ -593,6 +846,12 @@ static inline uint32_t CP_SET_DRAW_STATE__0_COUNT(uint32_t val)
#define CP_SET_DRAW_STATE__0_DISABLE 0x00020000
#define CP_SET_DRAW_STATE__0_DISABLE_ALL_GROUPS 0x00040000
#define CP_SET_DRAW_STATE__0_LOAD_IMMED 0x00080000
+#define CP_SET_DRAW_STATE__0_ENABLE_MASK__MASK 0x00f00000
+#define CP_SET_DRAW_STATE__0_ENABLE_MASK__SHIFT 20
+static inline uint32_t CP_SET_DRAW_STATE__0_ENABLE_MASK(uint32_t val)
+{
+ return ((val) << CP_SET_DRAW_STATE__0_ENABLE_MASK__SHIFT) & CP_SET_DRAW_STATE__0_ENABLE_MASK__MASK;
+}
#define CP_SET_DRAW_STATE__0_GROUP_ID__MASK 0x1f000000
#define CP_SET_DRAW_STATE__0_GROUP_ID__SHIFT 24
static inline uint32_t CP_SET_DRAW_STATE__0_GROUP_ID(uint32_t val)
@@ -708,6 +967,22 @@ static inline uint32_t CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI(uint32_t val)
return ((val) << CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__SHIFT) & CP_SET_BIN_DATA5_4_BIN_SIZE_ADDRESS_HI__MASK;
}
+#define REG_CP_SET_BIN_DATA5_5 0x00000005
+#define CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__SHIFT) & CP_SET_BIN_DATA5_5_XXX_ADDRESS_LO__MASK;
+}
+
+#define REG_CP_SET_BIN_DATA5_6 0x00000006
+#define CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__MASK 0xffffffff
+#define CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__SHIFT 0
+static inline uint32_t CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI(uint32_t val)
+{
+ return ((val) << CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__SHIFT) & CP_SET_BIN_DATA5_6_XXX_ADDRESS_HI__MASK;
+}
+
#define REG_CP_REG_TO_MEM_0 0x00000000
#define CP_REG_TO_MEM_0_REG__MASK 0x0000ffff
#define CP_REG_TO_MEM_0_REG__SHIFT 0
@@ -732,6 +1007,46 @@ static inline uint32_t CP_REG_TO_MEM_1_DEST(uint32_t val)
return ((val) << CP_REG_TO_MEM_1_DEST__SHIFT) & CP_REG_TO_MEM_1_DEST__MASK;
}
+#define REG_CP_REG_TO_MEM_2 0x00000002
+#define CP_REG_TO_MEM_2_DEST_HI__MASK 0xffffffff
+#define CP_REG_TO_MEM_2_DEST_HI__SHIFT 0
+static inline uint32_t CP_REG_TO_MEM_2_DEST_HI(uint32_t val)
+{
+ return ((val) << CP_REG_TO_MEM_2_DEST_HI__SHIFT) & CP_REG_TO_MEM_2_DEST_HI__MASK;
+}
+
+#define REG_CP_MEM_TO_REG_0 0x00000000
+#define CP_MEM_TO_REG_0_REG__MASK 0x0000ffff
+#define CP_MEM_TO_REG_0_REG__SHIFT 0
+static inline uint32_t CP_MEM_TO_REG_0_REG(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_REG_0_REG__SHIFT) & CP_MEM_TO_REG_0_REG__MASK;
+}
+#define CP_MEM_TO_REG_0_CNT__MASK 0x3ff80000
+#define CP_MEM_TO_REG_0_CNT__SHIFT 19
+static inline uint32_t CP_MEM_TO_REG_0_CNT(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_REG_0_CNT__SHIFT) & CP_MEM_TO_REG_0_CNT__MASK;
+}
+#define CP_MEM_TO_REG_0_64B 0x40000000
+#define CP_MEM_TO_REG_0_ACCUMULATE 0x80000000
+
+#define REG_CP_MEM_TO_REG_1 0x00000001
+#define CP_MEM_TO_REG_1_SRC__MASK 0xffffffff
+#define CP_MEM_TO_REG_1_SRC__SHIFT 0
+static inline uint32_t CP_MEM_TO_REG_1_SRC(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_REG_1_SRC__SHIFT) & CP_MEM_TO_REG_1_SRC__MASK;
+}
+
+#define REG_CP_MEM_TO_REG_2 0x00000002
+#define CP_MEM_TO_REG_2_SRC_HI__MASK 0xffffffff
+#define CP_MEM_TO_REG_2_SRC_HI__SHIFT 0
+static inline uint32_t CP_MEM_TO_REG_2_SRC_HI(uint32_t val)
+{
+ return ((val) << CP_MEM_TO_REG_2_SRC_HI__SHIFT) & CP_MEM_TO_REG_2_SRC_HI__MASK;
+}
+
#define REG_CP_MEM_TO_MEM_0 0x00000000
#define CP_MEM_TO_MEM_0_NEG_A 0x00000001
#define CP_MEM_TO_MEM_0_NEG_B 0x00000002
@@ -953,15 +1268,15 @@ static inline uint32_t CP_COMPUTE_CHECKPOINT_1_ADDR_0_HI(uint32_t val)
#define REG_CP_COMPUTE_CHECKPOINT_2 0x00000002
#define REG_CP_COMPUTE_CHECKPOINT_3 0x00000003
-
-#define REG_CP_COMPUTE_CHECKPOINT_4 0x00000004
-#define CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__MASK 0xffffffff
-#define CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__SHIFT 0
-static inline uint32_t CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN(uint32_t val)
+#define CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__MASK 0xffffffff
+#define CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__SHIFT 0
+static inline uint32_t CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN(uint32_t val)
{
- return ((val) << CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__SHIFT) & CP_COMPUTE_CHECKPOINT_4_ADDR_1_LEN__MASK;
+ return ((val) << CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__SHIFT) & CP_COMPUTE_CHECKPOINT_3_ADDR_1_LEN__MASK;
}
+#define REG_CP_COMPUTE_CHECKPOINT_4 0x00000004
+
#define REG_CP_COMPUTE_CHECKPOINT_5 0x00000005
#define CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__MASK 0xffffffff
#define CP_COMPUTE_CHECKPOINT_5_ADDR_1_LO__SHIFT 0
@@ -978,6 +1293,8 @@ static inline uint32_t CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI(uint32_t val)
return ((val) << CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__SHIFT) & CP_COMPUTE_CHECKPOINT_6_ADDR_1_HI__MASK;
}
+#define REG_CP_COMPUTE_CHECKPOINT_7 0x00000007
+
#define REG_CP_PERFCOUNTER_ACTION_0 0x00000000
#define REG_CP_PERFCOUNTER_ACTION_1 0x00000001
@@ -1032,13 +1349,13 @@ static inline uint32_t CP_BLIT_0_OP(enum cp_blit_cmd val)
}
#define REG_CP_BLIT_1 0x00000001
-#define CP_BLIT_1_SRC_X1__MASK 0x0000ffff
+#define CP_BLIT_1_SRC_X1__MASK 0x00003fff
#define CP_BLIT_1_SRC_X1__SHIFT 0
static inline uint32_t CP_BLIT_1_SRC_X1(uint32_t val)
{
return ((val) << CP_BLIT_1_SRC_X1__SHIFT) & CP_BLIT_1_SRC_X1__MASK;
}
-#define CP_BLIT_1_SRC_Y1__MASK 0xffff0000
+#define CP_BLIT_1_SRC_Y1__MASK 0x3fff0000
#define CP_BLIT_1_SRC_Y1__SHIFT 16
static inline uint32_t CP_BLIT_1_SRC_Y1(uint32_t val)
{
@@ -1046,13 +1363,13 @@ static inline uint32_t CP_BLIT_1_SRC_Y1(uint32_t val)
}
#define REG_CP_BLIT_2 0x00000002
-#define CP_BLIT_2_SRC_X2__MASK 0x0000ffff
+#define CP_BLIT_2_SRC_X2__MASK 0x00003fff
#define CP_BLIT_2_SRC_X2__SHIFT 0
static inline uint32_t CP_BLIT_2_SRC_X2(uint32_t val)
{
return ((val) << CP_BLIT_2_SRC_X2__SHIFT) & CP_BLIT_2_SRC_X2__MASK;
}
-#define CP_BLIT_2_SRC_Y2__MASK 0xffff0000
+#define CP_BLIT_2_SRC_Y2__MASK 0x3fff0000
#define CP_BLIT_2_SRC_Y2__SHIFT 16
static inline uint32_t CP_BLIT_2_SRC_Y2(uint32_t val)
{
@@ -1060,13 +1377,13 @@ static inline uint32_t CP_BLIT_2_SRC_Y2(uint32_t val)
}
#define REG_CP_BLIT_3 0x00000003
-#define CP_BLIT_3_DST_X1__MASK 0x0000ffff
+#define CP_BLIT_3_DST_X1__MASK 0x00003fff
#define CP_BLIT_3_DST_X1__SHIFT 0
static inline uint32_t CP_BLIT_3_DST_X1(uint32_t val)
{
return ((val) << CP_BLIT_3_DST_X1__SHIFT) & CP_BLIT_3_DST_X1__MASK;
}
-#define CP_BLIT_3_DST_Y1__MASK 0xffff0000
+#define CP_BLIT_3_DST_Y1__MASK 0x3fff0000
#define CP_BLIT_3_DST_Y1__SHIFT 16
static inline uint32_t CP_BLIT_3_DST_Y1(uint32_t val)
{
@@ -1074,13 +1391,13 @@ static inline uint32_t CP_BLIT_3_DST_Y1(uint32_t val)
}
#define REG_CP_BLIT_4 0x00000004
-#define CP_BLIT_4_DST_X2__MASK 0x0000ffff
+#define CP_BLIT_4_DST_X2__MASK 0x00003fff
#define CP_BLIT_4_DST_X2__SHIFT 0
static inline uint32_t CP_BLIT_4_DST_X2(uint32_t val)
{
return ((val) << CP_BLIT_4_DST_X2__SHIFT) & CP_BLIT_4_DST_X2__MASK;
}
-#define CP_BLIT_4_DST_Y2__MASK 0xffff0000
+#define CP_BLIT_4_DST_Y2__MASK 0x3fff0000
#define CP_BLIT_4_DST_Y2__SHIFT 16
static inline uint32_t CP_BLIT_4_DST_Y2(uint32_t val)
{
@@ -1113,5 +1430,129 @@ static inline uint32_t CP_EXEC_CS_3_NGROUPS_Z(uint32_t val)
return ((val) << CP_EXEC_CS_3_NGROUPS_Z__SHIFT) & CP_EXEC_CS_3_NGROUPS_Z__MASK;
}
+#define REG_A4XX_CP_EXEC_CS_INDIRECT_0 0x00000000
+
+
+#define REG_A4XX_CP_EXEC_CS_INDIRECT_1 0x00000001
+#define A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__MASK 0xffffffff
+#define A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__SHIFT 0
+static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_1_ADDR(uint32_t val)
+{
+ return ((val) << A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_1_ADDR__MASK;
+}
+
+#define REG_A4XX_CP_EXEC_CS_INDIRECT_2 0x00000002
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__MASK 0x00000ffc
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__SHIFT 2
+static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEX__MASK;
+}
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__MASK 0x003ff000
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__SHIFT 12
+static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEY__MASK;
+}
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__MASK 0xffc00000
+#define A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__SHIFT) & A4XX_CP_EXEC_CS_INDIRECT_2_LOCALSIZEZ__MASK;
+}
+
+
+#define REG_A5XX_CP_EXEC_CS_INDIRECT_1 0x00000001
+#define A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__MASK 0xffffffff
+#define A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__SHIFT 0
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_1_ADDR_LO__MASK;
+}
+
+#define REG_A5XX_CP_EXEC_CS_INDIRECT_2 0x00000002
+#define A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__MASK 0xffffffff
+#define A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__SHIFT 0
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_2_ADDR_HI__MASK;
+}
+
+#define REG_A5XX_CP_EXEC_CS_INDIRECT_3 0x00000003
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__MASK 0x00000ffc
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__SHIFT 2
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEX__MASK;
+}
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__MASK 0x003ff000
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__SHIFT 12
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEY__MASK;
+}
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__MASK 0xffc00000
+#define A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__SHIFT 22
+static inline uint32_t A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ(uint32_t val)
+{
+ return ((val) << A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__SHIFT) & A5XX_CP_EXEC_CS_INDIRECT_3_LOCALSIZEZ__MASK;
+}
+
+#define REG_A2XX_CP_SET_MARKER_0 0x00000000
+#define A2XX_CP_SET_MARKER_0_MARKER__MASK 0x0000000f
+#define A2XX_CP_SET_MARKER_0_MARKER__SHIFT 0
+static inline uint32_t A2XX_CP_SET_MARKER_0_MARKER(uint32_t val)
+{
+ return ((val) << A2XX_CP_SET_MARKER_0_MARKER__SHIFT) & A2XX_CP_SET_MARKER_0_MARKER__MASK;
+}
+#define A2XX_CP_SET_MARKER_0_MODE__MASK 0x0000000f
+#define A2XX_CP_SET_MARKER_0_MODE__SHIFT 0
+static inline uint32_t A2XX_CP_SET_MARKER_0_MODE(enum a6xx_render_mode val)
+{
+ return ((val) << A2XX_CP_SET_MARKER_0_MODE__SHIFT) & A2XX_CP_SET_MARKER_0_MODE__MASK;
+}
+#define A2XX_CP_SET_MARKER_0_IFPC 0x00000100
+
+static inline uint32_t REG_A2XX_CP_SET_PSEUDO_REG_(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+
+static inline uint32_t REG_A2XX_CP_SET_PSEUDO_REG__0(uint32_t i0) { return 0x00000000 + 0x3*i0; }
+#define A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK 0x00000007
+#define A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT 0
+static inline uint32_t A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG(enum pseudo_reg val)
+{
+ return ((val) << A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__SHIFT) & A2XX_CP_SET_PSEUDO_REG__0_PSEUDO_REG__MASK;
+}
+
+static inline uint32_t REG_A2XX_CP_SET_PSEUDO_REG__1(uint32_t i0) { return 0x00000001 + 0x3*i0; }
+#define A2XX_CP_SET_PSEUDO_REG__1_LO__MASK 0xffffffff
+#define A2XX_CP_SET_PSEUDO_REG__1_LO__SHIFT 0
+static inline uint32_t A2XX_CP_SET_PSEUDO_REG__1_LO(uint32_t val)
+{
+ return ((val) << A2XX_CP_SET_PSEUDO_REG__1_LO__SHIFT) & A2XX_CP_SET_PSEUDO_REG__1_LO__MASK;
+}
+
+static inline uint32_t REG_A2XX_CP_SET_PSEUDO_REG__2(uint32_t i0) { return 0x00000002 + 0x3*i0; }
+#define A2XX_CP_SET_PSEUDO_REG__2_HI__MASK 0xffffffff
+#define A2XX_CP_SET_PSEUDO_REG__2_HI__SHIFT 0
+static inline uint32_t A2XX_CP_SET_PSEUDO_REG__2_HI(uint32_t val)
+{
+ return ((val) << A2XX_CP_SET_PSEUDO_REG__2_HI__SHIFT) & A2XX_CP_SET_PSEUDO_REG__2_HI__MASK;
+}
+
+#define REG_A2XX_CP_REG_TEST_0 0x00000000
+#define A2XX_CP_REG_TEST_0_REG__MASK 0x00000fff
+#define A2XX_CP_REG_TEST_0_REG__SHIFT 0
+static inline uint32_t A2XX_CP_REG_TEST_0_REG(uint32_t val)
+{
+ return ((val) << A2XX_CP_REG_TEST_0_REG__SHIFT) & A2XX_CP_REG_TEST_0_REG__MASK;
+}
+#define A2XX_CP_REG_TEST_0_BIT__MASK 0x01f00000
+#define A2XX_CP_REG_TEST_0_BIT__SHIFT 20
+static inline uint32_t A2XX_CP_REG_TEST_0_BIT(uint32_t val)
+{
+ return ((val) << A2XX_CP_REG_TEST_0_BIT__SHIFT) & A2XX_CP_REG_TEST_0_BIT__MASK;
+}
+#define A2XX_CP_REG_TEST_0_UNK25 0x02000000
+
#endif /* ADRENO_PM4_XML */
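The CP_LOAD_STATE6_* helpers above compose the first dword of an a6xx state-load packet field by field. For example, a purely illustrative fragment-shader load of 64 units from an indirect buffer could be packed as:

	uint32_t dword0 = CP_LOAD_STATE6_0_DST_OFF(0) |
		CP_LOAD_STATE6_0_STATE_TYPE(ST6_SHADER) |
		CP_LOAD_STATE6_0_STATE_SRC(SS6_INDIRECT) |
		CP_LOAD_STATE6_0_STATE_BLOCK(SB6_FS_SHADER) |
		CP_LOAD_STATE6_0_NUM_UNIT(64);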
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
new file mode 100644
index 000000000000..879c13fe74e0
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.c
@@ -0,0 +1,479 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+
+#include "dpu_core_irq.h"
+#include "dpu_trace.h"
+
+/**
+ * dpu_core_irq_callback_handler - dispatch core interrupts
+ * @arg: private data of callback handler
+ * @irq_idx: interrupt index
+ */
+static void dpu_core_irq_callback_handler(void *arg, int irq_idx)
+{
+ struct dpu_kms *dpu_kms = arg;
+ struct dpu_irq *irq_obj = &dpu_kms->irq_obj;
+ struct dpu_irq_callback *cb;
+ unsigned long irq_flags;
+
+ pr_debug("irq_idx=%d\n", irq_idx);
+
+ if (list_empty(&irq_obj->irq_cb_tbl[irq_idx])) {
+ DRM_ERROR("no registered cb, idx:%d enable_count:%d\n", irq_idx,
+ atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]));
+ }
+
+ atomic_inc(&irq_obj->irq_counts[irq_idx]);
+
+ /*
+ * Perform registered function callback
+ */
+ spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ list_for_each_entry(cb, &irq_obj->irq_cb_tbl[irq_idx], list)
+ if (cb->func)
+ cb->func(cb->arg, irq_idx);
+ spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+
+ /*
+ * Clear pending interrupt status in HW.
+ * NOTE: dpu_core_irq_callback_handler is protected by top-level
+ * spinlock, so it is safe to clear any interrupt status here.
+ */
+ dpu_kms->hw_intr->ops.clear_intr_status_nolock(
+ dpu_kms->hw_intr,
+ irq_idx);
+}
+
+int dpu_core_irq_idx_lookup(struct dpu_kms *dpu_kms,
+ enum dpu_intr_type intr_type, u32 instance_idx)
+{
+ if (!dpu_kms || !dpu_kms->hw_intr ||
+ !dpu_kms->hw_intr->ops.irq_idx_lookup)
+ return -EINVAL;
+
+ return dpu_kms->hw_intr->ops.irq_idx_lookup(intr_type,
+ instance_idx);
+}
+
+/**
+ * _dpu_core_irq_enable - enable core interrupt given by the index
+ * @dpu_kms: Pointer to dpu kms context
+ * @irq_idx: interrupt index
+ */
+static int _dpu_core_irq_enable(struct dpu_kms *dpu_kms, int irq_idx)
+{
+ unsigned long irq_flags;
+ int ret = 0, enable_count;
+
+ if (!dpu_kms || !dpu_kms->hw_intr ||
+ !dpu_kms->irq_obj.enable_counts ||
+ !dpu_kms->irq_obj.irq_counts) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
+ DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
+ trace_dpu_core_irq_enable_idx(irq_idx, enable_count);
+
+ if (atomic_inc_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 1) {
+ ret = dpu_kms->hw_intr->ops.enable_irq(
+ dpu_kms->hw_intr,
+ irq_idx);
+ if (ret)
+ DPU_ERROR("Fail to enable IRQ for irq_idx:%d\n",
+ irq_idx);
+
+ DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+
+ spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ /* empty callback list but interrupt is enabled */
+ if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]))
+ DPU_ERROR("irq_idx=%d enabled with no callback\n",
+ irq_idx);
+ spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ }
+
+ return ret;
+}
+
+int dpu_core_irq_enable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
+{
+ int i, ret = 0, counts;
+
+ if (!dpu_kms || !irq_idxs || !irq_count) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
+ if (counts)
+ DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
+
+ for (i = 0; (i < irq_count) && !ret; i++)
+ ret = _dpu_core_irq_enable(dpu_kms, irq_idxs[i]);
+
+ return ret;
+}
+
+/**
+ * _dpu_core_irq_disable - disable core interrupt given by the index
+ * @dpu_kms: Pointer to dpu kms context
+ * @irq_idx: interrupt index
+ */
+static int _dpu_core_irq_disable(struct dpu_kms *dpu_kms, int irq_idx)
+{
+ int ret = 0, enable_count;
+
+ if (!dpu_kms || !dpu_kms->hw_intr || !dpu_kms->irq_obj.enable_counts) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ enable_count = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]);
+ DRM_DEBUG_KMS("irq_idx=%d enable_count=%d\n", irq_idx, enable_count);
+ trace_dpu_core_irq_disable_idx(irq_idx, enable_count);
+
+ if (atomic_dec_return(&dpu_kms->irq_obj.enable_counts[irq_idx]) == 0) {
+ ret = dpu_kms->hw_intr->ops.disable_irq(
+ dpu_kms->hw_intr,
+ irq_idx);
+ if (ret)
+ DPU_ERROR("Fail to disable IRQ for irq_idx:%d\n",
+ irq_idx);
+ DPU_DEBUG("irq_idx=%d ret=%d\n", irq_idx, ret);
+ }
+
+ return ret;
+}
+
+int dpu_core_irq_disable(struct dpu_kms *dpu_kms, int *irq_idxs, u32 irq_count)
+{
+ int i, ret = 0, counts;
+
+ if (!dpu_kms || !irq_idxs || !irq_count) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ counts = atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idxs[0]]);
+ if (counts == 2)
+ DRM_ERROR("irq_idx=%d enable_count=%d\n", irq_idxs[0], counts);
+
+ for (i = 0; (i < irq_count) && !ret; i++)
+ ret = _dpu_core_irq_disable(dpu_kms, irq_idxs[i]);
+
+ return ret;
+}
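Enables and disables are reference counted per irq_idx; the hardware interrupt is only touched on the 0->1 and 1->0 transitions, so every enable must be paired with a disable. A sketch of the intended usage (intr_type and instance_idx are placeholders):

	int idx = dpu_core_irq_idx_lookup(dpu_kms, intr_type, instance_idx);

	if (idx < 0)
		return idx;

	dpu_core_irq_enable(dpu_kms, &idx, 1);
	/* ... the registered callback fires from dpu_core_irq_callback_handler ... */
	dpu_core_irq_disable(dpu_kms, &idx, 1);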
+
+u32 dpu_core_irq_read(struct dpu_kms *dpu_kms, int irq_idx, bool clear)
+{
+ if (!dpu_kms || !dpu_kms->hw_intr ||
+ !dpu_kms->hw_intr->ops.get_interrupt_status)
+ return 0;
+
+ if (irq_idx < 0) {
+ DPU_ERROR("[%pS] invalid irq_idx=%d\n",
+ __builtin_return_address(0), irq_idx);
+ return 0;
+ }
+
+ return dpu_kms->hw_intr->ops.get_interrupt_status(dpu_kms->hw_intr,
+ irq_idx, clear);
+}
+
+int dpu_core_irq_register_callback(struct dpu_kms *dpu_kms, int irq_idx,
+ struct dpu_irq_callback *register_irq_cb)
+{
+ unsigned long irq_flags;
+
+ if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (!register_irq_cb || !register_irq_cb->func) {
+ DPU_ERROR("invalid irq_cb:%d func:%d\n",
+ register_irq_cb != NULL,
+ register_irq_cb ?
+ register_irq_cb->func != NULL : -1);
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+ spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ trace_dpu_core_irq_register_callback(irq_idx, register_irq_cb);
+ list_del_init(&register_irq_cb->list);
+ list_add_tail(&register_irq_cb->list,
+ &dpu_kms->irq_obj.irq_cb_tbl[irq_idx]);
+ spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+
+ return 0;
+}
+
+int dpu_core_irq_unregister_callback(struct dpu_kms *dpu_kms, int irq_idx,
+ struct dpu_irq_callback *register_irq_cb)
+{
+ unsigned long irq_flags;
+
+ if (!dpu_kms || !dpu_kms->irq_obj.irq_cb_tbl) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+
+ if (!register_irq_cb || !register_irq_cb->func) {
+ DPU_ERROR("invalid irq_cb:%d func:%d\n",
+ register_irq_cb != NULL,
+ register_irq_cb ?
+ register_irq_cb->func != NULL : -1);
+ return -EINVAL;
+ }
+
+ if (irq_idx < 0 || irq_idx >= dpu_kms->hw_intr->irq_idx_tbl_size) {
+ DPU_ERROR("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ DPU_DEBUG("[%pS] irq_idx=%d\n", __builtin_return_address(0), irq_idx);
+
+ spin_lock_irqsave(&dpu_kms->irq_obj.cb_lock, irq_flags);
+ trace_dpu_core_irq_unregister_callback(irq_idx, register_irq_cb);
+ list_del_init(&register_irq_cb->list);
+ /* empty callback list but interrupt is still enabled */
+ if (list_empty(&dpu_kms->irq_obj.irq_cb_tbl[irq_idx]) &&
+ atomic_read(&dpu_kms->irq_obj.enable_counts[irq_idx]))
+ DPU_ERROR("irq_idx=%d enabled with no callback\n", irq_idx);
+ spin_unlock_irqrestore(&dpu_kms->irq_obj.cb_lock, irq_flags);
+
+ return 0;
+}
+
+static void dpu_clear_all_irqs(struct dpu_kms *dpu_kms)
+{
+ if (!dpu_kms || !dpu_kms->hw_intr ||
+ !dpu_kms->hw_intr->ops.clear_all_irqs)
+ return;
+
+ dpu_kms->hw_intr->ops.clear_all_irqs(dpu_kms->hw_intr);
+}
+
+static void dpu_disable_all_irqs(struct dpu_kms *dpu_kms)
+{
+ if (!dpu_kms || !dpu_kms->hw_intr ||
+ !dpu_kms->hw_intr->ops.disable_all_irqs)
+ return;
+
+ dpu_kms->hw_intr->ops.disable_all_irqs(dpu_kms->hw_intr);
+}
+
+#ifdef CONFIG_DEBUG_FS
+#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
+static int __prefix ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __prefix ## _show, inode->i_private); \
+} \
+static const struct file_operations __prefix ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __prefix ## _open, \
+ .release = single_release, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+}
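+
+/*
+ * Illustrative expansion note: DEFINE_DPU_DEBUGFS_SEQ_FOPS(foo) emits a
+ * foo_open() helper that wires foo_show() into single_open(), plus a
+ * foo_fops file_operations table, so each seq_file debugfs node below only
+ * has to provide its _show routine.
+ */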
+
+static int dpu_debugfs_core_irq_show(struct seq_file *s, void *v)
+{
+ struct dpu_irq *irq_obj = s->private;
+ struct dpu_irq_callback *cb;
+ unsigned long irq_flags;
+ int i, irq_count, enable_count, cb_count;
+
+ if (!irq_obj || !irq_obj->enable_counts || !irq_obj->irq_cb_tbl) {
+ DPU_ERROR("invalid parameters\n");
+ return 0;
+ }
+
+ for (i = 0; i < irq_obj->total_irqs; i++) {
+ spin_lock_irqsave(&irq_obj->cb_lock, irq_flags);
+ cb_count = 0;
+ irq_count = atomic_read(&irq_obj->irq_counts[i]);
+ enable_count = atomic_read(&irq_obj->enable_counts[i]);
+ list_for_each_entry(cb, &irq_obj->irq_cb_tbl[i], list)
+ cb_count++;
+ spin_unlock_irqrestore(&irq_obj->cb_lock, irq_flags);
+
+ if (irq_count || enable_count || cb_count)
+ seq_printf(s, "idx:%d irq:%d enable:%d cb:%d\n",
+ i, irq_count, enable_count, cb_count);
+ }
+
+ return 0;
+}
+
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_core_irq);
+
+int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent)
+{
+ dpu_kms->irq_obj.debugfs_file = debugfs_create_file("core_irq", 0600,
+ parent, &dpu_kms->irq_obj,
+ &dpu_debugfs_core_irq_fops);
+
+ return 0;
+}
+
+void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
+{
+ debugfs_remove(dpu_kms->irq_obj.debugfs_file);
+ dpu_kms->irq_obj.debugfs_file = NULL;
+}
+
+#else
+int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent)
+{
+ return 0;
+}
+
+void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms)
+{
+}
+#endif
+
+void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms)
+{
+ struct msm_drm_private *priv;
+ int i;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return;
+ } else if (!dpu_kms->dev) {
+ DPU_ERROR("invalid drm device\n");
+ return;
+ } else if (!dpu_kms->dev->dev_private) {
+ DPU_ERROR("invalid device private\n");
+ return;
+ }
+ priv = dpu_kms->dev->dev_private;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ dpu_clear_all_irqs(dpu_kms);
+ dpu_disable_all_irqs(dpu_kms);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ spin_lock_init(&dpu_kms->irq_obj.cb_lock);
+
+ /* Create irq callbacks for all possible irq_idx */
+ dpu_kms->irq_obj.total_irqs = dpu_kms->hw_intr->irq_idx_tbl_size;
+ dpu_kms->irq_obj.irq_cb_tbl = kcalloc(dpu_kms->irq_obj.total_irqs,
+ sizeof(struct list_head), GFP_KERNEL);
+ dpu_kms->irq_obj.enable_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
+ sizeof(atomic_t), GFP_KERNEL);
+ dpu_kms->irq_obj.irq_counts = kcalloc(dpu_kms->irq_obj.total_irqs,
+ sizeof(atomic_t), GFP_KERNEL);
+ for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++) {
+ INIT_LIST_HEAD(&dpu_kms->irq_obj.irq_cb_tbl[i]);
+ atomic_set(&dpu_kms->irq_obj.enable_counts[i], 0);
+ atomic_set(&dpu_kms->irq_obj.irq_counts[i], 0);
+ }
+}
+
+int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms)
+{
+ return 0;
+}
+
+void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms)
+{
+ struct msm_drm_private *priv;
+ int i;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return;
+ } else if (!dpu_kms->dev) {
+ DPU_ERROR("invalid drm device\n");
+ return;
+ } else if (!dpu_kms->dev->dev_private) {
+ DPU_ERROR("invalid device private\n");
+ return;
+ }
+ priv = dpu_kms->dev->dev_private;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ for (i = 0; i < dpu_kms->irq_obj.total_irqs; i++)
+ if (atomic_read(&dpu_kms->irq_obj.enable_counts[i]) ||
+ !list_empty(&dpu_kms->irq_obj.irq_cb_tbl[i]))
+ DPU_ERROR("irq_idx=%d still enabled/registered\n", i);
+
+ dpu_clear_all_irqs(dpu_kms);
+ dpu_disable_all_irqs(dpu_kms);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ kfree(dpu_kms->irq_obj.irq_cb_tbl);
+ kfree(dpu_kms->irq_obj.enable_counts);
+ kfree(dpu_kms->irq_obj.irq_counts);
+ dpu_kms->irq_obj.irq_cb_tbl = NULL;
+ dpu_kms->irq_obj.enable_counts = NULL;
+ dpu_kms->irq_obj.irq_counts = NULL;
+ dpu_kms->irq_obj.total_irqs = 0;
+}
+
+irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms)
+{
+ /*
+	 * Read interrupt status from all sources. Interrupt statuses are
+	 * stored within hw_intr.
+	 * This call also clears the interrupt status after reading.
+	 * An individual interrupt status bit is only stored if that
+	 * interrupt is enabled.
+ */
+ dpu_kms->hw_intr->ops.get_interrupt_statuses(dpu_kms->hw_intr);
+
+ /*
+	 * Dispatch to the HW driver to look up the interrupt that fired.
+	 * When a matching interrupt is located, the HW driver calls
+	 * dpu_core_irq_callback_handler with the irq_idx from the lookup
+	 * table. dpu_core_irq_callback_handler invokes the registered
+	 * callbacks and clears the interrupt status once the registered
+	 * callbacks have finished.
+ */
+ dpu_kms->hw_intr->ops.dispatch_irqs(
+ dpu_kms->hw_intr,
+ dpu_core_irq_callback_handler,
+ dpu_kms);
+
+ return IRQ_HANDLED;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
new file mode 100644
index 000000000000..5e98bba46af5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_irq.h
@@ -0,0 +1,153 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_CORE_IRQ_H__
+#define __DPU_CORE_IRQ_H__
+
+#include "dpu_kms.h"
+#include "dpu_hw_interrupts.h"
+
+/**
+ * dpu_core_irq_preinstall - perform pre-installation of core IRQ handler
+ * @dpu_kms: DPU handle
+ * @return: none
+ */
+void dpu_core_irq_preinstall(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq_postinstall - perform post-installation of core IRQ handler
+ * @dpu_kms: DPU handle
+ * @return: 0 if success; error code otherwise
+ */
+int dpu_core_irq_postinstall(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq_uninstall - uninstall core IRQ handler
+ * @dpu_kms: DPU handle
+ * @return: none
+ */
+void dpu_core_irq_uninstall(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq - core IRQ handler
+ * @dpu_kms: DPU handle
+ * @return: interrupt handling status
+ */
+irqreturn_t dpu_core_irq(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_core_irq_idx_lookup - IRQ helper function to look up irq_idx in the HW
+ *			interrupt mapping table.
+ * @dpu_kms: DPU handle
+ * @intr_type: DPU HW interrupt type for lookup
+ * @instance_idx: DPU HW block instance defined in dpu_hw_mdss.h
+ * @return: irq_idx, or -EINVAL if the lookup fails
+ */
+int dpu_core_irq_idx_lookup(
+ struct dpu_kms *dpu_kms,
+ enum dpu_intr_type intr_type,
+ uint32_t instance_idx);
+
+/**
+ * dpu_core_irq_enable - IRQ helper function for enabling one or more IRQs
+ * @dpu_kms: DPU handle
+ * @irq_idxs: Array of irq index
+ * @irq_count: Number of irq_idx provided in the array
+ * @return: 0 for success enabling IRQ, otherwise failure
+ *
+ * This function increments the reference count on each enable and decrements
+ * it on each disable. The interrupt is enabled in hardware only if the count
+ * is 0 before the increment.
+ */
+int dpu_core_irq_enable(
+ struct dpu_kms *dpu_kms,
+ int *irq_idxs,
+ uint32_t irq_count);
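+
+/*
+ * Illustrative usage sketch; the interrupt type and block instance below are
+ * examples only (DPU_IRQ_TYPE_INTF_VSYNC comes from dpu_hw_interrupts.h,
+ * INTF_1 from dpu_hw_mdss.h):
+ *
+ *	int irq_idx = dpu_core_irq_idx_lookup(dpu_kms,
+ *			DPU_IRQ_TYPE_INTF_VSYNC, INTF_1);
+ *	if (irq_idx >= 0)
+ *		dpu_core_irq_enable(dpu_kms, &irq_idx, 1);
+ */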
+
+/**
+ * dpu_core_irq_disable - IRQ helper function for disabling one or more IRQs
+ * @dpu_kms: DPU handle
+ * @irq_idxs: Array of irq index
+ * @irq_count: Number of irq_idx provided in the array
+ * @return: 0 for success disabling IRQ, otherwise failure
+ *
+ * This function increments the reference count on each enable and decrements
+ * it on each disable. The interrupt is disabled in hardware only if the count
+ * reaches 0 after the decrement.
+ */
+int dpu_core_irq_disable(
+ struct dpu_kms *dpu_kms,
+ int *irq_idxs,
+ uint32_t irq_count);
+
+/**
+ * dpu_core_irq_read - IRQ helper function for reading IRQ status
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @clear: True to clear the irq after read
+ * @return: non-zero if an interrupt was detected; zero otherwise
+ */
+u32 dpu_core_irq_read(
+ struct dpu_kms *dpu_kms,
+ int irq_idx,
+ bool clear);
+
+/**
+ * dpu_core_irq_register_callback - For registering callback function on IRQ
+ * interrupt
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @irq_cb: IRQ callback structure, containing the callback function
+ *			and argument. The structure must remain valid until
+ *			the callback is unregistered; a NULL irq_cb is
+ *			rejected with -EINVAL.
+ * @return: 0 for success registering callback, otherwise failure
+ *
+ * This function supports registration of multiple callbacks for each interrupt.
+ */
+int dpu_core_irq_register_callback(
+ struct dpu_kms *dpu_kms,
+ int irq_idx,
+ struct dpu_irq_callback *irq_cb);
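+
+/*
+ * Illustrative registration sketch, assuming a caller-owned callback object
+ * (struct dpu_irq_callback is declared in dpu_kms.h) that stays valid until
+ * dpu_core_irq_unregister_callback() is called:
+ *
+ *	static void example_cb(void *arg, int irq_idx)
+ *	{
+ *		... runs from the interrupt dispatch path ...
+ *	}
+ *
+ *	struct dpu_irq_callback cb = {
+ *		.func = example_cb,
+ *		.arg = ctx,
+ *	};
+ *
+ *	INIT_LIST_HEAD(&cb.list);
+ *	dpu_core_irq_register_callback(dpu_kms, irq_idx, &cb);
+ */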
+
+/**
+ * dpu_core_irq_unregister_callback - For unregistering callback function on IRQ
+ * interrupt
+ * @dpu_kms: DPU handle
+ * @irq_idx: irq index
+ * @irq_cb: IRQ callback structure, containing the callback function
+ *			and argument. This must match the structure passed
+ *			at registration; a NULL irq_cb is rejected with
+ *			-EINVAL.
+ * @return: 0 for success unregistering the callback, otherwise failure
+ *
+ * Multiple callbacks may be registered for each interrupt; this function
+ * removes only the given callback.
+ */
+int dpu_core_irq_unregister_callback(
+ struct dpu_kms *dpu_kms,
+ int irq_idx,
+ struct dpu_irq_callback *irq_cb);
+
+/**
+ * dpu_debugfs_core_irq_init - register core irq debugfs
+ * @dpu_kms: pointer to kms
+ * @parent: debugfs directory root
+ * @return: 0 on success
+ */
+int dpu_debugfs_core_irq_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent);
+
+/**
+ * dpu_debugfs_core_irq_destroy - deregister core irq debugfs
+ * @dpu_kms: pointer to kms
+ */
+void dpu_debugfs_core_irq_destroy(struct dpu_kms *dpu_kms);
+
+#endif /* __DPU_CORE_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
new file mode 100644
index 000000000000..41c5191f9056
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.c
@@ -0,0 +1,637 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/errno.h>
+#include <linux/mutex.h>
+#include <linux/sort.h>
+#include <linux/clk.h>
+#include <linux/bitmap.h>
+
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+#include "dpu_crtc.h"
+#include "dpu_core_perf.h"
+
+#define DPU_PERF_MODE_STRING_SIZE 128
+
+/**
+ * enum dpu_perf_mode - performance tuning mode
+ * @DPU_PERF_MODE_NORMAL: performance controlled by user mode client
+ * @DPU_PERF_MODE_MINIMUM: performance bounded by minimum setting
+ * @DPU_PERF_MODE_FIXED: performance bounded by fixed setting
+ */
+enum dpu_perf_mode {
+ DPU_PERF_MODE_NORMAL,
+ DPU_PERF_MODE_MINIMUM,
+ DPU_PERF_MODE_FIXED,
+ DPU_PERF_MODE_MAX
+};
+
+static struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+{
+ struct msm_drm_private *priv;
+
+ if (!crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid device\n");
+ return NULL;
+ }
+
+ priv = crtc->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid kms\n");
+ return NULL;
+ }
+
+ return to_dpu_kms(priv->kms);
+}
+
+static bool _dpu_core_perf_crtc_is_power_on(struct drm_crtc *crtc)
+{
+ return dpu_crtc_is_enabled(crtc);
+}
+
+static bool _dpu_core_video_mode_intf_connected(struct drm_crtc *crtc)
+{
+ struct drm_crtc *tmp_crtc;
+ bool intf_connected = false;
+
+ if (!crtc)
+ goto end;
+
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if ((dpu_crtc_get_intf_mode(tmp_crtc) == INTF_MODE_VIDEO) &&
+ _dpu_core_perf_crtc_is_power_on(tmp_crtc)) {
+ DPU_DEBUG("video interface connected crtc:%d\n",
+ tmp_crtc->base.id);
+ intf_connected = true;
+ goto end;
+ }
+ }
+
+end:
+ return intf_connected;
+}
+
+static void _dpu_core_perf_calc_crtc(struct dpu_kms *kms,
+ struct drm_crtc *crtc,
+ struct drm_crtc_state *state,
+ struct dpu_core_perf_params *perf)
+{
+ struct dpu_crtc_state *dpu_cstate;
+ int i;
+
+ if (!kms || !kms->catalog || !crtc || !state || !perf) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ dpu_cstate = to_dpu_crtc_state(state);
+ memset(perf, 0, sizeof(struct dpu_core_perf_params));
+
+ if (!dpu_cstate->bw_control) {
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ perf->bw_ctl[i] = kms->catalog->perf.max_bw_high *
+ 1000ULL;
+ perf->max_per_pipe_ib[i] = perf->bw_ctl[i];
+ }
+ perf->core_clk_rate = kms->perf.max_core_clk_rate;
+ } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_MINIMUM) {
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ perf->bw_ctl[i] = 0;
+ perf->max_per_pipe_ib[i] = 0;
+ }
+ perf->core_clk_rate = 0;
+ } else if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED) {
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ perf->bw_ctl[i] = kms->perf.fix_core_ab_vote;
+ perf->max_per_pipe_ib[i] = kms->perf.fix_core_ib_vote;
+ }
+ perf->core_clk_rate = kms->perf.fix_core_clk_rate;
+ }
+
+ DPU_DEBUG(
+ "crtc=%d clk_rate=%llu core_ib=%llu core_ab=%llu llcc_ib=%llu llcc_ab=%llu mem_ib=%llu mem_ab=%llu\n",
+ crtc->base.id, perf->core_clk_rate,
+ perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MNOC],
+ perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
+ perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_LLCC],
+ perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
+ perf->max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_EBI],
+ perf->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI]);
+}
+
+int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ u32 bw, threshold;
+ u64 bw_sum_of_intfs = 0;
+ enum dpu_crtc_client_type curr_client_type;
+ bool is_video_mode;
+ struct dpu_crtc_state *dpu_cstate;
+ struct drm_crtc *tmp_crtc;
+ struct dpu_kms *kms;
+ int i;
+
+ if (!crtc || !state) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+
+ kms = _dpu_crtc_get_kms(crtc);
+ if (!kms || !kms->catalog) {
+ DPU_ERROR("invalid parameters\n");
+ return 0;
+ }
+
+ /* we only need bandwidth check on real-time clients (interfaces) */
+ if (dpu_crtc_get_client_type(crtc) == NRT_CLIENT)
+ return 0;
+
+ dpu_cstate = to_dpu_crtc_state(state);
+
+ /* obtain new values */
+ _dpu_core_perf_calc_crtc(kms, crtc, state, &dpu_cstate->new_perf);
+
+ for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
+ i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ bw_sum_of_intfs = dpu_cstate->new_perf.bw_ctl[i];
+ curr_client_type = dpu_crtc_get_client_type(crtc);
+
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+ (dpu_crtc_get_client_type(tmp_crtc) ==
+ curr_client_type) &&
+ (tmp_crtc != crtc)) {
+ struct dpu_crtc_state *tmp_cstate =
+ to_dpu_crtc_state(tmp_crtc->state);
+
+ DPU_DEBUG("crtc:%d bw:%llu ctrl:%d\n",
+ tmp_crtc->base.id,
+ tmp_cstate->new_perf.bw_ctl[i],
+ tmp_cstate->bw_control);
+ /*
+				 * For the bw check, only use the bw if the
+				 * atomic property has already been set
+ */
+ if (tmp_cstate->bw_control)
+ bw_sum_of_intfs +=
+ tmp_cstate->new_perf.bw_ctl[i];
+ }
+ }
+
+ /* convert bandwidth to kb */
+ bw = DIV_ROUND_UP_ULL(bw_sum_of_intfs, 1000);
+ DPU_DEBUG("calculated bandwidth=%uk\n", bw);
+
+ is_video_mode = dpu_crtc_get_intf_mode(crtc) == INTF_MODE_VIDEO;
+ threshold = (is_video_mode ||
+ _dpu_core_video_mode_intf_connected(crtc)) ?
+ kms->catalog->perf.max_bw_low :
+ kms->catalog->perf.max_bw_high;
+
+ DPU_DEBUG("final threshold bw limit = %d\n", threshold);
+
+ if (!dpu_cstate->bw_control) {
+ DPU_DEBUG("bypass bandwidth check\n");
+ } else if (!threshold) {
+ DPU_ERROR("no bandwidth limits specified\n");
+ return -E2BIG;
+ } else if (bw > threshold) {
+ DPU_ERROR("exceeds bandwidth: %ukb > %ukb\n", bw,
+ threshold);
+ return -E2BIG;
+ }
+ }
+
+ return 0;
+}
+
+static int _dpu_core_perf_crtc_update_bus(struct dpu_kms *kms,
+ struct drm_crtc *crtc, u32 bus_id)
+{
+ struct dpu_core_perf_params perf = { { 0 } };
+ enum dpu_crtc_client_type curr_client_type
+ = dpu_crtc_get_client_type(crtc);
+ struct drm_crtc *tmp_crtc;
+ struct dpu_crtc_state *dpu_cstate;
+ int ret = 0;
+
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+ curr_client_type ==
+ dpu_crtc_get_client_type(tmp_crtc)) {
+ dpu_cstate = to_dpu_crtc_state(tmp_crtc->state);
+
+ perf.max_per_pipe_ib[bus_id] =
+ max(perf.max_per_pipe_ib[bus_id],
+ dpu_cstate->new_perf.max_per_pipe_ib[bus_id]);
+
+ DPU_DEBUG("crtc=%d bus_id=%d bw=%llu\n",
+ tmp_crtc->base.id, bus_id,
+ dpu_cstate->new_perf.bw_ctl[bus_id]);
+ }
+ }
+ return ret;
+}
+
+/**
+ * dpu_core_perf_crtc_release_bw - request zero bandwidth
+ * @crtc: pointer to a crtc
+ *
+ * Function checks a state variable for the crtc; if all pending commit
+ * requests are done, meaning no more bandwidth is needed, the bandwidth
+ * request is released.
+ */
+void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc)
+{
+ struct drm_crtc *tmp_crtc;
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *dpu_cstate;
+ struct dpu_kms *kms;
+ int i;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ kms = _dpu_crtc_get_kms(crtc);
+ if (!kms || !kms->catalog) {
+ DPU_ERROR("invalid kms\n");
+ return;
+ }
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ dpu_cstate = to_dpu_crtc_state(crtc->state);
+
+ /* only do this for command mode rt client */
+ if (dpu_crtc_get_intf_mode(crtc) != INTF_MODE_CMD)
+ return;
+
+ /*
+	 * If a video interface is present, cmd panel bandwidth cannot be
+	 * released.
+ */
+ if (dpu_crtc_get_intf_mode(crtc) == INTF_MODE_CMD)
+ drm_for_each_crtc(tmp_crtc, crtc->dev) {
+ if (_dpu_core_perf_crtc_is_power_on(tmp_crtc) &&
+ dpu_crtc_get_intf_mode(tmp_crtc) ==
+ INTF_MODE_VIDEO)
+ return;
+ }
+
+ /* Release the bandwidth */
+ if (kms->perf.enable_bw_release) {
+ trace_dpu_cmd_release_bw(crtc->base.id);
+ DPU_DEBUG("Release BW crtc=%d\n", crtc->base.id);
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ dpu_crtc->cur_perf.bw_ctl[i] = 0;
+ _dpu_core_perf_crtc_update_bus(kms, crtc, i);
+ }
+ }
+}
+
+static int _dpu_core_perf_set_core_clk_rate(struct dpu_kms *kms, u64 rate)
+{
+ struct dss_clk *core_clk = kms->perf.core_clk;
+
+ if (core_clk->max_rate && (rate > core_clk->max_rate))
+ rate = core_clk->max_rate;
+
+ core_clk->rate = rate;
+ return msm_dss_clk_set_rate(core_clk, 1);
+}
+
+static u64 _dpu_core_perf_get_core_clk_rate(struct dpu_kms *kms)
+{
+ u64 clk_rate = kms->perf.perf_tune.min_core_clk;
+ struct drm_crtc *crtc;
+ struct dpu_crtc_state *dpu_cstate;
+
+ drm_for_each_crtc(crtc, kms->dev) {
+ if (_dpu_core_perf_crtc_is_power_on(crtc)) {
+ dpu_cstate = to_dpu_crtc_state(crtc->state);
+ clk_rate = max(dpu_cstate->new_perf.core_clk_rate,
+ clk_rate);
+ clk_rate = clk_round_rate(kms->perf.core_clk->clk,
+ clk_rate);
+ }
+ }
+
+ if (kms->perf.perf_tune.mode == DPU_PERF_MODE_FIXED)
+ clk_rate = kms->perf.fix_core_clk_rate;
+
+ DPU_DEBUG("clk:%llu\n", clk_rate);
+
+ return clk_rate;
+}
+
+int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
+ int params_changed, bool stop_req)
+{
+ struct dpu_core_perf_params *new, *old;
+ int update_bus = 0, update_clk = 0;
+ u64 clk_rate = 0;
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *dpu_cstate;
+ int i;
+ struct msm_drm_private *priv;
+ struct dpu_kms *kms;
+ int ret;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+
+ kms = _dpu_crtc_get_kms(crtc);
+ if (!kms || !kms->catalog) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+ priv = kms->dev->dev_private;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ dpu_cstate = to_dpu_crtc_state(crtc->state);
+
+ DPU_DEBUG("crtc:%d stop_req:%d core_clk:%llu\n",
+ crtc->base.id, stop_req, kms->perf.core_clk_rate);
+
+ old = &dpu_crtc->cur_perf;
+ new = &dpu_cstate->new_perf;
+
+ if (_dpu_core_perf_crtc_is_power_on(crtc) && !stop_req) {
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ /*
+ * cases for bus bandwidth update.
+ * 1. new bandwidth vote - "ab or ib vote" is higher
+ * than current vote for update request.
+ * 2. new bandwidth vote - "ab or ib vote" is lower
+ * than current vote at end of commit or stop.
+ */
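+			/*
+			 * For example (hypothetical numbers): with
+			 * params_changed set, an ab vote rising from 1 GB/s
+			 * to 2 GB/s is applied right away; at end of commit
+			 * (!params_changed), a vote falling back from 2 GB/s
+			 * to 1 GB/s is only applied then, so bandwidth is
+			 * never reduced while the frame still needs it.
+			 */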
+ if ((params_changed && ((new->bw_ctl[i] >
+ old->bw_ctl[i]) ||
+ (new->max_per_pipe_ib[i] >
+ old->max_per_pipe_ib[i]))) ||
+ (!params_changed && ((new->bw_ctl[i] <
+ old->bw_ctl[i]) ||
+ (new->max_per_pipe_ib[i] <
+ old->max_per_pipe_ib[i])))) {
+ DPU_DEBUG(
+ "crtc=%d p=%d new_bw=%llu,old_bw=%llu\n",
+ crtc->base.id, params_changed,
+ new->bw_ctl[i], old->bw_ctl[i]);
+ old->bw_ctl[i] = new->bw_ctl[i];
+ old->max_per_pipe_ib[i] =
+ new->max_per_pipe_ib[i];
+ update_bus |= BIT(i);
+ }
+ }
+
+ if ((params_changed &&
+ (new->core_clk_rate > old->core_clk_rate)) ||
+ (!params_changed &&
+ (new->core_clk_rate < old->core_clk_rate))) {
+ old->core_clk_rate = new->core_clk_rate;
+ update_clk = 1;
+ }
+ } else {
+ DPU_DEBUG("crtc=%d disable\n", crtc->base.id);
+ memset(old, 0, sizeof(*old));
+ memset(new, 0, sizeof(*new));
+ update_bus = ~0;
+ update_clk = 1;
+ }
+ trace_dpu_perf_crtc_update(crtc->base.id,
+ new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MNOC],
+ new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_LLCC],
+ new->bw_ctl[DPU_POWER_HANDLE_DBUS_ID_EBI],
+ new->core_clk_rate, stop_req,
+ update_bus, update_clk);
+
+ for (i = 0; i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ if (update_bus & BIT(i)) {
+ ret = _dpu_core_perf_crtc_update_bus(kms, crtc, i);
+ if (ret) {
+ DPU_ERROR("crtc-%d: failed to update bw vote for bus-%d\n",
+ crtc->base.id, i);
+ return ret;
+ }
+ }
+ }
+
+ /*
+ * Update the clock after bandwidth vote to ensure
+ * bandwidth is available before clock rate is increased.
+ */
+ if (update_clk) {
+ clk_rate = _dpu_core_perf_get_core_clk_rate(kms);
+
+ trace_dpu_core_perf_update_clk(kms->dev, stop_req, clk_rate);
+
+ ret = _dpu_core_perf_set_core_clk_rate(kms, clk_rate);
+ if (ret) {
+ DPU_ERROR("failed to set %s clock rate %llu\n",
+ kms->perf.core_clk->clk_name, clk_rate);
+ return ret;
+ }
+
+ kms->perf.core_clk_rate = clk_rate;
+ DPU_DEBUG("update clk rate = %lld HZ\n", clk_rate);
+ }
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+
+static ssize_t _dpu_core_perf_mode_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dpu_core_perf *perf = file->private_data;
+ struct dpu_perf_cfg *cfg = &perf->catalog->perf;
+ u32 perf_mode = 0;
+ char buf[10];
+
+ if (!perf)
+ return -ENODEV;
+
+ if (count >= sizeof(buf))
+ return -EFAULT;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = 0; /* end of string */
+
+ if (kstrtouint(buf, 0, &perf_mode))
+ return -EFAULT;
+
+ if (perf_mode >= DPU_PERF_MODE_MAX)
+ return -EFAULT;
+
+ if (perf_mode == DPU_PERF_MODE_FIXED) {
+ DRM_INFO("fix performance mode\n");
+ } else if (perf_mode == DPU_PERF_MODE_MINIMUM) {
+ /* run the driver with max clk and BW vote */
+ perf->perf_tune.min_core_clk = perf->max_core_clk_rate;
+ perf->perf_tune.min_bus_vote =
+ (u64) cfg->max_bw_high * 1000;
+ DRM_INFO("minimum performance mode\n");
+ } else if (perf_mode == DPU_PERF_MODE_NORMAL) {
+ /* reset the perf tune params to 0 */
+ perf->perf_tune.min_core_clk = 0;
+ perf->perf_tune.min_bus_vote = 0;
+ DRM_INFO("normal performance mode\n");
+ }
+ perf->perf_tune.mode = perf_mode;
+
+ return count;
+}
+
+static ssize_t _dpu_core_perf_mode_read(struct file *file,
+ char __user *buff, size_t count, loff_t *ppos)
+{
+ struct dpu_core_perf *perf = file->private_data;
+ int len = 0;
+ char buf[DPU_PERF_MODE_STRING_SIZE] = {'\0'};
+
+ if (!perf)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0; /* the end */
+
+ len = snprintf(buf, sizeof(buf),
+ "mode %d min_mdp_clk %llu min_bus_vote %llu\n",
+ perf->perf_tune.mode,
+ perf->perf_tune.min_core_clk,
+ perf->perf_tune.min_bus_vote);
+ if (len < 0 || len >= sizeof(buf))
+ return 0;
+
+ if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len; /* increase offset */
+
+ return len;
+}
+
+static const struct file_operations dpu_core_perf_mode_fops = {
+ .open = simple_open,
+ .read = _dpu_core_perf_mode_read,
+ .write = _dpu_core_perf_mode_write,
+};
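+
+/*
+ * Illustrative use from user space; the exact debugfs path is an assumption
+ * and depends on where the caller parents the "core_perf" directory:
+ *
+ *	echo 1 > /sys/kernel/debug/dri/0/core_perf/perf_mode	(minimum)
+ *	echo 2 > /sys/kernel/debug/dri/0/core_perf/perf_mode	(fixed)
+ *	echo 0 > /sys/kernel/debug/dri/0/core_perf/perf_mode	(normal)
+ */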
+
+static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
+{
+ debugfs_remove_recursive(perf->debugfs_root);
+ perf->debugfs_root = NULL;
+}
+
+int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
+ struct dentry *parent)
+{
+ struct dpu_mdss_cfg *catalog = perf->catalog;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ priv = perf->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid KMS reference\n");
+ return -EINVAL;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ perf->debugfs_root = debugfs_create_dir("core_perf", parent);
+ if (!perf->debugfs_root) {
+ DPU_ERROR("failed to create core perf debugfs\n");
+ return -EINVAL;
+ }
+
+ debugfs_create_u64("max_core_clk_rate", 0600, perf->debugfs_root,
+ &perf->max_core_clk_rate);
+ debugfs_create_u64("core_clk_rate", 0600, perf->debugfs_root,
+ &perf->core_clk_rate);
+ debugfs_create_u32("enable_bw_release", 0600, perf->debugfs_root,
+ (u32 *)&perf->enable_bw_release);
+ debugfs_create_u32("threshold_low", 0600, perf->debugfs_root,
+ (u32 *)&catalog->perf.max_bw_low);
+ debugfs_create_u32("threshold_high", 0600, perf->debugfs_root,
+ (u32 *)&catalog->perf.max_bw_high);
+ debugfs_create_u32("min_core_ib", 0600, perf->debugfs_root,
+ (u32 *)&catalog->perf.min_core_ib);
+ debugfs_create_u32("min_llcc_ib", 0600, perf->debugfs_root,
+ (u32 *)&catalog->perf.min_llcc_ib);
+ debugfs_create_u32("min_dram_ib", 0600, perf->debugfs_root,
+ (u32 *)&catalog->perf.min_dram_ib);
+ debugfs_create_file("perf_mode", 0600, perf->debugfs_root,
+ (u32 *)perf, &dpu_core_perf_mode_fops);
+ debugfs_create_u64("fix_core_clk_rate", 0600, perf->debugfs_root,
+ &perf->fix_core_clk_rate);
+ debugfs_create_u64("fix_core_ib_vote", 0600, perf->debugfs_root,
+ &perf->fix_core_ib_vote);
+ debugfs_create_u64("fix_core_ab_vote", 0600, perf->debugfs_root,
+ &perf->fix_core_ab_vote);
+
+ return 0;
+}
+#else
+static void dpu_core_perf_debugfs_destroy(struct dpu_core_perf *perf)
+{
+}
+
+int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
+ struct dentry *parent)
+{
+ return 0;
+}
+#endif
+
+void dpu_core_perf_destroy(struct dpu_core_perf *perf)
+{
+ if (!perf) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ dpu_core_perf_debugfs_destroy(perf);
+ perf->max_core_clk_rate = 0;
+ perf->core_clk = NULL;
+ perf->phandle = NULL;
+ perf->catalog = NULL;
+ perf->dev = NULL;
+}
+
+int dpu_core_perf_init(struct dpu_core_perf *perf,
+ struct drm_device *dev,
+ struct dpu_mdss_cfg *catalog,
+ struct dpu_power_handle *phandle,
+ struct dss_clk *core_clk)
+{
+ perf->dev = dev;
+ perf->catalog = catalog;
+ perf->phandle = phandle;
+ perf->core_clk = core_clk;
+
+ perf->max_core_clk_rate = core_clk->max_rate;
+ if (!perf->max_core_clk_rate) {
+ DPU_DEBUG("optional max core clk rate, use default\n");
+ perf->max_core_clk_rate = DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE;
+ }
+
+ return 0;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
new file mode 100644
index 000000000000..fbcbe0c7527a
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_core_perf.h
@@ -0,0 +1,133 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_CORE_PERF_H_
+#define _DPU_CORE_PERF_H_
+
+#include <linux/types.h>
+#include <linux/dcache.h>
+#include <linux/mutex.h>
+#include <drm/drm_crtc.h>
+
+#include "dpu_hw_catalog.h"
+#include "dpu_power_handle.h"
+
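+/* default maximum core clock rate, in Hz (412.5 MHz) */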
+#define DPU_PERF_DEFAULT_MAX_CORE_CLK_RATE 412500000
+
+/**
+ * struct dpu_core_perf_params - definition of performance parameters
+ * @max_per_pipe_ib: maximum instantaneous bandwidth request
+ * @bw_ctl: arbitrated bandwidth request
+ * @core_clk_rate: core clock rate request
+ */
+struct dpu_core_perf_params {
+ u64 max_per_pipe_ib[DPU_POWER_HANDLE_DBUS_ID_MAX];
+ u64 bw_ctl[DPU_POWER_HANDLE_DBUS_ID_MAX];
+ u64 core_clk_rate;
+};
+
+/**
+ * struct dpu_core_perf_tune - definition of performance tuning control
+ * @mode: performance mode
+ * @min_core_clk: minimum core clock
+ * @min_bus_vote: minimum bus vote
+ */
+struct dpu_core_perf_tune {
+ u32 mode;
+ u64 min_core_clk;
+ u64 min_bus_vote;
+};
+
+/**
+ * struct dpu_core_perf - definition of core performance context
+ * @dev: Pointer to drm device
+ * @debugfs_root: top level debug folder
+ * @catalog: Pointer to catalog configuration
+ * @phandle: Pointer to power handler
+ * @core_clk: Pointer to core clock structure
+ * @core_clk_rate: current core clock rate
+ * @max_core_clk_rate: maximum allowable core clock rate
+ * @perf_tune: debug control for performance tuning
+ * @enable_bw_release: debug control for bandwidth release
+ * @fix_core_clk_rate: fixed core clock request in Hz used in mode 2
+ * @fix_core_ib_vote: fixed core ib vote in bps used in mode 2
+ * @fix_core_ab_vote: fixed core ab vote in bps used in mode 2
+ */
+struct dpu_core_perf {
+ struct drm_device *dev;
+ struct dentry *debugfs_root;
+ struct dpu_mdss_cfg *catalog;
+ struct dpu_power_handle *phandle;
+ struct dss_clk *core_clk;
+ u64 core_clk_rate;
+ u64 max_core_clk_rate;
+ struct dpu_core_perf_tune perf_tune;
+ u32 enable_bw_release;
+ u64 fix_core_clk_rate;
+ u64 fix_core_ib_vote;
+ u64 fix_core_ab_vote;
+};
+
+/**
+ * dpu_core_perf_crtc_check - validate performance of the given crtc state
+ * @crtc: Pointer to crtc
+ * @state: Pointer to new crtc state
+ * return: zero if success, or error code otherwise
+ */
+int dpu_core_perf_crtc_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state);
+
+/**
+ * dpu_core_perf_crtc_update - update performance of the given crtc
+ * @crtc: Pointer to crtc
+ * @params_changed: true if crtc parameters are modified
+ * @stop_req: true if this is a stop request
+ * return: zero if success, or error code otherwise
+ */
+int dpu_core_perf_crtc_update(struct drm_crtc *crtc,
+ int params_changed, bool stop_req);
+
+/**
+ * dpu_core_perf_crtc_release_bw - release bandwidth of the given crtc
+ * @crtc: Pointer to crtc
+ */
+void dpu_core_perf_crtc_release_bw(struct drm_crtc *crtc);
+
+/**
+ * dpu_core_perf_destroy - destroy the given core performance context
+ * @perf: Pointer to core performance context
+ */
+void dpu_core_perf_destroy(struct dpu_core_perf *perf);
+
+/**
+ * dpu_core_perf_init - initialize the given core performance context
+ * @perf: Pointer to core performance context
+ * @dev: Pointer to drm device
+ * @catalog: Pointer to catalog
+ * @phandle: Pointer to power handle
+ * @core_clk: pointer to core clock
+ */
+int dpu_core_perf_init(struct dpu_core_perf *perf,
+ struct drm_device *dev,
+ struct dpu_mdss_cfg *catalog,
+ struct dpu_power_handle *phandle,
+ struct dss_clk *core_clk);
+
+/**
+ * dpu_core_perf_debugfs_init - initialize debugfs for core performance context
+ * @perf: Pointer to core performance context
+ * @parent: Pointer to parent debugfs directory
+ */
+int dpu_core_perf_debugfs_init(struct dpu_core_perf *perf,
+ struct dentry *parent);
+
+#endif /* _DPU_CORE_PERF_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
new file mode 100644
index 000000000000..80cbf75bc2ff
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.c
@@ -0,0 +1,2138 @@
+/*
+ * Copyright (c) 2014-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/sort.h>
+#include <linux/debugfs.h>
+#include <linux/ktime.h>
+#include <drm/drm_mode.h>
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_flip_work.h>
+#include <drm/drm_rect.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_crtc.h"
+#include "dpu_plane.h"
+#include "dpu_encoder.h"
+#include "dpu_vbif.h"
+#include "dpu_power_handle.h"
+#include "dpu_core_perf.h"
+#include "dpu_trace.h"
+
+#define DPU_DRM_BLEND_OP_NOT_DEFINED 0
+#define DPU_DRM_BLEND_OP_OPAQUE 1
+#define DPU_DRM_BLEND_OP_PREMULTIPLIED 2
+#define DPU_DRM_BLEND_OP_COVERAGE 3
+#define DPU_DRM_BLEND_OP_MAX 4
+
+/* layer mixer index on dpu_crtc */
+#define LEFT_MIXER 0
+#define RIGHT_MIXER 1
+
+#define MISR_BUFF_SIZE 256
+
+static inline struct dpu_kms *_dpu_crtc_get_kms(struct drm_crtc *crtc)
+{
+ struct msm_drm_private *priv;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid crtc\n");
+ return NULL;
+ }
+ priv = crtc->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid kms\n");
+ return NULL;
+ }
+
+ return to_dpu_kms(priv->kms);
+}
+
+static inline int _dpu_crtc_power_enable(struct dpu_crtc *dpu_crtc, bool enable)
+{
+ struct drm_crtc *crtc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!dpu_crtc) {
+ DPU_ERROR("invalid dpu crtc\n");
+ return -EINVAL;
+ }
+
+ crtc = &dpu_crtc->base;
+ if (!crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid drm device\n");
+ return -EINVAL;
+ }
+
+ priv = crtc->dev->dev_private;
+ if (!priv->kms) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ if (enable)
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ else
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ return 0;
+}
+
+/**
+ * _dpu_crtc_rp_to_crtc - get crtc from resource pool object
+ * @rp: Pointer to resource pool
+ * return: Pointer to drm crtc if success; null otherwise
+ */
+static struct drm_crtc *_dpu_crtc_rp_to_crtc(struct dpu_crtc_respool *rp)
+{
+ if (!rp)
+ return NULL;
+
+ return container_of(rp, struct dpu_crtc_state, rp)->base.crtc;
+}
+
+/**
+ * _dpu_crtc_rp_reclaim - reclaim unused, or all if forced, resources in pool
+ * @rp: Pointer to resource pool
+ * @force: True to reclaim all resources; otherwise, reclaim only unused ones
+ * return: None
+ */
+static void _dpu_crtc_rp_reclaim(struct dpu_crtc_respool *rp, bool force)
+{
+ struct dpu_crtc_res *res, *next;
+ struct drm_crtc *crtc;
+
+ crtc = _dpu_crtc_rp_to_crtc(rp);
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ DPU_DEBUG("crtc%d.%u %s\n", crtc->base.id, rp->sequence_id,
+ force ? "destroy" : "free_unused");
+
+ list_for_each_entry_safe(res, next, &rp->res_list, list) {
+ if (!force && !(res->flags & DPU_CRTC_RES_FLAG_FREE))
+ continue;
+ DPU_DEBUG("crtc%d.%u reclaim res:0x%x/0x%llx/%pK/%d\n",
+ crtc->base.id, rp->sequence_id,
+ res->type, res->tag, res->val,
+ atomic_read(&res->refcount));
+ list_del(&res->list);
+ if (res->ops.put)
+ res->ops.put(res->val);
+ kfree(res);
+ }
+}
+
+/**
+ * _dpu_crtc_rp_free_unused - free unused resource in pool
+ * @rp: Pointer to resource pool
+ * return: none
+ */
+static void _dpu_crtc_rp_free_unused(struct dpu_crtc_respool *rp)
+{
+ mutex_lock(rp->rp_lock);
+ _dpu_crtc_rp_reclaim(rp, false);
+ mutex_unlock(rp->rp_lock);
+}
+
+/**
+ * _dpu_crtc_rp_destroy - destroy resource pool
+ * @rp: Pointer to resource pool
+ * return: None
+ */
+static void _dpu_crtc_rp_destroy(struct dpu_crtc_respool *rp)
+{
+ mutex_lock(rp->rp_lock);
+ list_del_init(&rp->rp_list);
+ _dpu_crtc_rp_reclaim(rp, true);
+ mutex_unlock(rp->rp_lock);
+}
+
+/**
+ * _dpu_crtc_hw_blk_get - get callback for hardware block
+ * @val: Resource handle
+ * @type: Resource type
+ * @tag: Search tag for given resource
+ * return: Resource handle
+ */
+static void *_dpu_crtc_hw_blk_get(void *val, u32 type, u64 tag)
+{
+ DPU_DEBUG("res:%d/0x%llx/%pK\n", type, tag, val);
+ return dpu_hw_blk_get(val, type, tag);
+}
+
+/**
+ * _dpu_crtc_hw_blk_put - put callback for hardware block
+ * @val: Resource handle
+ * return: None
+ */
+static void _dpu_crtc_hw_blk_put(void *val)
+{
+ DPU_DEBUG("res://%pK\n", val);
+ dpu_hw_blk_put(val);
+}
+
+/**
+ * _dpu_crtc_rp_duplicate - duplicate resource pool and reset reference count
+ * @rp: Pointer to original resource pool
+ * @dup_rp: Pointer to duplicated resource pool
+ * return: None
+ */
+static void _dpu_crtc_rp_duplicate(struct dpu_crtc_respool *rp,
+ struct dpu_crtc_respool *dup_rp)
+{
+ struct dpu_crtc_res *res, *dup_res;
+ struct drm_crtc *crtc;
+
+ if (!rp || !dup_rp || !rp->rp_head) {
+ DPU_ERROR("invalid resource pool\n");
+ return;
+ }
+
+ crtc = _dpu_crtc_rp_to_crtc(rp);
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ DPU_DEBUG("crtc%d.%u duplicate\n", crtc->base.id, rp->sequence_id);
+
+ mutex_lock(rp->rp_lock);
+ dup_rp->sequence_id = rp->sequence_id + 1;
+ INIT_LIST_HEAD(&dup_rp->res_list);
+ dup_rp->ops = rp->ops;
+ list_for_each_entry(res, &rp->res_list, list) {
+ dup_res = kzalloc(sizeof(struct dpu_crtc_res), GFP_KERNEL);
+ if (!dup_res) {
+ mutex_unlock(rp->rp_lock);
+ return;
+ }
+ INIT_LIST_HEAD(&dup_res->list);
+ atomic_set(&dup_res->refcount, 0);
+ dup_res->type = res->type;
+ dup_res->tag = res->tag;
+ dup_res->val = res->val;
+ dup_res->ops = res->ops;
+ dup_res->flags = DPU_CRTC_RES_FLAG_FREE;
+ DPU_DEBUG("crtc%d.%u dup res:0x%x/0x%llx/%pK/%d\n",
+ crtc->base.id, dup_rp->sequence_id,
+ dup_res->type, dup_res->tag, dup_res->val,
+ atomic_read(&dup_res->refcount));
+ list_add_tail(&dup_res->list, &dup_rp->res_list);
+ if (dup_res->ops.get)
+ dup_res->ops.get(dup_res->val, 0, -1);
+ }
+
+ dup_rp->rp_lock = rp->rp_lock;
+ dup_rp->rp_head = rp->rp_head;
+ INIT_LIST_HEAD(&dup_rp->rp_list);
+ list_add_tail(&dup_rp->rp_list, rp->rp_head);
+ mutex_unlock(rp->rp_lock);
+}
+
+/**
+ * _dpu_crtc_rp_reset - reset resource pool after allocation
+ * @rp: Pointer to original resource pool
+ * @rp_lock: Pointer to serialization resource pool lock
+ * @rp_head: Pointer to crtc resource pool head
+ * return: None
+ */
+static void _dpu_crtc_rp_reset(struct dpu_crtc_respool *rp,
+ struct mutex *rp_lock, struct list_head *rp_head)
+{
+ if (!rp || !rp_lock || !rp_head) {
+ DPU_ERROR("invalid resource pool\n");
+ return;
+ }
+
+ mutex_lock(rp_lock);
+ rp->rp_lock = rp_lock;
+ rp->rp_head = rp_head;
+ INIT_LIST_HEAD(&rp->rp_list);
+ rp->sequence_id = 0;
+ INIT_LIST_HEAD(&rp->res_list);
+ rp->ops.get = _dpu_crtc_hw_blk_get;
+ rp->ops.put = _dpu_crtc_hw_blk_put;
+ list_add_tail(&rp->rp_list, rp->rp_head);
+ mutex_unlock(rp_lock);
+}
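+
+/*
+ * Illustrative lifetime of the resource pool, as assumed from the helpers
+ * above: _dpu_crtc_rp_reset() attaches a fresh pool to the crtc-level list;
+ * duplicating a crtc state calls _dpu_crtc_rp_duplicate(), which bumps
+ * sequence_id and re-acquires every block with the FREE flag set;
+ * _dpu_crtc_rp_free_unused() then reclaims the blocks the new state never
+ * claimed, and _dpu_crtc_rp_destroy() reclaims everything when the state
+ * goes away.
+ */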
+
+static void dpu_crtc_destroy(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+ DPU_DEBUG("\n");
+
+ if (!crtc)
+ return;
+
+ dpu_crtc->phandle = NULL;
+
+ drm_crtc_cleanup(crtc);
+ mutex_destroy(&dpu_crtc->crtc_lock);
+ kfree(dpu_crtc);
+}
+
+static void _dpu_crtc_setup_blend_cfg(struct dpu_crtc_mixer *mixer,
+ struct dpu_plane_state *pstate)
+{
+ struct dpu_hw_mixer *lm = mixer->hw_lm;
+
+ /* default to opaque blending */
+ lm->ops.setup_blend_config(lm, pstate->stage, 0XFF, 0,
+ DPU_BLEND_FG_ALPHA_FG_CONST |
+ DPU_BLEND_BG_ALPHA_BG_CONST);
+}
+
+static void _dpu_crtc_program_lm_output_roi(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *crtc_state;
+ int lm_idx, lm_horiz_position;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ crtc_state = to_dpu_crtc_state(crtc->state);
+
+ lm_horiz_position = 0;
+ for (lm_idx = 0; lm_idx < dpu_crtc->num_mixers; lm_idx++) {
+ const struct drm_rect *lm_roi = &crtc_state->lm_bounds[lm_idx];
+ struct dpu_hw_mixer *hw_lm = dpu_crtc->mixers[lm_idx].hw_lm;
+ struct dpu_hw_mixer_cfg cfg;
+
+ if (!lm_roi || !drm_rect_visible(lm_roi))
+ continue;
+
+ cfg.out_width = drm_rect_width(lm_roi);
+ cfg.out_height = drm_rect_height(lm_roi);
+ cfg.right_mixer = lm_horiz_position++;
+ cfg.flags = 0;
+ hw_lm->ops.setup_mixer_out(hw_lm, &cfg);
+ }
+}
+
+static void _dpu_crtc_blend_setup_mixer(struct drm_crtc *crtc,
+ struct dpu_crtc *dpu_crtc, struct dpu_crtc_mixer *mixer)
+{
+ struct drm_plane *plane;
+ struct drm_framebuffer *fb;
+ struct drm_plane_state *state;
+ struct dpu_crtc_state *cstate;
+ struct dpu_plane_state *pstate = NULL;
+ struct dpu_format *format;
+ struct dpu_hw_ctl *ctl;
+ struct dpu_hw_mixer *lm;
+ struct dpu_hw_stage_cfg *stage_cfg;
+
+ u32 flush_mask;
+ uint32_t stage_idx, lm_idx;
+ int zpos_cnt[DPU_STAGE_MAX + 1] = { 0 };
+ bool bg_alpha_enable = false;
+
+ if (!dpu_crtc || !mixer) {
+ DPU_ERROR("invalid dpu_crtc or mixer\n");
+ return;
+ }
+
+ ctl = mixer->hw_ctl;
+ lm = mixer->hw_lm;
+ stage_cfg = &dpu_crtc->stage_cfg;
+ cstate = to_dpu_crtc_state(crtc->state);
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ state = plane->state;
+ if (!state)
+ continue;
+
+ pstate = to_dpu_plane_state(state);
+ fb = state->fb;
+
+ dpu_plane_get_ctl_flush(plane, ctl, &flush_mask);
+
+ DPU_DEBUG("crtc %d stage:%d - plane %d sspp %d fb %d\n",
+ crtc->base.id,
+ pstate->stage,
+ plane->base.id,
+ dpu_plane_pipe(plane) - SSPP_VIG0,
+ state->fb ? state->fb->base.id : -1);
+
+ format = to_dpu_format(msm_framebuffer_format(pstate->base.fb));
+ if (!format) {
+ DPU_ERROR("invalid format\n");
+ return;
+ }
+
+ if (pstate->stage == DPU_STAGE_BASE && format->alpha_enable)
+ bg_alpha_enable = true;
+
+ stage_idx = zpos_cnt[pstate->stage]++;
+ stage_cfg->stage[pstate->stage][stage_idx] =
+ dpu_plane_pipe(plane);
+ stage_cfg->multirect_index[pstate->stage][stage_idx] =
+ pstate->multirect_index;
+
+ trace_dpu_crtc_setup_mixer(DRMID(crtc), DRMID(plane),
+ state, pstate, stage_idx,
+ dpu_plane_pipe(plane) - SSPP_VIG0,
+ format->base.pixel_format,
+ fb ? fb->modifier : 0);
+
+ /* blend config update */
+ for (lm_idx = 0; lm_idx < dpu_crtc->num_mixers; lm_idx++) {
+ _dpu_crtc_setup_blend_cfg(mixer + lm_idx, pstate);
+
+ mixer[lm_idx].flush_mask |= flush_mask;
+
+ if (bg_alpha_enable && !format->alpha_enable)
+ mixer[lm_idx].mixer_op_mode = 0;
+ else
+ mixer[lm_idx].mixer_op_mode |=
+ 1 << pstate->stage;
+ }
+ }
+
+ _dpu_crtc_program_lm_output_roi(crtc);
+}
+
+/**
+ * _dpu_crtc_blend_setup - configure crtc mixers
+ * @crtc: Pointer to drm crtc structure
+ */
+static void _dpu_crtc_blend_setup(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *dpu_crtc_state;
+ struct dpu_crtc_mixer *mixer;
+ struct dpu_hw_ctl *ctl;
+ struct dpu_hw_mixer *lm;
+
+ int i;
+
+ if (!crtc)
+ return;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ dpu_crtc_state = to_dpu_crtc_state(crtc->state);
+ mixer = dpu_crtc->mixers;
+
+ DPU_DEBUG("%s\n", dpu_crtc->name);
+
+ if (dpu_crtc->num_mixers > CRTC_DUAL_MIXERS) {
+ DPU_ERROR("invalid number mixers: %d\n", dpu_crtc->num_mixers);
+ return;
+ }
+
+ for (i = 0; i < dpu_crtc->num_mixers; i++) {
+ if (!mixer[i].hw_lm || !mixer[i].hw_ctl) {
+ DPU_ERROR("invalid lm or ctl assigned to mixer\n");
+ return;
+ }
+ mixer[i].mixer_op_mode = 0;
+ mixer[i].flush_mask = 0;
+ if (mixer[i].hw_ctl->ops.clear_all_blendstages)
+ mixer[i].hw_ctl->ops.clear_all_blendstages(
+ mixer[i].hw_ctl);
+ }
+
+ /* initialize stage cfg */
+ memset(&dpu_crtc->stage_cfg, 0, sizeof(struct dpu_hw_stage_cfg));
+
+ _dpu_crtc_blend_setup_mixer(crtc, dpu_crtc, mixer);
+
+ for (i = 0; i < dpu_crtc->num_mixers; i++) {
+ ctl = mixer[i].hw_ctl;
+ lm = mixer[i].hw_lm;
+
+ lm->ops.setup_alpha_out(lm, mixer[i].mixer_op_mode);
+
+ mixer[i].flush_mask |= ctl->ops.get_bitmask_mixer(ctl,
+ mixer[i].hw_lm->idx);
+
+ /* stage config flush mask */
+ ctl->ops.update_pending_flush(ctl, mixer[i].flush_mask);
+
+ DPU_DEBUG("lm %d, op_mode 0x%X, ctl %d, flush mask 0x%x\n",
+ mixer[i].hw_lm->idx - LM_0,
+ mixer[i].mixer_op_mode,
+ ctl->idx - CTL_0,
+ mixer[i].flush_mask);
+
+ ctl->ops.setup_blendstage(ctl, mixer[i].hw_lm->idx,
+ &dpu_crtc->stage_cfg);
+ }
+}
+
+/**
+ * _dpu_crtc_complete_flip - signal pending page_flip events
+ * Any pending vblank events are added to the vblank_event_list
+ * so that the next vblank interrupt shall signal them.
+ * However PAGE_FLIP events are not handled through the vblank_event_list.
+ * This API signals any pending PAGE_FLIP events requested through
+ * DRM_IOCTL_MODE_PAGE_FLIP and cached in dpu_crtc->event.
+ * @crtc: Pointer to drm crtc structure
+ */
+static void _dpu_crtc_complete_flip(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct drm_device *dev = crtc->dev;
+ unsigned long flags;
+
+ spin_lock_irqsave(&dev->event_lock, flags);
+ if (dpu_crtc->event) {
+ DRM_DEBUG_VBL("%s: send event: %pK\n", dpu_crtc->name,
+ dpu_crtc->event);
+ trace_dpu_crtc_complete_flip(DRMID(crtc));
+ drm_crtc_send_vblank_event(crtc, dpu_crtc->event);
+ dpu_crtc->event = NULL;
+ }
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+}
+
+enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+
+ if (!crtc || !crtc->dev) {
+ DPU_ERROR("invalid crtc\n");
+ return INTF_MODE_NONE;
+ }
+
+ drm_for_each_encoder(encoder, crtc->dev)
+ if (encoder->crtc == crtc)
+ return dpu_encoder_get_intf_mode(encoder);
+
+ return INTF_MODE_NONE;
+}
+
+static void dpu_crtc_vblank_cb(void *data)
+{
+ struct drm_crtc *crtc = (struct drm_crtc *)data;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+
+ /* keep statistics on vblank callback - with auto reset via debugfs */
+ if (ktime_compare(dpu_crtc->vblank_cb_time, ktime_set(0, 0)) == 0)
+ dpu_crtc->vblank_cb_time = ktime_get();
+ else
+ dpu_crtc->vblank_cb_count++;
+ _dpu_crtc_complete_flip(crtc);
+ drm_crtc_handle_vblank(crtc);
+ trace_dpu_crtc_vblank_cb(DRMID(crtc));
+}
+
+static void dpu_crtc_frame_event_work(struct kthread_work *work)
+{
+ struct msm_drm_private *priv;
+ struct dpu_crtc_frame_event *fevent;
+ struct drm_crtc *crtc;
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_kms *dpu_kms;
+ unsigned long flags;
+ bool frame_done = false;
+
+ if (!work) {
+ DPU_ERROR("invalid work handle\n");
+ return;
+ }
+
+ fevent = container_of(work, struct dpu_crtc_frame_event, work);
+ if (!fevent->crtc || !fevent->crtc->state) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ crtc = fevent->crtc;
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ dpu_kms = _dpu_crtc_get_kms(crtc);
+ if (!dpu_kms) {
+ DPU_ERROR("invalid kms handle\n");
+ return;
+ }
+ priv = dpu_kms->dev->dev_private;
+ DPU_ATRACE_BEGIN("crtc_frame_event");
+
+ DRM_DEBUG_KMS("crtc%d event:%u ts:%lld\n", crtc->base.id, fevent->event,
+ ktime_to_ns(fevent->ts));
+
+ if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
+ | DPU_ENCODER_FRAME_EVENT_ERROR
+ | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
+
+ if (atomic_read(&dpu_crtc->frame_pending) < 1) {
+ /* this should not happen */
+ DRM_ERROR("crtc%d ev:%u ts:%lld frame_pending:%d\n",
+ crtc->base.id,
+ fevent->event,
+ ktime_to_ns(fevent->ts),
+ atomic_read(&dpu_crtc->frame_pending));
+ } else if (atomic_dec_return(&dpu_crtc->frame_pending) == 0) {
+ /* release bandwidth and other resources */
+ trace_dpu_crtc_frame_event_done(DRMID(crtc),
+ fevent->event);
+ dpu_core_perf_crtc_release_bw(crtc);
+ } else {
+ trace_dpu_crtc_frame_event_more_pending(DRMID(crtc),
+ fevent->event);
+ }
+
+ if (fevent->event & DPU_ENCODER_FRAME_EVENT_DONE)
+ dpu_core_perf_crtc_update(crtc, 0, false);
+
+ if (fevent->event & (DPU_ENCODER_FRAME_EVENT_DONE
+ | DPU_ENCODER_FRAME_EVENT_ERROR))
+ frame_done = true;
+ }
+
+ if (fevent->event & DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)
+ DPU_ERROR("crtc%d ts:%lld received panel dead event\n",
+ crtc->base.id, ktime_to_ns(fevent->ts));
+
+ if (frame_done)
+ complete_all(&dpu_crtc->frame_done_comp);
+
+ spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
+ list_add_tail(&fevent->list, &dpu_crtc->frame_event_list);
+ spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
+ DPU_ATRACE_END("crtc_frame_event");
+}
+
+/*
+ * dpu_crtc_frame_event_cb - crtc frame event callback API. The CRTC module
+ * registers this API with the encoder for all frame event callbacks such as
+ * frame_error, frame_done, idle_timeout, etc. The encoder may issue these
+ * events from different contexts - IRQ, user thread, commit_thread, etc. Each
+ * event should be carefully reviewed and processed in the proper task context
+ * to avoid scheduling delay or to properly manage the IRQ context's bottom
+ * half processing.
+ */
+static void dpu_crtc_frame_event_cb(void *data, u32 event)
+{
+ struct drm_crtc *crtc = (struct drm_crtc *)data;
+ struct dpu_crtc *dpu_crtc;
+ struct msm_drm_private *priv;
+ struct dpu_crtc_frame_event *fevent;
+ unsigned long flags;
+ u32 crtc_id;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ /* Nothing to do on idle event */
+ if (event & DPU_ENCODER_FRAME_EVENT_IDLE)
+ return;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ priv = crtc->dev->dev_private;
+ crtc_id = drm_crtc_index(crtc);
+
+ trace_dpu_crtc_frame_event_cb(DRMID(crtc), event);
+
+ spin_lock_irqsave(&dpu_crtc->spin_lock, flags);
+ fevent = list_first_entry_or_null(&dpu_crtc->frame_event_list,
+ struct dpu_crtc_frame_event, list);
+ if (fevent)
+ list_del_init(&fevent->list);
+ spin_unlock_irqrestore(&dpu_crtc->spin_lock, flags);
+
+ if (!fevent) {
+ DRM_ERROR("crtc%d event %d overflow\n", crtc->base.id, event);
+ return;
+ }
+
+ fevent->event = event;
+ fevent->crtc = crtc;
+ fevent->ts = ktime_get();
+ kthread_queue_work(&priv->event_thread[crtc_id].worker, &fevent->work);
+}
+
+void dpu_crtc_complete_commit(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ if (!crtc || !crtc->state) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+ trace_dpu_crtc_complete_commit(DRMID(crtc));
+}
+
+static void _dpu_crtc_setup_mixer_for_encoder(
+ struct drm_crtc *crtc,
+ struct drm_encoder *enc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_kms *dpu_kms = _dpu_crtc_get_kms(crtc);
+ struct dpu_rm *rm = &dpu_kms->rm;
+ struct dpu_crtc_mixer *mixer;
+ struct dpu_hw_ctl *last_valid_ctl = NULL;
+ int i;
+ struct dpu_rm_hw_iter lm_iter, ctl_iter;
+
+ dpu_rm_init_hw_iter(&lm_iter, enc->base.id, DPU_HW_BLK_LM);
+ dpu_rm_init_hw_iter(&ctl_iter, enc->base.id, DPU_HW_BLK_CTL);
+
+ /* Set up all the mixers and ctls reserved by this encoder */
+ for (i = dpu_crtc->num_mixers; i < ARRAY_SIZE(dpu_crtc->mixers); i++) {
+ mixer = &dpu_crtc->mixers[i];
+
+ if (!dpu_rm_get_hw(rm, &lm_iter))
+ break;
+ mixer->hw_lm = (struct dpu_hw_mixer *)lm_iter.hw;
+
+ /* CTL may be <= LMs, if <, multiple LMs controlled by 1 CTL */
+ if (!dpu_rm_get_hw(rm, &ctl_iter)) {
+ DPU_DEBUG("no ctl assigned to lm %d, using previous\n",
+ mixer->hw_lm->idx - LM_0);
+ mixer->hw_ctl = last_valid_ctl;
+ } else {
+ mixer->hw_ctl = (struct dpu_hw_ctl *)ctl_iter.hw;
+ last_valid_ctl = mixer->hw_ctl;
+ }
+
+ /* Shouldn't happen, mixers are always >= ctls */
+ if (!mixer->hw_ctl) {
+ DPU_ERROR("no valid ctls found for lm %d\n",
+ mixer->hw_lm->idx - LM_0);
+ return;
+ }
+
+ mixer->encoder = enc;
+
+ dpu_crtc->num_mixers++;
+ DPU_DEBUG("setup mixer %d: lm %d\n",
+ i, mixer->hw_lm->idx - LM_0);
+ DPU_DEBUG("setup mixer %d: ctl %d\n",
+ i, mixer->hw_ctl->idx - CTL_0);
+ }
+}
+
+static void _dpu_crtc_setup_mixers(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct drm_encoder *enc;
+
+ dpu_crtc->num_mixers = 0;
+ dpu_crtc->mixers_swapped = false;
+ memset(dpu_crtc->mixers, 0, sizeof(dpu_crtc->mixers));
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ /* Check for mixers on all encoders attached to this crtc */
+ list_for_each_entry(enc, &crtc->dev->mode_config.encoder_list, head) {
+ if (enc->crtc != crtc)
+ continue;
+
+ _dpu_crtc_setup_mixer_for_encoder(crtc, enc);
+ }
+
+ mutex_unlock(&dpu_crtc->crtc_lock);
+}
+
+static void _dpu_crtc_setup_lm_bounds(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate;
+ struct drm_display_mode *adj_mode;
+ u32 crtc_split_width;
+ int i;
+
+ if (!crtc || !state) {
+ DPU_ERROR("invalid args\n");
+ return;
+ }
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(state);
+
+ adj_mode = &state->adjusted_mode;
+ crtc_split_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, adj_mode);
+
+ for (i = 0; i < dpu_crtc->num_mixers; i++) {
+ struct drm_rect *r = &cstate->lm_bounds[i];
+ r->x1 = crtc_split_width * i;
+ r->y1 = 0;
+ r->x2 = r->x1 + crtc_split_width;
+ r->y2 = dpu_crtc_get_mixer_height(dpu_crtc, cstate, adj_mode);
+
+ trace_dpu_crtc_setup_lm_bounds(DRMID(crtc), i, r);
+ }
+
+ drm_mode_debug_printmodeline(adj_mode);
+}
+
+static void dpu_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct drm_encoder *encoder;
+ struct drm_device *dev;
+ unsigned long flags;
+ struct dpu_crtc_smmu_state_data *smmu_state;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ if (!crtc->state->enable) {
+ DPU_DEBUG("crtc%d -> enable %d, skip atomic_begin\n",
+ crtc->base.id, crtc->state->enable);
+ return;
+ }
+
+ DPU_DEBUG("crtc%d\n", crtc->base.id);
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ dev = crtc->dev;
+ smmu_state = &dpu_crtc->smmu_state;
+
+ if (!dpu_crtc->num_mixers) {
+ _dpu_crtc_setup_mixers(crtc);
+ _dpu_crtc_setup_lm_bounds(crtc, crtc->state);
+ }
+
+ if (dpu_crtc->event) {
+ WARN_ON(dpu_crtc->event);
+ } else {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ dpu_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ /* encoder will trigger pending mask now */
+ dpu_encoder_trigger_kickoff_pending(encoder);
+ }
+
+ /*
+ * If no mixers have been allocated in dpu_crtc_atomic_check(),
+ * it means we are trying to flush a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ if (unlikely(!dpu_crtc->num_mixers))
+ return;
+
+ _dpu_crtc_blend_setup(crtc);
+
+ /*
+ * PP_DONE irq is only used by command mode for now.
+	 * It is better to request the pending interrupt before the FLUSH and
+	 * START triggers to make sure no pp_done irq is missed.
+ * This is safe because no pp_done will happen before SW trigger
+ * in command mode.
+ */
+}
+
+static void dpu_crtc_atomic_flush(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct drm_device *dev;
+ struct drm_plane *plane;
+ struct msm_drm_private *priv;
+ struct msm_drm_thread *event_thread;
+ unsigned long flags;
+ struct dpu_crtc_state *cstate;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ if (!crtc->state->enable) {
+ DPU_DEBUG("crtc%d -> enable %d, skip atomic_flush\n",
+ crtc->base.id, crtc->state->enable);
+ return;
+ }
+
+ DPU_DEBUG("crtc%d\n", crtc->base.id);
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(crtc->state);
+ dev = crtc->dev;
+ priv = dev->dev_private;
+
+ if (crtc->index >= ARRAY_SIZE(priv->event_thread)) {
+ DPU_ERROR("invalid crtc index[%d]\n", crtc->index);
+ return;
+ }
+
+ event_thread = &priv->event_thread[crtc->index];
+
+ if (dpu_crtc->event) {
+ DPU_DEBUG("already received dpu_crtc->event\n");
+ } else {
+ spin_lock_irqsave(&dev->event_lock, flags);
+ dpu_crtc->event = crtc->state->event;
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&dev->event_lock, flags);
+ }
+
+ /*
+	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
+ * it means we are trying to flush a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ if (unlikely(!dpu_crtc->num_mixers))
+ return;
+
+ /*
+ * For planes without commit update, drm framework will not add
+ * those planes to current state since hardware update is not
+ * required. However, if those planes were power collapsed since
+ * last commit cycle, driver has to restore the hardware state
+ * of those planes explicitly here prior to plane flush.
+ */
+ drm_atomic_crtc_for_each_plane(plane, crtc)
+ dpu_plane_restore(plane);
+
+ /* update performance setting before crtc kickoff */
+ dpu_core_perf_crtc_update(crtc, 1, false);
+
+ /*
+ * Final plane updates: Give each plane a chance to complete all
+ * required writes/flushing before crtc's "flush
+ * everything" call below.
+ */
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ if (dpu_crtc->smmu_state.transition_error)
+ dpu_plane_set_error(plane, true);
+ dpu_plane_flush(plane);
+ }
+
+ /* Kickoff will be scheduled by outer layer */
+}
+
+/**
+ * dpu_crtc_destroy_state - state destroy hook
+ * @crtc: drm CRTC
+ * @state: CRTC state object to release
+ */
+static void dpu_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate;
+
+ if (!crtc || !state) {
+ DPU_ERROR("invalid argument(s)\n");
+ return;
+ }
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(state);
+
+ DPU_DEBUG("crtc%d\n", crtc->base.id);
+
+ _dpu_crtc_rp_destroy(&cstate->rp);
+
+ __drm_atomic_helper_crtc_destroy_state(state);
+
+ kfree(cstate);
+}
+
+static int _dpu_crtc_wait_for_frame_done(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ int ret, rc = 0;
+
+ if (!crtc) {
+ DPU_ERROR("invalid argument\n");
+ return -EINVAL;
+ }
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ if (!atomic_read(&dpu_crtc->frame_pending)) {
+ DPU_DEBUG("no frames pending\n");
+ return 0;
+ }
+
+ DPU_ATRACE_BEGIN("frame done completion wait");
+ ret = wait_for_completion_timeout(&dpu_crtc->frame_done_comp,
+ msecs_to_jiffies(DPU_FRAME_DONE_TIMEOUT));
+ if (!ret) {
+ DRM_ERROR("frame done wait timed out, ret:%d\n", ret);
+ rc = -ETIMEDOUT;
+ }
+ DPU_ATRACE_END("frame done completion wait");
+
+ return rc;
+}
+
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct drm_device *dev;
+ struct dpu_crtc *dpu_crtc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct dpu_crtc_state *cstate;
+ int ret;
+
+ if (!crtc) {
+ DPU_ERROR("invalid argument\n");
+ return;
+ }
+ dev = crtc->dev;
+ dpu_crtc = to_dpu_crtc(crtc);
+ dpu_kms = _dpu_crtc_get_kms(crtc);
+
+ if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) {
+ DPU_ERROR("invalid argument\n");
+ return;
+ }
+
+ priv = dpu_kms->dev->dev_private;
+ cstate = to_dpu_crtc_state(crtc->state);
+
+ /*
+	 * If no mixers have been allocated in dpu_crtc_atomic_check(),
+ * it means we are trying to start a CRTC whose state is disabled:
+ * nothing else needs to be done.
+ */
+ if (unlikely(!dpu_crtc->num_mixers))
+ return;
+
+ DPU_ATRACE_BEGIN("crtc_commit");
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ struct dpu_encoder_kickoff_params params = { 0 };
+
+ if (encoder->crtc != crtc)
+ continue;
+
+ /*
+ * Encoder will flush/start now, unless it has a tx pending.
+ * If so, it may delay and flush at an irq event (e.g. ppdone)
+ */
+ dpu_encoder_prepare_for_kickoff(encoder, &params);
+ }
+
+ /* wait for frame_event_done completion */
+ DPU_ATRACE_BEGIN("wait_for_frame_done_event");
+ ret = _dpu_crtc_wait_for_frame_done(crtc);
+ DPU_ATRACE_END("wait_for_frame_done_event");
+ if (ret) {
+ DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
+ crtc->base.id,
+ atomic_read(&dpu_crtc->frame_pending));
+ goto end;
+ }
+
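+	/*
+	 * frame_pending counts kickoffs that have not yet signalled
+	 * frame_done; the 0 -> 1 transition is where bandwidth and other
+	 * resources are acquired for the display pipeline.
+	 */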
+	if (atomic_inc_return(&dpu_crtc->frame_pending) == 1) {
+		/* acquire bandwidth and other resources */
+		DPU_DEBUG("crtc%d first commit\n", crtc->base.id);
+	} else {
+		DPU_DEBUG("crtc%d commit\n", crtc->base.id);
+	}
+
+ dpu_crtc->play_count++;
+
+ dpu_vbif_clear_errors(dpu_kms);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ dpu_encoder_kickoff(encoder);
+ }
+
+end:
+ reinit_completion(&dpu_crtc->frame_done_comp);
+ DPU_ATRACE_END("crtc_commit");
+}
+
+/**
+ * _dpu_crtc_vblank_enable_no_lock - update power resource and vblank request
+ * @dpu_crtc: Pointer to dpu crtc structure
+ * @enable: Whether to enable/disable vblanks
+ *
+ * Return: error code
+ */
+static int _dpu_crtc_vblank_enable_no_lock(
+ struct dpu_crtc *dpu_crtc, bool enable)
+{
+ struct drm_device *dev;
+ struct drm_crtc *crtc;
+ struct drm_encoder *enc;
+
+ if (!dpu_crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+
+ crtc = &dpu_crtc->base;
+ dev = crtc->dev;
+
+ if (enable) {
+ int ret;
+
+ /* drop lock since power crtc cb may try to re-acquire lock */
+ mutex_unlock(&dpu_crtc->crtc_lock);
+ ret = _dpu_crtc_power_enable(dpu_crtc, true);
+ mutex_lock(&dpu_crtc->crtc_lock);
+ if (ret)
+ return ret;
+
+ list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+ if (enc->crtc != crtc)
+ continue;
+
+ trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
+ DRMID(enc), enable,
+ dpu_crtc);
+
+ dpu_encoder_register_vblank_callback(enc,
+ dpu_crtc_vblank_cb, (void *)crtc);
+ }
+ } else {
+ list_for_each_entry(enc, &dev->mode_config.encoder_list, head) {
+ if (enc->crtc != crtc)
+ continue;
+
+ trace_dpu_crtc_vblank_enable(DRMID(&dpu_crtc->base),
+ DRMID(enc), enable,
+ dpu_crtc);
+
+ dpu_encoder_register_vblank_callback(enc, NULL, NULL);
+ }
+
+ /* drop lock since power crtc cb may try to re-acquire lock */
+ mutex_unlock(&dpu_crtc->crtc_lock);
+ _dpu_crtc_power_enable(dpu_crtc, false);
+ mutex_lock(&dpu_crtc->crtc_lock);
+ }
+
+ return 0;
+}
+
+/**
+ * _dpu_crtc_set_suspend - notify crtc of suspend enable/disable
+ * @crtc: Pointer to drm crtc object
+ * @enable: true to enable suspend, false to indicate resume
+ */
+static void _dpu_crtc_set_suspend(struct drm_crtc *crtc, bool enable)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ int ret = 0;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+ dpu_crtc = to_dpu_crtc(crtc);
+ priv = crtc->dev->dev_private;
+
+ if (!priv->kms) {
+ DPU_ERROR("invalid crtc kms\n");
+ return;
+ }
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ DRM_DEBUG_KMS("crtc%d suspend = %d\n", crtc->base.id, enable);
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+
+ /*
+ * If the vblank is enabled, release a power reference on suspend
+ * and take it back during resume (if it is still enabled).
+ */
+ trace_dpu_crtc_set_suspend(DRMID(&dpu_crtc->base), enable, dpu_crtc);
+ if (dpu_crtc->suspend == enable)
+ DPU_DEBUG("crtc%d suspend already set to %d, ignoring update\n",
+ crtc->base.id, enable);
+ else if (dpu_crtc->enabled && dpu_crtc->vblank_requested) {
+ ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, !enable);
+ if (ret)
+ DPU_ERROR("%s vblank enable failed: %d\n",
+ dpu_crtc->name, ret);
+ }
+
+ dpu_crtc->suspend = enable;
+ mutex_unlock(&dpu_crtc->crtc_lock);
+}
+
+/**
+ * dpu_crtc_duplicate_state - state duplicate hook
+ * @crtc: Pointer to drm crtc structure
+ * Return: Pointer to new drm_crtc_state structure
+ */
+static struct drm_crtc_state *dpu_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate, *old_cstate;
+
+ if (!crtc || !crtc->state) {
+ DPU_ERROR("invalid argument(s)\n");
+ return NULL;
+ }
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ old_cstate = to_dpu_crtc_state(crtc->state);
+ cstate = kmemdup(old_cstate, sizeof(*old_cstate), GFP_KERNEL);
+ if (!cstate) {
+ DPU_ERROR("failed to allocate state\n");
+ return NULL;
+ }
+
+ /* duplicate base helper */
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &cstate->base);
+
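+	/*
+	 * Carry the resource pool into the new state; per dpu_crtc_respool,
+	 * the pool's sequence_id is incremented on each duplication.
+	 */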
+ _dpu_crtc_rp_duplicate(&old_cstate->rp, &cstate->rp);
+
+ return &cstate->base;
+}
+
+/**
+ * dpu_crtc_reset - reset hook for CRTCs
+ * Resets the atomic state for @crtc by freeing the state pointer (which might
+ * be NULL, e.g. at driver load time) and allocating a new empty state object.
+ * @crtc: Pointer to drm crtc structure
+ */
+static void dpu_crtc_reset(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+
+ /* revert suspend actions, if necessary */
+ if (dpu_kms_is_suspend_state(crtc->dev))
+ _dpu_crtc_set_suspend(crtc, false);
+
+ /* remove previous state, if present */
+ if (crtc->state) {
+ dpu_crtc_destroy_state(crtc, crtc->state);
+		crtc->state = NULL;
+ }
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = kzalloc(sizeof(*cstate), GFP_KERNEL);
+ if (!cstate) {
+ DPU_ERROR("failed to allocate state\n");
+ return;
+ }
+
+ _dpu_crtc_rp_reset(&cstate->rp, &dpu_crtc->rp_lock,
+ &dpu_crtc->rp_head);
+
+ cstate->base.crtc = crtc;
+ crtc->state = &cstate->base;
+}
+
+static void dpu_crtc_handle_power_event(u32 event_type, void *arg)
+{
+ struct drm_crtc *crtc = arg;
+ struct dpu_crtc *dpu_crtc;
+ struct drm_encoder *encoder;
+ struct dpu_crtc_mixer *m;
+ u32 i, misr_status;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+
+ trace_dpu_crtc_handle_power_event(DRMID(crtc), event_type);
+
+ switch (event_type) {
+ case DPU_POWER_EVENT_POST_ENABLE:
+ /* restore encoder; crtc will be programmed during commit */
+ drm_for_each_encoder(encoder, crtc->dev) {
+ if (encoder->crtc != crtc)
+ continue;
+
+ dpu_encoder_virt_restore(encoder);
+ }
+
+ for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+ m = &dpu_crtc->mixers[i];
+ if (!m->hw_lm || !m->hw_lm->ops.setup_misr ||
+ !dpu_crtc->misr_enable)
+ continue;
+
+ m->hw_lm->ops.setup_misr(m->hw_lm, true,
+ dpu_crtc->misr_frame_count);
+ }
+ break;
+ case DPU_POWER_EVENT_PRE_DISABLE:
+ for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+ m = &dpu_crtc->mixers[i];
+ if (!m->hw_lm || !m->hw_lm->ops.collect_misr ||
+ !dpu_crtc->misr_enable)
+ continue;
+
+ misr_status = m->hw_lm->ops.collect_misr(m->hw_lm);
+ dpu_crtc->misr_data[i] = misr_status ? misr_status :
+ dpu_crtc->misr_data[i];
+ }
+ break;
+ case DPU_POWER_EVENT_POST_DISABLE:
+		/*
+		 * Nothing to do. All the planes on the CRTC will be
+		 * programmed for every frame.
+		 */
+ break;
+ default:
+ DPU_DEBUG("event:%d not handled\n", event_type);
+ break;
+ }
+
+ mutex_unlock(&dpu_crtc->crtc_lock);
+}
+
+static void dpu_crtc_disable(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_state *cstate;
+ struct drm_display_mode *mode;
+ struct drm_encoder *encoder;
+ struct msm_drm_private *priv;
+ int ret;
+ unsigned long flags;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private || !crtc->state) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(crtc->state);
+ mode = &cstate->base.adjusted_mode;
+ priv = crtc->dev->dev_private;
+
+ DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
+
+ if (dpu_kms_is_suspend_state(crtc->dev))
+ _dpu_crtc_set_suspend(crtc, true);
+
+ /* Disable/save vblank irq handling */
+ drm_crtc_vblank_off(crtc);
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+
+ /* wait for frame_event_done completion */
+ if (_dpu_crtc_wait_for_frame_done(crtc))
+ DPU_ERROR("crtc%d wait for frame done failed;frame_pending%d\n",
+ crtc->base.id,
+ atomic_read(&dpu_crtc->frame_pending));
+
+ trace_dpu_crtc_disable(DRMID(crtc), false, dpu_crtc);
+ if (dpu_crtc->enabled && !dpu_crtc->suspend &&
+ dpu_crtc->vblank_requested) {
+ ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, false);
+ if (ret)
+ DPU_ERROR("%s vblank enable failed: %d\n",
+ dpu_crtc->name, ret);
+ }
+ dpu_crtc->enabled = false;
+
+ if (atomic_read(&dpu_crtc->frame_pending)) {
+ trace_dpu_crtc_disable_frame_pending(DRMID(crtc),
+ atomic_read(&dpu_crtc->frame_pending));
+ dpu_core_perf_crtc_release_bw(crtc);
+ atomic_set(&dpu_crtc->frame_pending, 0);
+ }
+
+ dpu_core_perf_crtc_update(crtc, 0, true);
+
+ drm_for_each_encoder(encoder, crtc->dev) {
+ if (encoder->crtc != crtc)
+ continue;
+ dpu_encoder_register_frame_event_callback(encoder, NULL, NULL);
+ }
+
+ if (dpu_crtc->power_event)
+ dpu_power_handle_unregister_event(dpu_crtc->phandle,
+ dpu_crtc->power_event);
+
+ memset(dpu_crtc->mixers, 0, sizeof(dpu_crtc->mixers));
+ dpu_crtc->num_mixers = 0;
+ dpu_crtc->mixers_swapped = false;
+
+ /* disable clk & bw control until clk & bw properties are set */
+ cstate->bw_control = false;
+ cstate->bw_split_vote = false;
+
+ mutex_unlock(&dpu_crtc->crtc_lock);
+
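+	/*
+	 * Send any pending vblank event now that the CRTC is off, so
+	 * userspace is not left waiting on a flip that will never finish.
+	 */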
+ if (crtc->state->event && !crtc->state->active) {
+ spin_lock_irqsave(&crtc->dev->event_lock, flags);
+ drm_crtc_send_vblank_event(crtc, crtc->state->event);
+ crtc->state->event = NULL;
+ spin_unlock_irqrestore(&crtc->dev->event_lock, flags);
+ }
+}
+
+static void dpu_crtc_enable(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct drm_encoder *encoder;
+ struct msm_drm_private *priv;
+ int ret;
+
+ if (!crtc || !crtc->dev || !crtc->dev->dev_private) {
+ DPU_ERROR("invalid crtc\n");
+ return;
+ }
+ priv = crtc->dev->dev_private;
+
+ DRM_DEBUG_KMS("crtc%d\n", crtc->base.id);
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ drm_for_each_encoder(encoder, crtc->dev) {
+ if (encoder->crtc != crtc)
+ continue;
+ dpu_encoder_register_frame_event_callback(encoder,
+ dpu_crtc_frame_event_cb, (void *)crtc);
+ }
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ trace_dpu_crtc_enable(DRMID(crtc), true, dpu_crtc);
+ if (!dpu_crtc->enabled && !dpu_crtc->suspend &&
+ dpu_crtc->vblank_requested) {
+ ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, true);
+ if (ret)
+ DPU_ERROR("%s vblank enable failed: %d\n",
+ dpu_crtc->name, ret);
+ }
+ dpu_crtc->enabled = true;
+
+ mutex_unlock(&dpu_crtc->crtc_lock);
+
+ /* Enable/restore vblank irq handling */
+ drm_crtc_vblank_on(crtc);
+
+ dpu_crtc->power_event = dpu_power_handle_register_event(
+ dpu_crtc->phandle,
+ DPU_POWER_EVENT_POST_ENABLE | DPU_POWER_EVENT_POST_DISABLE |
+ DPU_POWER_EVENT_PRE_DISABLE,
+ dpu_crtc_handle_power_event, crtc, dpu_crtc->name);
+}
+
+struct plane_state {
+ struct dpu_plane_state *dpu_pstate;
+ const struct drm_plane_state *drm_pstate;
+ int stage;
+ u32 pipe_id;
+};
+
+static int dpu_crtc_atomic_check(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct plane_state *pstates;
+ struct dpu_crtc_state *cstate;
+
+ const struct drm_plane_state *pstate;
+ struct drm_plane *plane;
+ struct drm_display_mode *mode;
+
+ int cnt = 0, rc = 0, mixer_width, i, z_pos;
+
+ struct dpu_multirect_plane_states multirect_plane[DPU_STAGE_MAX * 2];
+ int multirect_count = 0;
+ const struct drm_plane_state *pipe_staged[SSPP_MAX];
+ int left_zpos_cnt = 0, right_zpos_cnt = 0;
+ struct drm_rect crtc_rect = { 0 };
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+
+	pstates = kcalloc(DPU_STAGE_MAX * 4, sizeof(*pstates), GFP_KERNEL);
+	if (!pstates)
+		return -ENOMEM;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ cstate = to_dpu_crtc_state(state);
+
+ if (!state->enable || !state->active) {
+ DPU_DEBUG("crtc%d -> enable %d, active %d, skip atomic_check\n",
+ crtc->base.id, state->enable, state->active);
+ goto end;
+ }
+
+ mode = &state->adjusted_mode;
+ DPU_DEBUG("%s: check", dpu_crtc->name);
+
+ /* force a full mode set if active state changed */
+ if (state->active_changed)
+ state->mode_changed = true;
+
+ memset(pipe_staged, 0, sizeof(pipe_staged));
+
+ mixer_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, mode);
+
+ _dpu_crtc_setup_lm_bounds(crtc, state);
+
+ crtc_rect.x2 = mode->hdisplay;
+ crtc_rect.y2 = mode->vdisplay;
+
+ /* get plane state for all drm planes associated with crtc state */
+ drm_atomic_crtc_state_for_each_plane_state(plane, pstate, state) {
+ struct drm_rect dst, clip = crtc_rect;
+
+		if (IS_ERR_OR_NULL(pstate)) {
+			rc = pstate ? PTR_ERR(pstate) : -EINVAL;
+ DPU_ERROR("%s: failed to get plane%d state, %d\n",
+ dpu_crtc->name, plane->base.id, rc);
+ goto end;
+ }
+ if (cnt >= DPU_STAGE_MAX * 4)
+ continue;
+
+ pstates[cnt].dpu_pstate = to_dpu_plane_state(pstate);
+ pstates[cnt].drm_pstate = pstate;
+ pstates[cnt].stage = pstate->normalized_zpos;
+ pstates[cnt].pipe_id = dpu_plane_pipe(plane);
+
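+		/*
+		 * The same SSPP staged twice on this crtc is a multirect
+		 * candidate: pair its two plane states as r0/r1 for the
+		 * multirect validation further below.
+		 */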
+ if (pipe_staged[pstates[cnt].pipe_id]) {
+ multirect_plane[multirect_count].r0 =
+ pipe_staged[pstates[cnt].pipe_id];
+ multirect_plane[multirect_count].r1 = pstate;
+ multirect_count++;
+
+ pipe_staged[pstates[cnt].pipe_id] = NULL;
+ } else {
+ pipe_staged[pstates[cnt].pipe_id] = pstate;
+ }
+
+ cnt++;
+
+ dst = drm_plane_state_dest(pstate);
+ if (!drm_rect_intersect(&clip, &dst) ||
+ !drm_rect_equals(&clip, &dst)) {
+ DPU_ERROR("invalid vertical/horizontal destination\n");
+ DPU_ERROR("display: " DRM_RECT_FMT " plane: "
+ DRM_RECT_FMT "\n", DRM_RECT_ARG(&crtc_rect),
+ DRM_RECT_ARG(&dst));
+ rc = -E2BIG;
+ goto end;
+ }
+ }
+
+ for (i = 1; i < SSPP_MAX; i++) {
+ if (pipe_staged[i]) {
+ dpu_plane_clear_multirect(pipe_staged[i]);
+
+ if (is_dpu_plane_virtual(pipe_staged[i]->plane)) {
+ DPU_ERROR(
+ "r1 only virt plane:%d not supported\n",
+ pipe_staged[i]->plane->base.id);
+ rc = -EINVAL;
+ goto end;
+ }
+ }
+ }
+
+ z_pos = -1;
+ for (i = 0; i < cnt; i++) {
+ /* reset counts at every new blend stage */
+ if (pstates[i].stage != z_pos) {
+ left_zpos_cnt = 0;
+ right_zpos_cnt = 0;
+ z_pos = pstates[i].stage;
+ }
+
+ /* verify z_pos setting before using it */
+ if (z_pos >= DPU_STAGE_MAX - DPU_STAGE_0) {
+ DPU_ERROR("> %d plane stages assigned\n",
+ DPU_STAGE_MAX - DPU_STAGE_0);
+ rc = -EINVAL;
+ goto end;
+ } else if (pstates[i].drm_pstate->crtc_x < mixer_width) {
+ if (left_zpos_cnt == 2) {
+ DPU_ERROR("> 2 planes @ stage %d on left\n",
+ z_pos);
+ rc = -EINVAL;
+ goto end;
+ }
+ left_zpos_cnt++;
+
+ } else {
+ if (right_zpos_cnt == 2) {
+ DPU_ERROR("> 2 planes @ stage %d on right\n",
+ z_pos);
+ rc = -EINVAL;
+ goto end;
+ }
+ right_zpos_cnt++;
+ }
+
+ pstates[i].dpu_pstate->stage = z_pos + DPU_STAGE_0;
+ DPU_DEBUG("%s: zpos %d", dpu_crtc->name, z_pos);
+ }
+
+ for (i = 0; i < multirect_count; i++) {
+ if (dpu_plane_validate_multirect_v2(&multirect_plane[i])) {
+ DPU_ERROR(
+ "multirect validation failed for planes (%d - %d)\n",
+ multirect_plane[i].r0->plane->base.id,
+ multirect_plane[i].r1->plane->base.id);
+ rc = -EINVAL;
+ goto end;
+ }
+ }
+
+ rc = dpu_core_perf_crtc_check(crtc, state);
+ if (rc) {
+ DPU_ERROR("crtc%d failed performance check %d\n",
+ crtc->base.id, rc);
+ goto end;
+ }
+
+ /* validate source split:
+ * use pstates sorted by stage to check planes on same stage
+	 * we assume that all pipes are in source split so it's valid to compare
+ * without taking into account left/right mixer placement
+ */
+ for (i = 1; i < cnt; i++) {
+ struct plane_state *prv_pstate, *cur_pstate;
+ struct drm_rect left_rect, right_rect;
+ int32_t left_pid, right_pid;
+ int32_t stage;
+
+ prv_pstate = &pstates[i - 1];
+ cur_pstate = &pstates[i];
+ if (prv_pstate->stage != cur_pstate->stage)
+ continue;
+
+ stage = cur_pstate->stage;
+
+ left_pid = prv_pstate->dpu_pstate->base.plane->base.id;
+ left_rect = drm_plane_state_dest(prv_pstate->drm_pstate);
+
+ right_pid = cur_pstate->dpu_pstate->base.plane->base.id;
+ right_rect = drm_plane_state_dest(cur_pstate->drm_pstate);
+
+ if (right_rect.x1 < left_rect.x1) {
+ swap(left_pid, right_pid);
+ swap(left_rect, right_rect);
+ }
+
+ /**
+ * - planes are enumerated in pipe-priority order such that
+ * planes with lower drm_id must be left-most in a shared
+ * blend-stage when using source split.
+ * - planes in source split must be contiguous in width
+ * - planes in source split must have same dest yoff and height
+ */
+ if (right_pid < left_pid) {
+ DPU_ERROR(
+ "invalid src split cfg. priority mismatch. stage: %d left: %d right: %d\n",
+ stage, left_pid, right_pid);
+ rc = -EINVAL;
+ goto end;
+ } else if (right_rect.x1 != drm_rect_width(&left_rect)) {
+ DPU_ERROR("non-contiguous coordinates for src split. "
+ "stage: %d left: " DRM_RECT_FMT " right: "
+ DRM_RECT_FMT "\n", stage,
+ DRM_RECT_ARG(&left_rect),
+ DRM_RECT_ARG(&right_rect));
+ rc = -EINVAL;
+ goto end;
+ } else if (left_rect.y1 != right_rect.y1 ||
+ drm_rect_height(&left_rect) != drm_rect_height(&right_rect)) {
+ DPU_ERROR("source split at stage: %d. invalid "
+ "yoff/height: left: " DRM_RECT_FMT " right: "
+ DRM_RECT_FMT "\n", stage,
+ DRM_RECT_ARG(&left_rect),
+ DRM_RECT_ARG(&right_rect));
+ rc = -EINVAL;
+ goto end;
+ }
+ }
+
+end:
+ _dpu_crtc_rp_free_unused(&cstate->rp);
+ kfree(pstates);
+ return rc;
+}
+
+int dpu_crtc_vblank(struct drm_crtc *crtc, bool en)
+{
+ struct dpu_crtc *dpu_crtc;
+ int ret;
+
+ if (!crtc) {
+ DPU_ERROR("invalid crtc\n");
+ return -EINVAL;
+ }
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ trace_dpu_crtc_vblank(DRMID(&dpu_crtc->base), en, dpu_crtc);
+ if (dpu_crtc->enabled && !dpu_crtc->suspend) {
+ ret = _dpu_crtc_vblank_enable_no_lock(dpu_crtc, en);
+ if (ret)
+ DPU_ERROR("%s vblank enable failed: %d\n",
+ dpu_crtc->name, ret);
+ }
+ dpu_crtc->vblank_requested = en;
+ mutex_unlock(&dpu_crtc->crtc_lock);
+
+ return 0;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_debugfs_status_show(struct seq_file *s, void *data)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_plane_state *pstate = NULL;
+ struct dpu_crtc_mixer *m;
+
+ struct drm_crtc *crtc;
+ struct drm_plane *plane;
+ struct drm_display_mode *mode;
+ struct drm_framebuffer *fb;
+ struct drm_plane_state *state;
+ struct dpu_crtc_state *cstate;
+
+ int i, out_width;
+
+ if (!s || !s->private)
+ return -EINVAL;
+
+ dpu_crtc = s->private;
+ crtc = &dpu_crtc->base;
+ cstate = to_dpu_crtc_state(crtc->state);
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ mode = &crtc->state->adjusted_mode;
+ out_width = dpu_crtc_get_mixer_width(dpu_crtc, cstate, mode);
+
+ seq_printf(s, "crtc:%d width:%d height:%d\n", crtc->base.id,
+ mode->hdisplay, mode->vdisplay);
+
+ seq_puts(s, "\n");
+
+ for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+ m = &dpu_crtc->mixers[i];
+ if (!m->hw_lm)
+ seq_printf(s, "\tmixer[%d] has no lm\n", i);
+ else if (!m->hw_ctl)
+ seq_printf(s, "\tmixer[%d] has no ctl\n", i);
+ else
+ seq_printf(s, "\tmixer:%d ctl:%d width:%d height:%d\n",
+ m->hw_lm->idx - LM_0, m->hw_ctl->idx - CTL_0,
+ out_width, mode->vdisplay);
+ }
+
+ seq_puts(s, "\n");
+
+ drm_atomic_crtc_for_each_plane(plane, crtc) {
+ pstate = to_dpu_plane_state(plane->state);
+ state = plane->state;
+
+ if (!pstate || !state)
+ continue;
+
+ seq_printf(s, "\tplane:%u stage:%d\n", plane->base.id,
+ pstate->stage);
+
+ if (plane->state->fb) {
+ fb = plane->state->fb;
+
+ seq_printf(s, "\tfb:%d image format:%4.4s wxh:%ux%u ",
+ fb->base.id, (char *) &fb->format->format,
+ fb->width, fb->height);
+ for (i = 0; i < ARRAY_SIZE(fb->format->cpp); ++i)
+ seq_printf(s, "cpp[%d]:%u ",
+ i, fb->format->cpp[i]);
+ seq_puts(s, "\n\t");
+
+ seq_printf(s, "modifier:%8llu ", fb->modifier);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\t");
+ for (i = 0; i < ARRAY_SIZE(fb->pitches); i++)
+ seq_printf(s, "pitches[%d]:%8u ", i,
+ fb->pitches[i]);
+ seq_puts(s, "\n");
+
+ seq_puts(s, "\t");
+ for (i = 0; i < ARRAY_SIZE(fb->offsets); i++)
+ seq_printf(s, "offsets[%d]:%8u ", i,
+ fb->offsets[i]);
+ seq_puts(s, "\n");
+ }
+
+ seq_printf(s, "\tsrc_x:%4d src_y:%4d src_w:%4d src_h:%4d\n",
+ state->src_x, state->src_y, state->src_w, state->src_h);
+
+ seq_printf(s, "\tdst x:%4d dst_y:%4d dst_w:%4d dst_h:%4d\n",
+ state->crtc_x, state->crtc_y, state->crtc_w,
+ state->crtc_h);
+ seq_printf(s, "\tmultirect: mode: %d index: %d\n",
+ pstate->multirect_mode, pstate->multirect_index);
+
+ seq_puts(s, "\n");
+ }
+ if (dpu_crtc->vblank_cb_count) {
+ ktime_t diff = ktime_sub(ktime_get(), dpu_crtc->vblank_cb_time);
+ s64 diff_ms = ktime_to_ms(diff);
+ s64 fps = diff_ms ? div_s64(
+ dpu_crtc->vblank_cb_count * 1000, diff_ms) : 0;
+
+ seq_printf(s,
+ "vblank fps:%lld count:%u total:%llums total_framecount:%llu\n",
+ fps, dpu_crtc->vblank_cb_count,
+ ktime_to_ms(diff), dpu_crtc->play_count);
+
+ /* reset time & count for next measurement */
+ dpu_crtc->vblank_cb_count = 0;
+ dpu_crtc->vblank_cb_time = ktime_set(0, 0);
+ }
+
+ seq_printf(s, "vblank_enable:%d\n", dpu_crtc->vblank_requested);
+
+ mutex_unlock(&dpu_crtc->crtc_lock);
+
+ return 0;
+}
+
+static int _dpu_debugfs_status_open(struct inode *inode, struct file *file)
+{
+ return single_open(file, _dpu_debugfs_status_show, inode->i_private);
+}
+
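+/*
+ * misr_data debugfs write handler: expects "<enable> <frame_count>",
+ * e.g. `echo "1 8" > .../misr_data` (format per the sscanf below).
+ */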
+static ssize_t _dpu_crtc_misr_setup(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_mixer *m;
+ int i = 0, rc;
+ char buf[MISR_BUFF_SIZE + 1];
+ u32 frame_count, enable;
+ size_t buff_copy;
+
+ if (!file || !file->private_data)
+ return -EINVAL;
+
+ dpu_crtc = file->private_data;
+ buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
+ if (copy_from_user(buf, user_buf, buff_copy)) {
+ DPU_ERROR("buffer copy failed\n");
+ return -EINVAL;
+ }
+
+ buf[buff_copy] = 0; /* end of string */
+
+ if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
+ return -EINVAL;
+
+ rc = _dpu_crtc_power_enable(dpu_crtc, true);
+ if (rc)
+ return rc;
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ dpu_crtc->misr_enable = enable;
+ dpu_crtc->misr_frame_count = frame_count;
+ for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+ dpu_crtc->misr_data[i] = 0;
+ m = &dpu_crtc->mixers[i];
+ if (!m->hw_lm || !m->hw_lm->ops.setup_misr)
+ continue;
+
+ m->hw_lm->ops.setup_misr(m->hw_lm, enable, frame_count);
+ }
+ mutex_unlock(&dpu_crtc->crtc_lock);
+ _dpu_crtc_power_enable(dpu_crtc, false);
+
+ return count;
+}
+
+static ssize_t _dpu_crtc_misr_read(struct file *file,
+ char __user *user_buff, size_t count, loff_t *ppos)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_crtc_mixer *m;
+ int i = 0, rc;
+ u32 misr_status;
+ ssize_t len = 0;
+ char buf[MISR_BUFF_SIZE + 1] = {'\0'};
+
+ if (*ppos)
+ return 0;
+
+ if (!file || !file->private_data)
+ return -EINVAL;
+
+ dpu_crtc = file->private_data;
+ rc = _dpu_crtc_power_enable(dpu_crtc, true);
+ if (rc)
+ return rc;
+
+ mutex_lock(&dpu_crtc->crtc_lock);
+ if (!dpu_crtc->misr_enable) {
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+ "disabled\n");
+ goto buff_check;
+ }
+
+ for (i = 0; i < dpu_crtc->num_mixers; ++i) {
+ m = &dpu_crtc->mixers[i];
+ if (!m->hw_lm || !m->hw_lm->ops.collect_misr)
+ continue;
+
+ misr_status = m->hw_lm->ops.collect_misr(m->hw_lm);
+ dpu_crtc->misr_data[i] = misr_status ? misr_status :
+ dpu_crtc->misr_data[i];
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len, "lm idx:%d\n",
+ m->hw_lm->idx - LM_0);
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n",
+ dpu_crtc->misr_data[i]);
+ }
+
+buff_check:
+ if (count <= len) {
+ len = 0;
+ goto end;
+ }
+
+ if (copy_to_user(user_buff, buf, len)) {
+ len = -EFAULT;
+ goto end;
+ }
+
+ *ppos += len; /* increase offset */
+
+end:
+ mutex_unlock(&dpu_crtc->crtc_lock);
+ _dpu_crtc_power_enable(dpu_crtc, false);
+ return len;
+}
+
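+/*
+ * Given a <prefix>_show() seq_file handler, emit the matching
+ * <prefix>_open() and <prefix>_fops definitions (used for
+ * dpu_crtc_debugfs_state below).
+ */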
+#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
+static int __prefix ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __prefix ## _show, inode->i_private); \
+} \
+static const struct file_operations __prefix ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __prefix ## _open, \
+ .release = single_release, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+}
+
+static int dpu_crtc_debugfs_state_show(struct seq_file *s, void *v)
+{
+ struct drm_crtc *crtc = (struct drm_crtc *) s->private;
+ struct dpu_crtc *dpu_crtc = to_dpu_crtc(crtc);
+ struct dpu_crtc_res *res;
+ struct dpu_crtc_respool *rp;
+ int i;
+
+ seq_printf(s, "client type: %d\n", dpu_crtc_get_client_type(crtc));
+ seq_printf(s, "intf_mode: %d\n", dpu_crtc_get_intf_mode(crtc));
+ seq_printf(s, "core_clk_rate: %llu\n",
+ dpu_crtc->cur_perf.core_clk_rate);
+ for (i = DPU_POWER_HANDLE_DBUS_ID_MNOC;
+ i < DPU_POWER_HANDLE_DBUS_ID_MAX; i++) {
+ seq_printf(s, "bw_ctl[%s]: %llu\n",
+ dpu_power_handle_get_dbus_name(i),
+ dpu_crtc->cur_perf.bw_ctl[i]);
+ seq_printf(s, "max_per_pipe_ib[%s]: %llu\n",
+ dpu_power_handle_get_dbus_name(i),
+ dpu_crtc->cur_perf.max_per_pipe_ib[i]);
+ }
+
+ mutex_lock(&dpu_crtc->rp_lock);
+ list_for_each_entry(rp, &dpu_crtc->rp_head, rp_list) {
+ seq_printf(s, "rp.%d: ", rp->sequence_id);
+ list_for_each_entry(res, &rp->res_list, list)
+ seq_printf(s, "0x%x/0x%llx/%pK/%d ",
+ res->type, res->tag, res->val,
+ atomic_read(&res->refcount));
+ seq_puts(s, "\n");
+ }
+ mutex_unlock(&dpu_crtc->rp_lock);
+
+ return 0;
+}
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_crtc_debugfs_state);
+
+static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+ struct dpu_kms *dpu_kms;
+
+ static const struct file_operations debugfs_status_fops = {
+ .open = _dpu_debugfs_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ };
+ static const struct file_operations debugfs_misr_fops = {
+ .open = simple_open,
+ .read = _dpu_crtc_misr_read,
+ .write = _dpu_crtc_misr_setup,
+ };
+
+ if (!crtc)
+ return -EINVAL;
+ dpu_crtc = to_dpu_crtc(crtc);
+
+ dpu_kms = _dpu_crtc_get_kms(crtc);
+ if (!dpu_kms)
+ return -EINVAL;
+
+ dpu_crtc->debugfs_root = debugfs_create_dir(dpu_crtc->name,
+ crtc->dev->primary->debugfs_root);
+ if (!dpu_crtc->debugfs_root)
+ return -ENOMEM;
+
+ /* don't error check these */
+ debugfs_create_file("status", 0400,
+ dpu_crtc->debugfs_root,
+ dpu_crtc, &debugfs_status_fops);
+ debugfs_create_file("state", 0600,
+ dpu_crtc->debugfs_root,
+ &dpu_crtc->base,
+ &dpu_crtc_debugfs_state_fops);
+ debugfs_create_file("misr_data", 0600, dpu_crtc->debugfs_root,
+ dpu_crtc, &debugfs_misr_fops);
+
+ return 0;
+}
+
+static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+
+ if (!crtc)
+ return;
+ dpu_crtc = to_dpu_crtc(crtc);
+ debugfs_remove_recursive(dpu_crtc->debugfs_root);
+}
+#else
+static int _dpu_crtc_init_debugfs(struct drm_crtc *crtc)
+{
+ return 0;
+}
+
+static void _dpu_crtc_destroy_debugfs(struct drm_crtc *crtc)
+{
+}
+#endif /* CONFIG_DEBUG_FS */
+
+static int dpu_crtc_late_register(struct drm_crtc *crtc)
+{
+ return _dpu_crtc_init_debugfs(crtc);
+}
+
+static void dpu_crtc_early_unregister(struct drm_crtc *crtc)
+{
+ _dpu_crtc_destroy_debugfs(crtc);
+}
+
+static const struct drm_crtc_funcs dpu_crtc_funcs = {
+ .set_config = drm_atomic_helper_set_config,
+ .destroy = dpu_crtc_destroy,
+ .page_flip = drm_atomic_helper_page_flip,
+ .reset = dpu_crtc_reset,
+ .atomic_duplicate_state = dpu_crtc_duplicate_state,
+ .atomic_destroy_state = dpu_crtc_destroy_state,
+ .late_register = dpu_crtc_late_register,
+ .early_unregister = dpu_crtc_early_unregister,
+};
+
+static const struct drm_crtc_helper_funcs dpu_crtc_helper_funcs = {
+ .disable = dpu_crtc_disable,
+ .atomic_enable = dpu_crtc_enable,
+ .atomic_check = dpu_crtc_atomic_check,
+ .atomic_begin = dpu_crtc_atomic_begin,
+ .atomic_flush = dpu_crtc_atomic_flush,
+};
+
+/* initialize crtc */
+struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane)
+{
+ struct drm_crtc *crtc = NULL;
+ struct dpu_crtc *dpu_crtc = NULL;
+ struct msm_drm_private *priv = NULL;
+ struct dpu_kms *kms = NULL;
+ int i;
+
+ priv = dev->dev_private;
+ kms = to_dpu_kms(priv->kms);
+
+ dpu_crtc = kzalloc(sizeof(*dpu_crtc), GFP_KERNEL);
+ if (!dpu_crtc)
+ return ERR_PTR(-ENOMEM);
+
+ crtc = &dpu_crtc->base;
+ crtc->dev = dev;
+
+ mutex_init(&dpu_crtc->crtc_lock);
+ spin_lock_init(&dpu_crtc->spin_lock);
+ atomic_set(&dpu_crtc->frame_pending, 0);
+
+ mutex_init(&dpu_crtc->rp_lock);
+ INIT_LIST_HEAD(&dpu_crtc->rp_head);
+
+ init_completion(&dpu_crtc->frame_done_comp);
+
+ INIT_LIST_HEAD(&dpu_crtc->frame_event_list);
+
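+	/* preallocate the frame events and park them on the free list */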
+ for (i = 0; i < ARRAY_SIZE(dpu_crtc->frame_events); i++) {
+ INIT_LIST_HEAD(&dpu_crtc->frame_events[i].list);
+ list_add(&dpu_crtc->frame_events[i].list,
+ &dpu_crtc->frame_event_list);
+ kthread_init_work(&dpu_crtc->frame_events[i].work,
+ dpu_crtc_frame_event_work);
+ }
+
+ drm_crtc_init_with_planes(dev, crtc, plane, NULL, &dpu_crtc_funcs,
+ NULL);
+
+ drm_crtc_helper_add(crtc, &dpu_crtc_helper_funcs);
+ plane->crtc = crtc;
+
+ /* save user friendly CRTC name for later */
+ snprintf(dpu_crtc->name, DPU_CRTC_NAME_SIZE, "crtc%u", crtc->base.id);
+
+ /* initialize event handling */
+ spin_lock_init(&dpu_crtc->event_lock);
+
+ dpu_crtc->phandle = &kms->phandle;
+
+ DPU_DEBUG("%s: successfully initialized crtc\n", dpu_crtc->name);
+ return crtc;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
new file mode 100644
index 000000000000..e87109e608e9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_crtc.h
@@ -0,0 +1,423 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DPU_CRTC_H_
+#define _DPU_CRTC_H_
+
+#include <linux/kthread.h>
+#include <drm/drm_crtc.h>
+#include "dpu_kms.h"
+#include "dpu_core_perf.h"
+#include "dpu_hw_blk.h"
+
+#define DPU_CRTC_NAME_SIZE 12
+
+/* define the maximum number of in-flight frame events */
+#define DPU_CRTC_FRAME_EVENT_SIZE 4
+
+/**
+ * enum dpu_crtc_client_type: crtc client type
+ * @RT_CLIENT: RealTime client like video/cmd mode display
+ * voting through apps rsc
+ * @NRT_CLIENT: Non-RealTime client like WB display
+ * voting through apps rsc
+ */
+enum dpu_crtc_client_type {
+ RT_CLIENT,
+ NRT_CLIENT,
+};
+
+/**
+ * enum dpu_crtc_smmu_state: smmu state
+ * @ATTACHED: all the context banks are attached.
+ * @DETACHED: all the context banks are detached.
+ * @ATTACH_ALL_REQ: transient state of attaching context banks.
+ * @DETACH_ALL_REQ: transient state of detaching context banks.
+ */
+enum dpu_crtc_smmu_state {
+ ATTACHED = 0,
+ DETACHED,
+ ATTACH_ALL_REQ,
+ DETACH_ALL_REQ,
+};
+
+/**
+ * enum dpu_crtc_smmu_state_transition_type: state transition type
+ * @NONE: no pending state transitions
+ * @PRE_COMMIT: state transitions should be done before processing the commit
+ * @POST_COMMIT: state transitions to be done after processing the commit.
+ */
+enum dpu_crtc_smmu_state_transition_type {
+ NONE,
+ PRE_COMMIT,
+ POST_COMMIT
+};
+
+/**
+ * struct dpu_crtc_smmu_state_data: stores the smmu state and transition type
+ * @state: current state of smmu context banks
+ * @transition_type: transition request type
+ * @transition_error: whether there is error while transitioning the state
+ */
+struct dpu_crtc_smmu_state_data {
+ uint32_t state;
+ uint32_t transition_type;
+ uint32_t transition_error;
+};
+
+/**
+ * struct dpu_crtc_mixer: stores the map for each virtual pipeline in the CRTC
+ * @hw_lm: LM HW Driver context
+ * @hw_ctl: CTL Path HW driver context
+ * @encoder: Encoder attached to this lm & ctl
+ * @mixer_op_mode: mixer blending operation mode
+ * @flush_mask: mixer flush mask for ctl, mixer and pipe
+ */
+struct dpu_crtc_mixer {
+ struct dpu_hw_mixer *hw_lm;
+ struct dpu_hw_ctl *hw_ctl;
+ struct drm_encoder *encoder;
+ u32 mixer_op_mode;
+ u32 flush_mask;
+};
+
+/**
+ * struct dpu_crtc_frame_event: stores crtc frame event for crtc processing
+ * @work: base work structure
+ * @crtc: Pointer to crtc handling this event
+ * @list: event list
+ * @ts: timestamp at queue entry
+ * @event: event identifier
+ */
+struct dpu_crtc_frame_event {
+ struct kthread_work work;
+ struct drm_crtc *crtc;
+ struct list_head list;
+ ktime_t ts;
+ u32 event;
+};
+
+/*
+ * Maximum number of free event structures to cache
+ */
+#define DPU_CRTC_MAX_EVENT_COUNT 16
+
+/**
+ * struct dpu_crtc - virtualized CRTC data structure
+ * @base : Base drm crtc structure
+ * @name : ASCII description of this crtc
+ * @num_ctls : Number of ctl paths in use
+ * @num_mixers : Number of mixers in use
+ * @mixers_swapped: Whether the mixers have been swapped for left/right update
+ * especially in the case of DSC Merge.
+ * @mixers : List of active mixers
+ * @event : Pointer to last received drm vblank event. If there is a
+ * pending vblank event, this will be non-null.
+ * @vsync_count : Running count of received vsync events
+ * @stage_cfg : H/w mixer stage configuration
+ * @debugfs_root : Parent of debugfs node
+ * @vblank_cb_count : count of vblank callback since last reset
+ * @play_count : frame count between crtc enable and disable
+ * @vblank_cb_time : ktime at vblank count reset
+ * @vblank_requested : whether the user has requested vblank events
+ * @suspend : whether or not a suspend operation is in progress
+ * @enabled : whether the DPU CRTC is currently enabled. updated in the
+ * commit-thread, not state-swap time which is earlier, so
+ * safe to make decisions on during VBLANK on/off work
+ * @feature_list : list of color processing features supported on a crtc
+ * @active_list : list of color processing features are active
+ * @dirty_list : list of color processing features are dirty
+ * @ad_dirty: list containing ad properties that are dirty
+ * @ad_active: list containing ad properties that are active
+ * @crtc_lock : crtc lock around create, destroy and access.
+ * @frame_pending : Whether or not an update is pending
+ * @frame_events : static allocation of in-flight frame events
+ * @frame_event_list : available frame event list
+ * @spin_lock : spin lock for frame event, transaction status, etc...
+ * @frame_done_comp : for frame_event_done synchronization
+ * @event_lock : Spinlock around event handling code
+ * @misr_enable : boolean entry indicates misr enable/disable status.
+ * @misr_frame_count : misr frame count provided by client
+ * @misr_data : store misr data before turning off the clocks.
+ * @phandle: Pointer to power handler
+ * @power_event : registered power event handle
+ * @cur_perf : current performance committed to clock/bandwidth driver
+ * @rp_lock : serialization lock for resource pool
+ * @rp_head : list of active resource pool
+ * @scl3_lut_cfg : qseed3 lut config
+ * @smmu_state : smmu state and pending transition info
+ */
+struct dpu_crtc {
+ struct drm_crtc base;
+ char name[DPU_CRTC_NAME_SIZE];
+
+ /* HW Resources reserved for the crtc */
+ u32 num_ctls;
+ u32 num_mixers;
+ bool mixers_swapped;
+ struct dpu_crtc_mixer mixers[CRTC_DUAL_MIXERS];
+ struct dpu_hw_scaler3_lut_cfg *scl3_lut_cfg;
+
+ struct drm_pending_vblank_event *event;
+ u32 vsync_count;
+
+ struct dpu_hw_stage_cfg stage_cfg;
+ struct dentry *debugfs_root;
+
+ u32 vblank_cb_count;
+ u64 play_count;
+ ktime_t vblank_cb_time;
+ bool vblank_requested;
+ bool suspend;
+ bool enabled;
+
+ struct list_head feature_list;
+ struct list_head active_list;
+ struct list_head dirty_list;
+ struct list_head ad_dirty;
+ struct list_head ad_active;
+
+ struct mutex crtc_lock;
+
+ atomic_t frame_pending;
+ struct dpu_crtc_frame_event frame_events[DPU_CRTC_FRAME_EVENT_SIZE];
+ struct list_head frame_event_list;
+ spinlock_t spin_lock;
+ struct completion frame_done_comp;
+
+ /* for handling internal event thread */
+ spinlock_t event_lock;
+ bool misr_enable;
+ u32 misr_frame_count;
+ u32 misr_data[CRTC_DUAL_MIXERS];
+
+ struct dpu_power_handle *phandle;
+ struct dpu_power_event *power_event;
+
+ struct dpu_core_perf_params cur_perf;
+
+ struct mutex rp_lock;
+ struct list_head rp_head;
+
+ struct dpu_crtc_smmu_state_data smmu_state;
+};
+
+#define to_dpu_crtc(x) container_of(x, struct dpu_crtc, base)
+
+/**
+ * struct dpu_crtc_res_ops - common operations for crtc resources
+ * @get: get given resource
+ * @put: put given resource
+ */
+struct dpu_crtc_res_ops {
+ void *(*get)(void *val, u32 type, u64 tag);
+ void (*put)(void *val);
+};
+
+#define DPU_CRTC_RES_FLAG_FREE BIT(0)
+
+/**
+ * struct dpu_crtc_res - definition of crtc resources
+ * @list: list of crtc resource
+ * @type: crtc resource type
+ * @tag: unique identifier per type
+ * @refcount: reference/usage count
+ * @ops: callback operations
+ * @val: resource handle associated with type/tag
+ * @flags: customization flags
+ */
+struct dpu_crtc_res {
+ struct list_head list;
+ u32 type;
+ u64 tag;
+ atomic_t refcount;
+ struct dpu_crtc_res_ops ops;
+ void *val;
+ u32 flags;
+};
+
+/**
+ * dpu_crtc_respool - crtc resource pool
+ * @rp_lock: pointer to serialization lock
+ * @rp_head: pointer to head of active resource pools of this crtc
+ * @rp_list: list of crtc resource pool
+ * @sequence_id: sequence identifier, incremented per state duplication
+ * @res_list: list of resource managed by this resource pool
+ * @ops: resource operations for parent resource pool
+ */
+struct dpu_crtc_respool {
+ struct mutex *rp_lock;
+ struct list_head *rp_head;
+ struct list_head rp_list;
+ u32 sequence_id;
+ struct list_head res_list;
+ struct dpu_crtc_res_ops ops;
+};
+
+/**
+ * struct dpu_crtc_state - dpu container for atomic crtc state
+ * @base: Base drm crtc state structure
+ * @is_ppsplit : Whether current topology requires PPSplit special handling
+ * @bw_control : true if bw/clk controlled by core bw/clk properties
+ * @bw_split_vote : true if bw controlled by llcc/dram bw properties
+ * @lm_bounds : LM boundaries based on current mode full resolution, no ROI.
+ * Origin top left of CRTC.
+ * @input_fence_timeout_ns : Cached input fence timeout, in ns
+ * @new_perf: new performance state being requested
+ * @rp: resource pool carried with this state
+ */
+struct dpu_crtc_state {
+ struct drm_crtc_state base;
+
+ bool bw_control;
+ bool bw_split_vote;
+
+ bool is_ppsplit;
+ struct drm_rect lm_bounds[CRTC_DUAL_MIXERS];
+
+ uint64_t input_fence_timeout_ns;
+
+ struct dpu_core_perf_params new_perf;
+ struct dpu_crtc_respool rp;
+};
+
+#define to_dpu_crtc_state(x) \
+ container_of(x, struct dpu_crtc_state, base)
+
+/**
+ * dpu_crtc_get_mixer_width - get the mixer width
+ * Mixer width will be the same as the panel width (/2 for split).
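+ * e.g. a 1920-wide mode returns 960 with dual mixers, 1920 otherwise.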
+ */
+static inline int dpu_crtc_get_mixer_width(struct dpu_crtc *dpu_crtc,
+ struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
+{
+ u32 mixer_width;
+
+ if (!dpu_crtc || !cstate || !mode)
+ return 0;
+
+ mixer_width = (dpu_crtc->num_mixers == CRTC_DUAL_MIXERS ?
+ mode->hdisplay / CRTC_DUAL_MIXERS : mode->hdisplay);
+
+ return mixer_width;
+}
+
+/**
+ * dpu_crtc_get_mixer_height - get the mixer height
+ * Mixer height will be the same as the panel height.
+ */
+static inline int dpu_crtc_get_mixer_height(struct dpu_crtc *dpu_crtc,
+ struct dpu_crtc_state *cstate, struct drm_display_mode *mode)
+{
+ if (!dpu_crtc || !cstate || !mode)
+ return 0;
+
+ return mode->vdisplay;
+}
+
+/**
+ * dpu_crtc_frame_pending - return the number of pending frames
+ * @crtc: Pointer to drm crtc object
+ */
+static inline int dpu_crtc_frame_pending(struct drm_crtc *crtc)
+{
+ struct dpu_crtc *dpu_crtc;
+
+ if (!crtc)
+ return -EINVAL;
+
+ dpu_crtc = to_dpu_crtc(crtc);
+ return atomic_read(&dpu_crtc->frame_pending);
+}
+
+/**
+ * dpu_crtc_vblank - enable or disable vblanks for this crtc
+ * @crtc: Pointer to drm crtc object
+ * @en: true to enable vblanks, false to disable
+ */
+int dpu_crtc_vblank(struct drm_crtc *crtc, bool en);
+
+/**
+ * dpu_crtc_commit_kickoff - trigger kickoff of the commit for this crtc
+ * @crtc: Pointer to drm crtc object
+ */
+void dpu_crtc_commit_kickoff(struct drm_crtc *crtc);
+
+/**
+ * dpu_crtc_complete_commit - callback signalling completion of current commit
+ * @crtc: Pointer to drm crtc object
+ * @old_state: Pointer to drm crtc old state object
+ */
+void dpu_crtc_complete_commit(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_state);
+
+/**
+ * dpu_crtc_init - create a new crtc object
+ * @dev: dpu device
+ * @plane: base plane
+ * Return: new crtc object or error
+ */
+struct drm_crtc *dpu_crtc_init(struct drm_device *dev, struct drm_plane *plane);
+
+/**
+ * dpu_crtc_register_custom_event - api for enabling/disabling crtc event
+ * @kms: Pointer to dpu_kms
+ * @crtc_drm: Pointer to crtc object
+ * @event: Event that the client is interested in
+ * @en: Flag to enable/disable the event
+ */
+int dpu_crtc_register_custom_event(struct dpu_kms *kms,
+ struct drm_crtc *crtc_drm, u32 event, bool en);
+
+/**
+ * dpu_crtc_get_intf_mode - get interface mode of the given crtc
+ * @crtc: Pointer to crtc
+ */
+enum dpu_intf_mode dpu_crtc_get_intf_mode(struct drm_crtc *crtc);
+
+/**
+ * dpu_crtc_get_client_type - check the crtc type: rt, nrt, etc.
+ * @crtc: Pointer to crtc
+ */
+static inline enum dpu_crtc_client_type dpu_crtc_get_client_type(
+ struct drm_crtc *crtc)
+{
+ struct dpu_crtc_state *cstate =
+ crtc ? to_dpu_crtc_state(crtc->state) : NULL;
+
+ if (!cstate)
+ return NRT_CLIENT;
+
+ return RT_CLIENT;
+}
+
+/**
+ * dpu_crtc_is_enabled - check if dpu crtc is enabled or not
+ * @crtc: Pointer to crtc
+ */
+static inline bool dpu_crtc_is_enabled(struct drm_crtc *crtc)
+{
+ return crtc ? crtc->enabled : false;
+}
+
+#endif /* _DPU_CRTC_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
new file mode 100644
index 000000000000..ae2aee7ed9e1
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.c
@@ -0,0 +1,2393 @@
+/* Copyright (c) 2009-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/delay.h>
+#include <linux/spinlock.h>
+#include <linux/ktime.h>
+#include <linux/debugfs.h>
+#include <linux/uaccess.h>
+#include <linux/dma-buf.h>
+#include <linux/slab.h>
+#include <linux/list_sort.h>
+#include <linux/pm_runtime.h>
+
+#include "dpu_dbg.h"
+#include "disp/dpu1/dpu_hw_catalog.h"
+
+
+#define DEFAULT_DBGBUS_DPU DPU_DBG_DUMP_IN_MEM
+#define DEFAULT_DBGBUS_VBIFRT DPU_DBG_DUMP_IN_MEM
+#define REG_BASE_NAME_LEN 80
+
+#define DBGBUS_FLAGS_DSPP BIT(0)
+#define DBGBUS_DSPP_STATUS 0x34C
+
+#define DBGBUS_NAME_DPU "dpu"
+#define DBGBUS_NAME_VBIF_RT "vbif_rt"
+
+/* offsets from dpu top address for the debug buses */
+#define DBGBUS_SSPP0 0x188
+#define DBGBUS_AXI_INTF 0x194
+#define DBGBUS_SSPP1 0x298
+#define DBGBUS_DSPP 0x348
+#define DBGBUS_PERIPH 0x418
+
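+/*
+ * Debug bus test-point select: judging by the shifts below, the block
+ * id lives in the bits above bit 4, the test point in bits [3:1], and
+ * bit 0 enables the bus.
+ */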
+#define TEST_MASK(id, tp)	(((id) << 4) | ((tp) << 1) | BIT(0))
+
+/* following offsets are with respect to MDP VBIF base for DBG BUS access */
+#define MMSS_VBIF_CLKON 0x4
+#define MMSS_VBIF_TEST_BUS_OUT_CTRL 0x210
+#define MMSS_VBIF_TEST_BUS_OUT 0x230
+
+/* Vbif error info */
+#define MMSS_VBIF_PND_ERR 0x190
+#define MMSS_VBIF_SRC_ERR 0x194
+#define MMSS_VBIF_XIN_HALT_CTRL1 0x204
+#define MMSS_VBIF_ERR_INFO			0x1a0
+#define MMSS_VBIF_ERR_INFO_1 0x1a4
+#define MMSS_VBIF_CLIENT_NUM 14
+
+/**
+ * struct dpu_dbg_reg_base - register region base.
+ * A region may have sub-ranges, in which case those are used for dumping;
+ * otherwise the dump covers base -> max_offset.
+ * @reg_base_head: head of this node
+ * @name: register base name
+ * @base: base pointer
+ * @off: cached offset of region for manual register dumping
+ * @cnt: cached range of region for manual register dumping
+ * @max_offset: length of region
+ * @buf: buffer used for manual register dumping
+ * @buf_len: buffer length used for manual register dumping
+ * @cb: callback for external dump function, null if not defined
+ * @cb_ptr: private pointer to callback function
+ */
+struct dpu_dbg_reg_base {
+ struct list_head reg_base_head;
+ char name[REG_BASE_NAME_LEN];
+ void __iomem *base;
+ size_t off;
+ size_t cnt;
+ size_t max_offset;
+ char *buf;
+ size_t buf_len;
+ void (*cb)(void *ptr);
+ void *cb_ptr;
+};
+
+struct dpu_debug_bus_entry {
+ u32 wr_addr;
+ u32 block_id;
+ u32 test_id;
+ void (*analyzer)(void __iomem *mem_base,
+ struct dpu_debug_bus_entry *entry, u32 val);
+};
+
+struct vbif_debug_bus_entry {
+ u32 disable_bus_addr;
+ u32 block_bus_addr;
+ u32 bit_offset;
+ u32 block_cnt;
+ u32 test_pnt_start;
+ u32 test_pnt_cnt;
+};
+
+struct dpu_dbg_debug_bus_common {
+ char *name;
+ u32 enable_mask;
+ bool include_in_deferred_work;
+ u32 flags;
+ u32 entries_size;
+ u32 *dumped_content;
+};
+
+struct dpu_dbg_dpu_debug_bus {
+ struct dpu_dbg_debug_bus_common cmn;
+ struct dpu_debug_bus_entry *entries;
+ u32 top_blk_off;
+};
+
+struct dpu_dbg_vbif_debug_bus {
+ struct dpu_dbg_debug_bus_common cmn;
+ struct vbif_debug_bus_entry *entries;
+};
+
+/**
+ * struct dpu_dbg_base - global dpu debug base structure
+ * @reg_base_list: list of register dumping regions
+ * @dev: device pointer
+ * @dump_work: work struct for deferring register dump work to separate thread
+ * @dbgbus_dpu: debug bus structure for the dpu
+ * @dbgbus_vbif_rt: debug bus structure for the realtime vbif
+ */
+static struct dpu_dbg_base {
+ struct list_head reg_base_list;
+ struct device *dev;
+
+ struct work_struct dump_work;
+
+ struct dpu_dbg_dpu_debug_bus dbgbus_dpu;
+ struct dpu_dbg_vbif_debug_bus dbgbus_vbif_rt;
+} dpu_dbg_base;
+
+static void _dpu_debug_bus_xbar_dump(void __iomem *mem_base,
+ struct dpu_debug_bus_entry *entry, u32 val)
+{
+ dev_err(dpu_dbg_base.dev, "xbar 0x%x %d %d 0x%x\n",
+ entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _dpu_debug_bus_lm_dump(void __iomem *mem_base,
+ struct dpu_debug_bus_entry *entry, u32 val)
+{
+ if (!(val & 0xFFF000))
+ return;
+
+ dev_err(dpu_dbg_base.dev, "lm 0x%x %d %d 0x%x\n",
+ entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _dpu_debug_bus_ppb0_dump(void __iomem *mem_base,
+ struct dpu_debug_bus_entry *entry, u32 val)
+{
+ if (!(val & BIT(15)))
+ return;
+
+ dev_err(dpu_dbg_base.dev, "ppb0 0x%x %d %d 0x%x\n",
+ entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static void _dpu_debug_bus_ppb1_dump(void __iomem *mem_base,
+ struct dpu_debug_bus_entry *entry, u32 val)
+{
+ if (!(val & BIT(15)))
+ return;
+
+ dev_err(dpu_dbg_base.dev, "ppb1 0x%x %d %d 0x%x\n",
+ entry->wr_addr, entry->block_id, entry->test_id, val);
+}
+
+static struct dpu_debug_bus_entry dbg_bus_dpu_8998[] = {
+
+	/* Unpack 0 sspp 0 */
+ { DBGBUS_SSPP0, 50, 2 },
+ { DBGBUS_SSPP0, 60, 2 },
+ { DBGBUS_SSPP0, 70, 2 },
+ { DBGBUS_SSPP0, 85, 2 },
+
+	/* Unpack 0 sspp 1 */
+ { DBGBUS_SSPP1, 50, 2 },
+ { DBGBUS_SSPP1, 60, 2 },
+ { DBGBUS_SSPP1, 70, 2 },
+ { DBGBUS_SSPP1, 85, 2 },
+
+ /* scheduler */
+ { DBGBUS_DSPP, 130, 0 },
+ { DBGBUS_DSPP, 130, 1 },
+ { DBGBUS_DSPP, 130, 2 },
+ { DBGBUS_DSPP, 130, 3 },
+ { DBGBUS_DSPP, 130, 4 },
+ { DBGBUS_DSPP, 130, 5 },
+
+ /* qseed */
+ { DBGBUS_SSPP0, 6, 0},
+ { DBGBUS_SSPP0, 6, 1},
+ { DBGBUS_SSPP0, 26, 0},
+ { DBGBUS_SSPP0, 26, 1},
+ { DBGBUS_SSPP1, 6, 0},
+ { DBGBUS_SSPP1, 6, 1},
+ { DBGBUS_SSPP1, 26, 0},
+ { DBGBUS_SSPP1, 26, 1},
+
+ /* scale */
+ { DBGBUS_SSPP0, 16, 0},
+ { DBGBUS_SSPP0, 16, 1},
+ { DBGBUS_SSPP0, 36, 0},
+ { DBGBUS_SSPP0, 36, 1},
+ { DBGBUS_SSPP1, 16, 0},
+ { DBGBUS_SSPP1, 16, 1},
+ { DBGBUS_SSPP1, 36, 0},
+ { DBGBUS_SSPP1, 36, 1},
+
+ /* fetch sspp0 */
+
+ /* vig 0 */
+ { DBGBUS_SSPP0, 0, 0 },
+ { DBGBUS_SSPP0, 0, 1 },
+ { DBGBUS_SSPP0, 0, 2 },
+ { DBGBUS_SSPP0, 0, 3 },
+ { DBGBUS_SSPP0, 0, 4 },
+ { DBGBUS_SSPP0, 0, 5 },
+ { DBGBUS_SSPP0, 0, 6 },
+ { DBGBUS_SSPP0, 0, 7 },
+
+ { DBGBUS_SSPP0, 1, 0 },
+ { DBGBUS_SSPP0, 1, 1 },
+ { DBGBUS_SSPP0, 1, 2 },
+ { DBGBUS_SSPP0, 1, 3 },
+ { DBGBUS_SSPP0, 1, 4 },
+ { DBGBUS_SSPP0, 1, 5 },
+ { DBGBUS_SSPP0, 1, 6 },
+ { DBGBUS_SSPP0, 1, 7 },
+
+ { DBGBUS_SSPP0, 2, 0 },
+ { DBGBUS_SSPP0, 2, 1 },
+ { DBGBUS_SSPP0, 2, 2 },
+ { DBGBUS_SSPP0, 2, 3 },
+ { DBGBUS_SSPP0, 2, 4 },
+ { DBGBUS_SSPP0, 2, 5 },
+ { DBGBUS_SSPP0, 2, 6 },
+ { DBGBUS_SSPP0, 2, 7 },
+
+ { DBGBUS_SSPP0, 4, 0 },
+ { DBGBUS_SSPP0, 4, 1 },
+ { DBGBUS_SSPP0, 4, 2 },
+ { DBGBUS_SSPP0, 4, 3 },
+ { DBGBUS_SSPP0, 4, 4 },
+ { DBGBUS_SSPP0, 4, 5 },
+ { DBGBUS_SSPP0, 4, 6 },
+ { DBGBUS_SSPP0, 4, 7 },
+
+ { DBGBUS_SSPP0, 5, 0 },
+ { DBGBUS_SSPP0, 5, 1 },
+ { DBGBUS_SSPP0, 5, 2 },
+ { DBGBUS_SSPP0, 5, 3 },
+ { DBGBUS_SSPP0, 5, 4 },
+ { DBGBUS_SSPP0, 5, 5 },
+ { DBGBUS_SSPP0, 5, 6 },
+ { DBGBUS_SSPP0, 5, 7 },
+
+ /* vig 2 */
+ { DBGBUS_SSPP0, 20, 0 },
+ { DBGBUS_SSPP0, 20, 1 },
+ { DBGBUS_SSPP0, 20, 2 },
+ { DBGBUS_SSPP0, 20, 3 },
+ { DBGBUS_SSPP0, 20, 4 },
+ { DBGBUS_SSPP0, 20, 5 },
+ { DBGBUS_SSPP0, 20, 6 },
+ { DBGBUS_SSPP0, 20, 7 },
+
+ { DBGBUS_SSPP0, 21, 0 },
+ { DBGBUS_SSPP0, 21, 1 },
+ { DBGBUS_SSPP0, 21, 2 },
+ { DBGBUS_SSPP0, 21, 3 },
+ { DBGBUS_SSPP0, 21, 4 },
+ { DBGBUS_SSPP0, 21, 5 },
+ { DBGBUS_SSPP0, 21, 6 },
+ { DBGBUS_SSPP0, 21, 7 },
+
+ { DBGBUS_SSPP0, 22, 0 },
+ { DBGBUS_SSPP0, 22, 1 },
+ { DBGBUS_SSPP0, 22, 2 },
+ { DBGBUS_SSPP0, 22, 3 },
+ { DBGBUS_SSPP0, 22, 4 },
+ { DBGBUS_SSPP0, 22, 5 },
+ { DBGBUS_SSPP0, 22, 6 },
+ { DBGBUS_SSPP0, 22, 7 },
+
+ { DBGBUS_SSPP0, 24, 0 },
+ { DBGBUS_SSPP0, 24, 1 },
+ { DBGBUS_SSPP0, 24, 2 },
+ { DBGBUS_SSPP0, 24, 3 },
+ { DBGBUS_SSPP0, 24, 4 },
+ { DBGBUS_SSPP0, 24, 5 },
+ { DBGBUS_SSPP0, 24, 6 },
+ { DBGBUS_SSPP0, 24, 7 },
+
+ { DBGBUS_SSPP0, 25, 0 },
+ { DBGBUS_SSPP0, 25, 1 },
+ { DBGBUS_SSPP0, 25, 2 },
+ { DBGBUS_SSPP0, 25, 3 },
+ { DBGBUS_SSPP0, 25, 4 },
+ { DBGBUS_SSPP0, 25, 5 },
+ { DBGBUS_SSPP0, 25, 6 },
+ { DBGBUS_SSPP0, 25, 7 },
+
+ /* dma 2 */
+ { DBGBUS_SSPP0, 30, 0 },
+ { DBGBUS_SSPP0, 30, 1 },
+ { DBGBUS_SSPP0, 30, 2 },
+ { DBGBUS_SSPP0, 30, 3 },
+ { DBGBUS_SSPP0, 30, 4 },
+ { DBGBUS_SSPP0, 30, 5 },
+ { DBGBUS_SSPP0, 30, 6 },
+ { DBGBUS_SSPP0, 30, 7 },
+
+ { DBGBUS_SSPP0, 31, 0 },
+ { DBGBUS_SSPP0, 31, 1 },
+ { DBGBUS_SSPP0, 31, 2 },
+ { DBGBUS_SSPP0, 31, 3 },
+ { DBGBUS_SSPP0, 31, 4 },
+ { DBGBUS_SSPP0, 31, 5 },
+ { DBGBUS_SSPP0, 31, 6 },
+ { DBGBUS_SSPP0, 31, 7 },
+
+ { DBGBUS_SSPP0, 32, 0 },
+ { DBGBUS_SSPP0, 32, 1 },
+ { DBGBUS_SSPP0, 32, 2 },
+ { DBGBUS_SSPP0, 32, 3 },
+ { DBGBUS_SSPP0, 32, 4 },
+ { DBGBUS_SSPP0, 32, 5 },
+ { DBGBUS_SSPP0, 32, 6 },
+ { DBGBUS_SSPP0, 32, 7 },
+
+ { DBGBUS_SSPP0, 33, 0 },
+ { DBGBUS_SSPP0, 33, 1 },
+ { DBGBUS_SSPP0, 33, 2 },
+ { DBGBUS_SSPP0, 33, 3 },
+ { DBGBUS_SSPP0, 33, 4 },
+ { DBGBUS_SSPP0, 33, 5 },
+ { DBGBUS_SSPP0, 33, 6 },
+ { DBGBUS_SSPP0, 33, 7 },
+
+ { DBGBUS_SSPP0, 34, 0 },
+ { DBGBUS_SSPP0, 34, 1 },
+ { DBGBUS_SSPP0, 34, 2 },
+ { DBGBUS_SSPP0, 34, 3 },
+ { DBGBUS_SSPP0, 34, 4 },
+ { DBGBUS_SSPP0, 34, 5 },
+ { DBGBUS_SSPP0, 34, 6 },
+ { DBGBUS_SSPP0, 34, 7 },
+
+ { DBGBUS_SSPP0, 35, 0 },
+ { DBGBUS_SSPP0, 35, 1 },
+ { DBGBUS_SSPP0, 35, 2 },
+ { DBGBUS_SSPP0, 35, 3 },
+
+ /* dma 0 */
+ { DBGBUS_SSPP0, 40, 0 },
+ { DBGBUS_SSPP0, 40, 1 },
+ { DBGBUS_SSPP0, 40, 2 },
+ { DBGBUS_SSPP0, 40, 3 },
+ { DBGBUS_SSPP0, 40, 4 },
+ { DBGBUS_SSPP0, 40, 5 },
+ { DBGBUS_SSPP0, 40, 6 },
+ { DBGBUS_SSPP0, 40, 7 },
+
+ { DBGBUS_SSPP0, 41, 0 },
+ { DBGBUS_SSPP0, 41, 1 },
+ { DBGBUS_SSPP0, 41, 2 },
+ { DBGBUS_SSPP0, 41, 3 },
+ { DBGBUS_SSPP0, 41, 4 },
+ { DBGBUS_SSPP0, 41, 5 },
+ { DBGBUS_SSPP0, 41, 6 },
+ { DBGBUS_SSPP0, 41, 7 },
+
+ { DBGBUS_SSPP0, 42, 0 },
+ { DBGBUS_SSPP0, 42, 1 },
+ { DBGBUS_SSPP0, 42, 2 },
+ { DBGBUS_SSPP0, 42, 3 },
+ { DBGBUS_SSPP0, 42, 4 },
+ { DBGBUS_SSPP0, 42, 5 },
+ { DBGBUS_SSPP0, 42, 6 },
+ { DBGBUS_SSPP0, 42, 7 },
+
+ { DBGBUS_SSPP0, 44, 0 },
+ { DBGBUS_SSPP0, 44, 1 },
+ { DBGBUS_SSPP0, 44, 2 },
+ { DBGBUS_SSPP0, 44, 3 },
+ { DBGBUS_SSPP0, 44, 4 },
+ { DBGBUS_SSPP0, 44, 5 },
+ { DBGBUS_SSPP0, 44, 6 },
+ { DBGBUS_SSPP0, 44, 7 },
+
+ { DBGBUS_SSPP0, 45, 0 },
+ { DBGBUS_SSPP0, 45, 1 },
+ { DBGBUS_SSPP0, 45, 2 },
+ { DBGBUS_SSPP0, 45, 3 },
+ { DBGBUS_SSPP0, 45, 4 },
+ { DBGBUS_SSPP0, 45, 5 },
+ { DBGBUS_SSPP0, 45, 6 },
+ { DBGBUS_SSPP0, 45, 7 },
+
+ /* fetch sspp1 */
+ /* vig 1 */
+ { DBGBUS_SSPP1, 0, 0 },
+ { DBGBUS_SSPP1, 0, 1 },
+ { DBGBUS_SSPP1, 0, 2 },
+ { DBGBUS_SSPP1, 0, 3 },
+ { DBGBUS_SSPP1, 0, 4 },
+ { DBGBUS_SSPP1, 0, 5 },
+ { DBGBUS_SSPP1, 0, 6 },
+ { DBGBUS_SSPP1, 0, 7 },
+
+ { DBGBUS_SSPP1, 1, 0 },
+ { DBGBUS_SSPP1, 1, 1 },
+ { DBGBUS_SSPP1, 1, 2 },
+ { DBGBUS_SSPP1, 1, 3 },
+ { DBGBUS_SSPP1, 1, 4 },
+ { DBGBUS_SSPP1, 1, 5 },
+ { DBGBUS_SSPP1, 1, 6 },
+ { DBGBUS_SSPP1, 1, 7 },
+
+ { DBGBUS_SSPP1, 2, 0 },
+ { DBGBUS_SSPP1, 2, 1 },
+ { DBGBUS_SSPP1, 2, 2 },
+ { DBGBUS_SSPP1, 2, 3 },
+ { DBGBUS_SSPP1, 2, 4 },
+ { DBGBUS_SSPP1, 2, 5 },
+ { DBGBUS_SSPP1, 2, 6 },
+ { DBGBUS_SSPP1, 2, 7 },
+
+ { DBGBUS_SSPP1, 4, 0 },
+ { DBGBUS_SSPP1, 4, 1 },
+ { DBGBUS_SSPP1, 4, 2 },
+ { DBGBUS_SSPP1, 4, 3 },
+ { DBGBUS_SSPP1, 4, 4 },
+ { DBGBUS_SSPP1, 4, 5 },
+ { DBGBUS_SSPP1, 4, 6 },
+ { DBGBUS_SSPP1, 4, 7 },
+
+ { DBGBUS_SSPP1, 5, 0 },
+ { DBGBUS_SSPP1, 5, 1 },
+ { DBGBUS_SSPP1, 5, 2 },
+ { DBGBUS_SSPP1, 5, 3 },
+ { DBGBUS_SSPP1, 5, 4 },
+ { DBGBUS_SSPP1, 5, 5 },
+ { DBGBUS_SSPP1, 5, 6 },
+ { DBGBUS_SSPP1, 5, 7 },
+
+ /* vig 3 */
+ { DBGBUS_SSPP1, 20, 0 },
+ { DBGBUS_SSPP1, 20, 1 },
+ { DBGBUS_SSPP1, 20, 2 },
+ { DBGBUS_SSPP1, 20, 3 },
+ { DBGBUS_SSPP1, 20, 4 },
+ { DBGBUS_SSPP1, 20, 5 },
+ { DBGBUS_SSPP1, 20, 6 },
+ { DBGBUS_SSPP1, 20, 7 },
+
+ { DBGBUS_SSPP1, 21, 0 },
+ { DBGBUS_SSPP1, 21, 1 },
+ { DBGBUS_SSPP1, 21, 2 },
+ { DBGBUS_SSPP1, 21, 3 },
+ { DBGBUS_SSPP1, 21, 4 },
+ { DBGBUS_SSPP1, 21, 5 },
+ { DBGBUS_SSPP1, 21, 6 },
+ { DBGBUS_SSPP1, 21, 7 },
+
+ { DBGBUS_SSPP1, 22, 0 },
+ { DBGBUS_SSPP1, 22, 1 },
+ { DBGBUS_SSPP1, 22, 2 },
+ { DBGBUS_SSPP1, 22, 3 },
+ { DBGBUS_SSPP1, 22, 4 },
+ { DBGBUS_SSPP1, 22, 5 },
+ { DBGBUS_SSPP1, 22, 6 },
+ { DBGBUS_SSPP1, 22, 7 },
+
+ { DBGBUS_SSPP1, 24, 0 },
+ { DBGBUS_SSPP1, 24, 1 },
+ { DBGBUS_SSPP1, 24, 2 },
+ { DBGBUS_SSPP1, 24, 3 },
+ { DBGBUS_SSPP1, 24, 4 },
+ { DBGBUS_SSPP1, 24, 5 },
+ { DBGBUS_SSPP1, 24, 6 },
+ { DBGBUS_SSPP1, 24, 7 },
+
+ { DBGBUS_SSPP1, 25, 0 },
+ { DBGBUS_SSPP1, 25, 1 },
+ { DBGBUS_SSPP1, 25, 2 },
+ { DBGBUS_SSPP1, 25, 3 },
+ { DBGBUS_SSPP1, 25, 4 },
+ { DBGBUS_SSPP1, 25, 5 },
+ { DBGBUS_SSPP1, 25, 6 },
+ { DBGBUS_SSPP1, 25, 7 },
+
+ /* dma 3 */
+ { DBGBUS_SSPP1, 30, 0 },
+ { DBGBUS_SSPP1, 30, 1 },
+ { DBGBUS_SSPP1, 30, 2 },
+ { DBGBUS_SSPP1, 30, 3 },
+ { DBGBUS_SSPP1, 30, 4 },
+ { DBGBUS_SSPP1, 30, 5 },
+ { DBGBUS_SSPP1, 30, 6 },
+ { DBGBUS_SSPP1, 30, 7 },
+
+ { DBGBUS_SSPP1, 31, 0 },
+ { DBGBUS_SSPP1, 31, 1 },
+ { DBGBUS_SSPP1, 31, 2 },
+ { DBGBUS_SSPP1, 31, 3 },
+ { DBGBUS_SSPP1, 31, 4 },
+ { DBGBUS_SSPP1, 31, 5 },
+ { DBGBUS_SSPP1, 31, 6 },
+ { DBGBUS_SSPP1, 31, 7 },
+
+ { DBGBUS_SSPP1, 32, 0 },
+ { DBGBUS_SSPP1, 32, 1 },
+ { DBGBUS_SSPP1, 32, 2 },
+ { DBGBUS_SSPP1, 32, 3 },
+ { DBGBUS_SSPP1, 32, 4 },
+ { DBGBUS_SSPP1, 32, 5 },
+ { DBGBUS_SSPP1, 32, 6 },
+ { DBGBUS_SSPP1, 32, 7 },
+
+ { DBGBUS_SSPP1, 33, 0 },
+ { DBGBUS_SSPP1, 33, 1 },
+ { DBGBUS_SSPP1, 33, 2 },
+ { DBGBUS_SSPP1, 33, 3 },
+ { DBGBUS_SSPP1, 33, 4 },
+ { DBGBUS_SSPP1, 33, 5 },
+ { DBGBUS_SSPP1, 33, 6 },
+ { DBGBUS_SSPP1, 33, 7 },
+
+ { DBGBUS_SSPP1, 34, 0 },
+ { DBGBUS_SSPP1, 34, 1 },
+ { DBGBUS_SSPP1, 34, 2 },
+ { DBGBUS_SSPP1, 34, 3 },
+ { DBGBUS_SSPP1, 34, 4 },
+ { DBGBUS_SSPP1, 34, 5 },
+ { DBGBUS_SSPP1, 34, 6 },
+ { DBGBUS_SSPP1, 34, 7 },
+
+ { DBGBUS_SSPP1, 35, 0 },
+ { DBGBUS_SSPP1, 35, 1 },
+ { DBGBUS_SSPP1, 35, 2 },
+
+ /* dma 1 */
+ { DBGBUS_SSPP1, 40, 0 },
+ { DBGBUS_SSPP1, 40, 1 },
+ { DBGBUS_SSPP1, 40, 2 },
+ { DBGBUS_SSPP1, 40, 3 },
+ { DBGBUS_SSPP1, 40, 4 },
+ { DBGBUS_SSPP1, 40, 5 },
+ { DBGBUS_SSPP1, 40, 6 },
+ { DBGBUS_SSPP1, 40, 7 },
+
+ { DBGBUS_SSPP1, 41, 0 },
+ { DBGBUS_SSPP1, 41, 1 },
+ { DBGBUS_SSPP1, 41, 2 },
+ { DBGBUS_SSPP1, 41, 3 },
+ { DBGBUS_SSPP1, 41, 4 },
+ { DBGBUS_SSPP1, 41, 5 },
+ { DBGBUS_SSPP1, 41, 6 },
+ { DBGBUS_SSPP1, 41, 7 },
+
+ { DBGBUS_SSPP1, 42, 0 },
+ { DBGBUS_SSPP1, 42, 1 },
+ { DBGBUS_SSPP1, 42, 2 },
+ { DBGBUS_SSPP1, 42, 3 },
+ { DBGBUS_SSPP1, 42, 4 },
+ { DBGBUS_SSPP1, 42, 5 },
+ { DBGBUS_SSPP1, 42, 6 },
+ { DBGBUS_SSPP1, 42, 7 },
+
+ { DBGBUS_SSPP1, 44, 0 },
+ { DBGBUS_SSPP1, 44, 1 },
+ { DBGBUS_SSPP1, 44, 2 },
+ { DBGBUS_SSPP1, 44, 3 },
+ { DBGBUS_SSPP1, 44, 4 },
+ { DBGBUS_SSPP1, 44, 5 },
+ { DBGBUS_SSPP1, 44, 6 },
+ { DBGBUS_SSPP1, 44, 7 },
+
+ { DBGBUS_SSPP1, 45, 0 },
+ { DBGBUS_SSPP1, 45, 1 },
+ { DBGBUS_SSPP1, 45, 2 },
+ { DBGBUS_SSPP1, 45, 3 },
+ { DBGBUS_SSPP1, 45, 4 },
+ { DBGBUS_SSPP1, 45, 5 },
+ { DBGBUS_SSPP1, 45, 6 },
+ { DBGBUS_SSPP1, 45, 7 },
+
+ /* cursor 1 */
+ { DBGBUS_SSPP1, 80, 0 },
+ { DBGBUS_SSPP1, 80, 1 },
+ { DBGBUS_SSPP1, 80, 2 },
+ { DBGBUS_SSPP1, 80, 3 },
+ { DBGBUS_SSPP1, 80, 4 },
+ { DBGBUS_SSPP1, 80, 5 },
+ { DBGBUS_SSPP1, 80, 6 },
+ { DBGBUS_SSPP1, 80, 7 },
+
+ { DBGBUS_SSPP1, 81, 0 },
+ { DBGBUS_SSPP1, 81, 1 },
+ { DBGBUS_SSPP1, 81, 2 },
+ { DBGBUS_SSPP1, 81, 3 },
+ { DBGBUS_SSPP1, 81, 4 },
+ { DBGBUS_SSPP1, 81, 5 },
+ { DBGBUS_SSPP1, 81, 6 },
+ { DBGBUS_SSPP1, 81, 7 },
+
+ { DBGBUS_SSPP1, 82, 0 },
+ { DBGBUS_SSPP1, 82, 1 },
+ { DBGBUS_SSPP1, 82, 2 },
+ { DBGBUS_SSPP1, 82, 3 },
+ { DBGBUS_SSPP1, 82, 4 },
+ { DBGBUS_SSPP1, 82, 5 },
+ { DBGBUS_SSPP1, 82, 6 },
+ { DBGBUS_SSPP1, 82, 7 },
+
+ { DBGBUS_SSPP1, 83, 0 },
+ { DBGBUS_SSPP1, 83, 1 },
+ { DBGBUS_SSPP1, 83, 2 },
+ { DBGBUS_SSPP1, 83, 3 },
+ { DBGBUS_SSPP1, 83, 4 },
+ { DBGBUS_SSPP1, 83, 5 },
+ { DBGBUS_SSPP1, 83, 6 },
+ { DBGBUS_SSPP1, 83, 7 },
+
+ { DBGBUS_SSPP1, 84, 0 },
+ { DBGBUS_SSPP1, 84, 1 },
+ { DBGBUS_SSPP1, 84, 2 },
+ { DBGBUS_SSPP1, 84, 3 },
+ { DBGBUS_SSPP1, 84, 4 },
+ { DBGBUS_SSPP1, 84, 5 },
+ { DBGBUS_SSPP1, 84, 6 },
+ { DBGBUS_SSPP1, 84, 7 },
+
+ /* dspp */
+ { DBGBUS_DSPP, 13, 0 },
+ { DBGBUS_DSPP, 19, 0 },
+ { DBGBUS_DSPP, 14, 0 },
+ { DBGBUS_DSPP, 14, 1 },
+ { DBGBUS_DSPP, 14, 3 },
+ { DBGBUS_DSPP, 20, 0 },
+ { DBGBUS_DSPP, 20, 1 },
+ { DBGBUS_DSPP, 20, 3 },
+
+ /* ppb_0 */
+ { DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
+
+ /* ppb_1 */
+ { DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
+
+ /* lm_lut */
+ { DBGBUS_DSPP, 109, 0 },
+ { DBGBUS_DSPP, 105, 0 },
+ { DBGBUS_DSPP, 103, 0 },
+
+ /* tear-check */
+ { DBGBUS_PERIPH, 63, 0 },
+ { DBGBUS_PERIPH, 64, 0 },
+ { DBGBUS_PERIPH, 65, 0 },
+ { DBGBUS_PERIPH, 73, 0 },
+ { DBGBUS_PERIPH, 74, 0 },
+
+ /* crossbar */
+ { DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
+
+ /* rotator */
+ { DBGBUS_DSPP, 9, 0},
+
+ /* blend */
+ /* LM0 */
+ { DBGBUS_DSPP, 63, 0},
+ { DBGBUS_DSPP, 63, 1},
+ { DBGBUS_DSPP, 63, 2},
+ { DBGBUS_DSPP, 63, 3},
+ { DBGBUS_DSPP, 63, 4},
+ { DBGBUS_DSPP, 63, 5},
+ { DBGBUS_DSPP, 63, 6},
+ { DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 64, 0},
+ { DBGBUS_DSPP, 64, 1},
+ { DBGBUS_DSPP, 64, 2},
+ { DBGBUS_DSPP, 64, 3},
+ { DBGBUS_DSPP, 64, 4},
+ { DBGBUS_DSPP, 64, 5},
+ { DBGBUS_DSPP, 64, 6},
+ { DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 65, 0},
+ { DBGBUS_DSPP, 65, 1},
+ { DBGBUS_DSPP, 65, 2},
+ { DBGBUS_DSPP, 65, 3},
+ { DBGBUS_DSPP, 65, 4},
+ { DBGBUS_DSPP, 65, 5},
+ { DBGBUS_DSPP, 65, 6},
+ { DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 66, 0},
+ { DBGBUS_DSPP, 66, 1},
+ { DBGBUS_DSPP, 66, 2},
+ { DBGBUS_DSPP, 66, 3},
+ { DBGBUS_DSPP, 66, 4},
+ { DBGBUS_DSPP, 66, 5},
+ { DBGBUS_DSPP, 66, 6},
+ { DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 67, 0},
+ { DBGBUS_DSPP, 67, 1},
+ { DBGBUS_DSPP, 67, 2},
+ { DBGBUS_DSPP, 67, 3},
+ { DBGBUS_DSPP, 67, 4},
+ { DBGBUS_DSPP, 67, 5},
+ { DBGBUS_DSPP, 67, 6},
+ { DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 68, 0},
+ { DBGBUS_DSPP, 68, 1},
+ { DBGBUS_DSPP, 68, 2},
+ { DBGBUS_DSPP, 68, 3},
+ { DBGBUS_DSPP, 68, 4},
+ { DBGBUS_DSPP, 68, 5},
+ { DBGBUS_DSPP, 68, 6},
+ { DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 69, 0},
+ { DBGBUS_DSPP, 69, 1},
+ { DBGBUS_DSPP, 69, 2},
+ { DBGBUS_DSPP, 69, 3},
+ { DBGBUS_DSPP, 69, 4},
+ { DBGBUS_DSPP, 69, 5},
+ { DBGBUS_DSPP, 69, 6},
+ { DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
+
+ /* LM1 */
+ { DBGBUS_DSPP, 70, 0},
+ { DBGBUS_DSPP, 70, 1},
+ { DBGBUS_DSPP, 70, 2},
+ { DBGBUS_DSPP, 70, 3},
+ { DBGBUS_DSPP, 70, 4},
+ { DBGBUS_DSPP, 70, 5},
+ { DBGBUS_DSPP, 70, 6},
+ { DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 71, 0},
+ { DBGBUS_DSPP, 71, 1},
+ { DBGBUS_DSPP, 71, 2},
+ { DBGBUS_DSPP, 71, 3},
+ { DBGBUS_DSPP, 71, 4},
+ { DBGBUS_DSPP, 71, 5},
+ { DBGBUS_DSPP, 71, 6},
+ { DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 72, 0},
+ { DBGBUS_DSPP, 72, 1},
+ { DBGBUS_DSPP, 72, 2},
+ { DBGBUS_DSPP, 72, 3},
+ { DBGBUS_DSPP, 72, 4},
+ { DBGBUS_DSPP, 72, 5},
+ { DBGBUS_DSPP, 72, 6},
+ { DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 73, 0},
+ { DBGBUS_DSPP, 73, 1},
+ { DBGBUS_DSPP, 73, 2},
+ { DBGBUS_DSPP, 73, 3},
+ { DBGBUS_DSPP, 73, 4},
+ { DBGBUS_DSPP, 73, 5},
+ { DBGBUS_DSPP, 73, 6},
+ { DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 74, 0},
+ { DBGBUS_DSPP, 74, 1},
+ { DBGBUS_DSPP, 74, 2},
+ { DBGBUS_DSPP, 74, 3},
+ { DBGBUS_DSPP, 74, 4},
+ { DBGBUS_DSPP, 74, 5},
+ { DBGBUS_DSPP, 74, 6},
+ { DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 75, 0},
+ { DBGBUS_DSPP, 75, 1},
+ { DBGBUS_DSPP, 75, 2},
+ { DBGBUS_DSPP, 75, 3},
+ { DBGBUS_DSPP, 75, 4},
+ { DBGBUS_DSPP, 75, 5},
+ { DBGBUS_DSPP, 75, 6},
+ { DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 76, 0},
+ { DBGBUS_DSPP, 76, 1},
+ { DBGBUS_DSPP, 76, 2},
+ { DBGBUS_DSPP, 76, 3},
+ { DBGBUS_DSPP, 76, 4},
+ { DBGBUS_DSPP, 76, 5},
+ { DBGBUS_DSPP, 76, 6},
+ { DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
+
+ /* LM2 */
+ { DBGBUS_DSPP, 77, 0},
+ { DBGBUS_DSPP, 77, 1},
+ { DBGBUS_DSPP, 77, 2},
+ { DBGBUS_DSPP, 77, 3},
+ { DBGBUS_DSPP, 77, 4},
+ { DBGBUS_DSPP, 77, 5},
+ { DBGBUS_DSPP, 77, 6},
+ { DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 78, 0},
+ { DBGBUS_DSPP, 78, 1},
+ { DBGBUS_DSPP, 78, 2},
+ { DBGBUS_DSPP, 78, 3},
+ { DBGBUS_DSPP, 78, 4},
+ { DBGBUS_DSPP, 78, 5},
+ { DBGBUS_DSPP, 78, 6},
+ { DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 79, 0},
+ { DBGBUS_DSPP, 79, 1},
+ { DBGBUS_DSPP, 79, 2},
+ { DBGBUS_DSPP, 79, 3},
+ { DBGBUS_DSPP, 79, 4},
+ { DBGBUS_DSPP, 79, 5},
+ { DBGBUS_DSPP, 79, 6},
+ { DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 80, 0},
+ { DBGBUS_DSPP, 80, 1},
+ { DBGBUS_DSPP, 80, 2},
+ { DBGBUS_DSPP, 80, 3},
+ { DBGBUS_DSPP, 80, 4},
+ { DBGBUS_DSPP, 80, 5},
+ { DBGBUS_DSPP, 80, 6},
+ { DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 81, 0},
+ { DBGBUS_DSPP, 81, 1},
+ { DBGBUS_DSPP, 81, 2},
+ { DBGBUS_DSPP, 81, 3},
+ { DBGBUS_DSPP, 81, 4},
+ { DBGBUS_DSPP, 81, 5},
+ { DBGBUS_DSPP, 81, 6},
+ { DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 82, 0},
+ { DBGBUS_DSPP, 82, 1},
+ { DBGBUS_DSPP, 82, 2},
+ { DBGBUS_DSPP, 82, 3},
+ { DBGBUS_DSPP, 82, 4},
+ { DBGBUS_DSPP, 82, 5},
+ { DBGBUS_DSPP, 82, 6},
+ { DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 83, 0},
+ { DBGBUS_DSPP, 83, 1},
+ { DBGBUS_DSPP, 83, 2},
+ { DBGBUS_DSPP, 83, 3},
+ { DBGBUS_DSPP, 83, 4},
+ { DBGBUS_DSPP, 83, 5},
+ { DBGBUS_DSPP, 83, 6},
+ { DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
+
+ /* csc */
+ { DBGBUS_SSPP0, 7, 0},
+ { DBGBUS_SSPP0, 7, 1},
+ { DBGBUS_SSPP0, 27, 0},
+ { DBGBUS_SSPP0, 27, 1},
+ { DBGBUS_SSPP1, 7, 0},
+ { DBGBUS_SSPP1, 7, 1},
+ { DBGBUS_SSPP1, 27, 0},
+ { DBGBUS_SSPP1, 27, 1},
+
+ /* pcc */
+ { DBGBUS_SSPP0, 3, 3},
+ { DBGBUS_SSPP0, 23, 3},
+ { DBGBUS_SSPP0, 33, 3},
+ { DBGBUS_SSPP0, 43, 3},
+ { DBGBUS_SSPP1, 3, 3},
+ { DBGBUS_SSPP1, 23, 3},
+ { DBGBUS_SSPP1, 33, 3},
+ { DBGBUS_SSPP1, 43, 3},
+
+ /* spa */
+ { DBGBUS_SSPP0, 8, 0},
+ { DBGBUS_SSPP0, 28, 0},
+ { DBGBUS_SSPP1, 8, 0},
+ { DBGBUS_SSPP1, 28, 0},
+ { DBGBUS_DSPP, 13, 0},
+ { DBGBUS_DSPP, 19, 0},
+
+ /* igc */
+ { DBGBUS_SSPP0, 9, 0},
+ { DBGBUS_SSPP0, 9, 1},
+ { DBGBUS_SSPP0, 9, 3},
+ { DBGBUS_SSPP0, 29, 0},
+ { DBGBUS_SSPP0, 29, 1},
+ { DBGBUS_SSPP0, 29, 3},
+ { DBGBUS_SSPP0, 17, 0},
+ { DBGBUS_SSPP0, 17, 1},
+ { DBGBUS_SSPP0, 17, 3},
+ { DBGBUS_SSPP0, 37, 0},
+ { DBGBUS_SSPP0, 37, 1},
+ { DBGBUS_SSPP0, 37, 3},
+ { DBGBUS_SSPP0, 46, 0},
+ { DBGBUS_SSPP0, 46, 1},
+ { DBGBUS_SSPP0, 46, 3},
+
+ { DBGBUS_SSPP1, 9, 0},
+ { DBGBUS_SSPP1, 9, 1},
+ { DBGBUS_SSPP1, 9, 3},
+ { DBGBUS_SSPP1, 29, 0},
+ { DBGBUS_SSPP1, 29, 1},
+ { DBGBUS_SSPP1, 29, 3},
+ { DBGBUS_SSPP1, 17, 0},
+ { DBGBUS_SSPP1, 17, 1},
+ { DBGBUS_SSPP1, 17, 3},
+ { DBGBUS_SSPP1, 37, 0},
+ { DBGBUS_SSPP1, 37, 1},
+ { DBGBUS_SSPP1, 37, 3},
+ { DBGBUS_SSPP1, 46, 0},
+ { DBGBUS_SSPP1, 46, 1},
+ { DBGBUS_SSPP1, 46, 3},
+
+ { DBGBUS_DSPP, 14, 0},
+ { DBGBUS_DSPP, 14, 1},
+ { DBGBUS_DSPP, 14, 3},
+ { DBGBUS_DSPP, 20, 0},
+ { DBGBUS_DSPP, 20, 1},
+ { DBGBUS_DSPP, 20, 3},
+
+ { DBGBUS_PERIPH, 60, 0},
+};
+
+static struct dpu_debug_bus_entry dbg_bus_dpu_sdm845[] = {
+
+ /* unpack 0 sspp 0 */
+ { DBGBUS_SSPP0, 50, 2 },
+ { DBGBUS_SSPP0, 60, 2 },
+ { DBGBUS_SSPP0, 70, 2 },
+
+ /* unpack 0 sspp 1 */
+ { DBGBUS_SSPP1, 50, 2 },
+ { DBGBUS_SSPP1, 60, 2 },
+ { DBGBUS_SSPP1, 70, 2 },
+
+ /* scheduler */
+ { DBGBUS_DSPP, 130, 0 },
+ { DBGBUS_DSPP, 130, 1 },
+ { DBGBUS_DSPP, 130, 2 },
+ { DBGBUS_DSPP, 130, 3 },
+ { DBGBUS_DSPP, 130, 4 },
+ { DBGBUS_DSPP, 130, 5 },
+
+ /* qseed */
+ { DBGBUS_SSPP0, 6, 0},
+ { DBGBUS_SSPP0, 6, 1},
+ { DBGBUS_SSPP0, 26, 0},
+ { DBGBUS_SSPP0, 26, 1},
+ { DBGBUS_SSPP1, 6, 0},
+ { DBGBUS_SSPP1, 6, 1},
+ { DBGBUS_SSPP1, 26, 0},
+ { DBGBUS_SSPP1, 26, 1},
+
+ /* scale */
+ { DBGBUS_SSPP0, 16, 0},
+ { DBGBUS_SSPP0, 16, 1},
+ { DBGBUS_SSPP0, 36, 0},
+ { DBGBUS_SSPP0, 36, 1},
+ { DBGBUS_SSPP1, 16, 0},
+ { DBGBUS_SSPP1, 16, 1},
+ { DBGBUS_SSPP1, 36, 0},
+ { DBGBUS_SSPP1, 36, 1},
+
+ /* fetch sspp0 */
+
+ /* vig 0 */
+ { DBGBUS_SSPP0, 0, 0 },
+ { DBGBUS_SSPP0, 0, 1 },
+ { DBGBUS_SSPP0, 0, 2 },
+ { DBGBUS_SSPP0, 0, 3 },
+ { DBGBUS_SSPP0, 0, 4 },
+ { DBGBUS_SSPP0, 0, 5 },
+ { DBGBUS_SSPP0, 0, 6 },
+ { DBGBUS_SSPP0, 0, 7 },
+
+ { DBGBUS_SSPP0, 1, 0 },
+ { DBGBUS_SSPP0, 1, 1 },
+ { DBGBUS_SSPP0, 1, 2 },
+ { DBGBUS_SSPP0, 1, 3 },
+ { DBGBUS_SSPP0, 1, 4 },
+ { DBGBUS_SSPP0, 1, 5 },
+ { DBGBUS_SSPP0, 1, 6 },
+ { DBGBUS_SSPP0, 1, 7 },
+
+ { DBGBUS_SSPP0, 2, 0 },
+ { DBGBUS_SSPP0, 2, 1 },
+ { DBGBUS_SSPP0, 2, 2 },
+ { DBGBUS_SSPP0, 2, 3 },
+ { DBGBUS_SSPP0, 2, 4 },
+ { DBGBUS_SSPP0, 2, 5 },
+ { DBGBUS_SSPP0, 2, 6 },
+ { DBGBUS_SSPP0, 2, 7 },
+
+ { DBGBUS_SSPP0, 4, 0 },
+ { DBGBUS_SSPP0, 4, 1 },
+ { DBGBUS_SSPP0, 4, 2 },
+ { DBGBUS_SSPP0, 4, 3 },
+ { DBGBUS_SSPP0, 4, 4 },
+ { DBGBUS_SSPP0, 4, 5 },
+ { DBGBUS_SSPP0, 4, 6 },
+ { DBGBUS_SSPP0, 4, 7 },
+
+ { DBGBUS_SSPP0, 5, 0 },
+ { DBGBUS_SSPP0, 5, 1 },
+ { DBGBUS_SSPP0, 5, 2 },
+ { DBGBUS_SSPP0, 5, 3 },
+ { DBGBUS_SSPP0, 5, 4 },
+ { DBGBUS_SSPP0, 5, 5 },
+ { DBGBUS_SSPP0, 5, 6 },
+ { DBGBUS_SSPP0, 5, 7 },
+
+ /* vig 2 */
+ { DBGBUS_SSPP0, 20, 0 },
+ { DBGBUS_SSPP0, 20, 1 },
+ { DBGBUS_SSPP0, 20, 2 },
+ { DBGBUS_SSPP0, 20, 3 },
+ { DBGBUS_SSPP0, 20, 4 },
+ { DBGBUS_SSPP0, 20, 5 },
+ { DBGBUS_SSPP0, 20, 6 },
+ { DBGBUS_SSPP0, 20, 7 },
+
+ { DBGBUS_SSPP0, 21, 0 },
+ { DBGBUS_SSPP0, 21, 1 },
+ { DBGBUS_SSPP0, 21, 2 },
+ { DBGBUS_SSPP0, 21, 3 },
+ { DBGBUS_SSPP0, 21, 4 },
+ { DBGBUS_SSPP0, 21, 5 },
+ { DBGBUS_SSPP0, 21, 6 },
+ { DBGBUS_SSPP0, 21, 7 },
+
+ { DBGBUS_SSPP0, 22, 0 },
+ { DBGBUS_SSPP0, 22, 1 },
+ { DBGBUS_SSPP0, 22, 2 },
+ { DBGBUS_SSPP0, 22, 3 },
+ { DBGBUS_SSPP0, 22, 4 },
+ { DBGBUS_SSPP0, 22, 5 },
+ { DBGBUS_SSPP0, 22, 6 },
+ { DBGBUS_SSPP0, 22, 7 },
+
+ { DBGBUS_SSPP0, 24, 0 },
+ { DBGBUS_SSPP0, 24, 1 },
+ { DBGBUS_SSPP0, 24, 2 },
+ { DBGBUS_SSPP0, 24, 3 },
+ { DBGBUS_SSPP0, 24, 4 },
+ { DBGBUS_SSPP0, 24, 5 },
+ { DBGBUS_SSPP0, 24, 6 },
+ { DBGBUS_SSPP0, 24, 7 },
+
+ { DBGBUS_SSPP0, 25, 0 },
+ { DBGBUS_SSPP0, 25, 1 },
+ { DBGBUS_SSPP0, 25, 2 },
+ { DBGBUS_SSPP0, 25, 3 },
+ { DBGBUS_SSPP0, 25, 4 },
+ { DBGBUS_SSPP0, 25, 5 },
+ { DBGBUS_SSPP0, 25, 6 },
+ { DBGBUS_SSPP0, 25, 7 },
+
+ /* dma 2 */
+ { DBGBUS_SSPP0, 30, 0 },
+ { DBGBUS_SSPP0, 30, 1 },
+ { DBGBUS_SSPP0, 30, 2 },
+ { DBGBUS_SSPP0, 30, 3 },
+ { DBGBUS_SSPP0, 30, 4 },
+ { DBGBUS_SSPP0, 30, 5 },
+ { DBGBUS_SSPP0, 30, 6 },
+ { DBGBUS_SSPP0, 30, 7 },
+
+ { DBGBUS_SSPP0, 31, 0 },
+ { DBGBUS_SSPP0, 31, 1 },
+ { DBGBUS_SSPP0, 31, 2 },
+ { DBGBUS_SSPP0, 31, 3 },
+ { DBGBUS_SSPP0, 31, 4 },
+ { DBGBUS_SSPP0, 31, 5 },
+ { DBGBUS_SSPP0, 31, 6 },
+ { DBGBUS_SSPP0, 31, 7 },
+
+ { DBGBUS_SSPP0, 32, 0 },
+ { DBGBUS_SSPP0, 32, 1 },
+ { DBGBUS_SSPP0, 32, 2 },
+ { DBGBUS_SSPP0, 32, 3 },
+ { DBGBUS_SSPP0, 32, 4 },
+ { DBGBUS_SSPP0, 32, 5 },
+ { DBGBUS_SSPP0, 32, 6 },
+ { DBGBUS_SSPP0, 32, 7 },
+
+ { DBGBUS_SSPP0, 33, 0 },
+ { DBGBUS_SSPP0, 33, 1 },
+ { DBGBUS_SSPP0, 33, 2 },
+ { DBGBUS_SSPP0, 33, 3 },
+ { DBGBUS_SSPP0, 33, 4 },
+ { DBGBUS_SSPP0, 33, 5 },
+ { DBGBUS_SSPP0, 33, 6 },
+ { DBGBUS_SSPP0, 33, 7 },
+
+ { DBGBUS_SSPP0, 34, 0 },
+ { DBGBUS_SSPP0, 34, 1 },
+ { DBGBUS_SSPP0, 34, 2 },
+ { DBGBUS_SSPP0, 34, 3 },
+ { DBGBUS_SSPP0, 34, 4 },
+ { DBGBUS_SSPP0, 34, 5 },
+ { DBGBUS_SSPP0, 34, 6 },
+ { DBGBUS_SSPP0, 34, 7 },
+
+ { DBGBUS_SSPP0, 35, 0 },
+ { DBGBUS_SSPP0, 35, 1 },
+ { DBGBUS_SSPP0, 35, 2 },
+ { DBGBUS_SSPP0, 35, 3 },
+
+ /* dma 0 */
+ { DBGBUS_SSPP0, 40, 0 },
+ { DBGBUS_SSPP0, 40, 1 },
+ { DBGBUS_SSPP0, 40, 2 },
+ { DBGBUS_SSPP0, 40, 3 },
+ { DBGBUS_SSPP0, 40, 4 },
+ { DBGBUS_SSPP0, 40, 5 },
+ { DBGBUS_SSPP0, 40, 6 },
+ { DBGBUS_SSPP0, 40, 7 },
+
+ { DBGBUS_SSPP0, 41, 0 },
+ { DBGBUS_SSPP0, 41, 1 },
+ { DBGBUS_SSPP0, 41, 2 },
+ { DBGBUS_SSPP0, 41, 3 },
+ { DBGBUS_SSPP0, 41, 4 },
+ { DBGBUS_SSPP0, 41, 5 },
+ { DBGBUS_SSPP0, 41, 6 },
+ { DBGBUS_SSPP0, 41, 7 },
+
+ { DBGBUS_SSPP0, 42, 0 },
+ { DBGBUS_SSPP0, 42, 1 },
+ { DBGBUS_SSPP0, 42, 2 },
+ { DBGBUS_SSPP0, 42, 3 },
+ { DBGBUS_SSPP0, 42, 4 },
+ { DBGBUS_SSPP0, 42, 5 },
+ { DBGBUS_SSPP0, 42, 6 },
+ { DBGBUS_SSPP0, 42, 7 },
+
+ { DBGBUS_SSPP0, 44, 0 },
+ { DBGBUS_SSPP0, 44, 1 },
+ { DBGBUS_SSPP0, 44, 2 },
+ { DBGBUS_SSPP0, 44, 3 },
+ { DBGBUS_SSPP0, 44, 4 },
+ { DBGBUS_SSPP0, 44, 5 },
+ { DBGBUS_SSPP0, 44, 6 },
+ { DBGBUS_SSPP0, 44, 7 },
+
+ { DBGBUS_SSPP0, 45, 0 },
+ { DBGBUS_SSPP0, 45, 1 },
+ { DBGBUS_SSPP0, 45, 2 },
+ { DBGBUS_SSPP0, 45, 3 },
+ { DBGBUS_SSPP0, 45, 4 },
+ { DBGBUS_SSPP0, 45, 5 },
+ { DBGBUS_SSPP0, 45, 6 },
+ { DBGBUS_SSPP0, 45, 7 },
+
+ /* fetch sspp1 */
+ /* vig 1 */
+ { DBGBUS_SSPP1, 0, 0 },
+ { DBGBUS_SSPP1, 0, 1 },
+ { DBGBUS_SSPP1, 0, 2 },
+ { DBGBUS_SSPP1, 0, 3 },
+ { DBGBUS_SSPP1, 0, 4 },
+ { DBGBUS_SSPP1, 0, 5 },
+ { DBGBUS_SSPP1, 0, 6 },
+ { DBGBUS_SSPP1, 0, 7 },
+
+ { DBGBUS_SSPP1, 1, 0 },
+ { DBGBUS_SSPP1, 1, 1 },
+ { DBGBUS_SSPP1, 1, 2 },
+ { DBGBUS_SSPP1, 1, 3 },
+ { DBGBUS_SSPP1, 1, 4 },
+ { DBGBUS_SSPP1, 1, 5 },
+ { DBGBUS_SSPP1, 1, 6 },
+ { DBGBUS_SSPP1, 1, 7 },
+
+ { DBGBUS_SSPP1, 2, 0 },
+ { DBGBUS_SSPP1, 2, 1 },
+ { DBGBUS_SSPP1, 2, 2 },
+ { DBGBUS_SSPP1, 2, 3 },
+ { DBGBUS_SSPP1, 2, 4 },
+ { DBGBUS_SSPP1, 2, 5 },
+ { DBGBUS_SSPP1, 2, 6 },
+ { DBGBUS_SSPP1, 2, 7 },
+
+ { DBGBUS_SSPP1, 4, 0 },
+ { DBGBUS_SSPP1, 4, 1 },
+ { DBGBUS_SSPP1, 4, 2 },
+ { DBGBUS_SSPP1, 4, 3 },
+ { DBGBUS_SSPP1, 4, 4 },
+ { DBGBUS_SSPP1, 4, 5 },
+ { DBGBUS_SSPP1, 4, 6 },
+ { DBGBUS_SSPP1, 4, 7 },
+
+ { DBGBUS_SSPP1, 5, 0 },
+ { DBGBUS_SSPP1, 5, 1 },
+ { DBGBUS_SSPP1, 5, 2 },
+ { DBGBUS_SSPP1, 5, 3 },
+ { DBGBUS_SSPP1, 5, 4 },
+ { DBGBUS_SSPP1, 5, 5 },
+ { DBGBUS_SSPP1, 5, 6 },
+ { DBGBUS_SSPP1, 5, 7 },
+
+ /* vig 3 */
+ { DBGBUS_SSPP1, 20, 0 },
+ { DBGBUS_SSPP1, 20, 1 },
+ { DBGBUS_SSPP1, 20, 2 },
+ { DBGBUS_SSPP1, 20, 3 },
+ { DBGBUS_SSPP1, 20, 4 },
+ { DBGBUS_SSPP1, 20, 5 },
+ { DBGBUS_SSPP1, 20, 6 },
+ { DBGBUS_SSPP1, 20, 7 },
+
+ { DBGBUS_SSPP1, 21, 0 },
+ { DBGBUS_SSPP1, 21, 1 },
+ { DBGBUS_SSPP1, 21, 2 },
+ { DBGBUS_SSPP1, 21, 3 },
+ { DBGBUS_SSPP1, 21, 4 },
+ { DBGBUS_SSPP1, 21, 5 },
+ { DBGBUS_SSPP1, 21, 6 },
+ { DBGBUS_SSPP1, 21, 7 },
+
+ { DBGBUS_SSPP1, 22, 0 },
+ { DBGBUS_SSPP1, 22, 1 },
+ { DBGBUS_SSPP1, 22, 2 },
+ { DBGBUS_SSPP1, 22, 3 },
+ { DBGBUS_SSPP1, 22, 4 },
+ { DBGBUS_SSPP1, 22, 5 },
+ { DBGBUS_SSPP1, 22, 6 },
+ { DBGBUS_SSPP1, 22, 7 },
+
+ { DBGBUS_SSPP1, 24, 0 },
+ { DBGBUS_SSPP1, 24, 1 },
+ { DBGBUS_SSPP1, 24, 2 },
+ { DBGBUS_SSPP1, 24, 3 },
+ { DBGBUS_SSPP1, 24, 4 },
+ { DBGBUS_SSPP1, 24, 5 },
+ { DBGBUS_SSPP1, 24, 6 },
+ { DBGBUS_SSPP1, 24, 7 },
+
+ { DBGBUS_SSPP1, 25, 0 },
+ { DBGBUS_SSPP1, 25, 1 },
+ { DBGBUS_SSPP1, 25, 2 },
+ { DBGBUS_SSPP1, 25, 3 },
+ { DBGBUS_SSPP1, 25, 4 },
+ { DBGBUS_SSPP1, 25, 5 },
+ { DBGBUS_SSPP1, 25, 6 },
+ { DBGBUS_SSPP1, 25, 7 },
+
+ /* dma 3 */
+ { DBGBUS_SSPP1, 30, 0 },
+ { DBGBUS_SSPP1, 30, 1 },
+ { DBGBUS_SSPP1, 30, 2 },
+ { DBGBUS_SSPP1, 30, 3 },
+ { DBGBUS_SSPP1, 30, 4 },
+ { DBGBUS_SSPP1, 30, 5 },
+ { DBGBUS_SSPP1, 30, 6 },
+ { DBGBUS_SSPP1, 30, 7 },
+
+ { DBGBUS_SSPP1, 31, 0 },
+ { DBGBUS_SSPP1, 31, 1 },
+ { DBGBUS_SSPP1, 31, 2 },
+ { DBGBUS_SSPP1, 31, 3 },
+ { DBGBUS_SSPP1, 31, 4 },
+ { DBGBUS_SSPP1, 31, 5 },
+ { DBGBUS_SSPP1, 31, 6 },
+ { DBGBUS_SSPP1, 31, 7 },
+
+ { DBGBUS_SSPP1, 32, 0 },
+ { DBGBUS_SSPP1, 32, 1 },
+ { DBGBUS_SSPP1, 32, 2 },
+ { DBGBUS_SSPP1, 32, 3 },
+ { DBGBUS_SSPP1, 32, 4 },
+ { DBGBUS_SSPP1, 32, 5 },
+ { DBGBUS_SSPP1, 32, 6 },
+ { DBGBUS_SSPP1, 32, 7 },
+
+ { DBGBUS_SSPP1, 33, 0 },
+ { DBGBUS_SSPP1, 33, 1 },
+ { DBGBUS_SSPP1, 33, 2 },
+ { DBGBUS_SSPP1, 33, 3 },
+ { DBGBUS_SSPP1, 33, 4 },
+ { DBGBUS_SSPP1, 33, 5 },
+ { DBGBUS_SSPP1, 33, 6 },
+ { DBGBUS_SSPP1, 33, 7 },
+
+ { DBGBUS_SSPP1, 34, 0 },
+ { DBGBUS_SSPP1, 34, 1 },
+ { DBGBUS_SSPP1, 34, 2 },
+ { DBGBUS_SSPP1, 34, 3 },
+ { DBGBUS_SSPP1, 34, 4 },
+ { DBGBUS_SSPP1, 34, 5 },
+ { DBGBUS_SSPP1, 34, 6 },
+ { DBGBUS_SSPP1, 34, 7 },
+
+ { DBGBUS_SSPP1, 35, 0 },
+ { DBGBUS_SSPP1, 35, 1 },
+ { DBGBUS_SSPP1, 35, 2 },
+
+ /* dma 1 */
+ { DBGBUS_SSPP1, 40, 0 },
+ { DBGBUS_SSPP1, 40, 1 },
+ { DBGBUS_SSPP1, 40, 2 },
+ { DBGBUS_SSPP1, 40, 3 },
+ { DBGBUS_SSPP1, 40, 4 },
+ { DBGBUS_SSPP1, 40, 5 },
+ { DBGBUS_SSPP1, 40, 6 },
+ { DBGBUS_SSPP1, 40, 7 },
+
+ { DBGBUS_SSPP1, 41, 0 },
+ { DBGBUS_SSPP1, 41, 1 },
+ { DBGBUS_SSPP1, 41, 2 },
+ { DBGBUS_SSPP1, 41, 3 },
+ { DBGBUS_SSPP1, 41, 4 },
+ { DBGBUS_SSPP1, 41, 5 },
+ { DBGBUS_SSPP1, 41, 6 },
+ { DBGBUS_SSPP1, 41, 7 },
+
+ { DBGBUS_SSPP1, 42, 0 },
+ { DBGBUS_SSPP1, 42, 1 },
+ { DBGBUS_SSPP1, 42, 2 },
+ { DBGBUS_SSPP1, 42, 3 },
+ { DBGBUS_SSPP1, 42, 4 },
+ { DBGBUS_SSPP1, 42, 5 },
+ { DBGBUS_SSPP1, 42, 6 },
+ { DBGBUS_SSPP1, 42, 7 },
+
+ { DBGBUS_SSPP1, 44, 0 },
+ { DBGBUS_SSPP1, 44, 1 },
+ { DBGBUS_SSPP1, 44, 2 },
+ { DBGBUS_SSPP1, 44, 3 },
+ { DBGBUS_SSPP1, 44, 4 },
+ { DBGBUS_SSPP1, 44, 5 },
+ { DBGBUS_SSPP1, 44, 6 },
+ { DBGBUS_SSPP1, 44, 7 },
+
+ { DBGBUS_SSPP1, 45, 0 },
+ { DBGBUS_SSPP1, 45, 1 },
+ { DBGBUS_SSPP1, 45, 2 },
+ { DBGBUS_SSPP1, 45, 3 },
+ { DBGBUS_SSPP1, 45, 4 },
+ { DBGBUS_SSPP1, 45, 5 },
+ { DBGBUS_SSPP1, 45, 6 },
+ { DBGBUS_SSPP1, 45, 7 },
+
+ /* dspp */
+ { DBGBUS_DSPP, 13, 0 },
+ { DBGBUS_DSPP, 19, 0 },
+ { DBGBUS_DSPP, 14, 0 },
+ { DBGBUS_DSPP, 14, 1 },
+ { DBGBUS_DSPP, 14, 3 },
+ { DBGBUS_DSPP, 20, 0 },
+ { DBGBUS_DSPP, 20, 1 },
+ { DBGBUS_DSPP, 20, 3 },
+
+ /* ppb_0 */
+ { DBGBUS_DSPP, 31, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 33, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 35, 0, _dpu_debug_bus_ppb0_dump },
+ { DBGBUS_DSPP, 42, 0, _dpu_debug_bus_ppb0_dump },
+
+ /* ppb_1 */
+ { DBGBUS_DSPP, 32, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 34, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 36, 0, _dpu_debug_bus_ppb1_dump },
+ { DBGBUS_DSPP, 43, 0, _dpu_debug_bus_ppb1_dump },
+
+ /* lm_lut */
+ { DBGBUS_DSPP, 109, 0 },
+ { DBGBUS_DSPP, 105, 0 },
+ { DBGBUS_DSPP, 103, 0 },
+
+ /* crossbar */
+ { DBGBUS_DSPP, 0, 0, _dpu_debug_bus_xbar_dump },
+
+ /* rotator */
+ { DBGBUS_DSPP, 9, 0},
+
+ /* blend */
+ /* LM0 */
+ { DBGBUS_DSPP, 63, 1},
+ { DBGBUS_DSPP, 63, 2},
+ { DBGBUS_DSPP, 63, 3},
+ { DBGBUS_DSPP, 63, 4},
+ { DBGBUS_DSPP, 63, 5},
+ { DBGBUS_DSPP, 63, 6},
+ { DBGBUS_DSPP, 63, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 64, 1},
+ { DBGBUS_DSPP, 64, 2},
+ { DBGBUS_DSPP, 64, 3},
+ { DBGBUS_DSPP, 64, 4},
+ { DBGBUS_DSPP, 64, 5},
+ { DBGBUS_DSPP, 64, 6},
+ { DBGBUS_DSPP, 64, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 65, 1},
+ { DBGBUS_DSPP, 65, 2},
+ { DBGBUS_DSPP, 65, 3},
+ { DBGBUS_DSPP, 65, 4},
+ { DBGBUS_DSPP, 65, 5},
+ { DBGBUS_DSPP, 65, 6},
+ { DBGBUS_DSPP, 65, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 66, 1},
+ { DBGBUS_DSPP, 66, 2},
+ { DBGBUS_DSPP, 66, 3},
+ { DBGBUS_DSPP, 66, 4},
+ { DBGBUS_DSPP, 66, 5},
+ { DBGBUS_DSPP, 66, 6},
+ { DBGBUS_DSPP, 66, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 67, 1},
+ { DBGBUS_DSPP, 67, 2},
+ { DBGBUS_DSPP, 67, 3},
+ { DBGBUS_DSPP, 67, 4},
+ { DBGBUS_DSPP, 67, 5},
+ { DBGBUS_DSPP, 67, 6},
+ { DBGBUS_DSPP, 67, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 68, 1},
+ { DBGBUS_DSPP, 68, 2},
+ { DBGBUS_DSPP, 68, 3},
+ { DBGBUS_DSPP, 68, 4},
+ { DBGBUS_DSPP, 68, 5},
+ { DBGBUS_DSPP, 68, 6},
+ { DBGBUS_DSPP, 68, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 69, 1},
+ { DBGBUS_DSPP, 69, 2},
+ { DBGBUS_DSPP, 69, 3},
+ { DBGBUS_DSPP, 69, 4},
+ { DBGBUS_DSPP, 69, 5},
+ { DBGBUS_DSPP, 69, 6},
+ { DBGBUS_DSPP, 69, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 84, 1},
+ { DBGBUS_DSPP, 84, 2},
+ { DBGBUS_DSPP, 84, 3},
+ { DBGBUS_DSPP, 84, 4},
+ { DBGBUS_DSPP, 84, 5},
+ { DBGBUS_DSPP, 84, 6},
+ { DBGBUS_DSPP, 84, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 85, 1},
+ { DBGBUS_DSPP, 85, 2},
+ { DBGBUS_DSPP, 85, 3},
+ { DBGBUS_DSPP, 85, 4},
+ { DBGBUS_DSPP, 85, 5},
+ { DBGBUS_DSPP, 85, 6},
+ { DBGBUS_DSPP, 85, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 86, 1},
+ { DBGBUS_DSPP, 86, 2},
+ { DBGBUS_DSPP, 86, 3},
+ { DBGBUS_DSPP, 86, 4},
+ { DBGBUS_DSPP, 86, 5},
+ { DBGBUS_DSPP, 86, 6},
+ { DBGBUS_DSPP, 86, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 87, 1},
+ { DBGBUS_DSPP, 87, 2},
+ { DBGBUS_DSPP, 87, 3},
+ { DBGBUS_DSPP, 87, 4},
+ { DBGBUS_DSPP, 87, 5},
+ { DBGBUS_DSPP, 87, 6},
+ { DBGBUS_DSPP, 87, 7, _dpu_debug_bus_lm_dump },
+
+ /* LM1 */
+ { DBGBUS_DSPP, 70, 1},
+ { DBGBUS_DSPP, 70, 2},
+ { DBGBUS_DSPP, 70, 3},
+ { DBGBUS_DSPP, 70, 4},
+ { DBGBUS_DSPP, 70, 5},
+ { DBGBUS_DSPP, 70, 6},
+ { DBGBUS_DSPP, 70, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 71, 1},
+ { DBGBUS_DSPP, 71, 2},
+ { DBGBUS_DSPP, 71, 3},
+ { DBGBUS_DSPP, 71, 4},
+ { DBGBUS_DSPP, 71, 5},
+ { DBGBUS_DSPP, 71, 6},
+ { DBGBUS_DSPP, 71, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 72, 1},
+ { DBGBUS_DSPP, 72, 2},
+ { DBGBUS_DSPP, 72, 3},
+ { DBGBUS_DSPP, 72, 4},
+ { DBGBUS_DSPP, 72, 5},
+ { DBGBUS_DSPP, 72, 6},
+ { DBGBUS_DSPP, 72, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 73, 1},
+ { DBGBUS_DSPP, 73, 2},
+ { DBGBUS_DSPP, 73, 3},
+ { DBGBUS_DSPP, 73, 4},
+ { DBGBUS_DSPP, 73, 5},
+ { DBGBUS_DSPP, 73, 6},
+ { DBGBUS_DSPP, 73, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 74, 1},
+ { DBGBUS_DSPP, 74, 2},
+ { DBGBUS_DSPP, 74, 3},
+ { DBGBUS_DSPP, 74, 4},
+ { DBGBUS_DSPP, 74, 5},
+ { DBGBUS_DSPP, 74, 6},
+ { DBGBUS_DSPP, 74, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 75, 1},
+ { DBGBUS_DSPP, 75, 2},
+ { DBGBUS_DSPP, 75, 3},
+ { DBGBUS_DSPP, 75, 4},
+ { DBGBUS_DSPP, 75, 5},
+ { DBGBUS_DSPP, 75, 6},
+ { DBGBUS_DSPP, 75, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 76, 1},
+ { DBGBUS_DSPP, 76, 2},
+ { DBGBUS_DSPP, 76, 3},
+ { DBGBUS_DSPP, 76, 4},
+ { DBGBUS_DSPP, 76, 5},
+ { DBGBUS_DSPP, 76, 6},
+ { DBGBUS_DSPP, 76, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 88, 1},
+ { DBGBUS_DSPP, 88, 2},
+ { DBGBUS_DSPP, 88, 3},
+ { DBGBUS_DSPP, 88, 4},
+ { DBGBUS_DSPP, 88, 5},
+ { DBGBUS_DSPP, 88, 6},
+ { DBGBUS_DSPP, 88, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 89, 1},
+ { DBGBUS_DSPP, 89, 2},
+ { DBGBUS_DSPP, 89, 3},
+ { DBGBUS_DSPP, 89, 4},
+ { DBGBUS_DSPP, 89, 5},
+ { DBGBUS_DSPP, 89, 6},
+ { DBGBUS_DSPP, 89, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 90, 1},
+ { DBGBUS_DSPP, 90, 2},
+ { DBGBUS_DSPP, 90, 3},
+ { DBGBUS_DSPP, 90, 4},
+ { DBGBUS_DSPP, 90, 5},
+ { DBGBUS_DSPP, 90, 6},
+ { DBGBUS_DSPP, 90, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 91, 1},
+ { DBGBUS_DSPP, 91, 2},
+ { DBGBUS_DSPP, 91, 3},
+ { DBGBUS_DSPP, 91, 4},
+ { DBGBUS_DSPP, 91, 5},
+ { DBGBUS_DSPP, 91, 6},
+ { DBGBUS_DSPP, 91, 7, _dpu_debug_bus_lm_dump },
+
+ /* LM2 */
+ { DBGBUS_DSPP, 77, 0},
+ { DBGBUS_DSPP, 77, 1},
+ { DBGBUS_DSPP, 77, 2},
+ { DBGBUS_DSPP, 77, 3},
+ { DBGBUS_DSPP, 77, 4},
+ { DBGBUS_DSPP, 77, 5},
+ { DBGBUS_DSPP, 77, 6},
+ { DBGBUS_DSPP, 77, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 78, 0},
+ { DBGBUS_DSPP, 78, 1},
+ { DBGBUS_DSPP, 78, 2},
+ { DBGBUS_DSPP, 78, 3},
+ { DBGBUS_DSPP, 78, 4},
+ { DBGBUS_DSPP, 78, 5},
+ { DBGBUS_DSPP, 78, 6},
+ { DBGBUS_DSPP, 78, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 79, 0},
+ { DBGBUS_DSPP, 79, 1},
+ { DBGBUS_DSPP, 79, 2},
+ { DBGBUS_DSPP, 79, 3},
+ { DBGBUS_DSPP, 79, 4},
+ { DBGBUS_DSPP, 79, 5},
+ { DBGBUS_DSPP, 79, 6},
+ { DBGBUS_DSPP, 79, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 80, 0},
+ { DBGBUS_DSPP, 80, 1},
+ { DBGBUS_DSPP, 80, 2},
+ { DBGBUS_DSPP, 80, 3},
+ { DBGBUS_DSPP, 80, 4},
+ { DBGBUS_DSPP, 80, 5},
+ { DBGBUS_DSPP, 80, 6},
+ { DBGBUS_DSPP, 80, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 81, 0},
+ { DBGBUS_DSPP, 81, 1},
+ { DBGBUS_DSPP, 81, 2},
+ { DBGBUS_DSPP, 81, 3},
+ { DBGBUS_DSPP, 81, 4},
+ { DBGBUS_DSPP, 81, 5},
+ { DBGBUS_DSPP, 81, 6},
+ { DBGBUS_DSPP, 81, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 82, 0},
+ { DBGBUS_DSPP, 82, 1},
+ { DBGBUS_DSPP, 82, 2},
+ { DBGBUS_DSPP, 82, 3},
+ { DBGBUS_DSPP, 82, 4},
+ { DBGBUS_DSPP, 82, 5},
+ { DBGBUS_DSPP, 82, 6},
+ { DBGBUS_DSPP, 82, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 83, 0},
+ { DBGBUS_DSPP, 83, 1},
+ { DBGBUS_DSPP, 83, 2},
+ { DBGBUS_DSPP, 83, 3},
+ { DBGBUS_DSPP, 83, 4},
+ { DBGBUS_DSPP, 83, 5},
+ { DBGBUS_DSPP, 83, 6},
+ { DBGBUS_DSPP, 83, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 92, 1},
+ { DBGBUS_DSPP, 92, 2},
+ { DBGBUS_DSPP, 92, 3},
+ { DBGBUS_DSPP, 92, 4},
+ { DBGBUS_DSPP, 92, 5},
+ { DBGBUS_DSPP, 92, 6},
+ { DBGBUS_DSPP, 92, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 93, 1},
+ { DBGBUS_DSPP, 93, 2},
+ { DBGBUS_DSPP, 93, 3},
+ { DBGBUS_DSPP, 93, 4},
+ { DBGBUS_DSPP, 93, 5},
+ { DBGBUS_DSPP, 93, 6},
+ { DBGBUS_DSPP, 93, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 94, 1},
+ { DBGBUS_DSPP, 94, 2},
+ { DBGBUS_DSPP, 94, 3},
+ { DBGBUS_DSPP, 94, 4},
+ { DBGBUS_DSPP, 94, 5},
+ { DBGBUS_DSPP, 94, 6},
+ { DBGBUS_DSPP, 94, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 95, 1},
+ { DBGBUS_DSPP, 95, 2},
+ { DBGBUS_DSPP, 95, 3},
+ { DBGBUS_DSPP, 95, 4},
+ { DBGBUS_DSPP, 95, 5},
+ { DBGBUS_DSPP, 95, 6},
+ { DBGBUS_DSPP, 95, 7, _dpu_debug_bus_lm_dump },
+
+ /* LM5 */
+ { DBGBUS_DSPP, 110, 1},
+ { DBGBUS_DSPP, 110, 2},
+ { DBGBUS_DSPP, 110, 3},
+ { DBGBUS_DSPP, 110, 4},
+ { DBGBUS_DSPP, 110, 5},
+ { DBGBUS_DSPP, 110, 6},
+ { DBGBUS_DSPP, 110, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 111, 1},
+ { DBGBUS_DSPP, 111, 2},
+ { DBGBUS_DSPP, 111, 3},
+ { DBGBUS_DSPP, 111, 4},
+ { DBGBUS_DSPP, 111, 5},
+ { DBGBUS_DSPP, 111, 6},
+ { DBGBUS_DSPP, 111, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 112, 1},
+ { DBGBUS_DSPP, 112, 2},
+ { DBGBUS_DSPP, 112, 3},
+ { DBGBUS_DSPP, 112, 4},
+ { DBGBUS_DSPP, 112, 5},
+ { DBGBUS_DSPP, 112, 6},
+ { DBGBUS_DSPP, 112, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 113, 1},
+ { DBGBUS_DSPP, 113, 2},
+ { DBGBUS_DSPP, 113, 3},
+ { DBGBUS_DSPP, 113, 4},
+ { DBGBUS_DSPP, 113, 5},
+ { DBGBUS_DSPP, 113, 6},
+ { DBGBUS_DSPP, 113, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 114, 1},
+ { DBGBUS_DSPP, 114, 2},
+ { DBGBUS_DSPP, 114, 3},
+ { DBGBUS_DSPP, 114, 4},
+ { DBGBUS_DSPP, 114, 5},
+ { DBGBUS_DSPP, 114, 6},
+ { DBGBUS_DSPP, 114, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 115, 1},
+ { DBGBUS_DSPP, 115, 2},
+ { DBGBUS_DSPP, 115, 3},
+ { DBGBUS_DSPP, 115, 4},
+ { DBGBUS_DSPP, 115, 5},
+ { DBGBUS_DSPP, 115, 6},
+ { DBGBUS_DSPP, 115, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 116, 1},
+ { DBGBUS_DSPP, 116, 2},
+ { DBGBUS_DSPP, 116, 3},
+ { DBGBUS_DSPP, 116, 4},
+ { DBGBUS_DSPP, 116, 5},
+ { DBGBUS_DSPP, 116, 6},
+ { DBGBUS_DSPP, 116, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 117, 1},
+ { DBGBUS_DSPP, 117, 2},
+ { DBGBUS_DSPP, 117, 3},
+ { DBGBUS_DSPP, 117, 4},
+ { DBGBUS_DSPP, 117, 5},
+ { DBGBUS_DSPP, 117, 6},
+ { DBGBUS_DSPP, 117, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 118, 1},
+ { DBGBUS_DSPP, 118, 2},
+ { DBGBUS_DSPP, 118, 3},
+ { DBGBUS_DSPP, 118, 4},
+ { DBGBUS_DSPP, 118, 5},
+ { DBGBUS_DSPP, 118, 6},
+ { DBGBUS_DSPP, 118, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 119, 1},
+ { DBGBUS_DSPP, 119, 2},
+ { DBGBUS_DSPP, 119, 3},
+ { DBGBUS_DSPP, 119, 4},
+ { DBGBUS_DSPP, 119, 5},
+ { DBGBUS_DSPP, 119, 6},
+ { DBGBUS_DSPP, 119, 7, _dpu_debug_bus_lm_dump },
+
+ { DBGBUS_DSPP, 120, 1},
+ { DBGBUS_DSPP, 120, 2},
+ { DBGBUS_DSPP, 120, 3},
+ { DBGBUS_DSPP, 120, 4},
+ { DBGBUS_DSPP, 120, 5},
+ { DBGBUS_DSPP, 120, 6},
+ { DBGBUS_DSPP, 120, 7, _dpu_debug_bus_lm_dump },
+
+ /* csc */
+ { DBGBUS_SSPP0, 7, 0},
+ { DBGBUS_SSPP0, 7, 1},
+ { DBGBUS_SSPP0, 27, 0},
+ { DBGBUS_SSPP0, 27, 1},
+ { DBGBUS_SSPP1, 7, 0},
+ { DBGBUS_SSPP1, 7, 1},
+ { DBGBUS_SSPP1, 27, 0},
+ { DBGBUS_SSPP1, 27, 1},
+
+ /* pcc */
+ { DBGBUS_SSPP0, 3, 3},
+ { DBGBUS_SSPP0, 23, 3},
+ { DBGBUS_SSPP0, 33, 3},
+ { DBGBUS_SSPP0, 43, 3},
+ { DBGBUS_SSPP1, 3, 3},
+ { DBGBUS_SSPP1, 23, 3},
+ { DBGBUS_SSPP1, 33, 3},
+ { DBGBUS_SSPP1, 43, 3},
+
+ /* spa */
+ { DBGBUS_SSPP0, 8, 0},
+ { DBGBUS_SSPP0, 28, 0},
+ { DBGBUS_SSPP1, 8, 0},
+ { DBGBUS_SSPP1, 28, 0},
+ { DBGBUS_DSPP, 13, 0},
+ { DBGBUS_DSPP, 19, 0},
+
+ /* igc */
+ { DBGBUS_SSPP0, 17, 0},
+ { DBGBUS_SSPP0, 17, 1},
+ { DBGBUS_SSPP0, 17, 3},
+ { DBGBUS_SSPP0, 37, 0},
+ { DBGBUS_SSPP0, 37, 1},
+ { DBGBUS_SSPP0, 37, 3},
+ { DBGBUS_SSPP0, 46, 0},
+ { DBGBUS_SSPP0, 46, 1},
+ { DBGBUS_SSPP0, 46, 3},
+
+ { DBGBUS_SSPP1, 17, 0},
+ { DBGBUS_SSPP1, 17, 1},
+ { DBGBUS_SSPP1, 17, 3},
+ { DBGBUS_SSPP1, 37, 0},
+ { DBGBUS_SSPP1, 37, 1},
+ { DBGBUS_SSPP1, 37, 3},
+ { DBGBUS_SSPP1, 46, 0},
+ { DBGBUS_SSPP1, 46, 1},
+ { DBGBUS_SSPP1, 46, 3},
+
+ { DBGBUS_DSPP, 14, 0},
+ { DBGBUS_DSPP, 14, 1},
+ { DBGBUS_DSPP, 14, 3},
+ { DBGBUS_DSPP, 20, 0},
+ { DBGBUS_DSPP, 20, 1},
+ { DBGBUS_DSPP, 20, 3},
+
+ /* intf0-3 */
+ { DBGBUS_PERIPH, 0, 0},
+ { DBGBUS_PERIPH, 1, 0},
+ { DBGBUS_PERIPH, 2, 0},
+ { DBGBUS_PERIPH, 3, 0},
+
+ /* te counter wrapper */
+ { DBGBUS_PERIPH, 60, 0},
+
+ /* dsc0 */
+ { DBGBUS_PERIPH, 47, 0},
+ { DBGBUS_PERIPH, 47, 1},
+ { DBGBUS_PERIPH, 47, 2},
+ { DBGBUS_PERIPH, 47, 3},
+ { DBGBUS_PERIPH, 47, 4},
+ { DBGBUS_PERIPH, 47, 5},
+ { DBGBUS_PERIPH, 47, 6},
+ { DBGBUS_PERIPH, 47, 7},
+
+ /* dsc1 */
+ { DBGBUS_PERIPH, 48, 0},
+ { DBGBUS_PERIPH, 48, 1},
+ { DBGBUS_PERIPH, 48, 2},
+ { DBGBUS_PERIPH, 48, 3},
+ { DBGBUS_PERIPH, 48, 4},
+ { DBGBUS_PERIPH, 48, 5},
+ { DBGBUS_PERIPH, 48, 6},
+ { DBGBUS_PERIPH, 48, 7},
+
+ /* dsc2 */
+ { DBGBUS_PERIPH, 51, 0},
+ { DBGBUS_PERIPH, 51, 1},
+ { DBGBUS_PERIPH, 51, 2},
+ { DBGBUS_PERIPH, 51, 3},
+ { DBGBUS_PERIPH, 51, 4},
+ { DBGBUS_PERIPH, 51, 5},
+ { DBGBUS_PERIPH, 51, 6},
+ { DBGBUS_PERIPH, 51, 7},
+
+ /* dsc3 */
+ { DBGBUS_PERIPH, 52, 0},
+ { DBGBUS_PERIPH, 52, 1},
+ { DBGBUS_PERIPH, 52, 2},
+ { DBGBUS_PERIPH, 52, 3},
+ { DBGBUS_PERIPH, 52, 4},
+ { DBGBUS_PERIPH, 52, 5},
+ { DBGBUS_PERIPH, 52, 6},
+ { DBGBUS_PERIPH, 52, 7},
+
+ /* tear-check */
+ { DBGBUS_PERIPH, 63, 0 },
+ { DBGBUS_PERIPH, 64, 0 },
+ { DBGBUS_PERIPH, 65, 0 },
+ { DBGBUS_PERIPH, 73, 0 },
+ { DBGBUS_PERIPH, 74, 0 },
+
+ /* cdwn */
+ { DBGBUS_PERIPH, 80, 0},
+ { DBGBUS_PERIPH, 80, 1},
+ { DBGBUS_PERIPH, 80, 2},
+
+ { DBGBUS_PERIPH, 81, 0},
+ { DBGBUS_PERIPH, 81, 1},
+ { DBGBUS_PERIPH, 81, 2},
+
+ { DBGBUS_PERIPH, 82, 0},
+ { DBGBUS_PERIPH, 82, 1},
+ { DBGBUS_PERIPH, 82, 2},
+ { DBGBUS_PERIPH, 82, 3},
+ { DBGBUS_PERIPH, 82, 4},
+ { DBGBUS_PERIPH, 82, 5},
+ { DBGBUS_PERIPH, 82, 6},
+ { DBGBUS_PERIPH, 82, 7},
+
+ /* hdmi */
+ { DBGBUS_PERIPH, 68, 0},
+ { DBGBUS_PERIPH, 68, 1},
+ { DBGBUS_PERIPH, 68, 2},
+ { DBGBUS_PERIPH, 68, 3},
+ { DBGBUS_PERIPH, 68, 4},
+ { DBGBUS_PERIPH, 68, 5},
+
+ /* edp */
+ { DBGBUS_PERIPH, 69, 0},
+ { DBGBUS_PERIPH, 69, 1},
+ { DBGBUS_PERIPH, 69, 2},
+ { DBGBUS_PERIPH, 69, 3},
+ { DBGBUS_PERIPH, 69, 4},
+ { DBGBUS_PERIPH, 69, 5},
+
+ /* dsi0 */
+ { DBGBUS_PERIPH, 70, 0},
+ { DBGBUS_PERIPH, 70, 1},
+ { DBGBUS_PERIPH, 70, 2},
+ { DBGBUS_PERIPH, 70, 3},
+ { DBGBUS_PERIPH, 70, 4},
+ { DBGBUS_PERIPH, 70, 5},
+
+ /* dsi1 */
+ { DBGBUS_PERIPH, 71, 0},
+ { DBGBUS_PERIPH, 71, 1},
+ { DBGBUS_PERIPH, 71, 2},
+ { DBGBUS_PERIPH, 71, 3},
+ { DBGBUS_PERIPH, 71, 4},
+ { DBGBUS_PERIPH, 71, 5},
+};
+
+static struct vbif_debug_bus_entry vbif_dbg_bus_msm8998[] = {
+ {0x214, 0x21c, 16, 2, 0x0, 0xd}, /* arb clients */
+ {0x214, 0x21c, 16, 2, 0x80, 0xc0}, /* arb clients */
+ {0x214, 0x21c, 16, 2, 0x100, 0x140}, /* arb clients */
+ {0x214, 0x21c, 0, 16, 0x0, 0xf}, /* xin blocks - axi side */
+ {0x214, 0x21c, 0, 16, 0x80, 0xa4}, /* xin blocks - axi side */
+ {0x214, 0x21c, 0, 15, 0x100, 0x124}, /* xin blocks - axi side */
+ {0x21c, 0x214, 0, 14, 0, 0xc}, /* xin blocks - clock side */
+};
+
+/**
+ * _dpu_dbg_enable_power - turn power on or off for hw register access
+ * @enable: nonzero to turn power on, zero to turn it off
+ */
+static inline void _dpu_dbg_enable_power(int enable)
+{
+ if (enable)
+ pm_runtime_get_sync(dpu_dbg_base.dev);
+ else
+ pm_runtime_put_sync(dpu_dbg_base.dev);
+}
+
+static void _dpu_dbg_dump_dpu_dbg_bus(struct dpu_dbg_dpu_debug_bus *bus)
+{
+ bool in_log, in_mem;
+ u32 **dump_mem = NULL;
+ u32 *dump_addr = NULL;
+ u32 status = 0;
+ struct dpu_debug_bus_entry *head;
+ phys_addr_t phys = 0;
+ int list_size;
+ int i;
+ u32 offset;
+ void __iomem *mem_base = NULL;
+ struct dpu_dbg_reg_base *reg_base;
+
+ if (!bus || !bus->cmn.entries_size)
+ return;
+
+ list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
+ reg_base_head)
+ if (strlen(reg_base->name) &&
+ !strcmp(reg_base->name, bus->cmn.name))
+ mem_base = reg_base->base + bus->top_blk_off;
+
+ if (!mem_base) {
+ pr_err("unable to find mem_base for %s\n", bus->cmn.name);
+ return;
+ }
+
+ dump_mem = &bus->cmn.dumped_content;
+
+ /* keep 4 u32 values (16 bytes) in memory for each bus entry */
+ list_size = (bus->cmn.entries_size * 4 * 4);
+
+ in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
+ in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
+
+ if (!in_log && !in_mem)
+ return;
+
+ dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
+ bus->cmn.name);
+
+ if (in_mem) {
+ if (!(*dump_mem))
+ *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
+ list_size, &phys, GFP_KERNEL);
+
+ if (*dump_mem) {
+ dump_addr = *dump_mem;
+ dev_info(dpu_dbg_base.dev,
+ "%s: start_addr:0x%pK len:0x%x\n",
+ __func__, dump_addr, list_size);
+ } else {
+ in_mem = false;
+ pr_err("dump_mem: allocation fails\n");
+ }
+ }
+
+ _dpu_dbg_enable_power(true);
+ for (i = 0; i < bus->cmn.entries_size; i++) {
+ head = bus->entries + i;
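+ /* select this entry's block and test point on the debug bus */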
+ writel_relaxed(TEST_MASK(head->block_id, head->test_id),
+ mem_base + head->wr_addr);
+ wmb(); /* make sure test bits were written */
+
+ if (bus->cmn.flags & DBGBUS_FLAGS_DSPP) {
+ offset = DBGBUS_DSPP_STATUS;
+ /* keep DSPP test point enabled */
+ if (head->wr_addr != DBGBUS_DSPP)
+ writel_relaxed(0xF, mem_base + DBGBUS_DSPP);
+ } else {
+ offset = head->wr_addr + 0x4;
+ }
+
+ status = readl_relaxed(mem_base + offset);
+
+ if (in_log)
+ dev_info(dpu_dbg_base.dev,
+ "waddr=0x%x blk=%d tst=%d val=0x%x\n",
+ head->wr_addr, head->block_id,
+ head->test_id, status);
+
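+ /* each in-memory record is 4 u32s: wr_addr, block_id, test_id, status */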
+ if (dump_addr && in_mem) {
+ dump_addr[i*4] = head->wr_addr;
+ dump_addr[i*4 + 1] = head->block_id;
+ dump_addr[i*4 + 2] = head->test_id;
+ dump_addr[i*4 + 3] = status;
+ }
+
+ if (head->analyzer)
+ head->analyzer(mem_base, head, status);
+
+ /* Disable debug bus once we are done */
+ writel_relaxed(0, mem_base + head->wr_addr);
+ if (bus->cmn.flags & DBGBUS_FLAGS_DSPP &&
+ head->wr_addr != DBGBUS_DSPP)
+ writel_relaxed(0x0, mem_base + DBGBUS_DSPP);
+ }
+ _dpu_dbg_enable_power(false);
+
+ dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
+ bus->cmn.name);
+}
+
+static void _dpu_dbg_dump_vbif_debug_bus_entry(
+ struct vbif_debug_bus_entry *head, void __iomem *mem_base,
+ u32 *dump_addr, bool in_log)
+{
+ int i, j;
+ u32 val;
+
+ if (!dump_addr && !in_log)
+ return;
+
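+ /* enable each bus block in turn, then sample every test point on it */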
+ for (i = 0; i < head->block_cnt; i++) {
+ writel_relaxed(1 << (i + head->bit_offset),
+ mem_base + head->block_bus_addr);
+ /* make sure that the current bus block is enabled */
+ wmb();
+ for (j = head->test_pnt_start; j < head->test_pnt_cnt; j++) {
+ writel_relaxed(j, mem_base + head->block_bus_addr + 4);
+ /* make sure that test point is enabled */
+ wmb();
+ val = readl_relaxed(mem_base + MMSS_VBIF_TEST_BUS_OUT);
+ if (dump_addr) {
+ *dump_addr++ = head->block_bus_addr;
+ *dump_addr++ = i;
+ *dump_addr++ = j;
+ *dump_addr++ = val;
+ }
+ if (in_log)
+ dev_info(dpu_dbg_base.dev,
+ "testpoint:%x arb/xin id=%d index=%d val=0x%x\n",
+ head->block_bus_addr, i, j, val);
+ }
+ }
+}
+
+static void _dpu_dbg_dump_vbif_dbg_bus(struct dpu_dbg_vbif_debug_bus *bus)
+{
+ bool in_log, in_mem;
+ u32 **dump_mem = NULL;
+ u32 *dump_addr = NULL;
+ u32 value, d0, d1;
+ unsigned long reg, reg1, reg2;
+ struct vbif_debug_bus_entry *head;
+ phys_addr_t phys = 0;
+ int i, list_size = 0;
+ void __iomem *mem_base = NULL;
+ struct vbif_debug_bus_entry *dbg_bus;
+ u32 bus_size;
+ struct dpu_dbg_reg_base *reg_base;
+
+ if (!bus || !bus->cmn.entries_size)
+ return;
+
+ list_for_each_entry(reg_base, &dpu_dbg_base.reg_base_list,
+ reg_base_head)
+ if (strlen(reg_base->name) &&
+ !strcmp(reg_base->name, bus->cmn.name))
+ mem_base = reg_base->base;
+
+ if (!mem_base) {
+ pr_err("unable to find mem_base for %s\n", bus->cmn.name);
+ return;
+ }
+
+ dbg_bus = bus->entries;
+ bus_size = bus->cmn.entries_size;
+ list_size = bus->cmn.entries_size;
+ dump_mem = &bus->cmn.dumped_content;
+
+ dev_info(dpu_dbg_base.dev, "======== start %s dump =========\n",
+ bus->cmn.name);
+
+ if (!dump_mem || !dbg_bus || !bus_size || !list_size)
+ return;
+
+ /* allocate memory for each test point */
+ for (i = 0; i < bus_size; i++) {
+ head = dbg_bus + i;
+ list_size += (head->block_cnt * head->test_pnt_cnt);
+ }
+
+ /* 16 bytes (4 u32 values) for each test point */
+ list_size *= 16;
+
+ in_log = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_LOG);
+ in_mem = (bus->cmn.enable_mask & DPU_DBG_DUMP_IN_MEM);
+
+ if (!in_log && !in_mem)
+ return;
+
+ if (in_mem) {
+ if (!(*dump_mem))
+ *dump_mem = dma_alloc_coherent(dpu_dbg_base.dev,
+ list_size, &phys, GFP_KERNEL);
+
+ if (*dump_mem) {
+ dump_addr = *dump_mem;
+ dev_info(dpu_dbg_base.dev,
+ "%s: start_addr:0x%pK len:0x%x\n",
+ __func__, dump_addr, list_size);
+ } else {
+ in_mem = false;
+ pr_err("dump_mem: allocation fails\n");
+ }
+ }
+
+ _dpu_dbg_enable_power(true);
+
+ value = readl_relaxed(mem_base + MMSS_VBIF_CLKON);
+ writel_relaxed(value | BIT(1), mem_base + MMSS_VBIF_CLKON);
+
+ /* make sure that vbif core is on */
+ wmb();
+
+ /*
+ * Extract VBIF error info based on XIN halt and error status.
+ * If the XIN client is not in HALT state, or an error is detected,
+ * then retrieve the VBIF error info for it.
+ */
+ reg = readl_relaxed(mem_base + MMSS_VBIF_XIN_HALT_CTRL1);
+ reg1 = readl_relaxed(mem_base + MMSS_VBIF_PND_ERR);
+ reg2 = readl_relaxed(mem_base + MMSS_VBIF_SRC_ERR);
+ dev_err(dpu_dbg_base.dev,
+ "XIN HALT:0x%lX, PND ERR:0x%lX, SRC ERR:0x%lX\n",
+ reg, reg1, reg2);
+ reg >>= 16;
+ reg &= ~(reg1 | reg2);
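+ /* a clear bit now means the XIN client is not halted or has an error */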
+ for (i = 0; i < MMSS_VBIF_CLIENT_NUM; i++) {
+ if (!test_bit(0, &reg)) {
+ writel_relaxed(i, mem_base + MMSS_VBIF_ERR_INFO);
+ /* make sure reg write goes through */
+ wmb();
+
+ d0 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO);
+ d1 = readl_relaxed(mem_base + MMSS_VBIF_ERR_INFO_1);
+
+ dev_err(dpu_dbg_base.dev,
+ "Client:%d, errinfo=0x%X, errinfo1=0x%X\n",
+ i, d0, d1);
+ }
+ reg >>= 1;
+ }
+
+ for (i = 0; i < bus_size; i++) {
+ head = dbg_bus + i;
+
+ writel_relaxed(0, mem_base + head->disable_bus_addr);
+ writel_relaxed(BIT(0), mem_base + MMSS_VBIF_TEST_BUS_OUT_CTRL);
+ /* make sure that the other bus is off */
+ wmb();
+
+ _dpu_dbg_dump_vbif_debug_bus_entry(head, mem_base, dump_addr,
+ in_log);
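+ /* advance past the 4 u32 values recorded for each test point */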
+ if (dump_addr)
+ dump_addr += (head->block_cnt * head->test_pnt_cnt * 4);
+ }
+
+ _dpu_dbg_enable_power(false);
+
+ dev_info(dpu_dbg_base.dev, "======== end %s dump =========\n",
+ bus->cmn.name);
+}
+
+/**
+ * _dpu_dump_array - dump array of register bases
+ * @name: string indicating origin of dump
+ * @dump_dbgbus_dpu: whether to dump the dpu debug bus
+ * @dump_dbgbus_vbif_rt: whether to dump the vbif rt debug bus
+ */
+static void _dpu_dump_array(const char *name, bool dump_dbgbus_dpu,
+ bool dump_dbgbus_vbif_rt)
+{
+ if (dump_dbgbus_dpu)
+ _dpu_dbg_dump_dpu_dbg_bus(&dpu_dbg_base.dbgbus_dpu);
+
+ if (dump_dbgbus_vbif_rt)
+ _dpu_dbg_dump_vbif_dbg_bus(&dpu_dbg_base.dbgbus_vbif_rt);
+}
+
+/**
+ * _dpu_dump_work - deferred dump work function
+ * @work: work structure
+ */
+static void _dpu_dump_work(struct work_struct *work)
+{
+ _dpu_dump_array("dpudump_workitem",
+ dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work,
+ dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work);
+}
+
+void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
+ bool dump_dbgbus_vbif_rt)
+{
+ if (queue_work && work_pending(&dpu_dbg_base.dump_work))
+ return;
+
+ if (!queue_work) {
+ _dpu_dump_array(name, dump_dbgbus_dpu, dump_dbgbus_vbif_rt);
+ return;
+ }
+
+ /* schedule work to dump later */
+ dpu_dbg_base.dbgbus_dpu.cmn.include_in_deferred_work = dump_dbgbus_dpu;
+ dpu_dbg_base.dbgbus_vbif_rt.cmn.include_in_deferred_work =
+ dump_dbgbus_vbif_rt;
+ schedule_work(&dpu_dbg_base.dump_work);
+}
+
+/**
+ * dpu_dbg_debugfs_open - debugfs open handler for debug dump
+ * @inode: debugfs inode
+ * @file: file handle
+ */
+static int dpu_dbg_debugfs_open(struct inode *inode, struct file *file)
+{
+ /* non-seekable */
+ file->f_mode &= ~(FMODE_LSEEK | FMODE_PREAD | FMODE_PWRITE);
+ file->private_data = inode->i_private;
+ return 0;
+}
+
+/**
+ * dpu_dbg_dump_write - debugfs write handler for debug dump
+ * @file: file handle
+ * @user_buf: user buffer content from debugfs
+ * @count: size of user buffer
+ * @ppos: position offset of user buffer
+ */
+static ssize_t dpu_dbg_dump_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ _dpu_dump_array("dump_debugfs", true, true);
+ return count;
+}
+
+static const struct file_operations dpu_dbg_dump_fops = {
+ .open = dpu_dbg_debugfs_open,
+ .write = dpu_dbg_dump_write,
+};
+
+int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
+{
+ static struct dpu_dbg_base *dbg = &dpu_dbg_base;
+ char debug_name[80] = "";
+
+ if (!debugfs_root)
+ return -EINVAL;
+
+ debugfs_create_file("dump", 0600, debugfs_root, NULL,
+ &dpu_dbg_dump_fops);
+
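+ /* expose each bus's enable mask so dump output (log/mem) can be tuned */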
+ if (dbg->dbgbus_dpu.entries) {
+ dbg->dbgbus_dpu.cmn.name = DBGBUS_NAME_DPU;
+ snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
+ dbg->dbgbus_dpu.cmn.name);
+ dbg->dbgbus_dpu.cmn.enable_mask = DEFAULT_DBGBUS_DPU;
+ debugfs_create_u32(debug_name, 0600, debugfs_root,
+ &dbg->dbgbus_dpu.cmn.enable_mask);
+ }
+
+ if (dbg->dbgbus_vbif_rt.entries) {
+ dbg->dbgbus_vbif_rt.cmn.name = DBGBUS_NAME_VBIF_RT;
+ snprintf(debug_name, sizeof(debug_name), "%s_dbgbus",
+ dbg->dbgbus_vbif_rt.cmn.name);
+ dbg->dbgbus_vbif_rt.cmn.enable_mask = DEFAULT_DBGBUS_VBIFRT;
+ debugfs_create_u32(debug_name, 0600, debugfs_root,
+ &dbg->dbgbus_vbif_rt.cmn.enable_mask);
+ }
+
+ return 0;
+}
+
+static void _dpu_dbg_debugfs_destroy(void)
+{
+}
+
+void dpu_dbg_init_dbg_buses(u32 hwversion)
+{
+ static struct dpu_dbg_base *dbg = &dpu_dbg_base;
+
+ memset(&dbg->dbgbus_dpu, 0, sizeof(dbg->dbgbus_dpu));
+ memset(&dbg->dbgbus_vbif_rt, 0, sizeof(dbg->dbgbus_vbif_rt));
+
+ if (IS_MSM8998_TARGET(hwversion)) {
+ dbg->dbgbus_dpu.entries = dbg_bus_dpu_8998;
+ dbg->dbgbus_dpu.cmn.entries_size = ARRAY_SIZE(dbg_bus_dpu_8998);
+ dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
+
+ dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
+ dbg->dbgbus_vbif_rt.cmn.entries_size =
+ ARRAY_SIZE(vbif_dbg_bus_msm8998);
+ } else if (IS_SDM845_TARGET(hwversion) || IS_SDM670_TARGET(hwversion)) {
+ dbg->dbgbus_dpu.entries = dbg_bus_dpu_sdm845;
+ dbg->dbgbus_dpu.cmn.entries_size =
+ ARRAY_SIZE(dbg_bus_dpu_sdm845);
+ dbg->dbgbus_dpu.cmn.flags = DBGBUS_FLAGS_DSPP;
+
+ /* vbif is unchanged vs 8998 */
+ dbg->dbgbus_vbif_rt.entries = vbif_dbg_bus_msm8998;
+ dbg->dbgbus_vbif_rt.cmn.entries_size =
+ ARRAY_SIZE(vbif_dbg_bus_msm8998);
+ } else {
+ pr_err("unsupported chipset id %X\n", hwversion);
+ }
+}
+
+int dpu_dbg_init(struct device *dev)
+{
+ if (!dev) {
+ pr_err("invalid params\n");
+ return -EINVAL;
+ }
+
+ INIT_LIST_HEAD(&dpu_dbg_base.reg_base_list);
+ dpu_dbg_base.dev = dev;
+
+ INIT_WORK(&dpu_dbg_base.dump_work, _dpu_dump_work);
+
+ return 0;
+}
+
+/**
+ * dpu_dbg_destroy - destroy dpu debug facilities
+ */
+void dpu_dbg_destroy(void)
+{
+ _dpu_dbg_debugfs_destroy();
+}
+
+void dpu_dbg_set_dpu_top_offset(u32 blk_off)
+{
+ dpu_dbg_base.dbgbus_dpu.top_blk_off = blk_off;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
new file mode 100644
index 000000000000..1e6fa945f98b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_dbg.h
@@ -0,0 +1,103 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef DPU_DBG_H_
+#define DPU_DBG_H_
+
+#include <stdarg.h>
+#include <linux/debugfs.h>
+#include <linux/list.h>
+
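+/**
+ * enum dpu_dbg_dump_flag - bitmask selecting where dump output is written
+ * @DPU_DBG_DUMP_IN_LOG: write each dump entry to the kernel log
+ * @DPU_DBG_DUMP_IN_MEM: write dump entries to a DMA-coherent memory buffer
+ */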
+enum dpu_dbg_dump_flag {
+ DPU_DBG_DUMP_IN_LOG = BIT(0),
+ DPU_DBG_DUMP_IN_MEM = BIT(1),
+};
+
+#if defined(CONFIG_DEBUG_FS)
+
+/**
+ * dpu_dbg_init_dbg_buses - initialize debug bus dumping support for the chipset
+ * @hwversion: Chipset revision
+ */
+void dpu_dbg_init_dbg_buses(u32 hwversion);
+
+/**
+ * dpu_dbg_init - initialize global dpu debug facilities: regdump
+ * @dev: device handle
+ * Returns: 0 or -ERROR
+ */
+int dpu_dbg_init(struct device *dev);
+
+/**
+ * dpu_dbg_debugfs_register - register entries at the given debugfs dir
+ * @debugfs_root: debugfs root in which to create dpu debug entries
+ * Returns: 0 or -ERROR
+ */
+int dpu_dbg_debugfs_register(struct dentry *debugfs_root);
+
+/**
+ * dpu_dbg_destroy - destroy the global dpu debug facilities
+ * Returns: none
+ */
+void dpu_dbg_destroy(void);
+
+/**
+ * dpu_dbg_dump - trigger dumping of all dpu_dbg facilities
+ * @queue_work: whether to queue the dumping work to the work_struct
+ * @name: string indicating origin of dump
+ * @dump_dbgbus_dpu: dump the dpu debug bus
+ * @dump_dbgbus_vbif_rt: dump the vbif rt debug bus
+ * Returns: none
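+ * Example (hypothetical call site; synchronous dump of both buses):
+ *   dpu_dbg_dump(false, __func__, true, true);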
+ */
+void dpu_dbg_dump(bool queue_work, const char *name, bool dump_dbgbus_dpu,
+ bool dump_dbgbus_vbif_rt);
+
+/**
+ * dpu_dbg_set_dpu_top_offset - set the target specific offset from mdss base
+ * address of the top registers. Used for accessing debug bus controls.
+ * @blk_off: offset from mdss base of the top block
+ */
+void dpu_dbg_set_dpu_top_offset(u32 blk_off);
+
+#else
+
+static inline void dpu_dbg_init_dbg_buses(u32 hwversion)
+{
+}
+
+static inline int dpu_dbg_init(struct device *dev)
+{
+ return 0;
+}
+
+static inline int dpu_dbg_debugfs_register(struct dentry *debugfs_root)
+{
+ return 0;
+}
+
+static inline void dpu_dbg_destroy(void)
+{
+}
+
+static inline void dpu_dbg_dump(bool queue_work, const char *name,
+ bool dump_dbgbus_dpu, bool dump_dbgbus_vbif_rt)
+{
+}
+
+static inline void dpu_dbg_set_dpu_top_offset(u32 blk_off)
+{
+}
+
+#endif /* defined(CONFIG_DEBUG_FS) */
+
+#endif /* DPU_DBG_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
new file mode 100644
index 000000000000..1b4de3486ef9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.c
@@ -0,0 +1,2500 @@
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/kthread.h>
+#include <linux/debugfs.h>
+#include <linux/seq_file.h>
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include <drm/drm_crtc.h>
+#include <drm/drm_crtc_helper.h>
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_intf.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_formats.h"
+#include "dpu_encoder_phys.h"
+#include "dpu_crtc.h"
+#include "dpu_trace.h"
+#include "dpu_core_irq.h"
+
+#define DPU_DEBUG_ENC(e, fmt, ...) DPU_DEBUG("enc%d " fmt,\
+ (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_ENC(e, fmt, ...) DPU_ERROR("enc%d " fmt,\
+ (e) ? (e)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_DEBUG_PHYS(p, fmt, ...) DPU_DEBUG("enc%d intf%d pp%d " fmt,\
+ (p) ? (p)->parent->base.id : -1, \
+ (p) ? (p)->intf_idx - INTF_0 : -1, \
+ (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
+ ##__VA_ARGS__)
+
+#define DPU_ERROR_PHYS(p, fmt, ...) DPU_ERROR("enc%d intf%d pp%d " fmt,\
+ (p) ? (p)->parent->base.id : -1, \
+ (p) ? (p)->intf_idx - INTF_0 : -1, \
+ (p) ? ((p)->hw_pp ? (p)->hw_pp->idx - PINGPONG_0 : -1) : -1, \
+ ##__VA_ARGS__)
+
+/*
+ * Two, to anticipate panels that can do cmd/vid dynamic switching.
+ * The plan is to create all possible physical encoder types and switch
+ * between them at runtime.
+ */
+#define NUM_PHYS_ENCODER_TYPES 2
+
+#define MAX_PHYS_ENCODERS_PER_VIRTUAL \
+ (MAX_H_TILES_PER_DISPLAY * NUM_PHYS_ENCODER_TYPES)
+
+#define MAX_CHANNELS_PER_ENC 2
+
+#define MISR_BUFF_SIZE 256
+
+#define IDLE_SHORT_TIMEOUT 1
+
+#define MAX_VDISPLAY_SPLIT 1080
+
+/**
+ * enum dpu_enc_rc_events - events for resource control state machine
+ * @DPU_ENC_RC_EVENT_KICKOFF:
+ * This event happens at NORMAL priority.
+ * Event that signals the start of the transfer. When this event is
+ * received, enable MDP/DSI core clocks. Regardless of the previous
+ * state, the resource should be in ON state at the end of this event.
+ * @DPU_ENC_RC_EVENT_FRAME_DONE:
+ * This event happens at INTERRUPT level.
+ * Event signals the end of the data transfer after the PP FRAME_DONE
+ * event. At the end of this event, a delayed work is scheduled to go to
+ * IDLE_PC state after IDLE_TIMEOUT time.
+ * @DPU_ENC_RC_EVENT_PRE_STOP:
+ * This event happens at NORMAL priority.
+ * This event, when received during the ON state, leave the RC STATE
+ * in the PRE_OFF state. It should be followed by the STOP event as
+ * part of encoder disable.
+ * If received during IDLE or OFF states, it will do nothing.
+ * @DPU_ENC_RC_EVENT_STOP:
+ * This event happens at NORMAL priority.
+ * When this event is received, disable all the MDP/DSI core clocks, and
+ * disable IRQs. It should be called from the PRE_OFF or IDLE states.
+ * IDLE is expected when IDLE_PC has run, and PRE_OFF did nothing.
+ * PRE_OFF is expected when PRE_STOP was executed during the ON state.
+ * Resource state should be in OFF at the end of the event.
+ * @DPU_ENC_RC_EVENT_ENTER_IDLE:
+ * This event happens at NORMAL priority from a work item.
+ * Event signals that there were no frame updates for IDLE_TIMEOUT time.
+ * This would disable MDP/DSI core clocks and change the resource state
+ * to IDLE.
+ */
+enum dpu_enc_rc_events {
+ DPU_ENC_RC_EVENT_KICKOFF = 1,
+ DPU_ENC_RC_EVENT_FRAME_DONE,
+ DPU_ENC_RC_EVENT_PRE_STOP,
+ DPU_ENC_RC_EVENT_STOP,
+ DPU_ENC_RC_EVENT_ENTER_IDLE
+};
+
+/**
+ * enum dpu_enc_rc_states - states that the resource control maintains
+ * @DPU_ENC_RC_STATE_OFF: Resource is in OFF state
+ * @DPU_ENC_RC_STATE_PRE_OFF: Resource is transitioning to OFF state
+ * @DPU_ENC_RC_STATE_ON: Resource is in ON state
+ * @DPU_ENC_RC_STATE_IDLE: Resource is in IDLE state
+ */
+enum dpu_enc_rc_states {
+ DPU_ENC_RC_STATE_OFF,
+ DPU_ENC_RC_STATE_PRE_OFF,
+ DPU_ENC_RC_STATE_ON,
+ DPU_ENC_RC_STATE_IDLE
+};
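+
+/*
+ * A sketch of the RC state transitions driven by the events above,
+ * derived from the event documentation:
+ *
+ *   OFF/IDLE --KICKOFF--> ON
+ *   ON --FRAME_DONE--> ON (schedules delayed ENTER_IDLE)
+ *   ON --ENTER_IDLE--> IDLE
+ *   ON --PRE_STOP--> PRE_OFF
+ *   PRE_OFF/IDLE --STOP--> OFF
+ */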
+
+/**
+ * struct dpu_encoder_virt - virtual encoder. Container of one or more physical
+ * encoders. Virtual encoder manages one "logical" display. Physical
+ * encoders manage one intf block, tied to a specific panel/sub-panel.
+ * Virtual encoder defers as much as possible to the physical encoders.
+ * Virtual encoder registers itself with the DRM Framework as the encoder.
+ * @base: drm_encoder base class for registration with DRM
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @bus_scaling_client: Client handle to the bus scaling interface
+ * @display_num_of_h_tiles: Number of horizontal tiles of the display
+ * @num_phys_encs: Actual number of physical encoders contained.
+ * @phys_encs: Container of physical encoders managed.
+ * @cur_master: Pointer to the current master in this mode. Optimization:
+ * only valid after enable, cleared at disable.
+ * @hw_pp: Handles to the pingpong blocks used for the display. The
+ * number of pingpong blocks can differ from num_phys_encs.
+ * @intfs_swapped: Whether or not the phys_enc interfaces have been swapped
+ * for partial update right-only cases, such as pingpong
+ * split where virtual pingpong does not generate IRQs
+ * @crtc_vblank_cb: Callback into the upper layer / CRTC for
+ * notification of the VBLANK
+ * @crtc_vblank_cb_data: Data from upper layer for VBLANK notification
+ * @debugfs_root: Debug file system root file node
+ * @enc_lock: Lock around physical encoder create/destroy and
+ * access.
+ * @frame_busy_mask: Bitmask tracking which phys_encs are still busy
+ * processing the current command; Bit0 = phys_encs[0] etc.
+ * @crtc_frame_event_cb: callback handler for frame event
+ * @crtc_frame_event_cb_data: callback handler private data
+ * @frame_done_timeout: frame done timeout in milliseconds
+ * @frame_done_timer: watchdog timer for frame done event
+ * @vsync_event_timer: vsync timer
+ * @disp_info: local copy of msm_display_info struct
+ * @misr_enable: misr enable/disable status
+ * @misr_frame_count: misr frame count before start capturing the data
+ * @idle_pc_supported: indicate if idle power collapse is supported
+ * @rc_lock: resource control mutex lock to protect
+ * virt encoder over various state changes
+ * @rc_state: resource controller state
+ * @delayed_off_work: delayed worker to schedule disabling of
+ * clks and resources after IDLE_TIMEOUT time.
+ * @vsync_event_work: worker to handle vsync event for autorefresh
+ * @topology: topology of the display
+ * @mode_set_complete: flag to indicate modeset completion
+ * @idle_timeout: idle timeout duration in milliseconds
+ */
+struct dpu_encoder_virt {
+ struct drm_encoder base;
+ spinlock_t enc_spinlock;
+ uint32_t bus_scaling_client;
+
+ uint32_t display_num_of_h_tiles;
+
+ unsigned int num_phys_encs;
+ struct dpu_encoder_phys *phys_encs[MAX_PHYS_ENCODERS_PER_VIRTUAL];
+ struct dpu_encoder_phys *cur_master;
+ struct dpu_hw_pingpong *hw_pp[MAX_CHANNELS_PER_ENC];
+
+ bool intfs_swapped;
+
+ void (*crtc_vblank_cb)(void *);
+ void *crtc_vblank_cb_data;
+
+ struct dentry *debugfs_root;
+ struct mutex enc_lock;
+ DECLARE_BITMAP(frame_busy_mask, MAX_PHYS_ENCODERS_PER_VIRTUAL);
+ void (*crtc_frame_event_cb)(void *, u32 event);
+ void *crtc_frame_event_cb_data;
+
+ atomic_t frame_done_timeout;
+ struct timer_list frame_done_timer;
+ struct timer_list vsync_event_timer;
+
+ struct msm_display_info disp_info;
+ bool misr_enable;
+ u32 misr_frame_count;
+
+ bool idle_pc_supported;
+ struct mutex rc_lock;
+ enum dpu_enc_rc_states rc_state;
+ struct kthread_delayed_work delayed_off_work;
+ struct kthread_work vsync_event_work;
+ struct msm_display_topology topology;
+ bool mode_set_complete;
+
+ u32 idle_timeout;
+};
+
+#define to_dpu_encoder_virt(x) container_of(x, struct dpu_encoder_virt, base)
+
+static inline int _dpu_encoder_power_enable(struct dpu_encoder_virt *dpu_enc,
+ bool enable)
+{
+ struct drm_encoder *drm_enc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!dpu_enc) {
+ DPU_ERROR("invalid dpu enc\n");
+ return -EINVAL;
+ }
+
+ drm_enc = &dpu_enc->base;
+ if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+ DPU_ERROR("drm device invalid\n");
+ return -EINVAL;
+ }
+
+ priv = drm_enc->dev->dev_private;
+ if (!priv->kms) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ if (enable)
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ else
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ return 0;
+}
+
+void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx)
+{
+ DRM_ERROR("irq timeout id=%u, intf=%d, pp=%d, intr=%d\n",
+ DRMID(phys_enc->parent), phys_enc->intf_idx - INTF_0,
+ phys_enc->hw_pp->idx - PINGPONG_0, intr_idx);
+
+ if (phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(
+ phys_enc->parent, phys_enc,
+ DPU_ENCODER_FRAME_EVENT_ERROR);
+}
+
+static int dpu_encoder_helper_wait_event_timeout(int32_t drm_id,
+ int32_t hw_id, struct dpu_encoder_wait_info *info);
+
+int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx,
+ struct dpu_encoder_wait_info *wait_info)
+{
+ struct dpu_encoder_irq *irq;
+ u32 irq_status;
+ int ret;
+
+ if (!phys_enc || !wait_info || intr_idx >= INTR_IDX_MAX) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+ irq = &phys_enc->irq[intr_idx];
+
+ /* note: do master / slave checking outside */
+
+ /* return -EWOULDBLOCK since we know the wait isn't necessary */
+ if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+ DRM_ERROR("encoder is disabled id=%u, intr=%d, hw=%d, irq=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx);
+ return -EWOULDBLOCK;
+ }
+
+ if (irq->irq_idx < 0) {
+ DRM_DEBUG_KMS("skip irq wait id=%u, intr=%d, hw=%d, irq=%s",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->name);
+ return 0;
+ }
+
+ DRM_DEBUG_KMS("id=%u, intr=%d, hw=%d, irq=%d, pp=%d, pending_cnt=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+
+ ret = dpu_encoder_helper_wait_event_timeout(
+ DRMID(phys_enc->parent),
+ irq->hw_idx,
+ wait_info);
+
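+ /*
+ * If the wait timed out, re-read the interrupt status: when the
+ * hardware shows the interrupt did fire but the callback never ran,
+ * invoke it by hand (with local interrupts disabled) and treat the
+ * wait as successful; otherwise report -ETIMEDOUT.
+ */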
+ if (ret <= 0) {
+ irq_status = dpu_core_irq_read(phys_enc->dpu_kms,
+ irq->irq_idx, true);
+ if (irq_status) {
+ unsigned long flags;
+
+ DRM_DEBUG_KMS("irq not triggered id=%u, intr=%d, "
+ "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+ DRMID(phys_enc->parent), intr_idx,
+ irq->hw_idx, irq->irq_idx,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+ local_irq_save(flags);
+ irq->cb.func(phys_enc, irq->irq_idx);
+ local_irq_restore(flags);
+ ret = 0;
+ } else {
+ ret = -ETIMEDOUT;
+ DRM_DEBUG_KMS("irq timeout id=%u, intr=%d, "
+ "hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+ DRMID(phys_enc->parent), intr_idx,
+ irq->hw_idx, irq->irq_idx,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+ }
+ } else {
+ ret = 0;
+ trace_dpu_enc_irq_wait_success(DRMID(phys_enc->parent),
+ intr_idx, irq->hw_idx, irq->irq_idx,
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(wait_info->atomic_cnt));
+ }
+
+ return ret;
+}
+
+int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx)
+{
+ struct dpu_encoder_irq *irq;
+ int ret = 0;
+
+ if (!phys_enc || intr_idx >= INTR_IDX_MAX) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+ irq = &phys_enc->irq[intr_idx];
+
+ if (irq->irq_idx >= 0) {
+ DPU_DEBUG_PHYS(phys_enc,
+ "skipping already registered irq %s type %d\n",
+ irq->name, irq->intr_type);
+ return 0;
+ }
+
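+ /*
+ * Look up the hw interrupt line for this interrupt type, register
+ * the callback on it, then enable it. If enabling fails, the
+ * callback is unregistered again so a later attempt starts clean.
+ */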
+ irq->irq_idx = dpu_core_irq_idx_lookup(phys_enc->dpu_kms,
+ irq->intr_type, irq->hw_idx);
+ if (irq->irq_idx < 0) {
+ DPU_ERROR_PHYS(phys_enc,
+ "failed to lookup IRQ index for %s type:%d\n",
+ irq->name, irq->intr_type);
+ return -EINVAL;
+ }
+
+ ret = dpu_core_irq_register_callback(phys_enc->dpu_kms, irq->irq_idx,
+ &irq->cb);
+ if (ret) {
+ DPU_ERROR_PHYS(phys_enc,
+ "failed to register IRQ callback for %s\n",
+ irq->name);
+ irq->irq_idx = -EINVAL;
+ return ret;
+ }
+
+ ret = dpu_core_irq_enable(phys_enc->dpu_kms, &irq->irq_idx, 1);
+ if (ret) {
+ DRM_ERROR("enable failed id=%u, intr=%d, hw=%d, irq=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx);
+ dpu_core_irq_unregister_callback(phys_enc->dpu_kms,
+ irq->irq_idx, &irq->cb);
+ irq->irq_idx = -EINVAL;
+ return ret;
+ }
+
+ trace_dpu_enc_irq_register_success(DRMID(phys_enc->parent), intr_idx,
+ irq->hw_idx, irq->irq_idx);
+
+ return ret;
+}
+
+int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx)
+{
+ struct dpu_encoder_irq *irq;
+ int ret;
+
+ if (!phys_enc || intr_idx >= INTR_IDX_MAX) {
+ DPU_ERROR("invalid params\n");
+ return -EINVAL;
+ }
+ irq = &phys_enc->irq[intr_idx];
+
+ /* skip irqs that weren't registered, but log the duplicate unregister */
+ if (irq->irq_idx < 0) {
+ DRM_ERROR("duplicate unregister id=%u, intr=%d, hw=%d, irq=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx);
+ return 0;
+ }
+
+ ret = dpu_core_irq_disable(phys_enc->dpu_kms, &irq->irq_idx, 1);
+ if (ret) {
+ DRM_ERROR("disable failed id=%u, intr=%d, hw=%d, irq=%d ret=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, ret);
+ }
+
+ ret = dpu_core_irq_unregister_callback(phys_enc->dpu_kms, irq->irq_idx,
+ &irq->cb);
+ if (ret) {
+ DRM_ERROR("unreg cb fail id=%u, intr=%d, hw=%d, irq=%d ret=%d",
+ DRMID(phys_enc->parent), intr_idx, irq->hw_idx,
+ irq->irq_idx, ret);
+ }
+
+ trace_dpu_enc_irq_unregister_success(DRMID(phys_enc->parent), intr_idx,
+ irq->hw_idx, irq->irq_idx);
+
+ irq->irq_idx = -EINVAL;
+
+ return 0;
+}
+
+void dpu_encoder_get_hw_resources(struct drm_encoder *drm_enc,
+ struct dpu_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i = 0;
+
+ if (!hw_res || !drm_enc || !conn_state) {
+ DPU_ERROR("invalid argument(s), drm_enc %d, res %d, state %d\n",
+ drm_enc != 0, hw_res != 0, conn_state != 0);
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ /* Query resources used by phys encs, expected to be without overlap */
+ memset(hw_res, 0, sizeof(*hw_res));
+ hw_res->display_num_of_h_tiles = dpu_enc->display_num_of_h_tiles;
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.get_hw_resources)
+ phys->ops.get_hw_resources(phys, hw_res, conn_state);
+ }
+}
+
+static void dpu_encoder_destroy(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i = 0;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ mutex_lock(&dpu_enc->enc_lock);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.destroy) {
+ phys->ops.destroy(phys);
+ --dpu_enc->num_phys_encs;
+ dpu_enc->phys_encs[i] = NULL;
+ }
+ }
+
+ if (dpu_enc->num_phys_encs)
+ DPU_ERROR_ENC(dpu_enc, "expected 0 num_phys_encs not %d\n",
+ dpu_enc->num_phys_encs);
+ dpu_enc->num_phys_encs = 0;
+ mutex_unlock(&dpu_enc->enc_lock);
+
+ drm_encoder_cleanup(drm_enc);
+ mutex_destroy(&dpu_enc->enc_lock);
+
+ kfree(dpu_enc);
+}
+
+void dpu_encoder_helper_split_config(
+ struct dpu_encoder_phys *phys_enc,
+ enum dpu_intf interface)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct split_pipe_cfg cfg = { 0 };
+ struct dpu_hw_mdp *hw_mdptop;
+ struct msm_display_info *disp_info;
+
+ if (!phys_enc || !phys_enc->hw_mdptop || !phys_enc->parent) {
+ DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
+ hw_mdptop = phys_enc->hw_mdptop;
+ disp_info = &dpu_enc->disp_info;
+
+ if (disp_info->intf_type != DRM_MODE_CONNECTOR_DSI)
+ return;
+
+ /*
+ * Disable split modes since the encoder will be operating as the only
+ * encoder, either for the entire use case in the case of, for example,
+ * single DSI, or for this frame in the case of left/right only partial
+ * update.
+ */
+ if (phys_enc->split_role == ENC_ROLE_SOLO) {
+ if (hw_mdptop->ops.setup_split_pipe)
+ hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
+ return;
+ }
+
+ cfg.en = true;
+ cfg.mode = phys_enc->intf_mode;
+ cfg.intf = interface;
+
+ if (cfg.en && phys_enc->ops.needs_single_flush &&
+ phys_enc->ops.needs_single_flush(phys_enc))
+ cfg.split_flush_en = true;
+
+ if (phys_enc->split_role == ENC_ROLE_MASTER) {
+ DPU_DEBUG_ENC(dpu_enc, "enable %d\n", cfg.en);
+
+ if (hw_mdptop->ops.setup_split_pipe)
+ hw_mdptop->ops.setup_split_pipe(hw_mdptop, &cfg);
+ }
+}
+
+static void _dpu_encoder_adjust_mode(struct drm_connector *connector,
+ struct drm_display_mode *adj_mode)
+{
+ struct drm_display_mode *cur_mode;
+
+ if (!connector || !adj_mode)
+ return;
+
+ list_for_each_entry(cur_mode, &connector->modes, head) {
+ if (cur_mode->vdisplay == adj_mode->vdisplay &&
+ cur_mode->hdisplay == adj_mode->hdisplay &&
+ cur_mode->vrefresh == adj_mode->vrefresh) {
+ adj_mode->private = cur_mode->private;
+ adj_mode->private_flags |= cur_mode->private_flags;
+ }
+ }
+}
+
+static struct msm_display_topology dpu_encoder_get_topology(
+ struct dpu_encoder_virt *dpu_enc,
+ struct dpu_kms *dpu_kms,
+ struct drm_display_mode *mode)
+{
+ struct msm_display_topology topology;
+ int i, intf_count = 0;
+
+ for (i = 0; i < MAX_PHYS_ENCODERS_PER_VIRTUAL; i++)
+ if (dpu_enc->phys_encs[i])
+ intf_count++;
+
+ /* Use split topology for display heights above MAX_VDISPLAY_SPLIT */
+ topology.num_lm = (mode->vdisplay > MAX_VDISPLAY_SPLIT) ? 2 : 1;
+ topology.num_enc = 0;
+ topology.num_intf = intf_count;
+
+ return topology;
+}
+
+static int dpu_encoder_virt_atomic_check(
+ struct drm_encoder *drm_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ const struct drm_display_mode *mode;
+ struct drm_display_mode *adj_mode;
+ struct msm_display_topology topology;
+ int i = 0;
+ int ret = 0;
+
+ if (!drm_enc || !crtc_state || !conn_state) {
+ DPU_ERROR("invalid arg(s), drm_enc %d, crtc/conn state %d/%d\n",
+ drm_enc != 0, crtc_state != 0, conn_state != 0);
+ return -EINVAL;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+ mode = &crtc_state->mode;
+ adj_mode = &crtc_state->adjusted_mode;
+ trace_dpu_enc_atomic_check(DRMID(drm_enc));
+
+ /*
+ * display drivers may populate private fields of the drm display mode
+ * structure while registering possible modes of a connector with DRM.
+ * These private fields are not populated back while DRM invokes
+ * the mode_set callbacks. This module retrieves and populates the
+ * private fields of the given mode.
+ */
+ _dpu_encoder_adjust_mode(conn_state->connector, adj_mode);
+
+ /* perform atomic check on the first physical encoder (master) */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.atomic_check)
+ ret = phys->ops.atomic_check(phys, crtc_state,
+ conn_state);
+ else if (phys && phys->ops.mode_fixup)
+ if (!phys->ops.mode_fixup(phys, mode, adj_mode))
+ ret = -EINVAL;
+
+ if (ret) {
+ DPU_ERROR_ENC(dpu_enc,
+ "mode unsupported, phys idx %d\n", i);
+ break;
+ }
+ }
+
+ topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
+
+ /* Reserve dynamic resources now. Indicating AtomicTest phase */
+ if (!ret) {
+ /*
+ * Avoid reserving resources when mode set is pending. Topology
+ * info may not be available to complete reservation.
+ */
+ if (drm_atomic_crtc_needs_modeset(crtc_state)
+ && dpu_enc->mode_set_complete) {
+ ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state,
+ conn_state, topology, true);
+ dpu_enc->mode_set_complete = false;
+ }
+ }
+
+ if (!ret)
+ drm_mode_set_crtcinfo(adj_mode, 0);
+
+ trace_dpu_enc_atomic_check_flags(DRMID(drm_enc), adj_mode->flags,
+ adj_mode->private_flags);
+
+ return ret;
+}
+
+static void _dpu_encoder_update_vsync_source(struct dpu_encoder_virt *dpu_enc,
+ struct msm_display_info *disp_info)
+{
+ struct dpu_vsync_source_cfg vsync_cfg = { 0 };
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct dpu_hw_mdp *hw_mdptop;
+ struct drm_encoder *drm_enc;
+ int i;
+
+ if (!dpu_enc || !disp_info) {
+ DPU_ERROR("invalid param dpu_enc:%d or disp_info:%d\n",
+ dpu_enc != NULL, disp_info != NULL);
+ return;
+ } else if (dpu_enc->num_phys_encs > ARRAY_SIZE(dpu_enc->hw_pp)) {
+ DPU_ERROR("invalid num phys enc %d/%d\n",
+ dpu_enc->num_phys_encs,
+ (int) ARRAY_SIZE(dpu_enc->hw_pp));
+ return;
+ }
+
+ drm_enc = &dpu_enc->base;
+ /* these pointers are checked in virt_enable_helper */
+ priv = drm_enc->dev->dev_private;
+
+ dpu_kms = to_dpu_kms(priv->kms);
+ if (!dpu_kms) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return;
+ }
+
+ hw_mdptop = dpu_kms->hw_mdp;
+ if (!hw_mdptop) {
+ DPU_ERROR("invalid mdptop\n");
+ return;
+ }
+
+ if (hw_mdptop->ops.setup_vsync_source &&
+ disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) {
+ for (i = 0; i < dpu_enc->num_phys_encs; i++)
+ vsync_cfg.ppnumber[i] = dpu_enc->hw_pp[i]->idx;
+
+ vsync_cfg.pp_count = dpu_enc->num_phys_encs;
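+ /*
+ * Select the TE (tearing-effect) source: fall back to a watchdog
+ * timer when the panel does not drive a TE signal.
+ */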
+ if (disp_info->is_te_using_watchdog_timer)
+ vsync_cfg.vsync_source = DPU_VSYNC_SOURCE_WD_TIMER_0;
+ else
+ vsync_cfg.vsync_source = DPU_VSYNC0_SOURCE_GPIO;
+
+ hw_mdptop->ops.setup_vsync_source(hw_mdptop, &vsync_cfg);
+ }
+}
+
+static void _dpu_encoder_irq_control(struct drm_encoder *drm_enc, bool enable)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ DPU_DEBUG_ENC(dpu_enc, "enable:%d\n", enable);
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.irq_control)
+ phys->ops.irq_control(phys, enable);
+ }
+
+}
+
+static void _dpu_encoder_resource_control_helper(struct drm_encoder *drm_enc,
+ bool enable)
+{
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct dpu_encoder_virt *dpu_enc;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ trace_dpu_enc_rc_helper(DRMID(drm_enc), enable);
+
+ if (!dpu_enc->cur_master) {
+ DPU_ERROR("encoder master not set\n");
+ return;
+ }
+
+ if (enable) {
+ /* enable DPU core clks */
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+ /* enable all the irq */
+ _dpu_encoder_irq_control(drm_enc, true);
+
+ } else {
+ /* disable all the irq */
+ _dpu_encoder_irq_control(drm_enc, false);
+
+ /* disable DPU core clks */
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+ }
+
+}
+
+static int dpu_encoder_resource_control(struct drm_encoder *drm_enc,
+ u32 sw_event)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv;
+ struct msm_drm_thread *disp_thread;
+ bool is_vid_mode = false;
+
+ if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private ||
+ !drm_enc->crtc) {
+ DPU_ERROR("invalid parameters\n");
+ return -EINVAL;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ priv = drm_enc->dev->dev_private;
+ is_vid_mode = dpu_enc->disp_info.capabilities &
+ MSM_DISPLAY_CAP_VID_MODE;
+
+ if (drm_enc->crtc->index >= ARRAY_SIZE(priv->disp_thread)) {
+ DPU_ERROR("invalid crtc index\n");
+ return -EINVAL;
+ }
+ disp_thread = &priv->disp_thread[drm_enc->crtc->index];
+
+ /*
+ * when idle_pc is not supported, process only KICKOFF, STOP and MODESET
+ * events and return early for other events (i.e. wb display).
+ */
+ if (!dpu_enc->idle_pc_supported &&
+ (sw_event != DPU_ENC_RC_EVENT_KICKOFF &&
+ sw_event != DPU_ENC_RC_EVENT_STOP &&
+ sw_event != DPU_ENC_RC_EVENT_PRE_STOP))
+ return 0;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event, dpu_enc->idle_pc_supported,
+ dpu_enc->rc_state, "begin");
+
+ switch (sw_event) {
+ case DPU_ENC_RC_EVENT_KICKOFF:
+ /* cancel delayed off work, if any */
+ if (kthread_cancel_delayed_work_sync(
+ &dpu_enc->delayed_off_work))
+ DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
+ sw_event);
+
+ mutex_lock(&dpu_enc->rc_lock);
+
+ /* return if the resource control is already in ON state */
+ if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
+ DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in ON state\n",
+ DRMID(drm_enc), sw_event);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ } else if (dpu_enc->rc_state != DPU_ENC_RC_STATE_OFF &&
+ dpu_enc->rc_state != DPU_ENC_RC_STATE_IDLE) {
+ DRM_DEBUG_KMS("id;%u, sw_event:%d, rc in state %d\n",
+ DRMID(drm_enc), sw_event,
+ dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return -EINVAL;
+ }
+
+ if (is_vid_mode && dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE)
+ _dpu_encoder_irq_control(drm_enc, true);
+ else
+ _dpu_encoder_resource_control_helper(drm_enc, true);
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_ON;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "kickoff");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ case DPU_ENC_RC_EVENT_FRAME_DONE:
+ /*
+ * mutex lock is not used as this event happens at interrupt
+ * context. And locking is not required as, the other events
+ * like KICKOFF and STOP does a wait-for-idle before executing
+ * the resource_control
+ */
+ if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
+ DRM_DEBUG_KMS("id:%d, sw_event:%d,rc:%d-unexpected\n",
+ DRMID(drm_enc), sw_event,
+ dpu_enc->rc_state);
+ return -EINVAL;
+ }
+
+ /*
+ * schedule off work item only when there are no
+ * frames pending
+ */
+ if (dpu_crtc_frame_pending(drm_enc->crtc) > 1) {
+ DRM_DEBUG_KMS("id:%d skip schedule work\n",
+ DRMID(drm_enc));
+ return 0;
+ }
+
+ kthread_queue_delayed_work(
+ &disp_thread->worker,
+ &dpu_enc->delayed_off_work,
+ msecs_to_jiffies(dpu_enc->idle_timeout));
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "frame done");
+ break;
+
+ case DPU_ENC_RC_EVENT_PRE_STOP:
+ /* cancel delayed off work, if any */
+ if (kthread_cancel_delayed_work_sync(
+ &dpu_enc->delayed_off_work))
+ DPU_DEBUG_ENC(dpu_enc, "sw_event:%d, work cancelled\n",
+ sw_event);
+
+ mutex_lock(&dpu_enc->rc_lock);
+
+ if (is_vid_mode &&
+ dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
+ _dpu_encoder_irq_control(drm_enc, true);
+ }
+ /* skip if is already OFF or IDLE, resources are off already */
+ else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF ||
+ dpu_enc->rc_state == DPU_ENC_RC_STATE_IDLE) {
+ DRM_DEBUG_KMS("id:%u, sw_event:%d, rc in %d state\n",
+ DRMID(drm_enc), sw_event,
+ dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ }
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_PRE_OFF;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "pre stop");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ case DPU_ENC_RC_EVENT_STOP:
+ mutex_lock(&dpu_enc->rc_lock);
+
+ /* return if the resource control is already in OFF state */
+ if (dpu_enc->rc_state == DPU_ENC_RC_STATE_OFF) {
+ DRM_DEBUG_KMS("id: %u, sw_event:%d, rc in OFF state\n",
+ DRMID(drm_enc), sw_event);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ } else if (dpu_enc->rc_state == DPU_ENC_RC_STATE_ON) {
+ DRM_ERROR("id: %u, sw_event:%d, rc in state %d\n",
+ DRMID(drm_enc), sw_event, dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return -EINVAL;
+ }
+
+ /*
+ * expect to arrive here only if in either idle state or pre-off
+ * and in IDLE state the resources are already disabled
+ */
+ if (dpu_enc->rc_state == DPU_ENC_RC_STATE_PRE_OFF)
+ _dpu_encoder_resource_control_helper(drm_enc, false);
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_OFF;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "stop");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ case DPU_ENC_RC_EVENT_ENTER_IDLE:
+ mutex_lock(&dpu_enc->rc_lock);
+
+ if (dpu_enc->rc_state != DPU_ENC_RC_STATE_ON) {
+ DRM_ERROR("id: %u, sw_event:%d, rc:%d !ON state\n",
+ DRMID(drm_enc), sw_event, dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ }
+
+ /*
+ * if we are in ON but a frame was just kicked off,
+ * ignore the IDLE event, it's probably a stale timer event
+ */
+ if (dpu_enc->frame_busy_mask[0]) {
+ DRM_ERROR("id:%u, sw_event:%d, rc:%d frame pending\n",
+ DRMID(drm_enc), sw_event, dpu_enc->rc_state);
+ mutex_unlock(&dpu_enc->rc_lock);
+ return 0;
+ }
+
+ if (is_vid_mode)
+ _dpu_encoder_irq_control(drm_enc, false);
+ else
+ _dpu_encoder_resource_control_helper(drm_enc, false);
+
+ dpu_enc->rc_state = DPU_ENC_RC_STATE_IDLE;
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "idle");
+
+ mutex_unlock(&dpu_enc->rc_lock);
+ break;
+
+ default:
+ DRM_ERROR("id:%u, unexpected sw_event: %d\n", DRMID(drm_enc),
+ sw_event);
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "error");
+ break;
+ }
+
+ trace_dpu_enc_rc(DRMID(drm_enc), sw_event,
+ dpu_enc->idle_pc_supported, dpu_enc->rc_state,
+ "end");
+ return 0;
+}
+
+static void dpu_encoder_virt_mode_set(struct drm_encoder *drm_enc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct list_head *connector_list;
+ struct drm_connector *conn = NULL, *conn_iter;
+ struct dpu_rm_hw_iter pp_iter;
+ struct msm_display_topology topology;
+ enum dpu_rm_topology_name topology_name;
+ int i = 0, ret;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+ connector_list = &dpu_kms->dev->mode_config.connector_list;
+
+ trace_dpu_enc_mode_set(DRMID(drm_enc));
+
+ list_for_each_entry(conn_iter, connector_list, head)
+ if (conn_iter->encoder == drm_enc)
+ conn = conn_iter;
+
+ if (!conn) {
+ DPU_ERROR_ENC(dpu_enc, "failed to find attached connector\n");
+ return;
+ } else if (!conn->state) {
+ DPU_ERROR_ENC(dpu_enc, "invalid connector state\n");
+ return;
+ }
+
+ topology = dpu_encoder_get_topology(dpu_enc, dpu_kms, adj_mode);
+
+ /* Reserve dynamic resources now. Indicating non-AtomicTest phase */
+ ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, drm_enc->crtc->state,
+ conn->state, topology, false);
+ if (ret) {
+ DPU_ERROR_ENC(dpu_enc,
+ "failed to reserve hw resources, %d\n", ret);
+ return;
+ }
+
+ dpu_rm_init_hw_iter(&pp_iter, drm_enc->base.id, DPU_HW_BLK_PINGPONG);
+ for (i = 0; i < MAX_CHANNELS_PER_ENC; i++) {
+ dpu_enc->hw_pp[i] = NULL;
+ if (!dpu_rm_get_hw(&dpu_kms->rm, &pp_iter))
+ break;
+ dpu_enc->hw_pp[i] = (struct dpu_hw_pingpong *) pp_iter.hw;
+ }
+
+ topology_name = dpu_rm_get_topology_name(topology);
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys) {
+ if (!dpu_enc->hw_pp[i]) {
+ DPU_ERROR_ENC(dpu_enc,
+ "invalid pingpong block for the encoder\n");
+ return;
+ }
+ phys->hw_pp = dpu_enc->hw_pp[i];
+ phys->connector = conn->state->connector;
+ phys->topology_name = topology_name;
+ if (phys->ops.mode_set)
+ phys->ops.mode_set(phys, mode, adj_mode);
+ }
+ }
+
+ dpu_enc->mode_set_complete = true;
+}
+
+static void _dpu_encoder_virt_enable_helper(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+ if (!dpu_kms) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ if (!dpu_enc || !dpu_enc->cur_master) {
+ DPU_ERROR("invalid dpu encoder/master\n");
+ return;
+ }
+
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DisplayPort &&
+ dpu_enc->cur_master->hw_mdptop &&
+ dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select)
+ dpu_enc->cur_master->hw_mdptop->ops.intf_audio_select(
+ dpu_enc->cur_master->hw_mdptop);
+
+ if (dpu_enc->cur_master->hw_mdptop &&
+ dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc)
+ dpu_enc->cur_master->hw_mdptop->ops.reset_ubwc(
+ dpu_enc->cur_master->hw_mdptop,
+ dpu_kms->catalog);
+
+ _dpu_encoder_update_vsync_source(dpu_enc, &dpu_enc->disp_info);
+}
+
+void dpu_encoder_virt_restore(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && (phys != dpu_enc->cur_master) && phys->ops.restore)
+ phys->ops.restore(phys);
+ }
+
+ if (dpu_enc->cur_master && dpu_enc->cur_master->ops.restore)
+ dpu_enc->cur_master->ops.restore(dpu_enc->cur_master);
+
+ _dpu_encoder_virt_enable_helper(drm_enc);
+}
+
+static void dpu_encoder_virt_enable(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i, ret = 0;
+ struct drm_display_mode *cur_mode = NULL;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ cur_mode = &dpu_enc->base.crtc->state->adjusted_mode;
+
+ trace_dpu_enc_enable(DRMID(drm_enc), cur_mode->hdisplay,
+ cur_mode->vdisplay);
+
+ dpu_enc->cur_master = NULL;
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.is_master && phys->ops.is_master(phys)) {
+ DPU_DEBUG_ENC(dpu_enc, "master is now idx %d\n", i);
+ dpu_enc->cur_master = phys;
+ break;
+ }
+ }
+
+ if (!dpu_enc->cur_master) {
+ DPU_ERROR("virt encoder has no master! num_phys %d\n", i);
+ return;
+ }
+
+ ret = dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
+ if (ret) {
+ DPU_ERROR_ENC(dpu_enc, "dpu resource control failed: %d\n",
+ ret);
+ return;
+ }
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys)
+ continue;
+
+ if (phys != dpu_enc->cur_master) {
+ if (phys->ops.enable)
+ phys->ops.enable(phys);
+ }
+
+ if (dpu_enc->misr_enable && (dpu_enc->disp_info.capabilities &
+ MSM_DISPLAY_CAP_VID_MODE) && phys->ops.setup_misr)
+ phys->ops.setup_misr(phys, true,
+ dpu_enc->misr_frame_count);
+ }
+
+ if (dpu_enc->cur_master->ops.enable)
+ dpu_enc->cur_master->ops.enable(dpu_enc->cur_master);
+
+ _dpu_encoder_virt_enable_helper(drm_enc);
+}
+
+static void dpu_encoder_virt_disable(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ struct drm_display_mode *mode;
+ int i = 0;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ } else if (!drm_enc->dev) {
+ DPU_ERROR("invalid dev\n");
+ return;
+ } else if (!drm_enc->dev->dev_private) {
+ DPU_ERROR("invalid dev_private\n");
+ return;
+ }
+
+ mode = &drm_enc->crtc->state->adjusted_mode;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ trace_dpu_enc_disable(DRMID(drm_enc));
+
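+ /*
+ * Teardown order: wait for the in-flight frame to complete, move the
+ * RC state machine to PRE_OFF, disable the physical encoders, STOP
+ * the RC state machine, and finally release the hw resources back to
+ * the resource manager.
+ */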
+ /* wait for idle */
+ dpu_encoder_wait_for_event(drm_enc, MSM_ENC_TX_COMPLETE);
+
+ dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_PRE_STOP);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.disable)
+ phys->ops.disable(phys);
+ }
+
+ /* after phys waits for frame-done, should be no more frames pending */
+ if (atomic_xchg(&dpu_enc->frame_done_timeout, 0)) {
+ DPU_ERROR("enc%d timeout pending\n", drm_enc->base.id);
+ del_timer_sync(&dpu_enc->frame_done_timer);
+ }
+
+ dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_STOP);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ if (dpu_enc->phys_encs[i])
+ dpu_enc->phys_encs[i]->connector = NULL;
+ }
+
+ dpu_enc->cur_master = NULL;
+
+ DPU_DEBUG_ENC(dpu_enc, "encoder disabled\n");
+
+ dpu_rm_release(&dpu_kms->rm, drm_enc);
+}
+
+static enum dpu_intf dpu_encoder_get_intf(struct dpu_mdss_cfg *catalog,
+ enum dpu_intf_type type, u32 controller_id)
+{
+ int i = 0;
+
+ for (i = 0; i < catalog->intf_count; i++) {
+ if (catalog->intf[i].type == type
+ && catalog->intf[i].controller_id == controller_id) {
+ return catalog->intf[i].id;
+ }
+ }
+
+ return INTF_MAX;
+}
+
+static void dpu_encoder_vblank_callback(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phy_enc)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ unsigned long lock_flags;
+
+ if (!drm_enc || !phy_enc)
+ return;
+
+ DPU_ATRACE_BEGIN("encoder_vblank_callback");
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ if (dpu_enc->crtc_vblank_cb)
+ dpu_enc->crtc_vblank_cb(dpu_enc->crtc_vblank_cb_data);
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+
+ atomic_inc(&phy_enc->vsync_cnt);
+ DPU_ATRACE_END("encoder_vblank_callback");
+}
+
+static void dpu_encoder_underrun_callback(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phy_enc)
+{
+ if (!phy_enc)
+ return;
+
+ DPU_ATRACE_BEGIN("encoder_underrun_callback");
+ atomic_inc(&phy_enc->underrun_cnt);
+ trace_dpu_enc_underrun_cb(DRMID(drm_enc),
+ atomic_read(&phy_enc->underrun_cnt));
+ DPU_ATRACE_END("encoder_underrun_callback");
+}
+
+void dpu_encoder_register_vblank_callback(struct drm_encoder *drm_enc,
+ void (*vbl_cb)(void *), void *vbl_data)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ unsigned long lock_flags;
+ bool enable;
+ int i;
+
+ enable = vbl_cb ? true : false;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ trace_dpu_enc_vblank_cb(DRMID(drm_enc), enable);
+
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ dpu_enc->crtc_vblank_cb = vbl_cb;
+ dpu_enc->crtc_vblank_cb_data = vbl_data;
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->ops.control_vblank_irq)
+ phys->ops.control_vblank_irq(phys, enable);
+ }
+}
+
+void dpu_encoder_register_frame_event_callback(struct drm_encoder *drm_enc,
+ void (*frame_event_cb)(void *, u32 event),
+ void *frame_event_cb_data)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ unsigned long lock_flags;
+ bool enable;
+
+ enable = frame_event_cb ? true : false;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ trace_dpu_enc_frame_event_cb(DRMID(drm_enc), enable);
+
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+ dpu_enc->crtc_frame_event_cb = frame_event_cb;
+ dpu_enc->crtc_frame_event_cb_data = frame_event_cb_data;
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+}
+
+static void dpu_encoder_frame_done_callback(
+ struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *ready_phys, u32 event)
+{
+ struct dpu_encoder_virt *dpu_enc = to_dpu_encoder_virt(drm_enc);
+ unsigned int i;
+
+ if (event & (DPU_ENCODER_FRAME_EVENT_DONE
+ | DPU_ENCODER_FRAME_EVENT_ERROR
+ | DPU_ENCODER_FRAME_EVENT_PANEL_DEAD)) {
+
+ if (!dpu_enc->frame_busy_mask[0]) {
+ /*
+ * suppress frame_done without waiter,
+ * likely autorefresh
+ */
+ trace_dpu_enc_frame_done_cb_not_busy(DRMID(drm_enc),
+ event, ready_phys->intf_idx);
+ return;
+ }
+
+ /* One of the physical encoders has become idle */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ if (dpu_enc->phys_encs[i] == ready_phys) {
+ clear_bit(i, dpu_enc->frame_busy_mask);
+ trace_dpu_enc_frame_done_cb(DRMID(drm_enc), i,
+ dpu_enc->frame_busy_mask[0]);
+ }
+ }
+
+ if (!dpu_enc->frame_busy_mask[0]) {
+ atomic_set(&dpu_enc->frame_done_timeout, 0);
+ del_timer(&dpu_enc->frame_done_timer);
+
+ dpu_encoder_resource_control(drm_enc,
+ DPU_ENC_RC_EVENT_FRAME_DONE);
+
+ if (dpu_enc->crtc_frame_event_cb)
+ dpu_enc->crtc_frame_event_cb(
+ dpu_enc->crtc_frame_event_cb_data,
+ event);
+ }
+ } else {
+ if (dpu_enc->crtc_frame_event_cb)
+ dpu_enc->crtc_frame_event_cb(
+ dpu_enc->crtc_frame_event_cb_data, event);
+ }
+}
+
+static void dpu_encoder_off_work(struct kthread_work *work)
+{
+ struct dpu_encoder_virt *dpu_enc = container_of(work,
+ struct dpu_encoder_virt, delayed_off_work.work);
+
+ if (!dpu_enc) {
+ DPU_ERROR("invalid dpu encoder\n");
+ return;
+ }
+
+ dpu_encoder_resource_control(&dpu_enc->base,
+ DPU_ENC_RC_EVENT_ENTER_IDLE);
+
+ dpu_encoder_frame_done_callback(&dpu_enc->base, NULL,
+ DPU_ENCODER_FRAME_EVENT_IDLE);
+}
+
+/**
+ * _dpu_encoder_trigger_flush - trigger flush for a physical encoder
+ * @drm_enc: Pointer to drm encoder structure
+ * @phys: Pointer to physical encoder structure
+ * @extra_flush_bits: Additional bit mask to include in flush trigger
+ */
+static inline void _dpu_encoder_trigger_flush(struct drm_encoder *drm_enc,
+ struct dpu_encoder_phys *phys, uint32_t extra_flush_bits)
+{
+ struct dpu_hw_ctl *ctl;
+ int pending_kickoff_cnt;
+ u32 ret = UINT_MAX;
+
+ if (!drm_enc || !phys) {
+ DPU_ERROR("invalid argument(s), drm_enc %d, phys_enc %d\n",
+ drm_enc != 0, phys != 0);
+ return;
+ }
+
+ if (!phys->hw_pp) {
+ DPU_ERROR("invalid pingpong hw\n");
+ return;
+ }
+
+ ctl = phys->hw_ctl;
+ if (!ctl || !ctl->ops.trigger_flush) {
+ DPU_ERROR("missing trigger cb\n");
+ return;
+ }
+
+ pending_kickoff_cnt = dpu_encoder_phys_inc_pending(phys);
+
+ if (extra_flush_bits && ctl->ops.update_pending_flush)
+ ctl->ops.update_pending_flush(ctl, extra_flush_bits);
+
+ ctl->ops.trigger_flush(ctl);
+
+ if (ctl->ops.get_pending_flush)
+ ret = ctl->ops.get_pending_flush(ctl);
+
+ trace_dpu_enc_trigger_flush(DRMID(drm_enc), phys->intf_idx,
+ pending_kickoff_cnt, ctl->idx, ret);
+}
+
+/**
+ * _dpu_encoder_trigger_start - trigger start for a physical encoder
+ * @phys: Pointer to physical encoder structure
+ */
+static inline void _dpu_encoder_trigger_start(struct dpu_encoder_phys *phys)
+{
+ if (!phys) {
+ DPU_ERROR("invalid argument(s)\n");
+ return;
+ }
+
+ if (!phys->hw_pp) {
+ DPU_ERROR("invalid pingpong hw\n");
+ return;
+ }
+
+ if (phys->ops.trigger_start && phys->enable_state != DPU_ENC_DISABLED)
+ phys->ops.trigger_start(phys);
+}
+
+void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_ctl *ctl;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ ctl = phys_enc->hw_ctl;
+ if (ctl && ctl->ops.trigger_start) {
+ ctl->ops.trigger_start(ctl);
+ trace_dpu_enc_trigger_start(DRMID(phys_enc->parent), ctl->idx);
+ }
+}
+
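+/*
+ * Wait for the pending-interrupt counter to drop to zero. If the jiffies
+ * timeout fires while the counter is still nonzero but the wall-clock
+ * deadline has not yet passed (e.g. a wakeup raced the final decrement),
+ * go back to waiting rather than reporting a timeout.
+ */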
+static int dpu_encoder_helper_wait_event_timeout(
+ int32_t drm_id,
+ int32_t hw_id,
+ struct dpu_encoder_wait_info *info)
+{
+ int rc = 0;
+ s64 expected_time = ktime_to_ms(ktime_get()) + info->timeout_ms;
+ s64 jiffies = msecs_to_jiffies(info->timeout_ms);
+ s64 time;
+
+ do {
+ rc = wait_event_timeout(*(info->wq),
+ atomic_read(info->atomic_cnt) == 0, jiffies);
+ time = ktime_to_ms(ktime_get());
+
+ trace_dpu_enc_wait_event_timeout(drm_id, hw_id, rc, time,
+ expected_time,
+ atomic_read(info->atomic_cnt));
+ /*
+ * If we timed out but the counter is still nonzero and the
+ * deadline has not passed, wait again.
+ */
+ } while (atomic_read(info->atomic_cnt) && (rc == 0) &&
+ (time < expected_time));
+
+ return rc;
+}
+
+void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_hw_ctl *ctl;
+ int rc;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(phys_enc->parent);
+ ctl = phys_enc->hw_ctl;
+
+ if (!ctl || !ctl->ops.reset)
+ return;
+
+ DRM_DEBUG_KMS("id:%u ctl %d reset\n", DRMID(phys_enc->parent),
+ ctl->idx);
+
+ rc = ctl->ops.reset(ctl);
+ if (rc) {
+ DPU_ERROR_ENC(dpu_enc, "ctl %d reset failure\n", ctl->idx);
+ dpu_dbg_dump(false, __func__, true, true);
+ }
+
+ phys_enc->enable_state = DPU_ENC_ENABLED;
+}
+
+/**
+ * _dpu_encoder_kickoff_phys - handle physical encoder kickoff
+ * Iterate through the physical encoders and perform consolidated flush
+ * and/or control start triggering as needed. This is done in the virtual
+ * encoder rather than the individual physical ones in order to handle
+ * use cases that require visibility into multiple physical encoders at
+ * a time.
+ * @dpu_enc: Pointer to virtual encoder structure
+ */
+static void _dpu_encoder_kickoff_phys(struct dpu_encoder_virt *dpu_enc)
+{
+ struct dpu_hw_ctl *ctl;
+ uint32_t i, pending_flush;
+ unsigned long lock_flags;
+
+ if (!dpu_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ pending_flush = 0x0;
+
+ /* update pending counts and trigger kickoff ctl flush atomically */
+ spin_lock_irqsave(&dpu_enc->enc_spinlock, lock_flags);
+
+ /* don't perform flush/start operations for slave encoders */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys || phys->enable_state == DPU_ENC_DISABLED)
+ continue;
+
+ ctl = phys->hw_ctl;
+ if (!ctl)
+ continue;
+
+ if (phys->split_role != ENC_ROLE_SLAVE)
+ set_bit(i, dpu_enc->frame_busy_mask);
+ if (!phys->ops.needs_single_flush ||
+ !phys->ops.needs_single_flush(phys))
+ _dpu_encoder_trigger_flush(&dpu_enc->base, phys, 0x0);
+ else if (ctl->ops.get_pending_flush)
+ pending_flush |= ctl->ops.get_pending_flush(ctl);
+ }
+
+ /* for split flush, combine pending flush masks and send to master */
+ if (pending_flush && dpu_enc->cur_master) {
+ _dpu_encoder_trigger_flush(
+ &dpu_enc->base,
+ dpu_enc->cur_master,
+ pending_flush);
+ }
+
+ _dpu_encoder_trigger_start(dpu_enc->cur_master);
+
+ spin_unlock_irqrestore(&dpu_enc->enc_spinlock, lock_flags);
+}
+
+void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ unsigned int i;
+ struct dpu_hw_ctl *ctl;
+ struct msm_display_info *disp_info;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ disp_info = &dpu_enc->disp_info;
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+
+ if (phys && phys->hw_ctl) {
+ ctl = phys->hw_ctl;
+ if (ctl->ops.clear_pending_flush)
+ ctl->ops.clear_pending_flush(ctl);
+
+ /* update only for command mode primary ctl */
+ if ((phys == dpu_enc->cur_master) &&
+ (disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE)
+ && ctl->ops.trigger_pending)
+ ctl->ops.trigger_pending(ctl);
+ }
+ }
+}
+
+static u32 _dpu_encoder_calculate_linetime(struct dpu_encoder_virt *dpu_enc,
+ struct drm_display_mode *mode)
+{
+ u64 pclk_rate;
+ u32 pclk_period;
+ u32 line_time;
+
+ /*
+ * For linetime calculation, only operate on master encoder.
+ */
+ if (!dpu_enc->cur_master)
+ return 0;
+
+ if (!dpu_enc->cur_master->ops.get_line_count) {
+ DPU_ERROR("get_line_count function not defined\n");
+ return 0;
+ }
+
+ pclk_rate = mode->clock; /* pixel clock in kHz */
+ if (pclk_rate == 0) {
+ DPU_ERROR("pclk is 0, cannot calculate line time\n");
+ return 0;
+ }
+
+ pclk_period = DIV_ROUND_UP_ULL(1000000000ull, pclk_rate);
+ if (pclk_period == 0) {
+ DPU_ERROR("pclk period is 0\n");
+ return 0;
+ }
+
+ /*
+ * Line time calculation based on Pixel clock and HTOTAL.
+ * Final unit is in ns.
+ */
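+ /*
+ * Worked example (illustrative values, not from the catalog): a
+ * 1080p@60 mode with clock = 148500 kHz and htotal = 2200 yields
+ * pclk_period = DIV_ROUND_UP(10^9, 148500) = 6735 and
+ * line_time = 6735 * 2200 / 1000 = 14817 ns per line.
+ */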
+ line_time = (pclk_period * mode->htotal) / 1000;
+ if (line_time == 0) {
+ DPU_ERROR("line time calculation is 0\n");
+ return 0;
+ }
+
+ DPU_DEBUG_ENC(dpu_enc,
+ "clk_rate=%lldkHz, clk_period=%d, linetime=%dns\n",
+ pclk_rate, pclk_period, line_time);
+
+ return line_time;
+}
+
+static int _dpu_encoder_wakeup_time(struct drm_encoder *drm_enc,
+ ktime_t *wakeup_time)
+{
+ struct drm_display_mode *mode;
+ struct dpu_encoder_virt *dpu_enc;
+ u32 cur_line;
+ u32 line_time;
+ u32 vtotal, time_to_vsync;
+ ktime_t cur_time;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ if (!drm_enc->crtc || !drm_enc->crtc->state) {
+ DPU_ERROR("crtc/crtc state object is NULL\n");
+ return -EINVAL;
+ }
+ mode = &drm_enc->crtc->state->adjusted_mode;
+
+ line_time = _dpu_encoder_calculate_linetime(dpu_enc, mode);
+ if (!line_time)
+ return -EINVAL;
+
+ cur_line = dpu_enc->cur_master->ops.get_line_count(dpu_enc->cur_master);
+
+ vtotal = mode->vtotal;
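+ /*
+ * If the current line count has already run past vtotal, assume a
+ * full frame remains; otherwise wait only for the lines left before
+ * the next vsync.
+ */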
+ if (cur_line >= vtotal)
+ time_to_vsync = line_time * vtotal;
+ else
+ time_to_vsync = line_time * (vtotal - cur_line);
+
+ if (time_to_vsync == 0) {
+ DPU_ERROR("time to vsync should not be zero, vtotal=%d\n",
+ vtotal);
+ return -EINVAL;
+ }
+
+ cur_time = ktime_get();
+ *wakeup_time = ktime_add_ns(cur_time, time_to_vsync);
+
+ DPU_DEBUG_ENC(dpu_enc,
+ "cur_line=%u vtotal=%u time_to_vsync=%u, cur_time=%lld, wakeup_time=%lld\n",
+ cur_line, vtotal, time_to_vsync,
+ ktime_to_ms(cur_time),
+ ktime_to_ms(*wakeup_time));
+ return 0;
+}
+
+static void dpu_encoder_vsync_event_handler(struct timer_list *t)
+{
+ struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
+ vsync_event_timer);
+ struct drm_encoder *drm_enc = &dpu_enc->base;
+ struct msm_drm_private *priv;
+ struct msm_drm_thread *event_thread;
+
+ if (!drm_enc->dev || !drm_enc->dev->dev_private ||
+ !drm_enc->crtc) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ priv = drm_enc->dev->dev_private;
+
+ if (drm_enc->crtc->index >= ARRAY_SIZE(priv->event_thread)) {
+ DPU_ERROR("invalid crtc index\n");
+ return;
+ }
+ event_thread = &priv->event_thread[drm_enc->crtc->index];
+ if (!event_thread) {
+ DPU_ERROR("event_thread not found for crtc:%d\n",
+ drm_enc->crtc->index);
+ return;
+ }
+
+ del_timer(&dpu_enc->vsync_event_timer);
+}
+
+static void dpu_encoder_vsync_event_work_handler(struct kthread_work *work)
+{
+ struct dpu_encoder_virt *dpu_enc = container_of(work,
+ struct dpu_encoder_virt, vsync_event_work);
+ ktime_t wakeup_time;
+
+ if (!dpu_enc) {
+ DPU_ERROR("invalid dpu encoder\n");
+ return;
+ }
+
+ if (_dpu_encoder_wakeup_time(&dpu_enc->base, &wakeup_time))
+ return;
+
+ trace_dpu_enc_vsync_event_work(DRMID(&dpu_enc->base), wakeup_time);
+ mod_timer(&dpu_enc->vsync_event_timer,
+ nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
+}
+
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *drm_enc,
+ struct dpu_encoder_kickoff_params *params)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ bool needs_hw_reset = false;
+ unsigned int i;
+
+ if (!drm_enc || !params) {
+ DPU_ERROR("invalid args\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ trace_dpu_enc_prepare_kickoff(DRMID(drm_enc));
+
+ /* prepare for next kickoff, may include waiting on previous kickoff */
+ DPU_ATRACE_BEGIN("enc_prepare_for_kickoff");
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys) {
+ if (phys->ops.prepare_for_kickoff)
+ phys->ops.prepare_for_kickoff(phys, params);
+ if (phys->enable_state == DPU_ENC_ERR_NEEDS_HW_RESET)
+ needs_hw_reset = true;
+ }
+ }
+ DPU_ATRACE_END("enc_prepare_for_kickoff");
+
+ dpu_encoder_resource_control(drm_enc, DPU_ENC_RC_EVENT_KICKOFF);
+
+ /* if any phys needs reset, reset all phys, in-order */
+ if (needs_hw_reset) {
+ trace_dpu_enc_prepare_kickoff_reset(DRMID(drm_enc));
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys && phys->ops.hw_reset)
+ phys->ops.hw_reset(phys);
+ }
+ }
+}
+
+void dpu_encoder_kickoff(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ ktime_t wakeup_time;
+ unsigned int i;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ DPU_ATRACE_BEGIN("encoder_kickoff");
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ trace_dpu_enc_kickoff(DRMID(drm_enc));
+
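+ /*
+ * Arm the frame-done watchdog. Assuming DPU_FRAME_DONE_TIMEOUT counts
+ * frames, the timeout amounts to that many frame periods at the
+ * current refresh rate, expressed in milliseconds.
+ */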
+ atomic_set(&dpu_enc->frame_done_timeout,
+ DPU_FRAME_DONE_TIMEOUT * 1000 /
+ drm_enc->crtc->state->adjusted_mode.vrefresh);
+ mod_timer(&dpu_enc->frame_done_timer, jiffies +
+ ((atomic_read(&dpu_enc->frame_done_timeout) * HZ) / 1000));
+
+ /* All phys encs are ready to go, trigger the kickoff */
+ _dpu_encoder_kickoff_phys(dpu_enc);
+
+ /* allow phys encs to handle any post-kickoff business */
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys && phys->ops.handle_post_kickoff)
+ phys->ops.handle_post_kickoff(phys);
+ }
+
+ if (dpu_enc->disp_info.intf_type == DRM_MODE_CONNECTOR_DSI &&
+ !_dpu_encoder_wakeup_time(drm_enc, &wakeup_time)) {
+ trace_dpu_enc_early_kickoff(DRMID(drm_enc),
+ ktime_to_ms(wakeup_time));
+ mod_timer(&dpu_enc->vsync_event_timer,
+ nsecs_to_jiffies(ktime_to_ns(wakeup_time)));
+ }
+
+ DPU_ATRACE_END("encoder_kickoff");
+}
+
+void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct dpu_encoder_phys *phys;
+ int i;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ phys = dpu_enc->phys_encs[i];
+ if (phys && phys->ops.prepare_commit)
+ phys->ops.prepare_commit(phys);
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_encoder_status_show(struct seq_file *s, void *data)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i;
+
+ if (!s || !s->private)
+ return -EINVAL;
+
+ dpu_enc = s->private;
+
+ mutex_lock(&dpu_enc->enc_lock);
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys)
+ continue;
+
+ seq_printf(s, "intf:%d vsync:%8d underrun:%8d ",
+ phys->intf_idx - INTF_0,
+ atomic_read(&phys->vsync_cnt),
+ atomic_read(&phys->underrun_cnt));
+
+ switch (phys->intf_mode) {
+ case INTF_MODE_VIDEO:
+ seq_puts(s, "mode: video\n");
+ break;
+ case INTF_MODE_CMD:
+ seq_puts(s, "mode: command\n");
+ break;
+ default:
+ seq_puts(s, "mode: ???\n");
+ break;
+ }
+ }
+ mutex_unlock(&dpu_enc->enc_lock);
+
+ return 0;
+}
+
+static int _dpu_encoder_debugfs_status_open(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, _dpu_encoder_status_show, inode->i_private);
+}
+
+static ssize_t _dpu_encoder_misr_setup(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i = 0, rc;
+ char buf[MISR_BUFF_SIZE + 1];
+ size_t buff_copy;
+ u32 frame_count, enable;
+
+ if (!file || !file->private_data)
+ return -EINVAL;
+
+ dpu_enc = file->private_data;
+
+ buff_copy = min_t(size_t, count, MISR_BUFF_SIZE);
+ if (copy_from_user(buf, user_buf, buff_copy))
+ return -EINVAL;
+
+ buf[buff_copy] = 0; /* end of string */
+
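+ /*
+ * Expected input: "<enable> <frame_count>", e.g. writing "1 10"
+ * enables MISR with a frame count of 10.
+ */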
+ if (sscanf(buf, "%u %u", &enable, &frame_count) != 2)
+ return -EINVAL;
+
+ rc = _dpu_encoder_power_enable(dpu_enc, true);
+ if (rc)
+ return rc;
+
+ mutex_lock(&dpu_enc->enc_lock);
+ dpu_enc->misr_enable = enable;
+ dpu_enc->misr_frame_count = frame_count;
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys || !phys->ops.setup_misr)
+ continue;
+
+ phys->ops.setup_misr(phys, enable, frame_count);
+ }
+ mutex_unlock(&dpu_enc->enc_lock);
+ _dpu_encoder_power_enable(dpu_enc, false);
+
+ return count;
+}
+
+static ssize_t _dpu_encoder_misr_read(struct file *file,
+ char __user *user_buff, size_t count, loff_t *ppos)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ int i = 0, len = 0;
+ char buf[MISR_BUFF_SIZE + 1] = {'\0'};
+ int rc;
+
+ if (*ppos)
+ return 0;
+
+ if (!file || !file->private_data)
+ return -EINVAL;
+
+ dpu_enc = file->private_data;
+
+ rc = _dpu_encoder_power_enable(dpu_enc, true);
+ if (rc)
+ return rc;
+
+ mutex_lock(&dpu_enc->enc_lock);
+ if (!dpu_enc->misr_enable) {
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+ "disabled\n");
+ goto buff_check;
+ } else if (dpu_enc->disp_info.capabilities &
+ ~MSM_DISPLAY_CAP_VID_MODE) {
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+ "unsupported\n");
+ goto buff_check;
+ }
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (!phys || !phys->ops.collect_misr)
+ continue;
+
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len,
+ "Intf idx:%d\n", phys->intf_idx - INTF_0);
+ len += snprintf(buf + len, MISR_BUFF_SIZE - len, "0x%x\n",
+ phys->ops.collect_misr(phys));
+ }
+
+buff_check:
+ if (count <= len) {
+ len = 0;
+ goto end;
+ }
+
+ if (copy_to_user(user_buff, buf, len)) {
+ len = -EFAULT;
+ goto end;
+ }
+
+ *ppos += len; /* increase offset */
+
+end:
+ mutex_unlock(&dpu_enc->enc_lock);
+ _dpu_encoder_power_enable(dpu_enc, false);
+ return len;
+}
+
+static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ int i;
+
+ static const struct file_operations debugfs_status_fops = {
+ .open = _dpu_encoder_debugfs_status_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+ };
+
+ static const struct file_operations debugfs_misr_fops = {
+ .open = simple_open,
+ .read = _dpu_encoder_misr_read,
+ .write = _dpu_encoder_misr_setup,
+ };
+
+ char name[DPU_NAME_SIZE];
+
+ if (!drm_enc || !drm_enc->dev || !drm_enc->dev->dev_private) {
+ DPU_ERROR("invalid encoder or kms\n");
+ return -EINVAL;
+ }
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ priv = drm_enc->dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ snprintf(name, DPU_NAME_SIZE, "encoder%u", drm_enc->base.id);
+
+ /* create overall sub-directory for the encoder */
+ dpu_enc->debugfs_root = debugfs_create_dir(name,
+ drm_enc->dev->primary->debugfs_root);
+ if (!dpu_enc->debugfs_root)
+ return -ENOMEM;
+
+ /* don't error check these */
+ debugfs_create_file("status", 0600,
+ dpu_enc->debugfs_root, dpu_enc, &debugfs_status_fops);
+
+ debugfs_create_file("misr_data", 0600,
+ dpu_enc->debugfs_root, dpu_enc, &debugfs_misr_fops);
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++)
+ if (dpu_enc->phys_encs[i] &&
+ dpu_enc->phys_encs[i]->ops.late_register)
+ dpu_enc->phys_encs[i]->ops.late_register(
+ dpu_enc->phys_encs[i],
+ dpu_enc->debugfs_root);
+
+ return 0;
+}
+
+static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
+{
+ struct dpu_encoder_virt *dpu_enc;
+
+ if (!drm_enc)
+ return;
+
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ debugfs_remove_recursive(dpu_enc->debugfs_root);
+}
+#else
+static int _dpu_encoder_init_debugfs(struct drm_encoder *drm_enc)
+{
+ return 0;
+}
+
+static void _dpu_encoder_destroy_debugfs(struct drm_encoder *drm_enc)
+{
+}
+#endif
+
+static int dpu_encoder_late_register(struct drm_encoder *encoder)
+{
+ return _dpu_encoder_init_debugfs(encoder);
+}
+
+static void dpu_encoder_early_unregister(struct drm_encoder *encoder)
+{
+ _dpu_encoder_destroy_debugfs(encoder);
+}
+
+static int dpu_encoder_virt_add_phys_encs(
+ u32 display_caps,
+ struct dpu_encoder_virt *dpu_enc,
+ struct dpu_enc_phys_init_params *params)
+{
+ struct dpu_encoder_phys *enc = NULL;
+
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ /*
+ * We may create up to NUM_PHYS_ENCODER_TYPES physical encoder types
+ * in this function, check up-front.
+ */
+ if (dpu_enc->num_phys_encs + NUM_PHYS_ENCODER_TYPES >=
+ ARRAY_SIZE(dpu_enc->phys_encs)) {
+ DPU_ERROR_ENC(dpu_enc, "too many physical encoders %d\n",
+ dpu_enc->num_phys_encs);
+ return -EINVAL;
+ }
+
+ if (display_caps & MSM_DISPLAY_CAP_VID_MODE) {
+ enc = dpu_encoder_phys_vid_init(params);
+
+ if (IS_ERR_OR_NULL(enc)) {
+ DPU_ERROR_ENC(dpu_enc, "failed to init vid enc: %ld\n",
+ PTR_ERR(enc));
+ return !enc ? -EINVAL : PTR_ERR(enc);
+ }
+
+ dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
+ ++dpu_enc->num_phys_encs;
+ }
+
+ if (display_caps & MSM_DISPLAY_CAP_CMD_MODE) {
+ enc = dpu_encoder_phys_cmd_init(params);
+
+ if (IS_ERR_OR_NULL(enc)) {
+ DPU_ERROR_ENC(dpu_enc, "failed to init cmd enc: %ld\n",
+ PTR_ERR(enc));
+ return !enc ? -EINVAL : PTR_ERR(enc);
+ }
+
+ dpu_enc->phys_encs[dpu_enc->num_phys_encs] = enc;
+ ++dpu_enc->num_phys_encs;
+ }
+
+ return 0;
+}
+
+static const struct dpu_encoder_virt_ops dpu_encoder_parent_ops = {
+ .handle_vblank_virt = dpu_encoder_vblank_callback,
+ .handle_underrun_virt = dpu_encoder_underrun_callback,
+ .handle_frame_done = dpu_encoder_frame_done_callback,
+};
+
+static int dpu_encoder_setup_display(struct dpu_encoder_virt *dpu_enc,
+ struct dpu_kms *dpu_kms,
+ struct msm_display_info *disp_info,
+ int *drm_enc_mode)
+{
+ int ret = 0;
+ int i = 0;
+ enum dpu_intf_type intf_type;
+ struct dpu_enc_phys_init_params phys_params;
+
+ if (!dpu_enc || !dpu_kms) {
+ DPU_ERROR("invalid arg(s), enc %d kms %d\n",
+ dpu_enc != 0, dpu_kms != 0);
+ return -EINVAL;
+ }
+
+ memset(&phys_params, 0, sizeof(phys_params));
+ phys_params.dpu_kms = dpu_kms;
+ phys_params.parent = &dpu_enc->base;
+ phys_params.parent_ops = &dpu_encoder_parent_ops;
+ phys_params.enc_spinlock = &dpu_enc->enc_spinlock;
+
+ DPU_DEBUG("\n");
+
+ if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI) {
+ *drm_enc_mode = DRM_MODE_ENCODER_DSI;
+ intf_type = INTF_DSI;
+ } else if (disp_info->intf_type == DRM_MODE_CONNECTOR_HDMIA) {
+ *drm_enc_mode = DRM_MODE_ENCODER_TMDS;
+ intf_type = INTF_HDMI;
+ } else if (disp_info->intf_type == DRM_MODE_CONNECTOR_DisplayPort) {
+ *drm_enc_mode = DRM_MODE_ENCODER_TMDS;
+ intf_type = INTF_DP;
+ } else {
+ DPU_ERROR_ENC(dpu_enc, "unsupported display interface type\n");
+ return -EINVAL;
+ }
+
+ WARN_ON(disp_info->num_of_h_tiles < 1);
+
+ dpu_enc->display_num_of_h_tiles = disp_info->num_of_h_tiles;
+
+ DPU_DEBUG("dsi_info->num_of_h_tiles %d\n", disp_info->num_of_h_tiles);
+
+ if ((disp_info->capabilities & MSM_DISPLAY_CAP_CMD_MODE) ||
+ (disp_info->capabilities & MSM_DISPLAY_CAP_VID_MODE))
+ dpu_enc->idle_pc_supported =
+ dpu_kms->catalog->caps->has_idle_pc;
+
+ mutex_lock(&dpu_enc->enc_lock);
+ for (i = 0; i < disp_info->num_of_h_tiles && !ret; i++) {
+ /*
+ * Left-most tile is at index 0, content is controller id
+ * h_tile_instance_ids[2] = {0, 1}; DSI0 = left, DSI1 = right
+ * h_tile_instance_ids[2] = {1, 0}; DSI1 = left, DSI0 = right
+ */
+ u32 controller_id = disp_info->h_tile_instance[i];
+
+ if (disp_info->num_of_h_tiles > 1) {
+ if (i == 0)
+ phys_params.split_role = ENC_ROLE_MASTER;
+ else
+ phys_params.split_role = ENC_ROLE_SLAVE;
+ } else {
+ phys_params.split_role = ENC_ROLE_SOLO;
+ }
+
+ DPU_DEBUG("h_tile_instance %d = %d, split_role %d\n",
+ i, controller_id, phys_params.split_role);
+
+ phys_params.intf_idx = dpu_encoder_get_intf(dpu_kms->catalog,
+ intf_type,
+ controller_id);
+ if (phys_params.intf_idx == INTF_MAX) {
+ DPU_ERROR_ENC(dpu_enc, "could not get intf: type %d, id %d\n",
+ intf_type, controller_id);
+ ret = -EINVAL;
+ }
+
+ if (!ret) {
+ ret = dpu_encoder_virt_add_phys_encs(disp_info->capabilities,
+ dpu_enc,
+ &phys_params);
+ if (ret)
+ DPU_ERROR_ENC(dpu_enc, "failed to add phys encs\n");
+ }
+ }
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys) {
+ atomic_set(&phys->vsync_cnt, 0);
+ atomic_set(&phys->underrun_cnt, 0);
+ }
+ }
+ mutex_unlock(&dpu_enc->enc_lock);
+
+ return ret;
+}
+
+static void dpu_encoder_frame_done_timeout(struct timer_list *t)
+{
+ struct dpu_encoder_virt *dpu_enc = from_timer(dpu_enc, t,
+ frame_done_timer);
+ struct drm_encoder *drm_enc = &dpu_enc->base;
+ struct msm_drm_private *priv;
+ u32 event;
+
+ if (!drm_enc->dev || !drm_enc->dev->dev_private) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+ priv = drm_enc->dev->dev_private;
+
+ if (!dpu_enc->frame_busy_mask[0] || !dpu_enc->crtc_frame_event_cb) {
+ DRM_DEBUG_KMS("id:%u invalid timeout frame_busy_mask=%lu\n",
+ DRMID(drm_enc), dpu_enc->frame_busy_mask[0]);
+ return;
+ } else if (!atomic_xchg(&dpu_enc->frame_done_timeout, 0)) {
+ DRM_DEBUG_KMS("id:%u invalid timeout\n", DRMID(drm_enc));
+ return;
+ }
+
+ DPU_ERROR_ENC(dpu_enc, "frame done timeout\n");
+
+ event = DPU_ENCODER_FRAME_EVENT_ERROR;
+ trace_dpu_enc_frame_done_timeout(DRMID(drm_enc), event);
+ dpu_enc->crtc_frame_event_cb(dpu_enc->crtc_frame_event_cb_data, event);
+}
+
+static const struct drm_encoder_helper_funcs dpu_encoder_helper_funcs = {
+ .mode_set = dpu_encoder_virt_mode_set,
+ .disable = dpu_encoder_virt_disable,
+ .enable = dpu_kms_encoder_enable,
+ .atomic_check = dpu_encoder_virt_atomic_check,
+
+ /* This is called by dpu_kms_encoder_enable */
+ .commit = dpu_encoder_virt_enable,
+};
+
+static const struct drm_encoder_funcs dpu_encoder_funcs = {
+ .destroy = dpu_encoder_destroy,
+ .late_register = dpu_encoder_late_register,
+ .early_unregister = dpu_encoder_early_unregister,
+};
+
+int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
+ struct msm_display_info *disp_info)
+{
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_kms *dpu_kms = to_dpu_kms(priv->kms);
+ struct drm_encoder *drm_enc = NULL;
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int drm_enc_mode = DRM_MODE_ENCODER_NONE;
+ int ret = 0;
+
+ dpu_enc = to_dpu_encoder_virt(enc);
+
+ mutex_init(&dpu_enc->enc_lock);
+ ret = dpu_encoder_setup_display(dpu_enc, dpu_kms, disp_info,
+ &drm_enc_mode);
+ if (ret)
+ goto fail;
+
+ dpu_enc->cur_master = NULL;
+ spin_lock_init(&dpu_enc->enc_spinlock);
+
+ atomic_set(&dpu_enc->frame_done_timeout, 0);
+ timer_setup(&dpu_enc->frame_done_timer,
+ dpu_encoder_frame_done_timeout, 0);
+
+ if (disp_info->intf_type == DRM_MODE_CONNECTOR_DSI)
+ timer_setup(&dpu_enc->vsync_event_timer,
+ dpu_encoder_vsync_event_handler,
+ 0);
+
+ mutex_init(&dpu_enc->rc_lock);
+ kthread_init_delayed_work(&dpu_enc->delayed_off_work,
+ dpu_encoder_off_work);
+ dpu_enc->idle_timeout = IDLE_TIMEOUT;
+
+ kthread_init_work(&dpu_enc->vsync_event_work,
+ dpu_encoder_vsync_event_work_handler);
+
+ memcpy(&dpu_enc->disp_info, disp_info, sizeof(*disp_info));
+
+ DPU_DEBUG_ENC(dpu_enc, "created\n");
+
+ return ret;
+
+fail:
+ DPU_ERROR("failed to create encoder\n");
+ if (drm_enc)
+ dpu_encoder_destroy(drm_enc);
+
+ return ret;
+}
+
+struct drm_encoder *dpu_encoder_init(struct drm_device *dev,
+ int drm_enc_mode)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int rc = 0;
+
+ dpu_enc = devm_kzalloc(dev->dev, sizeof(*dpu_enc), GFP_KERNEL);
+ if (!dpu_enc)
+ return ERR_PTR(-ENOMEM);
+
+ rc = drm_encoder_init(dev, &dpu_enc->base, &dpu_encoder_funcs,
+ drm_enc_mode, NULL);
+ if (rc) {
+ devm_kfree(dev->dev, dpu_enc);
+ return ERR_PTR(rc);
+ }
+
+ drm_encoder_helper_add(&dpu_enc->base, &dpu_encoder_helper_funcs);
+
+ return &dpu_enc->base;
+}
+
+int dpu_encoder_wait_for_event(struct drm_encoder *drm_enc,
+ enum msm_event_wait event)
+{
+ int (*fn_wait)(struct dpu_encoder_phys *phys_enc) = NULL;
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i, ret = 0;
+
+ if (!drm_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+ dpu_enc = to_dpu_encoder_virt(drm_enc);
+ DPU_DEBUG_ENC(dpu_enc, "\n");
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+ if (!phys)
+ continue;
+
+ switch (event) {
+ case MSM_ENC_COMMIT_DONE:
+ fn_wait = phys->ops.wait_for_commit_done;
+ break;
+ case MSM_ENC_TX_COMPLETE:
+ fn_wait = phys->ops.wait_for_tx_complete;
+ break;
+ case MSM_ENC_VBLANK:
+ fn_wait = phys->ops.wait_for_vblank;
+ break;
+ default:
+ DPU_ERROR_ENC(dpu_enc, "unknown wait event %d\n",
+ event);
+ return -EINVAL;
+ }
+
+ if (fn_wait) {
+ DPU_ATRACE_BEGIN("wait_for_completion_event");
+ ret = fn_wait(phys);
+ DPU_ATRACE_END("wait_for_completion_event");
+ if (ret)
+ return ret;
+ }
+ }
+
+ return ret;
+}
+
+enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder)
+{
+ struct dpu_encoder_virt *dpu_enc = NULL;
+ int i;
+
+ if (!encoder) {
+ DPU_ERROR("invalid encoder\n");
+ return INTF_MODE_NONE;
+ }
+ dpu_enc = to_dpu_encoder_virt(encoder);
+
+ if (dpu_enc->cur_master)
+ return dpu_enc->cur_master->intf_mode;
+
+ for (i = 0; i < dpu_enc->num_phys_encs; i++) {
+ struct dpu_encoder_phys *phys = dpu_enc->phys_encs[i];
+
+ if (phys)
+ return phys->intf_mode;
+ }
+
+ return INTF_MODE_NONE;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
new file mode 100644
index 000000000000..60f809fc7c13
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder.h
@@ -0,0 +1,177 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DPU_ENCODER_H__
+#define __DPU_ENCODER_H__
+
+#include <drm/drm_crtc.h>
+#include "dpu_hw_mdss.h"
+
+#define DPU_ENCODER_FRAME_EVENT_DONE BIT(0)
+#define DPU_ENCODER_FRAME_EVENT_ERROR BIT(1)
+#define DPU_ENCODER_FRAME_EVENT_PANEL_DEAD BIT(2)
+#define DPU_ENCODER_FRAME_EVENT_IDLE BIT(3)
+
+#define IDLE_TIMEOUT (66 - 16/2)
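+/* note: integer arithmetic, 66 - 16/2 = 58 ms idle timeout */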
+
+/**
+ * struct dpu_encoder_hw_resources - hardware resources used by an encoder
+ * @intfs: Interfaces this encoder is using, INTF_MODE_NONE if unused
+ * @needs_cdm: Encoder requests a CDM based on pixel format conversion needs
+ * @display_num_of_h_tiles: Number of horizontal tiles in case of split
+ * interface
+ */
+struct dpu_encoder_hw_resources {
+ enum dpu_intf_mode intfs[INTF_MAX];
+ bool needs_cdm;
+ u32 display_num_of_h_tiles;
+};
+
+/**
+ * dpu_encoder_kickoff_params - info encoder requires at kickoff
+ * @affected_displays: bitmask, bit set means the ROI of the commit lies within
+ * the bounds of the physical display at the bit index
+ */
+struct dpu_encoder_kickoff_params {
+ unsigned long affected_displays;
+};
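+
+/*
+ * Illustrative use (hypothetical values, not defined by this header): a
+ * commit whose ROI only covers the left panel of a dual-DSI setup would
+ * set only bit 0, i.e. params.affected_displays = BIT(0).
+ */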
+
+/**
+ * dpu_encoder_get_hw_resources - Populate table of required hardware resources
+ * @encoder: encoder pointer
+ * @hw_res: resource table to populate with encoder required resources
+ * @conn_state: report hw reqs based on this proposed connector state
+ */
+void dpu_encoder_get_hw_resources(struct drm_encoder *encoder,
+ struct dpu_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state);
+
+/**
+ * dpu_encoder_register_vblank_callback - provide callback to encoder that
+ * will be called on the next vblank.
+ * @encoder: encoder pointer
+ * @cb: callback pointer, provide NULL to deregister and disable IRQs
+ * @data: user data provided to callback
+ */
+void dpu_encoder_register_vblank_callback(struct drm_encoder *encoder,
+ void (*cb)(void *), void *data);
+
+/**
+ * dpu_encoder_register_frame_event_callback - provide callback to encoder that
+ * will be called after the request is complete, or other events.
+ * @encoder: encoder pointer
+ * @cb: callback pointer, provide NULL to deregister
+ * @data: user data provided to callback
+ */
+void dpu_encoder_register_frame_event_callback(struct drm_encoder *encoder,
+ void (*cb)(void *, u32), void *data);
+
+/**
+ * dpu_encoder_prepare_for_kickoff - schedule double buffer flip of the ctl
+ * path (i.e. ctl flush and start) at next appropriate time.
+ * Immediately: if no previous commit is outstanding.
+ * Delayed: Block until next trigger can be issued.
+ * @encoder: encoder pointer
+ * @params: kickoff time parameters
+ */
+void dpu_encoder_prepare_for_kickoff(struct drm_encoder *encoder,
+ struct dpu_encoder_kickoff_params *params);
+
+/**
+ * dpu_encoder_trigger_kickoff_pending - Clear the flush bits from previous
+ * kickoff and trigger the ctl prepare progress for command mode display.
+ * @encoder: encoder pointer
+ */
+void dpu_encoder_trigger_kickoff_pending(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_kickoff - trigger a double buffer flip of the ctl path
+ * (i.e. ctl flush and start) immediately.
+ * @encoder: encoder pointer
+ */
+void dpu_encoder_kickoff(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_wait_for_event - Waits for encoder events
+ * @encoder: encoder pointer
+ * @event: event to wait for
+ * MSM_ENC_COMMIT_DONE - Wait for hardware to have flushed the current pending
+ * frames to hardware at a vblank or ctl_start
+ * Encoders will map this differently depending on the
+ * panel type.
+ * vid mode -> vsync_irq
+ * cmd mode -> ctl_start
+ * MSM_ENC_TX_COMPLETE - Wait for the hardware to transfer all the pixels to
+ * the panel. Encoders will map this differently
+ * depending on the panel type.
+ * vid mode -> vsync_irq
+ * cmd mode -> pp_done
+ * Returns: 0 on success, -EWOULDBLOCK if already signaled, error otherwise
+ */
+int dpu_encoder_wait_for_event(struct drm_encoder *drm_encoder,
+ enum msm_event_wait event);
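+
+/*
+ * Illustrative call sequence from a hypothetical caller, using only
+ * declarations from this header:
+ *
+ *	dpu_encoder_prepare_for_kickoff(enc, &params);
+ *	dpu_encoder_kickoff(enc);
+ *	ret = dpu_encoder_wait_for_event(enc, MSM_ENC_COMMIT_DONE);
+ */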
+
+/*
+ * dpu_encoder_get_intf_mode - get interface mode of the given encoder
+ * @encoder: Pointer to drm encoder object
+ */
+enum dpu_intf_mode dpu_encoder_get_intf_mode(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_virt_restore - restore the encoder configs
+ * @encoder: encoder pointer
+ */
+void dpu_encoder_virt_restore(struct drm_encoder *encoder);
+
+/**
+ * dpu_encoder_init - initialize virtual encoder object
+ * @dev: Pointer to drm device structure
+ * @drm_enc_mode: corresponding drm_encoder mode (DRM_MODE_ENCODER_*)
+ * Returns: Pointer to newly created drm encoder
+ */
+struct drm_encoder *dpu_encoder_init(
+ struct drm_device *dev,
+ int drm_enc_mode);
+
+/**
+ * dpu_encoder_setup - setup dpu_encoder for the display probed
+ * @dev: Pointer to drm device structure
+ * @enc: Pointer to the drm_encoder
+ * @disp_info: Pointer to the display info
+ */
+int dpu_encoder_setup(struct drm_device *dev, struct drm_encoder *enc,
+ struct msm_display_info *disp_info);
+
+/**
+ * dpu_encoder_prepare_commit - prepare encoder at the very beginning of an
+ * atomic commit, before any registers are written
+ * @drm_enc: Pointer to previously created drm encoder structure
+ */
+void dpu_encoder_prepare_commit(struct drm_encoder *drm_enc);
+
+/**
+ * dpu_encoder_set_idle_timeout - set the idle timeout for video
+ * and command mode encoders.
+ * @drm_enc: Pointer to previously created drm encoder structure
+ * @idle_timeout: idle timeout duration in milliseconds
+ */
+void dpu_encoder_set_idle_timeout(struct drm_encoder *drm_enc,
+ u32 idle_timeout);
+
+#endif /* __DPU_ENCODER_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
new file mode 100644
index 000000000000..c7df8aad6613
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys.h
@@ -0,0 +1,430 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DPU_ENCODER_PHYS_H__
+#define __DPU_ENCODER_PHYS_H__
+
+#include <linux/jiffies.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_intf.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_top.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_encoder.h"
+
+#define DPU_ENCODER_NAME_MAX 16
+
+/* wait for at most 2 vsyncs for the lowest refresh rate (24 Hz) */
+#define KICKOFF_TIMEOUT_MS 84
+#define KICKOFF_TIMEOUT_JIFFIES msecs_to_jiffies(KICKOFF_TIMEOUT_MS)
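+/* i.e. 2 frames at 24 Hz: 2 * 1000 / 24 ~= 83.3 ms, rounded up to 84 ms */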
+
+/**
+ * enum dpu_enc_split_role - Role this physical encoder will play in a
+ * split-panel configuration, where one panel is master, and others slaves.
+ * Masters have extra responsibilities, like managing the VBLANK IRQ.
+ * @ENC_ROLE_SOLO: This is the one and only panel. This encoder is master.
+ * @ENC_ROLE_MASTER: This encoder is the master of a split panel config.
+ * @ENC_ROLE_SLAVE: This encoder is not the master of a split panel config.
+ */
+enum dpu_enc_split_role {
+ ENC_ROLE_SOLO,
+ ENC_ROLE_MASTER,
+ ENC_ROLE_SLAVE,
+};
+
+/**
+ * enum dpu_enc_enable_state - current enabled state of the physical encoder
+ * @DPU_ENC_DISABLING: Encoder transitioning to disable state
+ * Events bounding transition are encoder type specific
+ * @DPU_ENC_DISABLED: Encoder is disabled
+ * @DPU_ENC_ENABLING: Encoder transitioning to enabled
+ * Events bounding transition are encoder type specific
+ * @DPU_ENC_ENABLED: Encoder is enabled
+ * @DPU_ENC_ERR_NEEDS_HW_RESET: Encoder is enabled, but requires a hw_reset
+ * to recover from a previous error
+ */
+enum dpu_enc_enable_state {
+ DPU_ENC_DISABLING,
+ DPU_ENC_DISABLED,
+ DPU_ENC_ENABLING,
+ DPU_ENC_ENABLED,
+ DPU_ENC_ERR_NEEDS_HW_RESET
+};
+
+struct dpu_encoder_phys;
+
+/**
+ * struct dpu_encoder_virt_ops - Interface the containing virtual encoder
+ * provides for the physical encoders to use to callback.
+ * @handle_vblank_virt: Notify virtual encoder of vblank IRQ reception
+ * Note: This is called from IRQ handler context.
+ * @handle_underrun_virt: Notify virtual encoder of underrun IRQ reception
+ * Note: This is called from IRQ handler context.
+ * @handle_frame_done: Notify virtual encoder that this phys encoder
+ * has completed the last requested frame.
+ */
+struct dpu_encoder_virt_ops {
+ void (*handle_vblank_virt)(struct drm_encoder *,
+ struct dpu_encoder_phys *phys);
+ void (*handle_underrun_virt)(struct drm_encoder *,
+ struct dpu_encoder_phys *phys);
+ void (*handle_frame_done)(struct drm_encoder *,
+ struct dpu_encoder_phys *phys, u32 event);
+};
+
+/**
+ * struct dpu_encoder_phys_ops - Interface the physical encoders provide to
+ * the containing virtual encoder.
+ * @late_register: DRM Call. Add Userspace interfaces, debugfs.
+ * @prepare_commit: MSM Atomic Call, start of atomic commit sequence
+ * @is_master: Whether this phys_enc is the current master
+ * encoder. Can be switched at enable time. Based
+ * on split_role and current mode (CMD/VID).
+ * @mode_fixup: DRM Call. Fixup a DRM mode.
+ * @mode_set: DRM Call. Set a DRM mode.
+ * This likely caches the mode, for use at enable.
+ * @enable: DRM Call. Enable a DRM mode.
+ * @disable: DRM Call. Disable mode.
+ * @atomic_check: DRM Call. Atomic check new DRM state.
+ * @destroy: DRM Call. Destroy and release resources.
+ * @get_hw_resources: Populate the structure with the hardware
+ * resources that this phys_enc is using.
+ * Expect no overlap between phys_encs.
+ * @control_vblank_irq: Register/Deregister for VBLANK IRQ
+ * @wait_for_commit_done: Wait for hardware to have flushed the
+ * current pending frames to hardware
+ * @wait_for_tx_complete: Wait for hardware to transfer the pixels
+ * to the panel
+ * @wait_for_vblank: Wait for VBLANK, for sub-driver internal use
+ * @prepare_for_kickoff: Do any work necessary prior to a kickoff
+ * For CMD encoder, may wait for previous tx done
+ * @handle_post_kickoff: Do any work necessary post-kickoff
+ * @trigger_start: Process start event on physical encoder
+ * @needs_single_flush: Whether encoder slaves need to be flushed
+ * @setup_misr: Sets up MISR, enables and disables it via debugfs
+ * @collect_misr: Collects MISR data on frame update
+ * @hw_reset: Issue HW recovery such as CTL reset and clear
+ * DPU_ENC_ERR_NEEDS_HW_RESET state
+ * @irq_control: Handler to enable/disable all the encoder IRQs
+ * @prepare_idle_pc: phys encoder can update the vsync_enable status
+ * on idle power collapse prepare
+ * @restore: Restore all the encoder configs.
+ * @get_line_count: Obtain current vertical line count
+ */
+struct dpu_encoder_phys_ops {
+ int (*late_register)(struct dpu_encoder_phys *encoder,
+ struct dentry *debugfs_root);
+ void (*prepare_commit)(struct dpu_encoder_phys *encoder);
+ bool (*is_master)(struct dpu_encoder_phys *encoder);
+ bool (*mode_fixup)(struct dpu_encoder_phys *encoder,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ void (*mode_set)(struct dpu_encoder_phys *encoder,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode);
+ void (*enable)(struct dpu_encoder_phys *encoder);
+ void (*disable)(struct dpu_encoder_phys *encoder);
+ int (*atomic_check)(struct dpu_encoder_phys *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state);
+ void (*destroy)(struct dpu_encoder_phys *encoder);
+ void (*get_hw_resources)(struct dpu_encoder_phys *encoder,
+ struct dpu_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state);
+ int (*control_vblank_irq)(struct dpu_encoder_phys *enc, bool enable);
+ int (*wait_for_commit_done)(struct dpu_encoder_phys *phys_enc);
+ int (*wait_for_tx_complete)(struct dpu_encoder_phys *phys_enc);
+ int (*wait_for_vblank)(struct dpu_encoder_phys *phys_enc);
+ void (*prepare_for_kickoff)(struct dpu_encoder_phys *phys_enc,
+ struct dpu_encoder_kickoff_params *params);
+ void (*handle_post_kickoff)(struct dpu_encoder_phys *phys_enc);
+ void (*trigger_start)(struct dpu_encoder_phys *phys_enc);
+ bool (*needs_single_flush)(struct dpu_encoder_phys *phys_enc);
+
+ void (*setup_misr)(struct dpu_encoder_phys *phys_enc,
+ bool enable, u32 frame_count);
+ u32 (*collect_misr)(struct dpu_encoder_phys *phys_enc);
+ void (*hw_reset)(struct dpu_encoder_phys *phys_enc);
+ void (*irq_control)(struct dpu_encoder_phys *phys, bool enable);
+ void (*prepare_idle_pc)(struct dpu_encoder_phys *phys_enc);
+ void (*restore)(struct dpu_encoder_phys *phys);
+ int (*get_line_count)(struct dpu_encoder_phys *phys);
+};
+
+/**
+ * enum dpu_intr_idx - dpu encoder interrupt index
+ * @INTR_IDX_VSYNC: Vsync interrupt for video mode panel
+ * @INTR_IDX_PINGPONG: Pingpong done interrupt for cmd mode panel
+ * @INTR_IDX_UNDERRUN: Underrun interrupt for video and cmd mode panel
+ * @INTR_IDX_CTL_START: Ctl start interrupt for cmd mode panel
+ * @INTR_IDX_RDPTR: Readpointer done interrupt for cmd mode panel
+ */
+enum dpu_intr_idx {
+ INTR_IDX_VSYNC,
+ INTR_IDX_PINGPONG,
+ INTR_IDX_UNDERRUN,
+ INTR_IDX_CTL_START,
+ INTR_IDX_RDPTR,
+ INTR_IDX_MAX,
+};
+
+/**
+ * dpu_encoder_irq - tracking structure for interrupts
+ * @name: string name of interrupt
+ * @intr_type: Encoder interrupt type
+ * @intr_idx: Encoder interrupt enumeration
+ * @hw_idx: HW Block ID
+ * @irq_idx: IRQ interface lookup index from DPU IRQ framework
+ * will be -EINVAL if IRQ is not registered
+ * @irq_cb: interrupt callback
+ */
+struct dpu_encoder_irq {
+ const char *name;
+ enum dpu_intr_type intr_type;
+ enum dpu_intr_idx intr_idx;
+ int hw_idx;
+ int irq_idx;
+ struct dpu_irq_callback cb;
+};
+
+/**
+ * struct dpu_encoder_phys - physical encoder that drives a single INTF block
+ * tied to a specific panel / sub-panel. Abstract type, sub-classed by
+ * phys_vid or phys_cmd for video mode or command mode encs respectively.
+ * @parent: Pointer to the containing virtual encoder
+ * @connector: If a mode is set, cached pointer to the active connector
+ * @ops: Operations exposed to the virtual encoder
+ * @parent_ops: Callbacks exposed by the parent to the phys_enc
+ * @hw_mdptop: Hardware interface to the top registers
+ * @hw_ctl: Hardware interface to the ctl registers
+ * @hw_cdm: Hardware interface to the cdm registers
+ * @cdm_cfg: Chroma-down hardware configuration
+ * @hw_pp: Hardware interface to the ping pong registers
+ * @dpu_kms: Pointer to the dpu_kms top level
+ * @cached_mode: DRM mode cached at mode_set time, acted on in enable
+ * @split_role: Role to play in a split-panel configuration
+ * @intf_mode: Interface mode
+ * @intf_idx: Interface index on dpu hardware
+ * @topology_name: topology selected for the display
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ * @enable_state: Enable state tracking
+ * @vblank_refcount: Reference count of vblank request
+ * @vsync_cnt: Vsync count for the physical encoder
+ * @underrun_cnt: Underrun count for the physical encoder
+ * @pending_kickoff_cnt: Atomic counter tracking the number of kickoffs
+ * vs. the number of done/vblank irqs. Should hover
+ * between 0-2. Incremented when a new kickoff is
+ * scheduled. Decremented in irq handler.
+ * @pending_ctlstart_cnt: Atomic counter tracking the number of ctl start
+ * pending.
+ * @pending_kickoff_wq: Wait queue for blocking until kickoff completes
+ * @irq: IRQ tracking structures
+ */
+struct dpu_encoder_phys {
+ struct drm_encoder *parent;
+ struct drm_connector *connector;
+ struct dpu_encoder_phys_ops ops;
+ const struct dpu_encoder_virt_ops *parent_ops;
+ struct dpu_hw_mdp *hw_mdptop;
+ struct dpu_hw_ctl *hw_ctl;
+ struct dpu_hw_cdm *hw_cdm;
+ struct dpu_hw_cdm_cfg cdm_cfg;
+ struct dpu_hw_pingpong *hw_pp;
+ struct dpu_kms *dpu_kms;
+ struct drm_display_mode cached_mode;
+ enum dpu_enc_split_role split_role;
+ enum dpu_intf_mode intf_mode;
+ enum dpu_intf intf_idx;
+ enum dpu_rm_topology_name topology_name;
+ spinlock_t *enc_spinlock;
+ enum dpu_enc_enable_state enable_state;
+ atomic_t vblank_refcount;
+ atomic_t vsync_cnt;
+ atomic_t underrun_cnt;
+ atomic_t pending_ctlstart_cnt;
+ atomic_t pending_kickoff_cnt;
+ wait_queue_head_t pending_kickoff_wq;
+ struct dpu_encoder_irq irq[INTR_IDX_MAX];
+};
+
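+/*
+ * Both counters are incremented at kickoff time and decremented from the
+ * irq handlers (pp_done for pending_kickoff_cnt, ctl_start for
+ * pending_ctlstart_cnt), so per the note above they should hover around
+ * 0-2 in steady state.
+ */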
+static inline int dpu_encoder_phys_inc_pending(struct dpu_encoder_phys *phys)
+{
+ atomic_inc_return(&phys->pending_ctlstart_cnt);
+ return atomic_inc_return(&phys->pending_kickoff_cnt);
+}
+
+/**
+ * struct dpu_encoder_phys_vid - sub-class of dpu_encoder_phys to handle video
+ * mode specific operations
+ * @base: Baseclass physical encoder structure
+ * @hw_intf: Hardware interface to the intf registers
+ * @timing_params: Current timing parameter
+ */
+struct dpu_encoder_phys_vid {
+ struct dpu_encoder_phys base;
+ struct dpu_hw_intf *hw_intf;
+ struct intf_timing_params timing_params;
+};
+
+/**
+ * struct dpu_encoder_phys_cmd - sub-class of dpu_encoder_phys to handle command
+ * mode specific operations
+ * @base: Baseclass physical encoder structure
+ * @stream_sel: Stream selection for multi-stream interfaces
+ * @serialize_wait4pp: serialize wait4pp feature waits for pp_done interrupt
+ * after ctl_start instead of before next frame kickoff
+ * @pp_timeout_report_cnt: number of pingpong done irq timeout errors
+ * @pending_vblank_cnt: Atomic counter tracking pending wait for VBLANK
+ * @pending_vblank_wq: Wait queue for blocking until VBLANK received
+ */
+struct dpu_encoder_phys_cmd {
+ struct dpu_encoder_phys base;
+ int stream_sel;
+ bool serialize_wait4pp;
+ int pp_timeout_report_cnt;
+ atomic_t pending_vblank_cnt;
+ wait_queue_head_t pending_vblank_wq;
+};
+
+/**
+ * struct dpu_enc_phys_init_params - initialization parameters for phys encs
+ * @dpu_kms: Pointer to the dpu_kms top level
+ * @parent: Pointer to the containing virtual encoder
+ * @parent_ops: Callbacks exposed by the parent to the phys_enc
+ * @split_role: Role to play in a split-panel configuration
+ * @intf_idx: Interface index this phys_enc will control
+ * @enc_spinlock: Virtual-Encoder-Wide Spin Lock for IRQ purposes
+ */
+struct dpu_enc_phys_init_params {
+ struct dpu_kms *dpu_kms;
+ struct drm_encoder *parent;
+ const struct dpu_encoder_virt_ops *parent_ops;
+ enum dpu_enc_split_role split_role;
+ enum dpu_intf intf_idx;
+ spinlock_t *enc_spinlock;
+};
+
+/**
+ * dpu_encoder_wait_info - container for passing arguments to irq wait functions
+ * @wq: wait queue structure
+ * @atomic_cnt: wait until atomic_cnt equals zero
+ * @timeout_ms: timeout value in milliseconds
+ */
+struct dpu_encoder_wait_info {
+ wait_queue_head_t *wq;
+ atomic_t *atomic_cnt;
+ s64 timeout_ms;
+};
+
+/**
+ * dpu_encoder_phys_vid_init - Construct a new video mode physical encoder
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+ struct dpu_enc_phys_init_params *p);
+
+/**
+ * dpu_encoder_phys_cmd_init - Construct a new command mode physical encoder
+ * @p: Pointer to init params structure
+ * Return: Error code or newly allocated encoder
+ */
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+ struct dpu_enc_phys_init_params *p);
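+
+/*
+ * Illustrative construction, mirroring how dpu_encoder_setup_display()
+ * fills the init params; the intf index and split role here are
+ * assumptions:
+ *
+ *	struct dpu_enc_phys_init_params p = {
+ *		.dpu_kms = dpu_kms,
+ *		.parent = &dpu_enc->base,
+ *		.parent_ops = &dpu_encoder_parent_ops,
+ *		.split_role = ENC_ROLE_SOLO,
+ *		.intf_idx = INTF_1,
+ *		.enc_spinlock = &dpu_enc->enc_spinlock,
+ *	};
+ *	struct dpu_encoder_phys *phys = dpu_encoder_phys_cmd_init(&p);
+ */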
+
+/**
+ * dpu_encoder_helper_trigger_start - control start helper function
+ * This helper function may be optionally specified by physical
+ * encoders if they require ctl_start triggering.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+void dpu_encoder_helper_trigger_start(struct dpu_encoder_phys *phys_enc);
+
+/**
+ * dpu_encoder_helper_hw_reset - issue ctl hw reset
+ * This helper function may be optionally specified by physical
+ * encoders if they require ctl hw reset. If state is currently
+ * DPU_ENC_ERR_NEEDS_HW_RESET, it is set back to DPU_ENC_ENABLED.
+ * @phys_enc: Pointer to physical encoder structure
+ */
+void dpu_encoder_helper_hw_reset(struct dpu_encoder_phys *phys_enc);
+
+static inline enum dpu_3d_blend_mode dpu_encoder_helper_get_3d_blend_mode(
+ struct dpu_encoder_phys *phys_enc)
+{
+ if (!phys_enc || phys_enc->enable_state == DPU_ENC_DISABLING)
+ return BLEND_3D_NONE;
+
+ if (phys_enc->split_role == ENC_ROLE_SOLO &&
+ phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE)
+ return BLEND_3D_H_ROW_INT;
+
+ return BLEND_3D_NONE;
+}
+
+/**
+ * dpu_encoder_helper_split_config - split display configuration helper function
+ * This helper function may be used by physical encoders to configure
+ * the split display related registers.
+ * @phys_enc: Pointer to physical encoder structure
+ * @interface: enum dpu_intf setting
+ */
+void dpu_encoder_helper_split_config(
+ struct dpu_encoder_phys *phys_enc,
+ enum dpu_intf interface);
+
+/**
+ * dpu_encoder_helper_report_irq_timeout - utility to report error that irq has
+ * timed out, including reporting frame error event to crtc and debug dump
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: Failing interrupt index
+ */
+void dpu_encoder_helper_report_irq_timeout(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx);
+
+/**
+ * dpu_encoder_helper_wait_for_irq - utility to wait on an irq.
+ * note: will call dpu_encoder_helper_report_irq_timeout on timeout
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * @wait_info: wait info struct
+ * Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_wait_for_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx,
+ struct dpu_encoder_wait_info *wait_info);
+
+/**
+ * dpu_encoder_helper_register_irq - register and enable an irq
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_register_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx);
+
+/**
+ * dpu_encoder_helper_unregister_irq - unregister and disable an irq
+ * @phys_enc: Pointer to physical encoder structure
+ * @intr_idx: encoder interrupt index
+ * Return: 0 or -ERROR
+ */
+int dpu_encoder_helper_unregister_irq(struct dpu_encoder_phys *phys_enc,
+ enum dpu_intr_idx intr_idx);
+
+#endif /* __DPU_ENCODER_PHYS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
new file mode 100644
index 000000000000..3084675ed425
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_cmd.c
@@ -0,0 +1,905 @@
+/*
+ * Copyright (c) 2015-2018 The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_trace.h"
+
+#define DPU_DEBUG_CMDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_CMDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) ? (e)->base.intf_idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define to_dpu_encoder_phys_cmd(x) \
+ container_of(x, struct dpu_encoder_phys_cmd, base)
+
+#define PP_TIMEOUT_MAX_TRIALS 10
+
+/*
+ * Tearcheck sync start and continue thresholds are empirically found
+ * based on common panels. In the future, we may want to allow panels to
+ * override these default values.
+ */
+#define DEFAULT_TEARCHECK_SYNC_THRESH_START 4
+#define DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE 4
+
+#define DPU_ENC_WR_PTR_START_TIMEOUT_US 20000
+
+static inline int _dpu_encoder_phys_cmd_get_idle_timeout(
+ struct dpu_encoder_phys_cmd *cmd_enc)
+{
+ return KICKOFF_TIMEOUT_MS;
+}
+
+static inline bool dpu_encoder_phys_cmd_is_master(
+ struct dpu_encoder_phys *phys_enc)
+{
+ return phys_enc->split_role != ENC_ROLE_SLAVE;
+}
+
+static bool dpu_encoder_phys_cmd_mode_fixup(
+ struct dpu_encoder_phys *phys_enc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ if (phys_enc)
+ DPU_DEBUG_CMDENC(to_dpu_encoder_phys_cmd(phys_enc), "\n");
+ return true;
+}
+
+static void _dpu_encoder_phys_cmd_update_intf_cfg(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_hw_ctl *ctl;
+ struct dpu_hw_intf_cfg intf_cfg = { 0 };
+
+ if (!phys_enc)
+ return;
+
+ ctl = phys_enc->hw_ctl;
+ if (!ctl || !ctl->ops.setup_intf_cfg)
+ return;
+
+ intf_cfg.intf = phys_enc->intf_idx;
+ intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_CMD;
+ intf_cfg.stream_sel = cmd_enc->stream_sel;
+ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+ ctl->ops.setup_intf_cfg(ctl, &intf_cfg);
+}
+
+static void dpu_encoder_phys_cmd_pp_tx_done_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ unsigned long lock_flags;
+ int new_cnt;
+ u32 event = DPU_ENCODER_FRAME_EVENT_DONE;
+
+ if (!phys_enc || !phys_enc->hw_pp)
+ return;
+
+ DPU_ATRACE_BEGIN("pp_done_irq");
+ /* notify all synchronous clients first, then asynchronous clients */
+ if (phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(phys_enc->parent,
+ phys_enc, event);
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ trace_dpu_enc_phys_cmd_pp_tx_done(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ new_cnt, event);
+
+ /* Signal any waiting atomic commit thread */
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+ DPU_ATRACE_END("pp_done_irq");
+}
+
+static void dpu_encoder_phys_cmd_pp_rd_ptr_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+
+ if (!phys_enc || !phys_enc->hw_pp)
+ return;
+
+ DPU_ATRACE_BEGIN("rd_ptr_irq");
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (phys_enc->parent_ops->handle_vblank_virt)
+ phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
+ phys_enc);
+
+ atomic_add_unless(&cmd_enc->pending_vblank_cnt, -1, 0);
+ wake_up_all(&cmd_enc->pending_vblank_wq);
+ DPU_ATRACE_END("rd_ptr_irq");
+}
+
+static void dpu_encoder_phys_cmd_ctl_start_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+
+ if (!phys_enc || !phys_enc->hw_ctl)
+ return;
+
+ DPU_ATRACE_BEGIN("ctl_start_irq");
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ atomic_add_unless(&phys_enc->pending_ctlstart_cnt, -1, 0);
+
+ /* Signal any waiting ctl start interrupt */
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+ DPU_ATRACE_END("ctl_start_irq");
+}
+
+static void dpu_encoder_phys_cmd_underrun_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+
+ if (!phys_enc)
+ return;
+
+ if (phys_enc->parent_ops->handle_underrun_virt)
+ phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
+ phys_enc);
+}
+
+static void _dpu_encoder_phys_cmd_setup_irq_hw_idx(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_irq *irq;
+
+ irq = &phys_enc->irq[INTR_IDX_CTL_START];
+ irq->hw_idx = phys_enc->hw_ctl->idx;
+ irq->irq_idx = -EINVAL;
+
+ irq = &phys_enc->irq[INTR_IDX_PINGPONG];
+ irq->hw_idx = phys_enc->hw_pp->idx;
+ irq->irq_idx = -EINVAL;
+
+ irq = &phys_enc->irq[INTR_IDX_RDPTR];
+ irq->hw_idx = phys_enc->hw_pp->idx;
+ irq->irq_idx = -EINVAL;
+
+ irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+ irq->hw_idx = phys_enc->intf_idx;
+ irq->irq_idx = -EINVAL;
+}
+
+static void dpu_encoder_phys_cmd_mode_set(
+ struct dpu_encoder_phys *phys_enc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_rm *rm = &phys_enc->dpu_kms->rm;
+ struct dpu_rm_hw_iter iter;
+ int i, instance;
+
+ if (!phys_enc || !mode || !adj_mode) {
+ DPU_ERROR("invalid args\n");
+ return;
+ }
+ phys_enc->cached_mode = *adj_mode;
+ DPU_DEBUG_CMDENC(cmd_enc, "caching mode:\n");
+ drm_mode_debug_printmodeline(adj_mode);
+
+ instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
+
+ /* Retrieve previously allocated HW Resources. Shouldn't fail */
+ dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
+ for (i = 0; i <= instance; i++) {
+ if (dpu_rm_get_hw(rm, &iter))
+ phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
+ }
+
+ if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+ DPU_ERROR_CMDENC(cmd_enc, "failed to init ctl: %ld\n",
+ PTR_ERR(phys_enc->hw_ctl));
+ phys_enc->hw_ctl = NULL;
+ return;
+ }
+
+ _dpu_encoder_phys_cmd_setup_irq_hw_idx(phys_enc);
+}
+
+static int _dpu_encoder_phys_cmd_handle_ppdone_timeout(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ u32 frame_event = DPU_ENCODER_FRAME_EVENT_ERROR;
+ bool do_log = false;
+
+ if (!phys_enc || !phys_enc->hw_pp || !phys_enc->hw_ctl)
+ return -EINVAL;
+
+ cmd_enc->pp_timeout_report_cnt++;
+ if (cmd_enc->pp_timeout_report_cnt == PP_TIMEOUT_MAX_TRIALS) {
+ frame_event |= DPU_ENCODER_FRAME_EVENT_PANEL_DEAD;
+ do_log = true;
+ } else if (cmd_enc->pp_timeout_report_cnt == 1) {
+ do_log = true;
+ }
+
+ trace_dpu_enc_phys_cmd_pdone_timeout(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ cmd_enc->pp_timeout_report_cnt,
+ atomic_read(&phys_enc->pending_kickoff_cnt),
+ frame_event);
+
+ /* to avoid flooding, only log first time, and "dead" time */
+ if (do_log) {
+ DRM_ERROR("id:%d pp:%d kickoff timeout %d cnt %d koff_cnt %d\n",
+ DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ phys_enc->hw_ctl->idx - CTL_0,
+ cmd_enc->pp_timeout_report_cnt,
+ atomic_read(&phys_enc->pending_kickoff_cnt));
+
+ dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_RDPTR);
+ dpu_dbg_dump(false, __func__, true, true);
+ }
+
+ atomic_add_unless(&phys_enc->pending_kickoff_cnt, -1, 0);
+
+ /* request a ctl reset before the next kickoff */
+ phys_enc->enable_state = DPU_ENC_ERR_NEEDS_HW_RESET;
+
+ if (phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(
+ phys_enc->parent, phys_enc, frame_event);
+
+ return -ETIMEDOUT;
+}
+
+static int _dpu_encoder_phys_cmd_wait_for_idle(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_encoder_wait_info wait_info;
+ int ret;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_PINGPONG,
+ &wait_info);
+ if (ret == -ETIMEDOUT)
+ _dpu_encoder_phys_cmd_handle_ppdone_timeout(phys_enc);
+ else if (!ret)
+ cmd_enc->pp_timeout_report_cnt = 0;
+
+ return ret;
+}
+
+static int dpu_encoder_phys_cmd_control_vblank_irq(
+ struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ int ret = 0;
+ int refcount;
+
+ if (!phys_enc || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ refcount = atomic_read(&phys_enc->vblank_refcount);
+
+ /* Slave encoders don't report vblank */
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ goto end;
+
+ /* protect against negative */
+ if (!enable && refcount == 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ DRM_DEBUG_KMS("id:%u pp:%d enable=%s/%d\n", DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ enable ? "true" : "false", refcount);
+
+ if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+ ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_RDPTR);
+ else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+ ret = dpu_encoder_helper_unregister_irq(phys_enc,
+ INTR_IDX_RDPTR);
+
+end:
+ if (ret) {
+ DRM_ERROR("vblank irq err id:%u pp:%d ret:%d, enable %s/%d\n",
+ DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0, ret,
+ enable ? "true" : "false", refcount);
+ }
+
+ return ret;
+}
+
+static void dpu_encoder_phys_cmd_irq_control(struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc;
+
+ if (!phys_enc)
+ return;
+
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ trace_dpu_enc_phys_cmd_irq_ctrl(DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ enable, atomic_read(&phys_enc->vblank_refcount));
+
+ if (enable) {
+ dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_PINGPONG);
+ dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
+ dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, true);
+
+ if (dpu_encoder_phys_cmd_is_master(phys_enc))
+ dpu_encoder_helper_register_irq(phys_enc,
+ INTR_IDX_CTL_START);
+ } else {
+ if (dpu_encoder_phys_cmd_is_master(phys_enc))
+ dpu_encoder_helper_unregister_irq(phys_enc,
+ INTR_IDX_CTL_START);
+
+ dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
+ dpu_encoder_phys_cmd_control_vblank_irq(phys_enc, false);
+ dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_PINGPONG);
+ }
+}
+
+static void dpu_encoder_phys_cmd_tearcheck_config(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_hw_tear_check tc_cfg = { 0 };
+ struct drm_display_mode *mode;
+ bool tc_enable = true;
+ u32 vsync_hz;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!phys_enc || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ mode = &phys_enc->cached_mode;
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+ if (!phys_enc->hw_pp->ops.setup_tearcheck ||
+ !phys_enc->hw_pp->ops.enable_tearcheck) {
+ DPU_DEBUG_CMDENC(cmd_enc, "tearcheck not supported\n");
+ return;
+ }
+
+ dpu_kms = phys_enc->dpu_kms;
+ if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev_private) {
+ DPU_ERROR("invalid device\n");
+ return;
+ }
+ priv = dpu_kms->dev->dev_private;
+
+ /*
+ * TE default: dsi byte clock calculated based on 70 fps;
+ * around 14 ms to complete a kickoff cycle if te disabled;
+ * vclk_line based on 60 fps; write is faster than read;
+ * init == start == rdptr;
+ *
+ * vsync_count is the ratio of MDP VSYNC clock frequency to LCD panel
+ * frequency, divided by the no. of rows (lines) in the LCD panel.
+ */
+ vsync_hz = dpu_kms_get_clk_rate(dpu_kms, "vsync");
+ if (!vsync_hz) {
+ DPU_DEBUG_CMDENC(cmd_enc, "invalid - vsync_hz %u\n",
+ vsync_hz);
+ return;
+ }
+
+ tc_cfg.vsync_count = vsync_hz / (mode->vtotal * mode->vrefresh);
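+ /*
+ * Worked example with assumed numbers: a 19.2 MHz vsync clock and a
+ * mode with vtotal 2208 at 60 fps gives
+ * 19200000 / (2208 * 60) ~= 144 ticks per display line.
+ */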
+
+ /* enable external TE after kickoff to avoid premature autorefresh */
+ tc_cfg.hw_vsync_mode = 0;
+
+ /*
+ * By setting sync_cfg_height to near max register value, we essentially
+ * disable dpu hw generated TE signal, since hw TE will arrive first.
+ * Only caveat is if due to error, we hit wrap-around.
+ */
+ tc_cfg.sync_cfg_height = 0xFFF0;
+ tc_cfg.vsync_init_val = mode->vdisplay;
+ tc_cfg.sync_threshold_start = DEFAULT_TEARCHECK_SYNC_THRESH_START;
+ tc_cfg.sync_threshold_continue = DEFAULT_TEARCHECK_SYNC_THRESH_CONTINUE;
+ tc_cfg.start_pos = mode->vdisplay;
+ tc_cfg.rd_ptr_irq = mode->vdisplay + 1;
+
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc %d vsync_clk_speed_hz %u vtotal %u vrefresh %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, vsync_hz,
+ mode->vtotal, mode->vrefresh);
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc %d enable %u start_pos %u rd_ptr_irq %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, tc_enable, tc_cfg.start_pos,
+ tc_cfg.rd_ptr_irq);
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc %d hw_vsync_mode %u vsync_count %u vsync_init_val %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.hw_vsync_mode,
+ tc_cfg.vsync_count, tc_cfg.vsync_init_val);
+ DPU_DEBUG_CMDENC(cmd_enc,
+ "tc %d cfgheight %u thresh_start %u thresh_cont %u\n",
+ phys_enc->hw_pp->idx - PINGPONG_0, tc_cfg.sync_cfg_height,
+ tc_cfg.sync_threshold_start, tc_cfg.sync_threshold_continue);
+
+ phys_enc->hw_pp->ops.setup_tearcheck(phys_enc->hw_pp, &tc_cfg);
+ phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, tc_enable);
+}
+
+static void _dpu_encoder_phys_cmd_pingpong_config(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp
+ || !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+ DPU_ERROR("invalid arg(s), enc %d\n", phys_enc != 0);
+ return;
+ }
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp %d, enabling mode:\n",
+ phys_enc->hw_pp->idx - PINGPONG_0);
+ drm_mode_debug_printmodeline(&phys_enc->cached_mode);
+
+ _dpu_encoder_phys_cmd_update_intf_cfg(phys_enc);
+ dpu_encoder_phys_cmd_tearcheck_config(phys_enc);
+}
+
+static bool dpu_encoder_phys_cmd_needs_single_flush(
+ struct dpu_encoder_phys *phys_enc)
+{
+ /*
+ * we do separate flush for each CTL and let
+ * CTL_START synchronize them
+ */
+ return false;
+}
+
+static void dpu_encoder_phys_cmd_enable_helper(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_ctl *ctl;
+ u32 flush_mask = 0;
+
+ if (!phys_enc || !phys_enc->hw_ctl || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid arg(s), encoder %d\n", phys_enc != 0);
+ return;
+ }
+
+ dpu_encoder_helper_split_config(phys_enc, phys_enc->intf_idx);
+
+ _dpu_encoder_phys_cmd_pingpong_config(phys_enc);
+
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ goto skip_flush;
+
+ ctl = phys_enc->hw_ctl;
+ ctl->ops.get_bitmask_intf(ctl, &flush_mask, phys_enc->intf_idx);
+ ctl->ops.update_pending_flush(ctl, flush_mask);
+
+skip_flush:
+ return;
+}
+
+static void dpu_encoder_phys_cmd_enable(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid phys encoder\n");
+ return;
+ }
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp %d\n", phys_enc->hw_pp->idx - PINGPONG_0);
+
+ if (phys_enc->enable_state == DPU_ENC_ENABLED) {
+ DPU_ERROR("already enabled\n");
+ return;
+ }
+
+ dpu_encoder_phys_cmd_enable_helper(phys_enc);
+ phys_enc->enable_state = DPU_ENC_ENABLED;
+}
+
+static void _dpu_encoder_phys_cmd_connect_te(
+ struct dpu_encoder_phys *phys_enc, bool enable)
+{
+ if (!phys_enc || !phys_enc->hw_pp ||
+ !phys_enc->hw_pp->ops.connect_external_te)
+ return;
+
+ trace_dpu_enc_phys_cmd_connect_te(DRMID(phys_enc->parent), enable);
+ phys_enc->hw_pp->ops.connect_external_te(phys_enc->hw_pp, enable);
+}
+
+static void dpu_encoder_phys_cmd_prepare_idle_pc(
+ struct dpu_encoder_phys *phys_enc)
+{
+ _dpu_encoder_phys_cmd_connect_te(phys_enc, false);
+}
+
+static int dpu_encoder_phys_cmd_get_line_count(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_hw_pingpong *hw_pp;
+
+ if (!phys_enc || !phys_enc->hw_pp)
+ return -EINVAL;
+
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ return -EINVAL;
+
+ hw_pp = phys_enc->hw_pp;
+ if (!hw_pp->ops.get_line_count)
+ return -EINVAL;
+
+ return hw_pp->ops.get_line_count(hw_pp);
+}
+
+static void dpu_encoder_phys_cmd_disable(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ DRM_DEBUG_KMS("id:%u pp:%d state:%d\n", DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ phys_enc->enable_state);
+
+ if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+ DPU_ERROR_CMDENC(cmd_enc, "already disabled\n");
+ return;
+ }
+
+ if (phys_enc->hw_pp->ops.enable_tearcheck)
+ phys_enc->hw_pp->ops.enable_tearcheck(phys_enc->hw_pp, false);
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+}
+
+static void dpu_encoder_phys_cmd_destroy(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ kfree(cmd_enc);
+}
+
+static void dpu_encoder_phys_cmd_get_hw_resources(
+ struct dpu_encoder_phys *phys_enc,
+ struct dpu_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ if ((phys_enc->intf_idx - INTF_0) >= INTF_MAX) {
+ DPU_ERROR("invalid intf idx:%d\n", phys_enc->intf_idx);
+ return;
+ }
+
+ DPU_DEBUG_CMDENC(cmd_enc, "\n");
+ hw_res->intfs[phys_enc->intf_idx - INTF_0] = INTF_MODE_CMD;
+}
+
+static void dpu_encoder_phys_cmd_prepare_for_kickoff(
+ struct dpu_encoder_phys *phys_enc,
+ struct dpu_encoder_kickoff_params *params)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ int ret;
+
+ if (!phys_enc || !phys_enc->hw_pp) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+ DRM_DEBUG_KMS("id:%u pp:%d pending_cnt:%d\n", DRMID(phys_enc->parent),
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(&phys_enc->pending_kickoff_cnt));
+
+ /*
+ * Mark kickoff request as outstanding. If more than one is
+ * outstanding, we have to wait for the previous one to complete.
+ */
+ ret = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
+ if (ret) {
+ /* force pending_kickoff_cnt 0 to discard failed kickoff */
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ DRM_ERROR("failed wait_for_idle: id:%u ret:%d pp:%d\n",
+ DRMID(phys_enc->parent), ret,
+ phys_enc->hw_pp->idx - PINGPONG_0);
+ }
+
+ DPU_DEBUG_CMDENC(cmd_enc, "pp:%d pending_cnt %d\n",
+ phys_enc->hw_pp->idx - PINGPONG_0,
+ atomic_read(&phys_enc->pending_kickoff_cnt));
+}
+
+static int _dpu_encoder_phys_cmd_wait_for_ctl_start(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_cmd *cmd_enc =
+ to_dpu_encoder_phys_cmd(phys_enc);
+ struct dpu_encoder_wait_info wait_info;
+ int ret;
+
+ if (!phys_enc || !phys_enc->hw_ctl) {
+ DPU_ERROR("invalid argument(s)\n");
+ return -EINVAL;
+ }
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_ctlstart_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_CTL_START,
+ &wait_info);
+ if (ret == -ETIMEDOUT) {
+ DPU_ERROR_CMDENC(cmd_enc, "ctl start interrupt wait failed\n");
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_tx_complete(
+ struct dpu_encoder_phys *phys_enc)
+{
+ int rc;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+
+ if (!phys_enc)
+ return -EINVAL;
+
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ rc = _dpu_encoder_phys_cmd_wait_for_idle(phys_enc);
+ if (rc) {
+ DRM_ERROR("failed wait_for_idle: id:%u ret:%d intf:%d\n",
+ DRMID(phys_enc->parent), rc,
+ phys_enc->intf_idx - INTF_0);
+ }
+
+ return rc;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_commit_done(
+ struct dpu_encoder_phys *phys_enc)
+{
+ int rc = 0;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+
+ if (!phys_enc)
+ return -EINVAL;
+
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ /* only required for master controller */
+ if (dpu_encoder_phys_cmd_is_master(phys_enc))
+ rc = _dpu_encoder_phys_cmd_wait_for_ctl_start(phys_enc);
+
+ /* required for both controllers */
+ if (!rc && cmd_enc->serialize_wait4pp)
+ dpu_encoder_phys_cmd_prepare_for_kickoff(phys_enc, NULL);
+
+ return rc;
+}
+
+static int dpu_encoder_phys_cmd_wait_for_vblank(
+ struct dpu_encoder_phys *phys_enc)
+{
+ int rc = 0;
+ struct dpu_encoder_phys_cmd *cmd_enc;
+ struct dpu_encoder_wait_info wait_info;
+
+ if (!phys_enc)
+ return -EINVAL;
+
+ cmd_enc = to_dpu_encoder_phys_cmd(phys_enc);
+
+ /* only required for master controller */
+ if (!dpu_encoder_phys_cmd_is_master(phys_enc))
+ return rc;
+
+ wait_info.wq = &cmd_enc->pending_vblank_wq;
+ wait_info.atomic_cnt = &cmd_enc->pending_vblank_cnt;
+ wait_info.timeout_ms = _dpu_encoder_phys_cmd_get_idle_timeout(cmd_enc);
+
+ atomic_inc(&cmd_enc->pending_vblank_cnt);
+
+ rc = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_RDPTR,
+ &wait_info);
+
+ return rc;
+}
+
+static void dpu_encoder_phys_cmd_handle_post_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+{
+ if (!phys_enc)
+ return;
+
+ /*
+ * re-enable external TE, either for the first time after enabling
+ * or if disabled for Autorefresh
+ */
+ _dpu_encoder_phys_cmd_connect_te(phys_enc, true);
+}
+
+static void dpu_encoder_phys_cmd_trigger_start(
+ struct dpu_encoder_phys *phys_enc)
+{
+ if (!phys_enc)
+ return;
+
+ dpu_encoder_helper_trigger_start(phys_enc);
+}
+
+static void dpu_encoder_phys_cmd_init_ops(
+ struct dpu_encoder_phys_ops *ops)
+{
+ ops->is_master = dpu_encoder_phys_cmd_is_master;
+ ops->mode_set = dpu_encoder_phys_cmd_mode_set;
+ ops->mode_fixup = dpu_encoder_phys_cmd_mode_fixup;
+ ops->enable = dpu_encoder_phys_cmd_enable;
+ ops->disable = dpu_encoder_phys_cmd_disable;
+ ops->destroy = dpu_encoder_phys_cmd_destroy;
+ ops->get_hw_resources = dpu_encoder_phys_cmd_get_hw_resources;
+ ops->control_vblank_irq = dpu_encoder_phys_cmd_control_vblank_irq;
+ ops->wait_for_commit_done = dpu_encoder_phys_cmd_wait_for_commit_done;
+ ops->prepare_for_kickoff = dpu_encoder_phys_cmd_prepare_for_kickoff;
+ ops->wait_for_tx_complete = dpu_encoder_phys_cmd_wait_for_tx_complete;
+ ops->wait_for_vblank = dpu_encoder_phys_cmd_wait_for_vblank;
+ ops->trigger_start = dpu_encoder_phys_cmd_trigger_start;
+ ops->needs_single_flush = dpu_encoder_phys_cmd_needs_single_flush;
+ ops->hw_reset = dpu_encoder_helper_hw_reset;
+ ops->irq_control = dpu_encoder_phys_cmd_irq_control;
+ ops->restore = dpu_encoder_phys_cmd_enable_helper;
+ ops->prepare_idle_pc = dpu_encoder_phys_cmd_prepare_idle_pc;
+ ops->handle_post_kickoff = dpu_encoder_phys_cmd_handle_post_kickoff;
+ ops->get_line_count = dpu_encoder_phys_cmd_get_line_count;
+}
+
+struct dpu_encoder_phys *dpu_encoder_phys_cmd_init(
+ struct dpu_enc_phys_init_params *p)
+{
+ struct dpu_encoder_phys *phys_enc = NULL;
+ struct dpu_encoder_phys_cmd *cmd_enc = NULL;
+ struct dpu_hw_mdp *hw_mdp;
+ struct dpu_encoder_irq *irq;
+ int i, ret = 0;
+
+ DPU_DEBUG("intf %d\n", p->intf_idx - INTF_0);
+
+ cmd_enc = kzalloc(sizeof(*cmd_enc), GFP_KERNEL);
+ if (!cmd_enc) {
+ ret = -ENOMEM;
+ DPU_ERROR("failed to allocate\n");
+ goto fail;
+ }
+ phys_enc = &cmd_enc->base;
+
+ hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
+ if (IS_ERR_OR_NULL(hw_mdp)) {
+ ret = PTR_ERR(hw_mdp);
+ DPU_ERROR("failed to get mdptop\n");
+ goto fail_mdp_init;
+ }
+ phys_enc->hw_mdptop = hw_mdp;
+ phys_enc->intf_idx = p->intf_idx;
+
+ dpu_encoder_phys_cmd_init_ops(&phys_enc->ops);
+ phys_enc->parent = p->parent;
+ phys_enc->parent_ops = p->parent_ops;
+ phys_enc->dpu_kms = p->dpu_kms;
+ phys_enc->split_role = p->split_role;
+ phys_enc->intf_mode = INTF_MODE_CMD;
+ phys_enc->enc_spinlock = p->enc_spinlock;
+ cmd_enc->stream_sel = 0;
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+ for (i = 0; i < INTR_IDX_MAX; i++) {
+ irq = &phys_enc->irq[i];
+ INIT_LIST_HEAD(&irq->cb.list);
+ irq->irq_idx = -EINVAL;
+ irq->hw_idx = -EINVAL;
+ irq->cb.arg = phys_enc;
+ }
+
+ irq = &phys_enc->irq[INTR_IDX_CTL_START];
+ irq->name = "ctl_start";
+ irq->intr_type = DPU_IRQ_TYPE_CTL_START;
+ irq->intr_idx = INTR_IDX_CTL_START;
+ irq->cb.func = dpu_encoder_phys_cmd_ctl_start_irq;
+
+ irq = &phys_enc->irq[INTR_IDX_PINGPONG];
+ irq->name = "pp_done";
+ irq->intr_type = DPU_IRQ_TYPE_PING_PONG_COMP;
+ irq->intr_idx = INTR_IDX_PINGPONG;
+ irq->cb.func = dpu_encoder_phys_cmd_pp_tx_done_irq;
+
+ irq = &phys_enc->irq[INTR_IDX_RDPTR];
+ irq->name = "pp_rd_ptr";
+ irq->intr_type = DPU_IRQ_TYPE_PING_PONG_RD_PTR;
+ irq->intr_idx = INTR_IDX_RDPTR;
+ irq->cb.func = dpu_encoder_phys_cmd_pp_rd_ptr_irq;
+
+ irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+ irq->name = "underrun";
+ irq->intr_type = DPU_IRQ_TYPE_INTF_UNDER_RUN;
+ irq->intr_idx = INTR_IDX_UNDERRUN;
+ irq->cb.func = dpu_encoder_phys_cmd_underrun_irq;
+
+ atomic_set(&phys_enc->vblank_refcount, 0);
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ atomic_set(&phys_enc->pending_ctlstart_cnt, 0);
+ atomic_set(&cmd_enc->pending_vblank_cnt, 0);
+ init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+ init_waitqueue_head(&cmd_enc->pending_vblank_wq);
+
+ DPU_DEBUG_CMDENC(cmd_enc, "created\n");
+
+ return phys_enc;
+
+fail_mdp_init:
+ kfree(cmd_enc);
+fail:
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
new file mode 100644
index 000000000000..14fc7c2a6bb7
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_encoder_phys_vid.c
@@ -0,0 +1,922 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_trace.h"
+
+#define DPU_DEBUG_VIDENC(e, fmt, ...) DPU_DEBUG("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) && (e)->hw_intf ? \
+ (e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_VIDENC(e, fmt, ...) DPU_ERROR("enc%d intf%d " fmt, \
+ (e) && (e)->base.parent ? \
+ (e)->base.parent->base.id : -1, \
+ (e) && (e)->hw_intf ? \
+ (e)->hw_intf->idx - INTF_0 : -1, ##__VA_ARGS__)
+
+#define to_dpu_encoder_phys_vid(x) \
+ container_of(x, struct dpu_encoder_phys_vid, base)
+
+static bool dpu_encoder_phys_vid_is_master(
+ struct dpu_encoder_phys *phys_enc)
+{
+ return phys_enc->split_role != ENC_ROLE_SLAVE;
+}
+
+static void drm_mode_to_intf_timing_params(
+ const struct dpu_encoder_phys_vid *vid_enc,
+ const struct drm_display_mode *mode,
+ struct intf_timing_params *timing)
+{
+ memset(timing, 0, sizeof(*timing));
+
+ if ((mode->htotal < mode->hsync_end)
+ || (mode->hsync_start < mode->hdisplay)
+ || (mode->vtotal < mode->vsync_end)
+ || (mode->vsync_start < mode->vdisplay)
+ || (mode->hsync_end < mode->hsync_start)
+ || (mode->vsync_end < mode->vsync_start)) {
+ DPU_ERROR(
+ "invalid params - hstart:%d,hend:%d,htot:%d,hdisplay:%d\n",
+ mode->hsync_start, mode->hsync_end,
+ mode->htotal, mode->hdisplay);
+ DPU_ERROR("vstart:%d,vend:%d,vtot:%d,vdisplay:%d\n",
+ mode->vsync_start, mode->vsync_end,
+ mode->vtotal, mode->vdisplay);
+ return;
+ }
+
+ /*
+ * https://www.kernel.org/doc/htmldocs/drm/ch02s05.html
+ * Active Region Front Porch Sync Back Porch
+ * <-----------------><------------><-----><----------->
+ * <- [hv]display --->
+ * <--------- [hv]sync_start ------>
+ * <----------------- [hv]sync_end ------->
+ * <---------------------------- [hv]total ------------->
+ */
+ timing->width = mode->hdisplay; /* active width */
+ timing->height = mode->vdisplay; /* active height */
+ timing->xres = timing->width;
+ timing->yres = timing->height;
+ timing->h_back_porch = mode->htotal - mode->hsync_end;
+ timing->h_front_porch = mode->hsync_start - mode->hdisplay;
+ timing->v_back_porch = mode->vtotal - mode->vsync_end;
+ timing->v_front_porch = mode->vsync_start - mode->vdisplay;
+ timing->hsync_pulse_width = mode->hsync_end - mode->hsync_start;
+ timing->vsync_pulse_width = mode->vsync_end - mode->vsync_start;
+ timing->hsync_polarity = (mode->flags & DRM_MODE_FLAG_NHSYNC) ? 1 : 0;
+ timing->vsync_polarity = (mode->flags & DRM_MODE_FLAG_NVSYNC) ? 1 : 0;
+ timing->border_clr = 0;
+ timing->underflow_clr = 0xff;
+ timing->hsync_skew = mode->hskew;
+
+ /* DSI controller cannot handle active-low sync signals. */
+ if (vid_enc->hw_intf->cap->type == INTF_DSI) {
+ timing->hsync_polarity = 0;
+ timing->vsync_polarity = 0;
+ }
+
+ /*
+ * For edp only:
+ * DISPLAY_V_START = (VBP * HCYCLE) + HBP
+ * DISPLAY_V_END = (VBP + VACTIVE) * HCYCLE - 1 - HFP
+ */
+ /*
+ * if (vid_enc->hw->cap->type == INTF_EDP) {
+ * display_v_start += mode->htotal - mode->hsync_start;
+ * display_v_end -= mode->hsync_start - mode->hdisplay;
+ * }
+ */
+}
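+
+/*
+ * Worked example (illustrative only, not part of this patch): for the
+ * standard 1920x1080@60 mode (hsync_start 2008, hsync_end 2052, htotal
+ * 2200, vsync_start 1084, vsync_end 1089, vtotal 1125), the code above
+ * yields:
+ *
+ *     h_front_porch     = 2008 - 1920 = 88
+ *     hsync_pulse_width = 2052 - 2008 = 44
+ *     h_back_porch      = 2200 - 2052 = 148
+ *     v_front_porch     = 1084 - 1080 = 4
+ *     vsync_pulse_width = 1089 - 1084 = 5
+ *     v_back_porch      = 1125 - 1089 = 36
+ */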
+
+static inline u32 get_horizontal_total(const struct intf_timing_params *timing)
+{
+ u32 active = timing->xres;
+ u32 inactive =
+ timing->h_back_porch + timing->h_front_porch +
+ timing->hsync_pulse_width;
+ return active + inactive;
+}
+
+static inline u32 get_vertical_total(const struct intf_timing_params *timing)
+{
+ u32 active = timing->yres;
+ u32 inactive =
+ timing->v_back_porch + timing->v_front_porch +
+ timing->vsync_pulse_width;
+ return active + inactive;
+}
+
+/*
+ * programmable_fetch_get_num_lines:
+ * Number of fetch lines in vertical front porch
+ * @timing: Pointer to the intf timing information for the requested mode
+ *
+ * Returns the number of fetch lines in the vertical front porch at which
+ * mdp can start fetching the next frame.
+ *
+ * The number of needed prefetch lines is whatever cannot be absorbed in
+ * the start-of-frame time (back porch + vsync pulse width).
+ *
+ * Some panels have a very large VFP; however, we only need a total number
+ * of lines based on the chip's worst-case latencies.
+ */
+static u32 programmable_fetch_get_num_lines(
+ struct dpu_encoder_phys_vid *vid_enc,
+ const struct intf_timing_params *timing)
+{
+ u32 worst_case_needed_lines =
+ vid_enc->hw_intf->cap->prog_fetch_lines_worst_case;
+ u32 start_of_frame_lines =
+ timing->v_back_porch + timing->vsync_pulse_width;
+ u32 needed_vfp_lines = worst_case_needed_lines - start_of_frame_lines;
+ u32 actual_vfp_lines = 0;
+
+ /* Fetch must be outside active lines, otherwise undefined. */
+ if (start_of_frame_lines >= worst_case_needed_lines) {
+ DPU_DEBUG_VIDENC(vid_enc,
+ "prog fetch is not needed, large vbp+vsw\n");
+ actual_vfp_lines = 0;
+ } else if (timing->v_front_porch < needed_vfp_lines) {
+ /* Warn fetch needed, but not enough porch in panel config */
+ pr_warn_once
+ ("low vbp+vfp may lead to perf issues in some cases\n");
+ DPU_DEBUG_VIDENC(vid_enc,
+ "less vfp than fetch req, using entire vfp\n");
+ actual_vfp_lines = timing->v_front_porch;
+ } else {
+ DPU_DEBUG_VIDENC(vid_enc, "room in vfp for needed prefetch\n");
+ actual_vfp_lines = needed_vfp_lines;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc,
+ "v_front_porch %u v_back_porch %u vsync_pulse_width %u\n",
+ timing->v_front_porch, timing->v_back_porch,
+ timing->vsync_pulse_width);
+ DPU_DEBUG_VIDENC(vid_enc,
+ "wc_lines %u needed_vfp_lines %u actual_vfp_lines %u\n",
+ worst_case_needed_lines, needed_vfp_lines, actual_vfp_lines);
+
+ return actual_vfp_lines;
+}
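+
+/*
+ * Illustrative numbers (the worst case value is target specific): with
+ * prog_fetch_lines_worst_case = 25 and the 1080p timing above
+ * (vbp + vsw = 36 + 5 = 41 >= 25), the first branch applies and no
+ * prefetch is needed. For a panel with vbp + vsw = 10,
+ * needed_vfp_lines = 25 - 10 = 15; a vfp of 4 takes the second branch
+ * (all 4 lines are used), a vfp of 20 the third (15 lines are used).
+ */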
+
+/*
+ * programmable_fetch_config: Programs HW to prefetch lines by offsetting
+ * the start of fetch into the vertical front porch, for cases where the
+ * vsync pulse width and vertical back porch time are insufficient
+ *
+ * Gets the number of lines to prefetch, then calculates the VSYNC counter
+ * value. The HW layer requires the VSYNC counter of the first pixel of
+ * the target VFP line.
+ *
+ * @timing: Pointer to the intf timing information for the requested mode
+ */
+static void programmable_fetch_config(struct dpu_encoder_phys *phys_enc,
+ const struct intf_timing_params *timing)
+{
+ struct dpu_encoder_phys_vid *vid_enc =
+ to_dpu_encoder_phys_vid(phys_enc);
+ struct intf_prog_fetch f = { 0 };
+ u32 vfp_fetch_lines = 0;
+ u32 horiz_total = 0;
+ u32 vert_total = 0;
+ u32 vfp_fetch_start_vsync_counter = 0;
+ unsigned long lock_flags;
+
+ if (WARN_ON_ONCE(!vid_enc->hw_intf->ops.setup_prg_fetch))
+ return;
+
+ vfp_fetch_lines = programmable_fetch_get_num_lines(vid_enc, timing);
+ if (vfp_fetch_lines) {
+ vert_total = get_vertical_total(timing);
+ horiz_total = get_horizontal_total(timing);
+ vfp_fetch_start_vsync_counter =
+ (vert_total - vfp_fetch_lines) * horiz_total + 1;
+ f.enable = 1;
+ f.fetch_start = vfp_fetch_start_vsync_counter;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc,
+ "vfp_fetch_lines %u vfp_fetch_start_vsync_counter %u\n",
+ vfp_fetch_lines, vfp_fetch_start_vsync_counter);
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ vid_enc->hw_intf->ops.setup_prg_fetch(vid_enc->hw_intf, &f);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+}
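+
+/*
+ * Counter math example (assumed numbers): with vert_total = 1125,
+ * horiz_total = 2200 and vfp_fetch_lines = 15, fetch_start becomes
+ * (1125 - 15) * 2200 + 1 = 2442001, i.e. the vsync counter value of the
+ * first pixel of the 15th-from-last line of the frame.
+ */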
+
+static bool dpu_encoder_phys_vid_mode_fixup(
+ struct dpu_encoder_phys *phys_enc,
+ const struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ if (phys_enc)
+ DPU_DEBUG_VIDENC(to_dpu_encoder_phys_vid(phys_enc), "\n");
+
+ /*
+ * Leave the mode untouched: any modification made here comes back
+ * to us as the adjusted mode in mode_set.
+ */
+ return true;
+}
+
+static void dpu_encoder_phys_vid_setup_timing_engine(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+ struct drm_display_mode mode;
+ struct intf_timing_params timing_params = { 0 };
+ const struct dpu_format *fmt = NULL;
+ u32 fmt_fourcc = DRM_FORMAT_RGB888;
+ unsigned long lock_flags;
+ struct dpu_hw_intf_cfg intf_cfg = { 0 };
+
+ if (!phys_enc || !phys_enc->hw_ctl ||
+ !phys_enc->hw_ctl->ops.setup_intf_cfg) {
+ DPU_ERROR("invalid encoder %d\n", phys_enc != NULL);
+ return;
+ }
+
+ mode = phys_enc->cached_mode;
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ if (!vid_enc->hw_intf->ops.setup_timing_gen) {
+ DPU_ERROR("timing engine setup is not supported\n");
+ return;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc, "enabling mode:\n");
+ drm_mode_debug_printmodeline(&mode);
+
+ if (phys_enc->split_role != ENC_ROLE_SOLO) {
+ mode.hdisplay >>= 1;
+ mode.htotal >>= 1;
+ mode.hsync_start >>= 1;
+ mode.hsync_end >>= 1;
+
+ DPU_DEBUG_VIDENC(vid_enc,
+ "split_role %d, halve horizontal %d %d %d %d\n",
+ phys_enc->split_role,
+ mode.hdisplay, mode.htotal,
+ mode.hsync_start, mode.hsync_end);
+ }
+
+ drm_mode_to_intf_timing_params(vid_enc, &mode, &timing_params);
+
+ fmt = dpu_get_dpu_format(fmt_fourcc);
+ DPU_DEBUG_VIDENC(vid_enc, "fmt_fourcc 0x%X\n", fmt_fourcc);
+
+ intf_cfg.intf = vid_enc->hw_intf->idx;
+ intf_cfg.intf_mode_sel = DPU_CTL_MODE_SEL_VID;
+ intf_cfg.stream_sel = 0; /* Don't care value for video mode */
+ intf_cfg.mode_3d = dpu_encoder_helper_get_3d_blend_mode(phys_enc);
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ vid_enc->hw_intf->ops.setup_timing_gen(vid_enc->hw_intf,
+ &timing_params, fmt);
+ phys_enc->hw_ctl->ops.setup_intf_cfg(phys_enc->hw_ctl, &intf_cfg);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ programmable_fetch_config(phys_enc, &timing_params);
+
+ vid_enc->timing_params = timing_params;
+}
+
+static void dpu_encoder_phys_vid_vblank_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+ struct dpu_hw_ctl *hw_ctl;
+ unsigned long lock_flags;
+ u32 flush_register = 0;
+ int new_cnt = -1, old_cnt = -1;
+
+ if (!phys_enc)
+ return;
+
+ hw_ctl = phys_enc->hw_ctl;
+ if (!hw_ctl)
+ return;
+
+ DPU_ATRACE_BEGIN("vblank_irq");
+
+ if (phys_enc->parent_ops->handle_vblank_virt)
+ phys_enc->parent_ops->handle_vblank_virt(phys_enc->parent,
+ phys_enc);
+
+ old_cnt = atomic_read(&phys_enc->pending_kickoff_cnt);
+
+ /*
+ * Only decrement the pending flush count if we've actually flushed
+ * hardware. Due to SW irq latency, vblank may have already happened,
+ * so we need to double-check with HW that it accepted the flush bits.
+ */
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ if (hw_ctl->ops.get_flush_register)
+ flush_register = hw_ctl->ops.get_flush_register(hw_ctl);
+
+ if (flush_register == 0)
+ new_cnt = atomic_add_unless(&phys_enc->pending_kickoff_cnt,
+ -1, 0);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ /* Signal any waiting atomic commit thread */
+ wake_up_all(&phys_enc->pending_kickoff_wq);
+ DPU_ATRACE_END("vblank_irq");
+}
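+
+/*
+ * Sketch of the race the flush-register double-check above guards
+ * against (assuming a single commit in flight):
+ *
+ *     CPU: kickoff -> pending_kickoff_cnt = 1, flush bits written
+ *     HW:  vblank IRQ fires before the flush has latched
+ *     IRQ: get_flush_register() != 0 -> count stays at 1
+ *     HW:  flush latches, the next vblank IRQ fires
+ *     IRQ: get_flush_register() == 0 -> count drops to 0, waiters wake
+ */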
+
+static void dpu_encoder_phys_vid_underrun_irq(void *arg, int irq_idx)
+{
+ struct dpu_encoder_phys *phys_enc = arg;
+
+ if (!phys_enc)
+ return;
+
+ if (phys_enc->parent_ops->handle_underrun_virt)
+ phys_enc->parent_ops->handle_underrun_virt(phys_enc->parent,
+ phys_enc);
+}
+
+static bool _dpu_encoder_phys_is_dual_ctl(struct dpu_encoder_phys *phys_enc)
+{
+ if (!phys_enc)
+ return false;
+
+ return phys_enc->topology_name == DPU_RM_TOPOLOGY_DUALPIPE;
+}
+
+static bool dpu_encoder_phys_vid_needs_single_flush(
+ struct dpu_encoder_phys *phys_enc)
+{
+ return (phys_enc && _dpu_encoder_phys_is_dual_ctl(phys_enc));
+}
+
+static void _dpu_encoder_phys_vid_setup_irq_hw_idx(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_irq *irq;
+
+ /*
+ * Initialize irq->hw_idx only when the irq is not registered.
+ * This prevents invalidating irq->irq_idx, as modeset may be
+ * called many times during DFPS (dynamic refresh rate switching).
+ */
+
+ irq = &phys_enc->irq[INTR_IDX_VSYNC];
+ if (irq->irq_idx < 0)
+ irq->hw_idx = phys_enc->intf_idx;
+
+ irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+ if (irq->irq_idx < 0)
+ irq->hw_idx = phys_enc->intf_idx;
+}
+
+static void dpu_encoder_phys_vid_mode_set(
+ struct dpu_encoder_phys *phys_enc,
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adj_mode)
+{
+ struct dpu_rm *rm;
+ struct dpu_rm_hw_iter iter;
+ int i, instance;
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc || !phys_enc->dpu_kms) {
+ DPU_ERROR("invalid encoder/kms\n");
+ return;
+ }
+
+ rm = &phys_enc->dpu_kms->rm;
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ if (adj_mode) {
+ phys_enc->cached_mode = *adj_mode;
+ drm_mode_debug_printmodeline(adj_mode);
+ DPU_DEBUG_VIDENC(vid_enc, "caching mode:\n");
+ }
+
+ instance = phys_enc->split_role == ENC_ROLE_SLAVE ? 1 : 0;
+
+ /* Retrieve previously allocated HW Resources. Shouldn't fail */
+ dpu_rm_init_hw_iter(&iter, phys_enc->parent->base.id, DPU_HW_BLK_CTL);
+ for (i = 0; i <= instance; i++) {
+ if (dpu_rm_get_hw(rm, &iter))
+ phys_enc->hw_ctl = (struct dpu_hw_ctl *)iter.hw;
+ }
+ if (IS_ERR_OR_NULL(phys_enc->hw_ctl)) {
+ DPU_ERROR_VIDENC(vid_enc, "failed to init ctl, %ld\n",
+ PTR_ERR(phys_enc->hw_ctl));
+ phys_enc->hw_ctl = NULL;
+ return;
+ }
+
+ _dpu_encoder_phys_vid_setup_irq_hw_idx(phys_enc);
+}
+
+static int dpu_encoder_phys_vid_control_vblank_irq(
+ struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ int ret = 0;
+ struct dpu_encoder_phys_vid *vid_enc;
+ int refcount;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ refcount = atomic_read(&phys_enc->vblank_refcount);
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ /* Slave encoders don't report vblank */
+ if (!dpu_encoder_phys_vid_is_master(phys_enc))
+ goto end;
+
+ /* protect against negative */
+ if (!enable && refcount == 0) {
+ ret = -EINVAL;
+ goto end;
+ }
+
+ DRM_DEBUG_KMS("id:%u enable=%d/%d\n", DRMID(phys_enc->parent), enable,
+ atomic_read(&phys_enc->vblank_refcount));
+
+ if (enable && atomic_inc_return(&phys_enc->vblank_refcount) == 1)
+ ret = dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_VSYNC);
+ else if (!enable && atomic_dec_return(&phys_enc->vblank_refcount) == 0)
+ ret = dpu_encoder_helper_unregister_irq(phys_enc,
+ INTR_IDX_VSYNC);
+
+end:
+ if (ret) {
+ DRM_ERROR("failed: id:%u intf:%d ret:%d enable:%d refcnt:%d\n",
+ DRMID(phys_enc->parent),
+ vid_enc->hw_intf->idx - INTF_0, ret, enable,
+ refcount);
+ }
+ return ret;
+}
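+
+/*
+ * Usage sketch (hypothetical calls): the refcount above registers the
+ * VSYNC irq only on the 0 -> 1 transition and unregisters it on 1 -> 0:
+ *
+ *     control_vblank_irq(enc, true);   refcount 0 -> 1, irq registered
+ *     control_vblank_irq(enc, true);   refcount 1 -> 2, no hw change
+ *     control_vblank_irq(enc, false);  refcount 2 -> 1, no hw change
+ *     control_vblank_irq(enc, false);  refcount 1 -> 0, irq unregistered
+ */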
+
+static void dpu_encoder_phys_vid_enable(struct dpu_encoder_phys *phys_enc)
+{
+ struct msm_drm_private *priv;
+ struct dpu_encoder_phys_vid *vid_enc;
+ struct dpu_hw_intf *intf;
+ struct dpu_hw_ctl *ctl;
+ u32 flush_mask = 0;
+
+ if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
+ !phys_enc->parent->dev->dev_private) {
+ DPU_ERROR("invalid encoder/device\n");
+ return;
+ }
+ priv = phys_enc->parent->dev->dev_private;
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ intf = vid_enc->hw_intf;
+ ctl = phys_enc->hw_ctl;
+ if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+ DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
+ vid_enc->hw_intf != NULL, phys_enc->hw_ctl != NULL);
+ return;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc, "\n");
+
+ if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+ return;
+
+ dpu_encoder_helper_split_config(phys_enc, vid_enc->hw_intf->idx);
+
+ dpu_encoder_phys_vid_setup_timing_engine(phys_enc);
+
+ /*
+ * For single flush cases (dual-ctl or pp-split), skip setting the
+ * flush bit for the slave intf, since both intfs use the same ctl
+ * and HW will only flush the master.
+ */
+ if (dpu_encoder_phys_vid_needs_single_flush(phys_enc) &&
+ !dpu_encoder_phys_vid_is_master(phys_enc))
+ goto skip_flush;
+
+ ctl->ops.get_bitmask_intf(ctl, &flush_mask, intf->idx);
+ ctl->ops.update_pending_flush(ctl, flush_mask);
+
+skip_flush:
+ DPU_DEBUG_VIDENC(vid_enc, "update pending flush ctl %d flush_mask %x\n",
+ ctl->idx - CTL_0, flush_mask);
+
+ /* ctl_flush & timing engine enable will be triggered by framework */
+ if (phys_enc->enable_state == DPU_ENC_DISABLED)
+ phys_enc->enable_state = DPU_ENC_ENABLING;
+}
+
+static void dpu_encoder_phys_vid_destroy(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ DPU_DEBUG_VIDENC(vid_enc, "\n");
+ kfree(vid_enc);
+}
+
+static void dpu_encoder_phys_vid_get_hw_resources(
+ struct dpu_encoder_phys *phys_enc,
+ struct dpu_encoder_hw_resources *hw_res,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc || !hw_res) {
+ DPU_ERROR("invalid arg(s), enc %d hw_res %d conn_state %d\n",
+ phys_enc != NULL, hw_res != NULL, conn_state != NULL);
+ return;
+ }
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ if (!vid_enc->hw_intf) {
+ DPU_ERROR("invalid arg(s), hw_intf\n");
+ return;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc, "\n");
+ hw_res->intfs[vid_enc->hw_intf->idx - INTF_0] = INTF_MODE_VIDEO;
+}
+
+static int _dpu_encoder_phys_vid_wait_for_vblank(
+ struct dpu_encoder_phys *phys_enc, bool notify)
+{
+ struct dpu_encoder_wait_info wait_info;
+ int ret;
+
+ if (!phys_enc) {
+ pr_err("invalid encoder\n");
+ return -EINVAL;
+ }
+
+ wait_info.wq = &phys_enc->pending_kickoff_wq;
+ wait_info.atomic_cnt = &phys_enc->pending_kickoff_cnt;
+ wait_info.timeout_ms = KICKOFF_TIMEOUT_MS;
+
+ if (!dpu_encoder_phys_vid_is_master(phys_enc)) {
+ if (notify && phys_enc->parent_ops->handle_frame_done)
+ phys_enc->parent_ops->handle_frame_done(
+ phys_enc->parent, phys_enc,
+ DPU_ENCODER_FRAME_EVENT_DONE);
+ return 0;
+ }
+
+ /* Wait for kickoff to complete */
+ ret = dpu_encoder_helper_wait_for_irq(phys_enc, INTR_IDX_VSYNC,
+ &wait_info);
+
+ if (ret == -ETIMEDOUT) {
+ dpu_encoder_helper_report_irq_timeout(phys_enc, INTR_IDX_VSYNC);
+ } else if (!ret && notify && phys_enc->parent_ops->handle_frame_done) {
+ phys_enc->parent_ops->handle_frame_done(
+ phys_enc->parent, phys_enc,
+ DPU_ENCODER_FRAME_EVENT_DONE);
+ }
+
+ return ret;
+}
+
+static int dpu_encoder_phys_vid_wait_for_vblank(
+ struct dpu_encoder_phys *phys_enc)
+{
+ return _dpu_encoder_phys_vid_wait_for_vblank(phys_enc, true);
+}
+
+static void dpu_encoder_phys_vid_prepare_for_kickoff(
+ struct dpu_encoder_phys *phys_enc,
+ struct dpu_encoder_kickoff_params *params)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+ struct dpu_hw_ctl *ctl;
+ int rc;
+
+ if (!phys_enc || !params) {
+ DPU_ERROR("invalid encoder/parameters\n");
+ return;
+ }
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ ctl = phys_enc->hw_ctl;
+ if (!ctl || !ctl->ops.wait_reset_status)
+ return;
+
+ /*
+ * HW supports hardware-initiated ctl resets, so before we kick off
+ * a new frame we need to check for, and wait on, the completion of
+ * any such in-flight reset.
+ */
+ rc = ctl->ops.wait_reset_status(ctl);
+ if (rc) {
+ DPU_ERROR_VIDENC(vid_enc, "ctl %d reset failure: %d\n",
+ ctl->idx, rc);
+ dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_VSYNC);
+ dpu_dbg_dump(false, __func__, true, true);
+ }
+}
+
+static void dpu_encoder_phys_vid_disable(struct dpu_encoder_phys *phys_enc)
+{
+ struct msm_drm_private *priv;
+ struct dpu_encoder_phys_vid *vid_enc;
+ unsigned long lock_flags;
+ int ret;
+
+ if (!phys_enc || !phys_enc->parent || !phys_enc->parent->dev ||
+ !phys_enc->parent->dev->dev_private) {
+ DPU_ERROR("invalid encoder/device\n");
+ return;
+ }
+ priv = phys_enc->parent->dev->dev_private;
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ if (!vid_enc->hw_intf || !phys_enc->hw_ctl) {
+ DPU_ERROR("invalid hw_intf %d hw_ctl %d\n",
+ vid_enc->hw_intf != NULL, phys_enc->hw_ctl != NULL);
+ return;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc, "\n");
+
+ if (WARN_ON(!vid_enc->hw_intf->ops.enable_timing))
+ return;
+
+ if (phys_enc->enable_state == DPU_ENC_DISABLED) {
+ DPU_ERROR("already disabled\n");
+ return;
+ }
+
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 0);
+ if (dpu_encoder_phys_vid_is_master(phys_enc))
+ dpu_encoder_phys_inc_pending(phys_enc);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+
+ /*
+ * Wait for a vsync so we know ENABLE=0 has latched before the
+ * (connector) source of the vsyncs gets disabled. Otherwise we
+ * end up in a funny state if we re-enable before the disable
+ * latches, and some of the settings for the new modeset (like the
+ * new scanout buffer) don't latch properly.
+ */
+ if (dpu_encoder_phys_vid_is_master(phys_enc)) {
+ ret = _dpu_encoder_phys_vid_wait_for_vblank(phys_enc, false);
+ if (ret) {
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ DRM_ERROR("wait disable failed: id:%u intf:%d ret:%d\n",
+ DRMID(phys_enc->parent),
+ vid_enc->hw_intf->idx - INTF_0, ret);
+ }
+ }
+
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+}
+
+static void dpu_encoder_phys_vid_handle_post_kickoff(
+ struct dpu_encoder_phys *phys_enc)
+{
+ unsigned long lock_flags;
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc) {
+ DPU_ERROR("invalid encoder\n");
+ return;
+ }
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ DPU_DEBUG_VIDENC(vid_enc, "enable_state %d\n", phys_enc->enable_state);
+
+ /*
+ * In video mode the CTL must be flushed before the timing engine
+ * is enabled, so video encoders turn on their interfaces now.
+ */
+ if (phys_enc->enable_state == DPU_ENC_ENABLING) {
+ trace_dpu_enc_phys_vid_post_kickoff(DRMID(phys_enc->parent),
+ vid_enc->hw_intf->idx - INTF_0);
+ spin_lock_irqsave(phys_enc->enc_spinlock, lock_flags);
+ vid_enc->hw_intf->ops.enable_timing(vid_enc->hw_intf, 1);
+ spin_unlock_irqrestore(phys_enc->enc_spinlock, lock_flags);
+ phys_enc->enable_state = DPU_ENC_ENABLED;
+ }
+}
+
+static void dpu_encoder_phys_vid_irq_control(struct dpu_encoder_phys *phys_enc,
+ bool enable)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+ int ret;
+
+ if (!phys_enc)
+ return;
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ trace_dpu_enc_phys_vid_irq_ctrl(DRMID(phys_enc->parent),
+ vid_enc->hw_intf->idx - INTF_0,
+ enable,
+ atomic_read(&phys_enc->vblank_refcount));
+
+ if (enable) {
+ ret = dpu_encoder_phys_vid_control_vblank_irq(phys_enc, true);
+ if (ret)
+ return;
+
+ dpu_encoder_helper_register_irq(phys_enc, INTR_IDX_UNDERRUN);
+ } else {
+ dpu_encoder_phys_vid_control_vblank_irq(phys_enc, false);
+ dpu_encoder_helper_unregister_irq(phys_enc, INTR_IDX_UNDERRUN);
+ }
+}
+
+static void dpu_encoder_phys_vid_setup_misr(struct dpu_encoder_phys *phys_enc,
+ bool enable, u32 frame_count)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc)
+ return;
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ if (vid_enc->hw_intf && vid_enc->hw_intf->ops.setup_misr)
+ vid_enc->hw_intf->ops.setup_misr(vid_enc->hw_intf,
+ enable, frame_count);
+}
+
+static u32 dpu_encoder_phys_vid_collect_misr(struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc)
+ return 0;
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+
+ return vid_enc->hw_intf && vid_enc->hw_intf->ops.collect_misr ?
+ vid_enc->hw_intf->ops.collect_misr(vid_enc->hw_intf) : 0;
+}
+
+static int dpu_encoder_phys_vid_get_line_count(
+ struct dpu_encoder_phys *phys_enc)
+{
+ struct dpu_encoder_phys_vid *vid_enc;
+
+ if (!phys_enc)
+ return -EINVAL;
+
+ if (!dpu_encoder_phys_vid_is_master(phys_enc))
+ return -EINVAL;
+
+ vid_enc = to_dpu_encoder_phys_vid(phys_enc);
+ if (!vid_enc->hw_intf || !vid_enc->hw_intf->ops.get_line_count)
+ return -EINVAL;
+
+ return vid_enc->hw_intf->ops.get_line_count(vid_enc->hw_intf);
+}
+
+static void dpu_encoder_phys_vid_init_ops(struct dpu_encoder_phys_ops *ops)
+{
+ ops->is_master = dpu_encoder_phys_vid_is_master;
+ ops->mode_set = dpu_encoder_phys_vid_mode_set;
+ ops->mode_fixup = dpu_encoder_phys_vid_mode_fixup;
+ ops->enable = dpu_encoder_phys_vid_enable;
+ ops->disable = dpu_encoder_phys_vid_disable;
+ ops->destroy = dpu_encoder_phys_vid_destroy;
+ ops->get_hw_resources = dpu_encoder_phys_vid_get_hw_resources;
+ ops->control_vblank_irq = dpu_encoder_phys_vid_control_vblank_irq;
+ ops->wait_for_commit_done = dpu_encoder_phys_vid_wait_for_vblank;
+ ops->wait_for_vblank = dpu_encoder_phys_vid_wait_for_vblank;
+ ops->wait_for_tx_complete = dpu_encoder_phys_vid_wait_for_vblank;
+ ops->irq_control = dpu_encoder_phys_vid_irq_control;
+ ops->prepare_for_kickoff = dpu_encoder_phys_vid_prepare_for_kickoff;
+ ops->handle_post_kickoff = dpu_encoder_phys_vid_handle_post_kickoff;
+ ops->needs_single_flush = dpu_encoder_phys_vid_needs_single_flush;
+ ops->setup_misr = dpu_encoder_phys_vid_setup_misr;
+ ops->collect_misr = dpu_encoder_phys_vid_collect_misr;
+ ops->hw_reset = dpu_encoder_helper_hw_reset;
+ ops->get_line_count = dpu_encoder_phys_vid_get_line_count;
+}
+
+struct dpu_encoder_phys *dpu_encoder_phys_vid_init(
+ struct dpu_enc_phys_init_params *p)
+{
+ struct dpu_encoder_phys *phys_enc = NULL;
+ struct dpu_encoder_phys_vid *vid_enc = NULL;
+ struct dpu_rm_hw_iter iter;
+ struct dpu_hw_mdp *hw_mdp;
+ struct dpu_encoder_irq *irq;
+ int i, ret = 0;
+
+ if (!p) {
+ ret = -EINVAL;
+ goto fail;
+ }
+
+ vid_enc = kzalloc(sizeof(*vid_enc), GFP_KERNEL);
+ if (!vid_enc) {
+ ret = -ENOMEM;
+ goto fail;
+ }
+
+ phys_enc = &vid_enc->base;
+
+ hw_mdp = dpu_rm_get_mdp(&p->dpu_kms->rm);
+ if (IS_ERR_OR_NULL(hw_mdp)) {
+ ret = PTR_ERR(hw_mdp) ?: -EINVAL;
+ DPU_ERROR("failed to get mdptop\n");
+ goto fail;
+ }
+ phys_enc->hw_mdptop = hw_mdp;
+ phys_enc->intf_idx = p->intf_idx;
+
+ /*
+ * A hw_intf resource is permanently assigned to this encoder;
+ * other resources are allocated at atomic commit time, based on
+ * the use case.
+ */
+ dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_INTF);
+ while (dpu_rm_get_hw(&p->dpu_kms->rm, &iter)) {
+ struct dpu_hw_intf *hw_intf = (struct dpu_hw_intf *)iter.hw;
+
+ if (hw_intf->idx == p->intf_idx) {
+ vid_enc->hw_intf = hw_intf;
+ break;
+ }
+ }
+
+ if (!vid_enc->hw_intf) {
+ ret = -EINVAL;
+ DPU_ERROR("failed to get hw_intf\n");
+ goto fail;
+ }
+
+ DPU_DEBUG_VIDENC(vid_enc, "\n");
+
+ dpu_encoder_phys_vid_init_ops(&phys_enc->ops);
+ phys_enc->parent = p->parent;
+ phys_enc->parent_ops = p->parent_ops;
+ phys_enc->dpu_kms = p->dpu_kms;
+ phys_enc->split_role = p->split_role;
+ phys_enc->intf_mode = INTF_MODE_VIDEO;
+ phys_enc->enc_spinlock = p->enc_spinlock;
+ for (i = 0; i < INTR_IDX_MAX; i++) {
+ irq = &phys_enc->irq[i];
+ INIT_LIST_HEAD(&irq->cb.list);
+ irq->irq_idx = -EINVAL;
+ irq->hw_idx = -EINVAL;
+ irq->cb.arg = phys_enc;
+ }
+
+ irq = &phys_enc->irq[INTR_IDX_VSYNC];
+ irq->name = "vsync_irq";
+ irq->intr_type = DPU_IRQ_TYPE_INTF_VSYNC;
+ irq->intr_idx = INTR_IDX_VSYNC;
+ irq->cb.func = dpu_encoder_phys_vid_vblank_irq;
+
+ irq = &phys_enc->irq[INTR_IDX_UNDERRUN];
+ irq->name = "underrun";
+ irq->intr_type = DPU_IRQ_TYPE_INTF_UNDER_RUN;
+ irq->intr_idx = INTR_IDX_UNDERRUN;
+ irq->cb.func = dpu_encoder_phys_vid_underrun_irq;
+
+ atomic_set(&phys_enc->vblank_refcount, 0);
+ atomic_set(&phys_enc->pending_kickoff_cnt, 0);
+ init_waitqueue_head(&phys_enc->pending_kickoff_wq);
+ phys_enc->enable_state = DPU_ENC_DISABLED;
+
+ DPU_DEBUG_VIDENC(vid_enc, "created intf idx:%d\n", p->intf_idx);
+
+ return phys_enc;
+
+fail:
+ DPU_ERROR("failed to create encoder\n");
+ if (vid_enc)
+ dpu_encoder_phys_vid_destroy(phys_enc);
+
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
new file mode 100644
index 000000000000..bfcd165e96df
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.c
@@ -0,0 +1,1173 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <uapi/drm/drm_fourcc.h>
+
+#include "msm_media_info.h"
+#include "dpu_kms.h"
+#include "dpu_formats.h"
+
+#define DPU_UBWC_META_MACRO_W_H 16
+#define DPU_UBWC_META_BLOCK_SIZE 256
+#define DPU_UBWC_PLANE_SIZE_ALIGNMENT 4096
+
+#define DPU_TILE_HEIGHT_DEFAULT 1
+#define DPU_TILE_HEIGHT_TILED 4
+#define DPU_TILE_HEIGHT_UBWC 4
+#define DPU_TILE_HEIGHT_NV12 8
+
+#define DPU_MAX_IMG_WIDTH 0x3FFF
+#define DPU_MAX_IMG_HEIGHT 0x3FFF
+
+/*
+ * DPU supported format packing, bpp, and other format information.
+ * DPU currently only supports interleaved RGB formats. UBWC support
+ * for a pixel format is indicated by the flag; such formats carry an
+ * additional metadata plane.
+ */
+
+#define INTERLEAVED_RGB_FMT(fmt, a, r, g, b, e0, e1, e2, e3, uc, alpha, \
+bp, flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = DPU_CHROMA_RGB, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = uc, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
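+
+/*
+ * For reference, the ARGB8888 entry in the table below expands
+ * (abridged) to:
+ *
+ *     {
+ *         .base.pixel_format = DRM_FORMAT_ARGB8888,
+ *         .fetch_planes = DPU_PLANE_INTERLEAVED,
+ *         .alpha_enable = true,
+ *         .element = { C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA },
+ *         .bits = { COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT },
+ *         .unpack_count = 4,
+ *         .bpp = 4,
+ *         .fetch_mode = DPU_FETCH_LINEAR,
+ *         .num_planes = 1,
+ *     }
+ */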
+
+#define INTERLEAVED_RGB_FMT_TILED(fmt, a, r, g, b, e0, e1, e2, e3, uc, \
+alpha, bp, flg, fm, np, th) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3) }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = DPU_CHROMA_RGB, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = uc, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+
+#define INTERLEAVED_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, e3, \
+alpha, chroma, count, bp, flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_INTERLEAVED, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), (e3)}, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = count, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT_TILED(fmt, a, r, g, b, e0, e1, chroma, \
+flg, fm, np, th) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+#define PSEUDO_YUV_FMT_LOOSE(fmt, a, r, g, b, e0, e1, chroma, flg, fm, np)\
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 1, \
+ .unpack_tight = 0, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+#define PSEUDO_YUV_FMT_LOOSE_TILED(fmt, a, r, g, b, e0, e1, chroma, \
+flg, fm, np, th) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PSEUDO_PLANAR, \
+ .alpha_enable = false, \
+ .element = { (e0), (e1), 0, 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 1, \
+ .unpack_tight = 0, \
+ .unpack_count = 2, \
+ .bpp = 2, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = th \
+}
+
+
+#define PLANAR_YUV_FMT(fmt, a, r, g, b, e0, e1, e2, alpha, chroma, bp, \
+flg, fm, np) \
+{ \
+ .base.pixel_format = DRM_FORMAT_ ## fmt, \
+ .fetch_planes = DPU_PLANE_PLANAR, \
+ .alpha_enable = alpha, \
+ .element = { (e0), (e1), (e2), 0 }, \
+ .bits = { g, b, r, a }, \
+ .chroma_sample = chroma, \
+ .unpack_align_msb = 0, \
+ .unpack_tight = 1, \
+ .unpack_count = 1, \
+ .bpp = bp, \
+ .fetch_mode = fm, \
+ .flag = {(flg)}, \
+ .num_planes = np, \
+ .tile_height = DPU_TILE_HEIGHT_DEFAULT \
+}
+
+/*
+ * struct dpu_media_color_map - maps drm format to media format
+ * @format: DRM base pixel format
+ * @color: Media API color related to DRM format
+ */
+struct dpu_media_color_map {
+ uint32_t format;
+ uint32_t color;
+};
+
+static const struct dpu_format dpu_format_map[] = {
+ INTERLEAVED_RGB_FMT(ARGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 4, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGB888,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ false, 3, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGR888,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 3, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGB565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, 0, 3,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGR565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR1555,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX5551,
+ COLOR_ALPHA_1BIT, COLOR_5BIT, COLOR_5BIT, COLOR_5BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX4444,
+ COLOR_ALPHA_4BIT, COLOR_4BIT, COLOR_4BIT, COLOR_4BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 2, 0,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRA1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBA1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ABGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(ARGB2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XRGB2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(BGRX1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(XBGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ INTERLEAVED_RGB_FMT(RGBX1010102,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ false, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_LINEAR, 1),
+
+ PSEUDO_YUV_FMT(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV21,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV16,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_H2V1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PSEUDO_YUV_FMT(NV61,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb,
+ DPU_CHROMA_H2V1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(VYUY,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C0_G_Y,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(UYVY,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C0_G_Y,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(YUYV,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C0_G_Y, C1_B_Cb, C0_G_Y, C2_R_Cr,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ INTERLEAVED_YUV_FMT(YVYU,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C0_G_Y, C2_R_Cr, C0_G_Y, C1_B_Cb,
+ false, DPU_CHROMA_H2V1, 4, 2, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 2),
+
+ PLANAR_YUV_FMT(YUV420,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb, C0_G_Y,
+ false, DPU_CHROMA_420, 1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 3),
+
+ PLANAR_YUV_FMT(YVU420,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr, C0_G_Y,
+ false, DPU_CHROMA_420, 1, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_LINEAR, 3),
+};
+
+/*
+ * A5x tile format table:
+ * This table holds the tile formats supported on A5x targets.
+ */
+static const struct dpu_format dpu_format_map_tile[] = {
+ INTERLEAVED_RGB_FMT_TILED(BGR565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 2, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(ARGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ true, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C1_B_Cb, C0_G_Y, C2_R_Cr, 4,
+ true, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(RGBA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(BGRA8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ true, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(BGRX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C0_G_Y, C2_R_Cr, C3_ALPHA, 4,
+ false, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(XRGB8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C3_ALPHA, C2_R_Cr, C0_G_Y, C1_B_Cb, 4,
+ false, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(RGBX8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, 0,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX,
+ DPU_FETCH_UBWC, 1, DPU_TILE_HEIGHT_TILED),
+
+ PSEUDO_YUV_FMT_TILED(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_NV12),
+
+ PSEUDO_YUV_FMT_TILED(NV21,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C1_B_Cb,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_NV12),
+};
+
+/*
+ * UBWC format table:
+ * This table holds the UBWC formats supported.
+ * If a compression ratio needs to be used for this or any other format,
+ * the data is passed in by user space.
+ */
+static const struct dpu_format dpu_format_map_ubwc[] = {
+ INTERLEAVED_RGB_FMT_TILED(BGR565,
+ 0, COLOR_5BIT, COLOR_6BIT, COLOR_5BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, 0, 3,
+ false, 2, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR8888,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ false, 4, DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(ABGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ INTERLEAVED_RGB_FMT_TILED(XBGR2101010,
+ COLOR_8BIT, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C2_R_Cr, C0_G_Y, C1_B_Cb, C3_ALPHA, 4,
+ true, 4, DPU_FORMAT_FLAG_DX | DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 2, DPU_TILE_HEIGHT_UBWC),
+
+ PSEUDO_YUV_FMT_TILED(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, DPU_FORMAT_FLAG_YUV |
+ DPU_FORMAT_FLAG_COMPRESSED,
+ DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
+};
+
+static const struct dpu_format dpu_format_map_p010[] = {
+ PSEUDO_YUV_FMT_LOOSE(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX),
+ DPU_FETCH_LINEAR, 2),
+};
+
+static const struct dpu_format dpu_format_map_p010_ubwc[] = {
+ PSEUDO_YUV_FMT_LOOSE_TILED(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX |
+ DPU_FORMAT_FLAG_COMPRESSED),
+ DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
+};
+
+static const struct dpu_format dpu_format_map_tp10_ubwc[] = {
+ PSEUDO_YUV_FMT_TILED(NV12,
+ 0, COLOR_8BIT, COLOR_8BIT, COLOR_8BIT,
+ C1_B_Cb, C2_R_Cr,
+ DPU_CHROMA_420, (DPU_FORMAT_FLAG_YUV | DPU_FORMAT_FLAG_DX |
+ DPU_FORMAT_FLAG_COMPRESSED),
+ DPU_FETCH_UBWC, 4, DPU_TILE_HEIGHT_NV12),
+};
+
+/*
+ * _dpu_get_v_h_subsample_rate - Get subsample rates for all formats we support
+ * Note: not using the drm_format_*_subsampling helpers, since we track
+ * chroma sampling in our own format tables.
+ */
+static void _dpu_get_v_h_subsample_rate(
+ enum dpu_chroma_samp_type chroma_sample,
+ uint32_t *v_sample,
+ uint32_t *h_sample)
+{
+ if (!v_sample || !h_sample)
+ return;
+
+ switch (chroma_sample) {
+ case DPU_CHROMA_H2V1:
+ *v_sample = 1;
+ *h_sample = 2;
+ break;
+ case DPU_CHROMA_H1V2:
+ *v_sample = 2;
+ *h_sample = 1;
+ break;
+ case DPU_CHROMA_420:
+ *v_sample = 2;
+ *h_sample = 2;
+ break;
+ default:
+ *v_sample = 1;
+ *h_sample = 1;
+ break;
+ }
+}
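+
+/*
+ * E.g. DPU_CHROMA_420 (NV12) yields v_sample = h_sample = 2: the chroma
+ * plane is subsampled by two in both directions, so the callers below
+ * derive its pitch and line count by dividing the luma values by these
+ * factors.
+ */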
+
+static int _dpu_format_get_media_color_ubwc(const struct dpu_format *fmt)
+{
+ static const struct dpu_media_color_map dpu_media_ubwc_map[] = {
+ {DRM_FORMAT_ABGR8888, COLOR_FMT_RGBA8888_UBWC},
+ {DRM_FORMAT_XBGR8888, COLOR_FMT_RGBA8888_UBWC},
+ {DRM_FORMAT_ABGR2101010, COLOR_FMT_RGBA1010102_UBWC},
+ {DRM_FORMAT_XBGR2101010, COLOR_FMT_RGBA1010102_UBWC},
+ {DRM_FORMAT_BGR565, COLOR_FMT_RGB565_UBWC},
+ };
+ int color_fmt = -1;
+ int i;
+
+ if (fmt->base.pixel_format == DRM_FORMAT_NV12) {
+ if (DPU_FORMAT_IS_DX(fmt)) {
+ if (fmt->unpack_tight)
+ color_fmt = COLOR_FMT_NV12_BPP10_UBWC;
+ else
+ color_fmt = COLOR_FMT_P010_UBWC;
+ } else {
+ color_fmt = COLOR_FMT_NV12_UBWC;
+ }
+ return color_fmt;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dpu_media_ubwc_map); ++i)
+ if (fmt->base.pixel_format == dpu_media_ubwc_map[i].format) {
+ color_fmt = dpu_media_ubwc_map[i].color;
+ break;
+ }
+ return color_fmt;
+}
+
+static int _dpu_format_get_plane_sizes_ubwc(
+ const struct dpu_format *fmt,
+ const uint32_t width,
+ const uint32_t height,
+ struct dpu_hw_fmt_layout *layout)
+{
+ int i;
+ int color;
+ bool meta = DPU_FORMAT_IS_UBWC(fmt);
+
+ memset(layout, 0, sizeof(struct dpu_hw_fmt_layout));
+ layout->format = fmt;
+ layout->width = width;
+ layout->height = height;
+ layout->num_planes = fmt->num_planes;
+
+ color = _dpu_format_get_media_color_ubwc(fmt);
+ if (color < 0) {
+ DRM_ERROR("UBWC format not supported for fmt: %4.4s\n",
+ (char *)&fmt->base.pixel_format);
+ return -EINVAL;
+ }
+
+ if (DPU_FORMAT_IS_YUV(layout->format)) {
+ uint32_t y_sclines, uv_sclines;
+ uint32_t y_meta_scanlines = 0;
+ uint32_t uv_meta_scanlines = 0;
+
+ layout->num_planes = 2;
+ layout->plane_pitch[0] = VENUS_Y_STRIDE(color, width);
+ y_sclines = VENUS_Y_SCANLINES(color, height);
+ layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
+ y_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ layout->plane_pitch[1] = VENUS_UV_STRIDE(color, width);
+ uv_sclines = VENUS_UV_SCANLINES(color, height);
+ layout->plane_size[1] = MSM_MEDIA_ALIGN(layout->plane_pitch[1] *
+ uv_sclines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ if (!meta)
+ goto done;
+
+ layout->num_planes += 2;
+ layout->plane_pitch[2] = VENUS_Y_META_STRIDE(color, width);
+ y_meta_scanlines = VENUS_Y_META_SCANLINES(color, height);
+ layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
+ y_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ layout->plane_pitch[3] = VENUS_UV_META_STRIDE(color, width);
+ uv_meta_scanlines = VENUS_UV_META_SCANLINES(color, height);
+ layout->plane_size[3] = MSM_MEDIA_ALIGN(layout->plane_pitch[3] *
+ uv_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ } else {
+ uint32_t rgb_scanlines, rgb_meta_scanlines;
+
+ layout->num_planes = 1;
+
+ layout->plane_pitch[0] = VENUS_RGB_STRIDE(color, width);
+ rgb_scanlines = VENUS_RGB_SCANLINES(color, height);
+ layout->plane_size[0] = MSM_MEDIA_ALIGN(layout->plane_pitch[0] *
+ rgb_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+
+ if (!meta)
+ goto done;
+ layout->num_planes += 2;
+ layout->plane_pitch[2] = VENUS_RGB_META_STRIDE(color, width);
+ rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color, height);
+ layout->plane_size[2] = MSM_MEDIA_ALIGN(layout->plane_pitch[2] *
+ rgb_meta_scanlines, DPU_UBWC_PLANE_SIZE_ALIGNMENT);
+ }
+
+done:
+ for (i = 0; i < DPU_MAX_PLANES; i++)
+ layout->total_size += layout->plane_size[i];
+
+ return 0;
+}
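+
+/*
+ * Size sketch (VENUS_* values are target specific): every bitstream and
+ * metadata plane is rounded up to the 4 KiB UBWC alignment, e.g. a
+ * 1920 * 1080 = 2073600 byte plane becomes
+ * MSM_MEDIA_ALIGN(2073600, 4096) = 2076672 bytes.
+ */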
+
+static int _dpu_format_get_plane_sizes_linear(
+ const struct dpu_format *fmt,
+ const uint32_t width,
+ const uint32_t height,
+ struct dpu_hw_fmt_layout *layout,
+ const uint32_t *pitches)
+{
+ int i;
+
+ memset(layout, 0, sizeof(struct dpu_hw_fmt_layout));
+ layout->format = fmt;
+ layout->width = width;
+ layout->height = height;
+ layout->num_planes = fmt->num_planes;
+
+ /* Due to memset above, only need to set planes of interest */
+ if (fmt->fetch_planes == DPU_PLANE_INTERLEAVED) {
+ layout->num_planes = 1;
+ layout->plane_size[0] = width * height * layout->format->bpp;
+ layout->plane_pitch[0] = width * layout->format->bpp;
+ } else {
+ uint32_t v_subsample, h_subsample;
+ uint32_t chroma_samp;
+ uint32_t bpp = 1;
+
+ chroma_samp = fmt->chroma_sample;
+ _dpu_get_v_h_subsample_rate(chroma_samp, &v_subsample,
+ &h_subsample);
+
+ if (width % h_subsample || height % v_subsample) {
+ DRM_ERROR("mismatch in subsample vs dimensions\n");
+ return -EINVAL;
+ }
+
+ if ((fmt->base.pixel_format == DRM_FORMAT_NV12) &&
+ (DPU_FORMAT_IS_DX(fmt)))
+ bpp = 2;
+ layout->plane_pitch[0] = width * bpp;
+ layout->plane_pitch[1] = layout->plane_pitch[0] / h_subsample;
+ layout->plane_size[0] = layout->plane_pitch[0] * height;
+ layout->plane_size[1] = layout->plane_pitch[1] *
+ (height / v_subsample);
+
+ if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
+ layout->num_planes = 2;
+ layout->plane_size[1] *= 2;
+ layout->plane_pitch[1] *= 2;
+ } else {
+ /* planar */
+ layout->num_planes = 3;
+ layout->plane_size[2] = layout->plane_size[1];
+ layout->plane_pitch[2] = layout->plane_pitch[1];
+ }
+ }
+
+ /*
+ * Linear formats: allow user-allocated pitches if they are greater
+ * than the requirement.
+ * UBWC formats: pitch values are computed uniformly across all the
+ * components, based on the UBWC specification.
+ */
+ for (i = 0; i < layout->num_planes && i < DPU_MAX_PLANES; ++i) {
+ if (pitches && layout->plane_pitch[i] < pitches[i])
+ layout->plane_pitch[i] = pitches[i];
+ }
+
+ for (i = 0; i < DPU_MAX_PLANES; i++)
+ layout->total_size += layout->plane_size[i];
+
+ return 0;
+}
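+
+/*
+ * Worked example (illustrative only): linear NV12 at 1920x1080 with
+ * bpp = 1 and h/v subsampling of 2 gives, after the pseudo-planar
+ * doubling above:
+ *
+ *     plane_pitch[0] = 1920    plane_size[0] = 1920 * 1080
+ *     plane_pitch[1] = 1920    plane_size[1] = 960 * 540 * 2
+ *
+ * for a total of 1920 * 1080 * 3 / 2 bytes, the usual NV12 footprint.
+ */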
+
+static int dpu_format_get_plane_sizes(
+ const struct dpu_format *fmt,
+ const uint32_t w,
+ const uint32_t h,
+ struct dpu_hw_fmt_layout *layout,
+ const uint32_t *pitches)
+{
+ if (!layout || !fmt) {
+ DRM_ERROR("invalid pointer\n");
+ return -EINVAL;
+ }
+
+ if ((w > DPU_MAX_IMG_WIDTH) || (h > DPU_MAX_IMG_HEIGHT)) {
+ DRM_ERROR("image dimensions outside max range\n");
+ return -ERANGE;
+ }
+
+ if (DPU_FORMAT_IS_UBWC(fmt) || DPU_FORMAT_IS_TILE(fmt))
+ return _dpu_format_get_plane_sizes_ubwc(fmt, w, h, layout);
+
+ return _dpu_format_get_plane_sizes_linear(fmt, w, h, layout, pitches);
+}
+
+static int _dpu_format_populate_addrs_ubwc(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
+{
+ uint32_t base_addr = 0;
+ bool meta;
+
+ if (!fb || !layout) {
+ DRM_ERROR("invalid pointers\n");
+ return -EINVAL;
+ }
+
+ if (aspace)
+ base_addr = msm_framebuffer_iova(fb, aspace, 0);
+ if (!base_addr) {
+ DRM_ERROR("failed to retrieve base addr\n");
+ return -EFAULT;
+ }
+
+ meta = DPU_FORMAT_IS_UBWC(layout->format);
+
+ /* Per-format logic for verifying active planes */
+ if (DPU_FORMAT_IS_YUV(layout->format)) {
+ /************************************************/
+ /* UBWC ** */
+ /* buffer ** DPU PLANE */
+ /* format ** */
+ /************************************************/
+ /* ------------------- ** -------------------- */
+ /* | Y meta | ** | Y bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | Y bitstream | ** | CbCr bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | Cbcr metadata | ** | Y meta | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | CbCr bitstream | ** | CbCr meta | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /************************************************/
+
+ /* configure Y bitstream plane */
+ layout->plane_addr[0] = base_addr + layout->plane_size[2];
+
+ /* configure CbCr bitstream plane */
+ layout->plane_addr[1] = base_addr + layout->plane_size[0]
+ + layout->plane_size[2] + layout->plane_size[3];
+
+ if (!meta)
+ goto done;
+
+ /* configure Y metadata plane */
+ layout->plane_addr[2] = base_addr;
+
+ /* configure CbCr metadata plane */
+ layout->plane_addr[3] = base_addr + layout->plane_size[0]
+ + layout->plane_size[2];
+
+ } else {
+ /************************************************/
+ /* UBWC ** */
+ /* buffer ** DPU PLANE */
+ /* format ** */
+ /************************************************/
+ /* ------------------- ** -------------------- */
+ /* | RGB meta | ** | RGB bitstream | */
+ /* | data | ** | plane | */
+ /* ------------------- ** -------------------- */
+ /* | RGB bitstream | ** | NONE | */
+ /* | data | ** | | */
+ /* ------------------- ** -------------------- */
+ /* ** | RGB meta | */
+ /* ** | plane | */
+ /* ** -------------------- */
+ /************************************************/
+
+ layout->plane_addr[0] = base_addr + layout->plane_size[2];
+ layout->plane_addr[1] = 0;
+
+ if (!meta)
+ goto done;
+
+ layout->plane_addr[2] = base_addr;
+ layout->plane_addr[3] = 0;
+ }
+done:
+ return 0;
+}
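+
+/*
+ * Address example (plane sizes assumed): for a UBWC NV12 buffer with
+ * plane_size = { Y_bs, CbCr_bs, Y_meta, CbCr_meta }, the code above
+ * produces:
+ *
+ *     plane_addr[2] (Y meta)    = base_addr
+ *     plane_addr[0] (Y bits)    = base_addr + Y_meta
+ *     plane_addr[3] (CbCr meta) = base_addr + Y_meta + Y_bs
+ *     plane_addr[1] (CbCr bits) = base_addr + Y_meta + Y_bs + CbCr_meta
+ */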
+
+static int _dpu_format_populate_addrs_linear(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
+{
+ unsigned int i;
+
+ /* Can now check the pitches given vs pitches expected */
+ for (i = 0; i < layout->num_planes; ++i) {
+ if (layout->plane_pitch[i] > fb->pitches[i]) {
+ DRM_ERROR("plane %u expected pitch %u, fb %u\n",
+ i, layout->plane_pitch[i], fb->pitches[i]);
+ return -EINVAL;
+ }
+ }
+
+ /* Populate addresses for simple formats here */
+ for (i = 0; i < layout->num_planes; ++i) {
+ if (aspace)
+ layout->plane_addr[i] =
+ msm_framebuffer_iova(fb, aspace, i);
+ if (!layout->plane_addr[i]) {
+ DRM_ERROR("failed to retrieve base addr\n");
+ return -EFAULT;
+ }
+ }
+
+ return 0;
+}
+
+int dpu_format_populate_layout(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *layout)
+{
+ uint32_t plane_addr[DPU_MAX_PLANES];
+ int i, ret;
+
+ if (!fb || !layout) {
+ DRM_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ if ((fb->width > DPU_MAX_IMG_WIDTH) ||
+ (fb->height > DPU_MAX_IMG_HEIGHT)) {
+ DRM_ERROR("image dimensions outside max range\n");
+ return -ERANGE;
+ }
+
+ layout->format = to_dpu_format(msm_framebuffer_format(fb));
+
+ /* Populate the plane sizes etc via get_format */
+ ret = dpu_format_get_plane_sizes(layout->format, fb->width, fb->height,
+ layout, fb->pitches);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < DPU_MAX_PLANES; ++i)
+ plane_addr[i] = layout->plane_addr[i];
+
+ /* Populate the addresses given the fb */
+ if (DPU_FORMAT_IS_UBWC(layout->format) ||
+ DPU_FORMAT_IS_TILE(layout->format))
+ ret = _dpu_format_populate_addrs_ubwc(aspace, fb, layout);
+ else
+ ret = _dpu_format_populate_addrs_linear(aspace, fb, layout);
+
+ /* check if anything changed */
+ if (!ret && !memcmp(plane_addr, layout->plane_addr, sizeof(plane_addr)))
+ ret = -EAGAIN;
+
+ return ret;
+}
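A usage sketch for the -EAGAIN contract above (hypothetical caller, not part of this patch): -EAGAIN signals success with unchanged plane addresses, so a caller would skip reprogramming rather than treat it as a failure.

static int example_prepare_src(struct msm_gem_address_space *aspace,
			       struct drm_framebuffer *fb,
			       struct dpu_hw_fmt_layout *layout)
{
	int ret = dpu_format_populate_layout(aspace, fb, layout);

	if (ret == -EAGAIN)
		return 0;	/* addresses unchanged, skip reprogramming */
	if (ret)
		return ret;	/* genuine failure */

	/* addresses changed: reprogram the SSPP source surface here */
	return 0;
}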
+
+int dpu_format_check_modified_format(
+ const struct msm_kms *kms,
+ const struct msm_format *msm_fmt,
+ const struct drm_mode_fb_cmd2 *cmd,
+ struct drm_gem_object **bos)
+{
+ int ret, i, num_base_fmt_planes;
+ const struct dpu_format *fmt;
+ struct dpu_hw_fmt_layout layout;
+ uint32_t bos_total_size = 0;
+
+ if (!msm_fmt || !cmd || !bos) {
+ DRM_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ fmt = to_dpu_format(msm_fmt);
+ num_base_fmt_planes = drm_format_num_planes(fmt->base.pixel_format);
+
+ ret = dpu_format_get_plane_sizes(fmt, cmd->width, cmd->height,
+ &layout, cmd->pitches);
+ if (ret)
+ return ret;
+
+ for (i = 0; i < num_base_fmt_planes; i++) {
+ if (!bos[i]) {
+ DRM_ERROR("invalid handle for plane %d\n", i);
+ return -EINVAL;
+ }
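+ /* avoid double-counting planes that reuse bos[0] */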
+ if ((i == 0) || (bos[i] != bos[0]))
+ bos_total_size += bos[i]->size;
+ }
+
+ if (bos_total_size < layout.total_size) {
+ DRM_ERROR("buffers total size too small %u expected %u\n",
+ bos_total_size, layout.total_size);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+const struct dpu_format *dpu_get_dpu_format_ext(
+ const uint32_t format,
+ const uint64_t modifier)
+{
+ uint32_t i = 0;
+ const struct dpu_format *fmt = NULL;
+ const struct dpu_format *map = NULL;
+ ssize_t map_size = 0;
+
+ /*
+ * Currently, exactly zero or one modifier is supported.
+ * All planes must use the same modifier.
+ */
+ DPU_DEBUG("plane format modifier 0x%llX\n", modifier);
+
+ switch (modifier) {
+ case 0:
+ map = dpu_format_map;
+ map_size = ARRAY_SIZE(dpu_format_map);
+ break;
+ case DRM_FORMAT_MOD_QCOM_COMPRESSED:
+ map = dpu_format_map_ubwc;
+ map_size = ARRAY_SIZE(dpu_format_map_ubwc);
+ DPU_DEBUG("found fmt: %4.4s DRM_FORMAT_MOD_QCOM_COMPRESSED\n",
+ (char *)&format);
+ break;
+ default:
+ DPU_ERROR("unsupported format modifier %llX\n", modifier);
+ return NULL;
+ }
+
+ for (i = 0; i < map_size; i++) {
+ if (format == map[i].base.pixel_format) {
+ fmt = &map[i];
+ break;
+ }
+ }
+
+ if (fmt == NULL)
+ DPU_ERROR("unsupported fmt: %4.4s modifier 0x%llX\n",
+ (char *)&format, modifier);
+ else
+ DPU_DEBUG("fmt %4.4s mod 0x%llX ubwc %d yuv %d\n",
+ (char *)&format, modifier,
+ DPU_FORMAT_IS_UBWC(fmt),
+ DPU_FORMAT_IS_YUV(fmt));
+
+ return fmt;
+}
+
+const struct msm_format *dpu_get_msm_format(
+ struct msm_kms *kms,
+ const uint32_t format,
+ const uint64_t modifiers)
+{
+ const struct dpu_format *fmt = dpu_get_dpu_format_ext(format,
+ modifiers);
+ if (fmt)
+ return &fmt->base;
+ return NULL;
+}
+
+uint32_t dpu_populate_formats(
+ const struct dpu_format_extended *format_list,
+ uint32_t *pixel_formats,
+ uint64_t *pixel_modifiers,
+ uint32_t pixel_formats_max)
+{
+ uint32_t i, fourcc_format;
+
+ if (!format_list || !pixel_formats)
+ return 0;
+
+ for (i = 0, fourcc_format = 0;
+ format_list->fourcc_format && i < pixel_formats_max;
+ ++format_list) {
+ /* verify if listed format is in dpu_format_map? */
+
+ /* optionally return modified formats */
+ if (pixel_modifiers) {
+ /* assume same modifier for all fb planes */
+ pixel_formats[i] = format_list->fourcc_format;
+ pixel_modifiers[i++] = format_list->modifier;
+ } else {
+ /* assume base formats grouped together */
+ if (fourcc_format != format_list->fourcc_format) {
+ fourcc_format = format_list->fourcc_format;
+ pixel_formats[i++] = fourcc_format;
+ }
+ }
+ }
+
+ return i;
+}
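A sketch of how dpu_populate_formats() might be consumed (the caller and array sizes are hypothetical): the flattened fourcc/modifier arrays are the shape that DRM plane-init style APIs expect.

static void example_flatten_formats(const struct dpu_format_extended *list)
{
	uint32_t formats[64];
	uint64_t modifiers[64];
	uint32_t n;

	n = dpu_populate_formats(list, formats, modifiers,
				 ARRAY_SIZE(formats));
	/* formats[0..n-1] and modifiers[0..n-1] now describe the plane */
}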
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
new file mode 100644
index 000000000000..a54451d8d011
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_formats.h
@@ -0,0 +1,88 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_FORMATS_H
+#define _DPU_FORMATS_H
+
+#include <drm/drm_fourcc.h>
+#include "msm_gem.h"
+#include "dpu_hw_mdss.h"
+
+/**
+ * dpu_get_dpu_format_ext() - Returns dpu format structure pointer.
+ * @format: DRM FourCC Code
+ * @modifier: 64-bit DRM format modifier, applied to all planes
+ */
+const struct dpu_format *dpu_get_dpu_format_ext(
+ const uint32_t format,
+ const uint64_t modifier);
+
+#define dpu_get_dpu_format(f) dpu_get_dpu_format_ext(f, 0)
+
+/**
+ * dpu_get_msm_format - get a dpu_format by its msm_format base
+ * callback function registered with the msm_kms layer
+ * @kms: kms driver
+ * @format: DRM FourCC Code
+ * @modifiers: data layout modifier
+ */
+const struct msm_format *dpu_get_msm_format(
+ struct msm_kms *kms,
+ const uint32_t format,
+ const uint64_t modifiers);
+
+/**
+ * dpu_populate_formats - populate the given array with fourcc codes supported
+ * @format_list: pointer to list of possible formats
+ * @pixel_formats: array to populate with fourcc codes
+ * @pixel_modifiers: array to populate with drm modifiers, can be NULL
+ * @pixel_formats_max: length of pixel formats array
+ * Return: number of elements populated
+ */
+uint32_t dpu_populate_formats(
+ const struct dpu_format_extended *format_list,
+ uint32_t *pixel_formats,
+ uint64_t *pixel_modifiers,
+ uint32_t pixel_formats_max);
+
+/**
+ * dpu_format_check_modified_format - validate format and buffers for
+ * dpu non-standard, i.e. modified format
+ * @kms: kms driver
+ * @msm_fmt: pointer to the msm_fmt base pointer of a dpu_format
+ * @cmd: fb_cmd2 structure user request
+ * @bos: gem buffer object list
+ *
+ * Return: error code on failure, 0 on success
+ */
+int dpu_format_check_modified_format(
+ const struct msm_kms *kms,
+ const struct msm_format *msm_fmt,
+ const struct drm_mode_fb_cmd2 *cmd,
+ struct drm_gem_object **bos);
+
+/**
+ * dpu_format_populate_layout - populate the given format layout based on
+ * mmu, fb, and format found in the fb
+ * @aspace: address space pointer
+ * @fb: framebuffer pointer
+ * @fmtl: format layout structure to populate
+ *
+ * Return: error code on failure; -EAGAIN on success when the addresses
+ * are unchanged from before; 0 if new addresses were populated
+ */
+int dpu_format_populate_layout(
+ struct msm_gem_address_space *aspace,
+ struct drm_framebuffer *fb,
+ struct dpu_hw_fmt_layout *fmtl);
+
+#endif /*_DPU_FORMATS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
new file mode 100644
index 000000000000..58d29e43faef
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.c
@@ -0,0 +1,155 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/mutex.h>
+#include <linux/errno.h>
+#include <linux/slab.h>
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_blk.h"
+
+/* Serialization lock for dpu_hw_blk_list */
+static DEFINE_MUTEX(dpu_hw_blk_lock);
+
+/* List of all hw block objects */
+static LIST_HEAD(dpu_hw_blk_list);
+
+/**
+ * dpu_hw_blk_init - initialize hw block object
+ * @hw_blk: pointer to hw block object
+ * @type: hw block type - enum dpu_hw_blk_type
+ * @id: instance id of the hw block
+ * @ops: Pointer to block operations
+ * return: 0 if success; error code otherwise
+ */
+int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+ struct dpu_hw_blk_ops *ops)
+{
+ if (!hw_blk) {
+ pr_err("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ INIT_LIST_HEAD(&hw_blk->list);
+ hw_blk->type = type;
+ hw_blk->id = id;
+ atomic_set(&hw_blk->refcount, 0);
+
+ if (ops)
+ hw_blk->ops = *ops;
+
+ mutex_lock(&dpu_hw_blk_lock);
+ list_add(&hw_blk->list, &dpu_hw_blk_list);
+ mutex_unlock(&dpu_hw_blk_lock);
+
+ return 0;
+}
+
+/**
+ * dpu_hw_blk_destroy - destroy hw block object.
+ * @hw_blk: pointer to hw block object
+ * return: none
+ */
+void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk)
+{
+ if (!hw_blk) {
+ pr_err("invalid parameters\n");
+ return;
+ }
+
+ if (atomic_read(&hw_blk->refcount))
+ pr_err("hw_blk:%d.%d invalid refcount\n", hw_blk->type,
+ hw_blk->id);
+
+ mutex_lock(&dpu_hw_blk_lock);
+ list_del(&hw_blk->list);
+ mutex_unlock(&dpu_hw_blk_lock);
+}
+
+/**
+ * dpu_hw_blk_get - get hw_blk from free pool
+ * @hw_blk: if specified, increment reference count only
+ * @type: if hw_blk is not specified, allocate the next available of this type
+ * @id: if specified (>= 0), allocate the given instance of the above type
+ * return: pointer to hw block object
+ */
+struct dpu_hw_blk *dpu_hw_blk_get(struct dpu_hw_blk *hw_blk, u32 type, int id)
+{
+ struct dpu_hw_blk *curr;
+ int rc, refcount;
+
+ if (!hw_blk) {
+ mutex_lock(&dpu_hw_blk_lock);
+ list_for_each_entry(curr, &dpu_hw_blk_list, list) {
+ if ((curr->type != type) ||
+ (id >= 0 && curr->id != id) ||
+ (id < 0 &&
+ atomic_read(&curr->refcount)))
+ continue;
+
+ hw_blk = curr;
+ break;
+ }
+ mutex_unlock(&dpu_hw_blk_lock);
+ }
+
+ if (!hw_blk) {
+ pr_debug("no hw_blk:%d\n", type);
+ return NULL;
+ }
+
+ refcount = atomic_inc_return(&hw_blk->refcount);
+
+ if (refcount == 1 && hw_blk->ops.start) {
+ rc = hw_blk->ops.start(hw_blk);
+ if (rc) {
+ pr_err("failed to start hw_blk:%d rc:%d\n", type, rc);
+ goto error_start;
+ }
+ }
+
+ pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type,
+ hw_blk->id, refcount);
+ return hw_blk;
+
+error_start:
+ dpu_hw_blk_put(hw_blk);
+ return ERR_PTR(rc);
+}
+
+/**
+ * dpu_hw_blk_put - put hw_blk to free pool if decremented refcount is zero
+ * @hw_blk: hw block to be freed
+ */
+void dpu_hw_blk_put(struct dpu_hw_blk *hw_blk)
+{
+ if (!hw_blk) {
+ pr_err("invalid parameters\n");
+ return;
+ }
+
+ pr_debug("hw_blk:%d.%d refcount:%d\n", hw_blk->type, hw_blk->id,
+ atomic_read(&hw_blk->refcount));
+
+ if (!atomic_read(&hw_blk->refcount)) {
+ pr_err("hw_blk:%d.%d invalid put\n", hw_blk->type, hw_blk->id);
+ return;
+ }
+
+ if (atomic_dec_return(&hw_blk->refcount))
+ return;
+
+ if (hw_blk->ops.stop)
+ hw_blk->ops.stop(hw_blk);
+}
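A lifecycle sketch for the get/put helpers above (hypothetical user, not part of this patch): the first get on an unused block calls ops.start(), the last put calls ops.stop(); everything in between only moves the refcount.

static int example_use_block(struct dpu_hw_blk *blk)
{
	struct dpu_hw_blk *hw;

	/* take a reference; starts the block if this is the first user */
	hw = dpu_hw_blk_get(blk, blk->type, blk->id);
	if (IS_ERR_OR_NULL(hw))
		return hw ? PTR_ERR(hw) : -ENOENT;

	/* ... program and use the block ... */

	/* drop the reference; stops the block if this was the last user */
	dpu_hw_blk_put(hw);
	return 0;
}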
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
new file mode 100644
index 000000000000..0f4ca8af1ec5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_blk.h
@@ -0,0 +1,53 @@
+/* Copyright (c) 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_BLK_H
+#define _DPU_HW_BLK_H
+
+#include <linux/types.h>
+#include <linux/list.h>
+#include <linux/atomic.h>
+
+struct dpu_hw_blk;
+
+/**
+ * struct dpu_hw_blk_ops - common hardware block operations
+ * @start: start operation on first get
+ * @stop: stop operation on last put
+ */
+struct dpu_hw_blk_ops {
+ int (*start)(struct dpu_hw_blk *);
+ void (*stop)(struct dpu_hw_blk *);
+};
+
+/**
+ * struct dpu_hw_blk - definition of hardware block object
+ * @list: list of hardware blocks
+ * @type: hardware block type
+ * @id: instance id
+ * @refcount: reference/usage count
+ */
+struct dpu_hw_blk {
+ struct list_head list;
+ u32 type;
+ int id;
+ atomic_t refcount;
+ struct dpu_hw_blk_ops ops;
+};
+
+int dpu_hw_blk_init(struct dpu_hw_blk *hw_blk, u32 type, int id,
+ struct dpu_hw_blk_ops *ops);
+void dpu_hw_blk_destroy(struct dpu_hw_blk *hw_blk);
+
+struct dpu_hw_blk *dpu_hw_blk_get(struct dpu_hw_blk *hw_blk, u32 type, int id);
+void dpu_hw_blk_put(struct dpu_hw_blk *hw_blk);
+#endif /*_DPU_HW_BLK_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
new file mode 100644
index 000000000000..44ee06398b1d
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.c
@@ -0,0 +1,511 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+#include <linux/slab.h>
+#include <linux/of_address.h>
+#include <linux/platform_device.h>
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_catalog_format.h"
+#include "dpu_kms.h"
+
+#define VIG_SDM845_MASK \
+ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_SCALER_QSEED3) | BIT(DPU_SSPP_QOS) |\
+ BIT(DPU_SSPP_CSC_10BIT) | BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_QOS_8LVL) |\
+ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_EXCL_RECT))
+
+#define DMA_SDM845_MASK \
+ (BIT(DPU_SSPP_SRC) | BIT(DPU_SSPP_QOS) | BIT(DPU_SSPP_QOS_8LVL) |\
+ BIT(DPU_SSPP_TS_PREFILL) | BIT(DPU_SSPP_TS_PREFILL_REC1) |\
+ BIT(DPU_SSPP_CDP) | BIT(DPU_SSPP_EXCL_RECT))
+
+#define MIXER_SDM845_MASK \
+ (BIT(DPU_MIXER_SOURCESPLIT) | BIT(DPU_DIM_LAYER))
+
+#define PINGPONG_SDM845_MASK BIT(DPU_PINGPONG_DITHER)
+
+#define PINGPONG_SDM845_SPLIT_MASK \
+ (PINGPONG_SDM845_MASK | BIT(DPU_PINGPONG_TE2))
+
+#define DEFAULT_PIXEL_RAM_SIZE (50 * 1024)
+#define DEFAULT_DPU_LINE_WIDTH 2048
+#define DEFAULT_DPU_OUTPUT_LINE_WIDTH 2560
+
+#define MAX_HORZ_DECIMATION 4
+#define MAX_VERT_DECIMATION 4
+
+#define MAX_UPSCALE_RATIO 20
+#define MAX_DOWNSCALE_RATIO 4
+#define SSPP_UNITY_SCALE 1
+
+#define STRCAT(X, Y) (X Y)
+
+/*************************************************************
+ * DPU sub blocks config
+ *************************************************************/
+/* DPU top level caps */
+static const struct dpu_caps sdm845_dpu_caps = {
+ .max_mixer_width = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .max_mixer_blendstages = 0xb,
+ .qseed_type = DPU_SSPP_SCALER_QSEED3,
+ .smart_dma_rev = DPU_SSPP_SMART_DMA_V2,
+ .ubwc_version = DPU_HW_UBWC_VER_20,
+ .has_src_split = true,
+ .has_dim_layer = true,
+ .has_idle_pc = true,
+};
+
+static struct dpu_mdp_cfg sdm845_mdp[] = {
+ {
+ .name = "top_0", .id = MDP_TOP,
+ .base = 0x0, .len = 0x45C,
+ .features = 0,
+ .highest_bank_bit = 0x2,
+ .has_dest_scaler = true,
+ .clk_ctrls[DPU_CLK_CTRL_VIG0] = {
+ .reg_off = 0x2AC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG1] = {
+ .reg_off = 0x2B4, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG2] = {
+ .reg_off = 0x2BC, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_VIG3] = {
+ .reg_off = 0x2C4, .bit_off = 0},
+ .clk_ctrls[DPU_CLK_CTRL_DMA0] = {
+ .reg_off = 0x2AC, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_DMA1] = {
+ .reg_off = 0x2B4, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR0] = {
+ .reg_off = 0x2BC, .bit_off = 8},
+ .clk_ctrls[DPU_CLK_CTRL_CURSOR1] = {
+ .reg_off = 0x2C4, .bit_off = 8},
+ },
+};
+
+/*************************************************************
+ * CTL sub blocks config
+ *************************************************************/
+static struct dpu_ctl_cfg sdm845_ctl[] = {
+ {
+ .name = "ctl_0", .id = CTL_0,
+ .base = 0x1000, .len = 0xE4,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY)
+ },
+ {
+ .name = "ctl_1", .id = CTL_1,
+ .base = 0x1200, .len = 0xE4,
+ .features = BIT(DPU_CTL_SPLIT_DISPLAY)
+ },
+ {
+ .name = "ctl_2", .id = CTL_2,
+ .base = 0x1400, .len = 0xE4,
+ .features = 0
+ },
+ {
+ .name = "ctl_3", .id = CTL_3,
+ .base = 0x1600, .len = 0xE4,
+ .features = 0
+ },
+ {
+ .name = "ctl_4", .id = CTL_4,
+ .base = 0x1800, .len = 0xE4,
+ .features = 0
+ },
+};
+
+/*************************************************************
+ * SSPP sub blocks config
+ *************************************************************/
+
+/* SSPP common configuration */
+static const struct dpu_sspp_blks_common sdm845_sspp_common = {
+ .maxlinewidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .pixel_ram_size = DEFAULT_PIXEL_RAM_SIZE,
+ .maxhdeciexp = MAX_HORZ_DECIMATION,
+ .maxvdeciexp = MAX_VERT_DECIMATION,
+};
+
+#define _VIG_SBLK(num, sdma_pri) \
+ { \
+ .common = &sdm845_sspp_common, \
+ .maxdwnscale = MAX_DOWNSCALE_RATIO, \
+ .maxupscale = MAX_UPSCALE_RATIO, \
+ .smart_dma_priority = sdma_pri, \
+ .src_blk = {.name = STRCAT("sspp_src_", num), \
+ .id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
+ .scaler_blk = {.name = STRCAT("sspp_scaler", num), \
+ .id = DPU_SSPP_SCALER_QSEED3, \
+ .base = 0xa00, .len = 0xa0,}, \
+ .csc_blk = {.name = STRCAT("sspp_csc", num), \
+ .id = DPU_SSPP_CSC_10BIT, \
+ .base = 0x1a00, .len = 0x100,}, \
+ .format_list = plane_formats_yuv, \
+ .virt_format_list = plane_formats, \
+ }
+
+#define _DMA_SBLK(num, sdma_pri) \
+ { \
+ .common = &sdm845_sspp_common, \
+ .maxdwnscale = SSPP_UNITY_SCALE, \
+ .maxupscale = SSPP_UNITY_SCALE, \
+ .smart_dma_priority = sdma_pri, \
+ .src_blk = {.name = STRCAT("sspp_src_", num), \
+ .id = DPU_SSPP_SRC, .base = 0x00, .len = 0x150,}, \
+ .format_list = plane_formats, \
+ .virt_format_list = plane_formats, \
+ }
+
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_0 = _VIG_SBLK("0", 5);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_1 = _VIG_SBLK("1", 6);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_2 = _VIG_SBLK("2", 7);
+static const struct dpu_sspp_sub_blks sdm845_vig_sblk_3 = _VIG_SBLK("3", 8);
+
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_0 = _DMA_SBLK("8", 1);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_1 = _DMA_SBLK("9", 2);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_2 = _DMA_SBLK("10", 3);
+static const struct dpu_sspp_sub_blks sdm845_dma_sblk_3 = _DMA_SBLK("11", 4);
+
+#define SSPP_VIG_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
+ { \
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x1c8, \
+ .features = VIG_SDM845_MASK, \
+ .sblk = &_sblk, \
+ .xin_id = _xinid, \
+ .type = SSPP_TYPE_VIG, \
+ .clk_ctrl = _clkctrl \
+ }
+
+#define SSPP_DMA_BLK(_name, _id, _base, _sblk, _xinid, _clkctrl) \
+ { \
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x1c8, \
+ .features = DMA_SDM845_MASK, \
+ .sblk = &_sblk, \
+ .xin_id = _xinid, \
+ .type = SSPP_TYPE_DMA, \
+ .clk_ctrl = _clkctrl \
+ }
+
+static struct dpu_sspp_cfg sdm845_sspp[] = {
+ SSPP_VIG_BLK("sspp_0", SSPP_VIG0, 0x4000,
+ sdm845_vig_sblk_0, 0, DPU_CLK_CTRL_VIG0),
+ SSPP_VIG_BLK("sspp_1", SSPP_VIG1, 0x6000,
+ sdm845_vig_sblk_1, 4, DPU_CLK_CTRL_VIG1),
+ SSPP_VIG_BLK("sspp_2", SSPP_VIG2, 0x8000,
+ sdm845_vig_sblk_2, 8, DPU_CLK_CTRL_VIG2),
+ SSPP_VIG_BLK("sspp_3", SSPP_VIG3, 0xa000,
+ sdm845_vig_sblk_3, 12, DPU_CLK_CTRL_VIG3),
+ SSPP_DMA_BLK("sspp_8", SSPP_DMA0, 0x24000,
+ sdm845_dma_sblk_0, 1, DPU_CLK_CTRL_DMA0),
+ SSPP_DMA_BLK("sspp_9", SSPP_DMA1, 0x26000,
+ sdm845_dma_sblk_1, 5, DPU_CLK_CTRL_DMA1),
+ SSPP_DMA_BLK("sspp_10", SSPP_DMA2, 0x28000,
+ sdm845_dma_sblk_2, 9, DPU_CLK_CTRL_CURSOR0),
+ SSPP_DMA_BLK("sspp_11", SSPP_DMA3, 0x2a000,
+ sdm845_dma_sblk_3, 13, DPU_CLK_CTRL_CURSOR1),
+};
+
+/*************************************************************
+ * MIXER sub blocks config
+ *************************************************************/
+static const struct dpu_lm_sub_blks sdm845_lm_sblk = {
+ .maxwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .maxblendstages = 11, /* excluding base layer */
+ .blendstage_base = { /* offsets relative to mixer base */
+ 0x20, 0x38, 0x50, 0x68, 0x80, 0x98,
+ 0xb0, 0xc8, 0xe0, 0xf8, 0x110
+ },
+};
+
+#define LM_BLK(_name, _id, _base, _ds, _pp, _lmpair) \
+ { \
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x320, \
+ .features = MIXER_SDM845_MASK, \
+ .sblk = &sdm845_lm_sblk, \
+ .ds = _ds, \
+ .pingpong = _pp, \
+ .lm_pair_mask = (1 << _lmpair) \
+ }
+
+static struct dpu_lm_cfg sdm845_lm[] = {
+ LM_BLK("lm_0", LM_0, 0x44000, DS_0, PINGPONG_0, LM_1),
+ LM_BLK("lm_1", LM_1, 0x45000, DS_1, PINGPONG_1, LM_0),
+ LM_BLK("lm_2", LM_2, 0x46000, DS_MAX, PINGPONG_2, LM_5),
+ LM_BLK("lm_3", LM_3, 0x0, DS_MAX, PINGPONG_MAX, 0),
+ LM_BLK("lm_4", LM_4, 0x0, DS_MAX, PINGPONG_MAX, 0),
+ LM_BLK("lm_5", LM_5, 0x49000, DS_MAX, PINGPONG_3, LM_2),
+};
+
+/*************************************************************
+ * DS sub blocks config
+ *************************************************************/
+static const struct dpu_ds_top_cfg sdm845_ds_top = {
+ .name = "ds_top_0", .id = DS_TOP,
+ .base = 0x60000, .len = 0xc,
+ .maxinputwidth = DEFAULT_DPU_LINE_WIDTH,
+ .maxoutputwidth = DEFAULT_DPU_OUTPUT_LINE_WIDTH,
+ .maxupscale = MAX_UPSCALE_RATIO,
+};
+
+#define DS_BLK(_name, _id, _base) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x800, \
+ .features = DPU_SSPP_SCALER_QSEED3, \
+ .top = &sdm845_ds_top \
+ }
+
+static struct dpu_ds_cfg sdm845_ds[] = {
+ DS_BLK("ds_0", DS_0, 0x800),
+ DS_BLK("ds_1", DS_1, 0x1000),
+};
+
+/*************************************************************
+ * PINGPONG sub blocks config
+ *************************************************************/
+static const struct dpu_pingpong_sub_blks sdm845_pp_sblk_te = {
+ .te2 = {.id = DPU_PINGPONG_TE2, .base = 0x2000, .len = 0x0,
+ .version = 0x1},
+ .dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+ .len = 0x20, .version = 0x10000},
+};
+
+static const struct dpu_pingpong_sub_blks sdm845_pp_sblk = {
+ .dither = {.id = DPU_PINGPONG_DITHER, .base = 0x30e0,
+ .len = 0x20, .version = 0x10000},
+};
+
+#define PP_BLK_TE(_name, _id, _base) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0xd4, \
+ .features = PINGPONG_SDM845_SPLIT_MASK, \
+ .sblk = &sdm845_pp_sblk_te \
+ }
+#define PP_BLK(_name, _id, _base) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0xd4, \
+ .features = PINGPONG_SDM845_MASK, \
+ .sblk = &sdm845_pp_sblk \
+ }
+
+static struct dpu_pingpong_cfg sdm845_pp[] = {
+ PP_BLK_TE("pingpong_0", PINGPONG_0, 0x70000),
+ PP_BLK_TE("pingpong_1", PINGPONG_1, 0x70800),
+ PP_BLK("pingpong_2", PINGPONG_2, 0x71000),
+ PP_BLK("pingpong_3", PINGPONG_3, 0x71800),
+};
+
+/*************************************************************
+ * INTF sub blocks config
+ *************************************************************/
+#define INTF_BLK(_name, _id, _base, _type, _ctrl_id) \
+ {\
+ .name = _name, .id = _id, \
+ .base = _base, .len = 0x280, \
+ .type = _type, \
+ .controller_id = _ctrl_id, \
+ .prog_fetch_lines_worst_case = 24 \
+ }
+
+static struct dpu_intf_cfg sdm845_intf[] = {
+ INTF_BLK("intf_0", INTF_0, 0x6A000, INTF_DP, 0),
+ INTF_BLK("intf_1", INTF_1, 0x6A800, INTF_DSI, 0),
+ INTF_BLK("intf_2", INTF_2, 0x6B000, INTF_DSI, 1),
+ INTF_BLK("intf_3", INTF_3, 0x6B800, INTF_DP, 1),
+};
+
+/*************************************************************
+ * CDM sub blocks config
+ *************************************************************/
+static struct dpu_cdm_cfg sdm845_cdm[] = {
+ {
+ .name = "cdm_0", .id = CDM_0,
+ .base = 0x79200, .len = 0x224,
+ .features = 0,
+ .intf_connect = BIT(INTF_3),
+ },
+};
+
+/*************************************************************
+ * VBIF sub blocks config
+ *************************************************************/
+/* VBIF QOS remap */
+static u32 sdm845_rt_pri_lvl[] = {3, 3, 4, 4, 5, 5, 6, 6};
+static u32 sdm845_nrt_pri_lvl[] = {3, 3, 3, 3, 3, 3, 3, 3};
+
+static struct dpu_vbif_cfg sdm845_vbif[] = {
+ {
+ .name = "vbif_0", .id = VBIF_0,
+ .base = 0, .len = 0x1040,
+ .features = BIT(DPU_VBIF_QOS_REMAP),
+ .xin_halt_timeout = 0x4000,
+ .qos_rt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(sdm845_rt_pri_lvl),
+ .priority_lvl = sdm845_rt_pri_lvl,
+ },
+ .qos_nrt_tbl = {
+ .npriority_lvl = ARRAY_SIZE(sdm845_nrt_pri_lvl),
+ .priority_lvl = sdm845_nrt_pri_lvl,
+ },
+ .memtype_count = 14,
+ .memtype = {3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3, 3},
+ },
+};
+
+static struct dpu_reg_dma_cfg sdm845_regdma = {
+ .base = 0x0, .version = 0x1, .trigger_sel_off = 0x119c
+};
+
+/*************************************************************
+ * PERF data config
+ *************************************************************/
+
+/* SSPP QOS LUTs */
+static struct dpu_qos_lut_entry sdm845_qos_linear[] = {
+ {.fl = 4, .lut = 0x357},
+ {.fl = 5, .lut = 0x3357},
+ {.fl = 6, .lut = 0x23357},
+ {.fl = 7, .lut = 0x223357},
+ {.fl = 8, .lut = 0x2223357},
+ {.fl = 9, .lut = 0x22223357},
+ {.fl = 10, .lut = 0x222223357},
+ {.fl = 11, .lut = 0x2222223357},
+ {.fl = 12, .lut = 0x22222223357},
+ {.fl = 13, .lut = 0x222222223357},
+ {.fl = 14, .lut = 0x1222222223357},
+ {.fl = 0, .lut = 0x11222222223357}
+};
+
+static struct dpu_qos_lut_entry sdm845_qos_macrotile[] = {
+ {.fl = 10, .lut = 0x344556677},
+ {.fl = 11, .lut = 0x3344556677},
+ {.fl = 12, .lut = 0x23344556677},
+ {.fl = 13, .lut = 0x223344556677},
+ {.fl = 14, .lut = 0x1223344556677},
+ {.fl = 0, .lut = 0x112233344556677},
+};
+
+static struct dpu_qos_lut_entry sdm845_qos_nrt[] = {
+ {.fl = 0, .lut = 0x0},
+};
+
+static struct dpu_perf_cfg sdm845_perf_data = {
+ .max_bw_low = 6800000,
+ .max_bw_high = 6800000,
+ .min_core_ib = 2400000,
+ .min_llcc_ib = 800000,
+ .min_dram_ib = 800000,
+ .core_ib_ff = "6.0",
+ .core_clk_ff = "1.0",
+ .comp_ratio_rt =
+ "NV12/5/1/1.23 AB24/5/1/1.23 XB24/5/1/1.23",
+ .comp_ratio_nrt =
+ "NV12/5/1/1.25 AB24/5/1/1.25 XB24/5/1/1.25",
+ .undersized_prefill_lines = 2,
+ .xtra_prefill_lines = 2,
+ .dest_scale_prefill_lines = 3,
+ .macrotile_prefill_lines = 4,
+ .yuv_nv12_prefill_lines = 8,
+ .linear_prefill_lines = 1,
+ .downscaling_prefill_lines = 1,
+ .amortizable_threshold = 25,
+ .min_prefill_lines = 24,
+ .danger_lut_tbl = {0xf, 0xffff, 0x0},
+ .qos_lut_tbl = {
+ {.nentry = ARRAY_SIZE(sdm845_qos_linear),
+ .entries = sdm845_qos_linear
+ },
+ {.nentry = ARRAY_SIZE(sdm845_qos_macrotile),
+ .entries = sdm845_qos_macrotile
+ },
+ {.nentry = ARRAY_SIZE(sdm845_qos_nrt),
+ .entries = sdm845_qos_nrt
+ },
+ },
+ .cdp_cfg = {
+ {.rd_enable = 1, .wr_enable = 1},
+ {.rd_enable = 1, .wr_enable = 0}
+ },
+};
+
+/*************************************************************
+ * Hardware catalog init
+ *************************************************************/
+
+/*
+ * sdm845_cfg_init(): populate sdm845 dpu sub-blocks reg offsets
+ * and instance counts.
+ */
+static void sdm845_cfg_init(struct dpu_mdss_cfg *dpu_cfg)
+{
+ *dpu_cfg = (struct dpu_mdss_cfg){
+ .caps = &sdm845_dpu_caps,
+ .mdp_count = ARRAY_SIZE(sdm845_mdp),
+ .mdp = sdm845_mdp,
+ .ctl_count = ARRAY_SIZE(sdm845_ctl),
+ .ctl = sdm845_ctl,
+ .sspp_count = ARRAY_SIZE(sdm845_sspp),
+ .sspp = sdm845_sspp,
+ .mixer_count = ARRAY_SIZE(sdm845_lm),
+ .mixer = sdm845_lm,
+ .ds_count = ARRAY_SIZE(sdm845_ds),
+ .ds = sdm845_ds,
+ .pingpong_count = ARRAY_SIZE(sdm845_pp),
+ .pingpong = sdm845_pp,
+ .cdm_count = ARRAY_SIZE(sdm845_cdm),
+ .cdm = sdm845_cdm,
+ .intf_count = ARRAY_SIZE(sdm845_intf),
+ .intf = sdm845_intf,
+ .vbif_count = ARRAY_SIZE(sdm845_vbif),
+ .vbif = sdm845_vbif,
+ .reg_dma_count = 1,
+ .dma_cfg = sdm845_regdma,
+ .perf = sdm845_perf_data,
+ };
+}
+
+static struct dpu_mdss_hw_cfg_handler cfg_handler[] = {
+ { .hw_rev = DPU_HW_VER_400, .cfg_init = sdm845_cfg_init},
+ { .hw_rev = DPU_HW_VER_401, .cfg_init = sdm845_cfg_init},
+};
+
+void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg)
+{
+ kfree(dpu_cfg);
+}
+
+struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev)
+{
+ int i;
+ struct dpu_mdss_cfg *dpu_cfg;
+
+ dpu_cfg = kzalloc(sizeof(*dpu_cfg), GFP_KERNEL);
+ if (!dpu_cfg)
+ return ERR_PTR(-ENOMEM);
+
+ for (i = 0; i < ARRAY_SIZE(cfg_handler); i++) {
+ if (cfg_handler[i].hw_rev == hw_rev) {
+ cfg_handler[i].cfg_init(dpu_cfg);
+ dpu_cfg->hwversion = hw_rev;
+ return dpu_cfg;
+ }
+ }
+
+ DPU_ERROR("unsupported chipset id:%X\n", hw_rev);
+ dpu_hw_catalog_deinit(dpu_cfg);
+ return ERR_PTR(-ENODEV);
+}
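A probe-path sketch (hypothetical caller): the catalog is selected purely by hardware revision and must be released with dpu_hw_catalog_deinit() when the driver unbinds.

static int example_load_catalog(u32 hw_rev)
{
	struct dpu_mdss_cfg *cfg = dpu_hw_catalog_init(hw_rev);

	if (IS_ERR(cfg))
		return PTR_ERR(cfg);

	/* ... hand cfg to the rest of the driver ... */

	dpu_hw_catalog_deinit(cfg);
	return 0;
}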
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
new file mode 100644
index 000000000000..f0cb0d4fc80e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog.h
@@ -0,0 +1,804 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_CATALOG_H
+#define _DPU_HW_CATALOG_H
+
+#include <linux/kernel.h>
+#include <linux/bug.h>
+#include <linux/bitmap.h>
+#include <linux/err.h>
+#include <drm/drmP.h>
+
+/**
+ * Maximum hardware block count: for example, at most 12 SSPP pipes or
+ * 5 CTL paths. In all cases the current design supports at most
+ * 12 instances of any one hardware block type.
+ */
+#define MAX_BLOCKS 12
+
+#define DPU_HW_VER(MAJOR, MINOR, STEP) (((MAJOR & 0xF) << 28) |\
+ ((MINOR & 0xFFF) << 16) |\
+ (STEP & 0xFFFF))
+
+#define DPU_HW_MAJOR(rev) ((rev) >> 28)
+#define DPU_HW_MINOR(rev) (((rev) >> 16) & 0xFFF)
+#define DPU_HW_STEP(rev) ((rev) & 0xFFFF)
+#define DPU_HW_MAJOR_MINOR(rev) ((rev) >> 16)
+
+#define IS_DPU_MAJOR_MINOR_SAME(rev1, rev2) \
+ (DPU_HW_MAJOR_MINOR((rev1)) == DPU_HW_MAJOR_MINOR((rev2)))
+
+#define DPU_HW_VER_170 DPU_HW_VER(1, 7, 0) /* 8996 v1.0 */
+#define DPU_HW_VER_171 DPU_HW_VER(1, 7, 1) /* 8996 v2.0 */
+#define DPU_HW_VER_172 DPU_HW_VER(1, 7, 2) /* 8996 v3.0 */
+#define DPU_HW_VER_300 DPU_HW_VER(3, 0, 0) /* 8998 v1.0 */
+#define DPU_HW_VER_301 DPU_HW_VER(3, 0, 1) /* 8998 v1.1 */
+#define DPU_HW_VER_400 DPU_HW_VER(4, 0, 0) /* sdm845 v1.0 */
+#define DPU_HW_VER_401 DPU_HW_VER(4, 0, 1) /* sdm845 v2.0 */
+#define DPU_HW_VER_410 DPU_HW_VER(4, 1, 0) /* sdm670 v1.0 */
+#define DPU_HW_VER_500 DPU_HW_VER(5, 0, 0) /* sdm855 v1.0 */
+
+
+#define IS_MSM8996_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_170)
+#define IS_MSM8998_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_300)
+#define IS_SDM845_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_400)
+#define IS_SDM670_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_410)
+#define IS_SDM855_TARGET(rev) IS_DPU_MAJOR_MINOR_SAME((rev), DPU_HW_VER_500)
+
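A worked expansion of the version macros above (for illustration only):

/*
 * DPU_HW_VER(4, 0, 1) = ((4 & 0xF) << 28) | ((0 & 0xFFF) << 16) | (1 & 0xFFFF)
 *                     = 0x40000001
 * DPU_HW_MAJOR_MINOR(0x40000001) = 0x4000, which equals
 * DPU_HW_MAJOR_MINOR(DPU_HW_VER_400), so IS_SDM845_TARGET() is true
 * for both sdm845 v1.0 and v2.0.
 */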
+
+#define DPU_HW_BLK_NAME_LEN 16
+
+#define MAX_IMG_WIDTH 0x3fff
+#define MAX_IMG_HEIGHT 0x3fff
+
+#define CRTC_DUAL_MIXERS 2
+
+#define MAX_XIN_COUNT 16
+
+/**
+ * Supported UBWC feature versions
+ */
+enum {
+ DPU_HW_UBWC_VER_10 = 0x100,
+ DPU_HW_UBWC_VER_20 = 0x200,
+ DPU_HW_UBWC_VER_30 = 0x300,
+};
+
+#define IS_UBWC_20_SUPPORTED(rev) ((rev) >= DPU_HW_UBWC_VER_20)
+
+/**
+ * MDP TOP BLOCK features
+ * @DPU_MDP_PANIC_PER_PIPE Panic configuration needs to be done per pipe
+ * @DPU_MDP_10BIT_SUPPORT, Chipset supports 10 bit pixel formats
+ * @DPU_MDP_BWC, MDSS HW supports Bandwidth compression.
+ * @DPU_MDP_UBWC_1_0, This chipset supports Universal Bandwidth
+ * compression initial revision
+ * @DPU_MDP_UBWC_1_5, Universal Bandwidth compression version 1.5
+ * @DPU_MDP_MAX Maximum value
+ */
+enum {
+ DPU_MDP_PANIC_PER_PIPE = 0x1,
+ DPU_MDP_10BIT_SUPPORT,
+ DPU_MDP_BWC,
+ DPU_MDP_UBWC_1_0,
+ DPU_MDP_UBWC_1_5,
+ DPU_MDP_MAX
+};
+
+/**
+ * SSPP sub-blocks/features
+ * @DPU_SSPP_SRC Src and fetch part of the pipes,
+ * @DPU_SSPP_SCALER_QSEED2, QSEED2 algorithm support
+ * @DPU_SSPP_SCALER_QSEED3, QSEED3 algorithm support
+ * @DPU_SSPP_SCALER_RGB, RGB Scaler, supported by RGB pipes
+ * @DPU_SSPP_CSC, Support of Color space conversion
+ * @DPU_SSPP_CSC_10BIT, Support of 10-bit Color space conversion
+ * @DPU_SSPP_CURSOR, SSPP can be used as a cursor layer
+ * @DPU_SSPP_QOS, SSPP supports QoS control, danger/safe/creq
+ * @DPU_SSPP_QOS_8LVL, SSPP supports 8-level QoS control
+ * @DPU_SSPP_EXCL_RECT, SSPP supports exclusion rect
+ * @DPU_SSPP_SMART_DMA_V1, SmartDMA 1.0 support
+ * @DPU_SSPP_SMART_DMA_V2, SmartDMA 2.0 support
+ * @DPU_SSPP_TS_PREFILL Supports prefill with traffic shaper
+ * @DPU_SSPP_TS_PREFILL_REC1 Supports prefill with traffic shaper multirec
+ * @DPU_SSPP_CDP Supports client driven prefetch
+ * @DPU_SSPP_MAX maximum value
+ */
+enum {
+ DPU_SSPP_SRC = 0x1,
+ DPU_SSPP_SCALER_QSEED2,
+ DPU_SSPP_SCALER_QSEED3,
+ DPU_SSPP_SCALER_RGB,
+ DPU_SSPP_CSC,
+ DPU_SSPP_CSC_10BIT,
+ DPU_SSPP_CURSOR,
+ DPU_SSPP_QOS,
+ DPU_SSPP_QOS_8LVL,
+ DPU_SSPP_EXCL_RECT,
+ DPU_SSPP_SMART_DMA_V1,
+ DPU_SSPP_SMART_DMA_V2,
+ DPU_SSPP_TS_PREFILL,
+ DPU_SSPP_TS_PREFILL_REC1,
+ DPU_SSPP_CDP,
+ DPU_SSPP_MAX
+};
+
+/*
+ * MIXER sub-blocks/features
+ * @DPU_MIXER_LAYER Layer mixer layer blend configuration,
+ * @DPU_MIXER_SOURCESPLIT Layer mixer supports source-split configuration
+ * @DPU_MIXER_GC Gamma correction block
+ * @DPU_DIM_LAYER Layer mixer supports dim layer
+ * @DPU_MIXER_MAX maximum value
+ */
+enum {
+ DPU_MIXER_LAYER = 0x1,
+ DPU_MIXER_SOURCESPLIT,
+ DPU_MIXER_GC,
+ DPU_DIM_LAYER,
+ DPU_MIXER_MAX
+};
+
+/**
+ * PINGPONG sub-blocks
+ * @DPU_PINGPONG_TE Tear check block
+ * @DPU_PINGPONG_TE2 Additional tear check block for split pipes
+ * @DPU_PINGPONG_SPLIT PP block supports split fifo
+ * @DPU_PINGPONG_SLAVE PP block is a suitable slave for split fifo
+ * @DPU_PINGPONG_DITHER, Dither blocks
+ * @DPU_PINGPONG_MAX
+ */
+enum {
+ DPU_PINGPONG_TE = 0x1,
+ DPU_PINGPONG_TE2,
+ DPU_PINGPONG_SPLIT,
+ DPU_PINGPONG_SLAVE,
+ DPU_PINGPONG_DITHER,
+ DPU_PINGPONG_MAX
+};
+
+/**
+ * CTL sub-blocks
+ * @DPU_CTL_SPLIT_DISPLAY CTL supports video mode split display
+ * @DPU_CTL_MAX
+ */
+enum {
+ DPU_CTL_SPLIT_DISPLAY = 0x1,
+ DPU_CTL_MAX
+};
+
+/**
+ * VBIF sub-blocks and features
+ * @DPU_VBIF_QOS_OTLIM VBIF supports OT Limit
+ * @DPU_VBIF_QOS_REMAP VBIF supports QoS priority remap
+ * @DPU_VBIF_MAX maximum value
+ */
+enum {
+ DPU_VBIF_QOS_OTLIM = 0x1,
+ DPU_VBIF_QOS_REMAP,
+ DPU_VBIF_MAX
+};
+
+/**
+ * MACRO DPU_HW_BLK_INFO - information of HW blocks inside DPU
+ * @name: string name for debug purposes
+ * @id: enum identifying this block
+ * @base: register base offset to mdss
+ * @len: length of hardware block
+ * @features bit mask identifying sub-blocks/features
+ */
+#define DPU_HW_BLK_INFO \
+ char name[DPU_HW_BLK_NAME_LEN]; \
+ u32 id; \
+ u32 base; \
+ u32 len; \
+ unsigned long features
+
+/**
+ * MACRO DPU_HW_SUBBLK_INFO - information of HW sub-block inside DPU
+ * @name: string name for debug purposes
+ * @id: enum identifying this sub-block
+ * @base: offset of this sub-block relative to the block
+ * offset
+ * @len: register block length of this sub-block
+ */
+#define DPU_HW_SUBBLK_INFO \
+ char name[DPU_HW_BLK_NAME_LEN]; \
+ u32 id; \
+ u32 base; \
+ u32 len
+
+/**
+ * struct dpu_src_blk: SSPP part of the source pipes
+ * @info: HW register and features supported by this sub-blk
+ */
+struct dpu_src_blk {
+ DPU_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct dpu_scaler_blk: Scaler information
+ * @info: HW register and features supported by this sub-blk
+ * @version: qseed block revision
+ */
+struct dpu_scaler_blk {
+ DPU_HW_SUBBLK_INFO;
+ u32 version;
+};
+
+struct dpu_csc_blk {
+ DPU_HW_SUBBLK_INFO;
+};
+
+/**
+ * struct dpu_pp_blk : Pixel processing sub-blk information
+ * @info: HW register and features supported by this sub-blk
+ * @version: HW Algorithm version
+ */
+struct dpu_pp_blk {
+ DPU_HW_SUBBLK_INFO;
+ u32 version;
+};
+
+/**
+ * struct dpu_format_extended - define dpu specific pixel format+modifier
+ * @fourcc_format: Base FOURCC pixel format code
+ * @modifier: 64-bit drm format modifier, same modifier must be applied to all
+ * framebuffer planes
+ */
+struct dpu_format_extended {
+ uint32_t fourcc_format;
+ uint64_t modifier;
+};
+
+/**
+ * enum dpu_qos_lut_usage - define QoS LUT use cases
+ */
+enum dpu_qos_lut_usage {
+ DPU_QOS_LUT_USAGE_LINEAR,
+ DPU_QOS_LUT_USAGE_MACROTILE,
+ DPU_QOS_LUT_USAGE_NRT,
+ DPU_QOS_LUT_USAGE_MAX,
+};
+
+/**
+ * struct dpu_qos_lut_entry - define QoS LUT table entry
+ * @fl: fill level, or zero on last entry to indicate default lut
+ * @lut: lut to use if equal to or less than fill level
+ */
+struct dpu_qos_lut_entry {
+ u32 fl;
+ u64 lut;
+};
+
+/**
+ * struct dpu_qos_lut_tbl - define QoS LUT table
+ * @nentry: number of entry in this table
+ * @entries: Pointer to table entries
+ */
+struct dpu_qos_lut_tbl {
+ u32 nentry;
+ struct dpu_qos_lut_entry *entries;
+};
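A lookup sketch for the LUT tables above (hypothetical helper, not part of this patch): entries are ordered by fill level, and the terminating fl == 0 entry acts as the default.

static u64 example_qos_lut_lookup(const struct dpu_qos_lut_tbl *tbl, u32 fl)
{
	u32 i;

	for (i = 0; i < tbl->nentry; i++) {
		/* fl == 0 marks the default entry on the last row */
		if (tbl->entries[i].fl == 0 || fl <= tbl->entries[i].fl)
			return tbl->entries[i].lut;
	}
	return 0;
}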
+
+/**
+ * struct dpu_caps - define DPU capabilities
+ * @max_mixer_width max layer mixer line width support.
+ * @max_mixer_blendstages max layer mixer blend stages or
+ * supported z order
+ * @qseed_type qseed2 or qseed3 support.
+ * @smart_dma_rev Supported version of SmartDMA feature.
+ * @ubwc_version UBWC feature version (0x0 for not supported)
+ * @has_src_split source split feature status
+ * @has_dim_layer dim layer feature status
+ * @has_idle_pc indicate if idle power collapse feature is supported
+ */
+struct dpu_caps {
+ u32 max_mixer_width;
+ u32 max_mixer_blendstages;
+ u32 qseed_type;
+ u32 smart_dma_rev;
+ u32 ubwc_version;
+ bool has_src_split;
+ bool has_dim_layer;
+ bool has_idle_pc;
+};
+
+/**
+ * struct dpu_sspp_blks_common : SSPP sub-blocks common configuration
+ * @maxlinewidth: max pixel width supported by this pipe
+ * @pixel_ram_size: size of latency hiding and de-tiling buffer in bytes
+ * @maxhdeciexp: max horizontal decimation supported by this pipe
+ * (max is 2^value)
+ * @maxvdeciexp: max vertical decimation supported by this pipe
+ * (max is 2^value)
+ */
+struct dpu_sspp_blks_common {
+ u32 maxlinewidth;
+ u32 pixel_ram_size;
+ u32 maxhdeciexp;
+ u32 maxvdeciexp;
+};
+
+/**
+ * struct dpu_sspp_sub_blks : SSPP sub-blocks
+ * @common: Pointer to common configurations shared by sub blocks
+ * @creq_vblank: creq priority during vertical blanking
+ * @danger_vblank: danger priority during vertical blanking
+ * @maxdwnscale: max downscale ratio supported (without DECIMATION)
+ * @maxupscale: max upscale ratio supported
+ * @smart_dma_priority: hw priority of rect1 of multirect pipe
+ * @max_per_pipe_bw: maximum allowable bandwidth of this pipe in kBps
+ * @src_blk:
+ * @scaler_blk:
+ * @csc_blk:
+ * @hsic_blk:
+ * @memcolor_blk:
+ * @pcc_blk:
+ * @igc_blk:
+ * @format_list: Pointer to list of supported formats
+ * @virt_format_list: Pointer to list of supported formats for virtual planes
+ */
+struct dpu_sspp_sub_blks {
+ const struct dpu_sspp_blks_common *common;
+ u32 creq_vblank;
+ u32 danger_vblank;
+ u32 maxdwnscale;
+ u32 maxupscale;
+ u32 smart_dma_priority;
+ u32 max_per_pipe_bw;
+ struct dpu_src_blk src_blk;
+ struct dpu_scaler_blk scaler_blk;
+ struct dpu_pp_blk csc_blk;
+ struct dpu_pp_blk hsic_blk;
+ struct dpu_pp_blk memcolor_blk;
+ struct dpu_pp_blk pcc_blk;
+ struct dpu_pp_blk igc_blk;
+
+ const struct dpu_format_extended *format_list;
+ const struct dpu_format_extended *virt_format_list;
+};
+
+/**
+ * struct dpu_lm_sub_blks: information of mixer block
+ * @maxwidth: Max pixel width supported by this mixer
+ * @maxblendstages: Max number of blend-stages supported
+ * @blendstage_base: Blend-stage register base offset
+ * @gc: gamma correction block
+ */
+struct dpu_lm_sub_blks {
+ u32 maxwidth;
+ u32 maxblendstages;
+ u32 blendstage_base[MAX_BLOCKS];
+ struct dpu_pp_blk gc;
+};
+
+struct dpu_pingpong_sub_blks {
+ struct dpu_pp_blk te;
+ struct dpu_pp_blk te2;
+ struct dpu_pp_blk dither;
+};
+
+/**
+ * dpu_clk_ctrl_type - Defines top level clock control signals
+ */
+enum dpu_clk_ctrl_type {
+ DPU_CLK_CTRL_NONE,
+ DPU_CLK_CTRL_VIG0,
+ DPU_CLK_CTRL_VIG1,
+ DPU_CLK_CTRL_VIG2,
+ DPU_CLK_CTRL_VIG3,
+ DPU_CLK_CTRL_VIG4,
+ DPU_CLK_CTRL_RGB0,
+ DPU_CLK_CTRL_RGB1,
+ DPU_CLK_CTRL_RGB2,
+ DPU_CLK_CTRL_RGB3,
+ DPU_CLK_CTRL_DMA0,
+ DPU_CLK_CTRL_DMA1,
+ DPU_CLK_CTRL_CURSOR0,
+ DPU_CLK_CTRL_CURSOR1,
+ DPU_CLK_CTRL_INLINE_ROT0_SSPP,
+ DPU_CLK_CTRL_MAX,
+};
+
+/* struct dpu_clk_ctrl_reg : Clock control register
+ * @reg_off: register offset
+ * @bit_off: bit offset
+ */
+struct dpu_clk_ctrl_reg {
+ u32 reg_off;
+ u32 bit_off;
+};
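A sketch of how a dpu_clk_ctrl_reg entry might be applied (hypothetical helper; assumes a mapped register base and <linux/io.h>):

static void example_set_clk_ctrl(void __iomem *base,
				 const struct dpu_clk_ctrl_reg *reg,
				 bool enable)
{
	u32 val = readl_relaxed(base + reg->reg_off);

	if (enable)
		val |= BIT(reg->bit_off);
	else
		val &= ~BIT(reg->bit_off);

	writel_relaxed(val, base + reg->reg_off);
}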
+
+/* struct dpu_mdp_cfg : MDP TOP-BLK instance info
+ * @id: index identifying this block
+ * @base: register base offset to mdss
+ * @features bit mask identifying sub-blocks/features
+ * @highest_bank_bit: UBWC parameter
+ * @ubwc_static: ubwc static configuration
+ * @ubwc_swizzle: ubwc default swizzle setting
+ * @has_dest_scaler: indicates support of destination scaler
+ * @clk_ctrls clock control register definition
+ */
+struct dpu_mdp_cfg {
+ DPU_HW_BLK_INFO;
+ u32 highest_bank_bit;
+ u32 ubwc_static;
+ u32 ubwc_swizzle;
+ bool has_dest_scaler;
+ struct dpu_clk_ctrl_reg clk_ctrls[DPU_CLK_CTRL_MAX];
+};
+
+/* struct dpu_ctl_cfg : CTL instance info
+ * @id: index identifying this block
+ * @base: register base offset to mdss
+ * @features bit mask identifying sub-blocks/features
+ */
+struct dpu_ctl_cfg {
+ DPU_HW_BLK_INFO;
+};
+
+/**
+ * struct dpu_sspp_cfg - information of source pipes
+ * @id: index identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @sblk: SSPP sub-blocks information
+ * @xin_id: bus client identifier
+ * @clk_ctrl clock control identifier
+ * @type sspp type identifier
+ */
+struct dpu_sspp_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_sspp_sub_blks *sblk;
+ u32 xin_id;
+ enum dpu_clk_ctrl_type clk_ctrl;
+ u32 type;
+};
+
+/**
+ * struct dpu_lm_cfg - information of layer mixer blocks
+ * @id: index identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @sblk: LM Sub-blocks information
+ * @pingpong: ID of connected PingPong, PINGPONG_MAX if unsupported
+ * @ds: ID of connected DS, DS_MAX if unsupported
+ * @lm_pair_mask: Bitmask of LMs that can be controlled by same CTL
+ */
+struct dpu_lm_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_lm_sub_blks *sblk;
+ u32 pingpong;
+ u32 ds;
+ unsigned long lm_pair_mask;
+};
+
+/**
+ * struct dpu_ds_top_cfg - information of dest scaler top
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying features
+ * @version hw version of dest scaler
+ * @maxinputwidth maximum input line width
+ * @maxoutputwidth maximum output line width
+ * @maxupscale maximum upscale ratio
+ */
+struct dpu_ds_top_cfg {
+ DPU_HW_BLK_INFO;
+ u32 version;
+ u32 maxinputwidth;
+ u32 maxoutputwidth;
+ u32 maxupscale;
+};
+
+/**
+ * struct dpu_ds_cfg - information of dest scaler blocks
+ * @id enum identifying this block
+ * @base register offset wrt DS top offset
+ * @features bit mask identifying features
+ * @version hw version of the qseed block
+ * @top DS top information
+ */
+struct dpu_ds_cfg {
+ DPU_HW_BLK_INFO;
+ u32 version;
+ const struct dpu_ds_top_cfg *top;
+};
+
+/**
+ * struct dpu_pingpong_cfg - information of PING-PONG blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @sblk sub-blocks information
+ */
+struct dpu_pingpong_cfg {
+ DPU_HW_BLK_INFO;
+ const struct dpu_pingpong_sub_blks *sblk;
+};
+
+/**
+ * struct dpu_cdm_cfg - information of chroma down blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @intf_connect Bitmask of INTF IDs this CDM can connect to
+ */
+struct dpu_cdm_cfg {
+ DPU_HW_BLK_INFO;
+ unsigned long intf_connect;
+};
+
+/**
+ * struct dpu_intf_cfg - information of timing engine blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @type: Interface type (DSI, DP, HDMI)
+ * @controller_id: Controller instance ID, for targets with multiple
+ * interfaces of the same type
+ * @prog_fetch_lines_worst_case Worst-case latency, in lines, needed to prefetch
+ */
+struct dpu_intf_cfg {
+ DPU_HW_BLK_INFO;
+ u32 type; /* interface type */
+ u32 controller_id;
+ u32 prog_fetch_lines_worst_case;
+};
+
+/**
+ * struct dpu_vbif_dynamic_ot_cfg - dynamic OT setting
+ * @pps pixels per second
+ * @ot_limit OT limit to use up to specified pixel per second
+ */
+struct dpu_vbif_dynamic_ot_cfg {
+ u64 pps;
+ u32 ot_limit;
+};
+
+/**
+ * struct dpu_vbif_dynamic_ot_tbl - dynamic OT setting table
+ * @count length of cfg
+ * @cfg pointer to array of configuration settings with
+ * ascending requirements
+ */
+struct dpu_vbif_dynamic_ot_tbl {
+ u32 count;
+ struct dpu_vbif_dynamic_ot_cfg *cfg;
+};
+
+/**
+ * struct dpu_vbif_qos_tbl - QoS priority table
+ * @npriority_lvl num of priority level
+ * @priority_lvl pointer to array of priority level in ascending order
+ */
+struct dpu_vbif_qos_tbl {
+ u32 npriority_lvl;
+ u32 *priority_lvl;
+};
+
+/**
+ * struct dpu_vbif_cfg - information of VBIF blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @default_ot_rd_limit default OT read limit
+ * @default_ot_wr_limit default OT write limit
+ * @xin_halt_timeout maximum time (in usec) for xin to halt
+ * @dynamic_ot_rd_tbl dynamic OT read configuration table
+ * @dynamic_ot_wr_tbl dynamic OT write configuration table
+ * @qos_rt_tbl real-time QoS priority table
+ * @qos_nrt_tbl non-real-time QoS priority table
+ * @memtype_count number of defined memtypes
+ * @memtype array of xin memtype definitions
+ */
+struct dpu_vbif_cfg {
+ DPU_HW_BLK_INFO;
+ u32 default_ot_rd_limit;
+ u32 default_ot_wr_limit;
+ u32 xin_halt_timeout;
+ struct dpu_vbif_dynamic_ot_tbl dynamic_ot_rd_tbl;
+ struct dpu_vbif_dynamic_ot_tbl dynamic_ot_wr_tbl;
+ struct dpu_vbif_qos_tbl qos_rt_tbl;
+ struct dpu_vbif_qos_tbl qos_nrt_tbl;
+ u32 memtype_count;
+ u32 memtype[MAX_XIN_COUNT];
+};
+
+/**
+ * struct dpu_reg_dma_cfg - information of lut dma blocks
+ * @id enum identifying this block
+ * @base register offset of this block
+ * @features bit mask identifying sub-blocks/features
+ * @version version of lutdma hw block
+ * @trigger_sel_off offset to trigger select registers of lutdma
+ */
+struct dpu_reg_dma_cfg {
+ DPU_HW_BLK_INFO;
+ u32 version;
+ u32 trigger_sel_off;
+};
+
+/**
+ * Define CDP use cases
+ * @DPU_PERF_CDP_USAGE_RT: real-time use cases
+ * @DPU_PERF_CDP_USAGE_NRT: non real-time use cases such as WFD
+ */
+enum {
+ DPU_PERF_CDP_USAGE_RT,
+ DPU_PERF_CDP_USAGE_NRT,
+ DPU_PERF_CDP_USAGE_MAX
+};
+
+/**
+ * struct dpu_perf_cdp_cfg - define CDP use case configuration
+ * @rd_enable: true if read pipe CDP is enabled
+ * @wr_enable: true if write pipe CDP is enabled
+ */
+struct dpu_perf_cdp_cfg {
+ bool rd_enable;
+ bool wr_enable;
+};
+
+/**
+ * struct dpu_perf_cfg - performance control settings
+ * @max_bw_low low threshold of maximum bandwidth (kbps)
+ * @max_bw_high high threshold of maximum bandwidth (kbps)
+ * @min_core_ib minimum mnoc ib vote in kbps
+ * @min_llcc_ib minimum llcc ib vote in kbps
+ * @min_dram_ib minimum dram ib vote in kbps
+ * @core_ib_ff core instantaneous bandwidth fudge factor
+ * @core_clk_ff core clock fudge factor
+ * @comp_ratio_rt string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
+ * @comp_ratio_nrt string of 0 or more of <fourcc>/<ven>/<mod>/<comp ratio>
+ * @undersized_prefill_lines undersized prefill in lines
+ * @xtra_prefill_lines extra prefill latency in lines
+ * @dest_scale_prefill_lines destination scaler latency in lines
+ * @macrotile_prefill_lines macrotile latency in lines
+ * @yuv_nv12_prefill_lines yuv_nv12 latency in lines
+ * @linear_prefill_lines linear latency in lines
+ * @downscaling_prefill_lines downscaling latency in lines
+ * @amortizable_threshold minimum y position for traffic shaping prefill
+ * @min_prefill_lines minimum pipeline latency in lines
+ * @safe_lut_tbl: LUT tables for safe signals
+ * @danger_lut_tbl: LUT tables for danger signals
+ * @qos_lut_tbl: LUT tables for QoS signals
+ * @cdp_cfg cdp use case configurations
+ */
+struct dpu_perf_cfg {
+ u32 max_bw_low;
+ u32 max_bw_high;
+ u32 min_core_ib;
+ u32 min_llcc_ib;
+ u32 min_dram_ib;
+ const char *core_ib_ff;
+ const char *core_clk_ff;
+ const char *comp_ratio_rt;
+ const char *comp_ratio_nrt;
+ u32 undersized_prefill_lines;
+ u32 xtra_prefill_lines;
+ u32 dest_scale_prefill_lines;
+ u32 macrotile_prefill_lines;
+ u32 yuv_nv12_prefill_lines;
+ u32 linear_prefill_lines;
+ u32 downscaling_prefill_lines;
+ u32 amortizable_threshold;
+ u32 min_prefill_lines;
+ u32 safe_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+ u32 danger_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+ struct dpu_qos_lut_tbl qos_lut_tbl[DPU_QOS_LUT_USAGE_MAX];
+ struct dpu_perf_cdp_cfg cdp_cfg[DPU_PERF_CDP_USAGE_MAX];
+};
+
+/**
+ * struct dpu_mdss_cfg - information of MDSS HW
+ * This is the main catalog data structure representing
+ * this HW version. Contains number of instances,
+ * register offsets, and capabilities of all the MDSS HW sub-blocks.
+ *
+ * @dma_formats Supported formats for dma pipe
+ * @cursor_formats Supported formats for cursor pipe
+ * @vig_formats Supported formats for vig pipe
+ */
+struct dpu_mdss_cfg {
+ u32 hwversion;
+
+ const struct dpu_caps *caps;
+
+ u32 mdp_count;
+ struct dpu_mdp_cfg *mdp;
+
+ u32 ctl_count;
+ struct dpu_ctl_cfg *ctl;
+
+ u32 sspp_count;
+ struct dpu_sspp_cfg *sspp;
+
+ u32 mixer_count;
+ struct dpu_lm_cfg *mixer;
+
+ u32 ds_count;
+ struct dpu_ds_cfg *ds;
+
+ u32 pingpong_count;
+ struct dpu_pingpong_cfg *pingpong;
+
+ u32 cdm_count;
+ struct dpu_cdm_cfg *cdm;
+
+ u32 intf_count;
+ struct dpu_intf_cfg *intf;
+
+ u32 vbif_count;
+ struct dpu_vbif_cfg *vbif;
+
+ u32 reg_dma_count;
+ struct dpu_reg_dma_cfg dma_cfg;
+
+ u32 ad_count;
+
+ /* Add additional block data structures here */
+
+ struct dpu_perf_cfg perf;
+ struct dpu_format_extended *dma_formats;
+ struct dpu_format_extended *cursor_formats;
+ struct dpu_format_extended *vig_formats;
+};
+
+struct dpu_mdss_hw_cfg_handler {
+ u32 hw_rev;
+ void (*cfg_init)(struct dpu_mdss_cfg *dpu_cfg);
+};
+
+/*
+ * Access Macros
+ */
+#define BLK_MDP(s) ((s)->mdp)
+#define BLK_CTL(s) ((s)->ctl)
+#define BLK_VIG(s) ((s)->vig)
+#define BLK_RGB(s) ((s)->rgb)
+#define BLK_DMA(s) ((s)->dma)
+#define BLK_CURSOR(s) ((s)->cursor)
+#define BLK_MIXER(s) ((s)->mixer)
+#define BLK_DS(s) ((s)->ds)
+#define BLK_PINGPONG(s) ((s)->pingpong)
+#define BLK_CDM(s) ((s)->cdm)
+#define BLK_INTF(s) ((s)->intf)
+#define BLK_AD(s) ((s)->ad)
+
+/**
+ * dpu_hw_catalog_init - dpu hardware catalog init API; retrieves
+ * hardcoded, target-specific catalog information into a config structure
+ * @hw_rev: caller needs to provide the hardware revision.
+ *
+ * Return: dpu config structure
+ */
+struct dpu_mdss_cfg *dpu_hw_catalog_init(u32 hw_rev);
+
+/**
+ * dpu_hw_catalog_deinit - dpu hardware catalog cleanup
+ * @dpu_cfg: pointer returned from init function
+ */
+void dpu_hw_catalog_deinit(struct dpu_mdss_cfg *dpu_cfg);
+
+/**
+ * dpu_hw_sspp_multirect_enabled - check multirect enabled for the sspp
+ * @cfg: pointer to sspp cfg
+ */
+static inline bool dpu_hw_sspp_multirect_enabled(const struct dpu_sspp_cfg *cfg)
+{
+ return test_bit(DPU_SSPP_SMART_DMA_V1, &cfg->features) ||
+ test_bit(DPU_SSPP_SMART_DMA_V2, &cfg->features);
+}
+#endif /* _DPU_HW_CATALOG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
new file mode 100644
index 000000000000..3c9f028628ef
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_catalog_format.h
@@ -0,0 +1,168 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hw_mdss.h"
+
+static const struct dpu_format_extended plane_formats[] = {
+ {DRM_FORMAT_ARGB8888, 0},
+ {DRM_FORMAT_ABGR8888, 0},
+ {DRM_FORMAT_RGBA8888, 0},
+ {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_BGRA8888, 0},
+ {DRM_FORMAT_XRGB8888, 0},
+ {DRM_FORMAT_RGBX8888, 0},
+ {DRM_FORMAT_BGRX8888, 0},
+ {DRM_FORMAT_XBGR8888, 0},
+ {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_RGB888, 0},
+ {DRM_FORMAT_BGR888, 0},
+ {DRM_FORMAT_RGB565, 0},
+ {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_BGR565, 0},
+ {DRM_FORMAT_ARGB1555, 0},
+ {DRM_FORMAT_ABGR1555, 0},
+ {DRM_FORMAT_RGBA5551, 0},
+ {DRM_FORMAT_BGRA5551, 0},
+ {DRM_FORMAT_XRGB1555, 0},
+ {DRM_FORMAT_XBGR1555, 0},
+ {DRM_FORMAT_RGBX5551, 0},
+ {DRM_FORMAT_BGRX5551, 0},
+ {DRM_FORMAT_ARGB4444, 0},
+ {DRM_FORMAT_ABGR4444, 0},
+ {DRM_FORMAT_RGBA4444, 0},
+ {DRM_FORMAT_BGRA4444, 0},
+ {DRM_FORMAT_XRGB4444, 0},
+ {DRM_FORMAT_XBGR4444, 0},
+ {DRM_FORMAT_RGBX4444, 0},
+ {DRM_FORMAT_BGRX4444, 0},
+ {0, 0},
+};
+
+static const struct dpu_format_extended plane_formats_yuv[] = {
+ {DRM_FORMAT_ARGB8888, 0},
+ {DRM_FORMAT_ABGR8888, 0},
+ {DRM_FORMAT_RGBA8888, 0},
+ {DRM_FORMAT_BGRX8888, 0},
+ {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_BGRA8888, 0},
+ {DRM_FORMAT_XRGB8888, 0},
+ {DRM_FORMAT_XBGR8888, 0},
+ {DRM_FORMAT_RGBX8888, 0},
+ {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_RGB888, 0},
+ {DRM_FORMAT_BGR888, 0},
+ {DRM_FORMAT_RGB565, 0},
+ {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_BGR565, 0},
+ {DRM_FORMAT_ARGB1555, 0},
+ {DRM_FORMAT_ABGR1555, 0},
+ {DRM_FORMAT_RGBA5551, 0},
+ {DRM_FORMAT_BGRA5551, 0},
+ {DRM_FORMAT_XRGB1555, 0},
+ {DRM_FORMAT_XBGR1555, 0},
+ {DRM_FORMAT_RGBX5551, 0},
+ {DRM_FORMAT_BGRX5551, 0},
+ {DRM_FORMAT_ARGB4444, 0},
+ {DRM_FORMAT_ABGR4444, 0},
+ {DRM_FORMAT_RGBA4444, 0},
+ {DRM_FORMAT_BGRA4444, 0},
+ {DRM_FORMAT_XRGB4444, 0},
+ {DRM_FORMAT_XBGR4444, 0},
+ {DRM_FORMAT_RGBX4444, 0},
+ {DRM_FORMAT_BGRX4444, 0},
+
+ {DRM_FORMAT_NV12, 0},
+ {DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_NV21, 0},
+ {DRM_FORMAT_NV16, 0},
+ {DRM_FORMAT_NV61, 0},
+ {DRM_FORMAT_VYUY, 0},
+ {DRM_FORMAT_UYVY, 0},
+ {DRM_FORMAT_YUYV, 0},
+ {DRM_FORMAT_YVYU, 0},
+ {DRM_FORMAT_YUV420, 0},
+ {DRM_FORMAT_YVU420, 0},
+ {0, 0},
+};
+
+static const struct dpu_format_extended cursor_formats[] = {
+ {DRM_FORMAT_ARGB8888, 0},
+ {DRM_FORMAT_ABGR8888, 0},
+ {DRM_FORMAT_RGBA8888, 0},
+ {DRM_FORMAT_BGRA8888, 0},
+ {DRM_FORMAT_XRGB8888, 0},
+ {DRM_FORMAT_ARGB1555, 0},
+ {DRM_FORMAT_ABGR1555, 0},
+ {DRM_FORMAT_RGBA5551, 0},
+ {DRM_FORMAT_BGRA5551, 0},
+ {DRM_FORMAT_ARGB4444, 0},
+ {DRM_FORMAT_ABGR4444, 0},
+ {DRM_FORMAT_RGBA4444, 0},
+ {DRM_FORMAT_BGRA4444, 0},
+ {0, 0},
+};
+
+static const struct dpu_format_extended wb2_formats[] = {
+ {DRM_FORMAT_RGB565, 0},
+ {DRM_FORMAT_BGR565, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_RGB888, 0},
+ {DRM_FORMAT_ARGB8888, 0},
+ {DRM_FORMAT_RGBA8888, 0},
+ {DRM_FORMAT_ABGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_XRGB8888, 0},
+ {DRM_FORMAT_RGBX8888, 0},
+ {DRM_FORMAT_XBGR8888, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_ARGB1555, 0},
+ {DRM_FORMAT_RGBA5551, 0},
+ {DRM_FORMAT_XRGB1555, 0},
+ {DRM_FORMAT_RGBX5551, 0},
+ {DRM_FORMAT_ARGB4444, 0},
+ {DRM_FORMAT_RGBA4444, 0},
+ {DRM_FORMAT_RGBX4444, 0},
+ {DRM_FORMAT_XRGB4444, 0},
+
+ {DRM_FORMAT_BGR565, 0},
+ {DRM_FORMAT_BGR888, 0},
+ {DRM_FORMAT_ABGR8888, 0},
+ {DRM_FORMAT_BGRA8888, 0},
+ {DRM_FORMAT_BGRX8888, 0},
+ {DRM_FORMAT_XBGR8888, 0},
+ {DRM_FORMAT_ABGR1555, 0},
+ {DRM_FORMAT_BGRA5551, 0},
+ {DRM_FORMAT_XBGR1555, 0},
+ {DRM_FORMAT_BGRX5551, 0},
+ {DRM_FORMAT_ABGR4444, 0},
+ {DRM_FORMAT_BGRA4444, 0},
+ {DRM_FORMAT_BGRX4444, 0},
+ {DRM_FORMAT_XBGR4444, 0},
+
+ {DRM_FORMAT_YUV420, 0},
+ {DRM_FORMAT_NV12, 0},
+ {DRM_FORMAT_NV12, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_NV16, 0},
+ {DRM_FORMAT_YUYV, 0},
+
+ {0, 0},
+};
+
+static const struct dpu_format_extended rgb_10bit_formats[] = {
+ {DRM_FORMAT_BGRA1010102, 0},
+ {DRM_FORMAT_BGRX1010102, 0},
+ {DRM_FORMAT_RGBA1010102, 0},
+ {DRM_FORMAT_RGBX1010102, 0},
+ {DRM_FORMAT_ABGR2101010, 0},
+ {DRM_FORMAT_ABGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_XBGR2101010, 0},
+ {DRM_FORMAT_XBGR2101010, DRM_FORMAT_MOD_QCOM_COMPRESSED},
+ {DRM_FORMAT_ARGB2101010, 0},
+ {DRM_FORMAT_XRGB2101010, 0},
+};
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
new file mode 100644
index 000000000000..554874ba0c3b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.c
@@ -0,0 +1,323 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define CDM_CSC_10_OPMODE 0x000
+#define CDM_CSC_10_BASE 0x004
+
+#define CDM_CDWN2_OP_MODE 0x100
+#define CDM_CDWN2_CLAMP_OUT 0x104
+#define CDM_CDWN2_PARAMS_3D_0 0x108
+#define CDM_CDWN2_PARAMS_3D_1 0x10C
+#define CDM_CDWN2_COEFF_COSITE_H_0 0x110
+#define CDM_CDWN2_COEFF_COSITE_H_1 0x114
+#define CDM_CDWN2_COEFF_COSITE_H_2 0x118
+#define CDM_CDWN2_COEFF_OFFSITE_H_0 0x11C
+#define CDM_CDWN2_COEFF_OFFSITE_H_1 0x120
+#define CDM_CDWN2_COEFF_OFFSITE_H_2 0x124
+#define CDM_CDWN2_COEFF_COSITE_V 0x128
+#define CDM_CDWN2_COEFF_OFFSITE_V 0x12C
+#define CDM_CDWN2_OUT_SIZE 0x130
+
+#define CDM_HDMI_PACK_OP_MODE 0x200
+#define CDM_CSC_10_MATRIX_COEFF_0 0x004
+
+/**
+ * Horizontal coefficients for cosite chroma downscale
+ * s13 representation of coefficients
+ */
+static u32 cosite_h_coeff[] = {0x00000016, 0x000001cc, 0x0100009e};
+
+/**
+ * Horizontal coefficients for offsite chroma downscale
+ */
+static u32 offsite_h_coeff[] = {0x000b0005, 0x01db01eb, 0x00e40046};
+
+/**
+ * Vertical coefficients for cosite chroma downscale
+ */
+static u32 cosite_v_coeff[] = {0x00080004};
+/**
+ * Vertical coefficients for offsite chroma downscale
+ */
+static u32 offsite_v_coeff[] = {0x00060002};
+
+/* Limited Range rgb2yuv coeff with clamp and bias values for CSC 10 module */
+static struct dpu_csc_cfg rgb2yuv_cfg = {
+ {
+ 0x0083, 0x0102, 0x0032,
+ 0x1fb5, 0x1f6c, 0x00e1,
+ 0x00e1, 0x1f45, 0x1fdc
+ },
+ { 0x00, 0x00, 0x00 },
+ { 0x0040, 0x0200, 0x0200 },
+ { 0x000, 0x3ff, 0x000, 0x3ff, 0x000, 0x3ff },
+ { 0x040, 0x3ac, 0x040, 0x3c0, 0x040, 0x3c0 },
+};
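+/*
+ * Annotation (sketch; the field names are an assumption based on the usual
+ * DPU/SDE CSC layout and may differ from dpu_hw_mdss.h): the initializer
+ * rows above map to struct dpu_csc_cfg in declaration order:
+ *
+ *	csc_mv[9]      3x3 RGB->YUV matrix in fixed point
+ *	csc_pre_bv[3]  pre-matrix bias   { 0x00, 0x00, 0x00 }
+ *	csc_post_bv[3] post-matrix bias  { 0x0040, 0x0200, 0x0200 }
+ *	csc_pre_lv[6]  pre-clamp  low/high per component
+ *	csc_post_lv[6] post-clamp low/high per component
+ *
+ * The post-clamp luma range 0x040-0x3ac is 16-235 in 8 bits scaled to
+ * 10 bits (16*4 = 64, 235*4 = 940), matching the limited-range output
+ * promised by the comment above.
+ */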
+
+static struct dpu_cdm_cfg *_cdm_offset(enum dpu_cdm cdm,
+ struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->cdm_count; i++) {
+ if (cdm == m->cdm[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->cdm[i].base;
+ b->length = m->cdm[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_CDM;
+ return &m->cdm[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static int dpu_hw_cdm_setup_csc_10bit(struct dpu_hw_cdm *ctx,
+ struct dpu_csc_cfg *data)
+{
+ dpu_hw_csc_setup(&ctx->hw, CDM_CSC_10_MATRIX_COEFF_0, data, true);
+
+ return 0;
+}
+
+static int dpu_hw_cdm_setup_cdwn(struct dpu_hw_cdm *ctx,
+ struct dpu_hw_cdm_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 opmode = 0;
+ u32 out_size = 0;
+
+ if (cfg->output_bit_depth == CDM_CDWN_OUTPUT_10BIT)
+ opmode &= ~BIT(7);
+ else
+ opmode |= BIT(7);
+
+ /* ENABLE DWNS_H bit */
+ opmode |= BIT(1);
+
+ switch (cfg->h_cdwn_type) {
+ case CDM_CDWN_DISABLE:
+ /* CLEAR METHOD_H field */
+ opmode &= ~(0x18);
+ /* CLEAR DWNS_H bit */
+ opmode &= ~BIT(1);
+ break;
+ case CDM_CDWN_PIXEL_DROP:
+ /* Clear METHOD_H field (pixel drop is 0) */
+ opmode &= ~(0x18);
+ break;
+ case CDM_CDWN_AVG:
+ /* Clear METHOD_H field (Average is 0x1) */
+ opmode &= ~(0x18);
+ opmode |= (0x1 << 0x3);
+ break;
+ case CDM_CDWN_COSITE:
+ /* Clear METHOD_H field (Cosite is 0x2) */
+ opmode &= ~(0x18);
+ opmode |= (0x2 << 0x3);
+ /* Co-site horizontal coefficients */
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_0,
+ cosite_h_coeff[0]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_1,
+ cosite_h_coeff[1]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_COSITE_H_2,
+ cosite_h_coeff[2]);
+ break;
+ case CDM_CDWN_OFFSITE:
+ /* Clear METHOD_H field (Offsite is 0x3) */
+ opmode &= ~(0x18);
+ opmode |= (0x3 << 0x3);
+
+ /* Off-site horizontal coefficients */
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_0,
+ offsite_h_coeff[0]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_1,
+ offsite_h_coeff[1]);
+ DPU_REG_WRITE(c, CDM_CDWN2_COEFF_OFFSITE_H_2,
+ offsite_h_coeff[2]);
+ break;
+ default:
+ pr_err("%s invalid horz down sampling type\n", __func__);
+ return -EINVAL;
+ }
+
+ /* ENABLE DWNS_V bit */
+ opmode |= BIT(2);
+
+ switch (cfg->v_cdwn_type) {
+ case CDM_CDWN_DISABLE:
+ /* CLEAR METHOD_V field */
+ opmode &= ~(0x60);
+ /* CLEAR DWNS_V bit */
+ opmode &= ~BIT(2);
+ break;
+ case CDM_CDWN_PIXEL_DROP:
+ /* Clear METHOD_V field (pixel drop is 0) */
+ opmode &= ~(0x60);
+ break;
+ case CDM_CDWN_AVG:
+ /* Clear METHOD_V field (Average is 0x1) */
+ opmode &= ~(0x60);
+ opmode |= (0x1 << 0x5);
+ break;
+ case CDM_CDWN_COSITE:
+ /* Clear METHOD_V field (Cosite is 0x2) */
+ opmode &= ~(0x60);
+ opmode |= (0x2 << 0x5);
+ /* Co-site vertical coefficients */
+ DPU_REG_WRITE(c,
+ CDM_CDWN2_COEFF_COSITE_V,
+ cosite_v_coeff[0]);
+ break;
+ case CDM_CDWN_OFFSITE:
+ /* Clear METHOD_V field (Offsite is 0x3) */
+ opmode &= ~(0x60);
+ opmode |= (0x3 << 0x5);
+
+ /* Off-site vertical coefficients */
+ DPU_REG_WRITE(c,
+ CDM_CDWN2_COEFF_OFFSITE_V,
+ offsite_v_coeff[0]);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ if (cfg->v_cdwn_type || cfg->h_cdwn_type)
+ opmode |= BIT(0); /* EN CDWN module */
+ else
+ opmode &= ~BIT(0);
+
+ out_size = (cfg->output_width & 0xFFFF) |
+ ((cfg->output_height & 0xFFFF) << 16);
+ DPU_REG_WRITE(c, CDM_CDWN2_OUT_SIZE, out_size);
+ DPU_REG_WRITE(c, CDM_CDWN2_OP_MODE, opmode);
+ DPU_REG_WRITE(c, CDM_CDWN2_CLAMP_OUT,
+ ((0x3FF << 16) | 0x0));
+
+ return 0;
+}
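+/*
+ * Worked example of the CDWN2 opmode encoding built above (illustrative
+ * only): for 8-bit output with co-site horizontal and off-site vertical
+ * downsampling,
+ *
+ *	opmode = BIT(7)		8-bit output
+ *	       | BIT(1)		DWNS_H enable
+ *	       | (0x2 << 3)	METHOD_H = co-site
+ *	       | BIT(2)		DWNS_V enable
+ *	       | (0x3 << 5)	METHOD_V = off-site
+ *	       | BIT(0)		CDWN module enable
+ *	       = 0xf7
+ */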
+
+static int dpu_hw_cdm_enable(struct dpu_hw_cdm *ctx,
+ struct dpu_hw_cdm_cfg *cdm)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ const struct dpu_format *fmt = cdm->output_fmt;
+ struct cdm_output_cfg cdm_cfg = { 0 };
+ u32 opmode = 0;
+ u32 csc = 0;
+
+ if (!DPU_FORMAT_IS_YUV(fmt))
+ return -EINVAL;
+
+ if (cdm->output_type == CDM_CDWN_OUTPUT_HDMI) {
+ if (fmt->chroma_sample != DPU_CHROMA_H1V2)
+ return -EINVAL; /* unsupported format */
+ opmode = BIT(0);
+ opmode |= (fmt->chroma_sample << 1);
+ cdm_cfg.intf_en = true;
+ }
+
+ csc |= BIT(2);
+ csc &= ~BIT(1);
+ csc |= BIT(0);
+
+ if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
+ ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
+
+ DPU_REG_WRITE(c, CDM_CSC_10_OPMODE, csc);
+ DPU_REG_WRITE(c, CDM_HDMI_PACK_OP_MODE, opmode);
+ return 0;
+}
+
+static void dpu_hw_cdm_disable(struct dpu_hw_cdm *ctx)
+{
+ struct cdm_output_cfg cdm_cfg = { 0 };
+
+ if (ctx->hw_mdp && ctx->hw_mdp->ops.setup_cdm_output)
+ ctx->hw_mdp->ops.setup_cdm_output(ctx->hw_mdp, &cdm_cfg);
+}
+
+static void _setup_cdm_ops(struct dpu_hw_cdm_ops *ops,
+ unsigned long features)
+{
+ ops->setup_csc_data = dpu_hw_cdm_setup_csc_10bit;
+ ops->setup_cdwn = dpu_hw_cdm_setup_cdwn;
+ ops->enable = dpu_hw_cdm_enable;
+ ops->disable = dpu_hw_cdm_disable;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m,
+ struct dpu_hw_mdp *hw_mdp)
+{
+ struct dpu_hw_cdm *c;
+ struct dpu_cdm_cfg *cfg;
+ int rc;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _cdm_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ c->idx = idx;
+ c->caps = cfg;
+ _setup_cdm_ops(&c->ops, c->caps->features);
+ c->hw_mdp = hw_mdp;
+
+ rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CDM, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ /*
+ * Perform default initialization for the chroma down module:
+ * set up the default CSC coefficients.
+ */
+ dpu_hw_cdm_setup_csc_10bit(c, &rgb2yuv_cfg);
+
+ return c;
+
+blk_init_error:
+ kzfree(c);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm)
+{
+ if (cdm)
+ dpu_hw_blk_destroy(&cdm->base);
+ kfree(cdm);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
new file mode 100644
index 000000000000..5cceb1ecb8e0
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_cdm.h
@@ -0,0 +1,139 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_CDM_H
+#define _DPU_HW_CDM_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_top.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_cdm;
+
+struct dpu_hw_cdm_cfg {
+ u32 output_width;
+ u32 output_height;
+ u32 output_bit_depth;
+ u32 h_cdwn_type;
+ u32 v_cdwn_type;
+ const struct dpu_format *output_fmt;
+ u32 output_type;
+ int flags;
+};
+
+enum dpu_hw_cdwn_type {
+ CDM_CDWN_DISABLE,
+ CDM_CDWN_PIXEL_DROP,
+ CDM_CDWN_AVG,
+ CDM_CDWN_COSITE,
+ CDM_CDWN_OFFSITE,
+};
+
+enum dpu_hw_cdwn_output_type {
+ CDM_CDWN_OUTPUT_HDMI,
+ CDM_CDWN_OUTPUT_WB,
+};
+
+enum dpu_hw_cdwn_output_bit_depth {
+ CDM_CDWN_OUTPUT_8BIT,
+ CDM_CDWN_OUTPUT_10BIT,
+};
+
+/**
+ * struct dpu_hw_cdm_ops : Interface to the chroma down HW driver functions
+ * Assumption is these functions will be called after
+ * clocks are enabled
+ * @setup_csc: Programs the csc matrix
+ * @setup_cdwn: Sets up the chroma down sub module
+ * @enable: Enables the output to interface and programs the
+ * output packer
+ * @disable: Puts the cdm in bypass mode
+ */
+struct dpu_hw_cdm_ops {
+ /**
+ * Programs the CSC matrix for conversion from RGB to YUV space.
+ * Calling this function is optional, as a default matrix is set
+ * during initialization; call it only to program a matrix other
+ * than the default.
+ * @cdm: Pointer to the chroma down context structure
+ * @data Pointer to CSC configuration data
+ * return: 0 if success; error code otherwise
+ */
+ int (*setup_csc_data)(struct dpu_hw_cdm *cdm,
+ struct dpu_csc_cfg *data);
+
+ /**
+ * Programs the Chroma downsample part.
+ * @cdm Pointer to chroma down context
+ */
+ int (*setup_cdwn)(struct dpu_hw_cdm *cdm,
+ struct dpu_hw_cdm_cfg *cfg);
+
+ /**
+ * Enable the CDM module
+ * @cdm Pointer to chroma down context
+ */
+ int (*enable)(struct dpu_hw_cdm *cdm,
+ struct dpu_hw_cdm_cfg *cfg);
+
+ /**
+ * Disable the CDM module
+ * @cdm Pointer to chroma down context
+ */
+ void (*disable)(struct dpu_hw_cdm *cdm);
+};
+
+struct dpu_hw_cdm {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* chroma down */
+ const struct dpu_cdm_cfg *caps;
+ enum dpu_cdm idx;
+
+ /* mdp top hw driver */
+ struct dpu_hw_mdp *hw_mdp;
+
+ /* ops */
+ struct dpu_hw_cdm_ops ops;
+};
+
+/**
+ * to_dpu_hw_cdm - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_cdm *to_dpu_hw_cdm(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_cdm, base);
+}
+
+/**
+ * dpu_hw_cdm_init - initializes the cdm hw driver object.
+ * should be called once per cdm before it is accessed.
+ * @idx: cdm index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ * @hw_mdp: pointer to mdp top hw driver object
+ */
+struct dpu_hw_cdm *dpu_hw_cdm_init(enum dpu_cdm idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m,
+ struct dpu_hw_mdp *hw_mdp);
+
+/**
+ * dpu_hw_cdm_destroy - destroys CDM driver context
+ * @cdm: pointer to CDM driver context
+ */
+void dpu_hw_cdm_destroy(struct dpu_hw_cdm *cdm);
+
+#endif /*_DPU_HW_CDM_H */
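+/*
+ * Usage sketch for the CDM API declared above (illustrative only; error
+ * handling is elided, and mmio/catalog/hw_mdp/fmt are assumed to come from
+ * the caller's dpu_kms context):
+ *
+ *	struct dpu_hw_cdm *cdm;
+ *	struct dpu_hw_cdm_cfg cfg = {
+ *		.output_width     = 1920,
+ *		.output_height    = 1080,
+ *		.output_bit_depth = CDM_CDWN_OUTPUT_8BIT,
+ *		.h_cdwn_type      = CDM_CDWN_COSITE,
+ *		.v_cdwn_type      = CDM_CDWN_OFFSITE,
+ *		.output_fmt       = fmt,	an H1V2-subsampled YUV dpu_format
+ *		.output_type      = CDM_CDWN_OUTPUT_HDMI,
+ *	};
+ *
+ *	cdm = dpu_hw_cdm_init(CDM_0, mmio, catalog, hw_mdp);
+ *	cdm->ops.setup_cdwn(cdm, &cfg);
+ *	cdm->ops.enable(cdm, &cfg);
+ *	...
+ *	cdm->ops.disable(cdm);
+ *	dpu_hw_cdm_destroy(cdm);
+ */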
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
new file mode 100644
index 000000000000..06be7cf7ce50
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.c
@@ -0,0 +1,540 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/delay.h>
+#include "dpu_hwio.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define CTL_LAYER(lm) \
+ (((lm) == LM_5) ? (0x024) : (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT(lm) \
+ (0x40 + (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT2(lm) \
+ (0x70 + (((lm) - LM_0) * 0x004))
+#define CTL_LAYER_EXT3(lm) \
+ (0xA0 + (((lm) - LM_0) * 0x004))
+#define CTL_TOP 0x014
+#define CTL_FLUSH 0x018
+#define CTL_START 0x01C
+#define CTL_PREPARE 0x0d0
+#define CTL_SW_RESET 0x030
+#define CTL_LAYER_EXTN_OFFSET 0x40
+
+#define CTL_MIXER_BORDER_OUT BIT(24)
+#define CTL_FLUSH_MASK_CTL BIT(17)
+
+#define DPU_REG_RESET_TIMEOUT_US 2000
+
+static struct dpu_ctl_cfg *_ctl_offset(enum dpu_ctl ctl,
+ struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->ctl_count; i++) {
+ if (ctl == m->ctl[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->ctl[i].base;
+ b->length = m->ctl[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_CTL;
+ return &m->ctl[i];
+ }
+ }
+ return ERR_PTR(-EINVAL);
+}
+
+static int _mixer_stages(const struct dpu_lm_cfg *mixer, int count,
+ enum dpu_lm lm)
+{
+ int i;
+ int stages = -EINVAL;
+
+ for (i = 0; i < count; i++) {
+ if (lm == mixer[i].id) {
+ stages = mixer[i].sblk->maxblendstages;
+ break;
+ }
+ }
+
+ return stages;
+}
+
+static inline void dpu_hw_ctl_trigger_start(struct dpu_hw_ctl *ctx)
+{
+ DPU_REG_WRITE(&ctx->hw, CTL_START, 0x1);
+}
+
+static inline void dpu_hw_ctl_trigger_pending(struct dpu_hw_ctl *ctx)
+{
+ DPU_REG_WRITE(&ctx->hw, CTL_PREPARE, 0x1);
+}
+
+static inline void dpu_hw_ctl_clear_pending_flush(struct dpu_hw_ctl *ctx)
+{
+ ctx->pending_flush_mask = 0x0;
+}
+
+static inline void dpu_hw_ctl_update_pending_flush(struct dpu_hw_ctl *ctx,
+ u32 flushbits)
+{
+ ctx->pending_flush_mask |= flushbits;
+}
+
+static u32 dpu_hw_ctl_get_pending_flush(struct dpu_hw_ctl *ctx)
+{
+ if (!ctx)
+ return 0x0;
+
+ return ctx->pending_flush_mask;
+}
+
+static inline void dpu_hw_ctl_trigger_flush(struct dpu_hw_ctl *ctx)
+{
+ DPU_REG_WRITE(&ctx->hw, CTL_FLUSH, ctx->pending_flush_mask);
+}
+
+static inline u32 dpu_hw_ctl_get_flush_register(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ return DPU_REG_READ(c, CTL_FLUSH);
+}
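+/*
+ * Sketch of the pending-flush pattern implemented by the ops above
+ * (illustrative, not part of the patch): callers accumulate flush bits for
+ * every block they touch, then commit them with a single register write:
+ *
+ *	ctx->ops.clear_pending_flush(ctx);
+ *	ctx->ops.update_pending_flush(ctx,
+ *			ctx->ops.get_bitmask_sspp(ctx, SSPP_VIG0));
+ *	ctx->ops.update_pending_flush(ctx,
+ *			ctx->ops.get_bitmask_mixer(ctx, LM_0));
+ *	ctx->ops.trigger_flush(ctx);	one CTL_FLUSH write
+ */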
+
+static inline uint32_t dpu_hw_ctl_get_bitmask_sspp(struct dpu_hw_ctl *ctx,
+ enum dpu_sspp sspp)
+{
+ uint32_t flushbits = 0;
+
+ switch (sspp) {
+ case SSPP_VIG0:
+ flushbits = BIT(0);
+ break;
+ case SSPP_VIG1:
+ flushbits = BIT(1);
+ break;
+ case SSPP_VIG2:
+ flushbits = BIT(2);
+ break;
+ case SSPP_VIG3:
+ flushbits = BIT(18);
+ break;
+ case SSPP_RGB0:
+ flushbits = BIT(3);
+ break;
+ case SSPP_RGB1:
+ flushbits = BIT(4);
+ break;
+ case SSPP_RGB2:
+ flushbits = BIT(5);
+ break;
+ case SSPP_RGB3:
+ flushbits = BIT(19);
+ break;
+ case SSPP_DMA0:
+ flushbits = BIT(11);
+ break;
+ case SSPP_DMA1:
+ flushbits = BIT(12);
+ break;
+ case SSPP_DMA2:
+ flushbits = BIT(24);
+ break;
+ case SSPP_DMA3:
+ flushbits = BIT(25);
+ break;
+ case SSPP_CURSOR0:
+ flushbits = BIT(22);
+ break;
+ case SSPP_CURSOR1:
+ flushbits = BIT(23);
+ break;
+ default:
+ break;
+ }
+
+ return flushbits;
+}
+
+static inline uint32_t dpu_hw_ctl_get_bitmask_mixer(struct dpu_hw_ctl *ctx,
+ enum dpu_lm lm)
+{
+ uint32_t flushbits = 0;
+
+ switch (lm) {
+ case LM_0:
+ flushbits = BIT(6);
+ break;
+ case LM_1:
+ flushbits = BIT(7);
+ break;
+ case LM_2:
+ flushbits = BIT(8);
+ break;
+ case LM_3:
+ flushbits = BIT(9);
+ break;
+ case LM_4:
+ flushbits = BIT(10);
+ break;
+ case LM_5:
+ flushbits = BIT(20);
+ break;
+ default:
+ return -EINVAL;
+ }
+
+ flushbits |= CTL_FLUSH_MASK_CTL;
+
+ return flushbits;
+}
+
+static inline int dpu_hw_ctl_get_bitmask_intf(struct dpu_hw_ctl *ctx,
+ u32 *flushbits, enum dpu_intf intf)
+{
+ switch (intf) {
+ case INTF_0:
+ *flushbits |= BIT(31);
+ break;
+ case INTF_1:
+ *flushbits |= BIT(30);
+ break;
+ case INTF_2:
+ *flushbits |= BIT(29);
+ break;
+ case INTF_3:
+ *flushbits |= BIT(28);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static inline int dpu_hw_ctl_get_bitmask_cdm(struct dpu_hw_ctl *ctx,
+ u32 *flushbits, enum dpu_cdm cdm)
+{
+ switch (cdm) {
+ case CDM_0:
+ *flushbits |= BIT(26);
+ break;
+ default:
+ return -EINVAL;
+ }
+ return 0;
+}
+
+static u32 dpu_hw_ctl_poll_reset_status(struct dpu_hw_ctl *ctx, u32 timeout_us)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ ktime_t timeout;
+ u32 status;
+
+ timeout = ktime_add_us(ktime_get(), timeout_us);
+
+ /*
+ * MDP takes around 30us to finish resetting its ctl path;
+ * poll at 20-50us intervals so the reset is typically
+ * complete by the first poll
+ */
+ do {
+ status = DPU_REG_READ(c, CTL_SW_RESET);
+ status &= 0x1;
+ if (status)
+ usleep_range(20, 50);
+ } while (status && ktime_compare_safe(ktime_get(), timeout) < 0);
+
+ return status;
+}
+
+static int dpu_hw_ctl_reset_control(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ pr_debug("issuing hw ctl reset for ctl:%d\n", ctx->idx);
+ DPU_REG_WRITE(c, CTL_SW_RESET, 0x1);
+ if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US))
+ return -EINVAL;
+
+ return 0;
+}
+
+static int dpu_hw_ctl_wait_reset_status(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 status;
+
+ status = DPU_REG_READ(c, CTL_SW_RESET);
+ status &= 0x01;
+ if (!status)
+ return 0;
+
+ pr_debug("hw ctl reset is set for ctl:%d\n", ctx->idx);
+ if (dpu_hw_ctl_poll_reset_status(ctx, DPU_REG_RESET_TIMEOUT_US)) {
+ pr_err("hw recovery is not complete for ctl:%d\n", ctx->idx);
+ return -EINVAL;
+ }
+
+ return 0;
+}
+
+static void dpu_hw_ctl_clear_all_blendstages(struct dpu_hw_ctl *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int i;
+
+ for (i = 0; i < ctx->mixer_count; i++) {
+ DPU_REG_WRITE(c, CTL_LAYER(LM_0 + i), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT(LM_0 + i), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT2(LM_0 + i), 0);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT3(LM_0 + i), 0);
+ }
+}
+
+static void dpu_hw_ctl_setup_blendstage(struct dpu_hw_ctl *ctx,
+ enum dpu_lm lm, struct dpu_hw_stage_cfg *stage_cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 mixercfg = 0, mixercfg_ext = 0, mix, ext;
+ u32 mixercfg_ext2 = 0, mixercfg_ext3 = 0;
+ int i, j;
+ int stages;
+ int pipes_per_stage;
+
+ stages = _mixer_stages(ctx->mixer_hw_caps, ctx->mixer_count, lm);
+ if (stages < 0)
+ return;
+
+ if (test_bit(DPU_MIXER_SOURCESPLIT,
+ &ctx->mixer_hw_caps->features))
+ pipes_per_stage = PIPES_PER_STAGE;
+ else
+ pipes_per_stage = 1;
+
+ mixercfg = CTL_MIXER_BORDER_OUT; /* always set BORDER_OUT */
+
+ if (!stage_cfg)
+ goto exit;
+
+ for (i = 0; i <= stages; i++) {
+ /* overflow to ext register if 'i + 1 > 7' */
+ mix = (i + 1) & 0x7;
+ ext = i >= 7;
+
+ for (j = 0 ; j < pipes_per_stage; j++) {
+ enum dpu_sspp_multirect_index rect_index =
+ stage_cfg->multirect_index[i][j];
+
+ switch (stage_cfg->stage[i][j]) {
+ case SSPP_VIG0:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 0;
+ } else {
+ mixercfg |= mix << 0;
+ mixercfg_ext |= ext << 0;
+ }
+ break;
+ case SSPP_VIG1:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 4;
+ } else {
+ mixercfg |= mix << 3;
+ mixercfg_ext |= ext << 2;
+ }
+ break;
+ case SSPP_VIG2:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 8;
+ } else {
+ mixercfg |= mix << 6;
+ mixercfg_ext |= ext << 4;
+ }
+ break;
+ case SSPP_VIG3:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext3 |= ((i + 1) & 0xF) << 12;
+ } else {
+ mixercfg |= mix << 26;
+ mixercfg_ext |= ext << 6;
+ }
+ break;
+ case SSPP_RGB0:
+ mixercfg |= mix << 9;
+ mixercfg_ext |= ext << 8;
+ break;
+ case SSPP_RGB1:
+ mixercfg |= mix << 12;
+ mixercfg_ext |= ext << 10;
+ break;
+ case SSPP_RGB2:
+ mixercfg |= mix << 15;
+ mixercfg_ext |= ext << 12;
+ break;
+ case SSPP_RGB3:
+ mixercfg |= mix << 29;
+ mixercfg_ext |= ext << 14;
+ break;
+ case SSPP_DMA0:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext2 |= ((i + 1) & 0xF) << 8;
+ } else {
+ mixercfg |= mix << 18;
+ mixercfg_ext |= ext << 16;
+ }
+ break;
+ case SSPP_DMA1:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext2 |= ((i + 1) & 0xF) << 12;
+ } else {
+ mixercfg |= mix << 21;
+ mixercfg_ext |= ext << 18;
+ }
+ break;
+ case SSPP_DMA2:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext2 |= ((i + 1) & 0xF) << 16;
+ } else {
+ mix |= (i + 1) & 0xF;
+ mixercfg_ext2 |= mix << 0;
+ }
+ break;
+ case SSPP_DMA3:
+ if (rect_index == DPU_SSPP_RECT_1) {
+ mixercfg_ext2 |= ((i + 1) & 0xF) << 20;
+ } else {
+ mix |= (i + 1) & 0xF;
+ mixercfg_ext2 |= mix << 4;
+ }
+ break;
+ case SSPP_CURSOR0:
+ mixercfg_ext |= ((i + 1) & 0xF) << 20;
+ break;
+ case SSPP_CURSOR1:
+ mixercfg_ext |= ((i + 1) & 0xF) << 26;
+ break;
+ default:
+ break;
+ }
+ }
+ }
+
+exit:
+ DPU_REG_WRITE(c, CTL_LAYER(lm), mixercfg);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT(lm), mixercfg_ext);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT2(lm), mixercfg_ext2);
+ DPU_REG_WRITE(c, CTL_LAYER_EXT3(lm), mixercfg_ext3);
+}
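+/*
+ * Worked example of the stage encoding above (illustrative): a pipe's
+ * blend stage is written as the 4-bit value (i + 1), split into a 3-bit
+ * field in CTL_LAYER plus one extension bit in CTL_LAYER_EXT. For blend
+ * stage i = 7:
+ *
+ *	mix = (7 + 1) & 0x7 = 0		low 3 bits, CTL_LAYER
+ *	ext = (7 >= 7)      = 1		4th bit, CTL_LAYER_EXT
+ *
+ * so stages 0-6 fit entirely in CTL_LAYER and stages 7 and above spill
+ * into the EXT register, matching the "overflow to ext register" comment.
+ */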
+
+static void dpu_hw_ctl_intf_cfg(struct dpu_hw_ctl *ctx,
+ struct dpu_hw_intf_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 intf_cfg = 0;
+
+ intf_cfg |= (cfg->intf & 0xF) << 4;
+
+ if (cfg->mode_3d) {
+ intf_cfg |= BIT(19);
+ intf_cfg |= (cfg->mode_3d - 0x1) << 20;
+ }
+
+ switch (cfg->intf_mode_sel) {
+ case DPU_CTL_MODE_SEL_VID:
+ intf_cfg &= ~BIT(17);
+ intf_cfg &= ~(0x3 << 15);
+ break;
+ case DPU_CTL_MODE_SEL_CMD:
+ intf_cfg |= BIT(17);
+ intf_cfg |= ((cfg->stream_sel & 0x3) << 15);
+ break;
+ default:
+ pr_err("unknown interface type %d\n", cfg->intf_mode_sel);
+ return;
+ }
+
+ DPU_REG_WRITE(c, CTL_TOP, intf_cfg);
+}
+
+static void _setup_ctl_ops(struct dpu_hw_ctl_ops *ops,
+ unsigned long cap)
+{
+ ops->clear_pending_flush = dpu_hw_ctl_clear_pending_flush;
+ ops->update_pending_flush = dpu_hw_ctl_update_pending_flush;
+ ops->get_pending_flush = dpu_hw_ctl_get_pending_flush;
+ ops->trigger_flush = dpu_hw_ctl_trigger_flush;
+ ops->get_flush_register = dpu_hw_ctl_get_flush_register;
+ ops->trigger_start = dpu_hw_ctl_trigger_start;
+ ops->trigger_pending = dpu_hw_ctl_trigger_pending;
+ ops->setup_intf_cfg = dpu_hw_ctl_intf_cfg;
+ ops->reset = dpu_hw_ctl_reset_control;
+ ops->wait_reset_status = dpu_hw_ctl_wait_reset_status;
+ ops->clear_all_blendstages = dpu_hw_ctl_clear_all_blendstages;
+ ops->setup_blendstage = dpu_hw_ctl_setup_blendstage;
+ ops->get_bitmask_sspp = dpu_hw_ctl_get_bitmask_sspp;
+ ops->get_bitmask_mixer = dpu_hw_ctl_get_bitmask_mixer;
+ ops->get_bitmask_intf = dpu_hw_ctl_get_bitmask_intf;
+ ops->get_bitmask_cdm = dpu_hw_ctl_get_bitmask_cdm;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_ctl *c;
+ struct dpu_ctl_cfg *cfg;
+ int rc;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _ctl_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ pr_err("failed to create dpu_hw_ctl %d\n", idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+ c->caps = cfg;
+ _setup_ctl_ops(&c->ops, c->caps->features);
+ c->idx = idx;
+ c->mixer_count = m->mixer_count;
+ c->mixer_hw_caps = m->mixer;
+
+ rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_CTL, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ return c;
+
+blk_init_error:
+ kzfree(c);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx)
+{
+ if (ctx)
+ dpu_hw_blk_destroy(&ctx->base);
+ kfree(ctx);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
new file mode 100644
index 000000000000..c66a71f8b839
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_ctl.h
@@ -0,0 +1,218 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_CTL_H
+#define _DPU_HW_CTL_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_hw_blk.h"
+
+/**
+ * dpu_ctl_mode_sel: Interface mode selection
+ * DPU_CTL_MODE_SEL_VID: Video mode interface
+ * DPU_CTL_MODE_SEL_CMD: Command mode interface
+ */
+enum dpu_ctl_mode_sel {
+ DPU_CTL_MODE_SEL_VID = 0,
+ DPU_CTL_MODE_SEL_CMD
+};
+
+struct dpu_hw_ctl;
+/**
+ * struct dpu_hw_stage_cfg - blending stage cfg
+ * @stage : SSPP_ID at each stage
+ * @multirect_index: index of the rectangle of SSPP.
+ */
+struct dpu_hw_stage_cfg {
+ enum dpu_sspp stage[DPU_STAGE_MAX][PIPES_PER_STAGE];
+ enum dpu_sspp_multirect_index multirect_index
+ [DPU_STAGE_MAX][PIPES_PER_STAGE];
+};
+
+/**
+ * struct dpu_hw_intf_cfg : Describes how the DPU writes data to the output interface
+ * @intf : Interface id
+ * @mode_3d: 3d mux configuration
+ * @intf_mode_sel: Interface mode, cmd / vid
+ * @stream_sel: Stream selection for multi-stream interfaces
+ */
+struct dpu_hw_intf_cfg {
+ enum dpu_intf intf;
+ enum dpu_3d_blend_mode mode_3d;
+ enum dpu_ctl_mode_sel intf_mode_sel;
+ int stream_sel;
+};
+
+/**
+ * struct dpu_hw_ctl_ops - Interface to the CTL HW driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_ctl_ops {
+ /**
+ * kickoff hw operation for SW controlled interfaces
+ * DSI cmd mode and WB interface are SW controlled
+ * @ctx : ctl path ctx pointer
+ */
+ void (*trigger_start)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * signal that a kickoff prepare is in progress, for SW
+ * controlled interfaces: DSI cmd mode and the WB interface
+ * are SW controlled
+ * @ctx : ctl path ctx pointer
+ */
+ void (*trigger_pending)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Clear the value of the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ */
+ void (*clear_pending_flush)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Query the value of the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ */
+ u32 (*get_pending_flush)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * OR in the given flushbits to the cached pending_flush_mask
+ * No effect on hardware
+ * @ctx : ctl path ctx pointer
+ * @flushbits : module flushmask
+ */
+ void (*update_pending_flush)(struct dpu_hw_ctl *ctx,
+ u32 flushbits);
+
+ /**
+ * Write the value of the pending_flush_mask to hardware
+ * @ctx : ctl path ctx pointer
+ */
+ void (*trigger_flush)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Read the value of the flush register
+ * @ctx : ctl path ctx pointer
+ * Return: value of the ctl flush register.
+ */
+ u32 (*get_flush_register)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Setup ctl_path interface config
+ * @ctx
+ * @cfg : interface config structure pointer
+ */
+ void (*setup_intf_cfg)(struct dpu_hw_ctl *ctx,
+ struct dpu_hw_intf_cfg *cfg);
+
+ int (*reset)(struct dpu_hw_ctl *c);
+
+ /*
+ * wait_reset_status - checks ctl reset status
+ * @ctx : ctl path ctx pointer
+ *
+ * This function checks the ctl reset status bit.
+ * If the reset bit is set, it keeps polling the status till the hw
+ * reset is complete.
+ * Returns: 0 on success or -error if reset incomplete within interval
+ */
+ int (*wait_reset_status)(struct dpu_hw_ctl *ctx);
+
+ uint32_t (*get_bitmask_sspp)(struct dpu_hw_ctl *ctx,
+ enum dpu_sspp blk);
+
+ uint32_t (*get_bitmask_mixer)(struct dpu_hw_ctl *ctx,
+ enum dpu_lm blk);
+
+ int (*get_bitmask_intf)(struct dpu_hw_ctl *ctx,
+ u32 *flushbits,
+ enum dpu_intf blk);
+
+ int (*get_bitmask_cdm)(struct dpu_hw_ctl *ctx,
+ u32 *flushbits,
+ enum dpu_cdm blk);
+
+ /**
+ * Set all blend stages to disabled
+ * @ctx : ctl path ctx pointer
+ */
+ void (*clear_all_blendstages)(struct dpu_hw_ctl *ctx);
+
+ /**
+ * Configure layer mixer to pipe configuration
+ * @ctx : ctl path ctx pointer
+ * @lm : layer mixer enumeration
+ * @cfg : blend stage configuration
+ */
+ void (*setup_blendstage)(struct dpu_hw_ctl *ctx,
+ enum dpu_lm lm, struct dpu_hw_stage_cfg *cfg);
+};
+
+/**
+ * struct dpu_hw_ctl : CTL PATH driver object
+ * @base: hardware block base structure
+ * @hw: block register map object
+ * @idx: control path index
+ * @caps: control path capabilities
+ * @mixer_count: number of mixers
+ * @mixer_hw_caps: mixer hardware capabilities
+ * @pending_flush_mask: storage for pending ctl_flush managed via ops
+ * @ops: operation list
+ */
+struct dpu_hw_ctl {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* ctl path */
+ int idx;
+ const struct dpu_ctl_cfg *caps;
+ int mixer_count;
+ const struct dpu_lm_cfg *mixer_hw_caps;
+ u32 pending_flush_mask;
+
+ /* ops */
+ struct dpu_hw_ctl_ops ops;
+};
+
+/**
+ * to_dpu_hw_ctl - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_ctl *to_dpu_hw_ctl(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_ctl, base);
+}
+
+/**
+ * dpu_hw_ctl_init(): Initializes the ctl_path hw driver object.
+ * should be called before accessing any ctl path registers.
+ * @idx: ctl_path index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_ctl *dpu_hw_ctl_init(enum dpu_ctl idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_ctl_destroy(): Destroys ctl driver context
+ * should be called to free the context
+ */
+void dpu_hw_ctl_destroy(struct dpu_hw_ctl *ctx);
+
+#endif /*_DPU_HW_CTL_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
new file mode 100644
index 000000000000..c0b7f0049365
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.c
@@ -0,0 +1,1183 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/bitops.h>
+#include <linux/slab.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_mdss.h"
+
+/**
+ * Register offsets in MDSS register file for the interrupt registers
+ * w.r.t. to the MDP base
+ */
+#define MDP_SSPP_TOP0_OFF 0x0
+#define MDP_INTF_0_OFF 0x6A000
+#define MDP_INTF_1_OFF 0x6A800
+#define MDP_INTF_2_OFF 0x6B000
+#define MDP_INTF_3_OFF 0x6B800
+#define MDP_INTF_4_OFF 0x6C000
+#define MDP_AD4_0_OFF 0x7C000
+#define MDP_AD4_1_OFF 0x7D000
+#define MDP_AD4_INTR_EN_OFF 0x41c
+#define MDP_AD4_INTR_CLEAR_OFF 0x424
+#define MDP_AD4_INTR_STATUS_OFF 0x420
+
+/**
+ * WB interrupt status bit definitions
+ */
+#define DPU_INTR_WB_0_DONE BIT(0)
+#define DPU_INTR_WB_1_DONE BIT(1)
+#define DPU_INTR_WB_2_DONE BIT(4)
+
+/**
+ * WDOG timer interrupt status bit definitions
+ */
+#define DPU_INTR_WD_TIMER_0_DONE BIT(2)
+#define DPU_INTR_WD_TIMER_1_DONE BIT(3)
+#define DPU_INTR_WD_TIMER_2_DONE BIT(5)
+#define DPU_INTR_WD_TIMER_3_DONE BIT(6)
+#define DPU_INTR_WD_TIMER_4_DONE BIT(7)
+
+/**
+ * Pingpong interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_0_DONE BIT(8)
+#define DPU_INTR_PING_PONG_1_DONE BIT(9)
+#define DPU_INTR_PING_PONG_2_DONE BIT(10)
+#define DPU_INTR_PING_PONG_3_DONE BIT(11)
+#define DPU_INTR_PING_PONG_0_RD_PTR BIT(12)
+#define DPU_INTR_PING_PONG_1_RD_PTR BIT(13)
+#define DPU_INTR_PING_PONG_2_RD_PTR BIT(14)
+#define DPU_INTR_PING_PONG_3_RD_PTR BIT(15)
+#define DPU_INTR_PING_PONG_0_WR_PTR BIT(16)
+#define DPU_INTR_PING_PONG_1_WR_PTR BIT(17)
+#define DPU_INTR_PING_PONG_2_WR_PTR BIT(18)
+#define DPU_INTR_PING_PONG_3_WR_PTR BIT(19)
+#define DPU_INTR_PING_PONG_0_AUTOREFRESH_DONE BIT(20)
+#define DPU_INTR_PING_PONG_1_AUTOREFRESH_DONE BIT(21)
+#define DPU_INTR_PING_PONG_2_AUTOREFRESH_DONE BIT(22)
+#define DPU_INTR_PING_PONG_3_AUTOREFRESH_DONE BIT(23)
+
+/**
+ * Interface interrupt status bit definitions
+ */
+#define DPU_INTR_INTF_0_UNDERRUN BIT(24)
+#define DPU_INTR_INTF_1_UNDERRUN BIT(26)
+#define DPU_INTR_INTF_2_UNDERRUN BIT(28)
+#define DPU_INTR_INTF_3_UNDERRUN BIT(30)
+#define DPU_INTR_INTF_0_VSYNC BIT(25)
+#define DPU_INTR_INTF_1_VSYNC BIT(27)
+#define DPU_INTR_INTF_2_VSYNC BIT(29)
+#define DPU_INTR_INTF_3_VSYNC BIT(31)
+
+/**
+ * Pingpong Secondary interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_S0_AUTOREFRESH_DONE BIT(0)
+#define DPU_INTR_PING_PONG_S0_WR_PTR BIT(4)
+#define DPU_INTR_PING_PONG_S0_RD_PTR BIT(8)
+#define DPU_INTR_PING_PONG_S0_TEAR_DETECTED BIT(22)
+#define DPU_INTR_PING_PONG_S0_TE_DETECTED BIT(28)
+
+/**
+ * Pingpong TEAR detection interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_0_TEAR_DETECTED BIT(16)
+#define DPU_INTR_PING_PONG_1_TEAR_DETECTED BIT(17)
+#define DPU_INTR_PING_PONG_2_TEAR_DETECTED BIT(18)
+#define DPU_INTR_PING_PONG_3_TEAR_DETECTED BIT(19)
+
+/**
+ * Pingpong TE detection interrupt status bit definitions
+ */
+#define DPU_INTR_PING_PONG_0_TE_DETECTED BIT(24)
+#define DPU_INTR_PING_PONG_1_TE_DETECTED BIT(25)
+#define DPU_INTR_PING_PONG_2_TE_DETECTED BIT(26)
+#define DPU_INTR_PING_PONG_3_TE_DETECTED BIT(27)
+
+/**
+ * Ctl start interrupt status bit definitions
+ */
+#define DPU_INTR_CTL_0_START BIT(9)
+#define DPU_INTR_CTL_1_START BIT(10)
+#define DPU_INTR_CTL_2_START BIT(11)
+#define DPU_INTR_CTL_3_START BIT(12)
+#define DPU_INTR_CTL_4_START BIT(13)
+
+/**
+ * Concurrent WB overflow interrupt status bit definitions
+ */
+#define DPU_INTR_CWB_2_OVERFLOW BIT(14)
+#define DPU_INTR_CWB_3_OVERFLOW BIT(15)
+
+/**
+ * Histogram VIG done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_VIG_0_DONE BIT(0)
+#define DPU_INTR_HIST_VIG_1_DONE BIT(4)
+#define DPU_INTR_HIST_VIG_2_DONE BIT(8)
+#define DPU_INTR_HIST_VIG_3_DONE BIT(10)
+
+/**
+ * Histogram VIG reset Sequence done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_VIG_0_RSTSEQ_DONE BIT(1)
+#define DPU_INTR_HIST_VIG_1_RSTSEQ_DONE BIT(5)
+#define DPU_INTR_HIST_VIG_2_RSTSEQ_DONE BIT(9)
+#define DPU_INTR_HIST_VIG_3_RSTSEQ_DONE BIT(11)
+
+/**
+ * Histogram DSPP done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_DSPP_0_DONE BIT(12)
+#define DPU_INTR_HIST_DSPP_1_DONE BIT(16)
+#define DPU_INTR_HIST_DSPP_2_DONE BIT(20)
+#define DPU_INTR_HIST_DSPP_3_DONE BIT(22)
+
+/**
+ * Histogram DSPP reset Sequence done interrupt status bit definitions
+ */
+#define DPU_INTR_HIST_DSPP_0_RSTSEQ_DONE BIT(13)
+#define DPU_INTR_HIST_DSPP_1_RSTSEQ_DONE BIT(17)
+#define DPU_INTR_HIST_DSPP_2_RSTSEQ_DONE BIT(21)
+#define DPU_INTR_HIST_DSPP_3_RSTSEQ_DONE BIT(23)
+
+/**
+ * INTF interrupt status bit definitions
+ */
+#define DPU_INTR_VIDEO_INTO_STATIC BIT(0)
+#define DPU_INTR_VIDEO_OUTOF_STATIC BIT(1)
+#define DPU_INTR_DSICMD_0_INTO_STATIC BIT(2)
+#define DPU_INTR_DSICMD_0_OUTOF_STATIC BIT(3)
+#define DPU_INTR_DSICMD_1_INTO_STATIC BIT(4)
+#define DPU_INTR_DSICMD_1_OUTOF_STATIC BIT(5)
+#define DPU_INTR_DSICMD_2_INTO_STATIC BIT(6)
+#define DPU_INTR_DSICMD_2_OUTOF_STATIC BIT(7)
+#define DPU_INTR_PROG_LINE BIT(8)
+
+/**
+ * AD4 interrupt status bit definitions
+ */
+#define DPU_INTR_BRIGHTPR_UPDATED BIT(4)
+#define DPU_INTR_DARKENH_UPDATED BIT(3)
+#define DPU_INTR_STREN_OUTROI_UPDATED BIT(2)
+#define DPU_INTR_STREN_INROI_UPDATED BIT(1)
+#define DPU_INTR_BACKLIGHT_UPDATED BIT(0)
+/**
+ * struct dpu_intr_reg - array of DPU register sets
+ * @clr_off: offset to CLEAR reg
+ * @en_off: offset to ENABLE reg
+ * @status_off: offset to STATUS reg
+ */
+struct dpu_intr_reg {
+ u32 clr_off;
+ u32 en_off;
+ u32 status_off;
+};
+
+/**
+ * struct dpu_irq_type - maps each irq with i/f
+ * @intr_type: type of interrupt listed in dpu_intr_type
+ * @instance_idx: instance index of the associated HW block in DPU
+ * @irq_mask: corresponding bit in the interrupt status reg
+ * @reg_idx: which reg set to use
+ */
+struct dpu_irq_type {
+ u32 intr_type;
+ u32 instance_idx;
+ u32 irq_mask;
+ u32 reg_idx;
+};
+
+/**
+ * List of DPU interrupt registers
+ */
+static const struct dpu_intr_reg dpu_intr_set[] = {
+ {
+ MDP_SSPP_TOP0_OFF+INTR_CLEAR,
+ MDP_SSPP_TOP0_OFF+INTR_EN,
+ MDP_SSPP_TOP0_OFF+INTR_STATUS
+ },
+ {
+ MDP_SSPP_TOP0_OFF+INTR2_CLEAR,
+ MDP_SSPP_TOP0_OFF+INTR2_EN,
+ MDP_SSPP_TOP0_OFF+INTR2_STATUS
+ },
+ {
+ MDP_SSPP_TOP0_OFF+HIST_INTR_CLEAR,
+ MDP_SSPP_TOP0_OFF+HIST_INTR_EN,
+ MDP_SSPP_TOP0_OFF+HIST_INTR_STATUS
+ },
+ {
+ MDP_INTF_0_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_0_OFF+INTF_INTR_EN,
+ MDP_INTF_0_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_1_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_1_OFF+INTF_INTR_EN,
+ MDP_INTF_1_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_2_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_2_OFF+INTF_INTR_EN,
+ MDP_INTF_2_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_3_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_3_OFF+INTF_INTR_EN,
+ MDP_INTF_3_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_INTF_4_OFF+INTF_INTR_CLEAR,
+ MDP_INTF_4_OFF+INTF_INTR_EN,
+ MDP_INTF_4_OFF+INTF_INTR_STATUS
+ },
+ {
+ MDP_AD4_0_OFF + MDP_AD4_INTR_CLEAR_OFF,
+ MDP_AD4_0_OFF + MDP_AD4_INTR_EN_OFF,
+ MDP_AD4_0_OFF + MDP_AD4_INTR_STATUS_OFF,
+ },
+ {
+ MDP_AD4_1_OFF + MDP_AD4_INTR_CLEAR_OFF,
+ MDP_AD4_1_OFF + MDP_AD4_INTR_EN_OFF,
+ MDP_AD4_1_OFF + MDP_AD4_INTR_STATUS_OFF,
+ }
+};
+
+/**
+ * IRQ mapping table - use for lookup an irq_idx in this table that have
+ * a matching interface type and instance index.
+ */
+static const struct dpu_irq_type dpu_irq_map[] = {
+ /* BEGIN MAP_RANGE: 0-31, INTR */
+ /* irq_idx: 0-3 */
+ { DPU_IRQ_TYPE_WB_ROT_COMP, WB_0, DPU_INTR_WB_0_DONE, 0},
+ { DPU_IRQ_TYPE_WB_ROT_COMP, WB_1, DPU_INTR_WB_1_DONE, 0},
+ { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_0, DPU_INTR_WD_TIMER_0_DONE, 0},
+ { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_1, DPU_INTR_WD_TIMER_1_DONE, 0},
+ /* irq_idx: 4-7 */
+ { DPU_IRQ_TYPE_WB_WFD_COMP, WB_2, DPU_INTR_WB_2_DONE, 0},
+ { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_2, DPU_INTR_WD_TIMER_2_DONE, 0},
+ { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_3, DPU_INTR_WD_TIMER_3_DONE, 0},
+ { DPU_IRQ_TYPE_WD_TIMER, WD_TIMER_4, DPU_INTR_WD_TIMER_4_DONE, 0},
+ /* irq_idx: 8-11 */
+ { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_COMP, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_DONE, 0},
+ /* irq_idx: 12-15 */
+ { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_RD_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_RD_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_RD_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_RD_PTR, 0},
+ /* irq_idx: 16-19 */
+ { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_WR_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_WR_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_WR_PTR, 0},
+ { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_WR_PTR, 0},
+ /* irq_idx: 20-23 */
+ { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_AUTOREFRESH_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_AUTOREFRESH_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_AUTOREFRESH_DONE, 0},
+ { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_AUTOREFRESH_DONE, 0},
+ /* irq_idx: 24-27 */
+ { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_0, DPU_INTR_INTF_0_UNDERRUN, 0},
+ { DPU_IRQ_TYPE_INTF_VSYNC, INTF_0, DPU_INTR_INTF_0_VSYNC, 0},
+ { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_1, DPU_INTR_INTF_1_UNDERRUN, 0},
+ { DPU_IRQ_TYPE_INTF_VSYNC, INTF_1, DPU_INTR_INTF_1_VSYNC, 0},
+ /* irq_idx: 28-31 */
+ { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_2, DPU_INTR_INTF_2_UNDERRUN, 0},
+ { DPU_IRQ_TYPE_INTF_VSYNC, INTF_2, DPU_INTR_INTF_2_VSYNC, 0},
+ { DPU_IRQ_TYPE_INTF_UNDER_RUN, INTF_3, DPU_INTR_INTF_3_UNDERRUN, 0},
+ { DPU_IRQ_TYPE_INTF_VSYNC, INTF_3, DPU_INTR_INTF_3_VSYNC, 0},
+
+ /* BEGIN MAP_RANGE: 32-63, INTR2 */
+ /* irq_idx: 32-35 */
+ { DPU_IRQ_TYPE_PING_PONG_AUTO_REF, PINGPONG_S0,
+ DPU_INTR_PING_PONG_S0_AUTOREFRESH_DONE, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 36-39 */
+ { DPU_IRQ_TYPE_PING_PONG_WR_PTR, PINGPONG_S0,
+ DPU_INTR_PING_PONG_S0_WR_PTR, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 40 */
+ { DPU_IRQ_TYPE_PING_PONG_RD_PTR, PINGPONG_S0,
+ DPU_INTR_PING_PONG_S0_RD_PTR, 1},
+ /* irq_idx: 41-45 */
+ { DPU_IRQ_TYPE_CTL_START, CTL_0,
+ DPU_INTR_CTL_0_START, 1},
+ { DPU_IRQ_TYPE_CTL_START, CTL_1,
+ DPU_INTR_CTL_1_START, 1},
+ { DPU_IRQ_TYPE_CTL_START, CTL_2,
+ DPU_INTR_CTL_2_START, 1},
+ { DPU_IRQ_TYPE_CTL_START, CTL_3,
+ DPU_INTR_CTL_3_START, 1},
+ { DPU_IRQ_TYPE_CTL_START, CTL_4,
+ DPU_INTR_CTL_4_START, 1},
+ /* irq_idx: 46-47 */
+ { DPU_IRQ_TYPE_CWB_OVERFLOW, CWB_2, DPU_INTR_CWB_2_OVERFLOW, 1},
+ { DPU_IRQ_TYPE_CWB_OVERFLOW, CWB_3, DPU_INTR_CWB_3_OVERFLOW, 1},
+ /* irq_idx: 48-51 */
+ { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_TEAR_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_TEAR_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_TEAR_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_TEAR_DETECTED, 1},
+ /* irq_idx: 52-55 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK, PINGPONG_S0,
+ DPU_INTR_PING_PONG_S0_TEAR_DETECTED, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ /* irq_idx: 56-59 */
+ { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_0,
+ DPU_INTR_PING_PONG_0_TE_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_1,
+ DPU_INTR_PING_PONG_1_TE_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_2,
+ DPU_INTR_PING_PONG_2_TE_DETECTED, 1},
+ { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_3,
+ DPU_INTR_PING_PONG_3_TE_DETECTED, 1},
+ /* irq_idx: 60-63 */
+ { DPU_IRQ_TYPE_PING_PONG_TE_CHECK, PINGPONG_S0,
+ DPU_INTR_PING_PONG_S0_TE_DETECTED, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 1},
+
+ /* BEGIN MAP_RANGE: 64-95 HIST */
+ /* irq_idx: 64-67 */
+ { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG0, DPU_INTR_HIST_VIG_0_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG0,
+ DPU_INTR_HIST_VIG_0_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 68-71 */
+ { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG1, DPU_INTR_HIST_VIG_1_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG1,
+ DPU_INTR_HIST_VIG_1_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 72-75 */
+ { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG2, DPU_INTR_HIST_VIG_2_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG2,
+ DPU_INTR_HIST_VIG_2_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_VIG_DONE, SSPP_VIG3, DPU_INTR_HIST_VIG_3_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_VIG_RSTSEQ, SSPP_VIG3,
+ DPU_INTR_HIST_VIG_3_RSTSEQ_DONE, 2},
+ /* irq_idx: 76-79 */
+ { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_0, DPU_INTR_HIST_DSPP_0_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_0,
+ DPU_INTR_HIST_DSPP_0_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 80-83 */
+ { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_1, DPU_INTR_HIST_DSPP_1_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_1,
+ DPU_INTR_HIST_DSPP_1_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 84-87 */
+ { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_2, DPU_INTR_HIST_DSPP_2_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_2,
+ DPU_INTR_HIST_DSPP_2_RSTSEQ_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_DSPP_DONE, DSPP_3, DPU_INTR_HIST_DSPP_3_DONE, 2},
+ { DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ, DSPP_3,
+ DPU_INTR_HIST_DSPP_3_RSTSEQ_DONE, 2},
+ /* irq_idx: 88-91 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ /* irq_idx: 92-95 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 2},
+
+ /* BEGIN MAP_RANGE: 96-127 INTF_0_INTR */
+ /* irq_idx: 96-99 */
+ { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_0,
+ DPU_INTR_VIDEO_INTO_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_0,
+ DPU_INTR_VIDEO_OUTOF_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_0,
+ DPU_INTR_DSICMD_0_INTO_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_0,
+ DPU_INTR_DSICMD_0_OUTOF_STATIC, 3},
+ /* irq_idx: 100-103 */
+ { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_0,
+ DPU_INTR_DSICMD_1_INTO_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_0,
+ DPU_INTR_DSICMD_1_OUTOF_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_0,
+ DPU_INTR_DSICMD_2_INTO_STATIC, 3},
+ { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_0,
+ DPU_INTR_DSICMD_2_OUTOF_STATIC, 3},
+ /* irq_idx: 104-107 */
+ { DPU_IRQ_TYPE_PROG_LINE, INTF_0, DPU_INTR_PROG_LINE, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 108-111 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 112-115 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 116-119 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 120-123 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ /* irq_idx: 124-127 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 3},
+
+ /* BEGIN MAP_RANGE: 128-159 INTF_1_INTR */
+ /* irq_idx: 128-131 */
+ { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_1,
+ DPU_INTR_VIDEO_INTO_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_1,
+ DPU_INTR_VIDEO_OUTOF_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_1,
+ DPU_INTR_DSICMD_0_INTO_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_1,
+ DPU_INTR_DSICMD_0_OUTOF_STATIC, 4},
+ /* irq_idx: 132-135 */
+ { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_1,
+ DPU_INTR_DSICMD_1_INTO_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_1,
+ DPU_INTR_DSICMD_1_OUTOF_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_1,
+ DPU_INTR_DSICMD_2_INTO_STATIC, 4},
+ { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_1,
+ DPU_INTR_DSICMD_2_OUTOF_STATIC, 4},
+ /* irq_idx: 136-139 */
+ { DPU_IRQ_TYPE_PROG_LINE, INTF_1, DPU_INTR_PROG_LINE, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 140-143 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 144-147 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 148-151 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 152-155 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ /* irq_idx: 156-159 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 4},
+
+ /* BEGIN MAP_RANGE: 160-191 INTF_2_INTR */
+ /* irq_idx: 160-163 */
+ { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_2,
+ DPU_INTR_VIDEO_INTO_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_2,
+ DPU_INTR_VIDEO_OUTOF_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_2,
+ DPU_INTR_DSICMD_0_INTO_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_2,
+ DPU_INTR_DSICMD_0_OUTOF_STATIC, 5},
+ /* irq_idx: 164-167 */
+ { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_2,
+ DPU_INTR_DSICMD_1_INTO_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_2,
+ DPU_INTR_DSICMD_1_OUTOF_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_2,
+ DPU_INTR_DSICMD_2_INTO_STATIC, 5},
+ { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_2,
+ DPU_INTR_DSICMD_2_OUTOF_STATIC, 5},
+ /* irq_idx: 168-171 */
+ { DPU_IRQ_TYPE_PROG_LINE, INTF_2, DPU_INTR_PROG_LINE, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 172-175 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 176-179 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 180-183 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 184-187 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ /* irq_idx: 188-191 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 5},
+
+ /* BEGIN MAP_RANGE: 192-223 INTF_3_INTR */
+ /* irq_idx: 192-195 */
+ { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_3,
+ DPU_INTR_VIDEO_INTO_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_3,
+ DPU_INTR_VIDEO_OUTOF_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_3,
+ DPU_INTR_DSICMD_0_INTO_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_3,
+ DPU_INTR_DSICMD_0_OUTOF_STATIC, 6},
+ /* irq_idx: 196-199 */
+ { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_3,
+ DPU_INTR_DSICMD_1_INTO_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_3,
+ DPU_INTR_DSICMD_1_OUTOF_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_3,
+ DPU_INTR_DSICMD_2_INTO_STATIC, 6},
+ { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_3,
+ DPU_INTR_DSICMD_2_OUTOF_STATIC, 6},
+ /* irq_idx: 200-203 */
+ { DPU_IRQ_TYPE_PROG_LINE, INTF_3, DPU_INTR_PROG_LINE, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 204-207 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 208-211 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 212-215 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 216-219 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ /* irq_idx: 220-223 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 6},
+
+ /* BEGIN MAP_RANGE: 224-255 INTF_4_INTR */
+ /* irq_idx: 224-227 */
+ { DPU_IRQ_TYPE_SFI_VIDEO_IN, INTF_4,
+ DPU_INTR_VIDEO_INTO_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_VIDEO_OUT, INTF_4,
+ DPU_INTR_VIDEO_OUTOF_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_CMD_0_IN, INTF_4,
+ DPU_INTR_DSICMD_0_INTO_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_CMD_0_OUT, INTF_4,
+ DPU_INTR_DSICMD_0_OUTOF_STATIC, 7},
+ /* irq_idx: 228-231 */
+ { DPU_IRQ_TYPE_SFI_CMD_1_IN, INTF_4,
+ DPU_INTR_DSICMD_1_INTO_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_CMD_1_OUT, INTF_4,
+ DPU_INTR_DSICMD_1_OUTOF_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_CMD_2_IN, INTF_4,
+ DPU_INTR_DSICMD_2_INTO_STATIC, 7},
+ { DPU_IRQ_TYPE_SFI_CMD_2_OUT, INTF_4,
+ DPU_INTR_DSICMD_2_OUTOF_STATIC, 7},
+ /* irq_idx: 232-235 */
+ { DPU_IRQ_TYPE_PROG_LINE, INTF_4, DPU_INTR_PROG_LINE, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 236-239 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 240-243 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 244-247 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 248-251 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ /* irq_idx: 252-255 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 7},
+
+ /* BEGIN MAP_RANGE: 256-287 AD4_0_INTR */
+ /* irq_idx: 256-259 */
+ { DPU_IRQ_TYPE_AD4_BL_DONE, DSPP_0, DPU_INTR_BACKLIGHT_UPDATED, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 260-263 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 264-267 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 268-271 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 272-275 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 276-279 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 280-283 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ /* irq_idx: 284-287 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 8},
+
+ /* BEGIN MAP_RANGE: 288-319 AD4_1_INTR */
+ /* irq_idx: 288-291 */
+ { DPU_IRQ_TYPE_AD4_BL_DONE, DSPP_1, DPU_INTR_BACKLIGHT_UPDATED, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 292-295 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 296-299 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 300-303 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 304-307 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 308-311 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ /* irq_idx: 312-315 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+	/* irq_idx: 316-319 */
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+ { DPU_IRQ_TYPE_RESERVED, 0, 0, 9},
+};
+
+static int dpu_hw_intr_irqidx_lookup(enum dpu_intr_type intr_type,
+ u32 instance_idx)
+{
+ int i;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_irq_map); i++) {
+ if (intr_type == dpu_irq_map[i].intr_type &&
+ instance_idx == dpu_irq_map[i].instance_idx)
+ return i;
+ }
+
+	pr_debug("IRQ lookup failed: intr_type=%d, instance_idx=%d\n",
+			intr_type, instance_idx);
+ return -EINVAL;
+}
+
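A minimal usage sketch of the lookup-then-enable flow this table supports: resolve a flat irq_idx first, then drive the ops with it. The wrapper name and the DPU_IRQ_TYPE_INTF_VSYNC/INTF_1 pair are this sketch's assumptions, not part of the patch.

static int example_enable_vsync_irq(struct dpu_hw_intr *intr)
{
	int irq_idx;

	/* resolve the flat table index for the INTF_1 vsync interrupt */
	irq_idx = intr->ops.irq_idx_lookup(DPU_IRQ_TYPE_INTF_VSYNC, INTF_1);
	if (irq_idx < 0)
		return irq_idx;

	/* unmask it; the cached-mask logic below makes this idempotent */
	return intr->ops.enable_irq(intr, irq_idx);
}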
+static void dpu_hw_intr_set_mask(struct dpu_hw_intr *intr, uint32_t reg_off,
+ uint32_t mask)
+{
+ if (!intr)
+ return;
+
+ DPU_REG_WRITE(&intr->hw, reg_off, mask);
+
+ /* ensure register writes go through */
+ wmb();
+}
+
+static void dpu_hw_intr_dispatch_irq(struct dpu_hw_intr *intr,
+ void (*cbfunc)(void *, int),
+ void *arg)
+{
+ int reg_idx;
+ int irq_idx;
+ int start_idx;
+ int end_idx;
+ u32 irq_status;
+ unsigned long irq_flags;
+
+ if (!intr)
+ return;
+
+	/*
+	 * The dispatcher has already saved the IRQ status before calling
+	 * this function. Walk each saved status word and find the
+	 * matching irq lookup index.
+	 */
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ for (reg_idx = 0; reg_idx < ARRAY_SIZE(dpu_intr_set); reg_idx++) {
+ irq_status = intr->save_irq_status[reg_idx];
+
+		/*
+		 * Each interrupt register covers a fixed range of 32
+		 * indexes in dpu_irq_map.
+		 */
+ start_idx = reg_idx * 32;
+ end_idx = start_idx + 32;
+
+ if (start_idx >= ARRAY_SIZE(dpu_irq_map) ||
+ end_idx > ARRAY_SIZE(dpu_irq_map))
+ continue;
+
+		/*
+		 * Search for matching interrupt statuses in the irq map;
+		 * start_idx and end_idx define the search range within
+		 * dpu_irq_map.
+		 */
+ for (irq_idx = start_idx;
+ (irq_idx < end_idx) && irq_status;
+ irq_idx++)
+ if ((irq_status & dpu_irq_map[irq_idx].irq_mask) &&
+ (dpu_irq_map[irq_idx].reg_idx == reg_idx)) {
+				/*
+				 * On an irq-mask match, invoke the given
+				 * cbfunc, which is responsible for clearing
+				 * the interrupt status. If no cbfunc is
+				 * provided, clear the interrupt here
+				 * instead.
+				 */
+ if (cbfunc)
+ cbfunc(arg, irq_idx);
+ else
+ intr->ops.clear_intr_status_nolock(
+ intr, irq_idx);
+
+				/*
+				 * When the callback finishes, clear the
+				 * matching mask from irq_status. Once
+				 * irq_status is fully cleared, the search
+				 * can stop early.
+				 */
+ irq_status &= ~dpu_irq_map[irq_idx].irq_mask;
+ }
+ }
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
+
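The callback contract above is easy to get wrong: dispatch runs with irq_lock held, so a callback that acknowledges the interrupt itself must use the nolock clear. A hedged example callback (the name and the empty handling step are illustrative):

static void example_irq_cb(void *arg, int irq_idx)
{
	struct dpu_hw_intr *intr = arg;

	/*
	 * Handle the event for irq_idx here, then acknowledge it.
	 * We are already under intr->irq_lock, so the nolock variant
	 * of the status clear is the correct one.
	 */
	intr->ops.clear_intr_status_nolock(intr, irq_idx);
}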
+static int dpu_hw_intr_enable_irq(struct dpu_hw_intr *intr, int irq_idx)
+{
+ int reg_idx;
+ unsigned long irq_flags;
+ const struct dpu_intr_reg *reg;
+ const struct dpu_irq_type *irq;
+ const char *dbgstr = NULL;
+ uint32_t cache_irq_mask;
+
+ if (!intr)
+ return -EINVAL;
+
+ if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ irq = &dpu_irq_map[irq_idx];
+ reg_idx = irq->reg_idx;
+ reg = &dpu_intr_set[reg_idx];
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ cache_irq_mask = intr->cache_irq_mask[reg_idx];
+ if (cache_irq_mask & irq->irq_mask) {
+ dbgstr = "DPU IRQ already set:";
+ } else {
+ dbgstr = "DPU IRQ enabled:";
+
+ cache_irq_mask |= irq->irq_mask;
+		/* Clear any pending interrupts */
+ DPU_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+ /* Enabling interrupts with the new mask */
+ DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+
+ /* ensure register write goes through */
+ wmb();
+
+ intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+ }
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+ pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+ irq->irq_mask, cache_irq_mask);
+
+ return 0;
+}
+
+static int dpu_hw_intr_disable_irq_nolock(struct dpu_hw_intr *intr, int irq_idx)
+{
+ int reg_idx;
+ const struct dpu_intr_reg *reg;
+ const struct dpu_irq_type *irq;
+ const char *dbgstr = NULL;
+ uint32_t cache_irq_mask;
+
+ if (!intr)
+ return -EINVAL;
+
+ if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ irq = &dpu_irq_map[irq_idx];
+ reg_idx = irq->reg_idx;
+ reg = &dpu_intr_set[reg_idx];
+
+ cache_irq_mask = intr->cache_irq_mask[reg_idx];
+ if ((cache_irq_mask & irq->irq_mask) == 0) {
+ dbgstr = "DPU IRQ is already cleared:";
+ } else {
+ dbgstr = "DPU IRQ mask disable:";
+
+ cache_irq_mask &= ~irq->irq_mask;
+ /* Disable interrupts based on the new mask */
+ DPU_REG_WRITE(&intr->hw, reg->en_off, cache_irq_mask);
+		/* Clear any pending interrupts */
+ DPU_REG_WRITE(&intr->hw, reg->clr_off, irq->irq_mask);
+
+ /* ensure register write goes through */
+ wmb();
+
+ intr->cache_irq_mask[reg_idx] = cache_irq_mask;
+ }
+
+ pr_debug("%s MASK:0x%.8x, CACHE-MASK:0x%.8x\n", dbgstr,
+ irq->irq_mask, cache_irq_mask);
+
+ return 0;
+}
+
+static int dpu_hw_intr_disable_irq(struct dpu_hw_intr *intr, int irq_idx)
+{
+ unsigned long irq_flags;
+
+ if (!intr)
+ return -EINVAL;
+
+ if (irq_idx < 0 || irq_idx >= ARRAY_SIZE(dpu_irq_map)) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return -EINVAL;
+ }
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ dpu_hw_intr_disable_irq_nolock(intr, irq_idx);
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+ return 0;
+}
+
+static int dpu_hw_intr_clear_irqs(struct dpu_hw_intr *intr)
+{
+ int i;
+
+ if (!intr)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].clr_off, 0xffffffff);
+
+ /* ensure register writes go through */
+ wmb();
+
+ return 0;
+}
+
+static int dpu_hw_intr_disable_irqs(struct dpu_hw_intr *intr)
+{
+ int i;
+
+ if (!intr)
+ return -EINVAL;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++)
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].en_off, 0x00000000);
+
+ /* ensure register writes go through */
+ wmb();
+
+ return 0;
+}
+
+static int dpu_hw_intr_get_valid_interrupts(struct dpu_hw_intr *intr,
+ uint32_t *mask)
+{
+ if (!intr || !mask)
+ return -EINVAL;
+
+ *mask = IRQ_SOURCE_MDP | IRQ_SOURCE_DSI0 | IRQ_SOURCE_DSI1
+ | IRQ_SOURCE_HDMI | IRQ_SOURCE_EDP;
+
+ return 0;
+}
+
+static void dpu_hw_intr_get_interrupt_statuses(struct dpu_hw_intr *intr)
+{
+ int i;
+ u32 enable_mask;
+ unsigned long irq_flags;
+
+ if (!intr)
+ return;
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ for (i = 0; i < ARRAY_SIZE(dpu_intr_set); i++) {
+ /* Read interrupt status */
+ intr->save_irq_status[i] = DPU_REG_READ(&intr->hw,
+ dpu_intr_set[i].status_off);
+
+ /* Read enable mask */
+ enable_mask = DPU_REG_READ(&intr->hw, dpu_intr_set[i].en_off);
+
+ /* and clear the interrupt */
+ if (intr->save_irq_status[i])
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[i].clr_off,
+ intr->save_irq_status[i]);
+
+ /* Finally update IRQ status based on enable mask */
+ intr->save_irq_status[i] &= enable_mask;
+ }
+
+ /* ensure register writes go through */
+ wmb();
+
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
+
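Combined with the dispatcher above, the intended top-level flow is: latch all status words once, then fan out. A minimal ISR shape, assuming <linux/interrupt.h> and that the handler's data pointer is the interrupt object (both assumptions of this sketch, which reuses the example_irq_cb sketch from earlier):

static irqreturn_t example_dpu_isr(int irq, void *data)
{
	struct dpu_hw_intr *intr = data;

	/* latch raw status into save_irq_status[] and ack the hardware */
	intr->ops.get_interrupt_statuses(intr);
	/* walk the latched words and invoke per-irq callbacks */
	intr->ops.dispatch_irqs(intr, example_irq_cb, intr);

	return IRQ_HANDLED;
}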
+static void dpu_hw_intr_clear_intr_status_nolock(struct dpu_hw_intr *intr,
+ int irq_idx)
+{
+ int reg_idx;
+
+ if (!intr)
+ return;
+
+ reg_idx = dpu_irq_map[irq_idx].reg_idx;
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
+ dpu_irq_map[irq_idx].irq_mask);
+
+ /* ensure register writes go through */
+ wmb();
+}
+
+static void dpu_hw_intr_clear_interrupt_status(struct dpu_hw_intr *intr,
+ int irq_idx)
+{
+ unsigned long irq_flags;
+
+ if (!intr)
+ return;
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+ dpu_hw_intr_clear_intr_status_nolock(intr, irq_idx);
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+}
+
+static u32 dpu_hw_intr_get_interrupt_status(struct dpu_hw_intr *intr,
+ int irq_idx, bool clear)
+{
+ int reg_idx;
+ unsigned long irq_flags;
+ u32 intr_status;
+
+ if (!intr)
+ return 0;
+
+ if (irq_idx >= ARRAY_SIZE(dpu_irq_map) || irq_idx < 0) {
+ pr_err("invalid IRQ index: [%d]\n", irq_idx);
+ return 0;
+ }
+
+ spin_lock_irqsave(&intr->irq_lock, irq_flags);
+
+ reg_idx = dpu_irq_map[irq_idx].reg_idx;
+ intr_status = DPU_REG_READ(&intr->hw,
+ dpu_intr_set[reg_idx].status_off) &
+ dpu_irq_map[irq_idx].irq_mask;
+ if (intr_status && clear)
+ DPU_REG_WRITE(&intr->hw, dpu_intr_set[reg_idx].clr_off,
+ intr_status);
+
+ /* ensure register writes go through */
+ wmb();
+
+ spin_unlock_irqrestore(&intr->irq_lock, irq_flags);
+
+ return intr_status;
+}
+
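One plausible use of the read-and-clear form, e.g. while diagnosing a frame-done timeout; the wrapper and log text are illustrative, not part of the patch:

static void example_check_stale_irq(struct dpu_hw_intr *intr, int irq_idx)
{
	/* read the status bit for irq_idx and clear it in one locked pass */
	u32 pending = intr->ops.get_interrupt_status(intr, irq_idx, true);

	if (pending)
		pr_warn("DPU irq %d was pending but never dispatched\n",
			irq_idx);
}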
+static void __setup_intr_ops(struct dpu_hw_intr_ops *ops)
+{
+ ops->set_mask = dpu_hw_intr_set_mask;
+ ops->irq_idx_lookup = dpu_hw_intr_irqidx_lookup;
+ ops->enable_irq = dpu_hw_intr_enable_irq;
+ ops->disable_irq = dpu_hw_intr_disable_irq;
+ ops->dispatch_irqs = dpu_hw_intr_dispatch_irq;
+ ops->clear_all_irqs = dpu_hw_intr_clear_irqs;
+ ops->disable_all_irqs = dpu_hw_intr_disable_irqs;
+ ops->get_valid_interrupts = dpu_hw_intr_get_valid_interrupts;
+ ops->get_interrupt_statuses = dpu_hw_intr_get_interrupt_statuses;
+ ops->clear_interrupt_status = dpu_hw_intr_clear_interrupt_status;
+ ops->clear_intr_status_nolock = dpu_hw_intr_clear_intr_status_nolock;
+ ops->get_interrupt_status = dpu_hw_intr_get_interrupt_status;
+}
+
+static void __intr_offset(struct dpu_mdss_cfg *m,
+ void __iomem *addr, struct dpu_hw_blk_reg_map *hw)
+{
+ hw->base_off = addr;
+ hw->blk_off = m->mdp[0].base;
+ hw->hwversion = m->hwversion;
+}
+
+struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
+ struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_intr *intr;
+
+ if (!addr || !m)
+ return ERR_PTR(-EINVAL);
+
+ intr = kzalloc(sizeof(*intr), GFP_KERNEL);
+ if (!intr)
+ return ERR_PTR(-ENOMEM);
+
+ __intr_offset(m, addr, &intr->hw);
+ __setup_intr_ops(&intr->ops);
+
+ intr->irq_idx_tbl_size = ARRAY_SIZE(dpu_irq_map);
+
+ intr->cache_irq_mask = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
+ GFP_KERNEL);
+ if (intr->cache_irq_mask == NULL) {
+ kfree(intr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ intr->save_irq_status = kcalloc(ARRAY_SIZE(dpu_intr_set), sizeof(u32),
+ GFP_KERNEL);
+ if (intr->save_irq_status == NULL) {
+ kfree(intr->cache_irq_mask);
+ kfree(intr);
+ return ERR_PTR(-ENOMEM);
+ }
+
+ spin_lock_init(&intr->irq_lock);
+
+ return intr;
+}
+
+void dpu_hw_intr_destroy(struct dpu_hw_intr *intr)
+{
+ if (intr) {
+ kfree(intr->cache_irq_mask);
+ kfree(intr->save_irq_status);
+ kfree(intr);
+ }
+}
+
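Putting the file together, a hedged bring-up/teardown sketch; masking and acking everything first is this sketch's own suggestion for reaching a quiet state, not something the patch mandates:

static struct dpu_hw_intr *example_intr_bringup(void __iomem *mmio,
						struct dpu_mdss_cfg *catalog)
{
	struct dpu_hw_intr *intr = dpu_hw_intr_init(mmio, catalog);

	if (IS_ERR(intr))
		return intr;

	/* mask and ack everything so no stale state survives bring-up */
	intr->ops.disable_all_irqs(intr);
	intr->ops.clear_all_irqs(intr);

	return intr;
}

/* on teardown, the counterpart is simply: dpu_hw_intr_destroy(intr); */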
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
new file mode 100644
index 000000000000..61e4cba36562
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_interrupts.h
@@ -0,0 +1,257 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_INTERRUPTS_H
+#define _DPU_HW_INTERRUPTS_H
+
+#include <linux/types.h>
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_mdss.h"
+
+#define IRQ_SOURCE_MDP BIT(0)
+#define IRQ_SOURCE_DSI0 BIT(4)
+#define IRQ_SOURCE_DSI1 BIT(5)
+#define IRQ_SOURCE_HDMI BIT(8)
+#define IRQ_SOURCE_EDP BIT(12)
+#define IRQ_SOURCE_MHL BIT(16)
+
+/**
+ * enum dpu_intr_type - HW interrupt type
+ * @DPU_IRQ_TYPE_WB_ROT_COMP: WB rotator done
+ * @DPU_IRQ_TYPE_WB_WFD_COMP: WB WFD done
+ * @DPU_IRQ_TYPE_PING_PONG_COMP: PingPong done
+ * @DPU_IRQ_TYPE_PING_PONG_RD_PTR: PingPong read pointer
+ * @DPU_IRQ_TYPE_PING_PONG_WR_PTR: PingPong write pointer
+ * @DPU_IRQ_TYPE_PING_PONG_AUTO_REF: PingPong auto refresh
+ * @DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK: PingPong Tear check
+ * @DPU_IRQ_TYPE_PING_PONG_TE_CHECK: PingPong TE detection
+ * @DPU_IRQ_TYPE_INTF_UNDER_RUN: INTF underrun
+ * @DPU_IRQ_TYPE_INTF_VSYNC: INTF VSYNC
+ * @DPU_IRQ_TYPE_CWB_OVERFLOW: Concurrent WB overflow
+ * @DPU_IRQ_TYPE_HIST_VIG_DONE: VIG Histogram done
+ * @DPU_IRQ_TYPE_HIST_VIG_RSTSEQ: VIG Histogram reset
+ * @DPU_IRQ_TYPE_HIST_DSPP_DONE: DSPP Histogram done
+ * @DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ: DSPP Histogram reset
+ * @DPU_IRQ_TYPE_WD_TIMER: Watchdog timer
+ * @DPU_IRQ_TYPE_SFI_VIDEO_IN: Video static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_VIDEO_OUT: Video static frame INTR out-of static
+ * @DPU_IRQ_TYPE_SFI_CMD_0_IN: DSI CMD0 static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_CMD_0_OUT: DSI CMD0 static frame INTR out-of static
+ * @DPU_IRQ_TYPE_SFI_CMD_1_IN: DSI CMD1 static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_CMD_1_OUT: DSI CMD1 static frame INTR out-of static
+ * @DPU_IRQ_TYPE_SFI_CMD_2_IN: DSI CMD2 static frame INTR into static
+ * @DPU_IRQ_TYPE_SFI_CMD_2_OUT: DSI CMD2 static frame INTR out-of static
+ * @DPU_IRQ_TYPE_PROG_LINE: Programmable Line interrupt
+ * @DPU_IRQ_TYPE_AD4_BL_DONE: AD4 backlight
+ * @DPU_IRQ_TYPE_CTL_START: Control start
+ * @DPU_IRQ_TYPE_RESERVED: Reserved for expansion
+ */
+enum dpu_intr_type {
+ DPU_IRQ_TYPE_WB_ROT_COMP,
+ DPU_IRQ_TYPE_WB_WFD_COMP,
+ DPU_IRQ_TYPE_PING_PONG_COMP,
+ DPU_IRQ_TYPE_PING_PONG_RD_PTR,
+ DPU_IRQ_TYPE_PING_PONG_WR_PTR,
+ DPU_IRQ_TYPE_PING_PONG_AUTO_REF,
+ DPU_IRQ_TYPE_PING_PONG_TEAR_CHECK,
+ DPU_IRQ_TYPE_PING_PONG_TE_CHECK,
+ DPU_IRQ_TYPE_INTF_UNDER_RUN,
+ DPU_IRQ_TYPE_INTF_VSYNC,
+ DPU_IRQ_TYPE_CWB_OVERFLOW,
+ DPU_IRQ_TYPE_HIST_VIG_DONE,
+ DPU_IRQ_TYPE_HIST_VIG_RSTSEQ,
+ DPU_IRQ_TYPE_HIST_DSPP_DONE,
+ DPU_IRQ_TYPE_HIST_DSPP_RSTSEQ,
+ DPU_IRQ_TYPE_WD_TIMER,
+ DPU_IRQ_TYPE_SFI_VIDEO_IN,
+ DPU_IRQ_TYPE_SFI_VIDEO_OUT,
+ DPU_IRQ_TYPE_SFI_CMD_0_IN,
+ DPU_IRQ_TYPE_SFI_CMD_0_OUT,
+ DPU_IRQ_TYPE_SFI_CMD_1_IN,
+ DPU_IRQ_TYPE_SFI_CMD_1_OUT,
+ DPU_IRQ_TYPE_SFI_CMD_2_IN,
+ DPU_IRQ_TYPE_SFI_CMD_2_OUT,
+ DPU_IRQ_TYPE_PROG_LINE,
+ DPU_IRQ_TYPE_AD4_BL_DONE,
+ DPU_IRQ_TYPE_CTL_START,
+ DPU_IRQ_TYPE_RESERVED,
+};
+
+struct dpu_hw_intr;
+
+/**
+ * Interrupt operations.
+ */
+struct dpu_hw_intr_ops {
+ /**
+ * set_mask - Programs the given interrupt register with the
+ * given interrupt mask. Register value will get overwritten.
+ * @intr: HW interrupt handle
+ * @reg_off: MDSS HW register offset
+ * @irqmask: IRQ mask value
+ */
+ void (*set_mask)(
+ struct dpu_hw_intr *intr,
+		uint32_t reg_off,
+ uint32_t irqmask);
+
+ /**
+	 * irq_idx_lookup - Look up the IRQ index for a HW interrupt type;
+	 *                  used by all other irq-related ops
+	 * @intr_type:		Interrupt type defined in dpu_intr_type
+	 * @instance_idx:	HW interrupt block instance
+	 * @return:		irq_idx, or -EINVAL if the lookup fails
+ */
+ int (*irq_idx_lookup)(
+ enum dpu_intr_type intr_type,
+ u32 instance_idx);
+
+ /**
+ * enable_irq - Enable IRQ based on lookup IRQ index
+ * @intr: HW interrupt handle
+	 * @irq_idx:	irq index returned by irq_idx_lookup
+ * @return: 0 for success, otherwise failure
+ */
+ int (*enable_irq)(
+ struct dpu_hw_intr *intr,
+ int irq_idx);
+
+ /**
+ * disable_irq - Disable IRQ based on lookup IRQ index
+ * @intr: HW interrupt handle
+	 * @irq_idx:	irq index returned by irq_idx_lookup
+ * @return: 0 for success, otherwise failure
+ */
+ int (*disable_irq)(
+ struct dpu_hw_intr *intr,
+ int irq_idx);
+
+ /**
+ * clear_all_irqs - Clears all the interrupts (i.e. acknowledges
+ * any asserted IRQs). Useful during reset.
+ * @intr: HW interrupt handle
+ * @return: 0 for success, otherwise failure
+ */
+ int (*clear_all_irqs)(
+ struct dpu_hw_intr *intr);
+
+ /**
+ * disable_all_irqs - Disables all the interrupts. Useful during reset.
+ * @intr: HW interrupt handle
+ * @return: 0 for success, otherwise failure
+ */
+ int (*disable_all_irqs)(
+ struct dpu_hw_intr *intr);
+
+ /**
+ * dispatch_irqs - IRQ dispatcher will call the given callback
+ * function when a matching interrupt status bit is
+ * found in the irq mapping table.
+ * @intr: HW interrupt handle
+ * @cbfunc: Callback function pointer
+ * @arg: Argument to pass back during callback
+ */
+ void (*dispatch_irqs)(
+ struct dpu_hw_intr *intr,
+ void (*cbfunc)(void *arg, int irq_idx),
+ void *arg);
+
+ /**
+ * get_interrupt_statuses - Gets and store value from all interrupt
+ * status registers that are currently fired.
+ * @intr: HW interrupt handle
+ */
+ void (*get_interrupt_statuses)(
+ struct dpu_hw_intr *intr);
+
+ /**
+ * clear_interrupt_status - Clears HW interrupt status based on given
+ * lookup IRQ index.
+ * @intr: HW interrupt handle
+	 * @irq_idx:	irq index returned by irq_idx_lookup
+ */
+ void (*clear_interrupt_status)(
+ struct dpu_hw_intr *intr,
+ int irq_idx);
+
+ /**
+	 * clear_intr_status_nolock() - clears the HW interrupt status
+	 *                              without taking the IRQ lock; the
+	 *                              caller must already hold it
+	 * @intr:	HW interrupt handle
+	 * @irq_idx:	irq index returned by irq_idx_lookup
+ */
+ void (*clear_intr_status_nolock)(
+ struct dpu_hw_intr *intr,
+ int irq_idx);
+
+ /**
+	 * get_interrupt_status - Gets the HW interrupt status and
+	 *                        optionally clears it, based on the given
+	 *                        lookup IRQ index.
+	 * @intr:	HW interrupt handle
+	 * @irq_idx:	irq index returned by irq_idx_lookup
+	 * @clear:	True to clear the irq after read
+ */
+ u32 (*get_interrupt_status)(
+ struct dpu_hw_intr *intr,
+ int irq_idx,
+ bool clear);
+
+	/**
+	 * get_valid_interrupts - Gets a mask of all valid interrupt
+	 *                        sources within DPU. These are status
+	 *                        bits within the interrupt registers
+	 *                        that identify the source of an IRQ,
+	 *                        e.g. MDP, DSI or HDMI.
+	 * @intr:	HW interrupt handle
+	 * @mask:	Returned mask of valid interrupt sources
+	 * @return:	0 for success, otherwise failure
+ */
+ int (*get_valid_interrupts)(
+ struct dpu_hw_intr *intr,
+ uint32_t *mask);
+};
+
+/**
+ * struct dpu_hw_intr: hw interrupts handling data structure
+ * @hw: virtual address mapping
+ * @ops: function pointer mapping for IRQ handling
+ * @cache_irq_mask: array of IRQ enable masks reg storage created during init
+ * @save_irq_status: array of IRQ status reg storage created during init
+ * @irq_idx_tbl_size: total number of irq_idx mapped in the hw_interrupts
+ * @irq_lock: spinlock for accessing IRQ resources
+ */
+struct dpu_hw_intr {
+ struct dpu_hw_blk_reg_map hw;
+ struct dpu_hw_intr_ops ops;
+ u32 *cache_irq_mask;
+ u32 *save_irq_status;
+ u32 irq_idx_tbl_size;
+ spinlock_t irq_lock;
+};
+
+/**
+ * dpu_hw_intr_init(): Initializes the interrupts hw object
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_intr *dpu_hw_intr_init(void __iomem *addr,
+ struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_intr_destroy(): Cleanup interrupts hw object
+ * @intr: pointer to interrupts hw object
+ */
+void dpu_hw_intr_destroy(struct dpu_hw_intr *intr);
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
new file mode 100644
index 000000000000..d280df5613c9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.c
@@ -0,0 +1,349 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_intf.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define INTF_TIMING_ENGINE_EN 0x000
+#define INTF_CONFIG 0x004
+#define INTF_HSYNC_CTL 0x008
+#define INTF_VSYNC_PERIOD_F0 0x00C
+#define INTF_VSYNC_PERIOD_F1 0x010
+#define INTF_VSYNC_PULSE_WIDTH_F0 0x014
+#define INTF_VSYNC_PULSE_WIDTH_F1 0x018
+#define INTF_DISPLAY_V_START_F0 0x01C
+#define INTF_DISPLAY_V_START_F1 0x020
+#define INTF_DISPLAY_V_END_F0 0x024
+#define INTF_DISPLAY_V_END_F1 0x028
+#define INTF_ACTIVE_V_START_F0 0x02C
+#define INTF_ACTIVE_V_START_F1 0x030
+#define INTF_ACTIVE_V_END_F0 0x034
+#define INTF_ACTIVE_V_END_F1 0x038
+#define INTF_DISPLAY_HCTL 0x03C
+#define INTF_ACTIVE_HCTL 0x040
+#define INTF_BORDER_COLOR 0x044
+#define INTF_UNDERFLOW_COLOR 0x048
+#define INTF_HSYNC_SKEW 0x04C
+#define INTF_POLARITY_CTL 0x050
+#define INTF_TEST_CTL 0x054
+#define INTF_TP_COLOR0 0x058
+#define INTF_TP_COLOR1 0x05C
+#define INTF_FRAME_LINE_COUNT_EN 0x0A8
+#define INTF_FRAME_COUNT 0x0AC
+#define INTF_LINE_COUNT 0x0B0
+
+#define INTF_DEFLICKER_CONFIG 0x0F0
+#define INTF_DEFLICKER_STRNG_COEFF 0x0F4
+#define INTF_DEFLICKER_WEAK_COEFF 0x0F8
+
+#define INTF_DSI_CMD_MODE_TRIGGER_EN 0x084
+#define INTF_PANEL_FORMAT 0x090
+#define INTF_TPG_ENABLE 0x100
+#define INTF_TPG_MAIN_CONTROL 0x104
+#define INTF_TPG_VIDEO_CONFIG 0x108
+#define INTF_TPG_COMPONENT_LIMITS 0x10C
+#define INTF_TPG_RECTANGLE 0x110
+#define INTF_TPG_INITIAL_VALUE 0x114
+#define INTF_TPG_BLK_WHITE_PATTERN_FRAMES 0x118
+#define INTF_TPG_RGB_MAPPING 0x11C
+#define INTF_PROG_FETCH_START 0x170
+#define INTF_PROG_ROT_START 0x174
+
+#define INTF_MISR_CTRL 0x180
+#define INTF_MISR_SIGNATURE 0x184
+
+static struct dpu_intf_cfg *_intf_offset(enum dpu_intf intf,
+ struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->intf_count; i++) {
+ if ((intf == m->intf[i].id) &&
+ (m->intf[i].type != INTF_NONE)) {
+ b->base_off = addr;
+ b->blk_off = m->intf[i].base;
+ b->length = m->intf[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_INTF;
+ return &m->intf[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static void dpu_hw_intf_setup_timing_engine(struct dpu_hw_intf *ctx,
+ const struct intf_timing_params *p,
+ const struct dpu_format *fmt)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 hsync_period, vsync_period;
+ u32 display_v_start, display_v_end;
+ u32 hsync_start_x, hsync_end_x;
+ u32 active_h_start, active_h_end;
+ u32 active_v_start, active_v_end;
+ u32 active_hctl, display_hctl, hsync_ctl;
+ u32 polarity_ctl, den_polarity, hsync_polarity, vsync_polarity;
+ u32 panel_format;
+ u32 intf_cfg;
+
+ /* read interface_cfg */
+ intf_cfg = DPU_REG_READ(c, INTF_CONFIG);
+ hsync_period = p->hsync_pulse_width + p->h_back_porch + p->width +
+ p->h_front_porch;
+ vsync_period = p->vsync_pulse_width + p->v_back_porch + p->height +
+ p->v_front_porch;
+
+ display_v_start = ((p->vsync_pulse_width + p->v_back_porch) *
+ hsync_period) + p->hsync_skew;
+ display_v_end = ((vsync_period - p->v_front_porch) * hsync_period) +
+ p->hsync_skew - 1;
+
+ if (ctx->cap->type == INTF_EDP || ctx->cap->type == INTF_DP) {
+ display_v_start += p->hsync_pulse_width + p->h_back_porch;
+ display_v_end -= p->h_front_porch;
+ }
+
+ hsync_start_x = p->h_back_porch + p->hsync_pulse_width;
+ hsync_end_x = hsync_period - p->h_front_porch - 1;
+
+ if (p->width != p->xres) {
+ active_h_start = hsync_start_x;
+ active_h_end = active_h_start + p->xres - 1;
+ } else {
+ active_h_start = 0;
+ active_h_end = 0;
+ }
+
+ if (p->height != p->yres) {
+ active_v_start = display_v_start;
+ active_v_end = active_v_start + (p->yres * hsync_period) - 1;
+ } else {
+ active_v_start = 0;
+ active_v_end = 0;
+ }
+
+ if (active_h_end) {
+ active_hctl = (active_h_end << 16) | active_h_start;
+ intf_cfg |= BIT(29); /* ACTIVE_H_ENABLE */
+ } else {
+ active_hctl = 0;
+ }
+
+ if (active_v_end)
+ intf_cfg |= BIT(30); /* ACTIVE_V_ENABLE */
+
+ hsync_ctl = (hsync_period << 16) | p->hsync_pulse_width;
+ display_hctl = (hsync_end_x << 16) | hsync_start_x;
+
+ den_polarity = 0;
+ if (ctx->cap->type == INTF_HDMI) {
+ hsync_polarity = p->yres >= 720 ? 0 : 1;
+ vsync_polarity = p->yres >= 720 ? 0 : 1;
+ } else {
+ hsync_polarity = 0;
+ vsync_polarity = 0;
+ }
+ polarity_ctl = (den_polarity << 2) | /* DEN Polarity */
+ (vsync_polarity << 1) | /* VSYNC Polarity */
+ (hsync_polarity << 0); /* HSYNC Polarity */
+
+ if (!DPU_FORMAT_IS_YUV(fmt))
+ panel_format = (fmt->bits[C0_G_Y] |
+ (fmt->bits[C1_B_Cb] << 2) |
+ (fmt->bits[C2_R_Cr] << 4) |
+ (0x21 << 8));
+ else
+ /* Interface treats all the pixel data in RGB888 format */
+ panel_format = (COLOR_8BIT |
+ (COLOR_8BIT << 2) |
+ (COLOR_8BIT << 4) |
+ (0x21 << 8));
+
+ DPU_REG_WRITE(c, INTF_HSYNC_CTL, hsync_ctl);
+ DPU_REG_WRITE(c, INTF_VSYNC_PERIOD_F0, vsync_period * hsync_period);
+ DPU_REG_WRITE(c, INTF_VSYNC_PULSE_WIDTH_F0,
+ p->vsync_pulse_width * hsync_period);
+ DPU_REG_WRITE(c, INTF_DISPLAY_HCTL, display_hctl);
+ DPU_REG_WRITE(c, INTF_DISPLAY_V_START_F0, display_v_start);
+ DPU_REG_WRITE(c, INTF_DISPLAY_V_END_F0, display_v_end);
+ DPU_REG_WRITE(c, INTF_ACTIVE_HCTL, active_hctl);
+ DPU_REG_WRITE(c, INTF_ACTIVE_V_START_F0, active_v_start);
+ DPU_REG_WRITE(c, INTF_ACTIVE_V_END_F0, active_v_end);
+ DPU_REG_WRITE(c, INTF_BORDER_COLOR, p->border_clr);
+ DPU_REG_WRITE(c, INTF_UNDERFLOW_COLOR, p->underflow_clr);
+ DPU_REG_WRITE(c, INTF_HSYNC_SKEW, p->hsync_skew);
+ DPU_REG_WRITE(c, INTF_POLARITY_CTL, polarity_ctl);
+ DPU_REG_WRITE(c, INTF_FRAME_LINE_COUNT_EN, 0x3);
+ DPU_REG_WRITE(c, INTF_CONFIG, intf_cfg);
+ DPU_REG_WRITE(c, INTF_PANEL_FORMAT, panel_format);
+}
+
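For intuition about the register math above, here are the same formulas evaluated for standard CEA-861 1080p60 timings; the helper and the chosen numbers are this sketch's illustration, not part of the patch:

static void example_1080p_timing_math(void)
{
	u32 hpw = 44, hbp = 148, hfp = 88, width = 1920, hsync_skew = 0;
	u32 vpw = 5, vbp = 36, vfp = 4, height = 1080;

	u32 hsync_period = hpw + hbp + width + hfp;		/* 2200 */
	u32 vsync_period = vpw + vbp + height + vfp;		/* 1125 */
	u32 display_v_start = (vpw + vbp) * hsync_period
			      + hsync_skew;			/* 90200 */
	u32 display_v_end = (vsync_period - vfp) * hsync_period
			    + hsync_skew - 1;			/* 2466199 */

	pr_info("hsync=%u vsync=%u v_start=%u v_end=%u\n",
		hsync_period, vsync_period, display_v_start, display_v_end);
}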
+static void dpu_hw_intf_enable_timing_engine(
+ struct dpu_hw_intf *intf,
+ u8 enable)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+ /* Note: Display interface select is handled in top block hw layer */
+ DPU_REG_WRITE(c, INTF_TIMING_ENGINE_EN, enable != 0);
+}
+
+static void dpu_hw_intf_setup_prg_fetch(
+ struct dpu_hw_intf *intf,
+ const struct intf_prog_fetch *fetch)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+ int fetch_enable;
+
+	/*
+	 * Fetch should always start outside the active lines. If fetch is
+	 * programmed within the active region, hardware behavior is
+	 * unknown.
+	 */
+
+ fetch_enable = DPU_REG_READ(c, INTF_CONFIG);
+ if (fetch->enable) {
+ fetch_enable |= BIT(31);
+ DPU_REG_WRITE(c, INTF_PROG_FETCH_START,
+ fetch->fetch_start);
+ } else {
+ fetch_enable &= ~BIT(31);
+ }
+
+ DPU_REG_WRITE(c, INTF_CONFIG, fetch_enable);
+}
+
+static void dpu_hw_intf_get_status(
+ struct dpu_hw_intf *intf,
+ struct intf_status *s)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+
+ s->is_en = DPU_REG_READ(c, INTF_TIMING_ENGINE_EN);
+ if (s->is_en) {
+ s->frame_count = DPU_REG_READ(c, INTF_FRAME_COUNT);
+ s->line_count = DPU_REG_READ(c, INTF_LINE_COUNT);
+ } else {
+ s->line_count = 0;
+ s->frame_count = 0;
+ }
+}
+
+static void dpu_hw_intf_setup_misr(struct dpu_hw_intf *intf,
+ bool enable, u32 frame_count)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+ u32 config = 0;
+
+ DPU_REG_WRITE(c, INTF_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
+ /* clear misr data */
+ wmb();
+
+ if (enable)
+ config = (frame_count & MISR_FRAME_COUNT_MASK) |
+ MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
+
+ DPU_REG_WRITE(c, INTF_MISR_CTRL, config);
+}
+
+static u32 dpu_hw_intf_collect_misr(struct dpu_hw_intf *intf)
+{
+ struct dpu_hw_blk_reg_map *c = &intf->hw;
+
+ return DPU_REG_READ(c, INTF_MISR_SIGNATURE);
+}
+
+static u32 dpu_hw_intf_get_line_count(struct dpu_hw_intf *intf)
+{
+ struct dpu_hw_blk_reg_map *c;
+
+ if (!intf)
+ return 0;
+
+ c = &intf->hw;
+
+ return DPU_REG_READ(c, INTF_LINE_COUNT);
+}
+
+static void _setup_intf_ops(struct dpu_hw_intf_ops *ops,
+ unsigned long cap)
+{
+ ops->setup_timing_gen = dpu_hw_intf_setup_timing_engine;
+ ops->setup_prg_fetch = dpu_hw_intf_setup_prg_fetch;
+ ops->get_status = dpu_hw_intf_get_status;
+ ops->enable_timing = dpu_hw_intf_enable_timing_engine;
+ ops->setup_misr = dpu_hw_intf_setup_misr;
+ ops->collect_misr = dpu_hw_intf_collect_misr;
+ ops->get_line_count = dpu_hw_intf_get_line_count;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_intf *c;
+ struct dpu_intf_cfg *cfg;
+ int rc;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _intf_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ pr_err("failed to create dpu_hw_intf %d\n", idx);
+ return ERR_PTR(-EINVAL);
+ }
+
+	/* Assign ops */
+ c->idx = idx;
+ c->cap = cfg;
+ c->mdss = m;
+ _setup_intf_ops(&c->ops, c->cap->features);
+
+ rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_INTF, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ return c;
+
+blk_init_error:
+ kzfree(c);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_intf_destroy(struct dpu_hw_intf *intf)
+{
+ if (intf)
+ dpu_hw_blk_destroy(&intf->base);
+ kfree(intf);
+}
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
new file mode 100644
index 000000000000..a79d735da68d
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_intf.h
@@ -0,0 +1,128 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_INTF_H
+#define _DPU_HW_INTF_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_intf;
+
+/* intf timing settings */
+struct intf_timing_params {
+ u32 width; /* active width */
+ u32 height; /* active height */
+ u32 xres; /* Display panel width */
+ u32 yres; /* Display panel height */
+
+ u32 h_back_porch;
+ u32 h_front_porch;
+ u32 v_back_porch;
+ u32 v_front_porch;
+ u32 hsync_pulse_width;
+ u32 vsync_pulse_width;
+ u32 hsync_polarity;
+ u32 vsync_polarity;
+ u32 border_clr;
+ u32 underflow_clr;
+ u32 hsync_skew;
+};
+
+struct intf_prog_fetch {
+ u8 enable;
+ /* vsync counter for the front porch pixel line */
+ u32 fetch_start;
+};
+
+struct intf_status {
+ u8 is_en; /* interface timing engine is enabled or not */
+ u32 frame_count; /* frame count since timing engine enabled */
+ u32 line_count; /* current line count including blanking */
+};
+
+/**
+ * struct dpu_hw_intf_ops : Interface to the interface Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ * @setup_timing_gen: programs the timing engine
+ * @setup_prg_fetch: enables/disables the programmable fetch logic
+ * @enable_timing: enable/disable timing engine
+ * @get_status: returns whether the timing engine is enabled
+ * @setup_misr: enables/disables MISR in HW register
+ * @collect_misr: reads and stores MISR data from HW register
+ * @get_line_count: reads current vertical line counter
+ */
+struct dpu_hw_intf_ops {
+ void (*setup_timing_gen)(struct dpu_hw_intf *intf,
+ const struct intf_timing_params *p,
+ const struct dpu_format *fmt);
+
+ void (*setup_prg_fetch)(struct dpu_hw_intf *intf,
+ const struct intf_prog_fetch *fetch);
+
+ void (*enable_timing)(struct dpu_hw_intf *intf,
+ u8 enable);
+
+ void (*get_status)(struct dpu_hw_intf *intf,
+ struct intf_status *status);
+
+ void (*setup_misr)(struct dpu_hw_intf *intf,
+ bool enable, u32 frame_count);
+
+ u32 (*collect_misr)(struct dpu_hw_intf *intf);
+
+ u32 (*get_line_count)(struct dpu_hw_intf *intf);
+};
+
+struct dpu_hw_intf {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* intf */
+ enum dpu_intf idx;
+ const struct dpu_intf_cfg *cap;
+ const struct dpu_mdss_cfg *mdss;
+
+ /* ops */
+ struct dpu_hw_intf_ops ops;
+};
+
+/**
+ * to_dpu_hw_intf - convert base object dpu_hw_base to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_intf *to_dpu_hw_intf(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_intf, base);
+}
+
+/**
+ * dpu_hw_intf_init(): Initializes the intf driver for the passed
+ * interface idx.
+ * @idx: interface index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_intf *dpu_hw_intf_init(enum dpu_intf idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_intf_destroy(): Destroys INTF driver context
+ * @intf: Pointer to INTF driver context
+ */
+void dpu_hw_intf_destroy(struct dpu_hw_intf *intf);
+
+#endif /*_DPU_HW_INTF_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
new file mode 100644
index 000000000000..4ab72b0f07a5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.c
@@ -0,0 +1,261 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_kms.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_dbg.h"
+
+#define LM_OP_MODE 0x00
+#define LM_OUT_SIZE 0x04
+#define LM_BORDER_COLOR_0 0x08
+#define LM_BORDER_COLOR_1 0x010
+
+/* These registers are offsets from mixer base + stage base */
+#define LM_BLEND0_OP 0x00
+#define LM_BLEND0_CONST_ALPHA 0x04
+#define LM_FG_COLOR_FILL_COLOR_0 0x08
+#define LM_FG_COLOR_FILL_COLOR_1 0x0C
+#define LM_FG_COLOR_FILL_SIZE 0x10
+#define LM_FG_COLOR_FILL_XY 0x14
+
+#define LM_BLEND0_FG_ALPHA 0x04
+#define LM_BLEND0_BG_ALPHA 0x08
+
+#define LM_MISR_CTRL 0x310
+#define LM_MISR_SIGNATURE 0x314
+
+static struct dpu_lm_cfg *_lm_offset(enum dpu_lm mixer,
+ struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->mixer_count; i++) {
+ if (mixer == m->mixer[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->mixer[i].base;
+ b->length = m->mixer[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_LM;
+ return &m->mixer[i];
+ }
+ }
+
+	return ERR_PTR(-EINVAL);
+}
+
+/**
+ * _stage_offset(): returns the relative offset of the blend registers
+ * for the stage to be setup
+ * @c: mixer ctx contains the mixer to be programmed
+ * @stage: stage index to setup
+ */
+static inline int _stage_offset(struct dpu_hw_mixer *ctx, enum dpu_stage stage)
+{
+ const struct dpu_lm_sub_blks *sblk = ctx->cap->sblk;
+ int rc;
+
+ if (stage == DPU_STAGE_BASE)
+ rc = -EINVAL;
+ else if (stage <= sblk->maxblendstages)
+ rc = sblk->blendstage_base[stage - DPU_STAGE_0];
+ else
+ rc = -EINVAL;
+
+ return rc;
+}
+
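As an illustration with a hypothetical catalog (the numbers below are made up):

/* Suppose sblk->maxblendstages = 3 and
 * sblk->blendstage_base = { 0x20, 0x50, 0x80 }.
 *
 * Then _stage_offset(ctx, DPU_STAGE_1) == 0x50, so
 *
 *	DPU_REG_WRITE(c, LM_BLEND0_OP + 0x50, blend_op);
 *
 * targets the stage-1 blend op register at mixer blk_off + 0x50.
 */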
+static void dpu_hw_lm_setup_out(struct dpu_hw_mixer *ctx,
+ struct dpu_hw_mixer_cfg *mixer)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 outsize;
+ u32 op_mode;
+
+ op_mode = DPU_REG_READ(c, LM_OP_MODE);
+
+ outsize = mixer->out_height << 16 | mixer->out_width;
+ DPU_REG_WRITE(c, LM_OUT_SIZE, outsize);
+
+ /* SPLIT_LEFT_RIGHT */
+ if (mixer->right_mixer)
+ op_mode |= BIT(31);
+ else
+ op_mode &= ~BIT(31);
+ DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+static void dpu_hw_lm_setup_border_color(struct dpu_hw_mixer *ctx,
+ struct dpu_mdss_color *color,
+ u8 border_en)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ if (border_en) {
+ DPU_REG_WRITE(c, LM_BORDER_COLOR_0,
+ (color->color_0 & 0xFFF) |
+ ((color->color_1 & 0xFFF) << 0x10));
+ DPU_REG_WRITE(c, LM_BORDER_COLOR_1,
+ (color->color_2 & 0xFFF) |
+ ((color->color_3 & 0xFFF) << 0x10));
+ }
+}
+
+static void dpu_hw_lm_setup_blend_config_sdm845(struct dpu_hw_mixer *ctx,
+ u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int stage_off;
+ u32 const_alpha;
+
+ if (stage == DPU_STAGE_BASE)
+ return;
+
+ stage_off = _stage_offset(ctx, stage);
+ if (WARN_ON(stage_off < 0))
+ return;
+
+ const_alpha = (bg_alpha & 0xFF) | ((fg_alpha & 0xFF) << 16);
+ DPU_REG_WRITE(c, LM_BLEND0_CONST_ALPHA + stage_off, const_alpha);
+ DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
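The packing on SDM845-class mixers puts both constant alphas into one register, bg in bits [7:0] and fg in bits [23:16]. A worked example (editor's illustration) for a half-transparent fg over an opaque bg:

/* fg_alpha = 0x80, bg_alpha = 0xff:
 *
 *	const_alpha = (0xff & 0xff) | ((0x80 & 0xff) << 16)
 *		    = 0x008000ff
 */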
+static void dpu_hw_lm_setup_blend_config(struct dpu_hw_mixer *ctx,
+ u32 stage, u32 fg_alpha, u32 bg_alpha, u32 blend_op)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int stage_off;
+
+ if (stage == DPU_STAGE_BASE)
+ return;
+
+ stage_off = _stage_offset(ctx, stage);
+ if (WARN_ON(stage_off < 0))
+ return;
+
+ DPU_REG_WRITE(c, LM_BLEND0_FG_ALPHA + stage_off, fg_alpha);
+ DPU_REG_WRITE(c, LM_BLEND0_BG_ALPHA + stage_off, bg_alpha);
+ DPU_REG_WRITE(c, LM_BLEND0_OP + stage_off, blend_op);
+}
+
+static void dpu_hw_lm_setup_color3(struct dpu_hw_mixer *ctx,
+ uint32_t mixer_op_mode)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ int op_mode;
+
+ /* read the existing op_mode configuration */
+ op_mode = DPU_REG_READ(c, LM_OP_MODE);
+
+ op_mode = (op_mode & (BIT(31) | BIT(30))) | mixer_op_mode;
+
+ DPU_REG_WRITE(c, LM_OP_MODE, op_mode);
+}
+
+static void dpu_hw_lm_gc(struct dpu_hw_mixer *mixer,
+ void *cfg)
+{
+}
+
+static void dpu_hw_lm_setup_misr(struct dpu_hw_mixer *ctx,
+ bool enable, u32 frame_count)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+ u32 config = 0;
+
+ DPU_REG_WRITE(c, LM_MISR_CTRL, MISR_CTRL_STATUS_CLEAR);
+ /* clear misr data */
+ wmb();
+
+ if (enable)
+ config = (frame_count & MISR_FRAME_COUNT_MASK) |
+ MISR_CTRL_ENABLE | INTF_MISR_CTRL_FREE_RUN_MASK;
+
+ DPU_REG_WRITE(c, LM_MISR_CTRL, config);
+}
+
+static u32 dpu_hw_lm_collect_misr(struct dpu_hw_mixer *ctx)
+{
+ struct dpu_hw_blk_reg_map *c = &ctx->hw;
+
+ return DPU_REG_READ(c, LM_MISR_SIGNATURE);
+}
+
+static void _setup_mixer_ops(struct dpu_mdss_cfg *m,
+ struct dpu_hw_lm_ops *ops,
+ unsigned long features)
+{
+ ops->setup_mixer_out = dpu_hw_lm_setup_out;
+ if (IS_SDM845_TARGET(m->hwversion) || IS_SDM670_TARGET(m->hwversion))
+ ops->setup_blend_config = dpu_hw_lm_setup_blend_config_sdm845;
+ else
+ ops->setup_blend_config = dpu_hw_lm_setup_blend_config;
+ ops->setup_alpha_out = dpu_hw_lm_setup_color3;
+ ops->setup_border_color = dpu_hw_lm_setup_border_color;
+ ops->setup_gc = dpu_hw_lm_gc;
+ ops->setup_misr = dpu_hw_lm_setup_misr;
+ ops->collect_misr = dpu_hw_lm_collect_misr;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_mixer *c;
+ struct dpu_lm_cfg *cfg;
+ int rc;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _lm_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Assign ops */
+ c->idx = idx;
+ c->cap = cfg;
+ _setup_mixer_ops(m, &c->ops, c->cap->features);
+
+ rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_LM, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ return c;
+
+blk_init_error:
+ kzfree(c);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm)
+{
+ if (lm)
+ dpu_hw_blk_destroy(&lm->base);
+ kfree(lm);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
new file mode 100644
index 000000000000..e29e5dab31bf
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_lm.h
@@ -0,0 +1,122 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_LM_H
+#define _DPU_HW_LM_H
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_mixer;
+
+struct dpu_hw_mixer_cfg {
+ u32 out_width;
+ u32 out_height;
+ bool right_mixer;
+ int flags;
+};
+
+struct dpu_hw_color3_cfg {
+ u8 keep_fg[DPU_STAGE_MAX];
+};
+
+/**
+ * struct dpu_hw_lm_ops : Interface to the mixer Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_lm_ops {
+ /*
+ * Sets up mixer output width and height
+ * and border color if enabled
+ */
+ void (*setup_mixer_out)(struct dpu_hw_mixer *ctx,
+ struct dpu_hw_mixer_cfg *cfg);
+
+ /*
+ * Alpha blending configuration
+ * for the specified stage
+ */
+ void (*setup_blend_config)(struct dpu_hw_mixer *ctx, uint32_t stage,
+ uint32_t fg_alpha, uint32_t bg_alpha, uint32_t blend_op);
+
+ /*
+ * Alpha color component selection from either fg or bg
+ */
+ void (*setup_alpha_out)(struct dpu_hw_mixer *ctx, uint32_t mixer_op);
+
+ /**
+ * setup_border_color : enable/disable border color
+ */
+ void (*setup_border_color)(struct dpu_hw_mixer *ctx,
+ struct dpu_mdss_color *color,
+ u8 border_en);
+ /**
+ * setup_gc : enable/disable gamma correction feature
+ */
+ void (*setup_gc)(struct dpu_hw_mixer *mixer,
+ void *cfg);
+
+ /* setup_misr: enables/disables MISR in HW register */
+ void (*setup_misr)(struct dpu_hw_mixer *ctx,
+ bool enable, u32 frame_count);
+
+ /* collect_misr: reads and stores MISR data from HW register */
+ u32 (*collect_misr)(struct dpu_hw_mixer *ctx);
+};
+
+struct dpu_hw_mixer {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* lm */
+ enum dpu_lm idx;
+ const struct dpu_lm_cfg *cap;
+ const struct dpu_mdp_cfg *mdp;
+ const struct dpu_ctl_cfg *ctl;
+
+ /* ops */
+ struct dpu_hw_lm_ops ops;
+
+ /* store mixer info specific to display */
+ struct dpu_hw_mixer_cfg cfg;
+};
+
+/**
+ * to_dpu_hw_mixer - convert base object dpu_hw_base to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_mixer *to_dpu_hw_mixer(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_mixer, base);
+}
+
+/**
+ * dpu_hw_lm_init(): Initializes the mixer hw driver object.
+ * Should be called once per mixer, before the mixer is accessed.
+ * @idx: mixer index for which driver object is required
+ * @addr: mapped register io address of MDP
+ * @m : pointer to mdss catalog data
+ */
+struct dpu_hw_mixer *dpu_hw_lm_init(enum dpu_lm idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_lm_destroy(): Destroys layer mixer driver context
+ * @lm: Pointer to LM driver context
+ */
+void dpu_hw_lm_destroy(struct dpu_hw_mixer *lm);
+
+#endif /*_DPU_HW_LM_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
new file mode 100644
index 000000000000..35e6bf930924
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_mdss.h
@@ -0,0 +1,465 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_MDSS_H
+#define _DPU_HW_MDSS_H
+
+#include <linux/kernel.h>
+#include <linux/err.h>
+
+#include "msm_drv.h"
+
+#define DPU_DBG_NAME "dpu"
+
+#define DPU_NONE 0
+
+#ifndef DPU_CSC_MATRIX_COEFF_SIZE
+#define DPU_CSC_MATRIX_COEFF_SIZE 9
+#endif
+
+#ifndef DPU_CSC_CLAMP_SIZE
+#define DPU_CSC_CLAMP_SIZE 6
+#endif
+
+#ifndef DPU_CSC_BIAS_SIZE
+#define DPU_CSC_BIAS_SIZE 3
+#endif
+
+#ifndef DPU_MAX_PLANES
+#define DPU_MAX_PLANES 4
+#endif
+
+#define PIPES_PER_STAGE 2
+#ifndef DPU_MAX_DE_CURVES
+#define DPU_MAX_DE_CURVES 3
+#endif
+
+enum dpu_format_flags {
+ DPU_FORMAT_FLAG_YUV_BIT,
+ DPU_FORMAT_FLAG_DX_BIT,
+ DPU_FORMAT_FLAG_COMPRESSED_BIT,
+ DPU_FORMAT_FLAG_BIT_MAX,
+};
+
+#define DPU_FORMAT_FLAG_YUV BIT(DPU_FORMAT_FLAG_YUV_BIT)
+#define DPU_FORMAT_FLAG_DX BIT(DPU_FORMAT_FLAG_DX_BIT)
+#define DPU_FORMAT_FLAG_COMPRESSED BIT(DPU_FORMAT_FLAG_COMPRESSED_BIT)
+#define DPU_FORMAT_IS_YUV(X) \
+ (test_bit(DPU_FORMAT_FLAG_YUV_BIT, (X)->flag))
+#define DPU_FORMAT_IS_DX(X) \
+ (test_bit(DPU_FORMAT_FLAG_DX_BIT, (X)->flag))
+#define DPU_FORMAT_IS_LINEAR(X) ((X)->fetch_mode == DPU_FETCH_LINEAR)
+#define DPU_FORMAT_IS_TILE(X) \
+ (((X)->fetch_mode == DPU_FETCH_UBWC) && \
+ !test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+#define DPU_FORMAT_IS_UBWC(X) \
+ (((X)->fetch_mode == DPU_FETCH_UBWC) && \
+ test_bit(DPU_FORMAT_FLAG_COMPRESSED_BIT, (X)->flag))
+
+#define DPU_BLEND_FG_ALPHA_FG_CONST (0 << 0)
+#define DPU_BLEND_FG_ALPHA_BG_CONST (1 << 0)
+#define DPU_BLEND_FG_ALPHA_FG_PIXEL (2 << 0)
+#define DPU_BLEND_FG_ALPHA_BG_PIXEL (3 << 0)
+#define DPU_BLEND_FG_INV_ALPHA (1 << 2)
+#define DPU_BLEND_FG_MOD_ALPHA (1 << 3)
+#define DPU_BLEND_FG_INV_MOD_ALPHA (1 << 4)
+#define DPU_BLEND_FG_TRANSP_EN (1 << 5)
+#define DPU_BLEND_BG_ALPHA_FG_CONST (0 << 8)
+#define DPU_BLEND_BG_ALPHA_BG_CONST (1 << 8)
+#define DPU_BLEND_BG_ALPHA_FG_PIXEL (2 << 8)
+#define DPU_BLEND_BG_ALPHA_BG_PIXEL (3 << 8)
+#define DPU_BLEND_BG_INV_ALPHA (1 << 10)
+#define DPU_BLEND_BG_MOD_ALPHA (1 << 11)
+#define DPU_BLEND_BG_INV_MOD_ALPHA (1 << 12)
+#define DPU_BLEND_BG_TRANSP_EN (1 << 13)
+
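These flags are OR-ed together into the blend_op value handed to the mixers' setup_blend_config() op. The combination below mirrors a common source-over setup for premultiplied content, but it is this sketch's assumption rather than a recipe documented by the patch:

static u32 example_premult_blend_op(void)
{
	/*
	 * fg contributes scaled by its constant alpha; bg is scaled by
	 * the fg pixel alpha, inverted, i.e. (1 - fg_alpha).
	 */
	return DPU_BLEND_FG_ALPHA_FG_CONST |
	       DPU_BLEND_BG_ALPHA_FG_PIXEL |
	       DPU_BLEND_BG_INV_ALPHA;
}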
+#define DPU_VSYNC0_SOURCE_GPIO 0
+#define DPU_VSYNC1_SOURCE_GPIO 1
+#define DPU_VSYNC2_SOURCE_GPIO 2
+#define DPU_VSYNC_SOURCE_INTF_0 3
+#define DPU_VSYNC_SOURCE_INTF_1 4
+#define DPU_VSYNC_SOURCE_INTF_2 5
+#define DPU_VSYNC_SOURCE_INTF_3 6
+#define DPU_VSYNC_SOURCE_WD_TIMER_4 11
+#define DPU_VSYNC_SOURCE_WD_TIMER_3 12
+#define DPU_VSYNC_SOURCE_WD_TIMER_2 13
+#define DPU_VSYNC_SOURCE_WD_TIMER_1 14
+#define DPU_VSYNC_SOURCE_WD_TIMER_0 15
+
+enum dpu_hw_blk_type {
+ DPU_HW_BLK_TOP = 0,
+ DPU_HW_BLK_SSPP,
+ DPU_HW_BLK_LM,
+ DPU_HW_BLK_CTL,
+ DPU_HW_BLK_CDM,
+ DPU_HW_BLK_PINGPONG,
+ DPU_HW_BLK_INTF,
+ DPU_HW_BLK_WB,
+ DPU_HW_BLK_MAX,
+};
+
+enum dpu_mdp {
+ MDP_TOP = 0x1,
+ MDP_MAX,
+};
+
+enum dpu_sspp {
+ SSPP_NONE,
+ SSPP_VIG0,
+ SSPP_VIG1,
+ SSPP_VIG2,
+ SSPP_VIG3,
+ SSPP_RGB0,
+ SSPP_RGB1,
+ SSPP_RGB2,
+ SSPP_RGB3,
+ SSPP_DMA0,
+ SSPP_DMA1,
+ SSPP_DMA2,
+ SSPP_DMA3,
+ SSPP_CURSOR0,
+ SSPP_CURSOR1,
+ SSPP_MAX
+};
+
+enum dpu_sspp_type {
+ SSPP_TYPE_VIG,
+ SSPP_TYPE_RGB,
+ SSPP_TYPE_DMA,
+ SSPP_TYPE_CURSOR,
+ SSPP_TYPE_MAX
+};
+
+enum dpu_lm {
+ LM_0 = 1,
+ LM_1,
+ LM_2,
+ LM_3,
+ LM_4,
+ LM_5,
+ LM_6,
+ LM_MAX
+};
+
+enum dpu_stage {
+ DPU_STAGE_BASE = 0,
+ DPU_STAGE_0,
+ DPU_STAGE_1,
+ DPU_STAGE_2,
+ DPU_STAGE_3,
+ DPU_STAGE_4,
+ DPU_STAGE_5,
+ DPU_STAGE_6,
+ DPU_STAGE_7,
+ DPU_STAGE_8,
+ DPU_STAGE_9,
+ DPU_STAGE_10,
+ DPU_STAGE_MAX
+};
+enum dpu_dspp {
+ DSPP_0 = 1,
+ DSPP_1,
+ DSPP_2,
+ DSPP_3,
+ DSPP_MAX
+};
+
+enum dpu_ds {
+ DS_TOP,
+ DS_0,
+ DS_1,
+ DS_MAX
+};
+
+enum dpu_ctl {
+ CTL_0 = 1,
+ CTL_1,
+ CTL_2,
+ CTL_3,
+ CTL_4,
+ CTL_MAX
+};
+
+enum dpu_cdm {
+ CDM_0 = 1,
+ CDM_1,
+ CDM_MAX
+};
+
+enum dpu_pingpong {
+ PINGPONG_0 = 1,
+ PINGPONG_1,
+ PINGPONG_2,
+ PINGPONG_3,
+ PINGPONG_4,
+ PINGPONG_S0,
+ PINGPONG_MAX
+};
+
+enum dpu_intf {
+ INTF_0 = 1,
+ INTF_1,
+ INTF_2,
+ INTF_3,
+ INTF_4,
+ INTF_5,
+ INTF_6,
+ INTF_MAX
+};
+
+enum dpu_intf_type {
+ INTF_NONE = 0x0,
+ INTF_DSI = 0x1,
+ INTF_HDMI = 0x3,
+ INTF_LCDC = 0x5,
+ INTF_EDP = 0x9,
+ INTF_DP = 0xa,
+ INTF_TYPE_MAX,
+
+ /* virtual interfaces */
+ INTF_WB = 0x100,
+};
+
+enum dpu_intf_mode {
+ INTF_MODE_NONE = 0,
+ INTF_MODE_CMD,
+ INTF_MODE_VIDEO,
+ INTF_MODE_WB_BLOCK,
+ INTF_MODE_WB_LINE,
+ INTF_MODE_MAX
+};
+
+enum dpu_wb {
+ WB_0 = 1,
+ WB_1,
+ WB_2,
+ WB_3,
+ WB_MAX
+};
+
+enum dpu_ad {
+ AD_0 = 0x1,
+ AD_1,
+ AD_MAX
+};
+
+enum dpu_cwb {
+ CWB_0 = 0x1,
+ CWB_1,
+ CWB_2,
+ CWB_3,
+ CWB_MAX
+};
+
+enum dpu_wd_timer {
+ WD_TIMER_0 = 0x1,
+ WD_TIMER_1,
+ WD_TIMER_2,
+ WD_TIMER_3,
+ WD_TIMER_4,
+ WD_TIMER_5,
+ WD_TIMER_MAX
+};
+
+enum dpu_vbif {
+ VBIF_0,
+ VBIF_1,
+ VBIF_MAX,
+ VBIF_RT = VBIF_0,
+ VBIF_NRT = VBIF_1
+};
+
+enum dpu_iommu_domain {
+ DPU_IOMMU_DOMAIN_UNSECURE,
+ DPU_IOMMU_DOMAIN_SECURE,
+ DPU_IOMMU_DOMAIN_MAX
+};
+
+/**
+ * DPU HW component order color map
+ */
+enum {
+ C0_G_Y = 0,
+ C1_B_Cb = 1,
+ C2_R_Cr = 2,
+ C3_ALPHA = 3
+};
+
+/**
+ * enum dpu_plane_type - defines how the color components are packed
+ * @DPU_PLANE_INTERLEAVED : Color components in single plane
+ * @DPU_PLANE_PLANAR : Color component in separate planes
+ * @DPU_PLANE_PSEUDO_PLANAR : Chroma components interleaved in separate plane
+ */
+enum dpu_plane_type {
+ DPU_PLANE_INTERLEAVED,
+ DPU_PLANE_PLANAR,
+ DPU_PLANE_PSEUDO_PLANAR,
+};
+
+/**
+ * enum dpu_chroma_samp_type - chroma sub-sampling type
+ * @DPU_CHROMA_RGB : No chroma subsampling
+ * @DPU_CHROMA_H2V1 : Chroma pixels are horizontally subsampled
+ * @DPU_CHROMA_H1V2 : Chroma pixels are vertically subsampled
+ * @DPU_CHROMA_420 : 420 subsampling
+ */
+enum dpu_chroma_samp_type {
+ DPU_CHROMA_RGB,
+ DPU_CHROMA_H2V1,
+ DPU_CHROMA_H1V2,
+ DPU_CHROMA_420
+};
+
+/**
+ * enum dpu_fetch_type - defines how DPU HW fetches data
+ * @DPU_FETCH_LINEAR : fetch is line by line
+ * @DPU_FETCH_TILE : fetches data in Z order from a tile
+ * @DPU_FETCH_UBWC : fetch and decompress data
+ */
+enum dpu_fetch_type {
+ DPU_FETCH_LINEAR,
+ DPU_FETCH_TILE,
+ DPU_FETCH_UBWC
+};
+
+/**
+ * Value of enum chosen to fit the number of bits
+ * expected by the HW programming.
+ */
+enum {
+ COLOR_ALPHA_1BIT = 0,
+ COLOR_ALPHA_4BIT = 1,
+ COLOR_4BIT = 0,
+ COLOR_5BIT = 1, /* No 5-bit Alpha */
+ COLOR_6BIT = 2, /* 6-Bit Alpha also = 2 */
+ COLOR_8BIT = 3, /* 8-Bit Alpha also = 3 */
+};
+
+/**
+ * enum dpu_3d_blend_mode
+ * Describes how the 3D data is blended
+ * @BLEND_3D_NONE : 3d blending not enabled
+ * @BLEND_3D_FRAME_INT : Frame interleaving
+ * @BLEND_3D_H_ROW_INT : Horizontal row interleaving
+ * @BLEND_3D_V_ROW_INT : vertical row interleaving
+ * @BLEND_3D_COL_INT : column interleaving
+ * @BLEND_3D_MAX :
+ */
+enum dpu_3d_blend_mode {
+ BLEND_3D_NONE = 0,
+ BLEND_3D_FRAME_INT,
+ BLEND_3D_H_ROW_INT,
+ BLEND_3D_V_ROW_INT,
+ BLEND_3D_COL_INT,
+ BLEND_3D_MAX
+};
+
+/**
+ * struct dpu_format - defines the format configuration which allows
+ *                     DPU HW to correctly fetch and decode the format
+ * @base: base msm_format structure containing fourcc code
+ * @fetch_planes: how the color components are packed in pixel format
+ * @element: element color ordering
+ * @bits: element bit widths
+ * @chroma_sample: chroma sub-sampling type
+ * @unpack_align_msb: unpack aligned, 0 to LSB, 1 to MSB
+ * @unpack_tight: 0 for loose, 1 for tight
+ * @unpack_count: 0 = 1 component, 1 = 2 components
+ * @bpp: bytes per pixel
+ * @alpha_enable: whether the format has an alpha channel
+ * @num_planes: number of planes (including meta data planes)
+ * @fetch_mode: linear, tiled, or ubwc hw fetch behavior
+ * @is_yuv: is format a yuv variant
+ * @flag: usage bit flags
+ * @tile_width: format tile width
+ * @tile_height: format tile height
+ */
+struct dpu_format {
+ struct msm_format base;
+ enum dpu_plane_type fetch_planes;
+ u8 element[DPU_MAX_PLANES];
+ u8 bits[DPU_MAX_PLANES];
+ enum dpu_chroma_samp_type chroma_sample;
+ u8 unpack_align_msb;
+ u8 unpack_tight;
+ u8 unpack_count;
+ u8 bpp;
+ u8 alpha_enable;
+ u8 num_planes;
+ enum dpu_fetch_type fetch_mode;
+ DECLARE_BITMAP(flag, DPU_FORMAT_FLAG_BIT_MAX);
+ u16 tile_width;
+ u16 tile_height;
+};
+#define to_dpu_format(x) container_of(x, struct dpu_format, base)
+
+/**
+ * struct dpu_hw_fmt_layout - format information of the source pixel data
+ * @format: pixel format parameters
+ * @num_planes: number of planes (including meta data planes)
+ * @width: image width
+ * @height: image height
+ * @total_size: total size in bytes
+ * @plane_addr: address of each plane
+ * @plane_size: length of each plane
+ * @plane_pitch: pitch of each plane
+ */
+struct dpu_hw_fmt_layout {
+ const struct dpu_format *format;
+ uint32_t num_planes;
+ uint32_t width;
+ uint32_t height;
+ uint32_t total_size;
+ uint32_t plane_addr[DPU_MAX_PLANES];
+ uint32_t plane_size[DPU_MAX_PLANES];
+ uint32_t plane_pitch[DPU_MAX_PLANES];
+};
+
+struct dpu_csc_cfg {
+ /* matrix coefficients in S15.16 format */
+ uint32_t csc_mv[DPU_CSC_MATRIX_COEFF_SIZE];
+ uint32_t csc_pre_bv[DPU_CSC_BIAS_SIZE];
+ uint32_t csc_post_bv[DPU_CSC_BIAS_SIZE];
+ uint32_t csc_pre_lv[DPU_CSC_CLAMP_SIZE];
+ uint32_t csc_post_lv[DPU_CSC_CLAMP_SIZE];
+};
+
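The S15.16 convention stores value * 2^16 in a u32, with negative coefficients in two's complement. A couple of worked encodings (editor's illustration):

/*	 1.0 ->  65536 = 0x00010000
 *	 1.5 ->  98304 = 0x00018000
 *	-1.0 -> -65536 = 0xffff0000 (as u32)
 */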
+/**
+ * struct dpu_mdss_color - mdss color description
+ * color 0 : green
+ * color 1 : blue
+ * color 2 : red
+ * color 3 : alpha
+ */
+struct dpu_mdss_color {
+ u32 color_0;
+ u32 color_1;
+ u32 color_2;
+ u32 color_3;
+};
+
+/*
+ * Define bit masks for h/w logging.
+ */
+#define DPU_DBG_MASK_NONE (1 << 0)
+#define DPU_DBG_MASK_CDM (1 << 1)
+#define DPU_DBG_MASK_INTF (1 << 2)
+#define DPU_DBG_MASK_LM (1 << 3)
+#define DPU_DBG_MASK_CTL (1 << 4)
+#define DPU_DBG_MASK_PINGPONG (1 << 5)
+#define DPU_DBG_MASK_SSPP (1 << 6)
+#define DPU_DBG_MASK_WB (1 << 7)
+#define DPU_DBG_MASK_TOP (1 << 8)
+#define DPU_DBG_MASK_VBIF (1 << 9)
+#define DPU_DBG_MASK_ROT (1 << 10)
+
+#endif /* _DPU_HW_MDSS_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
new file mode 100644
index 000000000000..cc3a623903f4
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.c
@@ -0,0 +1,250 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/iopoll.h>
+
+#include "dpu_hw_mdss.h"
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+#include "dpu_trace.h"
+
+#define PP_TEAR_CHECK_EN 0x000
+#define PP_SYNC_CONFIG_VSYNC 0x004
+#define PP_SYNC_CONFIG_HEIGHT 0x008
+#define PP_SYNC_WRCOUNT 0x00C
+#define PP_VSYNC_INIT_VAL 0x010
+#define PP_INT_COUNT_VAL 0x014
+#define PP_SYNC_THRESH 0x018
+#define PP_START_POS 0x01C
+#define PP_RD_PTR_IRQ 0x020
+#define PP_WR_PTR_IRQ 0x024
+#define PP_OUT_LINE_COUNT 0x028
+#define PP_LINE_COUNT 0x02C
+
+#define PP_FBC_MODE 0x034
+#define PP_FBC_BUDGET_CTL 0x038
+#define PP_FBC_LOSSY_MODE 0x03C
+
+static struct dpu_pingpong_cfg *_pingpong_offset(enum dpu_pingpong pp,
+ struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->pingpong_count; i++) {
+ if (pp == m->pingpong[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->pingpong[i].base;
+ b->length = m->pingpong[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_PINGPONG;
+ return &m->pingpong[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static int dpu_hw_pp_setup_te_config(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_tear_check *te)
+{
+ struct dpu_hw_blk_reg_map *c;
+ int cfg;
+
+ if (!pp || !te)
+ return -EINVAL;
+ c = &pp->hw;
+
+	cfg = BIT(19); /* VSYNC_COUNTER_EN */
+ if (te->hw_vsync_mode)
+ cfg |= BIT(20);
+
+ cfg |= te->vsync_count;
+
+ DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
+ DPU_REG_WRITE(c, PP_SYNC_CONFIG_HEIGHT, te->sync_cfg_height);
+ DPU_REG_WRITE(c, PP_VSYNC_INIT_VAL, te->vsync_init_val);
+ DPU_REG_WRITE(c, PP_RD_PTR_IRQ, te->rd_ptr_irq);
+ DPU_REG_WRITE(c, PP_START_POS, te->start_pos);
+ DPU_REG_WRITE(c, PP_SYNC_THRESH,
+ ((te->sync_threshold_continue << 16) |
+ te->sync_threshold_start));
+ DPU_REG_WRITE(c, PP_SYNC_WRCOUNT,
+ (te->start_pos + te->sync_threshold_start + 1));
+
+ return 0;
+}
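+
+/*
+ * Example (illustrative only; the variable names and values below are
+ * hypothetical, not taken from this driver): a caller typically fills a
+ * dpu_hw_tear_check struct from the panel mode and programs it through
+ * the ops table:
+ *
+ *	struct dpu_hw_tear_check te = {
+ *		.vsync_count = vsync_hz / (vtotal * vrefresh),
+ *		.sync_cfg_height = 0xfff0,
+ *		.vsync_init_val = vdisplay,
+ *		.sync_threshold_start = 4,
+ *		.sync_threshold_continue = 4,
+ *		.start_pos = vdisplay,
+ *		.rd_ptr_irq = vdisplay + 1,
+ *		.hw_vsync_mode = 1,
+ *	};
+ *
+ *	pp->ops.setup_tearcheck(pp, &te);
+ */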
+
+static int dpu_hw_pp_poll_timeout_wr_ptr(struct dpu_hw_pingpong *pp,
+ u32 timeout_us)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 val;
+ int rc;
+
+ if (!pp)
+ return -EINVAL;
+
+ c = &pp->hw;
+ rc = readl_poll_timeout(c->base_off + c->blk_off + PP_LINE_COUNT,
+ val, (val & 0xffff) >= 1, 10, timeout_us);
+
+ return rc;
+}
+
+static int dpu_hw_pp_enable_te(struct dpu_hw_pingpong *pp, bool enable)
+{
+ struct dpu_hw_blk_reg_map *c;
+
+ if (!pp)
+ return -EINVAL;
+ c = &pp->hw;
+
+ DPU_REG_WRITE(c, PP_TEAR_CHECK_EN, enable);
+ return 0;
+}
+
+static int dpu_hw_pp_connect_external_te(struct dpu_hw_pingpong *pp,
+ bool enable_external_te)
+{
+	struct dpu_hw_blk_reg_map *c;
+ u32 cfg;
+ int orig;
+
+ if (!pp)
+ return -EINVAL;
+
+ c = &pp->hw;
+ cfg = DPU_REG_READ(c, PP_SYNC_CONFIG_VSYNC);
+ orig = (bool)(cfg & BIT(20));
+ if (enable_external_te)
+ cfg |= BIT(20);
+ else
+ cfg &= ~BIT(20);
+ DPU_REG_WRITE(c, PP_SYNC_CONFIG_VSYNC, cfg);
+ trace_dpu_pp_connect_ext_te(pp->idx - PINGPONG_0, cfg);
+
+ return orig;
+}
+
+static int dpu_hw_pp_get_vsync_info(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_pp_vsync_info *info)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 val;
+
+ if (!pp || !info)
+ return -EINVAL;
+ c = &pp->hw;
+
+ val = DPU_REG_READ(c, PP_VSYNC_INIT_VAL);
+ info->rd_ptr_init_val = val & 0xffff;
+
+ val = DPU_REG_READ(c, PP_INT_COUNT_VAL);
+ info->rd_ptr_frame_count = (val & 0xffff0000) >> 16;
+ info->rd_ptr_line_count = val & 0xffff;
+
+ val = DPU_REG_READ(c, PP_LINE_COUNT);
+ info->wr_ptr_line_count = val & 0xffff;
+
+ return 0;
+}
+
+static u32 dpu_hw_pp_get_line_count(struct dpu_hw_pingpong *pp)
+{
+	struct dpu_hw_blk_reg_map *c;
+ u32 height, init;
+ u32 line = 0xFFFF;
+
+ if (!pp)
+ return 0;
+ c = &pp->hw;
+
+ init = DPU_REG_READ(c, PP_VSYNC_INIT_VAL) & 0xFFFF;
+ height = DPU_REG_READ(c, PP_SYNC_CONFIG_HEIGHT) & 0xFFFF;
+
+ if (height < init)
+ goto line_count_exit;
+
+ line = DPU_REG_READ(c, PP_INT_COUNT_VAL) & 0xFFFF;
+
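+	/* unwrap the 16-bit counter relative to the programmed init value */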
+ if (line < init)
+ line += (0xFFFF - init);
+ else
+ line -= init;
+
+line_count_exit:
+ return line;
+}
+
+static void _setup_pingpong_ops(struct dpu_hw_pingpong_ops *ops,
+ const struct dpu_pingpong_cfg *hw_cap)
+{
+ ops->setup_tearcheck = dpu_hw_pp_setup_te_config;
+ ops->enable_tearcheck = dpu_hw_pp_enable_te;
+ ops->connect_external_te = dpu_hw_pp_connect_external_te;
+ ops->get_vsync_info = dpu_hw_pp_get_vsync_info;
+ ops->poll_timeout_wr_ptr = dpu_hw_pp_poll_timeout_wr_ptr;
+ ops->get_line_count = dpu_hw_pp_get_line_count;
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_pingpong *c;
+ struct dpu_pingpong_cfg *cfg;
+ int rc;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _pingpong_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ c->idx = idx;
+ c->caps = cfg;
+ _setup_pingpong_ops(&c->ops, c->caps);
+
+ rc = dpu_hw_blk_init(&c->base, DPU_HW_BLK_PINGPONG, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ return c;
+
+blk_init_error:
+ kzfree(c);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp)
+{
+ if (pp)
+ dpu_hw_blk_destroy(&pp->base);
+ kfree(pp);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
new file mode 100644
index 000000000000..3caccd7d6a3e
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_pingpong.h
@@ -0,0 +1,136 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_PINGPONG_H
+#define _DPU_HW_PINGPONG_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_pingpong;
+
+struct dpu_hw_tear_check {
+ /*
+	 * Ratio of the MDP vsync clock frequency (Hz) to the panel
+	 * refresh rate, divided by the number of lines
+ */
+ u32 vsync_count;
+ u32 sync_cfg_height;
+ u32 vsync_init_val;
+ u32 sync_threshold_start;
+ u32 sync_threshold_continue;
+ u32 start_pos;
+ u32 rd_ptr_irq;
+ u8 hw_vsync_mode;
+};
+
+struct dpu_hw_pp_vsync_info {
+ u32 rd_ptr_init_val; /* value of rd pointer at vsync edge */
+ u32 rd_ptr_frame_count; /* num frames sent since enabling interface */
+ u32 rd_ptr_line_count; /* current line on panel (rd ptr) */
+ u32 wr_ptr_line_count; /* current line within pp fifo (wr ptr) */
+};
+
+/**
+ * struct dpu_hw_pingpong_ops : Interface to the pingpong Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ * @setup_tearcheck : program tear check values
+ * @enable_tearcheck : enables tear check
+ * @connect_external_te : enable/disable listening to the external TE signal
+ * @get_vsync_info : retrieves timing info of the panel
+ * @poll_timeout_wr_ptr : poll until write pointer transmission starts
+ * @get_line_count : obtain current vertical line counter
+ */
+struct dpu_hw_pingpong_ops {
+ /**
+	 * enables vsync generation, sets up the init value of the
+	 * read pointer and programs the tear check configuration
+ */
+ int (*setup_tearcheck)(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_tear_check *cfg);
+
+ /**
+ * enables tear check block
+ */
+ int (*enable_tearcheck)(struct dpu_hw_pingpong *pp,
+ bool enable);
+
+ /**
+ * read, modify, write to either set or clear listening to external TE
+ * @Return: 1 if TE was originally connected, 0 if not, or -ERROR
+ */
+ int (*connect_external_te)(struct dpu_hw_pingpong *pp,
+ bool enable_external_te);
+
+ /**
+ * provides the programmed and current
+ * line_count
+ */
+ int (*get_vsync_info)(struct dpu_hw_pingpong *pp,
+ struct dpu_hw_pp_vsync_info *info);
+
+ /**
+ * poll until write pointer transmission starts
+ * @Return: 0 on success, -ETIMEDOUT on timeout
+ */
+ int (*poll_timeout_wr_ptr)(struct dpu_hw_pingpong *pp, u32 timeout_us);
+
+ /**
+ * Obtain current vertical line counter
+ */
+ u32 (*get_line_count)(struct dpu_hw_pingpong *pp);
+};
+
+struct dpu_hw_pingpong {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* pingpong */
+ enum dpu_pingpong idx;
+ const struct dpu_pingpong_cfg *caps;
+
+ /* ops */
+ struct dpu_hw_pingpong_ops ops;
+};
+
+/**
+ * to_dpu_hw_pingpong - convert base object dpu_hw_base to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_pingpong *to_dpu_hw_pingpong(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_pingpong, base);
+}
+
+/**
+ * dpu_hw_pingpong_init - initializes the pingpong driver for the passed
+ * pingpong idx.
+ * @idx: Pingpong index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m: Pointer to mdss catalog data
+ * Returns: Error code or allocated dpu_hw_pingpong context
+ */
+struct dpu_hw_pingpong *dpu_hw_pingpong_init(enum dpu_pingpong idx,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *m);
+
+/**
+ * dpu_hw_pingpong_destroy - destroys pingpong driver context
+ * should be called to free the context
+ * @pp: Pointer to PP driver context returned by dpu_hw_pingpong_init
+ */
+void dpu_hw_pingpong_destroy(struct dpu_hw_pingpong *pp);
+
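+/*
+ * A minimal usage sketch (mmio and catalog are placeholders for the
+ * caller's mapped register base and parsed hw catalog):
+ *
+ *	struct dpu_hw_pingpong *pp;
+ *
+ *	pp = dpu_hw_pingpong_init(PINGPONG_0, mmio, catalog);
+ *	if (IS_ERR(pp))
+ *		return PTR_ERR(pp);
+ *
+ *	pp->ops.enable_tearcheck(pp, true);
+ *	...
+ *	dpu_hw_pingpong_destroy(pp);
+ */
+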
+#endif /*_DPU_HW_PINGPONG_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
new file mode 100644
index 000000000000..c25b52a6b219
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.c
@@ -0,0 +1,753 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define DPU_FETCH_CONFIG_RESET_VALUE 0x00000087
+
+/* DPU_SSPP_SRC */
+#define SSPP_SRC_SIZE 0x00
+#define SSPP_SRC_XY 0x08
+#define SSPP_OUT_SIZE 0x0c
+#define SSPP_OUT_XY 0x10
+#define SSPP_SRC0_ADDR 0x14
+#define SSPP_SRC1_ADDR 0x18
+#define SSPP_SRC2_ADDR 0x1C
+#define SSPP_SRC3_ADDR 0x20
+#define SSPP_SRC_YSTRIDE0 0x24
+#define SSPP_SRC_YSTRIDE1 0x28
+#define SSPP_SRC_FORMAT 0x30
+#define SSPP_SRC_UNPACK_PATTERN 0x34
+#define SSPP_SRC_OP_MODE 0x38
+
+/* SSPP_MULTIRECT*/
+#define SSPP_SRC_SIZE_REC1 0x16C
+#define SSPP_SRC_XY_REC1 0x168
+#define SSPP_OUT_SIZE_REC1 0x160
+#define SSPP_OUT_XY_REC1 0x164
+#define SSPP_SRC_FORMAT_REC1 0x174
+#define SSPP_SRC_UNPACK_PATTERN_REC1 0x178
+#define SSPP_SRC_OP_MODE_REC1 0x17C
+#define SSPP_MULTIRECT_OPMODE 0x170
+#define SSPP_SRC_CONSTANT_COLOR_REC1 0x180
+#define SSPP_EXCL_REC_SIZE_REC1 0x184
+#define SSPP_EXCL_REC_XY_REC1 0x188
+
+#define MDSS_MDP_OP_DEINTERLACE BIT(22)
+#define MDSS_MDP_OP_DEINTERLACE_ODD BIT(23)
+#define MDSS_MDP_OP_IGC_ROM_1 BIT(18)
+#define MDSS_MDP_OP_IGC_ROM_0 BIT(17)
+#define MDSS_MDP_OP_IGC_EN BIT(16)
+#define MDSS_MDP_OP_FLIP_UD BIT(14)
+#define MDSS_MDP_OP_FLIP_LR BIT(13)
+#define MDSS_MDP_OP_BWC_EN BIT(0)
+#define MDSS_MDP_OP_PE_OVERRIDE BIT(31)
+#define MDSS_MDP_OP_BWC_LOSSLESS (0 << 1)
+#define MDSS_MDP_OP_BWC_Q_HIGH (1 << 1)
+#define MDSS_MDP_OP_BWC_Q_MED (2 << 1)
+
+#define SSPP_SRC_CONSTANT_COLOR 0x3c
+#define SSPP_EXCL_REC_CTL 0x40
+#define SSPP_UBWC_STATIC_CTRL 0x44
+#define SSPP_FETCH_CONFIG 0x048
+#define SSPP_DANGER_LUT 0x60
+#define SSPP_SAFE_LUT 0x64
+#define SSPP_CREQ_LUT 0x68
+#define SSPP_QOS_CTRL 0x6C
+#define SSPP_DECIMATION_CONFIG 0xB4
+#define SSPP_SRC_ADDR_SW_STATUS 0x70
+#define SSPP_CREQ_LUT_0 0x74
+#define SSPP_CREQ_LUT_1 0x78
+#define SSPP_SW_PIX_EXT_C0_LR 0x100
+#define SSPP_SW_PIX_EXT_C0_TB 0x104
+#define SSPP_SW_PIX_EXT_C0_REQ_PIXELS 0x108
+#define SSPP_SW_PIX_EXT_C1C2_LR 0x110
+#define SSPP_SW_PIX_EXT_C1C2_TB 0x114
+#define SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS 0x118
+#define SSPP_SW_PIX_EXT_C3_LR 0x120
+#define SSPP_SW_PIX_EXT_C3_TB 0x124
+#define SSPP_SW_PIX_EXT_C3_REQ_PIXELS 0x128
+#define SSPP_TRAFFIC_SHAPER 0x130
+#define SSPP_CDP_CNTL 0x134
+#define SSPP_UBWC_ERROR_STATUS 0x138
+#define SSPP_TRAFFIC_SHAPER_PREFILL 0x150
+#define SSPP_TRAFFIC_SHAPER_REC1_PREFILL 0x154
+#define SSPP_TRAFFIC_SHAPER_REC1 0x158
+#define SSPP_EXCL_REC_SIZE 0x1B4
+#define SSPP_EXCL_REC_XY 0x1B8
+#define SSPP_VIG_OP_MODE 0x0
+#define SSPP_VIG_CSC_10_OP_MODE 0x0
+#define SSPP_TRAFFIC_SHAPER_BPC_MAX 0xFF
+
+/* SSPP_QOS_CTRL */
+#define SSPP_QOS_CTRL_VBLANK_EN BIT(16)
+#define SSPP_QOS_CTRL_DANGER_SAFE_EN BIT(0)
+#define SSPP_QOS_CTRL_DANGER_VBLANK_MASK 0x3
+#define SSPP_QOS_CTRL_DANGER_VBLANK_OFF 4
+#define SSPP_QOS_CTRL_CREQ_VBLANK_MASK 0x3
+#define SSPP_QOS_CTRL_CREQ_VBLANK_OFF 20
+
+/* DPU_SSPP_SCALER_QSEED2 */
+#define SCALE_CONFIG 0x04
+#define COMP0_3_PHASE_STEP_X 0x10
+#define COMP0_3_PHASE_STEP_Y 0x14
+#define COMP1_2_PHASE_STEP_X 0x18
+#define COMP1_2_PHASE_STEP_Y 0x1c
+#define COMP0_3_INIT_PHASE_X 0x20
+#define COMP0_3_INIT_PHASE_Y 0x24
+#define COMP1_2_INIT_PHASE_X 0x28
+#define COMP1_2_INIT_PHASE_Y 0x2C
+#define VIG_0_QSEED2_SHARP 0x30
+
+/*
+ * Definitions for ViG op modes
+ */
+#define VIG_OP_CSC_DST_DATAFMT BIT(19)
+#define VIG_OP_CSC_SRC_DATAFMT BIT(18)
+#define VIG_OP_CSC_EN BIT(17)
+#define VIG_OP_MEM_PROT_CONT BIT(15)
+#define VIG_OP_MEM_PROT_VAL BIT(14)
+#define VIG_OP_MEM_PROT_SAT BIT(13)
+#define VIG_OP_MEM_PROT_HUE BIT(12)
+#define VIG_OP_HIST BIT(8)
+#define VIG_OP_SKY_COL BIT(7)
+#define VIG_OP_FOIL BIT(6)
+#define VIG_OP_SKIN_COL BIT(5)
+#define VIG_OP_PA_EN BIT(4)
+#define VIG_OP_PA_SAT_ZERO_EXP BIT(2)
+#define VIG_OP_MEM_PROT_BLEND BIT(1)
+
+/*
+ * Definitions for CSC 10 op modes
+ */
+#define VIG_CSC_10_SRC_DATAFMT BIT(1)
+#define VIG_CSC_10_EN BIT(0)
+#define CSC_10BIT_OFFSET 4
+
+/* traffic shaper clock in Hz */
+#define TS_CLK 19200000
+
+static inline int _sspp_subblk_offset(struct dpu_hw_pipe *ctx,
+ int s_id,
+ u32 *idx)
+{
+ int rc = 0;
+	const struct dpu_sspp_sub_blks *sblk;
+
+	if (!ctx)
+		return -EINVAL;
+
+	sblk = ctx->cap->sblk;
+
+ switch (s_id) {
+ case DPU_SSPP_SRC:
+ *idx = sblk->src_blk.base;
+ break;
+ case DPU_SSPP_SCALER_QSEED2:
+ case DPU_SSPP_SCALER_QSEED3:
+ case DPU_SSPP_SCALER_RGB:
+ *idx = sblk->scaler_blk.base;
+ break;
+ case DPU_SSPP_CSC:
+ case DPU_SSPP_CSC_10BIT:
+ *idx = sblk->csc_blk.base;
+ break;
+ default:
+ rc = -EINVAL;
+ }
+
+ return rc;
+}
+
+static void dpu_hw_sspp_setup_multirect(struct dpu_hw_pipe *ctx,
+ enum dpu_sspp_multirect_index index,
+ enum dpu_sspp_multirect_mode mode)
+{
+ u32 mode_mask;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (index == DPU_SSPP_RECT_SOLO) {
+ /**
+ * if rect index is RECT_SOLO, we cannot expect a
+ * virtual plane sharing the same SSPP id. So we go
+ * and disable multirect
+ */
+ mode_mask = 0;
+ } else {
+ mode_mask = DPU_REG_READ(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx);
+ mode_mask |= index;
+ if (mode == DPU_SSPP_MULTIRECT_TIME_MX)
+ mode_mask |= BIT(2);
+ else
+ mode_mask &= ~BIT(2);
+ }
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_MULTIRECT_OPMODE + idx, mode_mask);
+}
+
+static void _sspp_setup_opmode(struct dpu_hw_pipe *ctx,
+ u32 mask, u8 en)
+{
+ u32 idx;
+ u32 opmode;
+
+ if (!test_bit(DPU_SSPP_SCALER_QSEED2, &ctx->cap->features) ||
+ _sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED2, &idx) ||
+ !test_bit(DPU_SSPP_CSC, &ctx->cap->features))
+ return;
+
+ opmode = DPU_REG_READ(&ctx->hw, SSPP_VIG_OP_MODE + idx);
+
+ if (en)
+ opmode |= mask;
+ else
+ opmode &= ~mask;
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_VIG_OP_MODE + idx, opmode);
+}
+
+static void _sspp_setup_csc10_opmode(struct dpu_hw_pipe *ctx,
+ u32 mask, u8 en)
+{
+ u32 idx;
+ u32 opmode;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_CSC_10BIT, &idx))
+ return;
+
+ opmode = DPU_REG_READ(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx);
+ if (en)
+ opmode |= mask;
+ else
+ opmode &= ~mask;
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_VIG_CSC_10_OP_MODE + idx, opmode);
+}
+
+/**
+ * Setup source pixel format and flip.
+ */
+static void dpu_hw_sspp_setup_format(struct dpu_hw_pipe *ctx,
+ const struct dpu_format *fmt, u32 flags,
+ enum dpu_sspp_multirect_index rect_mode)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 chroma_samp, unpack, src_format;
+ u32 opmode = 0;
+ u32 fast_clear = 0;
+ u32 op_mode_off, unpack_pat_off, format_off;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !fmt)
+ return;
+
+ if (rect_mode == DPU_SSPP_RECT_SOLO || rect_mode == DPU_SSPP_RECT_0) {
+ op_mode_off = SSPP_SRC_OP_MODE;
+ unpack_pat_off = SSPP_SRC_UNPACK_PATTERN;
+ format_off = SSPP_SRC_FORMAT;
+ } else {
+ op_mode_off = SSPP_SRC_OP_MODE_REC1;
+ unpack_pat_off = SSPP_SRC_UNPACK_PATTERN_REC1;
+ format_off = SSPP_SRC_FORMAT_REC1;
+ }
+
+ c = &ctx->hw;
+ opmode = DPU_REG_READ(c, op_mode_off + idx);
+ opmode &= ~(MDSS_MDP_OP_FLIP_LR | MDSS_MDP_OP_FLIP_UD |
+ MDSS_MDP_OP_BWC_EN | MDSS_MDP_OP_PE_OVERRIDE);
+
+ if (flags & DPU_SSPP_FLIP_LR)
+ opmode |= MDSS_MDP_OP_FLIP_LR;
+ if (flags & DPU_SSPP_FLIP_UD)
+ opmode |= MDSS_MDP_OP_FLIP_UD;
+
+ chroma_samp = fmt->chroma_sample;
+ if (flags & DPU_SSPP_SOURCE_ROTATED_90) {
+ if (chroma_samp == DPU_CHROMA_H2V1)
+ chroma_samp = DPU_CHROMA_H1V2;
+ else if (chroma_samp == DPU_CHROMA_H1V2)
+ chroma_samp = DPU_CHROMA_H2V1;
+ }
+
+ src_format = (chroma_samp << 23) | (fmt->fetch_planes << 19) |
+ (fmt->bits[C3_ALPHA] << 6) | (fmt->bits[C2_R_Cr] << 4) |
+ (fmt->bits[C1_B_Cb] << 2) | (fmt->bits[C0_G_Y] << 0);
+
+ if (flags & DPU_SSPP_ROT_90)
+ src_format |= BIT(11); /* ROT90 */
+
+ if (fmt->alpha_enable && fmt->fetch_planes == DPU_PLANE_INTERLEAVED)
+ src_format |= BIT(8); /* SRCC3_EN */
+
+ if (flags & DPU_SSPP_SOLID_FILL)
+ src_format |= BIT(22);
+
+ unpack = (fmt->element[3] << 24) | (fmt->element[2] << 16) |
+ (fmt->element[1] << 8) | (fmt->element[0] << 0);
+ src_format |= ((fmt->unpack_count - 1) << 12) |
+ (fmt->unpack_tight << 17) |
+ (fmt->unpack_align_msb << 18) |
+ ((fmt->bpp - 1) << 9);
+
+ if (fmt->fetch_mode != DPU_FETCH_LINEAR) {
+ if (DPU_FORMAT_IS_UBWC(fmt))
+ opmode |= MDSS_MDP_OP_BWC_EN;
+ src_format |= (fmt->fetch_mode & 3) << 30; /*FRAME_FORMAT */
+ DPU_REG_WRITE(c, SSPP_FETCH_CONFIG,
+ DPU_FETCH_CONFIG_RESET_VALUE |
+ ctx->mdp->highest_bank_bit << 18);
+ if (IS_UBWC_20_SUPPORTED(ctx->catalog->caps->ubwc_version)) {
+ fast_clear = fmt->alpha_enable ? BIT(31) : 0;
+ DPU_REG_WRITE(c, SSPP_UBWC_STATIC_CTRL,
+ fast_clear | (ctx->mdp->ubwc_swizzle) |
+ (ctx->mdp->highest_bank_bit << 4));
+ }
+ }
+
+ opmode |= MDSS_MDP_OP_PE_OVERRIDE;
+
+ /* if this is YUV pixel format, enable CSC */
+ if (DPU_FORMAT_IS_YUV(fmt))
+ src_format |= BIT(15);
+
+ if (DPU_FORMAT_IS_DX(fmt))
+ src_format |= BIT(14);
+
+ /* update scaler opmode, if appropriate */
+ if (test_bit(DPU_SSPP_CSC, &ctx->cap->features))
+ _sspp_setup_opmode(ctx, VIG_OP_CSC_EN | VIG_OP_CSC_SRC_DATAFMT,
+ DPU_FORMAT_IS_YUV(fmt));
+ else if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features))
+ _sspp_setup_csc10_opmode(ctx,
+ VIG_CSC_10_EN | VIG_CSC_10_SRC_DATAFMT,
+ DPU_FORMAT_IS_YUV(fmt));
+
+ DPU_REG_WRITE(c, format_off + idx, src_format);
+ DPU_REG_WRITE(c, unpack_pat_off + idx, unpack);
+ DPU_REG_WRITE(c, op_mode_off + idx, opmode);
+
+ /* clear previous UBWC error */
+ DPU_REG_WRITE(c, SSPP_UBWC_ERROR_STATUS + idx, BIT(31));
+}
+
+static void dpu_hw_sspp_setup_pe_config(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pixel_ext *pe_ext)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u8 color;
+ u32 lr_pe[4], tb_pe[4], tot_req_pixels[4];
+ const u32 bytemask = 0xff;
+ const u32 shortmask = 0xffff;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !pe_ext)
+ return;
+
+ c = &ctx->hw;
+
+	/* program SW pixel extension override for all pipes */
+ for (color = 0; color < DPU_MAX_PLANES; color++) {
+ /* color 2 has the same set of registers as color 1 */
+ if (color == 2)
+ continue;
+
+ lr_pe[color] = ((pe_ext->right_ftch[color] & bytemask) << 24)|
+ ((pe_ext->right_rpt[color] & bytemask) << 16)|
+ ((pe_ext->left_ftch[color] & bytemask) << 8)|
+ (pe_ext->left_rpt[color] & bytemask);
+
+ tb_pe[color] = ((pe_ext->btm_ftch[color] & bytemask) << 24)|
+ ((pe_ext->btm_rpt[color] & bytemask) << 16)|
+ ((pe_ext->top_ftch[color] & bytemask) << 8)|
+ (pe_ext->top_rpt[color] & bytemask);
+
+ tot_req_pixels[color] = (((pe_ext->roi_h[color] +
+ pe_ext->num_ext_pxls_top[color] +
+ pe_ext->num_ext_pxls_btm[color]) & shortmask) << 16) |
+ ((pe_ext->roi_w[color] +
+ pe_ext->num_ext_pxls_left[color] +
+ pe_ext->num_ext_pxls_right[color]) & shortmask);
+ }
+
+ /* color 0 */
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_LR + idx, lr_pe[0]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_TB + idx, tb_pe[0]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C0_REQ_PIXELS + idx,
+ tot_req_pixels[0]);
+
+ /* color 1 and color 2 */
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_LR + idx, lr_pe[1]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_TB + idx, tb_pe[1]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C1C2_REQ_PIXELS + idx,
+ tot_req_pixels[1]);
+
+ /* color 3 */
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_LR + idx, lr_pe[3]);
+	DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_TB + idx, tb_pe[3]);
+ DPU_REG_WRITE(c, SSPP_SW_PIX_EXT_C3_REQ_PIXELS + idx,
+ tot_req_pixels[3]);
+}
+
+static void _dpu_hw_sspp_setup_scaler3(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *sspp,
+ struct dpu_hw_pixel_ext *pe,
+ void *scaler_cfg)
+{
+ u32 idx;
+ struct dpu_hw_scaler3_cfg *scaler3_cfg = scaler_cfg;
+
+ (void)pe;
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx) || !sspp
+ || !scaler3_cfg || !ctx || !ctx->cap || !ctx->cap->sblk)
+ return;
+
+ dpu_hw_setup_scaler3(&ctx->hw, scaler3_cfg, idx,
+ ctx->cap->sblk->scaler_blk.version,
+ sspp->layout.format);
+}
+
+static u32 _dpu_hw_sspp_get_scaler3_ver(struct dpu_hw_pipe *ctx)
+{
+ u32 idx;
+
+ if (!ctx || _sspp_subblk_offset(ctx, DPU_SSPP_SCALER_QSEED3, &idx))
+ return 0;
+
+ return dpu_hw_get_scaler3_ver(&ctx->hw, idx);
+}
+
+/**
+ * dpu_hw_sspp_setup_rects() - program source/destination rectangles
+ */
+static void dpu_hw_sspp_setup_rects(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *cfg,
+ enum dpu_sspp_multirect_index rect_index)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 src_size, src_xy, dst_size, dst_xy, ystride0, ystride1;
+ u32 src_size_off, src_xy_off, out_size_off, out_xy_off;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx) || !cfg)
+ return;
+
+ c = &ctx->hw;
+
+ if (rect_index == DPU_SSPP_RECT_SOLO || rect_index == DPU_SSPP_RECT_0) {
+ src_size_off = SSPP_SRC_SIZE;
+ src_xy_off = SSPP_SRC_XY;
+ out_size_off = SSPP_OUT_SIZE;
+ out_xy_off = SSPP_OUT_XY;
+ } else {
+ src_size_off = SSPP_SRC_SIZE_REC1;
+ src_xy_off = SSPP_SRC_XY_REC1;
+ out_size_off = SSPP_OUT_SIZE_REC1;
+ out_xy_off = SSPP_OUT_XY_REC1;
+ }
+
+
+ src_xy = (cfg->src_rect.y1 << 16) | cfg->src_rect.x1;
+ src_size = (drm_rect_height(&cfg->src_rect) << 16) |
+ drm_rect_width(&cfg->src_rect);
+ dst_xy = (cfg->dst_rect.y1 << 16) | cfg->dst_rect.x1;
+ dst_size = (drm_rect_height(&cfg->dst_rect) << 16) |
+ drm_rect_width(&cfg->dst_rect);
+
+ if (rect_index == DPU_SSPP_RECT_SOLO) {
+ ystride0 = (cfg->layout.plane_pitch[0]) |
+ (cfg->layout.plane_pitch[1] << 16);
+ ystride1 = (cfg->layout.plane_pitch[2]) |
+ (cfg->layout.plane_pitch[3] << 16);
+ } else {
+ ystride0 = DPU_REG_READ(c, SSPP_SRC_YSTRIDE0 + idx);
+ ystride1 = DPU_REG_READ(c, SSPP_SRC_YSTRIDE1 + idx);
+
+ if (rect_index == DPU_SSPP_RECT_0) {
+ ystride0 = (ystride0 & 0xFFFF0000) |
+ (cfg->layout.plane_pitch[0] & 0x0000FFFF);
+ ystride1 = (ystride1 & 0xFFFF0000)|
+ (cfg->layout.plane_pitch[2] & 0x0000FFFF);
+ } else {
+ ystride0 = (ystride0 & 0x0000FFFF) |
+ ((cfg->layout.plane_pitch[0] << 16) &
+ 0xFFFF0000);
+ ystride1 = (ystride1 & 0x0000FFFF) |
+ ((cfg->layout.plane_pitch[2] << 16) &
+ 0xFFFF0000);
+ }
+ }
+
+ /* rectangle register programming */
+ DPU_REG_WRITE(c, src_size_off + idx, src_size);
+ DPU_REG_WRITE(c, src_xy_off + idx, src_xy);
+ DPU_REG_WRITE(c, out_size_off + idx, dst_size);
+ DPU_REG_WRITE(c, out_xy_off + idx, dst_xy);
+
+ DPU_REG_WRITE(c, SSPP_SRC_YSTRIDE0 + idx, ystride0);
+ DPU_REG_WRITE(c, SSPP_SRC_YSTRIDE1 + idx, ystride1);
+}
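+
+/*
+ * Illustrative call sequence (pipe and the rectangle values are
+ * hypothetical): the caller fills a dpu_hw_pipe_cfg from the plane
+ * state and programs both rectangles through the ops table:
+ *
+ *	struct dpu_hw_pipe_cfg cfg = {
+ *		.src_rect = { .x1 = 0, .y1 = 0, .x2 = 1920, .y2 = 1080 },
+ *		.dst_rect = { .x1 = 0, .y1 = 0, .x2 = 1920, .y2 = 1080 },
+ *	};
+ *
+ *	pipe->ops.setup_rects(pipe, &cfg, DPU_SSPP_RECT_SOLO);
+ */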
+
+static void dpu_hw_sspp_setup_sourceaddress(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *cfg,
+ enum dpu_sspp_multirect_index rect_mode)
+{
+ int i;
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (rect_mode == DPU_SSPP_RECT_SOLO) {
+ for (i = 0; i < ARRAY_SIZE(cfg->layout.plane_addr); i++)
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx + i * 0x4,
+ cfg->layout.plane_addr[i]);
+ } else if (rect_mode == DPU_SSPP_RECT_0) {
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC0_ADDR + idx,
+ cfg->layout.plane_addr[0]);
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC2_ADDR + idx,
+ cfg->layout.plane_addr[2]);
+ } else {
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC1_ADDR + idx,
+ cfg->layout.plane_addr[0]);
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC3_ADDR + idx,
+ cfg->layout.plane_addr[2]);
+ }
+}
+
+static void dpu_hw_sspp_setup_csc(struct dpu_hw_pipe *ctx,
+ struct dpu_csc_cfg *data)
+{
+ u32 idx;
+ bool csc10 = false;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_CSC, &idx) || !data)
+ return;
+
+ if (test_bit(DPU_SSPP_CSC_10BIT, &ctx->cap->features)) {
+ idx += CSC_10BIT_OFFSET;
+ csc10 = true;
+ }
+
+ dpu_hw_csc_setup(&ctx->hw, idx, data, csc10);
+}
+
+static void dpu_hw_sspp_setup_solidfill(struct dpu_hw_pipe *ctx, u32 color,
+		enum dpu_sspp_multirect_index rect_index)
+{
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (rect_index == DPU_SSPP_RECT_SOLO || rect_index == DPU_SSPP_RECT_0)
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR + idx, color);
+ else
+ DPU_REG_WRITE(&ctx->hw, SSPP_SRC_CONSTANT_COLOR_REC1 + idx,
+ color);
+}
+
+static void dpu_hw_sspp_setup_danger_safe_lut(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg)
+{
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_DANGER_LUT + idx, cfg->danger_lut);
+ DPU_REG_WRITE(&ctx->hw, SSPP_SAFE_LUT + idx, cfg->safe_lut);
+}
+
+static void dpu_hw_sspp_setup_creq_lut(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg)
+{
+ u32 idx;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (ctx->cap && test_bit(DPU_SSPP_QOS_8LVL, &ctx->cap->features)) {
+ DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_0 + idx, cfg->creq_lut);
+ DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT_1 + idx,
+ cfg->creq_lut >> 32);
+ } else {
+ DPU_REG_WRITE(&ctx->hw, SSPP_CREQ_LUT + idx, cfg->creq_lut);
+ }
+}
+
+static void dpu_hw_sspp_setup_qos_ctrl(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg)
+{
+ u32 idx;
+ u32 qos_ctrl = 0;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (cfg->vblank_en) {
+ qos_ctrl |= ((cfg->creq_vblank &
+ SSPP_QOS_CTRL_CREQ_VBLANK_MASK) <<
+ SSPP_QOS_CTRL_CREQ_VBLANK_OFF);
+ qos_ctrl |= ((cfg->danger_vblank &
+ SSPP_QOS_CTRL_DANGER_VBLANK_MASK) <<
+ SSPP_QOS_CTRL_DANGER_VBLANK_OFF);
+ qos_ctrl |= SSPP_QOS_CTRL_VBLANK_EN;
+ }
+
+ if (cfg->danger_safe_en)
+ qos_ctrl |= SSPP_QOS_CTRL_DANGER_SAFE_EN;
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_QOS_CTRL + idx, qos_ctrl);
+}
+
+static void dpu_hw_sspp_setup_cdp(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cdp_cfg *cfg)
+{
+ u32 idx;
+ u32 cdp_cntl = 0;
+
+ if (!ctx || !cfg)
+ return;
+
+ if (_sspp_subblk_offset(ctx, DPU_SSPP_SRC, &idx))
+ return;
+
+ if (cfg->enable)
+ cdp_cntl |= BIT(0);
+ if (cfg->ubwc_meta_enable)
+ cdp_cntl |= BIT(1);
+ if (cfg->tile_amortize_enable)
+ cdp_cntl |= BIT(2);
+ if (cfg->preload_ahead == DPU_SSPP_CDP_PRELOAD_AHEAD_64)
+ cdp_cntl |= BIT(3);
+
+ DPU_REG_WRITE(&ctx->hw, SSPP_CDP_CNTL, cdp_cntl);
+}
+
+static void _setup_layer_ops(struct dpu_hw_pipe *c,
+ unsigned long features)
+{
+ if (test_bit(DPU_SSPP_SRC, &features)) {
+ c->ops.setup_format = dpu_hw_sspp_setup_format;
+ c->ops.setup_rects = dpu_hw_sspp_setup_rects;
+ c->ops.setup_sourceaddress = dpu_hw_sspp_setup_sourceaddress;
+ c->ops.setup_solidfill = dpu_hw_sspp_setup_solidfill;
+ c->ops.setup_pe = dpu_hw_sspp_setup_pe_config;
+ }
+
+ if (test_bit(DPU_SSPP_QOS, &features)) {
+ c->ops.setup_danger_safe_lut =
+ dpu_hw_sspp_setup_danger_safe_lut;
+ c->ops.setup_creq_lut = dpu_hw_sspp_setup_creq_lut;
+ c->ops.setup_qos_ctrl = dpu_hw_sspp_setup_qos_ctrl;
+ }
+
+ if (test_bit(DPU_SSPP_CSC, &features) ||
+ test_bit(DPU_SSPP_CSC_10BIT, &features))
+ c->ops.setup_csc = dpu_hw_sspp_setup_csc;
+
+ if (dpu_hw_sspp_multirect_enabled(c->cap))
+ c->ops.setup_multirect = dpu_hw_sspp_setup_multirect;
+
+ if (test_bit(DPU_SSPP_SCALER_QSEED3, &features)) {
+ c->ops.setup_scaler = _dpu_hw_sspp_setup_scaler3;
+ c->ops.get_scaler_ver = _dpu_hw_sspp_get_scaler3_ver;
+ }
+
+ if (test_bit(DPU_SSPP_CDP, &features))
+ c->ops.setup_cdp = dpu_hw_sspp_setup_cdp;
+}
+
+static struct dpu_sspp_cfg *_sspp_offset(enum dpu_sspp sspp,
+ void __iomem *addr,
+ struct dpu_mdss_cfg *catalog,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ if ((sspp < SSPP_MAX) && catalog && addr && b) {
+ for (i = 0; i < catalog->sspp_count; i++) {
+ if (sspp == catalog->sspp[i].id) {
+ b->base_off = addr;
+ b->blk_off = catalog->sspp[i].base;
+ b->length = catalog->sspp[i].len;
+ b->hwversion = catalog->hwversion;
+ b->log_mask = DPU_DBG_MASK_SSPP;
+ return &catalog->sspp[i];
+ }
+ }
+ }
+
+	return ERR_PTR(-EINVAL);
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
+ void __iomem *addr, struct dpu_mdss_cfg *catalog,
+ bool is_virtual_pipe)
+{
+ struct dpu_hw_pipe *hw_pipe;
+ struct dpu_sspp_cfg *cfg;
+ int rc;
+
+ if (!addr || !catalog)
+ return ERR_PTR(-EINVAL);
+
+ hw_pipe = kzalloc(sizeof(*hw_pipe), GFP_KERNEL);
+ if (!hw_pipe)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _sspp_offset(idx, addr, catalog, &hw_pipe->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(hw_pipe);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /* Assign ops */
+ hw_pipe->catalog = catalog;
+ hw_pipe->mdp = &catalog->mdp[0];
+ hw_pipe->idx = idx;
+ hw_pipe->cap = cfg;
+ _setup_layer_ops(hw_pipe, hw_pipe->cap->features);
+
+ rc = dpu_hw_blk_init(&hw_pipe->base, DPU_HW_BLK_SSPP, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ return hw_pipe;
+
+blk_init_error:
+ kzfree(hw_pipe);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx)
+{
+ if (ctx)
+ dpu_hw_blk_destroy(&ctx->base);
+ kfree(ctx);
+}
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
new file mode 100644
index 000000000000..4d81e5f5ce1b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_sspp.h
@@ -0,0 +1,424 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_SSPP_H
+#define _DPU_HW_SSPP_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+#include "dpu_formats.h"
+
+struct dpu_hw_pipe;
+
+/**
+ * Flags
+ */
+#define DPU_SSPP_FLIP_LR BIT(0)
+#define DPU_SSPP_FLIP_UD BIT(1)
+#define DPU_SSPP_SOURCE_ROTATED_90 BIT(2)
+#define DPU_SSPP_ROT_90 BIT(3)
+#define DPU_SSPP_SOLID_FILL BIT(4)
+
+/**
+ * Define all scaler feature bits in catalog
+ */
+#define DPU_SSPP_SCALER ((1UL << DPU_SSPP_SCALER_RGB) | \
+ (1UL << DPU_SSPP_SCALER_QSEED2) | \
+ (1UL << DPU_SSPP_SCALER_QSEED3))
+
+/**
+ * Component indices
+ */
+enum {
+ DPU_SSPP_COMP_0,
+ DPU_SSPP_COMP_1_2,
+ DPU_SSPP_COMP_2,
+ DPU_SSPP_COMP_3,
+
+ DPU_SSPP_COMP_MAX
+};
+
+/**
+ * DPU_SSPP_RECT_SOLO - multirect disabled
+ * DPU_SSPP_RECT_0 - rect0 of a multirect pipe
+ * DPU_SSPP_RECT_1 - rect1 of a multirect pipe
+ *
+ * Note: HW supports multirect with either RECT0 or
+ * RECT1. Considering no benefit of such configs over
+ * SOLO mode and to keep the plane management simple,
+ * we don't support single rect multirect configs.
+ */
+enum dpu_sspp_multirect_index {
+ DPU_SSPP_RECT_SOLO = 0,
+ DPU_SSPP_RECT_0,
+ DPU_SSPP_RECT_1,
+};
+
+enum dpu_sspp_multirect_mode {
+ DPU_SSPP_MULTIRECT_NONE = 0,
+ DPU_SSPP_MULTIRECT_PARALLEL,
+ DPU_SSPP_MULTIRECT_TIME_MX,
+};
+
+enum {
+ DPU_FRAME_LINEAR,
+ DPU_FRAME_TILE_A4X,
+ DPU_FRAME_TILE_A5X,
+};
+
+enum dpu_hw_filter {
+ DPU_SCALE_FILTER_NEAREST = 0,
+ DPU_SCALE_FILTER_BIL,
+ DPU_SCALE_FILTER_PCMN,
+ DPU_SCALE_FILTER_CA,
+ DPU_SCALE_FILTER_MAX
+};
+
+enum dpu_hw_filter_alpa {
+ DPU_SCALE_ALPHA_PIXEL_REP,
+ DPU_SCALE_ALPHA_BIL
+};
+
+enum dpu_hw_filter_yuv {
+ DPU_SCALE_2D_4X4,
+ DPU_SCALE_2D_CIR,
+ DPU_SCALE_1D_SEP,
+ DPU_SCALE_BIL
+};
+
+struct dpu_hw_sharp_cfg {
+ u32 strength;
+ u32 edge_thr;
+ u32 smooth_thr;
+ u32 noise_thr;
+};
+
+struct dpu_hw_pixel_ext {
+ /* scaling factors are enabled for this input layer */
+ uint8_t enable_pxl_ext;
+
+ int init_phase_x[DPU_MAX_PLANES];
+ int phase_step_x[DPU_MAX_PLANES];
+ int init_phase_y[DPU_MAX_PLANES];
+ int phase_step_y[DPU_MAX_PLANES];
+
+ /*
+ * Number of pixels extension in left, right, top and bottom direction
+ * for all color components. This pixel value for each color component
+ * should be sum of fetch + repeat pixels.
+ */
+ int num_ext_pxls_left[DPU_MAX_PLANES];
+ int num_ext_pxls_right[DPU_MAX_PLANES];
+ int num_ext_pxls_top[DPU_MAX_PLANES];
+ int num_ext_pxls_btm[DPU_MAX_PLANES];
+
+ /*
+	 * Number of pixels that need to be overfetched in the left, right,
+	 * top and bottom directions from the source image for scaling.
+ */
+ int left_ftch[DPU_MAX_PLANES];
+ int right_ftch[DPU_MAX_PLANES];
+ int top_ftch[DPU_MAX_PLANES];
+ int btm_ftch[DPU_MAX_PLANES];
+
+ /*
+	 * Number of pixels that need to be repeated in the left, right,
+	 * top and bottom directions for scaling.
+ */
+ int left_rpt[DPU_MAX_PLANES];
+ int right_rpt[DPU_MAX_PLANES];
+ int top_rpt[DPU_MAX_PLANES];
+ int btm_rpt[DPU_MAX_PLANES];
+
+ uint32_t roi_w[DPU_MAX_PLANES];
+ uint32_t roi_h[DPU_MAX_PLANES];
+
+ /*
+ * Filter type to be used for scaling in horizontal and vertical
+ * directions
+ */
+ enum dpu_hw_filter horz_filter[DPU_MAX_PLANES];
+ enum dpu_hw_filter vert_filter[DPU_MAX_PLANES];
+
+};
+
+/**
+ * struct dpu_hw_pipe_cfg : Pipe description
+ * @layout: format layout information for programming buffer to hardware
+ * @src_rect: src ROI, caller takes into account the different operations
+ * such as decimation, flip etc to program this field
+ * @dst_rect: destination ROI
+ * @index: index of the rectangle of SSPP
+ * @mode: parallel or time multiplex multirect mode
+ */
+struct dpu_hw_pipe_cfg {
+ struct dpu_hw_fmt_layout layout;
+ struct drm_rect src_rect;
+ struct drm_rect dst_rect;
+ enum dpu_sspp_multirect_index index;
+ enum dpu_sspp_multirect_mode mode;
+};
+
+/**
+ * struct dpu_hw_pipe_qos_cfg : Source pipe QoS configuration
+ * @danger_lut: LUT for generating danger level based on fill level
+ * @safe_lut: LUT for generating safe level based on fill level
+ * @creq_lut: LUT for generating creq level based on fill level
+ * @creq_vblank: creq value generated to vbif during vertical blanking
+ * @danger_vblank: danger value generated during vertical blanking
+ * @vblank_en: enable creq_vblank and danger_vblank during vblank
+ * @danger_safe_en: enable danger safe generation
+ */
+struct dpu_hw_pipe_qos_cfg {
+ u32 danger_lut;
+ u32 safe_lut;
+ u64 creq_lut;
+ u32 creq_vblank;
+ u32 danger_vblank;
+ bool vblank_en;
+ bool danger_safe_en;
+};
+
+/**
+ * CDP preload ahead address sizes
+ */
+enum {
+ DPU_SSPP_CDP_PRELOAD_AHEAD_32,
+ DPU_SSPP_CDP_PRELOAD_AHEAD_64
+};
+
+/**
+ * struct dpu_hw_pipe_cdp_cfg : CDP configuration
+ * @enable: true to enable CDP
+ * @ubwc_meta_enable: true to enable ubwc metadata preload
+ * @tile_amortize_enable: true to enable amortization control for tile format
+ * @preload_ahead: number of requests to preload ahead
+ * DPU_SSPP_CDP_PRELOAD_AHEAD_32,
+ * DPU_SSPP_CDP_PRELOAD_AHEAD_64
+ */
+struct dpu_hw_pipe_cdp_cfg {
+ bool enable;
+ bool ubwc_meta_enable;
+ bool tile_amortize_enable;
+ u32 preload_ahead;
+};
+
+/**
+ * struct dpu_hw_pipe_ts_cfg - traffic shaper configuration
+ * @size: size to prefill in bytes, or zero to disable
+ * @time: time to prefill in usec, or zero to disable
+ */
+struct dpu_hw_pipe_ts_cfg {
+ u64 size;
+ u64 time;
+};
+
+/**
+ * struct dpu_hw_sspp_ops - interface to the SSPP Hw driver functions
+ * Caller must call the init function to get the pipe context for each pipe
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_sspp_ops {
+ /**
+	 * setup_format - setup pixel format, cropping rectangle, flip
+	 * @ctx: Pointer to pipe context
+	 * @fmt: Pointer to dpu format structure
+ * @flags: Extra flags for format config
+ * @index: rectangle index in multirect
+ */
+ void (*setup_format)(struct dpu_hw_pipe *ctx,
+ const struct dpu_format *fmt, u32 flags,
+ enum dpu_sspp_multirect_index index);
+
+ /**
+ * setup_rects - setup pipe ROI rectangles
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe config structure
+ * @index: rectangle index in multirect
+ */
+ void (*setup_rects)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *cfg,
+ enum dpu_sspp_multirect_index index);
+
+ /**
+ * setup_pe - setup pipe pixel extension
+ * @ctx: Pointer to pipe context
+ * @pe_ext: Pointer to pixel ext settings
+ */
+ void (*setup_pe)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pixel_ext *pe_ext);
+
+ /**
+ * setup_sourceaddress - setup pipe source addresses
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe config structure
+ * @index: rectangle index in multirect
+ */
+ void (*setup_sourceaddress)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *cfg,
+ enum dpu_sspp_multirect_index index);
+
+ /**
+	 * setup_csc - setup color space conversion
+ * @ctx: Pointer to pipe context
+ * @data: Pointer to config structure
+ */
+ void (*setup_csc)(struct dpu_hw_pipe *ctx, struct dpu_csc_cfg *data);
+
+ /**
+ * setup_solidfill - enable/disable colorfill
+ * @ctx: Pointer to pipe context
+	 * @color: Fill color value
+ * @index: rectangle index in multirect
+ */
+ void (*setup_solidfill)(struct dpu_hw_pipe *ctx, u32 color,
+ enum dpu_sspp_multirect_index index);
+
+ /**
+ * setup_multirect - setup multirect configuration
+ * @ctx: Pointer to pipe context
+ * @index: rectangle index in multirect
+ * @mode: parallel fetch / time multiplex multirect mode
+ */
+ void (*setup_multirect)(struct dpu_hw_pipe *ctx,
+ enum dpu_sspp_multirect_index index,
+ enum dpu_sspp_multirect_mode mode);
+
+ /**
+ * setup_sharpening - setup sharpening
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to config structure
+ */
+ void (*setup_sharpening)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_sharp_cfg *cfg);
+
+ /**
+ * setup_danger_safe_lut - setup danger safe LUTs
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe QoS configuration
+ *
+ */
+ void (*setup_danger_safe_lut)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg);
+
+ /**
+ * setup_creq_lut - setup CREQ LUT
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe QoS configuration
+ *
+ */
+ void (*setup_creq_lut)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg);
+
+ /**
+ * setup_qos_ctrl - setup QoS control
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to pipe QoS configuration
+ *
+ */
+ void (*setup_qos_ctrl)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_qos_cfg *cfg);
+
+ /**
+ * setup_histogram - setup histograms
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to histogram configuration
+ */
+ void (*setup_histogram)(struct dpu_hw_pipe *ctx,
+ void *cfg);
+
+ /**
+ * setup_scaler - setup scaler
+ * @ctx: Pointer to pipe context
+ * @pipe_cfg: Pointer to pipe configuration
+ * @pe_cfg: Pointer to pixel extension configuration
+ * @scaler_cfg: Pointer to scaler configuration
+ */
+ void (*setup_scaler)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cfg *pipe_cfg,
+ struct dpu_hw_pixel_ext *pe_cfg,
+ void *scaler_cfg);
+
+ /**
+ * get_scaler_ver - get scaler h/w version
+ * @ctx: Pointer to pipe context
+ */
+ u32 (*get_scaler_ver)(struct dpu_hw_pipe *ctx);
+
+ /**
+ * setup_cdp - setup client driven prefetch
+ * @ctx: Pointer to pipe context
+ * @cfg: Pointer to cdp configuration
+ */
+ void (*setup_cdp)(struct dpu_hw_pipe *ctx,
+ struct dpu_hw_pipe_cdp_cfg *cfg);
+};
+
+/**
+ * struct dpu_hw_pipe - pipe description
+ * @base: hardware block base structure
+ * @hw: block hardware details
+ * @catalog: back pointer to catalog
+ * @mdp: pointer to associated mdp portion of the catalog
+ * @idx: pipe index
+ * @cap: pointer to layer_cfg
+ * @ops: pointer to operations possible for this pipe
+ */
+struct dpu_hw_pipe {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+ struct dpu_mdss_cfg *catalog;
+ struct dpu_mdp_cfg *mdp;
+
+ /* Pipe */
+ enum dpu_sspp idx;
+ const struct dpu_sspp_cfg *cap;
+
+ /* Ops */
+ struct dpu_hw_sspp_ops ops;
+};
+
+/**
+ * to_dpu_hw_pipe - convert base object dpu_hw_base to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_pipe *to_dpu_hw_pipe(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_pipe, base);
+}
+
+/**
+ * dpu_hw_sspp_init - initializes the sspp hw driver object.
+ * Should be called once per pipe before accessing it.
+ * @idx: Pipe index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @catalog : Pointer to mdss catalog data
+ * @is_virtual_pipe: whether this is a virtual pipe
+ */
+struct dpu_hw_pipe *dpu_hw_sspp_init(enum dpu_sspp idx,
+ void __iomem *addr, struct dpu_mdss_cfg *catalog,
+ bool is_virtual_pipe);
+
+/**
+ * dpu_hw_sspp_destroy(): Destroys SSPP driver context
+ * should be called during Hw pipe cleanup.
+ * @ctx: Pointer to SSPP driver context returned by dpu_hw_sspp_init
+ */
+void dpu_hw_sspp_destroy(struct dpu_hw_pipe *ctx);
+
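+/*
+ * A minimal usage sketch (mmio, catalog and fmt are placeholders
+ * supplied by the caller):
+ *
+ *	struct dpu_hw_pipe *pipe;
+ *
+ *	pipe = dpu_hw_sspp_init(SSPP_VIG0, mmio, catalog, false);
+ *	if (IS_ERR(pipe))
+ *		return PTR_ERR(pipe);
+ *
+ *	if (pipe->ops.setup_format)
+ *		pipe->ops.setup_format(pipe, fmt, 0, DPU_SSPP_RECT_SOLO);
+ *	...
+ *	dpu_hw_sspp_destroy(pipe);
+ */
+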
+#endif /*_DPU_HW_SSPP_H */
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
new file mode 100644
index 000000000000..db2798e862fc
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.c
@@ -0,0 +1,398 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_top.h"
+#include "dpu_dbg.h"
+#include "dpu_kms.h"
+
+#define SSPP_SPARE 0x28
+#define UBWC_STATIC 0x144
+
+#define FLD_SPLIT_DISPLAY_CMD BIT(1)
+#define FLD_SMART_PANEL_FREE_RUN BIT(2)
+#define FLD_INTF_1_SW_TRG_MUX BIT(4)
+#define FLD_INTF_2_SW_TRG_MUX BIT(8)
+#define FLD_TE_LINE_INTER_WATERLEVEL_MASK 0xFFFF
+
+#define DANGER_STATUS 0x360
+#define SAFE_STATUS 0x364
+
+#define TE_LINE_INTERVAL 0x3F4
+
+#define TRAFFIC_SHAPER_EN BIT(31)
+#define TRAFFIC_SHAPER_RD_CLIENT(num)	(0x030 + ((num) * 4))
+#define TRAFFIC_SHAPER_WR_CLIENT(num)	(0x060 + ((num) * 4))
+#define TRAFFIC_SHAPER_FIXPOINT_FACTOR 4
+
+#define MDP_WD_TIMER_0_CTL 0x380
+#define MDP_WD_TIMER_0_CTL2 0x384
+#define MDP_WD_TIMER_0_LOAD_VALUE 0x388
+#define MDP_WD_TIMER_1_CTL 0x390
+#define MDP_WD_TIMER_1_CTL2 0x394
+#define MDP_WD_TIMER_1_LOAD_VALUE 0x398
+#define MDP_WD_TIMER_2_CTL 0x420
+#define MDP_WD_TIMER_2_CTL2 0x424
+#define MDP_WD_TIMER_2_LOAD_VALUE 0x428
+#define MDP_WD_TIMER_3_CTL 0x430
+#define MDP_WD_TIMER_3_CTL2 0x434
+#define MDP_WD_TIMER_3_LOAD_VALUE 0x438
+#define MDP_WD_TIMER_4_CTL 0x440
+#define MDP_WD_TIMER_4_CTL2 0x444
+#define MDP_WD_TIMER_4_LOAD_VALUE 0x448
+
+#define MDP_TICK_COUNT 16
+#define XO_CLK_RATE 19200
+#define MS_TICKS_IN_SEC 1000
+
+#define CALCULATE_WD_LOAD_VALUE(fps) \
+ ((uint32_t)((MS_TICKS_IN_SEC * XO_CLK_RATE)/(MDP_TICK_COUNT * fps)))
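+/* e.g. for a 60 fps panel: (1000 * 19200) / (16 * 60) = 20000 ticks */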
+
+#define DCE_SEL 0x450
+
+static void dpu_hw_setup_split_pipe(struct dpu_hw_mdp *mdp,
+ struct split_pipe_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 upper_pipe = 0;
+ u32 lower_pipe = 0;
+
+ if (!mdp || !cfg)
+ return;
+
+ c = &mdp->hw;
+
+ if (cfg->en) {
+ if (cfg->mode == INTF_MODE_CMD) {
+ lower_pipe = FLD_SPLIT_DISPLAY_CMD;
+ /* interface controlling sw trigger */
+ if (cfg->intf == INTF_2)
+ lower_pipe |= FLD_INTF_1_SW_TRG_MUX;
+ else
+ lower_pipe |= FLD_INTF_2_SW_TRG_MUX;
+ upper_pipe = lower_pipe;
+ } else {
+ if (cfg->intf == INTF_2) {
+ lower_pipe = FLD_INTF_1_SW_TRG_MUX;
+ upper_pipe = FLD_INTF_2_SW_TRG_MUX;
+ } else {
+ lower_pipe = FLD_INTF_2_SW_TRG_MUX;
+ upper_pipe = FLD_INTF_1_SW_TRG_MUX;
+ }
+ }
+ }
+
+ DPU_REG_WRITE(c, SSPP_SPARE, cfg->split_flush_en ? 0x1 : 0x0);
+ DPU_REG_WRITE(c, SPLIT_DISPLAY_LOWER_PIPE_CTRL, lower_pipe);
+ DPU_REG_WRITE(c, SPLIT_DISPLAY_UPPER_PIPE_CTRL, upper_pipe);
+ DPU_REG_WRITE(c, SPLIT_DISPLAY_EN, cfg->en & 0x1);
+}
+
+static void dpu_hw_setup_cdm_output(struct dpu_hw_mdp *mdp,
+ struct cdm_output_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 out_ctl = 0;
+
+ if (!mdp || !cfg)
+ return;
+
+ c = &mdp->hw;
+
+ if (cfg->intf_en)
+ out_ctl |= BIT(19);
+
+ DPU_REG_WRITE(c, MDP_OUT_CTL_0, out_ctl);
+}
+
+static bool dpu_hw_setup_clk_force_ctrl(struct dpu_hw_mdp *mdp,
+ enum dpu_clk_ctrl_type clk_ctrl, bool enable)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_off, bit_off;
+ u32 reg_val, new_val;
+ bool clk_forced_on;
+
+ if (!mdp)
+ return false;
+
+ c = &mdp->hw;
+
+ if (clk_ctrl <= DPU_CLK_CTRL_NONE || clk_ctrl >= DPU_CLK_CTRL_MAX)
+ return false;
+
+ reg_off = mdp->caps->clk_ctrls[clk_ctrl].reg_off;
+ bit_off = mdp->caps->clk_ctrls[clk_ctrl].bit_off;
+
+ reg_val = DPU_REG_READ(c, reg_off);
+
+ if (enable)
+ new_val = reg_val | BIT(bit_off);
+ else
+ new_val = reg_val & ~BIT(bit_off);
+
+ DPU_REG_WRITE(c, reg_off, new_val);
+
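+	/* report whether the force-on bit was clear before this write */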
+ clk_forced_on = !(reg_val & BIT(bit_off));
+
+ return clk_forced_on;
+}
+
+
+static void dpu_hw_get_danger_status(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 value;
+
+ if (!mdp || !status)
+ return;
+
+ c = &mdp->hw;
+
+ value = DPU_REG_READ(c, DANGER_STATUS);
+ status->mdp = (value >> 0) & 0x3;
+ status->sspp[SSPP_VIG0] = (value >> 4) & 0x3;
+ status->sspp[SSPP_VIG1] = (value >> 6) & 0x3;
+ status->sspp[SSPP_VIG2] = (value >> 8) & 0x3;
+ status->sspp[SSPP_VIG3] = (value >> 10) & 0x3;
+ status->sspp[SSPP_RGB0] = (value >> 12) & 0x3;
+ status->sspp[SSPP_RGB1] = (value >> 14) & 0x3;
+ status->sspp[SSPP_RGB2] = (value >> 16) & 0x3;
+ status->sspp[SSPP_RGB3] = (value >> 18) & 0x3;
+ status->sspp[SSPP_DMA0] = (value >> 20) & 0x3;
+ status->sspp[SSPP_DMA1] = (value >> 22) & 0x3;
+ status->sspp[SSPP_DMA2] = (value >> 28) & 0x3;
+ status->sspp[SSPP_DMA3] = (value >> 30) & 0x3;
+ status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x3;
+ status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x3;
+}
+
+static void dpu_hw_setup_vsync_source(struct dpu_hw_mdp *mdp,
+ struct dpu_vsync_source_cfg *cfg)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg, wd_load_value, wd_ctl, wd_ctl2, i;
+ static const u32 pp_offset[PINGPONG_MAX] = {0xC, 0x8, 0x4, 0x13, 0x18};
+
+ if (!mdp || !cfg || (cfg->pp_count > ARRAY_SIZE(cfg->ppnumber)))
+ return;
+
+ c = &mdp->hw;
+ reg = DPU_REG_READ(c, MDP_VSYNC_SEL);
+ for (i = 0; i < cfg->pp_count; i++) {
+ int pp_idx = cfg->ppnumber[i] - PINGPONG_0;
+
+ if (pp_idx >= ARRAY_SIZE(pp_offset))
+ continue;
+
+ reg &= ~(0xf << pp_offset[pp_idx]);
+ reg |= (cfg->vsync_source & 0xf) << pp_offset[pp_idx];
+ }
+ DPU_REG_WRITE(c, MDP_VSYNC_SEL, reg);
+
+ if (cfg->vsync_source >= DPU_VSYNC_SOURCE_WD_TIMER_4 &&
+ cfg->vsync_source <= DPU_VSYNC_SOURCE_WD_TIMER_0) {
+ switch (cfg->vsync_source) {
+ case DPU_VSYNC_SOURCE_WD_TIMER_4:
+ wd_load_value = MDP_WD_TIMER_4_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_4_CTL;
+ wd_ctl2 = MDP_WD_TIMER_4_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_3:
+ wd_load_value = MDP_WD_TIMER_3_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_3_CTL;
+ wd_ctl2 = MDP_WD_TIMER_3_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_2:
+ wd_load_value = MDP_WD_TIMER_2_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_2_CTL;
+ wd_ctl2 = MDP_WD_TIMER_2_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_1:
+ wd_load_value = MDP_WD_TIMER_1_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_1_CTL;
+ wd_ctl2 = MDP_WD_TIMER_1_CTL2;
+ break;
+ case DPU_VSYNC_SOURCE_WD_TIMER_0:
+ default:
+ wd_load_value = MDP_WD_TIMER_0_LOAD_VALUE;
+ wd_ctl = MDP_WD_TIMER_0_CTL;
+ wd_ctl2 = MDP_WD_TIMER_0_CTL2;
+ break;
+ }
+
+ DPU_REG_WRITE(c, wd_load_value,
+ CALCULATE_WD_LOAD_VALUE(cfg->frame_rate));
+
+ DPU_REG_WRITE(c, wd_ctl, BIT(0)); /* clear timer */
+ reg = DPU_REG_READ(c, wd_ctl2);
+ reg |= BIT(8); /* enable heartbeat timer */
+ reg |= BIT(0); /* enable WD timer */
+ DPU_REG_WRITE(c, wd_ctl2, reg);
+
+ /* make sure that timers are enabled/disabled for vsync state */
+ wmb();
+ }
+}
+
+static void dpu_hw_get_safe_status(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 value;
+
+ if (!mdp || !status)
+ return;
+
+ c = &mdp->hw;
+
+ value = DPU_REG_READ(c, SAFE_STATUS);
+ status->mdp = (value >> 0) & 0x1;
+ status->sspp[SSPP_VIG0] = (value >> 4) & 0x1;
+ status->sspp[SSPP_VIG1] = (value >> 6) & 0x1;
+ status->sspp[SSPP_VIG2] = (value >> 8) & 0x1;
+ status->sspp[SSPP_VIG3] = (value >> 10) & 0x1;
+ status->sspp[SSPP_RGB0] = (value >> 12) & 0x1;
+ status->sspp[SSPP_RGB1] = (value >> 14) & 0x1;
+ status->sspp[SSPP_RGB2] = (value >> 16) & 0x1;
+ status->sspp[SSPP_RGB3] = (value >> 18) & 0x1;
+ status->sspp[SSPP_DMA0] = (value >> 20) & 0x1;
+ status->sspp[SSPP_DMA1] = (value >> 22) & 0x1;
+ status->sspp[SSPP_DMA2] = (value >> 28) & 0x1;
+ status->sspp[SSPP_DMA3] = (value >> 30) & 0x1;
+ status->sspp[SSPP_CURSOR0] = (value >> 24) & 0x1;
+ status->sspp[SSPP_CURSOR1] = (value >> 26) & 0x1;
+}
+
+static void dpu_hw_reset_ubwc(struct dpu_hw_mdp *mdp, struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_blk_reg_map c;
+
+ if (!mdp || !m)
+ return;
+
+ if (!IS_UBWC_20_SUPPORTED(m->caps->ubwc_version))
+ return;
+
+ /* force blk offset to zero to access beginning of register region */
+ c = mdp->hw;
+ c.blk_off = 0x0;
+ DPU_REG_WRITE(&c, UBWC_STATIC, m->mdp[0].ubwc_static);
+}
+
+static void dpu_hw_intf_audio_select(struct dpu_hw_mdp *mdp)
+{
+ struct dpu_hw_blk_reg_map *c;
+
+ if (!mdp)
+ return;
+
+ c = &mdp->hw;
+
+ DPU_REG_WRITE(c, HDMI_DP_CORE_SELECT, 0x1);
+}
+
+static void _setup_mdp_ops(struct dpu_hw_mdp_ops *ops,
+ unsigned long cap)
+{
+ ops->setup_split_pipe = dpu_hw_setup_split_pipe;
+ ops->setup_cdm_output = dpu_hw_setup_cdm_output;
+ ops->setup_clk_force_ctrl = dpu_hw_setup_clk_force_ctrl;
+ ops->get_danger_status = dpu_hw_get_danger_status;
+ ops->setup_vsync_source = dpu_hw_setup_vsync_source;
+ ops->get_safe_status = dpu_hw_get_safe_status;
+ ops->reset_ubwc = dpu_hw_reset_ubwc;
+ ops->intf_audio_select = dpu_hw_intf_audio_select;
+}
+
+static const struct dpu_mdp_cfg *_top_offset(enum dpu_mdp mdp,
+ const struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ if (!m || !addr || !b)
+ return ERR_PTR(-EINVAL);
+
+ for (i = 0; i < m->mdp_count; i++) {
+ if (mdp == m->mdp[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->mdp[i].base;
+ b->length = m->mdp[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_TOP;
+ return &m->mdp[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+static struct dpu_hw_blk_ops dpu_hw_ops = {
+ .start = NULL,
+ .stop = NULL,
+};
+
+struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_mdp *mdp;
+ const struct dpu_mdp_cfg *cfg;
+ int rc;
+
+ if (!addr || !m)
+ return ERR_PTR(-EINVAL);
+
+ mdp = kzalloc(sizeof(*mdp), GFP_KERNEL);
+ if (!mdp)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _top_offset(idx, m, addr, &mdp->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(mdp);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * Assign ops
+ */
+ mdp->idx = idx;
+ mdp->caps = cfg;
+ _setup_mdp_ops(&mdp->ops, mdp->caps->features);
+
+ rc = dpu_hw_blk_init(&mdp->base, DPU_HW_BLK_TOP, idx, &dpu_hw_ops);
+ if (rc) {
+ DPU_ERROR("failed to init hw blk %d\n", rc);
+ goto blk_init_error;
+ }
+
+ dpu_dbg_set_dpu_top_offset(mdp->hw.blk_off);
+
+ return mdp;
+
+blk_init_error:
+ kzfree(mdp);
+
+ return ERR_PTR(rc);
+}
+
+void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp)
+{
+ if (mdp)
+ dpu_hw_blk_destroy(&mdp->base);
+ kfree(mdp);
+}
+
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
new file mode 100644
index 000000000000..899925aaa6d7
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_top.h
@@ -0,0 +1,202 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_TOP_H
+#define _DPU_HW_TOP_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+#include "dpu_hw_blk.h"
+
+struct dpu_hw_mdp;
+
+/**
+ * struct traffic_shaper_cfg: traffic shaper configuration
+ * @en : enable/disable traffic shaper
+ * @rd_client : true if read client; false if write client
+ * @client_id : client identifier
+ * @bpc_denom : denominator of byte per clk
+ * @bpc_numer : numerator of byte per clk
+ */
+struct traffic_shaper_cfg {
+ bool en;
+ bool rd_client;
+ u32 client_id;
+ u32 bpc_denom;
+ u64 bpc_numer;
+};
+
+/**
+ * struct split_pipe_cfg - pipe configuration for dual display panels
+ * @en : Enable/disable dual pipe configuration
+ * @mode : Panel interface mode
+ * @intf : Interface id for main control path
+ * @split_flush_en: Allows both the paths to be flushed when master path is
+ * flushed
+ */
+struct split_pipe_cfg {
+ bool en;
+ enum dpu_intf_mode mode;
+ enum dpu_intf intf;
+ bool split_flush_en;
+};
+
+/**
+ * struct cdm_output_cfg: output configuration for cdm
+ * @intf_en : enable/disable interface output
+ */
+struct cdm_output_cfg {
+ bool intf_en;
+};
+
+/**
+ * struct dpu_danger_safe_status: danger and safe status signals
+ * @mdp: top level status
+ * @sspp: source pipe status
+ */
+struct dpu_danger_safe_status {
+ u8 mdp;
+ u8 sspp[SSPP_MAX];
+};
+
+/**
+ * struct dpu_vsync_source_cfg - configure vsync source and configure the
+ * watchdog timers if required.
+ * @pp_count: number of ping pongs active
+ * @frame_rate: Display frame rate
+ * @ppnumber: ping pong index array
+ * @vsync_source: vsync source selection
+ */
+struct dpu_vsync_source_cfg {
+ u32 pp_count;
+ u32 frame_rate;
+ u32 ppnumber[PINGPONG_MAX];
+ u32 vsync_source;
+};
+
+/**
+ * struct dpu_hw_mdp_ops - interface to the MDP TOP Hw driver functions
+ * Assumption is these functions will be called after clocks are enabled.
+ * @setup_split_pipe : Programs the pipe control registers
+ * @setup_cdm_output : programs cdm control
+ * @setup_traffic_shaper : programs traffic shaper control
+ * @setup_clk_force_ctrl : sets clock force control
+ * @get_danger_status : gets danger status
+ * @setup_vsync_source : sets up vsync source
+ * @get_safe_status : gets safe status
+ * @reset_ubwc : resets UBWC configuration
+ * @intf_audio_select : selects the external interface for audio
+ */
+struct dpu_hw_mdp_ops {
+	/**
+	 * setup_split_pipe() : Registers are not double buffered; this
+	 * function should be called before timing control enable
+	 * @mdp : mdp top context driver
+	 * @p : upper and lower part of pipe configuration
+ */
+ void (*setup_split_pipe)(struct dpu_hw_mdp *mdp,
+ struct split_pipe_cfg *p);
+
+ /**
+ * setup_cdm_output() : Setup selection control of the cdm data path
+ * @mdp : mdp top context driver
+ * @cfg : cdm output configuration
+ */
+ void (*setup_cdm_output)(struct dpu_hw_mdp *mdp,
+ struct cdm_output_cfg *cfg);
+
+ /**
+ * setup_traffic_shaper() : Setup traffic shaper control
+ * @mdp : mdp top context driver
+ * @cfg : traffic shaper configuration
+ */
+ void (*setup_traffic_shaper)(struct dpu_hw_mdp *mdp,
+ struct traffic_shaper_cfg *cfg);
+
+ /**
+ * setup_clk_force_ctrl - set clock force control
+ * @mdp: mdp top context driver
+ * @clk_ctrl: clock to be controlled
+ * @enable: force on enable
+ * @return: if the clock is forced-on by this function
+ */
+ bool (*setup_clk_force_ctrl)(struct dpu_hw_mdp *mdp,
+ enum dpu_clk_ctrl_type clk_ctrl, bool enable);
+
+ /**
+ * get_danger_status - get danger status
+ * @mdp: mdp top context driver
+ * @status: Pointer to danger safe status
+ */
+ void (*get_danger_status)(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status);
+
+ /**
+ * setup_vsync_source - setup vsync source configuration details
+ * @mdp: mdp top context driver
+ * @cfg: vsync source selection configuration
+ */
+ void (*setup_vsync_source)(struct dpu_hw_mdp *mdp,
+ struct dpu_vsync_source_cfg *cfg);
+
+ /**
+ * get_safe_status - get safe status
+ * @mdp: mdp top context driver
+ * @status: Pointer to danger safe status
+ */
+ void (*get_safe_status)(struct dpu_hw_mdp *mdp,
+ struct dpu_danger_safe_status *status);
+
+ /**
+ * reset_ubwc - reset top level UBWC configuration
+ * @mdp: mdp top context driver
+ * @m: pointer to mdss catalog data
+ */
+ void (*reset_ubwc)(struct dpu_hw_mdp *mdp, struct dpu_mdss_cfg *m);
+
+ /**
+ * intf_audio_select - select the external interface for audio
+ * @mdp: mdp top context driver
+ */
+ void (*intf_audio_select)(struct dpu_hw_mdp *mdp);
+};
+
+struct dpu_hw_mdp {
+ struct dpu_hw_blk base;
+ struct dpu_hw_blk_reg_map hw;
+
+ /* top */
+ enum dpu_mdp idx;
+ const struct dpu_mdp_cfg *caps;
+
+ /* ops */
+ struct dpu_hw_mdp_ops ops;
+};
+
+/**
+ * to_dpu_hw_mdp - convert base object dpu_hw_blk to container
+ * @hw: Pointer to base hardware block
+ * return: Pointer to hardware block container
+ */
+static inline struct dpu_hw_mdp *to_dpu_hw_mdp(struct dpu_hw_blk *hw)
+{
+ return container_of(hw, struct dpu_hw_mdp, base);
+}
+
+/**
+ * dpu_hw_mdptop_init - initializes the top driver for the passed idx
+ * @idx: Interface index for which driver object is required
+ * @addr: Mapped register io address of MDP
+ * @m: Pointer to mdss catalog data
+ */
+struct dpu_hw_mdp *dpu_hw_mdptop_init(enum dpu_mdp idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
+
+void dpu_hw_mdp_destroy(struct dpu_hw_mdp *mdp);
+
+#endif /*_DPU_HW_TOP_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
new file mode 100644
index 000000000000..4cabae480a7b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.c
@@ -0,0 +1,368 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+/* using a file-static variable for debugfs access */
+static u32 dpu_hw_util_log_mask = DPU_DBG_MASK_NONE;
+
+/* DPU_SCALER_QSEED3 */
+#define QSEED3_HW_VERSION 0x00
+#define QSEED3_OP_MODE 0x04
+#define QSEED3_RGB2Y_COEFF 0x08
+#define QSEED3_PHASE_INIT 0x0C
+#define QSEED3_PHASE_STEP_Y_H 0x10
+#define QSEED3_PHASE_STEP_Y_V 0x14
+#define QSEED3_PHASE_STEP_UV_H 0x18
+#define QSEED3_PHASE_STEP_UV_V 0x1C
+#define QSEED3_PRELOAD 0x20
+#define QSEED3_DE_SHARPEN 0x24
+#define QSEED3_DE_SHARPEN_CTL 0x28
+#define QSEED3_DE_SHAPE_CTL 0x2C
+#define QSEED3_DE_THRESHOLD 0x30
+#define QSEED3_DE_ADJUST_DATA_0 0x34
+#define QSEED3_DE_ADJUST_DATA_1 0x38
+#define QSEED3_DE_ADJUST_DATA_2 0x3C
+#define QSEED3_SRC_SIZE_Y_RGB_A 0x40
+#define QSEED3_SRC_SIZE_UV 0x44
+#define QSEED3_DST_SIZE 0x48
+#define QSEED3_COEF_LUT_CTRL 0x4C
+#define QSEED3_COEF_LUT_SWAP_BIT 0
+#define QSEED3_COEF_LUT_DIR_BIT 1
+#define QSEED3_COEF_LUT_Y_CIR_BIT 2
+#define QSEED3_COEF_LUT_UV_CIR_BIT 3
+#define QSEED3_COEF_LUT_Y_SEP_BIT 4
+#define QSEED3_COEF_LUT_UV_SEP_BIT 5
+#define QSEED3_BUFFER_CTRL 0x50
+#define QSEED3_CLK_CTRL0 0x54
+#define QSEED3_CLK_CTRL1 0x58
+#define QSEED3_CLK_STATUS 0x5C
+#define QSEED3_MISR_CTRL 0x70
+#define QSEED3_MISR_SIGNATURE_0 0x74
+#define QSEED3_MISR_SIGNATURE_1 0x78
+#define QSEED3_PHASE_INIT_Y_H 0x90
+#define QSEED3_PHASE_INIT_Y_V 0x94
+#define QSEED3_PHASE_INIT_UV_H 0x98
+#define QSEED3_PHASE_INIT_UV_V 0x9C
+#define QSEED3_COEF_LUT 0x100
+#define QSEED3_FILTERS 5
+#define QSEED3_LUT_REGIONS 4
+#define QSEED3_CIRCULAR_LUTS 9
+#define QSEED3_SEPARABLE_LUTS 10
+#define QSEED3_LUT_SIZE 60
+#define QSEED3_ENABLE 2
+#define QSEED3_DIR_LUT_SIZE (200 * sizeof(u32))
+#define QSEED3_CIR_LUT_SIZE \
+ (QSEED3_LUT_SIZE * QSEED3_CIRCULAR_LUTS * sizeof(u32))
+#define QSEED3_SEP_LUT_SIZE \
+ (QSEED3_LUT_SIZE * QSEED3_SEPARABLE_LUTS * sizeof(u32))
+
+void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
+ u32 reg_off,
+ u32 val,
+ const char *name)
+{
+ /* don't need to mutex protect this */
+ if (c->log_mask & dpu_hw_util_log_mask)
+ DPU_DEBUG_DRIVER("[%s:0x%X] <= 0x%X\n",
+ name, c->blk_off + reg_off, val);
+ writel_relaxed(val, c->base_off + c->blk_off + reg_off);
+}
+
+int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off)
+{
+ return readl_relaxed(c->base_off + c->blk_off + reg_off);
+}
+
+u32 *dpu_hw_util_get_log_mask_ptr(void)
+{
+ return &dpu_hw_util_log_mask;
+}
+
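A short sketch (not part of the patch) of how these accessors compose; `c` is a populated struct dpu_hw_blk_reg_map and CLK_CTRL_OFF is a hypothetical block-relative offset. The effective MMIO address is always base_off + blk_off + reg_off:

	u32 val;

	val = dpu_reg_read(c, CLK_CTRL_OFF);	/* read-modify-write */
	val |= BIT(0);				/* hypothetical enable bit */
	dpu_reg_write(c, CLK_CTRL_OFF, val, "CLK_CTRL_OFF");
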
+static void _dpu_hw_setup_scaler3_lut(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg, u32 offset)
+{
+ int i, j, filter;
+ int config_lut = 0x0;
+ unsigned long lut_flags;
+ u32 lut_addr, lut_offset, lut_len;
+ u32 *lut[QSEED3_FILTERS] = {NULL, NULL, NULL, NULL, NULL};
+ static const uint32_t off_tbl[QSEED3_FILTERS][QSEED3_LUT_REGIONS][2] = {
+ {{18, 0x000}, {12, 0x120}, {12, 0x1E0}, {8, 0x2A0} },
+ {{6, 0x320}, {3, 0x3E0}, {3, 0x440}, {3, 0x4A0} },
+ {{6, 0x500}, {3, 0x5c0}, {3, 0x620}, {3, 0x680} },
+ {{6, 0x380}, {3, 0x410}, {3, 0x470}, {3, 0x4d0} },
+ {{6, 0x560}, {3, 0x5f0}, {3, 0x650}, {3, 0x6b0} },
+ };
+
+ lut_flags = (unsigned long) scaler3_cfg->lut_flag;
+ if (test_bit(QSEED3_COEF_LUT_DIR_BIT, &lut_flags) &&
+ (scaler3_cfg->dir_len == QSEED3_DIR_LUT_SIZE)) {
+ lut[0] = scaler3_cfg->dir_lut;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_Y_CIR_BIT, &lut_flags) &&
+ (scaler3_cfg->y_rgb_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+ (scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+ lut[1] = scaler3_cfg->cir_lut +
+ scaler3_cfg->y_rgb_cir_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_UV_CIR_BIT, &lut_flags) &&
+ (scaler3_cfg->uv_cir_lut_idx < QSEED3_CIRCULAR_LUTS) &&
+ (scaler3_cfg->cir_len == QSEED3_CIR_LUT_SIZE)) {
+ lut[2] = scaler3_cfg->cir_lut +
+ scaler3_cfg->uv_cir_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_Y_SEP_BIT, &lut_flags) &&
+ (scaler3_cfg->y_rgb_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+ (scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+ lut[3] = scaler3_cfg->sep_lut +
+ scaler3_cfg->y_rgb_sep_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+ if (test_bit(QSEED3_COEF_LUT_UV_SEP_BIT, &lut_flags) &&
+ (scaler3_cfg->uv_sep_lut_idx < QSEED3_SEPARABLE_LUTS) &&
+ (scaler3_cfg->sep_len == QSEED3_SEP_LUT_SIZE)) {
+ lut[4] = scaler3_cfg->sep_lut +
+ scaler3_cfg->uv_sep_lut_idx * QSEED3_LUT_SIZE;
+ config_lut = 1;
+ }
+
+ if (config_lut) {
+ for (filter = 0; filter < QSEED3_FILTERS; filter++) {
+ if (!lut[filter])
+ continue;
+ lut_offset = 0;
+ for (i = 0; i < QSEED3_LUT_REGIONS; i++) {
+ lut_addr = QSEED3_COEF_LUT + offset
+ + off_tbl[filter][i][1];
+ lut_len = off_tbl[filter][i][0] << 2;
+ for (j = 0; j < lut_len; j++) {
+ DPU_REG_WRITE(c,
+ lut_addr,
+ (lut[filter])[lut_offset++]);
+ lut_addr += 4;
+ }
+ }
+ }
+ }
+
+ if (test_bit(QSEED3_COEF_LUT_SWAP_BIT, &lut_flags))
+ DPU_REG_WRITE(c, QSEED3_COEF_LUT_CTRL + offset, BIT(0));
+
+}
+
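A note on the sizes being validated above: the first column of off_tbl counts groups of four 32-bit entries (lut_len shifts it left by 2), so the directional filter programs (18 + 12 + 12 + 8) * 4 = 200 words, matching QSEED3_DIR_LUT_SIZE, while each circular or separable filter programs (6 + 3 + 3 + 3) * 4 = 60 words, matching QSEED3_LUT_SIZE. The length guards therefore expect dir_len == 800 bytes, cir_len == 60 * 9 * 4 = 2160 bytes and sep_len == 60 * 10 * 4 = 2400 bytes.
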
+static void _dpu_hw_setup_scaler3_de(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_de_cfg *de_cfg, u32 offset)
+{
+ u32 sharp_lvl, sharp_ctl, shape_ctl, de_thr;
+ u32 adjust_a, adjust_b, adjust_c;
+
+ if (!de_cfg->enable)
+ return;
+
+ sharp_lvl = (de_cfg->sharpen_level1 & 0x1FF) |
+ ((de_cfg->sharpen_level2 & 0x1FF) << 16);
+
+ sharp_ctl = ((de_cfg->limit & 0xF) << 9) |
+ ((de_cfg->prec_shift & 0x7) << 13) |
+ ((de_cfg->clip & 0x7) << 16);
+
+ shape_ctl = (de_cfg->thr_quiet & 0xFF) |
+ ((de_cfg->thr_dieout & 0x3FF) << 16);
+
+ de_thr = (de_cfg->thr_low & 0x3FF) |
+ ((de_cfg->thr_high & 0x3FF) << 16);
+
+ adjust_a = (de_cfg->adjust_a[0] & 0x3FF) |
+ ((de_cfg->adjust_a[1] & 0x3FF) << 10) |
+ ((de_cfg->adjust_a[2] & 0x3FF) << 20);
+
+ adjust_b = (de_cfg->adjust_b[0] & 0x3FF) |
+ ((de_cfg->adjust_b[1] & 0x3FF) << 10) |
+ ((de_cfg->adjust_b[2] & 0x3FF) << 20);
+
+ adjust_c = (de_cfg->adjust_c[0] & 0x3FF) |
+ ((de_cfg->adjust_c[1] & 0x3FF) << 10) |
+ ((de_cfg->adjust_c[2] & 0x3FF) << 20);
+
+ DPU_REG_WRITE(c, QSEED3_DE_SHARPEN + offset, sharp_lvl);
+ DPU_REG_WRITE(c, QSEED3_DE_SHARPEN_CTL + offset, sharp_ctl);
+ DPU_REG_WRITE(c, QSEED3_DE_SHAPE_CTL + offset, shape_ctl);
+ DPU_REG_WRITE(c, QSEED3_DE_THRESHOLD + offset, de_thr);
+ DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_0 + offset, adjust_a);
+ DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_1 + offset, adjust_b);
+ DPU_REG_WRITE(c, QSEED3_DE_ADJUST_DATA_2 + offset, adjust_c);
+
+}
+
+void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg,
+ u32 scaler_offset, u32 scaler_version,
+ const struct dpu_format *format)
+{
+ u32 op_mode = 0;
+ u32 phase_init, preload, src_y_rgb, src_uv, dst;
+
+ if (!scaler3_cfg->enable)
+ goto end;
+
+ op_mode |= BIT(0);
+ op_mode |= (scaler3_cfg->y_rgb_filter_cfg & 0x3) << 16;
+
+ if (format && DPU_FORMAT_IS_YUV(format)) {
+ op_mode |= BIT(12);
+ op_mode |= (scaler3_cfg->uv_filter_cfg & 0x3) << 24;
+ }
+
+ op_mode |= (scaler3_cfg->blend_cfg & 1) << 31;
+ op_mode |= (scaler3_cfg->dir_en) ? BIT(4) : 0;
+
+ preload =
+ ((scaler3_cfg->preload_x[0] & 0x7F) << 0) |
+ ((scaler3_cfg->preload_y[0] & 0x7F) << 8) |
+ ((scaler3_cfg->preload_x[1] & 0x7F) << 16) |
+ ((scaler3_cfg->preload_y[1] & 0x7F) << 24);
+
+ src_y_rgb = (scaler3_cfg->src_width[0] & 0x1FFFF) |
+ ((scaler3_cfg->src_height[0] & 0x1FFFF) << 16);
+
+ src_uv = (scaler3_cfg->src_width[1] & 0x1FFFF) |
+ ((scaler3_cfg->src_height[1] & 0x1FFFF) << 16);
+
+ dst = (scaler3_cfg->dst_width & 0x1FFFF) |
+ ((scaler3_cfg->dst_height & 0x1FFFF) << 16);
+
+ if (scaler3_cfg->de.enable) {
+ _dpu_hw_setup_scaler3_de(c, &scaler3_cfg->de, scaler_offset);
+ op_mode |= BIT(8);
+ }
+
+ if (scaler3_cfg->lut_flag)
+ _dpu_hw_setup_scaler3_lut(c, scaler3_cfg,
+ scaler_offset);
+
+ if (scaler_version == 0x1002) {
+ phase_init =
+ ((scaler3_cfg->init_phase_x[0] & 0x3F) << 0) |
+ ((scaler3_cfg->init_phase_y[0] & 0x3F) << 8) |
+ ((scaler3_cfg->init_phase_x[1] & 0x3F) << 16) |
+ ((scaler3_cfg->init_phase_y[1] & 0x3F) << 24);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT + scaler_offset, phase_init);
+ } else {
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_Y_H + scaler_offset,
+ scaler3_cfg->init_phase_x[0] & 0x1FFFFF);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_Y_V + scaler_offset,
+ scaler3_cfg->init_phase_y[0] & 0x1FFFFF);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_UV_H + scaler_offset,
+ scaler3_cfg->init_phase_x[1] & 0x1FFFFF);
+ DPU_REG_WRITE(c, QSEED3_PHASE_INIT_UV_V + scaler_offset,
+ scaler3_cfg->init_phase_y[1] & 0x1FFFFF);
+ }
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_Y_H + scaler_offset,
+ scaler3_cfg->phase_step_x[0] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_Y_V + scaler_offset,
+ scaler3_cfg->phase_step_y[0] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_UV_H + scaler_offset,
+ scaler3_cfg->phase_step_x[1] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PHASE_STEP_UV_V + scaler_offset,
+ scaler3_cfg->phase_step_y[1] & 0xFFFFFF);
+
+ DPU_REG_WRITE(c, QSEED3_PRELOAD + scaler_offset, preload);
+
+ DPU_REG_WRITE(c, QSEED3_SRC_SIZE_Y_RGB_A + scaler_offset, src_y_rgb);
+
+ DPU_REG_WRITE(c, QSEED3_SRC_SIZE_UV + scaler_offset, src_uv);
+
+ DPU_REG_WRITE(c, QSEED3_DST_SIZE + scaler_offset, dst);
+
+end:
+ if (format && !DPU_FORMAT_IS_DX(format))
+ op_mode |= BIT(14);
+
+ if (format && format->alpha_enable) {
+ op_mode |= BIT(10);
+ if (scaler_version == 0x1002)
+ op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x1) << 30;
+ else
+ op_mode |= (scaler3_cfg->alpha_filter_cfg & 0x3) << 29;
+ }
+
+ DPU_REG_WRITE(c, QSEED3_OP_MODE + scaler_offset, op_mode);
+}
+
+u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
+ u32 scaler_offset)
+{
+ return DPU_REG_READ(c, QSEED3_HW_VERSION + scaler_offset);
+}
+
+void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
+ u32 csc_reg_off,
+ struct dpu_csc_cfg *data, bool csc10)
+{
+ static const u32 matrix_shift = 7;
+ u32 clamp_shift = csc10 ? 16 : 8;
+ u32 val;
+
+ /* matrix coeff - convert S15.16 to S4.9 */
+ val = ((data->csc_mv[0] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[1] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off, val);
+ val = ((data->csc_mv[2] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[3] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off + 0x4, val);
+ val = ((data->csc_mv[4] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[5] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off + 0x8, val);
+ val = ((data->csc_mv[6] >> matrix_shift) & 0x1FFF) |
+ (((data->csc_mv[7] >> matrix_shift) & 0x1FFF) << 16);
+ DPU_REG_WRITE(c, csc_reg_off + 0xc, val);
+ val = (data->csc_mv[8] >> matrix_shift) & 0x1FFF;
+ DPU_REG_WRITE(c, csc_reg_off + 0x10, val);
+
+ /* Pre clamp */
+ val = (data->csc_pre_lv[0] << clamp_shift) | data->csc_pre_lv[1];
+ DPU_REG_WRITE(c, csc_reg_off + 0x14, val);
+ val = (data->csc_pre_lv[2] << clamp_shift) | data->csc_pre_lv[3];
+ DPU_REG_WRITE(c, csc_reg_off + 0x18, val);
+ val = (data->csc_pre_lv[4] << clamp_shift) | data->csc_pre_lv[5];
+ DPU_REG_WRITE(c, csc_reg_off + 0x1c, val);
+
+ /* Post clamp */
+ val = (data->csc_post_lv[0] << clamp_shift) | data->csc_post_lv[1];
+ DPU_REG_WRITE(c, csc_reg_off + 0x20, val);
+ val = (data->csc_post_lv[2] << clamp_shift) | data->csc_post_lv[3];
+ DPU_REG_WRITE(c, csc_reg_off + 0x24, val);
+ val = (data->csc_post_lv[4] << clamp_shift) | data->csc_post_lv[5];
+ DPU_REG_WRITE(c, csc_reg_off + 0x28, val);
+
+ /* Pre-Bias */
+ DPU_REG_WRITE(c, csc_reg_off + 0x2c, data->csc_pre_bv[0]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x30, data->csc_pre_bv[1]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x34, data->csc_pre_bv[2]);
+
+ /* Post-Bias */
+ DPU_REG_WRITE(c, csc_reg_off + 0x38, data->csc_post_bv[0]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x3c, data->csc_post_bv[1]);
+ DPU_REG_WRITE(c, csc_reg_off + 0x40, data->csc_post_bv[2]);
+}
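A worked example of the coefficient conversion above: matrix_shift = 7 drops the S15.16 input from 16 to 9 fractional bits, and the 0x1FFF mask keeps 13 bits of the two's-complement result, i.e. the S4.9 format named in the comment. Unity gain (1.0), which is 0x10000 in S15.16, becomes 0x10000 >> 7 = 0x200 = 1 << 9, i.e. 1.0 in S4.9.
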
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
new file mode 100644
index 000000000000..1240f505ca53
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_util.h
@@ -0,0 +1,348 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_UTIL_H
+#define _DPU_HW_UTIL_H
+
+#include <linux/io.h>
+#include <linux/slab.h>
+#include "dpu_hw_mdss.h"
+
+#define REG_MASK(n) ((BIT(n)) - 1)
+struct dpu_format_extended;
+
+/*
+ * This is the common struct maintained by each sub block
+ * for mapping the register offsets in this block to the
+ * absolute IO address
+ * @base_off: mdp register mapped offset
+ * @blk_off: pipe offset relative to mdss offset
+ * @length: length of register block offset
+ * @xin_id: xin id
+ * @hwversion: mdss hw version number
+ * @log_mask: log mask for this block
+ */
+struct dpu_hw_blk_reg_map {
+ void __iomem *base_off;
+ u32 blk_off;
+ u32 length;
+ u32 xin_id;
+ u32 hwversion;
+ u32 log_mask;
+};
+
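In practice one reg map is filled in per hardware block from the catalog (see the _top_offset()-style helpers elsewhere in this patch): base_off holds the device's mapped register base, blk_off the block's catalog offset from that base, and every accessor adds a caller-supplied block-relative register offset on top, so the same driver code can serve any instance of a block.
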
+/**
+ * struct dpu_hw_scaler3_de_cfg - QSEEDv3 detail enhancer configuration
+ * @enable: detail enhancer enable/disable
+ * @sharpen_level1: sharpening strength for noise
+ * @sharpen_level2: sharpening strength for signal
+ * @clip: clip shift
+ * @limit: limit value
+ * @thr_quiet: quiet threshold
+ * @thr_dieout: dieout threshold
+ * @thr_low: low threshold
+ * @thr_high: high threshold
+ * @prec_shift: precision shift
+ * @adjust_a: A-coefficients for mapping curve
+ * @adjust_b: B-coefficients for mapping curve
+ * @adjust_c: C-coefficients for mapping curve
+ */
+struct dpu_hw_scaler3_de_cfg {
+ u32 enable;
+ int16_t sharpen_level1;
+ int16_t sharpen_level2;
+ uint16_t clip;
+ uint16_t limit;
+ uint16_t thr_quiet;
+ uint16_t thr_dieout;
+ uint16_t thr_low;
+ uint16_t thr_high;
+ uint16_t prec_shift;
+ int16_t adjust_a[DPU_MAX_DE_CURVES];
+ int16_t adjust_b[DPU_MAX_DE_CURVES];
+ int16_t adjust_c[DPU_MAX_DE_CURVES];
+};
+
+
+/**
+ * struct dpu_hw_scaler3_cfg - QSEEDv3 configuration
+ * @enable: scaler enable
+ * @dir_en: direction detection block enable
+ * @init_phase_x: horizontal initial phase
+ * @phase_step_x: horizontal phase step
+ * @init_phase_y: vertical initial phase
+ * @phase_step_y: vertical phase step
+ * @preload_x: horizontal preload value
+ * @preload_y: vertical preload value
+ * @src_width: source width
+ * @src_height: source height
+ * @dst_width: destination width
+ * @dst_height: destination height
+ * @y_rgb_filter_cfg: y/rgb plane filter configuration
+ * @uv_filter_cfg: uv plane filter configuration
+ * @alpha_filter_cfg: alpha filter configuration
+ * @blend_cfg: blend coefficients configuration
+ * @lut_flag: scaler LUT update flags
+ * 0x1 swap LUT bank
+ * 0x2 update 2D filter LUT
+ * 0x4 update y circular filter LUT
+ * 0x8 update uv circular filter LUT
+ * 0x10 update y separable filter LUT
+ * 0x20 update uv separable filter LUT
+ * @dir_lut_idx: 2D filter LUT index
+ * @y_rgb_cir_lut_idx: y circular filter LUT index
+ * @uv_cir_lut_idx: uv circular filter LUT index
+ * @y_rgb_sep_lut_idx: y separable filter LUT index
+ * @uv_sep_lut_idx: uv separable filter LUT index
+ * @dir_lut: pointer to 2D LUT
+ * @cir_lut: pointer to circular filter LUT
+ * @sep_lut: pointer to separable filter LUT
+ * @de: detail enhancer configuration
+ */
+struct dpu_hw_scaler3_cfg {
+ u32 enable;
+ u32 dir_en;
+ int32_t init_phase_x[DPU_MAX_PLANES];
+ int32_t phase_step_x[DPU_MAX_PLANES];
+ int32_t init_phase_y[DPU_MAX_PLANES];
+ int32_t phase_step_y[DPU_MAX_PLANES];
+
+ u32 preload_x[DPU_MAX_PLANES];
+ u32 preload_y[DPU_MAX_PLANES];
+ u32 src_width[DPU_MAX_PLANES];
+ u32 src_height[DPU_MAX_PLANES];
+
+ u32 dst_width;
+ u32 dst_height;
+
+ u32 y_rgb_filter_cfg;
+ u32 uv_filter_cfg;
+ u32 alpha_filter_cfg;
+ u32 blend_cfg;
+
+ u32 lut_flag;
+ u32 dir_lut_idx;
+
+ u32 y_rgb_cir_lut_idx;
+ u32 uv_cir_lut_idx;
+ u32 y_rgb_sep_lut_idx;
+ u32 uv_sep_lut_idx;
+ u32 *dir_lut;
+ size_t dir_len;
+ u32 *cir_lut;
+ size_t cir_len;
+ u32 *sep_lut;
+ size_t sep_len;
+
+ /*
+ * Detail enhancer settings
+ */
+ struct dpu_hw_scaler3_de_cfg de;
+};
+
+struct dpu_hw_scaler3_lut_cfg {
+ bool is_configured;
+ u32 *dir_lut;
+ size_t dir_len;
+ u32 *cir_lut;
+ size_t cir_len;
+ u32 *sep_lut;
+ size_t sep_len;
+};
+
+/**
+ * struct dpu_drm_pix_ext_v1 - version 1 of pixel ext structure
+ * @num_ext_pxls_lr: Number of total horizontal pixels
+ * @num_ext_pxls_tb: Number of total vertical lines
+ * @left_ftch: Number of extra pixels to overfetch from left
+ * @right_ftch: Number of extra pixels to overfetch from right
+ * @top_ftch: Number of extra lines to overfetch from top
+ * @btm_ftch: Number of extra lines to overfetch from bottom
+ * @left_rpt: Number of extra pixels to repeat from left
+ * @right_rpt: Number of extra pixels to repeat from right
+ * @top_rpt: Number of extra lines to repeat from top
+ * @btm_rpt: Number of extra lines to repeat from bottom
+ */
+struct dpu_drm_pix_ext_v1 {
+ /*
+ * Number of pixels ext in left, right, top and bottom direction
+ * for all color components.
+ */
+ int32_t num_ext_pxls_lr[DPU_MAX_PLANES];
+ int32_t num_ext_pxls_tb[DPU_MAX_PLANES];
+
+ /*
+ * Number of pixels needs to be overfetched in left, right, top
+ * and bottom directions from source image for scaling.
+ */
+ int32_t left_ftch[DPU_MAX_PLANES];
+ int32_t right_ftch[DPU_MAX_PLANES];
+ int32_t top_ftch[DPU_MAX_PLANES];
+ int32_t btm_ftch[DPU_MAX_PLANES];
+ /*
+ * Number of pixels needs to be repeated in left, right, top and
+ * bottom directions for scaling.
+ */
+ int32_t left_rpt[DPU_MAX_PLANES];
+ int32_t right_rpt[DPU_MAX_PLANES];
+ int32_t top_rpt[DPU_MAX_PLANES];
+ int32_t btm_rpt[DPU_MAX_PLANES];
+
+};
+
+/**
+ * struct dpu_drm_de_v1 - version 1 of detail enhancer structure
+ * @enable: Enables/disables detail enhancer
+ * @sharpen_level1: Sharpening strength for noise
+ * @sharpen_level2: Sharpening strength for context
+ * @clip: Clip coefficient
+ * @limit: Detail enhancer limit factor
+ * @thr_quiet: Quiet zone threshold
+ * @thr_dieout: Die-out zone threshold
+ * @thr_low: Linear zone left threshold
+ * @thr_high: Linear zone right threshold
+ * @prec_shift: Detail enhancer precision
+ * @adjust_a: Mapping curves A coefficients
+ * @adjust_b: Mapping curves B coefficients
+ * @adjust_c: Mapping curves C coefficients
+ */
+struct dpu_drm_de_v1 {
+ uint32_t enable;
+ int16_t sharpen_level1;
+ int16_t sharpen_level2;
+ uint16_t clip;
+ uint16_t limit;
+ uint16_t thr_quiet;
+ uint16_t thr_dieout;
+ uint16_t thr_low;
+ uint16_t thr_high;
+ uint16_t prec_shift;
+ int16_t adjust_a[DPU_MAX_DE_CURVES];
+ int16_t adjust_b[DPU_MAX_DE_CURVES];
+ int16_t adjust_c[DPU_MAX_DE_CURVES];
+};
+
+/**
+ * struct dpu_drm_scaler_v2 - version 2 of struct dpu_drm_scaler
+ * @enable: Scaler enable
+ * @dir_en: Direction detection enable
+ * @pe: Pixel extension settings
+ * @horz_decimate: Horizontal decimation factor
+ * @vert_decimate: Vertical decimation factor
+ * @init_phase_x: Initial scaler phase values for x
+ * @phase_step_x: Phase step values for x
+ * @init_phase_y: Initial scaler phase values for y
+ * @phase_step_y: Phase step values for y
+ * @preload_x: Horizontal preload value
+ * @preload_y: Vertical preload value
+ * @src_width: Source width
+ * @src_height: Source height
+ * @dst_width: Destination width
+ * @dst_height: Destination height
+ * @y_rgb_filter_cfg: Y/RGB plane filter configuration
+ * @uv_filter_cfg: UV plane filter configuration
+ * @alpha_filter_cfg: Alpha filter configuration
+ * @blend_cfg: Selection of blend coefficients
+ * @lut_flag: LUT configuration flags
+ * @dir_lut_idx: 2d 4x4 LUT index
+ * @y_rgb_cir_lut_idx: Y/RGB circular LUT index
+ * @uv_cir_lut_idx: UV circular LUT index
+ * @y_rgb_sep_lut_idx: Y/RGB separable LUT index
+ * @uv_sep_lut_idx: UV separable LUT index
+ * @de: Detail enhancer settings
+ */
+struct dpu_drm_scaler_v2 {
+ /*
+ * General definitions
+ */
+ uint32_t enable;
+ uint32_t dir_en;
+
+ /*
+ * Pix ext settings
+ */
+ struct dpu_drm_pix_ext_v1 pe;
+
+ /*
+ * Decimation settings
+ */
+ uint32_t horz_decimate;
+ uint32_t vert_decimate;
+
+ /*
+ * Phase settings
+ */
+ int32_t init_phase_x[DPU_MAX_PLANES];
+ int32_t phase_step_x[DPU_MAX_PLANES];
+ int32_t init_phase_y[DPU_MAX_PLANES];
+ int32_t phase_step_y[DPU_MAX_PLANES];
+
+ uint32_t preload_x[DPU_MAX_PLANES];
+ uint32_t preload_y[DPU_MAX_PLANES];
+ uint32_t src_width[DPU_MAX_PLANES];
+ uint32_t src_height[DPU_MAX_PLANES];
+
+ uint32_t dst_width;
+ uint32_t dst_height;
+
+ uint32_t y_rgb_filter_cfg;
+ uint32_t uv_filter_cfg;
+ uint32_t alpha_filter_cfg;
+ uint32_t blend_cfg;
+
+ uint32_t lut_flag;
+ uint32_t dir_lut_idx;
+
+ /* for Y(RGB) and UV planes */
+ uint32_t y_rgb_cir_lut_idx;
+ uint32_t uv_cir_lut_idx;
+ uint32_t y_rgb_sep_lut_idx;
+ uint32_t uv_sep_lut_idx;
+
+ /*
+ * Detail enhancer settings
+ */
+ struct dpu_drm_de_v1 de;
+};
+
+
+u32 *dpu_hw_util_get_log_mask_ptr(void);
+
+void dpu_reg_write(struct dpu_hw_blk_reg_map *c,
+ u32 reg_off,
+ u32 val,
+ const char *name);
+int dpu_reg_read(struct dpu_hw_blk_reg_map *c, u32 reg_off);
+
+#define DPU_REG_WRITE(c, off, val) dpu_reg_write(c, off, val, #off)
+#define DPU_REG_READ(c, off) dpu_reg_read(c, off)
+
+#define MISR_FRAME_COUNT_MASK 0xFF
+#define MISR_CTRL_ENABLE BIT(8)
+#define MISR_CTRL_STATUS BIT(9)
+#define MISR_CTRL_STATUS_CLEAR BIT(10)
+#define INTF_MISR_CTRL_FREE_RUN_MASK BIT(31)
+
+void *dpu_hw_util_get_dir(void);
+
+void dpu_hw_setup_scaler3(struct dpu_hw_blk_reg_map *c,
+ struct dpu_hw_scaler3_cfg *scaler3_cfg,
+ u32 scaler_offset, u32 scaler_version,
+ const struct dpu_format *format);
+
+u32 dpu_hw_get_scaler3_ver(struct dpu_hw_blk_reg_map *c,
+ u32 scaler_offset);
+
+void dpu_hw_csc_setup(struct dpu_hw_blk_reg_map *c,
+ u32 csc_reg_off,
+ struct dpu_csc_cfg *data, bool csc10);
+
+#endif /* _DPU_HW_UTIL_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
new file mode 100644
index 000000000000..d43905525f92
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.c
@@ -0,0 +1,275 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include "dpu_hwio.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_dbg.h"
+
+#define VBIF_VERSION 0x0000
+#define VBIF_CLK_FORCE_CTRL0 0x0008
+#define VBIF_CLK_FORCE_CTRL1 0x000C
+#define VBIF_QOS_REMAP_00 0x0020
+#define VBIF_QOS_REMAP_01 0x0024
+#define VBIF_QOS_REMAP_10 0x0028
+#define VBIF_QOS_REMAP_11 0x002C
+#define VBIF_WRITE_GATHER_EN 0x00AC
+#define VBIF_IN_RD_LIM_CONF0 0x00B0
+#define VBIF_IN_RD_LIM_CONF1 0x00B4
+#define VBIF_IN_RD_LIM_CONF2 0x00B8
+#define VBIF_IN_WR_LIM_CONF0 0x00C0
+#define VBIF_IN_WR_LIM_CONF1 0x00C4
+#define VBIF_IN_WR_LIM_CONF2 0x00C8
+#define VBIF_OUT_RD_LIM_CONF0 0x00D0
+#define VBIF_OUT_WR_LIM_CONF0 0x00D4
+#define VBIF_OUT_AXI_AMEMTYPE_CONF0 0x0160
+#define VBIF_OUT_AXI_AMEMTYPE_CONF1 0x0164
+#define VBIF_XIN_PND_ERR 0x0190
+#define VBIF_XIN_SRC_ERR 0x0194
+#define VBIF_XIN_CLR_ERR 0x019C
+#define VBIF_XIN_HALT_CTRL0 0x0200
+#define VBIF_XIN_HALT_CTRL1 0x0204
+#define VBIF_XINL_QOS_RP_REMAP_000 0x0550
+#define VBIF_XINL_QOS_LVL_REMAP_000 0x0590
+
+static void dpu_hw_clear_errors(struct dpu_hw_vbif *vbif,
+ u32 *pnd_errors, u32 *src_errors)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 pnd, src;
+
+ if (!vbif)
+ return;
+ c = &vbif->hw;
+ pnd = DPU_REG_READ(c, VBIF_XIN_PND_ERR);
+ src = DPU_REG_READ(c, VBIF_XIN_SRC_ERR);
+
+ if (pnd_errors)
+ *pnd_errors = pnd;
+ if (src_errors)
+ *src_errors = src;
+
+ DPU_REG_WRITE(c, VBIF_XIN_CLR_ERR, pnd | src);
+}
+
+static void dpu_hw_set_mem_type(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 value)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_off;
+ u32 bit_off;
+ u32 reg_val;
+
+ /*
+ * Assume 4 bits per bit field, 8 fields per 32-bit register so
+ * 16 bit fields maximum across two registers
+ */
+ if (!vbif || xin_id >= MAX_XIN_COUNT || xin_id >= 16)
+ return;
+
+ c = &vbif->hw;
+
+ if (xin_id >= 8) {
+ xin_id -= 8;
+ reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF1;
+ } else {
+ reg_off = VBIF_OUT_AXI_AMEMTYPE_CONF0;
+ }
+ bit_off = (xin_id & 0x7) * 4;
+ reg_val = DPU_REG_READ(c, reg_off);
+ reg_val &= ~(0x7 << bit_off);
+ reg_val |= (value & 0x7) << bit_off;
+ DPU_REG_WRITE(c, reg_off, reg_val);
+}
+
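A worked example of the field math above: each AMEMTYPE field is four bits wide and holds a three-bit value, with eight fields per 32-bit register. For xin_id = 10 the function selects VBIF_OUT_AXI_AMEMTYPE_CONF1 (since 10 >= 8), then bit_off = ((10 - 8) & 0x7) * 4 = 8, so the value lands in bits 10:8 of that register.
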
+static void dpu_hw_set_limit_conf(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd, u32 limit)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+ u32 reg_off;
+ u32 bit_off;
+
+ if (rd)
+ reg_off = VBIF_IN_RD_LIM_CONF0;
+ else
+ reg_off = VBIF_IN_WR_LIM_CONF0;
+
+ reg_off += (xin_id / 4) * 4;
+ bit_off = (xin_id % 4) * 8;
+ reg_val = DPU_REG_READ(c, reg_off);
+ reg_val &= ~(0xFF << bit_off);
+ reg_val |= (limit) << bit_off;
+ DPU_REG_WRITE(c, reg_off, reg_val);
+}
+
+static u32 dpu_hw_get_limit_conf(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+ u32 reg_off;
+ u32 bit_off;
+ u32 limit;
+
+ if (rd)
+ reg_off = VBIF_IN_RD_LIM_CONF0;
+ else
+ reg_off = VBIF_IN_WR_LIM_CONF0;
+
+ reg_off += (xin_id / 4) * 4;
+ bit_off = (xin_id % 4) * 8;
+ reg_val = DPU_REG_READ(c, reg_off);
+ limit = (reg_val >> bit_off) & 0xFF;
+
+ return limit;
+}
+
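The limit registers pack one byte per client, four clients per 32-bit register. For a read limit on xin_id = 5, reg_off = VBIF_IN_RD_LIM_CONF0 + (5 / 4) * 4 resolves to VBIF_IN_RD_LIM_CONF1 and bit_off = (5 % 4) * 8 = 8, so the limit occupies bits 15:8.
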
+static void dpu_hw_set_halt_ctrl(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool enable)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+
+ reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL0);
+
+ if (enable)
+ reg_val |= BIT(xin_id);
+ else
+ reg_val &= ~BIT(xin_id);
+
+ DPU_REG_WRITE(c, VBIF_XIN_HALT_CTRL0, reg_val);
+}
+
+static bool dpu_hw_get_halt_ctrl(struct dpu_hw_vbif *vbif,
+ u32 xin_id)
+{
+ struct dpu_hw_blk_reg_map *c = &vbif->hw;
+ u32 reg_val;
+
+ reg_val = DPU_REG_READ(c, VBIF_XIN_HALT_CTRL1);
+
+ return (reg_val & BIT(xin_id)) ? true : false;
+}
+
+static void dpu_hw_set_qos_remap(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 level, u32 remap_level)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_val, reg_val_lvl, mask, reg_high, reg_shift;
+
+ if (!vbif)
+ return;
+
+ c = &vbif->hw;
+
+ reg_high = ((xin_id & 0x8) >> 3) * 4 + (level * 8);
+ reg_shift = (xin_id & 0x7) * 4;
+
+ reg_val = DPU_REG_READ(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high);
+ reg_val_lvl = DPU_REG_READ(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high);
+
+ mask = 0x7 << reg_shift;
+
+ reg_val &= ~mask;
+ reg_val |= (remap_level << reg_shift) & mask;
+
+ reg_val_lvl &= ~mask;
+ reg_val_lvl |= (remap_level << reg_shift) & mask;
+
+ DPU_REG_WRITE(c, VBIF_XINL_QOS_RP_REMAP_000 + reg_high, reg_val);
+ DPU_REG_WRITE(c, VBIF_XINL_QOS_LVL_REMAP_000 + reg_high, reg_val_lvl);
+}
+
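Register selection here works per nibble: reg_shift = (xin_id & 0x7) * 4 picks one of eight 4-bit fields (of which three bits are used), and reg_high picks the register, stepping by 8 bytes per priority level plus 4 bytes for the upper xin group. For level = 2 and xin_id = 9, reg_high = ((9 & 0x8) >> 3) * 4 + 2 * 8 = 20 and reg_shift = 4, so both VBIF_XINL_QOS_RP_REMAP_000 + 20 and VBIF_XINL_QOS_LVL_REMAP_000 + 20 are updated in bits 6:4.
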
+static void dpu_hw_set_write_gather_en(struct dpu_hw_vbif *vbif, u32 xin_id)
+{
+ struct dpu_hw_blk_reg_map *c;
+ u32 reg_val;
+
+ if (!vbif || xin_id >= MAX_XIN_COUNT)
+ return;
+
+ c = &vbif->hw;
+
+ reg_val = DPU_REG_READ(c, VBIF_WRITE_GATHER_EN);
+ reg_val |= BIT(xin_id);
+ DPU_REG_WRITE(c, VBIF_WRITE_GATHER_EN, reg_val);
+}
+
+static void _setup_vbif_ops(struct dpu_hw_vbif_ops *ops,
+ unsigned long cap)
+{
+ ops->set_limit_conf = dpu_hw_set_limit_conf;
+ ops->get_limit_conf = dpu_hw_get_limit_conf;
+ ops->set_halt_ctrl = dpu_hw_set_halt_ctrl;
+ ops->get_halt_ctrl = dpu_hw_get_halt_ctrl;
+ if (test_bit(DPU_VBIF_QOS_REMAP, &cap))
+ ops->set_qos_remap = dpu_hw_set_qos_remap;
+ ops->set_mem_type = dpu_hw_set_mem_type;
+ ops->clear_errors = dpu_hw_clear_errors;
+ ops->set_write_gather_en = dpu_hw_set_write_gather_en;
+}
+
+static const struct dpu_vbif_cfg *_top_offset(enum dpu_vbif vbif,
+ const struct dpu_mdss_cfg *m,
+ void __iomem *addr,
+ struct dpu_hw_blk_reg_map *b)
+{
+ int i;
+
+ for (i = 0; i < m->vbif_count; i++) {
+ if (vbif == m->vbif[i].id) {
+ b->base_off = addr;
+ b->blk_off = m->vbif[i].base;
+ b->length = m->vbif[i].len;
+ b->hwversion = m->hwversion;
+ b->log_mask = DPU_DBG_MASK_VBIF;
+ return &m->vbif[i];
+ }
+ }
+
+ return ERR_PTR(-EINVAL);
+}
+
+struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m)
+{
+ struct dpu_hw_vbif *c;
+ const struct dpu_vbif_cfg *cfg;
+
+ c = kzalloc(sizeof(*c), GFP_KERNEL);
+ if (!c)
+ return ERR_PTR(-ENOMEM);
+
+ cfg = _top_offset(idx, m, addr, &c->hw);
+ if (IS_ERR_OR_NULL(cfg)) {
+ kfree(c);
+ return ERR_PTR(-EINVAL);
+ }
+
+ /*
+ * Assign ops
+ */
+ c->idx = idx;
+ c->cap = cfg;
+ _setup_vbif_ops(&c->ops, c->cap->features);
+
+ /* no need to register sub-range in dpu dbg, dump entire vbif io base */
+
+ return c;
+}
+
+void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif)
+{
+ kfree(vbif);
+}
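A minimal usage sketch (not part of the patch), assuming `addr` maps the MDSS base, `cat` is the device catalog and VBIF_RT is a valid enum dpu_vbif index; only the QoS-remap op is conditional on a catalog feature bit, the rest are always installed:

	struct dpu_hw_vbif *vbif;

	vbif = dpu_hw_vbif_init(VBIF_RT, addr, cat);
	if (IS_ERR(vbif))
		return PTR_ERR(vbif);

	vbif->ops.set_limit_conf(vbif, xin_id, true, 0x4);
	if (vbif->ops.set_qos_remap)	/* only with DPU_VBIF_QOS_REMAP */
		vbif->ops.set_qos_remap(vbif, xin_id, 3, 3);

	dpu_hw_vbif_destroy(vbif);
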
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
new file mode 100644
index 000000000000..471ff673c045
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hw_vbif.h
@@ -0,0 +1,128 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HW_VBIF_H
+#define _DPU_HW_VBIF_H
+
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_util.h"
+
+struct dpu_hw_vbif;
+
+/**
+ * struct dpu_hw_vbif_ops : Interface to the VBIF hardware driver functions
+ * Assumption is these functions will be called after clocks are enabled
+ */
+struct dpu_hw_vbif_ops {
+ /**
+ * set_limit_conf - set transaction limit config
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @rd: true for read limit; false for write limit
+ * @limit: outstanding transaction limit
+ */
+ void (*set_limit_conf)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd, u32 limit);
+
+ /**
+ * get_limit_conf - get transaction limit config
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @rd: true for read limit; false for write limit
+ * @return: outstanding transaction limit
+ */
+ u32 (*get_limit_conf)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool rd);
+
+ /**
+ * set_halt_ctrl - set halt control
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @enable: halt control enable
+ */
+ void (*set_halt_ctrl)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, bool enable);
+
+ /**
+ * get_halt_ctrl - get halt control
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @return: halt control enable
+ */
+ bool (*get_halt_ctrl)(struct dpu_hw_vbif *vbif,
+ u32 xin_id);
+
+ /**
+ * set_qos_remap - set QoS priority remap
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @level: priority level
+ * @remap_level: remapped level
+ */
+ void (*set_qos_remap)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 level, u32 remap_level);
+
+ /**
+ * set_mem_type - set memory type
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ * @value: memory type value
+ */
+ void (*set_mem_type)(struct dpu_hw_vbif *vbif,
+ u32 xin_id, u32 value);
+
+ /**
+ * clear_errors - clear any vbif errors
+ * This function clears any detected pending/source errors
+ * on the VBIF interface, and optionally returns the detected
+ * error mask(s).
+ * @vbif: vbif context driver
+ * @pnd_errors: pointer to pending error reporting variable
+ * @src_errors: pointer to source error reporting variable
+ */
+ void (*clear_errors)(struct dpu_hw_vbif *vbif,
+ u32 *pnd_errors, u32 *src_errors);
+
+ /**
+ * set_write_gather_en - set write_gather enable
+ * @vbif: vbif context driver
+ * @xin_id: client interface identifier
+ */
+ void (*set_write_gather_en)(struct dpu_hw_vbif *vbif, u32 xin_id);
+};
+
+struct dpu_hw_vbif {
+ /* base */
+ struct dpu_hw_blk_reg_map hw;
+
+ /* vbif */
+ enum dpu_vbif idx;
+ const struct dpu_vbif_cfg *cap;
+
+ /* ops */
+ struct dpu_hw_vbif_ops ops;
+};
+
+/**
+ * dpu_hw_vbif_init - initializes the vbif driver for the passed interface idx
+ * @idx: Interface index for which driver object is required
+ * @addr: Mapped register io address of MDSS
+ * @m: Pointer to mdss catalog data
+ */
+struct dpu_hw_vbif *dpu_hw_vbif_init(enum dpu_vbif idx,
+ void __iomem *addr,
+ const struct dpu_mdss_cfg *m);
+
+void dpu_hw_vbif_destroy(struct dpu_hw_vbif *vbif);
+
+#endif /*_DPU_HW_VBIF_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
new file mode 100644
index 000000000000..5b2bc9b65b15
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_hwio.h
@@ -0,0 +1,56 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef _DPU_HWIO_H
+#define _DPU_HWIO_H
+
+#include "dpu_hw_util.h"
+
+/*
+ * MDP TOP block register offsets, bit fields and defines
+ */
+#define DISP_INTF_SEL 0x004
+#define INTR_EN 0x010
+#define INTR_STATUS 0x014
+#define INTR_CLEAR 0x018
+#define INTR2_EN 0x008
+#define INTR2_STATUS 0x00c
+#define INTR2_CLEAR 0x02c
+#define HIST_INTR_EN 0x01c
+#define HIST_INTR_STATUS 0x020
+#define HIST_INTR_CLEAR 0x024
+#define INTF_INTR_EN 0x1C0
+#define INTF_INTR_STATUS 0x1C4
+#define INTF_INTR_CLEAR 0x1C8
+#define SPLIT_DISPLAY_EN 0x2F4
+#define SPLIT_DISPLAY_UPPER_PIPE_CTRL 0x2F8
+#define DSPP_IGC_COLOR0_RAM_LUTN 0x300
+#define DSPP_IGC_COLOR1_RAM_LUTN 0x304
+#define DSPP_IGC_COLOR2_RAM_LUTN 0x308
+#define HW_EVENTS_CTL 0x37C
+#define CLK_CTRL3 0x3A8
+#define CLK_STATUS3 0x3AC
+#define CLK_CTRL4 0x3B0
+#define CLK_STATUS4 0x3B4
+#define CLK_CTRL5 0x3B8
+#define CLK_STATUS5 0x3BC
+#define CLK_CTRL7 0x3D0
+#define CLK_STATUS7 0x3D4
+#define SPLIT_DISPLAY_LOWER_PIPE_CTRL 0x3F0
+#define SPLIT_DISPLAY_TE_LINE_INTERVAL 0x3F4
+#define INTF_SW_RESET_MASK 0x3FC
+#define HDMI_DP_CORE_SELECT 0x408
+#define MDP_OUT_CTL_0 0x410
+#define MDP_VSYNC_SEL 0x414
+#define DCE_SEL 0x450
+
+#endif /*_DPU_HWIO_H */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
new file mode 100644
index 000000000000..790d39f816dc
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.c
@@ -0,0 +1,203 @@
+/* Copyright (c) 2012-2015, 2017-2018, The Linux Foundation.
+ * All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <linux/clk.h>
+#include <linux/clk/clk-conf.h>
+#include <linux/err.h>
+#include <linux/delay.h>
+
+#include "dpu_io_util.h"
+
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk)
+{
+ int i;
+
+ for (i = num_clk - 1; i >= 0; i--) {
+ if (clk_arry[i].clk)
+ clk_put(clk_arry[i].clk);
+ clk_arry[i].clk = NULL;
+ }
+}
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < num_clk; i++) {
+ clk_arry[i].clk = clk_get(dev, clk_arry[i].clk_name);
+ rc = PTR_ERR_OR_ZERO(clk_arry[i].clk);
+ if (rc) {
+ DEV_ERR("%pS->%s: '%s' get failed. rc=%d\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name, rc);
+ goto error;
+ }
+ }
+
+ return rc;
+
+error:
+ for (i--; i >= 0; i--) {
+ if (clk_arry[i].clk)
+ clk_put(clk_arry[i].clk);
+ clk_arry[i].clk = NULL;
+ }
+
+ return rc;
+}
+
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk)
+{
+ int i, rc = 0;
+
+ for (i = 0; i < num_clk; i++) {
+ if (clk_arry[i].clk) {
+ if (clk_arry[i].type != DSS_CLK_AHB) {
+ DEV_DBG("%pS->%s: '%s' rate %ld\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name,
+ clk_arry[i].rate);
+ rc = clk_set_rate(clk_arry[i].clk,
+ clk_arry[i].rate);
+ if (rc) {
+ DEV_ERR("%pS->%s: %s failed. rc=%d\n",
+ __builtin_return_address(0),
+ __func__,
+ clk_arry[i].clk_name, rc);
+ break;
+ }
+ }
+ } else {
+ DEV_ERR("%pS->%s: '%s' is not available\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ rc = -EPERM;
+ break;
+ }
+ }
+
+ return rc;
+}
+
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable)
+{
+ int i, rc = 0;
+
+ if (enable) {
+ for (i = 0; i < num_clk; i++) {
+ DEV_DBG("%pS->%s: enable '%s'\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ if (clk_arry[i].clk) {
+ rc = clk_prepare_enable(clk_arry[i].clk);
+ if (rc)
+ DEV_ERR("%pS->%s: %s en fail. rc=%d\n",
+ __builtin_return_address(0),
+ __func__,
+ clk_arry[i].clk_name, rc);
+ } else {
+ DEV_ERR("%pS->%s: '%s' is not available\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ rc = -EPERM;
+ }
+
+ if (rc) {
+ msm_dss_enable_clk(&clk_arry[i],
+ i, false);
+ break;
+ }
+ }
+ } else {
+ for (i = num_clk - 1; i >= 0; i--) {
+ DEV_DBG("%pS->%s: disable '%s'\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+
+ if (clk_arry[i].clk)
+ clk_disable_unprepare(clk_arry[i].clk);
+ else
+ DEV_ERR("%pS->%s: '%s' is not available\n",
+ __builtin_return_address(0), __func__,
+ clk_arry[i].clk_name);
+ }
+ }
+
+ return rc;
+}
+
+int msm_dss_parse_clock(struct platform_device *pdev,
+ struct dss_module_power *mp)
+{
+ int i, rc = 0;
+ const char *clock_name;
+ int num_clk = 0;
+
+ if (!pdev || !mp)
+ return -EINVAL;
+
+ mp->num_clk = 0;
+ num_clk = of_property_count_strings(pdev->dev.of_node, "clock-names");
+ if (num_clk <= 0) {
+ pr_debug("clocks are not defined\n");
+ return 0;
+ }
+
+ mp->clk_config = devm_kzalloc(&pdev->dev,
+ sizeof(struct dss_clk) * num_clk,
+ GFP_KERNEL);
+ if (!mp->clk_config)
+ return -ENOMEM;
+
+ for (i = 0; i < num_clk; i++) {
+ rc = of_property_read_string_index(pdev->dev.of_node,
+ "clock-names", i,
+ &clock_name);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to get clock name for %d\n",
+ i);
+ break;
+ }
+ strlcpy(mp->clk_config[i].clk_name, clock_name,
+ sizeof(mp->clk_config[i].clk_name));
+
+ mp->clk_config[i].type = DSS_CLK_AHB;
+ }
+
+ rc = msm_dss_get_clk(&pdev->dev, mp->clk_config, num_clk);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to get clock refs %d\n", rc);
+ goto err;
+ }
+
+ rc = of_clk_set_defaults(pdev->dev.of_node, false);
+ if (rc) {
+ dev_err(&pdev->dev, "Failed to set clock defaults %d\n", rc);
+ goto err;
+ }
+
+ for (i = 0; i < num_clk; i++) {
+ u32 rate = clk_get_rate(mp->clk_config[i].clk);
+ if (!rate)
+ continue;
+ mp->clk_config[i].rate = rate;
+ mp->clk_config[i].type = DSS_CLK_PCLK;
+ }
+
+ mp->num_clk = num_clk;
+ return 0;
+
+err:
+ msm_dss_put_clk(mp->clk_config, num_clk);
+ return rc;
+}
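A sketch (not part of the patch) of the probe-time flow these helpers are designed for; `mp` is assumed to be a dss_module_power embedded in the caller's driver state. msm_dss_parse_clock() already takes the clock references, so teardown pairs a disable with msm_dss_put_clk():

	int rc;

	rc = msm_dss_parse_clock(pdev, mp);	/* fills mp->clk_config */
	if (rc)
		return rc;

	rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
	if (rc)
		goto err_put;

	/* ... hardware access ... */

	msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
err_put:
	msm_dss_put_clk(mp->clk_config, mp->num_clk);
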
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h
new file mode 100644
index 000000000000..bc07381d7429
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_io_util.h
@@ -0,0 +1,57 @@
+/* Copyright (c) 2012, 2017-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_IO_UTIL_H__
+#define __DPU_IO_UTIL_H__
+
+#include <linux/gpio.h>
+#include <linux/platform_device.h>
+#include <linux/types.h>
+
+#define DEV_DBG(fmt, args...) pr_debug(fmt, ##args)
+#define DEV_INFO(fmt, args...) pr_info(fmt, ##args)
+#define DEV_WARN(fmt, args...) pr_warn(fmt, ##args)
+#define DEV_ERR(fmt, args...) pr_err(fmt, ##args)
+
+struct dss_gpio {
+ unsigned int gpio;
+ unsigned int value;
+ char gpio_name[32];
+};
+
+enum dss_clk_type {
+ DSS_CLK_AHB, /* no set_rate; rate controlled through RPM */
+ DSS_CLK_PCLK,
+};
+
+struct dss_clk {
+ struct clk *clk; /* clk handle */
+ char clk_name[32];
+ enum dss_clk_type type;
+ unsigned long rate;
+ unsigned long max_rate;
+};
+
+struct dss_module_power {
+ unsigned int num_gpio;
+ struct dss_gpio *gpio_config;
+ unsigned int num_clk;
+ struct dss_clk *clk_config;
+};
+
+int msm_dss_get_clk(struct device *dev, struct dss_clk *clk_arry, int num_clk);
+void msm_dss_put_clk(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_clk_set_rate(struct dss_clk *clk_arry, int num_clk);
+int msm_dss_enable_clk(struct dss_clk *clk_arry, int num_clk, int enable);
+int msm_dss_parse_clock(struct platform_device *pdev,
+ struct dss_module_power *mp);
+#endif /* __DPU_IO_UTIL_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
new file mode 100644
index 000000000000..d5e6ce0140cf
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.c
@@ -0,0 +1,66 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/irqdomain.h>
+#include <linux/irq.h>
+#include <linux/kthread.h>
+
+#include "dpu_irq.h"
+#include "dpu_core_irq.h"
+
+irqreturn_t dpu_irq(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+ return dpu_core_irq(dpu_kms);
+}
+
+void dpu_irq_preinstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+ if (!dpu_kms->dev || !dpu_kms->dev->dev) {
+ pr_err("invalid device handles\n");
+ return;
+ }
+
+ dpu_core_irq_preinstall(dpu_kms);
+}
+
+int dpu_irq_postinstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ int rc;
+
+ if (!kms) {
+ DPU_ERROR("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ rc = dpu_core_irq_postinstall(dpu_kms);
+
+ return rc;
+}
+
+void dpu_irq_uninstall(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+
+ if (!kms) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ dpu_core_irq_uninstall(dpu_kms);
+}
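These are thin wrappers so the common msm KMS layer can drive the DPU interrupt core. A hedged sketch (not part of the patch) of the expected wiring, using the msm_kms_funcs field names as they appear elsewhere in the msm driver:

	static const struct msm_kms_funcs kms_funcs = {
		.hw_init         = dpu_kms_hw_init,
		.irq_preinstall  = dpu_irq_preinstall,
		.irq_postinstall = dpu_irq_postinstall,
		.irq_uninstall   = dpu_irq_uninstall,
		.irq             = dpu_irq,
		/* ... remaining kms callbacks ... */
	};
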
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
new file mode 100644
index 000000000000..3e147f7176e2
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_irq.h
@@ -0,0 +1,59 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_IRQ_H__
+#define __DPU_IRQ_H__
+
+#include <linux/kernel.h>
+#include <linux/irqdomain.h>
+
+#include "msm_kms.h"
+
+/**
+ * struct dpu_irq_controller - MDSS-level interrupt controller context
+ * @enabled_mask: enable status of MDSS level interrupt
+ * @domain: interrupt domain of this controller
+ */
+struct dpu_irq_controller {
+ unsigned long enabled_mask;
+ struct irq_domain *domain;
+};
+
+/**
+ * dpu_irq_preinstall - perform pre-installation of MDSS IRQ handler
+ * @kms: pointer to kms context
+ * @return: none
+ */
+void dpu_irq_preinstall(struct msm_kms *kms);
+
+/**
+ * dpu_irq_postinstall - perform post-installation of MDSS IRQ handler
+ * @kms: pointer to kms context
+ * @return: 0 if success; error code otherwise
+ */
+int dpu_irq_postinstall(struct msm_kms *kms);
+
+/**
+ * dpu_irq_uninstall - uninstall MDSS IRQ handler
+ * @kms: pointer to kms context
+ * @return: none
+ */
+void dpu_irq_uninstall(struct msm_kms *kms);
+
+/**
+ * dpu_irq - MDSS level IRQ handler
+ * @kms: pointer to kms context
+ * @return: interrupt handling status
+ */
+irqreturn_t dpu_irq(struct msm_kms *kms);
+
+#endif /* __DPU_IRQ_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
new file mode 100644
index 000000000000..7dd6bd2d6d37
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.c
@@ -0,0 +1,1345 @@
+/*
+ * Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <drm/drm_crtc.h>
+#include <linux/debugfs.h>
+#include <linux/of_irq.h>
+#include <linux/dma-buf.h>
+
+#include "msm_drv.h"
+#include "msm_mmu.h"
+#include "msm_gem.h"
+
+#include "dpu_kms.h"
+#include "dpu_core_irq.h"
+#include "dpu_formats.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_vbif.h"
+#include "dpu_encoder.h"
+#include "dpu_plane.h"
+#include "dpu_crtc.h"
+
+#define CREATE_TRACE_POINTS
+#include "dpu_trace.h"
+
+static const char * const iommu_ports[] = {
+ "mdp_0",
+};
+
+/*
+ * To enable overall DRM driver logging
+ * # echo 0x2 > /sys/module/drm/parameters/debug
+ *
+ * To enable DRM driver h/w logging
+ * # echo <mask> > /sys/kernel/debug/dri/0/debug/hw_log_mask
+ *
+ * See dpu_hw_mdss.h for h/w logging mask definitions (search for DPU_DBG_MASK_)
+ */
+#define DPU_DEBUGFS_DIR "msm_dpu"
+#define DPU_DEBUGFS_HWMASKNAME "hw_log_mask"
+
+static int dpu_kms_hw_init(struct msm_kms *kms);
+static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms);
+
+static unsigned long dpu_iomap_size(struct platform_device *pdev,
+ const char *name)
+{
+ struct resource *res;
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, name);
+ if (!res) {
+ DRM_ERROR("failed to get memory resource: %s\n", name);
+ return 0;
+ }
+
+ return resource_size(res);
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int _dpu_danger_signal_status(struct seq_file *s,
+ bool danger_status)
+{
+ struct dpu_kms *kms = (struct dpu_kms *)s->private;
+ struct msm_drm_private *priv;
+ struct dpu_danger_safe_status status;
+ int i;
+
+ if (!kms || !kms->dev || !kms->dev->dev_private || !kms->hw_mdp) {
+ DPU_ERROR("invalid arg(s)\n");
+ return 0;
+ }
+
+ priv = kms->dev->dev_private;
+ memset(&status, 0, sizeof(struct dpu_danger_safe_status));
+
+ pm_runtime_get_sync(&kms->pdev->dev);
+ if (danger_status) {
+ seq_puts(s, "\nDanger signal status:\n");
+ if (kms->hw_mdp->ops.get_danger_status)
+ kms->hw_mdp->ops.get_danger_status(kms->hw_mdp,
+ &status);
+ } else {
+ seq_puts(s, "\nSafe signal status:\n");
+ if (kms->hw_mdp->ops.get_safe_status)
+ kms->hw_mdp->ops.get_safe_status(kms->hw_mdp,
+ &status);
+ }
+ pm_runtime_put_sync(&kms->pdev->dev);
+
+ seq_printf(s, "MDP : 0x%x\n", status.mdp);
+
+ for (i = SSPP_VIG0; i < SSPP_MAX; i++)
+ seq_printf(s, "SSPP%d : 0x%x \t", i - SSPP_VIG0,
+ status.sspp[i]);
+ seq_puts(s, "\n");
+
+ return 0;
+}
+
+#define DEFINE_DPU_DEBUGFS_SEQ_FOPS(__prefix) \
+static int __prefix ## _open(struct inode *inode, struct file *file) \
+{ \
+ return single_open(file, __prefix ## _show, inode->i_private); \
+} \
+static const struct file_operations __prefix ## _fops = { \
+ .owner = THIS_MODULE, \
+ .open = __prefix ## _open, \
+ .release = single_release, \
+ .read = seq_read, \
+ .llseek = seq_lseek, \
+}
+
+static int dpu_debugfs_danger_stats_show(struct seq_file *s, void *v)
+{
+ return _dpu_danger_signal_status(s, true);
+}
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_danger_stats);
+
+static int dpu_debugfs_safe_stats_show(struct seq_file *s, void *v)
+{
+ return _dpu_danger_signal_status(s, false);
+}
+DEFINE_DPU_DEBUGFS_SEQ_FOPS(dpu_debugfs_safe_stats);
+
+static void dpu_debugfs_danger_destroy(struct dpu_kms *dpu_kms)
+{
+ debugfs_remove_recursive(dpu_kms->debugfs_danger);
+ dpu_kms->debugfs_danger = NULL;
+}
+
+static int dpu_debugfs_danger_init(struct dpu_kms *dpu_kms,
+ struct dentry *parent)
+{
+ dpu_kms->debugfs_danger = debugfs_create_dir("danger",
+ parent);
+ if (!dpu_kms->debugfs_danger) {
+ DPU_ERROR("failed to create danger debugfs\n");
+ return -EINVAL;
+ }
+
+ debugfs_create_file("danger_status", 0600, dpu_kms->debugfs_danger,
+ dpu_kms, &dpu_debugfs_danger_stats_fops);
+ debugfs_create_file("safe_status", 0600, dpu_kms->debugfs_danger,
+ dpu_kms, &dpu_debugfs_safe_stats_fops);
+
+ return 0;
+}
+
+static int _dpu_debugfs_show_regset32(struct seq_file *s, void *data)
+{
+ struct dpu_debugfs_regset32 *regset;
+ struct dpu_kms *dpu_kms;
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
+ void __iomem *base;
+ uint32_t i, addr;
+
+ if (!s || !s->private)
+ return 0;
+
+ regset = s->private;
+
+ dpu_kms = regset->dpu_kms;
+ if (!dpu_kms || !dpu_kms->mmio)
+ return 0;
+
+ dev = dpu_kms->dev;
+ if (!dev)
+ return 0;
+
+ priv = dev->dev_private;
+ if (!priv)
+ return 0;
+
+ base = dpu_kms->mmio + regset->offset;
+
+ /* insert padding spaces, if needed */
+ if (regset->offset & 0xF) {
+ seq_printf(s, "[%x]", regset->offset & ~0xF);
+ for (i = 0; i < (regset->offset & 0xF); i += 4)
+ seq_puts(s, " ");
+ }
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+ /* main register output */
+ for (i = 0; i < regset->blk_len; i += 4) {
+ addr = regset->offset + i;
+ if ((addr & 0xF) == 0x0)
+ seq_printf(s, i ? "\n[%x]" : "[%x]", addr);
+ seq_printf(s, " %08x", readl_relaxed(base + i));
+ }
+ seq_puts(s, "\n");
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ return 0;
+}
+
+static int dpu_debugfs_open_regset32(struct inode *inode,
+ struct file *file)
+{
+ return single_open(file, _dpu_debugfs_show_regset32, inode->i_private);
+}
+
+static const struct file_operations dpu_fops_regset32 = {
+ .open = dpu_debugfs_open_regset32,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = single_release,
+};
+
+void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
+ uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms)
+{
+ if (regset) {
+ regset->offset = offset;
+ regset->blk_len = length;
+ regset->dpu_kms = dpu_kms;
+ }
+}
+
+void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
+ void *parent, struct dpu_debugfs_regset32 *regset)
+{
+ if (!name || !regset || !regset->dpu_kms || !regset->blk_len)
+ return NULL;
+
+ /* make sure offset is a multiple of 4 */
+ regset->offset = round_down(regset->offset, 4);
+
+ return debugfs_create_file(name, mode, parent,
+ regset, &dpu_fops_regset32);
+}
+
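Together, the two helpers above expose a register range as a read-only debugfs node. A sketch (not part of the patch), assuming the regset32 struct is embedded in an object that outlives the debugfs entry; names are hypothetical:

	/* dump 0x100 bytes of a block at blk_off under debugfs */
	dpu_debugfs_setup_regset32(&obj->regset, blk_off, 0x100, dpu_kms);
	dpu_debugfs_create_regset32("regs", 0400, parent, &obj->regset);
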
+static int _dpu_debugfs_init(struct dpu_kms *dpu_kms)
+{
+ void *p;
+ int rc;
+
+ p = dpu_hw_util_get_log_mask_ptr();
+
+ if (!dpu_kms || !p)
+ return -EINVAL;
+
+ dpu_kms->debugfs_root = debugfs_create_dir("debug",
+ dpu_kms->dev->primary->debugfs_root);
+ if (IS_ERR_OR_NULL(dpu_kms->debugfs_root)) {
+ DRM_ERROR("debugfs create_dir failed %ld\n",
+ PTR_ERR(dpu_kms->debugfs_root));
+ return PTR_ERR(dpu_kms->debugfs_root);
+ }
+
+ rc = dpu_dbg_debugfs_register(dpu_kms->debugfs_root);
+ if (rc) {
+ DRM_ERROR("failed to reg dpu dbg debugfs: %d\n", rc);
+ return rc;
+ }
+
+ /* allow root to be NULL */
+ debugfs_create_x32(DPU_DEBUGFS_HWMASKNAME, 0600, dpu_kms->debugfs_root, p);
+
+ (void) dpu_debugfs_danger_init(dpu_kms, dpu_kms->debugfs_root);
+ (void) dpu_debugfs_vbif_init(dpu_kms, dpu_kms->debugfs_root);
+ (void) dpu_debugfs_core_irq_init(dpu_kms, dpu_kms->debugfs_root);
+
+ rc = dpu_core_perf_debugfs_init(&dpu_kms->perf, dpu_kms->debugfs_root);
+ if (rc) {
+ DPU_ERROR("failed to init perf %d\n", rc);
+ return rc;
+ }
+
+ return 0;
+}
+
+static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
+{
+ /* don't need to NULL check debugfs_root */
+ if (dpu_kms) {
+ dpu_debugfs_vbif_destroy(dpu_kms);
+ dpu_debugfs_danger_destroy(dpu_kms);
+ dpu_debugfs_core_irq_destroy(dpu_kms);
+ debugfs_remove_recursive(dpu_kms->debugfs_root);
+ }
+}
+#else
+static void _dpu_debugfs_destroy(struct dpu_kms *dpu_kms)
+{
+}
+#endif
+
+static int dpu_kms_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ return dpu_crtc_vblank(crtc, true);
+}
+
+static void dpu_kms_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc)
+{
+ dpu_crtc_vblank(crtc, false);
+}
+
+static void dpu_kms_prepare_commit(struct msm_kms *kms,
+ struct drm_atomic_state *state)
+{
+ struct dpu_kms *dpu_kms;
+ struct msm_drm_private *priv;
+ struct drm_device *dev;
+ struct drm_encoder *encoder;
+
+ if (!kms)
+ return;
+ dpu_kms = to_dpu_kms(kms);
+ dev = dpu_kms->dev;
+
+ if (!dev || !dev->dev_private)
+ return;
+ priv = dev->dev_private;
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head)
+ if (encoder->crtc != NULL)
+ dpu_encoder_prepare_commit(encoder);
+}
+
+/*
+ * Override the encoder enable since we need to setup the inline rotator and do
+ * some crtc magic before enabling any bridge that might be present.
+ */
+void dpu_kms_encoder_enable(struct drm_encoder *encoder)
+{
+ const struct drm_encoder_helper_funcs *funcs = encoder->helper_private;
+ struct drm_crtc *crtc = encoder->crtc;
+
+ /* Forward this enable call to the commit hook */
+ if (funcs && funcs->commit)
+ funcs->commit(encoder);
+
+ if (crtc && crtc->state->active) {
+ trace_dpu_kms_enc_enable(DRMID(crtc));
+ dpu_crtc_commit_kickoff(crtc);
+ }
+}
+
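+/*
+ * Illustrative wiring sketch (my_encoder_commit is hypothetical, not part of
+ * this patch): an encoder's helper funcs can route .enable through the
+ * override above while keeping its own commit hook, which the override then
+ * forwards to:
+ *
+ *	static const struct drm_encoder_helper_funcs enc_helper_funcs = {
+ *		.commit = my_encoder_commit,
+ *		.enable = dpu_kms_encoder_enable,
+ *	};
+ */
+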
+static void dpu_kms_commit(struct msm_kms *kms, struct drm_atomic_state *state)
+{
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ int i;
+
+ for_each_new_crtc_in_state(state, crtc, crtc_state, i) {
+ /* If modeset is required, kickoff is run in encoder_enable */
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
+ continue;
+
+ if (crtc->state->active) {
+ trace_dpu_kms_commit(DRMID(crtc));
+ dpu_crtc_commit_kickoff(crtc);
+ }
+ }
+}
+
+static void dpu_kms_complete_commit(struct msm_kms *kms,
+ struct drm_atomic_state *old_state)
+{
+ struct dpu_kms *dpu_kms;
+ struct msm_drm_private *priv;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *old_crtc_state;
+ int i;
+
+ if (!kms || !old_state)
+ return;
+ dpu_kms = to_dpu_kms(kms);
+
+ if (!dpu_kms->dev || !dpu_kms->dev->dev_private)
+ return;
+ priv = dpu_kms->dev->dev_private;
+
+ DPU_ATRACE_BEGIN("kms_complete_commit");
+
+ for_each_old_crtc_in_state(old_state, crtc, old_crtc_state, i)
+ dpu_crtc_complete_commit(crtc, old_crtc_state);
+
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ DPU_ATRACE_END("kms_complete_commit");
+}
+
+static void dpu_kms_wait_for_commit_done(struct msm_kms *kms,
+ struct drm_crtc *crtc)
+{
+ struct drm_encoder *encoder;
+ struct drm_device *dev;
+ int ret;
+
+ if (!kms || !crtc || !crtc->state) {
+ DPU_ERROR("invalid params\n");
+ return;
+ }
+
+ dev = crtc->dev;
+
+ if (!crtc->state->enable) {
+ DPU_DEBUG("[crtc:%d] not enable\n", crtc->base.id);
+ return;
+ }
+
+ if (!crtc->state->active) {
+ DPU_DEBUG("[crtc:%d] not active\n", crtc->base.id);
+ return;
+ }
+
+ list_for_each_entry(encoder, &dev->mode_config.encoder_list, head) {
+ if (encoder->crtc != crtc)
+ continue;
+ /*
+ * Wait for post-flush if necessary to delay before
+ * plane_cleanup. For example, wait for vsync in case of video
+ * mode panels. This may be a no-op for command mode panels.
+ */
+ trace_dpu_kms_wait_for_commit_done(DRMID(crtc));
+ ret = dpu_encoder_wait_for_event(encoder, MSM_ENC_COMMIT_DONE);
+ if (ret && ret != -EWOULDBLOCK) {
+ DPU_ERROR("wait for commit done returned %d\n", ret);
+ break;
+ }
+ }
+}
+
+static void _dpu_kms_initialize_dsi(struct drm_device *dev,
+ struct msm_drm_private *priv,
+ struct dpu_kms *dpu_kms)
+{
+ struct drm_encoder *encoder = NULL;
+ int i, rc;
+
+ /*TODO: Support two independent DSI connectors */
+ encoder = dpu_encoder_init(dev, DRM_MODE_CONNECTOR_DSI);
+ if (IS_ERR_OR_NULL(encoder)) {
+ DPU_ERROR("encoder init failed for dsi display\n");
+ return;
+ }
+
+ priv->encoders[priv->num_encoders++] = encoder;
+
+ for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+ if (!priv->dsi[i]) {
+ DPU_DEBUG("invalid msm_dsi for ctrl %d\n", i);
+ return;
+ }
+
+ rc = msm_dsi_modeset_init(priv->dsi[i], dev, encoder);
+ if (rc) {
+ DPU_ERROR("modeset_init failed for dsi[%d], rc = %d\n",
+ i, rc);
+ continue;
+ }
+ }
+}
+
+/**
+ * _dpu_kms_setup_displays - create encoders, bridges and connectors
+ * for underlying displays
+ * @dev: Pointer to drm device structure
+ * @priv: Pointer to private drm device data
+ * @dpu_kms: Pointer to dpu kms structure
+ */
+static void _dpu_kms_setup_displays(struct drm_device *dev,
+ struct msm_drm_private *priv,
+ struct dpu_kms *dpu_kms)
+{
+ _dpu_kms_initialize_dsi(dev, priv, dpu_kms);
+
+ /*
+ * Extend this function to initialize other
+ * types of displays
+ */
+}
+
+static void _dpu_kms_drm_obj_destroy(struct dpu_kms *dpu_kms)
+{
+ struct msm_drm_private *priv;
+ int i;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return;
+ } else if (!dpu_kms->dev) {
+ DPU_ERROR("invalid dev\n");
+ return;
+ } else if (!dpu_kms->dev->dev_private) {
+ DPU_ERROR("invalid dev_private\n");
+ return;
+ }
+ priv = dpu_kms->dev->dev_private;
+
+ for (i = 0; i < priv->num_crtcs; i++)
+ priv->crtcs[i]->funcs->destroy(priv->crtcs[i]);
+ priv->num_crtcs = 0;
+
+ for (i = 0; i < priv->num_planes; i++)
+ priv->planes[i]->funcs->destroy(priv->planes[i]);
+ priv->num_planes = 0;
+
+ for (i = 0; i < priv->num_connectors; i++)
+ priv->connectors[i]->funcs->destroy(priv->connectors[i]);
+ priv->num_connectors = 0;
+
+ for (i = 0; i < priv->num_encoders; i++)
+ priv->encoders[i]->funcs->destroy(priv->encoders[i]);
+ priv->num_encoders = 0;
+}
+
+static int _dpu_kms_drm_obj_init(struct dpu_kms *dpu_kms)
+{
+ struct drm_device *dev;
+ struct drm_plane *primary_planes[MAX_PLANES], *plane;
+ struct drm_crtc *crtc;
+
+ struct msm_drm_private *priv;
+ struct dpu_mdss_cfg *catalog;
+
+ int primary_planes_idx = 0, i, ret;
+ int max_crtc_count;
+
+ if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return -EINVAL;
+ }
+
+ dev = dpu_kms->dev;
+ priv = dev->dev_private;
+ catalog = dpu_kms->catalog;
+
+ /*
+ * Create encoder and query display drivers to create
+ * bridges and connectors
+ */
+ _dpu_kms_setup_displays(dev, priv, dpu_kms);
+
+ max_crtc_count = min(catalog->mixer_count, priv->num_encoders);
+
+ /* Create the planes */
+ for (i = 0; i < catalog->sspp_count; i++) {
+ bool primary = true;
+
+ if (catalog->sspp[i].features & BIT(DPU_SSPP_CURSOR)
+ || primary_planes_idx >= max_crtc_count)
+ primary = false;
+
+ plane = dpu_plane_init(dev, catalog->sspp[i].id, primary,
+ (1UL << max_crtc_count) - 1, 0);
+ if (IS_ERR(plane)) {
+ DPU_ERROR("dpu_plane_init failed\n");
+ ret = PTR_ERR(plane);
+ goto fail;
+ }
+ priv->planes[priv->num_planes++] = plane;
+
+ if (primary)
+ primary_planes[primary_planes_idx++] = plane;
+ }
+
+ max_crtc_count = min(max_crtc_count, primary_planes_idx);
+
+ /* Create one CRTC per encoder */
+ for (i = 0; i < max_crtc_count; i++) {
+ crtc = dpu_crtc_init(dev, primary_planes[i]);
+ if (IS_ERR(crtc)) {
+ ret = PTR_ERR(crtc);
+ goto fail;
+ }
+ priv->crtcs[priv->num_crtcs++] = crtc;
+ }
+
+ /* All CRTCs are compatible with all encoders */
+ for (i = 0; i < priv->num_encoders; i++)
+ priv->encoders[i]->possible_crtcs = (1 << priv->num_crtcs) - 1;
+
+ return 0;
+fail:
+ _dpu_kms_drm_obj_destroy(dpu_kms);
+ return ret;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static int dpu_kms_debugfs_init(struct msm_kms *kms, struct drm_minor *minor)
+{
+ struct dpu_kms *dpu_kms = to_dpu_kms(kms);
+ struct drm_device *dev;
+ int rc;
+
+ if (!dpu_kms || !dpu_kms->dev || !dpu_kms->dev->dev) {
+ DPU_ERROR("invalid dpu_kms\n");
+ return -EINVAL;
+ }
+
+ dev = dpu_kms->dev;
+
+ rc = _dpu_debugfs_init(dpu_kms);
+ if (rc)
+ DPU_ERROR("dpu_debugfs init failed: %d\n", rc);
+
+ return rc;
+}
+#endif
+
+static long dpu_kms_round_pixclk(struct msm_kms *kms, unsigned long rate,
+ struct drm_encoder *encoder)
+{
+ return rate;
+}
+
+static void _dpu_kms_hw_destroy(struct dpu_kms *dpu_kms)
+{
+ struct drm_device *dev;
+ int i;
+
+ dev = dpu_kms->dev;
+ if (!dev)
+ return;
+
+ if (dpu_kms->hw_intr)
+ dpu_hw_intr_destroy(dpu_kms->hw_intr);
+ dpu_kms->hw_intr = NULL;
+
+ if (dpu_kms->power_event)
+ dpu_power_handle_unregister_event(
+ &dpu_kms->phandle, dpu_kms->power_event);
+
+ /* safe to call these more than once during shutdown */
+ _dpu_debugfs_destroy(dpu_kms);
+ _dpu_kms_mmu_destroy(dpu_kms);
+
+ if (dpu_kms->catalog) {
+ for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+ u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
+
+ if ((vbif_idx < VBIF_MAX) && dpu_kms->hw_vbif[vbif_idx])
+ dpu_hw_vbif_destroy(dpu_kms->hw_vbif[vbif_idx]);
+ }
+ }
+
+ if (dpu_kms->rm_init)
+ dpu_rm_destroy(&dpu_kms->rm);
+ dpu_kms->rm_init = false;
+
+ if (dpu_kms->catalog)
+ dpu_hw_catalog_deinit(dpu_kms->catalog);
+ dpu_kms->catalog = NULL;
+
+ if (dpu_kms->core_client)
+ dpu_power_client_destroy(&dpu_kms->phandle,
+ dpu_kms->core_client);
+ dpu_kms->core_client = NULL;
+
+ if (dpu_kms->vbif[VBIF_NRT])
+ devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_NRT]);
+ dpu_kms->vbif[VBIF_NRT] = NULL;
+
+ if (dpu_kms->vbif[VBIF_RT])
+ devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->vbif[VBIF_RT]);
+ dpu_kms->vbif[VBIF_RT] = NULL;
+
+ if (dpu_kms->mmio)
+ devm_iounmap(&dpu_kms->pdev->dev, dpu_kms->mmio);
+ dpu_kms->mmio = NULL;
+}
+
+static void dpu_kms_destroy(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms;
+
+ if (!kms) {
+ DPU_ERROR("invalid kms\n");
+ return;
+ }
+
+ dpu_kms = to_dpu_kms(kms);
+
+ dpu_dbg_destroy();
+ _dpu_kms_hw_destroy(dpu_kms);
+}
+
+static int dpu_kms_pm_suspend(struct device *dev)
+{
+ struct drm_device *ddev;
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_atomic_state *state;
+ struct drm_crtc *crtc;
+ struct drm_crtc_state *crtc_state;
+ struct dpu_kms *dpu_kms;
+ int ret = 0, num_crtcs = 0;
+
+ if (!dev)
+ return -EINVAL;
+
+ ddev = dev_get_drvdata(dev);
+ if (!ddev || !ddev_to_msm_kms(ddev))
+ return -EINVAL;
+
+ dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
+
+ /* disable hot-plug polling */
+ drm_kms_helper_poll_disable(ddev);
+
+ /* acquire modeset lock(s) */
+ drm_modeset_acquire_init(&ctx, 0);
+
+retry:
+ DPU_ATRACE_BEGIN("kms_pm_suspend");
+
+ ret = drm_modeset_lock_all_ctx(ddev, &ctx);
+ if (ret)
+ goto unlock;
+
+ /* save current state for resume */
+ if (dpu_kms->suspend_state)
+ drm_atomic_state_put(dpu_kms->suspend_state);
+ dpu_kms->suspend_state = drm_atomic_helper_duplicate_state(ddev, &ctx);
+ if (IS_ERR_OR_NULL(dpu_kms->suspend_state)) {
+ DRM_ERROR("failed to back up suspend state\n");
+ dpu_kms->suspend_state = NULL;
+ goto unlock;
+ }
+
+ /* create atomic state to disable all CRTCs */
+ state = drm_atomic_state_alloc(ddev);
+ if (IS_ERR_OR_NULL(state)) {
+ DRM_ERROR("failed to allocate crtc disable state\n");
+ goto unlock;
+ }
+
+ state->acquire_ctx = &ctx;
+
+ /*
+ * Mark every currently-active CRTC as disabled in the new state;
+ * otherwise num_crtcs stays zero and the "disable all" commit
+ * below would never run.
+ */
+ drm_for_each_crtc(crtc, ddev) {
+ crtc_state = drm_atomic_get_crtc_state(state, crtc);
+ if (IS_ERR(crtc_state)) {
+ ret = PTR_ERR(crtc_state);
+ drm_atomic_state_put(state);
+ goto unlock;
+ }
+
+ if (crtc_state->active) {
+ crtc_state->active = false;
+ num_crtcs++;
+ }
+ }
+
+ /* check for nothing to do */
+ if (num_crtcs == 0) {
+ DRM_DEBUG("all crtcs are already in the off state\n");
+ drm_atomic_state_put(state);
+ goto suspended;
+ }
+
+ /* commit the "disable all" state */
+ ret = drm_atomic_commit(state);
+ if (ret < 0) {
+ DRM_ERROR("failed to disable crtcs, %d\n", ret);
+ drm_atomic_state_put(state);
+ goto unlock;
+ }
+
+suspended:
+ dpu_kms->suspend_block = true;
+
+unlock:
+ if (ret == -EDEADLK) {
+ drm_modeset_backoff(&ctx);
+ goto retry;
+ }
+ drm_modeset_drop_locks(&ctx);
+ drm_modeset_acquire_fini(&ctx);
+
+ DPU_ATRACE_END("kms_pm_suspend");
+ return 0;
+}
+
+static int dpu_kms_pm_resume(struct device *dev)
+{
+ struct drm_device *ddev;
+ struct dpu_kms *dpu_kms;
+ int ret;
+
+ if (!dev)
+ return -EINVAL;
+
+ ddev = dev_get_drvdata(dev);
+ if (!ddev || !ddev_to_msm_kms(ddev))
+ return -EINVAL;
+
+ dpu_kms = to_dpu_kms(ddev_to_msm_kms(ddev));
+
+ DPU_ATRACE_BEGIN("kms_pm_resume");
+
+ drm_mode_config_reset(ddev);
+
+ drm_modeset_lock_all(ddev);
+
+ dpu_kms->suspend_block = false;
+
+ if (dpu_kms->suspend_state) {
+ dpu_kms->suspend_state->acquire_ctx =
+ ddev->mode_config.acquire_ctx;
+ ret = drm_atomic_commit(dpu_kms->suspend_state);
+ if (ret < 0) {
+ DRM_ERROR("failed to restore state, %d\n", ret);
+ drm_atomic_state_put(dpu_kms->suspend_state);
+ }
+ dpu_kms->suspend_state = NULL;
+ }
+ drm_modeset_unlock_all(ddev);
+
+ /* enable hot-plug polling */
+ drm_kms_helper_poll_enable(ddev);
+
+ DPU_ATRACE_END("kms_pm_resume");
+ return 0;
+}
+
+static void _dpu_kms_set_encoder_mode(struct msm_kms *kms,
+ struct drm_encoder *encoder,
+ bool cmd_mode)
+{
+ struct msm_display_info info;
+ struct msm_drm_private *priv = encoder->dev->dev_private;
+ int i, rc = 0;
+
+ memset(&info, 0, sizeof(info));
+
+ info.intf_type = encoder->encoder_type;
+ info.capabilities = cmd_mode ? MSM_DISPLAY_CAP_CMD_MODE :
+ MSM_DISPLAY_CAP_VID_MODE;
+
+ /* TODO: No support for DSI swap */
+ for (i = 0; i < ARRAY_SIZE(priv->dsi); i++) {
+ if (priv->dsi[i]) {
+ info.h_tile_instance[info.num_of_h_tiles] = i;
+ info.num_of_h_tiles++;
+ }
+ }
+
+ rc = dpu_encoder_setup(encoder->dev, encoder, &info);
+ if (rc)
+ DPU_ERROR("failed to setup DPU encoder %d: rc:%d\n",
+ encoder->base.id, rc);
+}
+
+static const struct msm_kms_funcs kms_funcs = {
+ .hw_init = dpu_kms_hw_init,
+ .irq_preinstall = dpu_irq_preinstall,
+ .irq_postinstall = dpu_irq_postinstall,
+ .irq_uninstall = dpu_irq_uninstall,
+ .irq = dpu_irq,
+ .prepare_commit = dpu_kms_prepare_commit,
+ .commit = dpu_kms_commit,
+ .complete_commit = dpu_kms_complete_commit,
+ .wait_for_crtc_commit_done = dpu_kms_wait_for_commit_done,
+ .enable_vblank = dpu_kms_enable_vblank,
+ .disable_vblank = dpu_kms_disable_vblank,
+ .check_modified_format = dpu_format_check_modified_format,
+ .get_format = dpu_get_msm_format,
+ .round_pixclk = dpu_kms_round_pixclk,
+ .pm_suspend = dpu_kms_pm_suspend,
+ .pm_resume = dpu_kms_pm_resume,
+ .destroy = dpu_kms_destroy,
+ .set_encoder_mode = _dpu_kms_set_encoder_mode,
+#ifdef CONFIG_DEBUG_FS
+ .debugfs_init = dpu_kms_debugfs_init,
+#endif
+};
+
+/* the caller must enable the core clock before calling this function */
+static inline void _dpu_kms_core_hw_rev_init(struct dpu_kms *dpu_kms)
+{
+ dpu_kms->core_rev = readl_relaxed(dpu_kms->mmio + 0x0);
+}
+
+static int _dpu_kms_mmu_destroy(struct dpu_kms *dpu_kms)
+{
+ struct msm_mmu *mmu;
+
+ mmu = dpu_kms->base.aspace->mmu;
+
+ mmu->funcs->detach(mmu, (const char **)iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+ msm_gem_address_space_put(dpu_kms->base.aspace);
+
+ return 0;
+}
+
+static int _dpu_kms_mmu_init(struct dpu_kms *dpu_kms)
+{
+ struct iommu_domain *domain;
+ struct msm_gem_address_space *aspace;
+ int ret;
+
+ domain = iommu_domain_alloc(&platform_bus_type);
+ if (!domain)
+ return 0;
+
+ aspace = msm_gem_address_space_create(dpu_kms->dev->dev,
+ domain, "dpu1");
+ if (IS_ERR(aspace)) {
+ ret = PTR_ERR(aspace);
+ goto fail;
+ }
+
+ dpu_kms->base.aspace = aspace;
+
+ ret = aspace->mmu->funcs->attach(aspace->mmu, iommu_ports,
+ ARRAY_SIZE(iommu_ports));
+ if (ret) {
+ DPU_ERROR("failed to attach iommu %d\n", ret);
+ msm_gem_address_space_put(aspace);
+ goto fail;
+ }
+
+ return 0;
+fail:
+ _dpu_kms_mmu_destroy(dpu_kms);
+
+ return ret;
+}
+
+static struct dss_clk *_dpu_kms_get_clk(struct dpu_kms *dpu_kms,
+ char *clock_name)
+{
+ struct dss_module_power *mp = &dpu_kms->mp;
+ int i;
+
+ for (i = 0; i < mp->num_clk; i++) {
+ if (!strcmp(mp->clk_config[i].clk_name, clock_name))
+ return &mp->clk_config[i];
+ }
+
+ return NULL;
+}
+
+u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name)
+{
+ struct dss_clk *clk;
+
+ clk = _dpu_kms_get_clk(dpu_kms, clock_name);
+ if (!clk)
+ return -EINVAL;
+
+ return clk_get_rate(clk->clk);
+}
+
+static void dpu_kms_handle_power_event(u32 event_type, void *usr)
+{
+ struct dpu_kms *dpu_kms = usr;
+
+ if (!dpu_kms)
+ return;
+
+ if (event_type == DPU_POWER_EVENT_POST_ENABLE)
+ dpu_vbif_init_memtypes(dpu_kms);
+}
+
+static int dpu_kms_hw_init(struct msm_kms *kms)
+{
+ struct dpu_kms *dpu_kms;
+ struct drm_device *dev;
+ struct msm_drm_private *priv;
+ int i, rc = -EINVAL;
+
+ if (!kms) {
+ DPU_ERROR("invalid kms\n");
+ goto end;
+ }
+
+ dpu_kms = to_dpu_kms(kms);
+ dev = dpu_kms->dev;
+ if (!dev) {
+ DPU_ERROR("invalid device\n");
+ goto end;
+ }
+
+ rc = dpu_dbg_init(&dpu_kms->pdev->dev);
+ if (rc) {
+ DRM_ERROR("failed to init dpu dbg: %d\n", rc);
+ goto end;
+ }
+
+ priv = dev->dev_private;
+ if (!priv) {
+ DPU_ERROR("invalid private data\n");
+ goto dbg_destroy;
+ }
+
+ dpu_kms->mmio = msm_ioremap(dpu_kms->pdev, "mdp", "mdp");
+ if (IS_ERR(dpu_kms->mmio)) {
+ rc = PTR_ERR(dpu_kms->mmio);
+ DPU_ERROR("mdp register memory map failed: %d\n", rc);
+ dpu_kms->mmio = NULL;
+ goto error;
+ }
+ DRM_DEBUG("mapped dpu address space @%pK\n", dpu_kms->mmio);
+ dpu_kms->mmio_len = dpu_iomap_size(dpu_kms->pdev, "mdp");
+
+ dpu_kms->vbif[VBIF_RT] = msm_ioremap(dpu_kms->pdev, "vbif", "vbif");
+ if (IS_ERR(dpu_kms->vbif[VBIF_RT])) {
+ rc = PTR_ERR(dpu_kms->vbif[VBIF_RT]);
+ DPU_ERROR("vbif register memory map failed: %d\n", rc);
+ dpu_kms->vbif[VBIF_RT] = NULL;
+ goto error;
+ }
+ dpu_kms->vbif_len[VBIF_RT] = dpu_iomap_size(dpu_kms->pdev, "vbif");
+ dpu_kms->vbif[VBIF_NRT] = msm_ioremap(dpu_kms->pdev, "vbif_nrt", "vbif_nrt");
+ if (IS_ERR(dpu_kms->vbif[VBIF_NRT])) {
+ dpu_kms->vbif[VBIF_NRT] = NULL;
+ DPU_DEBUG("VBIF NRT is not defined");
+ } else {
+ dpu_kms->vbif_len[VBIF_NRT] = dpu_iomap_size(dpu_kms->pdev,
+ "vbif_nrt");
+ }
+
+ dpu_kms->reg_dma = msm_ioremap(dpu_kms->pdev, "regdma", "regdma");
+ if (IS_ERR(dpu_kms->reg_dma)) {
+ dpu_kms->reg_dma = NULL;
+ DPU_DEBUG("REG_DMA is not defined");
+ } else {
+ dpu_kms->reg_dma_len = dpu_iomap_size(dpu_kms->pdev, "regdma");
+ }
+
+ dpu_kms->core_client = dpu_power_client_create(&dpu_kms->phandle,
+ "core");
+ if (IS_ERR_OR_NULL(dpu_kms->core_client)) {
+ rc = PTR_ERR(dpu_kms->core_client);
+ if (!dpu_kms->core_client)
+ rc = -EINVAL;
+ DPU_ERROR("dpu power client create failed: %d\n", rc);
+ dpu_kms->core_client = NULL;
+ goto error;
+ }
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+
+ _dpu_kms_core_hw_rev_init(dpu_kms);
+
+ pr_info("dpu hardware revision:0x%x\n", dpu_kms->core_rev);
+
+ dpu_kms->catalog = dpu_hw_catalog_init(dpu_kms->core_rev);
+ if (IS_ERR_OR_NULL(dpu_kms->catalog)) {
+ rc = PTR_ERR(dpu_kms->catalog);
+ if (!dpu_kms->catalog)
+ rc = -EINVAL;
+ DPU_ERROR("catalog init failed: %d\n", rc);
+ dpu_kms->catalog = NULL;
+ goto power_error;
+ }
+
+ dpu_dbg_init_dbg_buses(dpu_kms->core_rev);
+
+ /*
+ * Now we need to read the HW catalog and initialize resources such as
+ * clocks, regulators, GDSC/MMAGIC, ioremap the register ranges etc
+ */
+ rc = _dpu_kms_mmu_init(dpu_kms);
+ if (rc) {
+ DPU_ERROR("dpu_kms_mmu_init failed: %d\n", rc);
+ goto power_error;
+ }
+
+ rc = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio,
+ dpu_kms->dev);
+ if (rc) {
+ DPU_ERROR("rm init failed: %d\n", rc);
+ goto power_error;
+ }
+
+ dpu_kms->rm_init = true;
+
+ dpu_kms->hw_mdp = dpu_rm_get_mdp(&dpu_kms->rm);
+ if (IS_ERR_OR_NULL(dpu_kms->hw_mdp)) {
+ rc = PTR_ERR(dpu_kms->hw_mdp);
+ if (!dpu_kms->hw_mdp)
+ rc = -EINVAL;
+ DPU_ERROR("failed to get hw_mdp: %d\n", rc);
+ dpu_kms->hw_mdp = NULL;
+ goto power_error;
+ }
+
+ for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+ u32 vbif_idx = dpu_kms->catalog->vbif[i].id;
+
+ dpu_kms->hw_vbif[vbif_idx] = dpu_hw_vbif_init(vbif_idx,
+ dpu_kms->vbif[vbif_idx], dpu_kms->catalog);
+ if (IS_ERR_OR_NULL(dpu_kms->hw_vbif[vbif_idx])) {
+ rc = PTR_ERR(dpu_kms->hw_vbif[vbif_idx]);
+ if (!dpu_kms->hw_vbif[vbif_idx])
+ rc = -EINVAL;
+ DPU_ERROR("failed to init vbif %d: %d\n", vbif_idx, rc);
+ dpu_kms->hw_vbif[vbif_idx] = NULL;
+ goto power_error;
+ }
+ }
+
+ rc = dpu_core_perf_init(&dpu_kms->perf, dev, dpu_kms->catalog,
+ &dpu_kms->phandle,
+ _dpu_kms_get_clk(dpu_kms, "core"));
+ if (rc) {
+ DPU_ERROR("failed to init perf %d\n", rc);
+ goto perf_err;
+ }
+
+ dpu_kms->hw_intr = dpu_hw_intr_init(dpu_kms->mmio, dpu_kms->catalog);
+ if (IS_ERR_OR_NULL(dpu_kms->hw_intr)) {
+ rc = PTR_ERR(dpu_kms->hw_intr);
+ DPU_ERROR("hw_intr init failed: %d\n", rc);
+ dpu_kms->hw_intr = NULL;
+ goto hw_intr_init_err;
+ }
+
+ /*
+ * _dpu_kms_drm_obj_init should create the DRM related objects
+ * i.e. CRTCs, planes, encoders, connectors and so forth
+ */
+ rc = _dpu_kms_drm_obj_init(dpu_kms);
+ if (rc) {
+ DPU_ERROR("modeset init failed: %d\n", rc);
+ goto drm_obj_init_err;
+ }
+
+ dev->mode_config.min_width = 0;
+ dev->mode_config.min_height = 0;
+
+ /*
+ * max crtc width is equal to the max mixer width * 2 and max height
+ * is 4K
+ */
+ dev->mode_config.max_width =
+ dpu_kms->catalog->caps->max_mixer_width * 2;
+ dev->mode_config.max_height = 4096;
+
+ /*
+ * Support format modifiers for compression etc.
+ */
+ dev->mode_config.allow_fb_modifiers = true;
+
+ /*
+ * Handle (re)initializations during power enable
+ */
+ dpu_kms_handle_power_event(DPU_POWER_EVENT_POST_ENABLE, dpu_kms);
+ dpu_kms->power_event = dpu_power_handle_register_event(
+ &dpu_kms->phandle,
+ DPU_POWER_EVENT_POST_ENABLE,
+ dpu_kms_handle_power_event, dpu_kms, "kms");
+
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+ return 0;
+
+drm_obj_init_err:
+ dpu_core_perf_destroy(&dpu_kms->perf);
+hw_intr_init_err:
+perf_err:
+power_error:
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+error:
+ _dpu_kms_hw_destroy(dpu_kms);
+dbg_destroy:
+ dpu_dbg_destroy();
+end:
+ return rc;
+}
+
+struct msm_kms *dpu_kms_init(struct drm_device *dev)
+{
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+ int irq;
+
+ if (!dev || !dev->dev_private) {
+ DPU_ERROR("drm device node invalid\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ priv = dev->dev_private;
+ dpu_kms = to_dpu_kms(priv->kms);
+
+ irq = irq_of_parse_and_map(dpu_kms->pdev->dev.of_node, 0);
+ if (!irq) {
+ /* irq_of_parse_and_map() returns 0 on failure, never negative */
+ DPU_ERROR("failed to get irq\n");
+ return ERR_PTR(-EINVAL);
+ }
+ dpu_kms->base.irq = irq;
+
+ return &dpu_kms->base;
+}
+
+static int dpu_bind(struct device *dev, struct device *master, void *data)
+{
+ struct drm_device *ddev = dev_get_drvdata(master);
+ struct platform_device *pdev = to_platform_device(dev);
+ struct msm_drm_private *priv = ddev->dev_private;
+ struct dpu_kms *dpu_kms;
+ struct dss_module_power *mp;
+ int ret = 0;
+
+ dpu_kms = devm_kzalloc(&pdev->dev, sizeof(*dpu_kms), GFP_KERNEL);
+ if (!dpu_kms)
+ return -ENOMEM;
+
+ mp = &dpu_kms->mp;
+ ret = msm_dss_parse_clock(pdev, mp);
+ if (ret) {
+ DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
+ return ret;
+ }
+
+ dpu_power_resource_init(pdev, &dpu_kms->phandle);
+
+ platform_set_drvdata(pdev, dpu_kms);
+
+ msm_kms_init(&dpu_kms->base, &kms_funcs);
+ dpu_kms->dev = ddev;
+ dpu_kms->pdev = pdev;
+
+ pm_runtime_enable(&pdev->dev);
+ dpu_kms->rpm_enabled = true;
+
+ priv->kms = &dpu_kms->base;
+ return ret;
+}
+
+static void dpu_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+ struct dss_module_power *mp = &dpu_kms->mp;
+
+ dpu_power_resource_deinit(pdev, &dpu_kms->phandle);
+ msm_dss_put_clk(mp->clk_config, mp->num_clk);
+ devm_kfree(&pdev->dev, mp->clk_config);
+ mp->num_clk = 0;
+
+ if (dpu_kms->rpm_enabled)
+ pm_runtime_disable(&pdev->dev);
+}
+
+static const struct component_ops dpu_ops = {
+ .bind = dpu_bind,
+ .unbind = dpu_unbind,
+};
+
+static int dpu_dev_probe(struct platform_device *pdev)
+{
+ return component_add(&pdev->dev, &dpu_ops);
+}
+
+static int dpu_dev_remove(struct platform_device *pdev)
+{
+ component_del(&pdev->dev, &dpu_ops);
+ return 0;
+}
+
+static int __maybe_unused dpu_runtime_suspend(struct device *dev)
+{
+ int rc = -1;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+ struct drm_device *ddev;
+ struct dss_module_power *mp = &dpu_kms->mp;
+
+ ddev = dpu_kms->dev;
+ if (!ddev) {
+ DPU_ERROR("invalid drm_device\n");
+ goto exit;
+ }
+
+ rc = dpu_power_resource_enable(&dpu_kms->phandle,
+ dpu_kms->core_client, false);
+ if (rc)
+ DPU_ERROR("resource disable failed: %d\n", rc);
+
+ rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
+ if (rc)
+ DPU_ERROR("clock disable failed rc:%d\n", rc);
+
+exit:
+ return rc;
+}
+
+static int __maybe_unused dpu_runtime_resume(struct device *dev)
+{
+ int rc = -1;
+ struct platform_device *pdev = to_platform_device(dev);
+ struct dpu_kms *dpu_kms = platform_get_drvdata(pdev);
+ struct drm_device *ddev;
+ struct dss_module_power *mp = &dpu_kms->mp;
+
+ ddev = dpu_kms->dev;
+ if (!ddev) {
+ DPU_ERROR("invalid drm_device\n");
+ goto exit;
+ }
+
+ rc = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+ if (rc) {
+ DPU_ERROR("clock enable failed rc:%d\n", rc);
+ goto exit;
+ }
+
+ rc = dpu_power_resource_enable(&dpu_kms->phandle,
+ dpu_kms->core_client, true);
+ if (rc)
+ DPU_ERROR("resource enable failed: %d\n", rc);
+
+exit:
+ return rc;
+}
+
+static const struct dev_pm_ops dpu_pm_ops = {
+ SET_RUNTIME_PM_OPS(dpu_runtime_suspend, dpu_runtime_resume, NULL)
+};
+
+static const struct of_device_id dpu_dt_match[] = {
+ { .compatible = "qcom,sdm845-dpu", },
+ {}
+};
+MODULE_DEVICE_TABLE(of, dpu_dt_match);
+
+static struct platform_driver dpu_driver = {
+ .probe = dpu_dev_probe,
+ .remove = dpu_dev_remove,
+ .driver = {
+ .name = "msm_dpu",
+ .of_match_table = dpu_dt_match,
+ .pm = &dpu_pm_ops,
+ },
+};
+
+void __init msm_dpu_register(void)
+{
+ platform_driver_register(&dpu_driver);
+}
+
+void __exit msm_dpu_unregister(void)
+{
+ platform_driver_unregister(&dpu_driver);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
new file mode 100644
index 000000000000..66d466628e2b
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_kms.h
@@ -0,0 +1,290 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef __DPU_KMS_H__
+#define __DPU_KMS_H__
+
+#include "msm_drv.h"
+#include "msm_kms.h"
+#include "msm_mmu.h"
+#include "msm_gem.h"
+#include "dpu_dbg.h"
+#include "dpu_hw_catalog.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_interrupts.h"
+#include "dpu_hw_top.h"
+#include "dpu_rm.h"
+#include "dpu_power_handle.h"
+#include "dpu_irq.h"
+#include "dpu_core_perf.h"
+
+#define DRMID(x) ((x) ? (x)->base.id : -1)
+
+/**
+ * DPU_DEBUG - macro for kms/plane/crtc/encoder/connector logs
+ * @fmt: Pointer to format string
+ */
+#define DPU_DEBUG(fmt, ...) \
+ do { \
+ if (unlikely(drm_debug & DRM_UT_KMS)) \
+ DRM_DEBUG(fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+/**
+ * DPU_DEBUG_DRIVER - macro for hardware driver logging
+ * @fmt: Pointer to format string
+ */
+#define DPU_DEBUG_DRIVER(fmt, ...) \
+ do { \
+ if (unlikely(drm_debug & DRM_UT_DRIVER)) \
+ DRM_ERROR(fmt, ##__VA_ARGS__); \
+ else \
+ pr_debug(fmt, ##__VA_ARGS__); \
+ } while (0)
+
+#define DPU_ERROR(fmt, ...) pr_err("[dpu error]" fmt, ##__VA_ARGS__)
+
+/**
+ * ktime_compare_safe - compare two ktime structures
+ * This macro is similar to the standard ktime_compare() function, but
+ * attempts to also handle ktime overflows.
+ * @A: First ktime value
+ * @B: Second ktime value
+ * Returns: -1 if A < B, 0 if A == B, 1 if A > B
+ */
+#define ktime_compare_safe(A, B) \
+ ktime_compare(ktime_sub((A), (B)), ktime_set(0, 0))
+
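+/*
+ * Illustrative sketch (cur_time and expire_time are hypothetical): because
+ * the macro compares the signed difference against zero rather than the raw
+ * values, ordering stays correct even if one timestamp has wrapped:
+ *
+ *	if (ktime_compare_safe(cur_time, expire_time) > 0)
+ *		return -ETIMEDOUT;
+ */
+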
+#define DPU_NAME_SIZE 12
+
+/* timeout in frames waiting for frame done */
+#define DPU_FRAME_DONE_TIMEOUT 60
+
+/*
+ * struct dpu_irq_callback - IRQ callback handlers
+ * @list: list to callback
+ * @func: intr handler
+ * @arg: argument for the handler
+ */
+struct dpu_irq_callback {
+ struct list_head list;
+ void (*func)(void *arg, int irq_idx);
+ void *arg;
+};
+
+/**
+ * struct dpu_irq: IRQ structure contains callback registration info
+ * @total_irqs: total number of irq_idx obtained from HW interrupts mapping
+ * @irq_cb_tbl: array of IRQ callback lists, one per irq_idx
+ * @enable_counts: array of IRQ enable counts
+ * @irq_counts: array of IRQ counts for statistics
+ * @cb_lock: callback lock
+ * @debugfs_file: debugfs file for irq statistics
+ */
+struct dpu_irq {
+ u32 total_irqs;
+ struct list_head *irq_cb_tbl;
+ atomic_t *enable_counts;
+ atomic_t *irq_counts;
+ spinlock_t cb_lock;
+ struct dentry *debugfs_file;
+};
+
+struct dpu_kms {
+ struct msm_kms base;
+ struct drm_device *dev;
+ int core_rev;
+ struct dpu_mdss_cfg *catalog;
+
+ struct dpu_power_handle phandle;
+ struct dpu_power_client *core_client;
+ struct dpu_power_event *power_event;
+
+ /* directory entry for debugfs */
+ struct dentry *debugfs_root;
+ struct dentry *debugfs_danger;
+ struct dentry *debugfs_vbif;
+
+ /* io/register spaces: */
+ void __iomem *mmio, *vbif[VBIF_MAX], *reg_dma;
+ unsigned long mmio_len, vbif_len[VBIF_MAX], reg_dma_len;
+
+ struct regulator *vdd;
+ struct regulator *mmagic;
+ struct regulator *venus;
+
+ struct dpu_hw_intr *hw_intr;
+ struct dpu_irq irq_obj;
+
+ struct dpu_core_perf perf;
+
+ /* saved atomic state during system suspend */
+ struct drm_atomic_state *suspend_state;
+ bool suspend_block;
+
+ struct dpu_rm rm;
+ bool rm_init;
+
+ struct dpu_hw_vbif *hw_vbif[VBIF_MAX];
+ struct dpu_hw_mdp *hw_mdp;
+
+ bool has_danger_ctrl;
+
+ struct platform_device *pdev;
+ bool rpm_enabled;
+ struct dss_module_power mp;
+};
+
+struct vsync_info {
+ u32 frame_count;
+ u32 line_count;
+};
+
+#define to_dpu_kms(x) container_of(x, struct dpu_kms, base)
+
+/* get struct msm_kms * from drm_device * */
+#define ddev_to_msm_kms(D) ((D) && (D)->dev_private ? \
+ ((struct msm_drm_private *)((D)->dev_private))->kms : NULL)
+
+/**
+ * dpu_kms_is_suspend_state - whether or not the system is pm suspended
+ * @dev: Pointer to drm device
+ * Return: Suspend status
+ */
+static inline bool dpu_kms_is_suspend_state(struct drm_device *dev)
+{
+ if (!ddev_to_msm_kms(dev))
+ return false;
+
+ return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_state != NULL;
+}
+
+/**
+ * dpu_kms_is_suspend_blocked - whether or not commits are blocked due to pm
+ * suspend status
+ * @dev: Pointer to drm device
+ * Return: True if commits should be rejected due to pm suspend
+ */
+static inline bool dpu_kms_is_suspend_blocked(struct drm_device *dev)
+{
+ if (!dpu_kms_is_suspend_state(dev))
+ return false;
+
+ return to_dpu_kms(ddev_to_msm_kms(dev))->suspend_block;
+}
+
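+/*
+ * Illustrative sketch, not a declaration from this header: an atomic_check
+ * implementation could use the helper above to reject new commits while the
+ * device is suspended:
+ *
+ *	if (dpu_kms_is_suspend_blocked(crtc->dev))
+ *		return -EBUSY;
+ */
+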
+/**
+ * Debugfs functions - extra helper functions for debugfs support
+ *
+ * Main debugfs documentation is located at,
+ *
+ * Documentation/filesystems/debugfs.txt
+ *
+ * @dpu_debugfs_setup_regset32: Initialize data for dpu_debugfs_create_regset32
+ * @dpu_debugfs_create_regset32: Create 32-bit register dump file
+ * @dpu_debugfs_get_root: Get root dentry for DPU_KMS's debugfs node
+ */
+
+/**
+ * Companion structure for dpu_debugfs_create_regset32. Do not initialize the
+ * members of this structure explicitly; use dpu_debugfs_setup_regset32 instead.
+ */
+struct dpu_debugfs_regset32 {
+ uint32_t offset;
+ uint32_t blk_len;
+ struct dpu_kms *dpu_kms;
+};
+
+/**
+ * dpu_debugfs_setup_regset32 - Initialize register block definition for debugfs
+ * This function is meant to initialize dpu_debugfs_regset32 structures for use
+ * with dpu_debugfs_create_regset32.
+ * @regset: opaque register definition structure
+ * @offset: sub-block offset
+ * @length: sub-block length, in bytes
+ * @dpu_kms: pointer to dpu kms structure
+ */
+void dpu_debugfs_setup_regset32(struct dpu_debugfs_regset32 *regset,
+ uint32_t offset, uint32_t length, struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_debugfs_create_regset32 - Create register read back file for debugfs
+ *
+ * This function is almost identical to the standard debugfs_create_regset32()
+ * function, with the main difference being that a list of register
+ * names/offsets do not need to be provided. The 'read' function simply outputs
+ * sequential register values over a specified range.
+ *
+ * Similar to the related debugfs_create_regset32 API, the structure pointed to
+ * by regset needs to persist for the lifetime of the created file. The calling
+ * code is responsible for initialization/management of this structure.
+ *
+ * The structure pointed to by regset is meant to be opaque. Please use
+ * dpu_debugfs_setup_regset32 to initialize it.
+ *
+ * @name: File name within debugfs
+ * @mode: File mode within debugfs
+ * @parent: Parent directory entry within debugfs, can be NULL
+ * @regset: Pointer to persistent register block definition
+ *
+ * Return: dentry pointer for newly created file, use either debugfs_remove()
+ * or debugfs_remove_recursive() (on a parent directory) to remove the
+ * file
+ */
+void *dpu_debugfs_create_regset32(const char *name, umode_t mode,
+ void *parent, struct dpu_debugfs_regset32 *regset);
+
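+/*
+ * Illustrative usage sketch; the offset, length and file name below are
+ * hypothetical. The regset must outlive the created file, so place it in a
+ * long-lived structure rather than on the stack:
+ *
+ *	static struct dpu_debugfs_regset32 regset;
+ *
+ *	dpu_debugfs_setup_regset32(&regset, 0x2000, 0x100, dpu_kms);
+ *	dpu_debugfs_create_regset32("blk_regs", 0400,
+ *				    dpu_debugfs_get_root(dpu_kms), &regset);
+ */
+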
+/**
+ * dpu_debugfs_get_root - Return root directory entry for KMS's debugfs
+ *
+ * The return value should be passed as the 'parent' argument to subsequent
+ * debugfs create calls.
+ *
+ * @dpu_kms: Pointer to DPU's KMS structure
+ *
+ * Return: dentry pointer for DPU's debugfs location
+ */
+void *dpu_debugfs_get_root(struct dpu_kms *dpu_kms);
+
+/**
+ * DPU info management functions
+ * These functions/definitions allow for building up a 'dpu_info' structure
+ * containing one or more "key=value\n" entries.
+ */
+#define DPU_KMS_INFO_MAX_SIZE 4096
+
+/**
+ * Vblank enable/disable functions
+ */
+int dpu_enable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+void dpu_disable_vblank(struct msm_kms *kms, struct drm_crtc *crtc);
+
+void dpu_kms_encoder_enable(struct drm_encoder *encoder);
+
+/**
+ * dpu_kms_get_clk_rate() - get the clock rate
+ * @dpu_kms: pointer to dpu_kms structure
+ * @clock_name: clock name to get the rate
+ *
+ * Return: current clock rate
+ */
+u64 dpu_kms_get_clk_rate(struct dpu_kms *dpu_kms, char *clock_name);
+
+#endif /* __DPU_KMS_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
new file mode 100644
index 000000000000..9e533b86682c
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_mdss.c
@@ -0,0 +1,245 @@
+/*
+ * SPDX-License-Identifier: GPL-2.0
+ * Copyright (c) 2018, The Linux Foundation
+ */
+
+#include "dpu_kms.h"
+
+#define to_dpu_mdss(x) container_of(x, struct dpu_mdss, base)
+
+#define HW_INTR_STATUS 0x0010
+
+struct dpu_mdss {
+ struct msm_mdss base;
+ void __iomem *mmio;
+ unsigned long mmio_len;
+ u32 hwversion;
+ struct dss_module_power mp;
+ struct dpu_irq_controller irq_controller;
+};
+
+static irqreturn_t dpu_mdss_irq(int irq, void *arg)
+{
+ struct dpu_mdss *dpu_mdss = arg;
+ u32 interrupts;
+
+ interrupts = readl_relaxed(dpu_mdss->mmio + HW_INTR_STATUS);
+
+ while (interrupts) {
+ irq_hw_number_t hwirq = fls(interrupts) - 1;
+ unsigned int mapping;
+ int rc;
+
+ mapping = irq_find_mapping(dpu_mdss->irq_controller.domain,
+ hwirq);
+ if (mapping == 0) {
+ DRM_ERROR("couldn't find irq mapping for %lu\n", hwirq);
+ return IRQ_NONE;
+ }
+
+ rc = generic_handle_irq(mapping);
+ if (rc < 0) {
+ DRM_ERROR("handle irq fail: irq=%lu mapping=%u rc=%d\n",
+ hwirq, mapping, rc);
+ return IRQ_NONE;
+ }
+
+ interrupts &= ~(1 << hwirq);
+ }
+
+ return IRQ_HANDLED;
+}
+
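+/*
+ * Worked example for the decode loop above (the status value is purely
+ * illustrative): if HW_INTR_STATUS reads 0x00000005, fls() - 1 selects
+ * hwirq 2 on the first pass and hwirq 0 on the second; each serviced bit is
+ * cleared from the local copy, so the loop exits once every pending
+ * interrupt has been dispatched.
+ */
+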
+static void dpu_mdss_irq_mask(struct irq_data *irqd)
+{
+ struct dpu_mdss *dpu_mdss = irq_data_get_irq_chip_data(irqd);
+
+ /* memory barrier */
+ smp_mb__before_atomic();
+ clear_bit(irqd->hwirq, &dpu_mdss->irq_controller.enabled_mask);
+ /* memory barrier */
+ smp_mb__after_atomic();
+}
+
+static void dpu_mdss_irq_unmask(struct irq_data *irqd)
+{
+ struct dpu_mdss *dpu_mdss = irq_data_get_irq_chip_data(irqd);
+
+ /* memory barrier */
+ smp_mb__before_atomic();
+ set_bit(irqd->hwirq, &dpu_mdss->irq_controller.enabled_mask);
+ /* memory barrier */
+ smp_mb__after_atomic();
+}
+
+static struct irq_chip dpu_mdss_irq_chip = {
+ .name = "dpu_mdss",
+ .irq_mask = dpu_mdss_irq_mask,
+ .irq_unmask = dpu_mdss_irq_unmask,
+};
+
+static int dpu_mdss_irqdomain_map(struct irq_domain *domain,
+ unsigned int irq, irq_hw_number_t hwirq)
+{
+ struct dpu_mdss *dpu_mdss = domain->host_data;
+ int ret;
+
+ irq_set_chip_and_handler(irq, &dpu_mdss_irq_chip, handle_level_irq);
+ ret = irq_set_chip_data(irq, dpu_mdss);
+
+ return ret;
+}
+
+static const struct irq_domain_ops dpu_mdss_irqdomain_ops = {
+ .map = dpu_mdss_irqdomain_map,
+ .xlate = irq_domain_xlate_onecell,
+};
+
+static int _dpu_mdss_irq_domain_add(struct dpu_mdss *dpu_mdss)
+{
+ struct device *dev;
+ struct irq_domain *domain;
+
+ dev = dpu_mdss->base.dev->dev;
+
+ domain = irq_domain_add_linear(dev->of_node, 32,
+ &dpu_mdss_irqdomain_ops, dpu_mdss);
+ if (!domain) {
+ DPU_ERROR("failed to add irq_domain\n");
+ return -EINVAL;
+ }
+
+ dpu_mdss->irq_controller.enabled_mask = 0;
+ dpu_mdss->irq_controller.domain = domain;
+
+ return 0;
+}
+
+static int _dpu_mdss_irq_domain_fini(struct dpu_mdss *dpu_mdss)
+{
+ if (dpu_mdss->irq_controller.domain) {
+ irq_domain_remove(dpu_mdss->irq_controller.domain);
+ dpu_mdss->irq_controller.domain = NULL;
+ }
+ return 0;
+}
+
+static int dpu_mdss_enable(struct msm_mdss *mdss)
+{
+ struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
+ struct dss_module_power *mp = &dpu_mdss->mp;
+ int ret;
+
+ ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, true);
+ if (ret)
+ DPU_ERROR("clock enable failed, ret:%d\n", ret);
+
+ return ret;
+}
+
+static int dpu_mdss_disable(struct msm_mdss *mdss)
+{
+ struct dpu_mdss *dpu_mdss = to_dpu_mdss(mdss);
+ struct dss_module_power *mp = &dpu_mdss->mp;
+ int ret;
+
+ ret = msm_dss_enable_clk(mp->clk_config, mp->num_clk, false);
+ if (ret)
+ DPU_ERROR("clock disable failed, ret:%d\n", ret);
+
+ return ret;
+}
+
+static void dpu_mdss_destroy(struct drm_device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev->dev);
+ struct msm_drm_private *priv = dev->dev_private;
+ struct dpu_mdss *dpu_mdss = to_dpu_mdss(priv->mdss);
+ struct dss_module_power *mp = &dpu_mdss->mp;
+
+ _dpu_mdss_irq_domain_fini(dpu_mdss);
+
+ msm_dss_put_clk(mp->clk_config, mp->num_clk);
+ devm_kfree(&pdev->dev, mp->clk_config);
+
+ if (dpu_mdss->mmio)
+ devm_iounmap(&pdev->dev, dpu_mdss->mmio);
+ dpu_mdss->mmio = NULL;
+
+ pm_runtime_disable(dev->dev);
+ priv->mdss = NULL;
+}
+
+static const struct msm_mdss_funcs mdss_funcs = {
+ .enable = dpu_mdss_enable,
+ .disable = dpu_mdss_disable,
+ .destroy = dpu_mdss_destroy,
+};
+
+int dpu_mdss_init(struct drm_device *dev)
+{
+ struct platform_device *pdev = to_platform_device(dev->dev);
+ struct msm_drm_private *priv = dev->dev_private;
+ struct resource *res;
+ struct dpu_mdss *dpu_mdss;
+ struct dss_module_power *mp;
+ int ret = 0;
+
+ dpu_mdss = devm_kzalloc(dev->dev, sizeof(*dpu_mdss), GFP_KERNEL);
+ if (!dpu_mdss)
+ return -ENOMEM;
+
+ dpu_mdss->mmio = msm_ioremap(pdev, "mdss", "mdss");
+ if (IS_ERR(dpu_mdss->mmio))
+ return PTR_ERR(dpu_mdss->mmio);
+
+ DRM_DEBUG("mapped mdss address space @%pK\n", dpu_mdss->mmio);
+
+ res = platform_get_resource_byname(pdev, IORESOURCE_MEM, "mdss");
+ if (!res) {
+ DRM_ERROR("failed to get memory resource for mdss\n");
+ return -ENOMEM;
+ }
+ dpu_mdss->mmio_len = resource_size(res);
+
+ mp = &dpu_mdss->mp;
+ ret = msm_dss_parse_clock(pdev, mp);
+ if (ret) {
+ DPU_ERROR("failed to parse clocks, ret=%d\n", ret);
+ goto clk_parse_err;
+ }
+
+ dpu_mdss->base.dev = dev;
+ dpu_mdss->base.funcs = &mdss_funcs;
+
+ ret = _dpu_mdss_irq_domain_add(dpu_mdss);
+ if (ret)
+ goto irq_domain_error;
+
+ ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
+ dpu_mdss_irq, 0, "dpu_mdss_isr", dpu_mdss);
+ if (ret) {
+ DPU_ERROR("failed to init irq: %d\n", ret);
+ goto irq_error;
+ }
+
+ pm_runtime_enable(dev->dev);
+
+ pm_runtime_get_sync(dev->dev);
+ dpu_mdss->hwversion = readl_relaxed(dpu_mdss->mmio);
+ pm_runtime_put_sync(dev->dev);
+
+ priv->mdss = &dpu_mdss->base;
+
+ return ret;
+
+irq_error:
+ _dpu_mdss_irq_domain_fini(dpu_mdss);
+irq_domain_error:
+ msm_dss_put_clk(mp->clk_config, mp->num_clk);
+clk_parse_err:
+ devm_kfree(&pdev->dev, mp->clk_config);
+ if (dpu_mdss->mmio)
+ devm_iounmap(&pdev->dev, dpu_mdss->mmio);
+ dpu_mdss->mmio = NULL;
+ return ret;
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
new file mode 100644
index 000000000000..b640e39ebaca
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.c
@@ -0,0 +1,1963 @@
+/*
+ * Copyright (C) 2014-2018 The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+#include <linux/dma-buf.h>
+
+#include "msm_drv.h"
+#include "dpu_kms.h"
+#include "dpu_formats.h"
+#include "dpu_hw_sspp.h"
+#include "dpu_hw_catalog_format.h"
+#include "dpu_trace.h"
+#include "dpu_crtc.h"
+#include "dpu_vbif.h"
+#include "dpu_plane.h"
+
+#define DPU_DEBUG_PLANE(pl, fmt, ...) DPU_DEBUG("plane%d " fmt,\
+ (pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DPU_ERROR_PLANE(pl, fmt, ...) DPU_ERROR("plane%d " fmt,\
+ (pl) ? (pl)->base.base.id : -1, ##__VA_ARGS__)
+
+#define DECIMATED_DIMENSION(dim, deci) (((dim) + ((1 << (deci)) - 1)) >> (deci))
+#define PHASE_STEP_SHIFT 21
+#define PHASE_STEP_UNIT_SCALE ((int) (1 << PHASE_STEP_SHIFT))
+#define PHASE_RESIDUAL 15
+
+#define SHARP_STRENGTH_DEFAULT 32
+#define SHARP_EDGE_THR_DEFAULT 112
+#define SHARP_SMOOTH_THR_DEFAULT 8
+#define SHARP_NOISE_THR_DEFAULT 2
+
+#define DPU_NAME_SIZE 12
+
+#define DPU_PLANE_COLOR_FILL_FLAG BIT(31)
+#define DPU_ZPOS_MAX 255
+
+/* multirect rect index */
+enum {
+ R0,
+ R1,
+ R_MAX
+};
+
+#define DPU_QSEED3_DEFAULT_PRELOAD_H 0x4
+#define DPU_QSEED3_DEFAULT_PRELOAD_V 0x3
+
+#define DEFAULT_REFRESH_RATE 60
+
+/**
+ * enum dpu_plane_qos - Different qos configurations for each pipe
+ *
+ * @DPU_PLANE_QOS_VBLANK_CTRL: Setup VBLANK qos for the pipe.
+ * @DPU_PLANE_QOS_VBLANK_AMORTIZE: Enables amortization within the pipe.
+ * This configuration is mutually exclusive with VBLANK_CTRL.
+ * @DPU_PLANE_QOS_PANIC_CTRL: Setup panic for the pipe.
+ */
+enum dpu_plane_qos {
+ DPU_PLANE_QOS_VBLANK_CTRL = BIT(0),
+ DPU_PLANE_QOS_VBLANK_AMORTIZE = BIT(1),
+ DPU_PLANE_QOS_PANIC_CTRL = BIT(2),
+};
+
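+/*
+ * Illustrative sketch: the qos flags form a bitmask, so multiple controls
+ * can be toggled in a single call to the helper defined later in this file:
+ *
+ *	_dpu_plane_set_qos_ctrl(plane, true,
+ *				DPU_PLANE_QOS_PANIC_CTRL |
+ *				DPU_PLANE_QOS_VBLANK_CTRL);
+ */
+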
+/*
+ * struct dpu_plane - local dpu plane structure
+ * @aspace: address space pointer
+ * @csc_ptr: Points to dpu_csc_cfg structure to use for current
+ * @mplane_list: List of multirect planes of the same pipe
+ * @catalog: Points to dpu catalog structure
+ * @revalidate: force revalidation of all the plane properties
+ */
+struct dpu_plane {
+ struct drm_plane base;
+
+ struct mutex lock;
+
+ enum dpu_sspp pipe;
+ uint32_t features; /* capabilities from catalog */
+ uint32_t nformats;
+ uint32_t formats[64];
+
+ struct dpu_hw_pipe *pipe_hw;
+ struct dpu_hw_pipe_cfg pipe_cfg;
+ struct dpu_hw_pipe_qos_cfg pipe_qos_cfg;
+ uint32_t color_fill;
+ bool is_error;
+ bool is_rt_pipe;
+ bool is_virtual;
+ struct list_head mplane_list;
+ struct dpu_mdss_cfg *catalog;
+
+ struct dpu_csc_cfg *csc_ptr;
+
+ const struct dpu_sspp_sub_blks *pipe_sblk;
+ char pipe_name[DPU_NAME_SIZE];
+
+ /* debugfs related stuff */
+ struct dentry *debugfs_root;
+ struct dpu_debugfs_regset32 debugfs_src;
+ struct dpu_debugfs_regset32 debugfs_scaler;
+ struct dpu_debugfs_regset32 debugfs_csc;
+ bool debugfs_default_scale;
+};
+
+#define to_dpu_plane(x) container_of(x, struct dpu_plane, base)
+
+static struct dpu_kms *_dpu_plane_get_kms(struct drm_plane *plane)
+{
+ struct msm_drm_private *priv;
+
+ if (!plane || !plane->dev)
+ return NULL;
+ priv = plane->dev->dev_private;
+ if (!priv)
+ return NULL;
+ return to_dpu_kms(priv->kms);
+}
+
+static bool dpu_plane_enabled(struct drm_plane_state *state)
+{
+ return state && state->fb && state->crtc;
+}
+
+static bool dpu_plane_sspp_enabled(struct drm_plane_state *state)
+{
+ return state && state->crtc;
+}
+
+/**
+ * _dpu_plane_calc_fill_level - calculate fill level of the given source format
+ * @plane: Pointer to drm plane
+ * @fmt: Pointer to source buffer format
+ * @src_width: width of source buffer
+ * Return: fill level corresponding to the source buffer/format or 0 if error
+ */
+static inline int _dpu_plane_calc_fill_level(struct drm_plane *plane,
+ const struct dpu_format *fmt, u32 src_width)
+{
+ struct dpu_plane *pdpu, *tmp;
+ struct dpu_plane_state *pstate;
+ u32 fixed_buff_size;
+ u32 total_fl;
+
+ if (!plane || !fmt || !plane->state || !src_width || !fmt->bpp) {
+ DPU_ERROR("invalid arguments\n");
+ return 0;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ pstate = to_dpu_plane_state(plane->state);
+ fixed_buff_size = pdpu->pipe_sblk->common->pixel_ram_size;
+
+ list_for_each_entry(tmp, &pdpu->mplane_list, mplane_list) {
+ if (!dpu_plane_enabled(tmp->base.state))
+ continue;
+ DPU_DEBUG("plane%d/%d src_width:%d/%d\n",
+ pdpu->base.base.id, tmp->base.base.id,
+ src_width,
+ drm_rect_width(&tmp->pipe_cfg.src_rect));
+ src_width = max_t(u32, src_width,
+ drm_rect_width(&tmp->pipe_cfg.src_rect));
+ }
+
+ if (fmt->fetch_planes == DPU_PLANE_PSEUDO_PLANAR) {
+ if (fmt->chroma_sample == DPU_CHROMA_420) {
+ /* NV12 */
+ total_fl = (fixed_buff_size / 2) /
+ ((src_width + 32) * fmt->bpp);
+ } else {
+ /* non NV12 */
+ total_fl = (fixed_buff_size / 2) * 2 /
+ ((src_width + 32) * fmt->bpp);
+ }
+ } else {
+ if (pstate->multirect_mode == DPU_SSPP_MULTIRECT_PARALLEL) {
+ total_fl = (fixed_buff_size / 2) * 2 /
+ ((src_width + 32) * fmt->bpp);
+ } else {
+ total_fl = (fixed_buff_size) * 2 /
+ ((src_width + 32) * fmt->bpp);
+ }
+ }
+
+ DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s w:%u fl:%u\n",
+ plane->base.id, pdpu->pipe - SSPP_VIG0,
+ (char *)&fmt->base.pixel_format,
+ src_width, total_fl);
+
+ return total_fl;
+}
+
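+/*
+ * Worked example for the NV12 branch above, with hypothetical numbers:
+ * pixel_ram_size = 50000, bpp = 2 and src_width = 1920 give
+ * total_fl = (50000 / 2) / ((1920 + 32) * 2) = 6.
+ */
+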
+/**
+ * _dpu_plane_get_qos_lut - get LUT mapping based on fill level
+ * @tbl: Pointer to LUT table
+ * @total_fl: fill level
+ * Return: LUT setting corresponding to the fill level
+ */
+static u64 _dpu_plane_get_qos_lut(const struct dpu_qos_lut_tbl *tbl,
+ u32 total_fl)
+{
+ int i;
+
+ if (!tbl || !tbl->nentry || !tbl->entries)
+ return 0;
+
+ for (i = 0; i < tbl->nentry; i++)
+ if (total_fl <= tbl->entries[i].fl)
+ return tbl->entries[i].lut;
+
+ /* if last fl is zero, use as default */
+ if (!tbl->entries[i-1].fl)
+ return tbl->entries[i-1].lut;
+
+ return 0;
+}
+
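+/*
+ * Illustrative lookup: with entries {.fl = 4, .lut = A} and {.fl = 10,
+ * .lut = B}, a fill level of 6 selects B (the first entry whose fl bound is
+ * not below it); a final {.fl = 0, .lut = C} entry acts as the catch-all
+ * for any higher fill level.
+ */
+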
+/**
+ * _dpu_plane_set_qos_lut - set QoS LUT of the given plane
+ * @plane: Pointer to drm plane
+ * @fb: Pointer to framebuffer associated with the given plane
+ */
+static void _dpu_plane_set_qos_lut(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+ struct dpu_plane *pdpu;
+ const struct dpu_format *fmt = NULL;
+ u64 qos_lut;
+ u32 total_fl = 0, lut_usage;
+
+ if (!plane || !fb) {
+ DPU_ERROR("invalid arguments plane %d fb %d\n",
+ plane != 0, fb != 0);
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+
+ if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ } else if (!pdpu->pipe_hw->ops.setup_creq_lut) {
+ return;
+ }
+
+ if (!pdpu->is_rt_pipe) {
+ lut_usage = DPU_QOS_LUT_USAGE_NRT;
+ } else {
+ fmt = dpu_get_dpu_format_ext(
+ fb->format->format,
+ fb->modifier);
+ total_fl = _dpu_plane_calc_fill_level(plane, fmt,
+ drm_rect_width(&pdpu->pipe_cfg.src_rect));
+
+ if (fmt && DPU_FORMAT_IS_LINEAR(fmt))
+ lut_usage = DPU_QOS_LUT_USAGE_LINEAR;
+ else
+ lut_usage = DPU_QOS_LUT_USAGE_MACROTILE;
+ }
+
+ qos_lut = _dpu_plane_get_qos_lut(
+ &pdpu->catalog->perf.qos_lut_tbl[lut_usage], total_fl);
+
+ pdpu->pipe_qos_cfg.creq_lut = qos_lut;
+
+ trace_dpu_perf_set_qos_luts(pdpu->pipe - SSPP_VIG0,
+ (fmt) ? fmt->base.pixel_format : 0,
+ pdpu->is_rt_pipe, total_fl, qos_lut, lut_usage);
+
+ DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s rt:%d fl:%u lut:0x%llx\n",
+ plane->base.id,
+ pdpu->pipe - SSPP_VIG0,
+ fmt ? (char *)&fmt->base.pixel_format : NULL,
+ pdpu->is_rt_pipe, total_fl, qos_lut);
+
+ pdpu->pipe_hw->ops.setup_creq_lut(pdpu->pipe_hw, &pdpu->pipe_qos_cfg);
+}
+
+/**
+ * _dpu_plane_set_danger_lut - set danger/safe LUT of the given plane
+ * @plane: Pointer to drm plane
+ * @fb: Pointer to framebuffer associated with the given plane
+ */
+static void _dpu_plane_set_danger_lut(struct drm_plane *plane,
+ struct drm_framebuffer *fb)
+{
+ struct dpu_plane *pdpu;
+ const struct dpu_format *fmt = NULL;
+ u32 danger_lut, safe_lut;
+
+ if (!plane || !fb) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+
+ if (!pdpu->pipe_hw || !pdpu->pipe_sblk || !pdpu->catalog) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ } else if (!pdpu->pipe_hw->ops.setup_danger_safe_lut) {
+ return;
+ }
+
+ if (!pdpu->is_rt_pipe) {
+ danger_lut = pdpu->catalog->perf.danger_lut_tbl
+ [DPU_QOS_LUT_USAGE_NRT];
+ safe_lut = pdpu->catalog->perf.safe_lut_tbl
+ [DPU_QOS_LUT_USAGE_NRT];
+ } else {
+ fmt = dpu_get_dpu_format_ext(
+ fb->format->format,
+ fb->modifier);
+
+ if (fmt && DPU_FORMAT_IS_LINEAR(fmt)) {
+ danger_lut = pdpu->catalog->perf.danger_lut_tbl
+ [DPU_QOS_LUT_USAGE_LINEAR];
+ safe_lut = pdpu->catalog->perf.safe_lut_tbl
+ [DPU_QOS_LUT_USAGE_LINEAR];
+ } else {
+ danger_lut = pdpu->catalog->perf.danger_lut_tbl
+ [DPU_QOS_LUT_USAGE_MACROTILE];
+ safe_lut = pdpu->catalog->perf.safe_lut_tbl
+ [DPU_QOS_LUT_USAGE_MACROTILE];
+ }
+ }
+
+ pdpu->pipe_qos_cfg.danger_lut = danger_lut;
+ pdpu->pipe_qos_cfg.safe_lut = safe_lut;
+
+ trace_dpu_perf_set_danger_luts(pdpu->pipe - SSPP_VIG0,
+ (fmt) ? fmt->base.pixel_format : 0,
+ (fmt) ? fmt->fetch_mode : 0,
+ pdpu->pipe_qos_cfg.danger_lut,
+ pdpu->pipe_qos_cfg.safe_lut);
+
+ DPU_DEBUG("plane%u: pnum:%d fmt: %4.4s mode:%d luts[0x%x, 0x%x]\n",
+ plane->base.id,
+ pdpu->pipe - SSPP_VIG0,
+ fmt ? (char *)&fmt->base.pixel_format : NULL,
+ fmt ? fmt->fetch_mode : -1,
+ pdpu->pipe_qos_cfg.danger_lut,
+ pdpu->pipe_qos_cfg.safe_lut);
+
+ pdpu->pipe_hw->ops.setup_danger_safe_lut(pdpu->pipe_hw,
+ &pdpu->pipe_qos_cfg);
+}
+
+/**
+ * _dpu_plane_set_qos_ctrl - set QoS control of the given plane
+ * @plane: Pointer to drm plane
+ * @enable: true to enable QoS control
+ * @flags: QoS control mode (enum dpu_plane_qos)
+ */
+static void _dpu_plane_set_qos_ctrl(struct drm_plane *plane,
+ bool enable, u32 flags)
+{
+ struct dpu_plane *pdpu;
+
+ if (!plane) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+
+ if (!pdpu->pipe_hw || !pdpu->pipe_sblk) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ } else if (!pdpu->pipe_hw->ops.setup_qos_ctrl) {
+ return;
+ }
+
+ if (flags & DPU_PLANE_QOS_VBLANK_CTRL) {
+ pdpu->pipe_qos_cfg.creq_vblank = pdpu->pipe_sblk->creq_vblank;
+ pdpu->pipe_qos_cfg.danger_vblank =
+ pdpu->pipe_sblk->danger_vblank;
+ pdpu->pipe_qos_cfg.vblank_en = enable;
+ }
+
+ if (flags & DPU_PLANE_QOS_VBLANK_AMORTIZE) {
+ /* this feature overrules previous VBLANK_CTRL */
+ pdpu->pipe_qos_cfg.vblank_en = false;
+ pdpu->pipe_qos_cfg.creq_vblank = 0; /* clear vblank bits */
+ }
+
+ if (flags & DPU_PLANE_QOS_PANIC_CTRL)
+ pdpu->pipe_qos_cfg.danger_safe_en = enable;
+
+ if (!pdpu->is_rt_pipe) {
+ pdpu->pipe_qos_cfg.vblank_en = false;
+ pdpu->pipe_qos_cfg.danger_safe_en = false;
+ }
+
+ DPU_DEBUG("plane%u: pnum:%d ds:%d vb:%d pri[0x%x, 0x%x] is_rt:%d\n",
+ plane->base.id,
+ pdpu->pipe - SSPP_VIG0,
+ pdpu->pipe_qos_cfg.danger_safe_en,
+ pdpu->pipe_qos_cfg.vblank_en,
+ pdpu->pipe_qos_cfg.creq_vblank,
+ pdpu->pipe_qos_cfg.danger_vblank,
+ pdpu->is_rt_pipe);
+
+ pdpu->pipe_hw->ops.setup_qos_ctrl(pdpu->pipe_hw,
+ &pdpu->pipe_qos_cfg);
+}
+
+int dpu_plane_danger_signal_ctrl(struct drm_plane *plane, bool enable)
+{
+ struct dpu_plane *pdpu;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!plane || !plane->dev) {
+ DPU_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ priv = plane->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid KMS reference\n");
+ return -EINVAL;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+ pdpu = to_dpu_plane(plane);
+
+ if (!pdpu->is_rt_pipe)
+ goto end;
+
+ pm_runtime_get_sync(&dpu_kms->pdev->dev);
+ _dpu_plane_set_qos_ctrl(plane, enable, DPU_PLANE_QOS_PANIC_CTRL);
+ pm_runtime_put_sync(&dpu_kms->pdev->dev);
+
+end:
+ return 0;
+}
+
+/**
+ * _dpu_plane_set_ot_limit - set OT limit for the given plane
+ * @plane: Pointer to drm plane
+ * @crtc: Pointer to drm crtc
+ */
+static void _dpu_plane_set_ot_limit(struct drm_plane *plane,
+ struct drm_crtc *crtc)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_vbif_set_ot_params ot_params;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!plane || !plane->dev || !crtc) {
+ DPU_ERROR("invalid arguments plane %d crtc %d\n",
+ plane != 0, crtc != 0);
+ return;
+ }
+
+ priv = plane->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid KMS reference\n");
+ return;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+ pdpu = to_dpu_plane(plane);
+ if (!pdpu->pipe_hw) {
+ DPU_ERROR("invalid pipe reference\n");
+ return;
+ }
+
+ memset(&ot_params, 0, sizeof(ot_params));
+ ot_params.xin_id = pdpu->pipe_hw->cap->xin_id;
+ ot_params.num = pdpu->pipe_hw->idx - SSPP_NONE;
+ ot_params.width = drm_rect_width(&pdpu->pipe_cfg.src_rect);
+ ot_params.height = drm_rect_height(&pdpu->pipe_cfg.src_rect);
+ ot_params.is_wfd = !pdpu->is_rt_pipe;
+ ot_params.frame_rate = crtc->mode.vrefresh;
+ ot_params.vbif_idx = VBIF_RT;
+ ot_params.clk_ctrl = pdpu->pipe_hw->cap->clk_ctrl;
+ ot_params.rd = true;
+
+ dpu_vbif_set_ot_limit(dpu_kms, &ot_params);
+}
+
+/**
+ * _dpu_plane_set_qos_remap - set vbif QoS remap for the given plane
+ * @plane: Pointer to drm plane
+ */
+static void _dpu_plane_set_qos_remap(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_vbif_set_qos_params qos_params;
+ struct msm_drm_private *priv;
+ struct dpu_kms *dpu_kms;
+
+ if (!plane || !plane->dev) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+
+ priv = plane->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid KMS reference\n");
+ return;
+ }
+
+ dpu_kms = to_dpu_kms(priv->kms);
+ pdpu = to_dpu_plane(plane);
+ if (!pdpu->pipe_hw) {
+ DPU_ERROR("invalid pipe reference\n");
+ return;
+ }
+
+ memset(&qos_params, 0, sizeof(qos_params));
+ qos_params.vbif_idx = VBIF_RT;
+ qos_params.clk_ctrl = pdpu->pipe_hw->cap->clk_ctrl;
+ qos_params.xin_id = pdpu->pipe_hw->cap->xin_id;
+ qos_params.num = pdpu->pipe_hw->idx - SSPP_VIG0;
+ qos_params.is_rt = pdpu->is_rt_pipe;
+
+ DPU_DEBUG("plane%d pipe:%d vbif:%d xin:%d rt:%d, clk_ctrl:%d\n",
+ plane->base.id, qos_params.num,
+ qos_params.vbif_idx,
+ qos_params.xin_id, qos_params.is_rt,
+ qos_params.clk_ctrl);
+
+ dpu_vbif_set_qos_remap(dpu_kms, &qos_params);
+}
+
+/**
+ * _dpu_plane_get_aspace - get the address space for the given plane
+ * @pdpu: Pointer to dpu plane
+ * @pstate: Pointer to dpu plane state
+ * @aspace: Output pointer for the retrieved address space
+ * Return: 0 on success, or -EINVAL on invalid arguments
+ */
+static int _dpu_plane_get_aspace(
+ struct dpu_plane *pdpu,
+ struct dpu_plane_state *pstate,
+ struct msm_gem_address_space **aspace)
+{
+ struct dpu_kms *kms;
+
+ if (!pdpu || !pstate || !aspace) {
+ DPU_ERROR("invalid parameters\n");
+ return -EINVAL;
+ }
+
+ kms = _dpu_plane_get_kms(&pdpu->base);
+ if (!kms) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ *aspace = kms->base.aspace;
+
+ return 0;
+}
+
+static inline void _dpu_plane_set_scanout(struct drm_plane *plane,
+ struct dpu_plane_state *pstate,
+ struct dpu_hw_pipe_cfg *pipe_cfg,
+ struct drm_framebuffer *fb)
+{
+ struct dpu_plane *pdpu;
+ struct msm_gem_address_space *aspace = NULL;
+ int ret;
+
+ if (!plane || !pstate || !pipe_cfg || !fb) {
+ DPU_ERROR(
+ "invalid arg(s), plane %d state %d cfg %d fb %d\n",
+ !!plane, !!pstate, !!pipe_cfg, !!fb);
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ if (!pdpu->pipe_hw) {
+ DPU_ERROR_PLANE(pdpu, "invalid pipe_hw\n");
+ return;
+ }
+
+ ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace);
+ if (ret) {
+ DPU_ERROR_PLANE(pdpu, "Failed to get aspace %d\n", ret);
+ return;
+ }
+
+ ret = dpu_format_populate_layout(aspace, fb, &pipe_cfg->layout);
+ if (ret == -EAGAIN)
+ DPU_DEBUG_PLANE(pdpu, "not updating same src addrs\n");
+ else if (ret)
+ DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
+ else if (pdpu->pipe_hw->ops.setup_sourceaddress) {
+ trace_dpu_plane_set_scanout(pdpu->pipe_hw->idx,
+ &pipe_cfg->layout,
+ pstate->multirect_index);
+ pdpu->pipe_hw->ops.setup_sourceaddress(pdpu->pipe_hw, pipe_cfg,
+ pstate->multirect_index);
+ }
+}
+
+static void _dpu_plane_setup_scaler3(struct dpu_plane *pdpu,
+ struct dpu_plane_state *pstate,
+ uint32_t src_w, uint32_t src_h, uint32_t dst_w, uint32_t dst_h,
+ struct dpu_hw_scaler3_cfg *scale_cfg,
+ const struct dpu_format *fmt,
+ uint32_t chroma_subsmpl_h, uint32_t chroma_subsmpl_v)
+{
+ uint32_t i;
+
+ if (!pdpu || !pstate || !scale_cfg || !fmt || !chroma_subsmpl_h ||
+ !chroma_subsmpl_v) {
+ DPU_ERROR(
+ "pdpu %d pstate %d scale_cfg %d fmt %d smp_h %d smp_v %d\n",
+ !!pdpu, !!pstate, !!scale_cfg, !!fmt, chroma_subsmpl_h,
+ chroma_subsmpl_v);
+ return;
+ }
+
+ memset(scale_cfg, 0, sizeof(*scale_cfg));
+ memset(&pstate->pixel_ext, 0, sizeof(struct dpu_hw_pixel_ext));
+
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_0] =
+ mult_frac((1 << PHASE_STEP_SHIFT), src_w, dst_w);
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_0] =
+ mult_frac((1 << PHASE_STEP_SHIFT), src_h, dst_h);
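+ /*
+ * The phase step is the per-destination-pixel increment through the
+ * source, in fixed point: e.g. a 2x horizontal downscale
+ * (src_w == 2 * dst_w) yields 2 << PHASE_STEP_SHIFT, i.e. the scaler
+ * advances two source pixels per destination pixel.
+ */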
+
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_1_2] =
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_0] / chroma_subsmpl_v;
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_1_2] =
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_0] / chroma_subsmpl_h;
+
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_2] =
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_1_2];
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_2] =
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_1_2];
+
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_3] =
+ scale_cfg->phase_step_x[DPU_SSPP_COMP_0];
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_3] =
+ scale_cfg->phase_step_y[DPU_SSPP_COMP_0];
+
+ for (i = 0; i < DPU_MAX_PLANES; i++) {
+ scale_cfg->src_width[i] = src_w;
+ scale_cfg->src_height[i] = src_h;
+ if (i == DPU_SSPP_COMP_1_2 || i == DPU_SSPP_COMP_2) {
+ scale_cfg->src_width[i] /= chroma_subsmpl_h;
+ scale_cfg->src_height[i] /= chroma_subsmpl_v;
+ }
+ scale_cfg->preload_x[i] = DPU_QSEED3_DEFAULT_PRELOAD_H;
+ scale_cfg->preload_y[i] = DPU_QSEED3_DEFAULT_PRELOAD_V;
+ pstate->pixel_ext.num_ext_pxls_top[i] =
+ scale_cfg->src_height[i];
+ pstate->pixel_ext.num_ext_pxls_left[i] =
+ scale_cfg->src_width[i];
+ }
+ if (!(DPU_FORMAT_IS_YUV(fmt)) && (src_h == dst_h)
+ && (src_w == dst_w))
+ return;
+
+ scale_cfg->dst_width = dst_w;
+ scale_cfg->dst_height = dst_h;
+ scale_cfg->y_rgb_filter_cfg = DPU_SCALE_BIL;
+ scale_cfg->uv_filter_cfg = DPU_SCALE_BIL;
+ scale_cfg->alpha_filter_cfg = DPU_SCALE_ALPHA_BIL;
+ scale_cfg->lut_flag = 0;
+ scale_cfg->blend_cfg = 1;
+ scale_cfg->enable = 1;
+}
+
+static inline void _dpu_plane_setup_csc(struct dpu_plane *pdpu)
+{
+ static const struct dpu_csc_cfg dpu_csc_YUV2RGB_601L = {
+ {
+ /* S15.16 format */
+ 0x00012A00, 0x00000000, 0x00019880,
+ 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+ 0x00012A00, 0x00020480, 0x00000000,
+ },
+ /* signed bias */
+ { 0xfff0, 0xff80, 0xff80,},
+ { 0x0, 0x0, 0x0,},
+ /* unsigned clamp */
+ { 0x10, 0xeb, 0x10, 0xf0, 0x10, 0xf0,},
+ { 0x00, 0xff, 0x00, 0xff, 0x00, 0xff,},
+ };
+ static const struct dpu_csc_cfg dpu_csc10_YUV2RGB_601L = {
+ {
+ /* S15.16 format */
+ 0x00012A00, 0x00000000, 0x00019880,
+ 0x00012A00, 0xFFFF9B80, 0xFFFF3000,
+ 0x00012A00, 0x00020480, 0x00000000,
+ },
+ /* signed bias */
+ { 0xffc0, 0xfe00, 0xfe00,},
+ { 0x0, 0x0, 0x0,},
+ /* unsigned clamp */
+ { 0x40, 0x3ac, 0x40, 0x3c0, 0x40, 0x3c0,},
+ { 0x00, 0x3ff, 0x00, 0x3ff, 0x00, 0x3ff,},
+ };
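+
+ /*
+ * The tables above encode the BT.601 limited-range YUV->RGB matrix in
+ * S15.16 fixed point: e.g. 0x00012A00 / 65536 == 1.164 (the luma
+ * scale) and 0x00019880 / 65536 ~= 1.596 (the Cr->R coefficient).
+ * Only the bias and clamp vectors differ between the 8-bit and 10-bit
+ * variants.
+ */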
+
+ if (!pdpu) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ }
+
+ if (BIT(DPU_SSPP_CSC_10BIT) & pdpu->features)
+ pdpu->csc_ptr = (struct dpu_csc_cfg *)&dpu_csc10_YUV2RGB_601L;
+ else
+ pdpu->csc_ptr = (struct dpu_csc_cfg *)&dpu_csc_YUV2RGB_601L;
+
+ DPU_DEBUG_PLANE(pdpu, "using 0x%X 0x%X 0x%X...\n",
+ pdpu->csc_ptr->csc_mv[0],
+ pdpu->csc_ptr->csc_mv[1],
+ pdpu->csc_ptr->csc_mv[2]);
+}
+
+static void _dpu_plane_setup_scaler(struct dpu_plane *pdpu,
+ struct dpu_plane_state *pstate,
+ const struct dpu_format *fmt, bool color_fill)
+{
+ struct dpu_hw_pixel_ext *pe;
+ uint32_t chroma_subsmpl_h, chroma_subsmpl_v;
+
+ if (!pdpu || !fmt || !pstate) {
+ DPU_ERROR("invalid arg(s), plane %d fmt %d state %d\n",
+ !!pdpu, !!fmt, !!pstate);
+ return;
+ }
+
+ pe = &pstate->pixel_ext;
+
+ /* don't chroma subsample if decimating */
+ chroma_subsmpl_h =
+ drm_format_horz_chroma_subsampling(fmt->base.pixel_format);
+ chroma_subsmpl_v =
+ drm_format_vert_chroma_subsampling(fmt->base.pixel_format);
+
+ /* update scaler. calculate default config for QSEED3 */
+ _dpu_plane_setup_scaler3(pdpu, pstate,
+ drm_rect_width(&pdpu->pipe_cfg.src_rect),
+ drm_rect_height(&pdpu->pipe_cfg.src_rect),
+ drm_rect_width(&pdpu->pipe_cfg.dst_rect),
+ drm_rect_height(&pdpu->pipe_cfg.dst_rect),
+ &pstate->scaler3_cfg, fmt,
+ chroma_subsmpl_h, chroma_subsmpl_v);
+}
+
+/**
+ * _dpu_plane_color_fill - enables color fill on plane
+ * @pdpu: Pointer to DPU plane object
+ * @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha: 8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+static int _dpu_plane_color_fill(struct dpu_plane *pdpu,
+ uint32_t color, uint32_t alpha)
+{
+ const struct dpu_format *fmt;
+ const struct drm_plane *plane;
+ struct dpu_plane_state *pstate;
+
+ if (!pdpu || !pdpu->base.state) {
+ DPU_ERROR("invalid plane\n");
+ return -EINVAL;
+ }
+
+ if (!pdpu->pipe_hw) {
+ DPU_ERROR_PLANE(pdpu, "invalid plane h/w pointer\n");
+ return -EINVAL;
+ }
+
+ plane = &pdpu->base;
+ pstate = to_dpu_plane_state(plane->state);
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ /*
+ * select fill format to match user property expectation,
+ * h/w only supports RGB variants
+ */
+ fmt = dpu_get_dpu_format(DRM_FORMAT_ABGR8888);
+
+ /* update sspp */
+ if (fmt && pdpu->pipe_hw->ops.setup_solidfill) {
+ pdpu->pipe_hw->ops.setup_solidfill(pdpu->pipe_hw,
+ (color & 0xFFFFFF) | ((alpha & 0xFF) << 24),
+ pstate->multirect_index);
+
+ /* override scaler/decimation if solid fill */
+ pdpu->pipe_cfg.src_rect.x1 = 0;
+ pdpu->pipe_cfg.src_rect.y1 = 0;
+ pdpu->pipe_cfg.src_rect.x2 =
+ drm_rect_width(&pdpu->pipe_cfg.dst_rect);
+ pdpu->pipe_cfg.src_rect.y2 =
+ drm_rect_height(&pdpu->pipe_cfg.dst_rect);
+ _dpu_plane_setup_scaler(pdpu, pstate, fmt, true);
+
+ if (pdpu->pipe_hw->ops.setup_format)
+ pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw,
+ fmt, DPU_SSPP_SOLID_FILL,
+ pstate->multirect_index);
+
+ if (pdpu->pipe_hw->ops.setup_rects)
+ pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw,
+ &pdpu->pipe_cfg,
+ pstate->multirect_index);
+
+ if (pdpu->pipe_hw->ops.setup_pe)
+ pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
+ &pstate->pixel_ext);
+
+ if (pdpu->pipe_hw->ops.setup_scaler &&
+ pstate->multirect_index != DPU_SSPP_RECT_1)
+ pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
+ &pdpu->pipe_cfg, &pstate->pixel_ext,
+ &pstate->scaler3_cfg);
+ }
+
+ return 0;
+}
+
+void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state)
+{
+ struct dpu_plane_state *pstate;
+
+ if (!drm_state)
+ return;
+
+ pstate = to_dpu_plane_state(drm_state);
+
+ pstate->multirect_index = DPU_SSPP_RECT_SOLO;
+ pstate->multirect_mode = DPU_SSPP_MULTIRECT_NONE;
+}
+
+int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane)
+{
+ struct dpu_plane_state *pstate[R_MAX];
+ const struct drm_plane_state *drm_state[R_MAX];
+ struct drm_rect src[R_MAX], dst[R_MAX];
+ struct dpu_plane *dpu_plane[R_MAX];
+ const struct dpu_format *fmt[R_MAX];
+ int i, buffer_lines;
+ unsigned int max_tile_height = 1;
+ bool parallel_fetch_qualified = true;
+ bool has_tiled_rect = false;
+
+ for (i = 0; i < R_MAX; i++) {
+ const struct msm_format *msm_fmt;
+
+ drm_state[i] = i ? plane->r1 : plane->r0;
+ msm_fmt = msm_framebuffer_format(drm_state[i]->fb);
+ fmt[i] = to_dpu_format(msm_fmt);
+
+ if (DPU_FORMAT_IS_UBWC(fmt[i])) {
+ has_tiled_rect = true;
+ if (fmt[i]->tile_height > max_tile_height)
+ max_tile_height = fmt[i]->tile_height;
+ }
+ }
+
+ for (i = 0; i < R_MAX; i++) {
+ int width_threshold;
+
+ pstate[i] = to_dpu_plane_state(drm_state[i]);
+ dpu_plane[i] = to_dpu_plane(drm_state[i]->plane);
+
+ if (pstate[i] == NULL) {
+ DPU_ERROR("DPU plane state of plane id %d is NULL\n",
+ drm_state[i]->plane->base.id);
+ return -EINVAL;
+ }
+
+ src[i].x1 = drm_state[i]->src_x >> 16;
+ src[i].y1 = drm_state[i]->src_y >> 16;
+ src[i].x2 = src[i].x1 + (drm_state[i]->src_w >> 16);
+ src[i].y2 = src[i].y1 + (drm_state[i]->src_h >> 16);
+
+ dst[i] = drm_plane_state_dest(drm_state[i]);
+
+ if (drm_rect_calc_hscale(&src[i], &dst[i], 1, 1) != 1 ||
+ drm_rect_calc_vscale(&src[i], &dst[i], 1, 1) != 1) {
+ DPU_ERROR_PLANE(dpu_plane[i],
+ "scaling is not supported in multirect mode\n");
+ return -EINVAL;
+ }
+
+ if (DPU_FORMAT_IS_YUV(fmt[i])) {
+ DPU_ERROR_PLANE(dpu_plane[i],
+ "Unsupported format for multirect mode\n");
+ return -EINVAL;
+ }
+
+ /*
+ * SSPP PD_MEM is split in half, one for each RECT.
+ * Tiled formats need 5 lines of buffering while fetching,
+ * whereas linear formats need only 2 lines.
+ * So we cannot support more than half of the supported SSPP
+ * width for tiled formats.
+ */
+ width_threshold = dpu_plane[i]->pipe_sblk->common->maxlinewidth;
+ if (has_tiled_rect)
+ width_threshold /= 2;
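+ /*
+ * e.g. with a hypothetical 2560-pixel max SSPP line width, a pair
+ * containing a tiled (UBWC) rect may only fetch in parallel when
+ * each source rect is at most 1280 pixels wide.
+ */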
+
+ if (parallel_fetch_qualified &&
+ drm_rect_width(&src[i]) > width_threshold)
+ parallel_fetch_qualified = false;
+
+ }
+
+ /* Validate RECT's and set the mode */
+
+ /* Prefer PARALLEL FETCH Mode over TIME_MX Mode */
+ if (parallel_fetch_qualified) {
+ pstate[R0]->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+ pstate[R1]->multirect_mode = DPU_SSPP_MULTIRECT_PARALLEL;
+
+ goto done;
+ }
+
+ /* TIME_MX Mode */
+ buffer_lines = 2 * max_tile_height;
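+ /*
+ * Time multiplexing is only possible when the two destination rects
+ * are vertically separated by at least the shared line-buffer depth,
+ * so the pipe finishes fetching one rect before the other begins.
+ */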
+
+ if (dst[R1].y1 >= dst[R0].y2 + buffer_lines ||
+ dst[R0].y1 >= dst[R1].y2 + buffer_lines) {
+ pstate[R0]->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+ pstate[R1]->multirect_mode = DPU_SSPP_MULTIRECT_TIME_MX;
+ } else {
+ DPU_ERROR(
+ "No multirect mode possible for the planes (%d - %d)\n",
+ drm_state[R0]->plane->base.id,
+ drm_state[R1]->plane->base.id);
+ return -EINVAL;
+ }
+
+done:
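+ /*
+ * The virtual plane always takes RECT_1 of the shared SSPP and its
+ * master takes RECT_0, independent of which plane state is R0 in
+ * this pair.
+ */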
+ if (dpu_plane[R0]->is_virtual) {
+ pstate[R0]->multirect_index = DPU_SSPP_RECT_1;
+ pstate[R1]->multirect_index = DPU_SSPP_RECT_0;
+ } else {
+ pstate[R0]->multirect_index = DPU_SSPP_RECT_0;
+ pstate[R1]->multirect_index = DPU_SSPP_RECT_1;
+ }
+
+ DPU_DEBUG_PLANE(dpu_plane[R0], "R0: %d - %d\n",
+ pstate[R0]->multirect_mode, pstate[R0]->multirect_index);
+ DPU_DEBUG_PLANE(dpu_plane[R1], "R1: %d - %d\n",
+ pstate[R1]->multirect_mode, pstate[R1]->multirect_index);
+ return 0;
+}
+
+/**
+ * dpu_plane_get_ctl_flush - get control flush for the given plane
+ * @plane: Pointer to drm plane structure
+ * @ctl: Pointer to hardware control driver
+ * @flush_sspp: Pointer to sspp flush control word
+ */
+void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
+ u32 *flush_sspp)
+{
+ if (!plane || !ctl || !flush_sspp) {
+ DPU_ERROR("invalid parameters\n");
+ return;
+ }
+
+ *flush_sspp = ctl->ops.get_bitmask_sspp(ctl, dpu_plane_pipe(plane));
+}
+
+static int dpu_plane_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *new_state)
+{
+ struct drm_framebuffer *fb = new_state->fb;
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_plane_state *pstate = to_dpu_plane_state(new_state);
+ struct dpu_hw_fmt_layout layout;
+ struct drm_gem_object *obj;
+ struct msm_gem_object *msm_obj;
+ struct dma_fence *fence;
+ struct msm_gem_address_space *aspace;
+ int ret;
+
+ if (!new_state->fb)
+ return 0;
+
+ DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", fb->base.id);
+
+ ret = _dpu_plane_get_aspace(pdpu, pstate, &aspace);
+ if (ret) {
+ DPU_ERROR_PLANE(pdpu, "Failed to get aspace\n");
+ return ret;
+ }
+
+ /* cache aspace */
+ pstate->aspace = aspace;
+
+ /*
+ * TODO: Need to sort out the msm_framebuffer_prepare() call below so
+ * we can use msm_atomic_prepare_fb() instead of doing the
+ * implicit fence and fb prepare by hand here.
+ */
+ obj = msm_framebuffer_bo(new_state->fb, 0);
+ msm_obj = to_msm_bo(obj);
+ fence = reservation_object_get_excl_rcu(msm_obj->resv);
+ if (fence)
+ drm_atomic_set_fence_for_plane(new_state, fence);
+
+ if (pstate->aspace) {
+ ret = msm_framebuffer_prepare(new_state->fb,
+ pstate->aspace);
+ if (ret) {
+ DPU_ERROR("failed to prepare framebuffer\n");
+ return ret;
+ }
+ }
+
+ /* validate framebuffer layout before commit */
+ ret = dpu_format_populate_layout(pstate->aspace,
+ new_state->fb, &layout);
+ if (ret) {
+ DPU_ERROR_PLANE(pdpu, "failed to get format layout, %d\n", ret);
+ return ret;
+ }
+
+ return 0;
+}
+
+static void dpu_plane_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct dpu_plane *pdpu = to_dpu_plane(plane);
+ struct dpu_plane_state *old_pstate;
+
+ if (!old_state || !old_state->fb)
+ return;
+
+ old_pstate = to_dpu_plane_state(old_state);
+
+ DPU_DEBUG_PLANE(pdpu, "FB[%u]\n", old_state->fb->base.id);
+
+ msm_framebuffer_cleanup(old_state->fb, old_pstate->aspace);
+}
+
+static bool dpu_plane_validate_src(struct drm_rect *src,
+ struct drm_rect *fb_rect,
+ uint32_t min_src_size)
+{
+ /* Ensure fb size is supported */
+ if (drm_rect_width(fb_rect) > MAX_IMG_WIDTH ||
+ drm_rect_height(fb_rect) > MAX_IMG_HEIGHT)
+ return false;
+
+ /* Ensure src rect is above the minimum size */
+ if (drm_rect_width(src) < min_src_size ||
+ drm_rect_height(src) < min_src_size)
+ return false;
+
+ /*
+ * Ensure src is fully contained in fb: drm_rect_intersect() clips
+ * fb_rect down to the overlap, so it only still equals src when
+ * src never left the framebuffer bounds.
+ */
+ return drm_rect_intersect(fb_rect, src) &&
+ drm_rect_equals(fb_rect, src);
+}
+
+static int dpu_plane_sspp_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ int ret = 0;
+ struct dpu_plane *pdpu;
+ struct dpu_plane_state *pstate;
+ const struct dpu_format *fmt;
+ struct drm_rect src, dst, fb_rect = { 0 };
+ uint32_t max_upscale = 1, max_downscale = 1;
+ uint32_t min_src_size, max_linewidth;
+ int hscale = 1, vscale = 1;
+
+ if (!plane || !state) {
+ DPU_ERROR("invalid arg(s), plane %d state %d\n",
+ !!plane, !!state);
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ pstate = to_dpu_plane_state(state);
+
+ if (!pdpu->pipe_sblk) {
+ DPU_ERROR_PLANE(pdpu, "invalid catalog\n");
+ ret = -EINVAL;
+ goto exit;
+ }
+
+ src.x1 = state->src_x >> 16;
+ src.y1 = state->src_y >> 16;
+ src.x2 = src.x1 + (state->src_w >> 16);
+ src.y2 = src.y1 + (state->src_h >> 16);
+
+ dst = drm_plane_state_dest(state);
+
+ fb_rect.x2 = state->fb->width;
+ fb_rect.y2 = state->fb->height;
+
+ max_linewidth = pdpu->pipe_sblk->common->maxlinewidth;
+
+ if (pdpu->features & DPU_SSPP_SCALER) {
+ max_downscale = pdpu->pipe_sblk->maxdwnscale;
+ max_upscale = pdpu->pipe_sblk->maxupscale;
+ }
+ if (drm_rect_width(&src) < drm_rect_width(&dst))
+ hscale = drm_rect_calc_hscale(&src, &dst, 1, max_upscale);
+ else
+ hscale = drm_rect_calc_hscale(&dst, &src, 1, max_downscale);
+ if (drm_rect_height(&src) < drm_rect_height(&dst))
+ vscale = drm_rect_calc_vscale(&src, &dst, 1, max_upscale);
+ else
+ vscale = drm_rect_calc_vscale(&dst, &src, 1, max_downscale);
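+ /*
+ * drm_rect_calc_{h,v}scale() returns a negative errno once the ratio
+ * falls outside the [min, max] bounds passed in, which the scaler
+ * capability check below maps to -E2BIG.
+ */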
+
+ DPU_DEBUG_PLANE(pdpu, "check %d -> %d\n",
+ dpu_plane_enabled(plane->state), dpu_plane_enabled(state));
+
+ if (!dpu_plane_enabled(state))
+ goto exit;
+
+ fmt = to_dpu_format(msm_framebuffer_format(state->fb));
+
+ min_src_size = DPU_FORMAT_IS_YUV(fmt) ? 2 : 1;
+
+ if (DPU_FORMAT_IS_YUV(fmt) &&
+ (!(pdpu->features & DPU_SSPP_SCALER) ||
+ !(pdpu->features & (BIT(DPU_SSPP_CSC)
+ | BIT(DPU_SSPP_CSC_10BIT))))) {
+ DPU_ERROR_PLANE(pdpu,
+ "plane doesn't have scaler/csc for yuv\n");
+ ret = -EINVAL;
+
+ /* check src bounds */
+ } else if (!dpu_plane_validate_src(&src, &fb_rect, min_src_size)) {
+ DPU_ERROR_PLANE(pdpu, "invalid source " DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&src));
+ ret = -E2BIG;
+
+ /* valid yuv image */
+ } else if (DPU_FORMAT_IS_YUV(fmt) &&
+ (src.x1 & 0x1 || src.y1 & 0x1 ||
+ drm_rect_width(&src) & 0x1 ||
+ drm_rect_height(&src) & 0x1)) {
+ DPU_ERROR_PLANE(pdpu, "invalid yuv source " DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&src));
+ ret = -EINVAL;
+
+ /* min dst support */
+ } else if (drm_rect_width(&dst) < 0x1 || drm_rect_height(&dst) < 0x1) {
+ DPU_ERROR_PLANE(pdpu, "invalid dest rect " DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&dst));
+ ret = -EINVAL;
+
+ /* check decimated source width */
+ } else if (drm_rect_width(&src) > max_linewidth) {
+ DPU_ERROR_PLANE(pdpu, "invalid src " DRM_RECT_FMT " line:%u\n",
+ DRM_RECT_ARG(&src), max_linewidth);
+ ret = -E2BIG;
+
+ /* check scaler capability */
+ } else if (hscale < 0 || vscale < 0) {
+ DPU_ERROR_PLANE(pdpu, "invalid scaling requested src="
+ DRM_RECT_FMT " dst=" DRM_RECT_FMT "\n",
+ DRM_RECT_ARG(&src), DRM_RECT_ARG(&dst));
+ ret = -E2BIG;
+ }
+
+exit:
+ return ret;
+}
+
+static int dpu_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ if (!state->fb)
+ return 0;
+
+ DPU_DEBUG_PLANE(to_dpu_plane(plane), "\n");
+
+ return dpu_plane_sspp_atomic_check(plane, state);
+}
+
+void dpu_plane_flush(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_plane_state *pstate;
+
+ if (!plane || !plane->state) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ pstate = to_dpu_plane_state(plane->state);
+
+ /*
+ * These updates have to be done immediately before the plane flush
+ * timing, and may not be moved to the atomic_update/mode_set functions.
+ */
+ if (pdpu->is_error)
+ /* force white frame with 100% alpha pipe output on error */
+ _dpu_plane_color_fill(pdpu, 0xFFFFFF, 0xFF);
+ else if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG)
+ /* force 100% alpha */
+ _dpu_plane_color_fill(pdpu, pdpu->color_fill, 0xFF);
+ else if (pdpu->pipe_hw && pdpu->csc_ptr && pdpu->pipe_hw->ops.setup_csc)
+ pdpu->pipe_hw->ops.setup_csc(pdpu->pipe_hw, pdpu->csc_ptr);
+
+ /* flag h/w flush complete */
+ if (plane->state)
+ pstate->pending = false;
+}
+
+/**
+ * dpu_plane_set_error: enable/disable error condition
+ * @plane: pointer to drm_plane structure
+ * @error: error value to set
+ */
+void dpu_plane_set_error(struct drm_plane *plane, bool error)
+{
+ struct dpu_plane *pdpu;
+
+ if (!plane)
+ return;
+
+ pdpu = to_dpu_plane(plane);
+ pdpu->is_error = error;
+}
+
+static int dpu_plane_sspp_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ uint32_t nplanes, src_flags;
+ struct dpu_plane *pdpu;
+ struct drm_plane_state *state;
+ struct dpu_plane_state *pstate;
+ struct dpu_plane_state *old_pstate;
+ const struct dpu_format *fmt;
+ struct drm_crtc *crtc;
+ struct drm_framebuffer *fb;
+ struct drm_rect src, dst;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return -EINVAL;
+ } else if (!plane->state) {
+ DPU_ERROR("invalid plane state\n");
+ return -EINVAL;
+ } else if (!old_state) {
+ DPU_ERROR("invalid old state\n");
+ return -EINVAL;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ state = plane->state;
+
+ pstate = to_dpu_plane_state(state);
+
+ old_pstate = to_dpu_plane_state(old_state);
+
+ crtc = state->crtc;
+ fb = state->fb;
+ if (!crtc || !fb) {
+ DPU_ERROR_PLANE(pdpu, "invalid crtc %d or fb %d\n",
+ !!crtc, !!fb);
+ return -EINVAL;
+ }
+ fmt = to_dpu_format(msm_framebuffer_format(fb));
+ nplanes = fmt->num_planes;
+
+ memset(&(pdpu->pipe_cfg), 0, sizeof(struct dpu_hw_pipe_cfg));
+
+ _dpu_plane_set_scanout(plane, pstate, &pdpu->pipe_cfg, fb);
+
+ pstate->pending = true;
+
+ pdpu->is_rt_pipe = (dpu_crtc_get_client_type(crtc) != NRT_CLIENT);
+ _dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
+
+ src.x1 = state->src_x >> 16;
+ src.y1 = state->src_y >> 16;
+ src.x2 = src.x1 + (state->src_w >> 16);
+ src.y2 = src.y1 + (state->src_h >> 16);
+
+ dst = drm_plane_state_dest(state);
+
+ DPU_DEBUG_PLANE(pdpu, "FB[%u] " DRM_RECT_FMT "->crtc%u " DRM_RECT_FMT
+ ", %4.4s ubwc %d\n", fb->base.id, DRM_RECT_ARG(&src),
+ crtc->base.id, DRM_RECT_ARG(&dst),
+ (char *)&fmt->base.pixel_format,
+ DPU_FORMAT_IS_UBWC(fmt));
+
+ pdpu->pipe_cfg.src_rect = src;
+ pdpu->pipe_cfg.dst_rect = dst;
+
+ _dpu_plane_setup_scaler(pdpu, pstate, fmt, false);
+
+ /* override for color fill */
+ if (pdpu->color_fill & DPU_PLANE_COLOR_FILL_FLAG) {
+ /* skip remaining processing on color fill */
+ return 0;
+ }
+
+ if (pdpu->pipe_hw->ops.setup_rects) {
+ pdpu->pipe_hw->ops.setup_rects(pdpu->pipe_hw,
+ &pdpu->pipe_cfg,
+ pstate->multirect_index);
+ }
+
+ if (pdpu->pipe_hw->ops.setup_pe &&
+ (pstate->multirect_index != DPU_SSPP_RECT_1))
+ pdpu->pipe_hw->ops.setup_pe(pdpu->pipe_hw,
+ &pstate->pixel_ext);
+
+ /*
+ * When programmed in multirect mode, the scaler block is bypassed.
+ * We still need to update alpha and bitwidth, but ONLY for RECT0.
+ */
+ if (pdpu->pipe_hw->ops.setup_scaler &&
+ pstate->multirect_index != DPU_SSPP_RECT_1)
+ pdpu->pipe_hw->ops.setup_scaler(pdpu->pipe_hw,
+ &pdpu->pipe_cfg, &pstate->pixel_ext,
+ &pstate->scaler3_cfg);
+
+ if (pdpu->pipe_hw->ops.setup_multirect)
+ pdpu->pipe_hw->ops.setup_multirect(
+ pdpu->pipe_hw,
+ pstate->multirect_index,
+ pstate->multirect_mode);
+
+ if (pdpu->pipe_hw->ops.setup_format) {
+ src_flags = 0x0;
+
+ /* update format */
+ pdpu->pipe_hw->ops.setup_format(pdpu->pipe_hw, fmt, src_flags,
+ pstate->multirect_index);
+
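+ /*
+ * CDP (client driven prefetch) follows the catalog's real-time
+ * read settings; metadata prefetch only applies to UBWC frames,
+ * and fetch amortization to UBWC/tiled layouts.
+ */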
+ if (pdpu->pipe_hw->ops.setup_cdp) {
+ struct dpu_hw_pipe_cdp_cfg *cdp_cfg = &pstate->cdp_cfg;
+
+ memset(cdp_cfg, 0, sizeof(struct dpu_hw_pipe_cdp_cfg));
+
+ cdp_cfg->enable = pdpu->catalog->perf.cdp_cfg
+ [DPU_PERF_CDP_USAGE_RT].rd_enable;
+ cdp_cfg->ubwc_meta_enable =
+ DPU_FORMAT_IS_UBWC(fmt);
+ cdp_cfg->tile_amortize_enable =
+ DPU_FORMAT_IS_UBWC(fmt) ||
+ DPU_FORMAT_IS_TILE(fmt);
+ cdp_cfg->preload_ahead = DPU_SSPP_CDP_PRELOAD_AHEAD_64;
+
+ pdpu->pipe_hw->ops.setup_cdp(pdpu->pipe_hw, cdp_cfg);
+ }
+
+ /* update csc */
+ if (DPU_FORMAT_IS_YUV(fmt))
+ _dpu_plane_setup_csc(pdpu);
+ else
+ pdpu->csc_ptr = NULL;
+ }
+
+ _dpu_plane_set_qos_lut(plane, fb);
+ _dpu_plane_set_danger_lut(plane, fb);
+
+ if (plane->type != DRM_PLANE_TYPE_CURSOR) {
+ _dpu_plane_set_qos_ctrl(plane, true, DPU_PLANE_QOS_PANIC_CTRL);
+ _dpu_plane_set_ot_limit(plane, crtc);
+ }
+
+ _dpu_plane_set_qos_remap(plane);
+ return 0;
+}
+
+static void _dpu_plane_atomic_disable(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct dpu_plane *pdpu;
+ struct drm_plane_state *state;
+ struct dpu_plane_state *pstate;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ } else if (!plane->state) {
+ DPU_ERROR("invalid plane state\n");
+ return;
+ } else if (!old_state) {
+ DPU_ERROR("invalid old state\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ state = plane->state;
+ pstate = to_dpu_plane_state(state);
+
+ trace_dpu_plane_disable(DRMID(plane), is_dpu_plane_virtual(plane),
+ pstate->multirect_mode);
+
+ pstate->pending = true;
+
+ if (is_dpu_plane_virtual(plane) &&
+ pdpu->pipe_hw && pdpu->pipe_hw->ops.setup_multirect)
+ pdpu->pipe_hw->ops.setup_multirect(pdpu->pipe_hw,
+ DPU_SSPP_RECT_SOLO, DPU_SSPP_MULTIRECT_NONE);
+}
+
+static void dpu_plane_atomic_update(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct dpu_plane *pdpu;
+ struct drm_plane_state *state;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ } else if (!plane->state) {
+ DPU_ERROR("invalid plane state\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ pdpu->is_error = false;
+ state = plane->state;
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ if (!dpu_plane_sspp_enabled(state)) {
+ _dpu_plane_atomic_disable(plane, old_state);
+ } else {
+ int ret;
+
+ ret = dpu_plane_sspp_atomic_update(plane, old_state);
+ /* atomic_check should have ensured that this doesn't fail */
+ WARN_ON(ret < 0);
+ }
+}
+
+void dpu_plane_restore(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+
+ if (!plane || !plane->state) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ /* last plane state is same as current state */
+ dpu_plane_atomic_update(plane, plane->state);
+}
+
+static void dpu_plane_destroy(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu = plane ? to_dpu_plane(plane) : NULL;
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ if (pdpu) {
+ _dpu_plane_set_qos_ctrl(plane, false, DPU_PLANE_QOS_PANIC_CTRL);
+
+ mutex_destroy(&pdpu->lock);
+
+ drm_plane_helper_disable(plane, NULL);
+
+ /* this will destroy the states as well */
+ drm_plane_cleanup(plane);
+
+ if (pdpu->pipe_hw)
+ dpu_hw_sspp_destroy(pdpu->pipe_hw);
+
+ kfree(pdpu);
+ }
+}
+
+static void dpu_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct dpu_plane_state *pstate;
+
+ if (!plane || !state) {
+ DPU_ERROR("invalid arg(s), plane %d state %d\n",
+ !!plane, !!state);
+ return;
+ }
+
+ pstate = to_dpu_plane_state(state);
+
+ /* remove ref count for frame buffers */
+ if (state->fb)
+ drm_framebuffer_put(state->fb);
+
+ kfree(pstate);
+}
+
+static struct drm_plane_state *
+dpu_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_plane_state *pstate;
+ struct dpu_plane_state *old_state;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return NULL;
+ } else if (!plane->state) {
+ DPU_ERROR("invalid plane state\n");
+ return NULL;
+ }
+
+ old_state = to_dpu_plane_state(plane->state);
+ pdpu = to_dpu_plane(plane);
+ pstate = kmemdup(old_state, sizeof(*old_state), GFP_KERNEL);
+ if (!pstate) {
+ DPU_ERROR_PLANE(pdpu, "failed to allocate state\n");
+ return NULL;
+ }
+
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ pstate->pending = false;
+
+ __drm_atomic_helper_plane_duplicate_state(plane, &pstate->base);
+
+ return &pstate->base;
+}
+
+static void dpu_plane_reset(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_plane_state *pstate;
+
+ if (!plane) {
+ DPU_ERROR("invalid plane\n");
+ return;
+ }
+
+ pdpu = to_dpu_plane(plane);
+ DPU_DEBUG_PLANE(pdpu, "\n");
+
+ /* remove previous state, if present */
+ if (plane->state) {
+ dpu_plane_destroy_state(plane, plane->state);
+ plane->state = NULL;
+ }
+
+ pstate = kzalloc(sizeof(*pstate), GFP_KERNEL);
+ if (!pstate) {
+ DPU_ERROR_PLANE(pdpu, "failed to allocate state\n");
+ return;
+ }
+
+ pstate->base.plane = plane;
+
+ plane->state = &pstate->base;
+}
+
+#ifdef CONFIG_DEBUG_FS
+static ssize_t _dpu_plane_danger_read(struct file *file,
+ char __user *buff, size_t count, loff_t *ppos)
+{
+ struct dpu_kms *kms = file->private_data;
+ struct dpu_mdss_cfg *cfg = kms->catalog;
+ int len = 0;
+ char buf[40] = {'\0'};
+
+ if (!cfg)
+ return -ENODEV;
+
+ if (*ppos)
+ return 0; /* the end */
+
+ len = snprintf(buf, sizeof(buf), "%d\n", !kms->has_danger_ctrl);
+ if (len < 0 || len >= sizeof(buf))
+ return 0;
+
+ if ((count < sizeof(buf)) || copy_to_user(buff, buf, len))
+ return -EFAULT;
+
+ *ppos += len; /* increase offset */
+
+ return len;
+}
+
+static void _dpu_plane_set_danger_state(struct dpu_kms *kms, bool enable)
+{
+ struct drm_plane *plane;
+
+ drm_for_each_plane(plane, kms->dev) {
+ if (plane->fb && plane->state) {
+ dpu_plane_danger_signal_ctrl(plane, enable);
+ DPU_DEBUG("plane:%d img:%dx%d ",
+ plane->base.id, plane->fb->width,
+ plane->fb->height);
+ DPU_DEBUG("src[%d,%d,%d,%d] dst[%d,%d,%d,%d]\n",
+ plane->state->src_x >> 16,
+ plane->state->src_y >> 16,
+ plane->state->src_w >> 16,
+ plane->state->src_h >> 16,
+ plane->state->crtc_x, plane->state->crtc_y,
+ plane->state->crtc_w, plane->state->crtc_h);
+ } else {
+ DPU_DEBUG("Inactive plane:%d\n", plane->base.id);
+ }
+ }
+}
+
+static ssize_t _dpu_plane_danger_write(struct file *file,
+ const char __user *user_buf, size_t count, loff_t *ppos)
+{
+ struct dpu_kms *kms = file->private_data;
+ struct dpu_mdss_cfg *cfg = kms->catalog;
+ int disable_panic;
+ char buf[10];
+
+ if (!cfg)
+ return -EFAULT;
+
+ if (count >= sizeof(buf))
+ return -EFAULT;
+
+ if (copy_from_user(buf, user_buf, count))
+ return -EFAULT;
+
+ buf[count] = 0; /* end of string */
+
+ if (kstrtoint(buf, 0, &disable_panic))
+ return -EFAULT;
+
+ if (disable_panic) {
+ /* Disable panic signal for all active pipes */
+ DPU_DEBUG("Disabling danger:\n");
+ _dpu_plane_set_danger_state(kms, false);
+ kms->has_danger_ctrl = false;
+ } else {
+ /* Enable panic signal for all active pipes */
+ DPU_DEBUG("Enabling danger:\n");
+ kms->has_danger_ctrl = true;
+ _dpu_plane_set_danger_state(kms, true);
+ }
+
+ return count;
+}
+
+static const struct file_operations dpu_plane_danger_enable = {
+ .open = simple_open,
+ .read = _dpu_plane_danger_read,
+ .write = _dpu_plane_danger_write,
+};
+
+static int _dpu_plane_init_debugfs(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+ struct dpu_kms *kms;
+ struct msm_drm_private *priv;
+ const struct dpu_sspp_sub_blks *sblk = NULL;
+ const struct dpu_sspp_cfg *cfg = NULL;
+
+ if (!plane || !plane->dev) {
+ DPU_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ priv = plane->dev->dev_private;
+ if (!priv || !priv->kms) {
+ DPU_ERROR("invalid KMS reference\n");
+ return -EINVAL;
+ }
+
+ kms = to_dpu_kms(priv->kms);
+ pdpu = to_dpu_plane(plane);
+
+ if (pdpu && pdpu->pipe_hw)
+ cfg = pdpu->pipe_hw->cap;
+ if (cfg)
+ sblk = cfg->sblk;
+
+ if (!sblk)
+ return 0;
+
+ /* create overall sub-directory for the pipe */
+ pdpu->debugfs_root =
+ debugfs_create_dir(pdpu->pipe_name,
+ plane->dev->primary->debugfs_root);
+
+ if (!pdpu->debugfs_root)
+ return -ENOMEM;
+
+ /* don't error check these */
+ debugfs_create_x32("features", 0600,
+ pdpu->debugfs_root, &pdpu->features);
+
+ /* add register dump support */
+ dpu_debugfs_setup_regset32(&pdpu->debugfs_src,
+ sblk->src_blk.base + cfg->base,
+ sblk->src_blk.len,
+ kms);
+ dpu_debugfs_create_regset32("src_blk", 0400,
+ pdpu->debugfs_root, &pdpu->debugfs_src);
+
+ if (cfg->features & BIT(DPU_SSPP_SCALER_QSEED3) ||
+ cfg->features & BIT(DPU_SSPP_SCALER_QSEED2)) {
+ dpu_debugfs_setup_regset32(&pdpu->debugfs_scaler,
+ sblk->scaler_blk.base + cfg->base,
+ sblk->scaler_blk.len,
+ kms);
+ dpu_debugfs_create_regset32("scaler_blk", 0400,
+ pdpu->debugfs_root,
+ &pdpu->debugfs_scaler);
+ debugfs_create_bool("default_scaling",
+ 0600,
+ pdpu->debugfs_root,
+ &pdpu->debugfs_default_scale);
+ }
+
+ if (cfg->features & BIT(DPU_SSPP_CSC) ||
+ cfg->features & BIT(DPU_SSPP_CSC_10BIT)) {
+ dpu_debugfs_setup_regset32(&pdpu->debugfs_csc,
+ sblk->csc_blk.base + cfg->base,
+ sblk->csc_blk.len,
+ kms);
+ dpu_debugfs_create_regset32("csc_blk", 0400,
+ pdpu->debugfs_root, &pdpu->debugfs_csc);
+ }
+
+ debugfs_create_u32("xin_id",
+ 0400,
+ pdpu->debugfs_root,
+ (u32 *) &cfg->xin_id);
+ debugfs_create_u32("clk_ctrl",
+ 0400,
+ pdpu->debugfs_root,
+ (u32 *) &cfg->clk_ctrl);
+ debugfs_create_x32("creq_vblank",
+ 0600,
+ pdpu->debugfs_root,
+ (u32 *) &sblk->creq_vblank);
+ debugfs_create_x32("danger_vblank",
+ 0600,
+ pdpu->debugfs_root,
+ (u32 *) &sblk->danger_vblank);
+
+ debugfs_create_file("disable_danger",
+ 0600,
+ pdpu->debugfs_root,
+ kms, &dpu_plane_danger_enable);
+
+ return 0;
+}
+
+static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
+{
+ struct dpu_plane *pdpu;
+
+ if (!plane)
+ return;
+ pdpu = to_dpu_plane(plane);
+
+ debugfs_remove_recursive(pdpu->debugfs_root);
+}
+#else
+static int _dpu_plane_init_debugfs(struct drm_plane *plane)
+{
+ return 0;
+}
+static void _dpu_plane_destroy_debugfs(struct drm_plane *plane)
+{
+}
+#endif
+
+static int dpu_plane_late_register(struct drm_plane *plane)
+{
+ return _dpu_plane_init_debugfs(plane);
+}
+
+static void dpu_plane_early_unregister(struct drm_plane *plane)
+{
+ _dpu_plane_destroy_debugfs(plane);
+}
+
+static const struct drm_plane_funcs dpu_plane_funcs = {
+ .update_plane = drm_atomic_helper_update_plane,
+ .disable_plane = drm_atomic_helper_disable_plane,
+ .destroy = dpu_plane_destroy,
+ .reset = dpu_plane_reset,
+ .atomic_duplicate_state = dpu_plane_duplicate_state,
+ .atomic_destroy_state = dpu_plane_destroy_state,
+ .late_register = dpu_plane_late_register,
+ .early_unregister = dpu_plane_early_unregister,
+};
+
+static const struct drm_plane_helper_funcs dpu_plane_helper_funcs = {
+ .prepare_fb = dpu_plane_prepare_fb,
+ .cleanup_fb = dpu_plane_cleanup_fb,
+ .atomic_check = dpu_plane_atomic_check,
+ .atomic_update = dpu_plane_atomic_update,
+};
+
+enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane)
+{
+ return plane ? to_dpu_plane(plane)->pipe : SSPP_NONE;
+}
+
+bool is_dpu_plane_virtual(struct drm_plane *plane)
+{
+ return plane ? to_dpu_plane(plane)->is_virtual : false;
+}
+
+/* initialize plane */
+struct drm_plane *dpu_plane_init(struct drm_device *dev,
+ uint32_t pipe, bool primary_plane,
+ unsigned long possible_crtcs, u32 master_plane_id)
+{
+ struct drm_plane *plane = NULL, *master_plane = NULL;
+ const struct dpu_format_extended *format_list;
+ struct dpu_plane *pdpu;
+ struct msm_drm_private *priv;
+ struct dpu_kms *kms;
+ enum drm_plane_type type;
+ int zpos_max = DPU_ZPOS_MAX;
+ int ret = -EINVAL;
+
+ if (!dev) {
+ DPU_ERROR("[%u]device is NULL\n", pipe);
+ goto exit;
+ }
+
+ priv = dev->dev_private;
+ if (!priv) {
+ DPU_ERROR("[%u]private data is NULL\n", pipe);
+ goto exit;
+ }
+
+ if (!priv->kms) {
+ DPU_ERROR("[%u]invalid KMS reference\n", pipe);
+ goto exit;
+ }
+ kms = to_dpu_kms(priv->kms);
+
+ if (!kms->catalog) {
+ DPU_ERROR("[%u]invalid catalog reference\n", pipe);
+ goto exit;
+ }
+
+ /* create and zero local structure */
+ pdpu = kzalloc(sizeof(*pdpu), GFP_KERNEL);
+ if (!pdpu) {
+ DPU_ERROR("[%u]failed to allocate local plane struct\n", pipe);
+ ret = -ENOMEM;
+ goto exit;
+ }
+
+ /* cache local stuff for later */
+ plane = &pdpu->base;
+ pdpu->pipe = pipe;
+ pdpu->is_virtual = (master_plane_id != 0);
+ INIT_LIST_HEAD(&pdpu->mplane_list);
+ master_plane = drm_plane_find(dev, NULL, master_plane_id);
+ if (master_plane) {
+ struct dpu_plane *mpdpu = to_dpu_plane(master_plane);
+
+ list_add_tail(&pdpu->mplane_list, &mpdpu->mplane_list);
+ }
+
+ /* initialize underlying h/w driver */
+ pdpu->pipe_hw = dpu_hw_sspp_init(pipe, kms->mmio, kms->catalog,
+ master_plane_id != 0);
+ if (IS_ERR(pdpu->pipe_hw)) {
+ DPU_ERROR("[%u]SSPP init failed\n", pipe);
+ ret = PTR_ERR(pdpu->pipe_hw);
+ goto clean_plane;
+ } else if (!pdpu->pipe_hw->cap || !pdpu->pipe_hw->cap->sblk) {
+ DPU_ERROR("[%u]SSPP init returned invalid cfg\n", pipe);
+ goto clean_sspp;
+ }
+
+ /* cache features mask for later */
+ pdpu->features = pdpu->pipe_hw->cap->features;
+ pdpu->pipe_sblk = pdpu->pipe_hw->cap->sblk;
+ if (!pdpu->pipe_sblk) {
+ DPU_ERROR("[%u]invalid sblk\n", pipe);
+ goto clean_sspp;
+ }
+
+ if (!master_plane_id)
+ format_list = pdpu->pipe_sblk->format_list;
+ else
+ format_list = pdpu->pipe_sblk->virt_format_list;
+
+ pdpu->nformats = dpu_populate_formats(format_list,
+ pdpu->formats,
+ 0,
+ ARRAY_SIZE(pdpu->formats));
+
+ if (!pdpu->nformats) {
+ DPU_ERROR("[%u]no valid formats for plane\n", pipe);
+ goto clean_sspp;
+ }
+
+ if (pdpu->features & BIT(DPU_SSPP_CURSOR))
+ type = DRM_PLANE_TYPE_CURSOR;
+ else if (primary_plane)
+ type = DRM_PLANE_TYPE_PRIMARY;
+ else
+ type = DRM_PLANE_TYPE_OVERLAY;
+ ret = drm_universal_plane_init(dev, plane, 0xff, &dpu_plane_funcs,
+ pdpu->formats, pdpu->nformats,
+ NULL, type, NULL);
+ if (ret)
+ goto clean_sspp;
+
+ pdpu->catalog = kms->catalog;
+
+ if (kms->catalog->mixer_count &&
+ kms->catalog->mixer[0].sblk->maxblendstages) {
+ zpos_max = kms->catalog->mixer[0].sblk->maxblendstages - 1;
+ if (zpos_max > DPU_STAGE_MAX - DPU_STAGE_0 - 1)
+ zpos_max = DPU_STAGE_MAX - DPU_STAGE_0 - 1;
+ }
+
+ ret = drm_plane_create_zpos_property(plane, 0, 0, zpos_max);
+ if (ret)
+ DPU_ERROR("failed to install zpos property, rc = %d\n", ret);
+
+ /* success! finalize initialization */
+ drm_plane_helper_add(plane, &dpu_plane_helper_funcs);
+
+ /* save user friendly pipe name for later */
+ snprintf(pdpu->pipe_name, DPU_NAME_SIZE, "plane%u", plane->base.id);
+
+ mutex_init(&pdpu->lock);
+
+ DPU_DEBUG("%s created for pipe:%u id:%u virtual:%u\n", pdpu->pipe_name,
+ pipe, plane->base.id, master_plane_id);
+ return plane;
+
+clean_sspp:
+ if (pdpu && pdpu->pipe_hw)
+ dpu_hw_sspp_destroy(pdpu->pipe_hw);
+clean_plane:
+ kfree(pdpu);
+exit:
+ return ERR_PTR(ret);
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
new file mode 100644
index 000000000000..f6fe6ddc7a3a
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_plane.h
@@ -0,0 +1,175 @@
+/*
+ * Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ * Copyright (C) 2013 Red Hat
+ * Author: Rob Clark <robdclark@gmail.com>
+ *
+ * This program is free software; you can redistribute it and/or modify it
+ * under the terms of the GNU General Public License version 2 as published by
+ * the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful, but WITHOUT
+ * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
+ * FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for
+ * more details.
+ *
+ * You should have received a copy of the GNU General Public License along with
+ * this program. If not, see <http://www.gnu.org/licenses/>.
+ */
+
+#ifndef _DPU_PLANE_H_
+#define _DPU_PLANE_H_
+
+#include <drm/drm_crtc.h>
+
+#include "dpu_kms.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_sspp.h"
+
+/**
+ * struct dpu_plane_state: Defines dpu extension of drm plane state object
+ * @base: base drm plane state object
+ * @aspace: pointer to address space for input/output buffers
+ * @input_fence: dereferenced input fence pointer
+ * @stage: assigned by crtc blender
+ * @multirect_index: index of the rectangle of SSPP
+ * @multirect_mode: parallel or time multiplex multirect mode
+ * @pending: whether the current update is still pending
+ * @scaler3_cfg: configuration data for scaler3
+ * @pixel_ext: configuration data for pixel extensions
+ * @cdp_cfg: CDP configuration
+ */
+struct dpu_plane_state {
+ struct drm_plane_state base;
+ struct msm_gem_address_space *aspace;
+ void *input_fence;
+ enum dpu_stage stage;
+ uint32_t multirect_index;
+ uint32_t multirect_mode;
+ bool pending;
+
+ /* scaler configuration */
+ struct dpu_hw_scaler3_cfg scaler3_cfg;
+ struct dpu_hw_pixel_ext pixel_ext;
+
+ struct dpu_hw_pipe_cdp_cfg cdp_cfg;
+};
+
+/**
+ * struct dpu_multirect_plane_states: Defines multirect pair of drm plane states
+ * @r0: drm plane state configured on rect 0
+ * @r1: drm plane state configured on rect 1
+ */
+struct dpu_multirect_plane_states {
+ const struct drm_plane_state *r0;
+ const struct drm_plane_state *r1;
+};
+
+#define to_dpu_plane_state(x) \
+ container_of(x, struct dpu_plane_state, base)
+
+/**
+ * dpu_plane_pipe - return sspp identifier for the given plane
+ * @plane: Pointer to DRM plane object
+ * Returns: sspp identifier of the given plane
+ */
+enum dpu_sspp dpu_plane_pipe(struct drm_plane *plane);
+
+/**
+ * is_dpu_plane_virtual - check for virtual plane
+ * @plane: Pointer to DRM plane object
+ * returns: true - if the plane is virtual
+ * false - if the plane is primary
+ */
+bool is_dpu_plane_virtual(struct drm_plane *plane);
+
+/**
+ * dpu_plane_get_ctl_flush - get control flush mask
+ * @plane: Pointer to DRM plane object
+ * @ctl: Pointer to control hardware
+ * @flush_sspp: Pointer to sspp flush control word
+ */
+void dpu_plane_get_ctl_flush(struct drm_plane *plane, struct dpu_hw_ctl *ctl,
+ u32 *flush_sspp);
+
+/**
+ * dpu_plane_restore - restore hw state if previously power collapsed
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_restore(struct drm_plane *plane);
+
+/**
+ * dpu_plane_flush - final plane operations before commit flush
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_flush(struct drm_plane *plane);
+
+/**
+ * dpu_plane_kickoff - final plane operations before commit kickoff
+ * @plane: Pointer to drm plane structure
+ */
+void dpu_plane_kickoff(struct drm_plane *plane);
+
+/**
+ * dpu_plane_set_error: enable/disable error condition
+ * @plane: pointer to drm_plane structure
+ * @error: error value to set
+ */
+void dpu_plane_set_error(struct drm_plane *plane, bool error);
+
+/**
+ * dpu_plane_init - create new dpu plane for the given pipe
+ * @dev: Pointer to DRM device
+ * @pipe: dpu hardware pipe identifier
+ * @primary_plane: true if this pipe is primary plane for crtc
+ * @possible_crtcs: bitmask of crtc that can be attached to the given pipe
+ * @master_plane_id: primary plane id of a multirect pipe. 0 value passed for
+ * a regular plane initialization. A non-zero primary plane
+ * id will be passed for a virtual pipe initialization.
+ *
+ */
+struct drm_plane *dpu_plane_init(struct drm_device *dev,
+ uint32_t pipe, bool primary_plane,
+ unsigned long possible_crtcs, u32 master_plane_id);
+
+/**
+ * dpu_plane_validate_multirect_v2 - validate the multirect planes
+ * against hw limitations
+ * @plane: drm plane states of the multirect pair
+ */
+int dpu_plane_validate_multirect_v2(struct dpu_multirect_plane_states *plane);
+
+/**
+ * dpu_plane_clear_multirect - clear multirect bits for the given pipe
+ * @drm_state: Pointer to DRM plane state
+ */
+void dpu_plane_clear_multirect(const struct drm_plane_state *drm_state);
+
+/**
+ * dpu_plane_wait_input_fence - wait for input fence object
+ * @plane: Pointer to DRM plane object
+ * @wait_ms: Wait timeout value
+ * Returns: Zero on success
+ */
+int dpu_plane_wait_input_fence(struct drm_plane *plane, uint32_t wait_ms);
+
+/**
+ * dpu_plane_color_fill - enables color fill on plane
+ * @plane: Pointer to DRM plane object
+ * @color: RGB fill color value, [23..16] Blue, [15..8] Green, [7..0] Red
+ * @alpha: 8-bit fill alpha value, 255 selects 100% alpha
+ * Returns: 0 on success
+ */
+int dpu_plane_color_fill(struct drm_plane *plane,
+ uint32_t color, uint32_t alpha);
+
+/**
+ * dpu_plane_set_revalidate - sets revalidate flag which forces a full
+ * validation of the plane properties in the next atomic check
+ * @plane: Pointer to DRM plane object
+ * @enable: Boolean to set/unset the flag
+ */
+void dpu_plane_set_revalidate(struct drm_plane *plane, bool enable);
+
+#endif /* _DPU_PLANE_H_ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
new file mode 100644
index 000000000000..a75eebca2f37
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.c
@@ -0,0 +1,249 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d]: " fmt, __func__, __LINE__
+
+#include <linux/kernel.h>
+#include <linux/of.h>
+#include <linux/string.h>
+#include <linux/of_address.h>
+#include <linux/slab.h>
+#include <linux/mutex.h>
+#include <linux/of_platform.h>
+
+#include "dpu_power_handle.h"
+#include "dpu_trace.h"
+
+static const char *data_bus_name[DPU_POWER_HANDLE_DBUS_ID_MAX] = {
+ [DPU_POWER_HANDLE_DBUS_ID_MNOC] = "qcom,dpu-data-bus",
+ [DPU_POWER_HANDLE_DBUS_ID_LLCC] = "qcom,dpu-llcc-bus",
+ [DPU_POWER_HANDLE_DBUS_ID_EBI] = "qcom,dpu-ebi-bus",
+};
+
+const char *dpu_power_handle_get_dbus_name(u32 bus_id)
+{
+ if (bus_id < DPU_POWER_HANDLE_DBUS_ID_MAX)
+ return data_bus_name[bus_id];
+
+ return NULL;
+}
+
+static void dpu_power_event_trigger_locked(struct dpu_power_handle *phandle,
+ u32 event_type)
+{
+ struct dpu_power_event *event;
+
+ list_for_each_entry(event, &phandle->event_list, list) {
+ if (event->event_type & event_type)
+ event->cb_fnc(event_type, event->usr);
+ }
+}
+
+struct dpu_power_client *dpu_power_client_create(
+ struct dpu_power_handle *phandle, char *client_name)
+{
+ struct dpu_power_client *client;
+ static u32 id;
+
+ if (!client_name || !phandle) {
+ pr_err("client name is null or invalid power data\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ client = kzalloc(sizeof(struct dpu_power_client), GFP_KERNEL);
+ if (!client)
+ return ERR_PTR(-ENOMEM);
+
+ mutex_lock(&phandle->phandle_lock);
+ strlcpy(client->name, client_name, MAX_CLIENT_NAME_LEN);
+ client->usecase_ndx = VOTE_INDEX_DISABLE;
+ client->id = id;
+ client->active = true;
+ pr_debug("client %s created:%pK id :%d\n", client_name,
+ client, id);
+ id++;
+ list_add(&client->list, &phandle->power_client_clist);
+ mutex_unlock(&phandle->phandle_lock);
+
+ return client;
+}
+
+void dpu_power_client_destroy(struct dpu_power_handle *phandle,
+ struct dpu_power_client *client)
+{
+ if (!client || !phandle) {
+ pr_err("reg bus vote: invalid client handle\n");
+ } else if (!client->active) {
+ pr_err("dpu power deinit already done\n");
+ kfree(client);
+ } else {
+ pr_debug("bus vote client %s destroyed:%pK id:%u\n",
+ client->name, client, client->id);
+ mutex_lock(&phandle->phandle_lock);
+ list_del_init(&client->list);
+ mutex_unlock(&phandle->phandle_lock);
+ kfree(client);
+ }
+}
+
+void dpu_power_resource_init(struct platform_device *pdev,
+ struct dpu_power_handle *phandle)
+{
+ phandle->dev = &pdev->dev;
+
+ INIT_LIST_HEAD(&phandle->power_client_clist);
+ INIT_LIST_HEAD(&phandle->event_list);
+
+ mutex_init(&phandle->phandle_lock);
+}
+
+void dpu_power_resource_deinit(struct platform_device *pdev,
+ struct dpu_power_handle *phandle)
+{
+ struct dpu_power_client *curr_client, *next_client;
+ struct dpu_power_event *curr_event, *next_event;
+
+ if (!phandle || !pdev) {
+ pr_err("invalid input param\n");
+ return;
+ }
+
+ mutex_lock(&phandle->phandle_lock);
+ list_for_each_entry_safe(curr_client, next_client,
+ &phandle->power_client_clist, list) {
+ pr_err("client:%s-%d still registered with refcount:%d\n",
+ curr_client->name, curr_client->id,
+ curr_client->refcount);
+ curr_client->active = false;
+ list_del(&curr_client->list);
+ }
+
+ list_for_each_entry_safe(curr_event, next_event,
+ &phandle->event_list, list) {
+ pr_err("event:%d, client:%s still registered\n",
+ curr_event->event_type,
+ curr_event->client_name);
+ curr_event->active = false;
+ list_del(&curr_event->list);
+ }
+ mutex_unlock(&phandle->phandle_lock);
+}
+
+int dpu_power_resource_enable(struct dpu_power_handle *phandle,
+ struct dpu_power_client *pclient, bool enable)
+{
+ bool changed = false;
+ u32 max_usecase_ndx = VOTE_INDEX_DISABLE, prev_usecase_ndx;
+ struct dpu_power_client *client;
+
+ if (!phandle || !pclient) {
+ pr_err("invalid input argument\n");
+ return -EINVAL;
+ }
+
+ mutex_lock(&phandle->phandle_lock);
+ if (enable)
+ pclient->refcount++;
+ else if (pclient->refcount)
+ pclient->refcount--;
+
+ if (pclient->refcount)
+ pclient->usecase_ndx = VOTE_INDEX_LOW;
+ else
+ pclient->usecase_ndx = VOTE_INDEX_DISABLE;
+
+ list_for_each_entry(client, &phandle->power_client_clist, list) {
+ if (client->usecase_ndx < VOTE_INDEX_MAX &&
+ client->usecase_ndx > max_usecase_ndx)
+ max_usecase_ndx = client->usecase_ndx;
+ }
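+
+ /*
+ * Vote at the highest use case any active client still requests:
+ * a single client at VOTE_INDEX_LOW keeps the resources enabled
+ * even when every other client has voted VOTE_INDEX_DISABLE.
+ */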
+
+ if (phandle->current_usecase_ndx != max_usecase_ndx) {
+ changed = true;
+ prev_usecase_ndx = phandle->current_usecase_ndx;
+ phandle->current_usecase_ndx = max_usecase_ndx;
+ }
+
+ pr_debug("%pS: changed=%d current idx=%d request client %s id:%u enable:%d refcount:%d\n",
+ __builtin_return_address(0), changed, max_usecase_ndx,
+ pclient->name, pclient->id, enable, pclient->refcount);
+
+ if (!changed)
+ goto end;
+
+ if (enable) {
+ dpu_power_event_trigger_locked(phandle,
+ DPU_POWER_EVENT_PRE_ENABLE);
+ dpu_power_event_trigger_locked(phandle,
+ DPU_POWER_EVENT_POST_ENABLE);
+
+ } else {
+ dpu_power_event_trigger_locked(phandle,
+ DPU_POWER_EVENT_PRE_DISABLE);
+ dpu_power_event_trigger_locked(phandle,
+ DPU_POWER_EVENT_POST_DISABLE);
+ }
+
+end:
+ mutex_unlock(&phandle->phandle_lock);
+ return 0;
+}
+
+struct dpu_power_event *dpu_power_handle_register_event(
+ struct dpu_power_handle *phandle,
+ u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
+ void *usr, char *client_name)
+{
+ struct dpu_power_event *event;
+
+ if (!phandle) {
+ pr_err("invalid power handle\n");
+ return ERR_PTR(-EINVAL);
+ } else if (!cb_fnc || !event_type) {
+ pr_err("no callback fnc or event type\n");
+ return ERR_PTR(-EINVAL);
+ }
+
+ event = kzalloc(sizeof(struct dpu_power_event), GFP_KERNEL);
+ if (!event)
+ return ERR_PTR(-ENOMEM);
+
+ event->event_type = event_type;
+ event->cb_fnc = cb_fnc;
+ event->usr = usr;
+ strlcpy(event->client_name, client_name, MAX_CLIENT_NAME_LEN);
+ event->active = true;
+
+ mutex_lock(&phandle->phandle_lock);
+ list_add(&event->list, &phandle->event_list);
+ mutex_unlock(&phandle->phandle_lock);
+
+ return event;
+}
+
+void dpu_power_handle_unregister_event(
+ struct dpu_power_handle *phandle,
+ struct dpu_power_event *event)
+{
+ if (!phandle || !event) {
+ pr_err("invalid phandle or event\n");
+ } else if (!event->active) {
+ pr_err("power handle deinit already done\n");
+ kfree(event);
+ } else {
+ mutex_lock(&phandle->phandle_lock);
+ list_del_init(&event->list);
+ mutex_unlock(&phandle->phandle_lock);
+ kfree(event);
+ }
+}
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
new file mode 100644
index 000000000000..344f74464eca
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_power_handle.h
@@ -0,0 +1,225 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef _DPU_POWER_HANDLE_H_
+#define _DPU_POWER_HANDLE_H_
+
+#define MAX_CLIENT_NAME_LEN 128
+
+#define DPU_POWER_HANDLE_ENABLE_BUS_AB_QUOTA 0
+#define DPU_POWER_HANDLE_DISABLE_BUS_AB_QUOTA 0
+#define DPU_POWER_HANDLE_ENABLE_BUS_IB_QUOTA 1600000000
+#define DPU_POWER_HANDLE_DISABLE_BUS_IB_QUOTA 0
+
+#include "dpu_io_util.h"
+
+/* event will be triggered before power handler disable */
+#define DPU_POWER_EVENT_PRE_DISABLE 0x1
+
+/* event will be triggered after power handler disable */
+#define DPU_POWER_EVENT_POST_DISABLE 0x2
+
+/* event will be triggered before power handler enable */
+#define DPU_POWER_EVENT_PRE_ENABLE 0x4
+
+/* event will be triggered after power handler enable */
+#define DPU_POWER_EVENT_POST_ENABLE 0x8
+
+/**
+ * mdss_bus_vote_type: register bus vote type
+ * VOTE_INDEX_DISABLE: removes the client vote
+ * VOTE_INDEX_LOW: keeps the lowest vote for register bus
+ * VOTE_INDEX_MAX: invalid
+ */
+enum mdss_bus_vote_type {
+ VOTE_INDEX_DISABLE,
+ VOTE_INDEX_LOW,
+ VOTE_INDEX_MAX,
+};
+
+/**
+ * enum dpu_power_handle_data_bus_client - type of axi bus clients
+ * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT: core real-time bus client
+ * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT: core non-real-time bus client
+ * @DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX: maximum number of bus client type
+ */
+enum dpu_power_handle_data_bus_client {
+ DPU_POWER_HANDLE_DATA_BUS_CLIENT_RT,
+ DPU_POWER_HANDLE_DATA_BUS_CLIENT_NRT,
+ DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX
+};
+
+/**
+ * enum DPU_POWER_HANDLE_DBUS_ID - data bus identifier
+ * @DPU_POWER_HANDLE_DBUS_ID_MNOC: DPU/MNOC data bus
+ * @DPU_POWER_HANDLE_DBUS_ID_LLCC: MNOC/LLCC data bus
+ * @DPU_POWER_HANDLE_DBUS_ID_EBI: LLCC/EBI data bus
+ */
+enum DPU_POWER_HANDLE_DBUS_ID {
+ DPU_POWER_HANDLE_DBUS_ID_MNOC,
+ DPU_POWER_HANDLE_DBUS_ID_LLCC,
+ DPU_POWER_HANDLE_DBUS_ID_EBI,
+ DPU_POWER_HANDLE_DBUS_ID_MAX,
+};
+
+/**
+ * struct dpu_power_client: stores the power client for dpu driver
+ * @name: name of the client
+ * @usecase_ndx: current regs bus vote type
+ * @refcount: current refcount if multiple modules are using the same
+ * client for enable/disable. Power module will aggregate
+ * the refcount and vote accordingly for this client.
+ * @id: assigned during create. helps for debugging.
+ * @list: list to attach power handle master list
+ * @ab: arbitrated bandwidth for each bus client
+ * @ib: instantaneous bandwidth for each bus client
+ * @active: indicates the state of dpu power handle
+ */
+struct dpu_power_client {
+ char name[MAX_CLIENT_NAME_LEN];
+ short usecase_ndx;
+ short refcount;
+ u32 id;
+ struct list_head list;
+ u64 ab[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
+ u64 ib[DPU_POWER_HANDLE_DATA_BUS_CLIENT_MAX];
+ bool active;
+};
+
+/*
+ * struct dpu_power_event - local event registration structure
+ * @client_name: name of the client registering
+ * @cb_fnc: pointer to desired callback function
+ * @usr: user pointer to pass to callback event trigger
+ * @event_type: refer to DPU_POWER_EVENT_*
+ * @list: list to attach event master list
+ * @active: indicates the state of dpu power handle
+ */
+struct dpu_power_event {
+ char client_name[MAX_CLIENT_NAME_LEN];
+ void (*cb_fnc)(u32 event_type, void *usr);
+ void *usr;
+ u32 event_type;
+ struct list_head list;
+ bool active;
+};
+
+/**
+ * struct dpu_power_handle: power handle main struct
+ * @power_client_clist: master list to store all clients
+ * @phandle_lock: lock to synchronize the enable/disable
+ * @dev: pointer to device structure
+ * @current_usecase_ndx: current usecase index
+ * @event_list: current power handle event list
+ */
+struct dpu_power_handle {
+ struct list_head power_client_clist;
+ struct mutex phandle_lock;
+ struct device *dev;
+ u32 current_usecase_ndx;
+ struct list_head event_list;
+};
+
+/**
+ * dpu_power_resource_init() - initializes the dpu power handle
+ * @pdev: platform device to search the power resources
+ * @pdata: power handle to store the power resources
+ */
+void dpu_power_resource_init(struct platform_device *pdev,
+ struct dpu_power_handle *pdata);
+
+/**
+ * dpu_power_resource_deinit() - release the dpu power handle
+ * @pdev: platform device for power resources
+ * @pdata: power handle containing the resources
+ *
+ * Return: none.
+ */
+void dpu_power_resource_deinit(struct platform_device *pdev,
+ struct dpu_power_handle *pdata);
+
+/**
+ * dpu_power_client_create() - create the client on power handle
+ * @pdata: power handle containing the resources
+ * @client_name: new client name for registration
+ *
+ * Return: error code.
+ */
+struct dpu_power_client *dpu_power_client_create(struct dpu_power_handle *pdata,
+ char *client_name);
+
+/**
+ * dpu_power_client_destroy() - destroy the client on power handle
+ * @phandle: power handle containing the resources
+ * @client: power client to be destroyed
+ *
+ * Return: none
+ */
+void dpu_power_client_destroy(struct dpu_power_handle *phandle,
+ struct dpu_power_client *client);
+
+/**
+ * dpu_power_resource_enable() - enable/disable the power resources
+ * @pdata: power handle containing the resources
+ * @client: client information to enable/disable its vote
+ * @enable: boolean request for enable/disable
+ *
+ * Return: error code.
+ */
+int dpu_power_resource_enable(struct dpu_power_handle *pdata,
+ struct dpu_power_client *pclient, bool enable);
+
+/**
+ * dpu_power_data_bus_bandwidth_ctrl() - control data bus bandwidth enable
+ * @phandle: power handle containing the resources
+ * @client: client information to bandwidth control
+ * @enable: true to enable bandwidth for data bus
+ *
+ * Return: none
+ */
+void dpu_power_data_bus_bandwidth_ctrl(struct dpu_power_handle *phandle,
+ struct dpu_power_client *pclient, int enable);
+
+/**
+ * dpu_power_handle_register_event - register a callback function for an event.
+ * Clients can register for multiple events with a single register.
+ * Any block with access to phandle can register for the event
+ * notification.
+ * @phandle: power handle containing the resources
+ * @event_type: event type to register; refer DPU_POWER_EVENT_*
+ * @cb_fnc: pointer to desired callback function
+ * @usr: user pointer to pass to callback on event trigger
+ * @client_name: name of the client registering
+ *
+ * Return: event pointer if success, or error code otherwise
+ */
+struct dpu_power_event *dpu_power_handle_register_event(
+ struct dpu_power_handle *phandle,
+ u32 event_type, void (*cb_fnc)(u32 event_type, void *usr),
+ void *usr, char *client_name);
+
+/**
+ * dpu_power_handle_unregister_event - unregister callback for event(s)
+ * @phandle: power handle containing the resources
+ * @event: event pointer returned after power handle register
+ */
+void dpu_power_handle_unregister_event(struct dpu_power_handle *phandle,
+ struct dpu_power_event *event);
+
+/**
+ * dpu_power_handle_get_dbus_name - get name of given data bus identifier
+ * @bus_id: data bus identifier
+ * Return: Pointer to name string if success; NULL otherwise
+ */
+const char *dpu_power_handle_get_dbus_name(u32 bus_id);
+
+#endif /* _DPU_POWER_HANDLE_H_ */
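For orientation, here is a minimal usage sketch of the event API declared above. The client name, the callback body, and the way the event bit is chosen are hypothetical; only the function signatures come from this header.

/* Hedged sketch, not part of the patch: registering for power events.
 * Assumes dpu_power_handle.h is in scope; the "mycrtc" names are invented. */
static void mycrtc_power_event_cb(u32 event_type, void *usr)
{
	/* react to the DPU_POWER_HANDLE_EVENT_* bit that fired */
}

static struct dpu_power_event *mycrtc_bind(struct dpu_power_handle *phandle,
		u32 event_bit /* one of the DPU_POWER_HANDLE_EVENT_* bits */)
{
	return dpu_power_handle_register_event(phandle, event_bit,
					       mycrtc_power_event_cb,
					       NULL, "mycrtc");
}

/* On teardown: dpu_power_handle_unregister_event(phandle, event); */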
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
new file mode 100644
index 000000000000..13c0a36d4ef9
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.c
@@ -0,0 +1,1079 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#define pr_fmt(fmt) "[drm:%s] " fmt, __func__
+#include "dpu_kms.h"
+#include "dpu_hw_lm.h"
+#include "dpu_hw_ctl.h"
+#include "dpu_hw_cdm.h"
+#include "dpu_hw_pingpong.h"
+#include "dpu_hw_intf.h"
+#include "dpu_encoder.h"
+#include "dpu_trace.h"
+
+#define RESERVED_BY_OTHER(h, r) \
+ ((h)->rsvp && ((h)->rsvp->enc_id != (r)->enc_id))
+
+#define RM_RQ_LOCK(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_LOCK))
+#define RM_RQ_CLEAR(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_RESERVE_CLEAR))
+#define RM_RQ_DS(r) ((r)->top_ctrl & BIT(DPU_RM_TOPCTL_DS))
+#define RM_IS_TOPOLOGY_MATCH(t, r) ((t).num_lm == (r).num_lm && \
+ (t).num_comp_enc == (r).num_enc && \
+ (t).num_intf == (r).num_intf)
+
+struct dpu_rm_topology_def {
+ enum dpu_rm_topology_name top_name;
+ int num_lm;
+ int num_comp_enc;
+ int num_intf;
+ int num_ctl;
+ int needs_split_display;
+};
+
+static const struct dpu_rm_topology_def g_top_table[] = {
+ { DPU_RM_TOPOLOGY_NONE, 0, 0, 0, 0, false },
+ { DPU_RM_TOPOLOGY_SINGLEPIPE, 1, 0, 1, 1, false },
+ { DPU_RM_TOPOLOGY_DUALPIPE, 2, 0, 2, 2, true },
+ { DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE, 2, 0, 1, 1, false },
+};
+
+/**
+ * struct dpu_rm_requirements - Reservation requirements parameter bundle
+ * @top_ctrl: topology control preference from kernel client
+ * @topology: selected topology for the display
+ * @hw_res: Hardware resources required as reported by the encoders
+ */
+struct dpu_rm_requirements {
+ uint64_t top_ctrl;
+ const struct dpu_rm_topology_def *topology;
+ struct dpu_encoder_hw_resources hw_res;
+};
+
+/**
+ * struct dpu_rm_rsvp - Use Case Reservation tagging structure
+ * Used to tag HW blocks as reserved by a CRTC->Encoder->Connector chain
+ * Using a tag, rather than lists of pointers to the HW blocks used,
+ * avoids some list management, since we don't know how many blocks
+ * of each type a given use case may require.
+ * @list: List head for list of all reservations
+ * @seq: Global RSVP sequence number for debugging, especially for
+ * differentiating different allocations for the same encoder.
+ * @enc_id: Reservations are tracked by Encoder DRM object ID.
+ * CRTCs may be connected to multiple Encoders.
+ * An encoder or connector id identifies the display path.
+ * @topology: DRM<->HW topology use case
+ */
+struct dpu_rm_rsvp {
+ struct list_head list;
+ uint32_t seq;
+ uint32_t enc_id;
+ enum dpu_rm_topology_name topology;
+};
+
+/**
+ * struct dpu_rm_hw_blk - hardware block tracking list member
+ * @list: List head for list of all hardware blocks tracking items
+ * @rsvp: Pointer to use case reservation if reserved by a client
+ * @rsvp_nxt: Temporary pointer used during reservation to the incoming
+ * request. Will be swapped into rsvp if proposal is accepted
+ * @type: Type of hardware block this structure tracks
+ * @id: Hardware ID number, within its own space, i.e. LM_X
+ * @hw: Pointer to the hardware register access object for this block
+ */
+struct dpu_rm_hw_blk {
+ struct list_head list;
+ struct dpu_rm_rsvp *rsvp;
+ struct dpu_rm_rsvp *rsvp_nxt;
+ enum dpu_hw_blk_type type;
+ uint32_t id;
+ struct dpu_hw_blk *hw;
+};
+
+/**
+ * dpu_rm_dbg_rsvp_stage - enum of steps in making reservation for event logging
+ */
+enum dpu_rm_dbg_rsvp_stage {
+ DPU_RM_STAGE_BEGIN,
+ DPU_RM_STAGE_AFTER_CLEAR,
+ DPU_RM_STAGE_AFTER_RSVPNEXT,
+ DPU_RM_STAGE_FINAL
+};
+
+static void _dpu_rm_print_rsvps(
+ struct dpu_rm *rm,
+ enum dpu_rm_dbg_rsvp_stage stage)
+{
+ struct dpu_rm_rsvp *rsvp;
+ struct dpu_rm_hw_blk *blk;
+ enum dpu_hw_blk_type type;
+
+ DPU_DEBUG("%d\n", stage);
+
+ list_for_each_entry(rsvp, &rm->rsvps, list) {
+ DRM_DEBUG_KMS("%d rsvp[s%ue%u] topology %d\n", stage, rsvp->seq,
+ rsvp->enc_id, rsvp->topology);
+ }
+
+ for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+ list_for_each_entry(blk, &rm->hw_blks[type], list) {
+ if (!blk->rsvp && !blk->rsvp_nxt)
+ continue;
+
+ DRM_DEBUG_KMS("%d rsvp[s%ue%u->s%ue%u] %d %d\n", stage,
+ (blk->rsvp) ? blk->rsvp->seq : 0,
+ (blk->rsvp) ? blk->rsvp->enc_id : 0,
+ (blk->rsvp_nxt) ? blk->rsvp_nxt->seq : 0,
+ (blk->rsvp_nxt) ? blk->rsvp_nxt->enc_id : 0,
+ blk->type, blk->id);
+ }
+ }
+}
+
+struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm)
+{
+ return rm->hw_mdp;
+}
+
+enum dpu_rm_topology_name
+dpu_rm_get_topology_name(struct msm_display_topology topology)
+{
+ int i;
+
+ for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++)
+ if (RM_IS_TOPOLOGY_MATCH(g_top_table[i], topology))
+ return g_top_table[i].top_name;
+
+ return DPU_RM_TOPOLOGY_NONE;
+}
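As a concrete illustration of the table lookup above, a hedged sketch; the values are illustrative, and the field names follow the RM_IS_TOPOLOGY_MATCH macro.

/* Illustrative only: 2 LMs, no compression encoders, 1 interface matches
 * the DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE row of g_top_table. */
struct msm_display_topology topology = {
	.num_lm   = 2,
	.num_enc  = 0,
	.num_intf = 1,
};
enum dpu_rm_topology_name name = dpu_rm_get_topology_name(topology);
/* name == DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE */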
+
+void dpu_rm_init_hw_iter(
+ struct dpu_rm_hw_iter *iter,
+ uint32_t enc_id,
+ enum dpu_hw_blk_type type)
+{
+ memset(iter, 0, sizeof(*iter));
+ iter->enc_id = enc_id;
+ iter->type = type;
+}
+
+static bool _dpu_rm_get_hw_locked(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
+{
+ struct list_head *blk_list;
+
+ if (!rm || !i || i->type >= DPU_HW_BLK_MAX) {
+ DPU_ERROR("invalid rm\n");
+ return false;
+ }
+
+ i->hw = NULL;
+ blk_list = &rm->hw_blks[i->type];
+
+ if (i->blk && (&i->blk->list == blk_list)) {
+ DPU_DEBUG("attempt resume iteration past last\n");
+ return false;
+ }
+
+ i->blk = list_prepare_entry(i->blk, blk_list, list);
+
+ list_for_each_entry_continue(i->blk, blk_list, list) {
+ struct dpu_rm_rsvp *rsvp = i->blk->rsvp;
+
+ if (i->blk->type != i->type) {
+ DPU_ERROR("found incorrect block type %d on %d list\n",
+ i->blk->type, i->type);
+ return false;
+ }
+
+ if ((i->enc_id == 0) || (rsvp && rsvp->enc_id == i->enc_id)) {
+ i->hw = i->blk->hw;
+ DPU_DEBUG("found type %d id %d for enc %d\n",
+ i->type, i->blk->id, i->enc_id);
+ return true;
+ }
+ }
+
+ DPU_DEBUG("no match, type %d for enc %d\n", i->type, i->enc_id);
+
+ return false;
+}
+
+bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *i)
+{
+ bool ret;
+
+ mutex_lock(&rm->rm_lock);
+ ret = _dpu_rm_get_hw_locked(rm, i);
+ mutex_unlock(&rm->rm_lock);
+
+ return ret;
+}
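A short sketch of the iterator pattern this function supports; the encoder id and the surrounding variables are assumptions.

/* Sketch: walk every CTL block reserved for a given encoder. */
struct dpu_rm_hw_iter iter;

dpu_rm_init_hw_iter(&iter, enc_id /* drm_enc->base.id */, DPU_HW_BLK_CTL);
while (dpu_rm_get_hw(rm, &iter)) {
	struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.hw);

	/* each pass yields the next reserved CTL, in list order */
}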
+
+static void _dpu_rm_hw_destroy(enum dpu_hw_blk_type type, void *hw)
+{
+ switch (type) {
+ case DPU_HW_BLK_LM:
+ dpu_hw_lm_destroy(hw);
+ break;
+ case DPU_HW_BLK_CTL:
+ dpu_hw_ctl_destroy(hw);
+ break;
+ case DPU_HW_BLK_CDM:
+ dpu_hw_cdm_destroy(hw);
+ break;
+ case DPU_HW_BLK_PINGPONG:
+ dpu_hw_pingpong_destroy(hw);
+ break;
+ case DPU_HW_BLK_INTF:
+ dpu_hw_intf_destroy(hw);
+ break;
+ case DPU_HW_BLK_SSPP:
+ /* SSPPs are not managed by the resource manager */
+ case DPU_HW_BLK_TOP:
+ /* Top is a singleton, not managed in hw_blks list */
+ case DPU_HW_BLK_MAX:
+ default:
+ DPU_ERROR("unsupported block type %d\n", type);
+ break;
+ }
+}
+
+int dpu_rm_destroy(struct dpu_rm *rm)
+{
+ struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
+ struct dpu_rm_hw_blk *hw_cur, *hw_nxt;
+ enum dpu_hw_blk_type type;
+
+ if (!rm) {
+ DPU_ERROR("invalid rm\n");
+ return -EINVAL;
+ }
+
+ list_for_each_entry_safe(rsvp_cur, rsvp_nxt, &rm->rsvps, list) {
+ list_del(&rsvp_cur->list);
+ kfree(rsvp_cur);
+ }
+
+ for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+ list_for_each_entry_safe(hw_cur, hw_nxt, &rm->hw_blks[type],
+ list) {
+ list_del(&hw_cur->list);
+ _dpu_rm_hw_destroy(hw_cur->type, hw_cur->hw);
+ kfree(hw_cur);
+ }
+ }
+
+ dpu_hw_mdp_destroy(rm->hw_mdp);
+ rm->hw_mdp = NULL;
+
+ mutex_destroy(&rm->rm_lock);
+
+ return 0;
+}
+
+static int _dpu_rm_hw_blk_create(
+ struct dpu_rm *rm,
+ struct dpu_mdss_cfg *cat,
+ void __iomem *mmio,
+ enum dpu_hw_blk_type type,
+ uint32_t id,
+ void *hw_catalog_info)
+{
+ struct dpu_rm_hw_blk *blk;
+ struct dpu_hw_mdp *hw_mdp;
+ void *hw;
+
+ hw_mdp = rm->hw_mdp;
+
+ switch (type) {
+ case DPU_HW_BLK_LM:
+ hw = dpu_hw_lm_init(id, mmio, cat);
+ break;
+ case DPU_HW_BLK_CTL:
+ hw = dpu_hw_ctl_init(id, mmio, cat);
+ break;
+ case DPU_HW_BLK_CDM:
+ hw = dpu_hw_cdm_init(id, mmio, cat, hw_mdp);
+ break;
+ case DPU_HW_BLK_PINGPONG:
+ hw = dpu_hw_pingpong_init(id, mmio, cat);
+ break;
+ case DPU_HW_BLK_INTF:
+ hw = dpu_hw_intf_init(id, mmio, cat);
+ break;
+ case DPU_HW_BLK_SSPP:
+ /* SSPPs are not managed by the resource manager */
+ case DPU_HW_BLK_TOP:
+ /* Top is a singleton, not managed in hw_blks list */
+ case DPU_HW_BLK_MAX:
+ default:
+ DPU_ERROR("unsupported block type %d\n", type);
+ return -EINVAL;
+ }
+
+ if (IS_ERR_OR_NULL(hw)) {
+ DPU_ERROR("failed hw object creation: type %d, err %ld\n",
+ type, PTR_ERR(hw));
+ return -EFAULT;
+ }
+
+ blk = kzalloc(sizeof(*blk), GFP_KERNEL);
+ if (!blk) {
+ _dpu_rm_hw_destroy(type, hw);
+ return -ENOMEM;
+ }
+
+ blk->type = type;
+ blk->id = id;
+ blk->hw = hw;
+ list_add_tail(&blk->list, &rm->hw_blks[type]);
+
+ return 0;
+}
+
+int dpu_rm_init(struct dpu_rm *rm,
+ struct dpu_mdss_cfg *cat,
+ void __iomem *mmio,
+ struct drm_device *dev)
+{
+ int rc, i;
+ enum dpu_hw_blk_type type;
+
+ if (!rm || !cat || !mmio || !dev) {
+ DPU_ERROR("invalid kms\n");
+ return -EINVAL;
+ }
+
+ /* Clear, setup lists */
+ memset(rm, 0, sizeof(*rm));
+
+ mutex_init(&rm->rm_lock);
+
+ INIT_LIST_HEAD(&rm->rsvps);
+ for (type = 0; type < DPU_HW_BLK_MAX; type++)
+ INIT_LIST_HEAD(&rm->hw_blks[type]);
+
+ rm->dev = dev;
+
+ /* Some of the sub-blocks require an mdptop to be created */
+ rm->hw_mdp = dpu_hw_mdptop_init(MDP_TOP, mmio, cat);
+ if (IS_ERR_OR_NULL(rm->hw_mdp)) {
+ rc = PTR_ERR(rm->hw_mdp);
+ rm->hw_mdp = NULL;
+ DPU_ERROR("failed: mdp hw not available\n");
+ goto fail;
+ }
+
+ /* Interrogate HW catalog and create tracking items for hw blocks */
+ for (i = 0; i < cat->mixer_count; i++) {
+ struct dpu_lm_cfg *lm = &cat->mixer[i];
+
+ if (lm->pingpong == PINGPONG_MAX) {
+ DPU_DEBUG("skip mixer %d without pingpong\n", lm->id);
+ continue;
+ }
+
+ rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_LM,
+ cat->mixer[i].id, &cat->mixer[i]);
+ if (rc) {
+ DPU_ERROR("failed: lm hw not available\n");
+ goto fail;
+ }
+
+ if (!rm->lm_max_width) {
+ rm->lm_max_width = lm->sblk->maxwidth;
+ } else if (rm->lm_max_width != lm->sblk->maxwidth) {
+ /*
+ * Don't expect to have hw where lm max widths differ.
+ * If found, take the min.
+ */
+ DPU_ERROR("unsupported: lm maxwidth differs\n");
+ if (rm->lm_max_width > lm->sblk->maxwidth)
+ rm->lm_max_width = lm->sblk->maxwidth;
+ }
+ }
+
+ for (i = 0; i < cat->pingpong_count; i++) {
+ rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_PINGPONG,
+ cat->pingpong[i].id, &cat->pingpong[i]);
+ if (rc) {
+ DPU_ERROR("failed: pp hw not available\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < cat->intf_count; i++) {
+ if (cat->intf[i].type == INTF_NONE) {
+ DPU_DEBUG("skip intf %d with type none\n", i);
+ continue;
+ }
+
+ rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_INTF,
+ cat->intf[i].id, &cat->intf[i]);
+ if (rc) {
+ DPU_ERROR("failed: intf hw not available\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < cat->ctl_count; i++) {
+ rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CTL,
+ cat->ctl[i].id, &cat->ctl[i]);
+ if (rc) {
+ DPU_ERROR("failed: ctl hw not available\n");
+ goto fail;
+ }
+ }
+
+ for (i = 0; i < cat->cdm_count; i++) {
+ rc = _dpu_rm_hw_blk_create(rm, cat, mmio, DPU_HW_BLK_CDM,
+ cat->cdm[i].id, &cat->cdm[i]);
+ if (rc) {
+ DPU_ERROR("failed: cdm hw not available\n");
+ goto fail;
+ }
+ }
+
+ return 0;
+
+fail:
+ dpu_rm_destroy(rm);
+
+ return rc;
+}
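Note that on any mid-initialization failure the function unwinds through dpu_rm_destroy() itself, so callers only pair the two at the top level. A hedged sketch of that pairing; the dpu_kms field names are assumptions.

/* Sketch of the expected init/teardown pairing at KMS setup time. */
ret = dpu_rm_init(&dpu_kms->rm, dpu_kms->catalog, dpu_kms->mmio, dev);
if (ret)
	return ret;	/* dpu_rm_init() already cleaned up after itself */

/* ... on driver teardown: */
dpu_rm_destroy(&dpu_kms->rm);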
+
+/**
+ * _dpu_rm_check_lm_and_get_connected_blks - check if proposed layer mixer meets
+ * proposed use case requirements, incl. hardwired dependent blocks like
+ * pingpong
+ * @rm: dpu resource manager handle
+ * @rsvp: reservation currently being created
+ * @reqs: proposed use case requirements
+ * @lm: proposed layer mixer; the function checks whether the lm, and all
+ * other hardwired blocks connected to the lm (pp), are available and
+ * appropriate
+ * @pp: output parameter, pingpong block attached to the layer mixer.
+ * NULL if pp was not available, or not matching requirements.
+ * @primary_lm: if non-null, this function checks whether lm is a compatible
+ * peer of primary_lm, as well as satisfying all other requirements
+ * @Return: true if lm matches all requirements, false otherwise
+ */
+static bool _dpu_rm_check_lm_and_get_connected_blks(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ struct dpu_rm_requirements *reqs,
+ struct dpu_rm_hw_blk *lm,
+ struct dpu_rm_hw_blk **pp,
+ struct dpu_rm_hw_blk *primary_lm)
+{
+ const struct dpu_lm_cfg *lm_cfg = to_dpu_hw_mixer(lm->hw)->cap;
+ struct dpu_rm_hw_iter iter;
+
+ *pp = NULL;
+
+ DPU_DEBUG("check lm %d pp %d\n",
+ lm_cfg->id, lm_cfg->pingpong);
+
+ /* Check if this layer mixer is a peer of the proposed primary LM */
+ if (primary_lm) {
+ const struct dpu_lm_cfg *prim_lm_cfg =
+ to_dpu_hw_mixer(primary_lm->hw)->cap;
+
+ if (!test_bit(lm_cfg->id, &prim_lm_cfg->lm_pair_mask)) {
+ DPU_DEBUG("lm %d not peer of lm %d\n", lm_cfg->id,
+ prim_lm_cfg->id);
+ return false;
+ }
+ }
+
+ /* Already reserved? */
+ if (RESERVED_BY_OTHER(lm, rsvp)) {
+ DPU_DEBUG("lm %d already reserved\n", lm_cfg->id);
+ return false;
+ }
+
+ dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_PINGPONG);
+ while (_dpu_rm_get_hw_locked(rm, &iter)) {
+ if (iter.blk->id == lm_cfg->pingpong) {
+ *pp = iter.blk;
+ break;
+ }
+ }
+
+ if (!*pp) {
+ DPU_ERROR("failed to get pp on lm %d\n", lm_cfg->pingpong);
+ return false;
+ }
+
+ if (RESERVED_BY_OTHER(*pp, rsvp)) {
+ DPU_DEBUG("lm %d pp %d already reserved\n", lm->id,
+ (*pp)->id);
+ return false;
+ }
+
+ return true;
+}
+
+static int _dpu_rm_reserve_lms(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ struct dpu_rm_requirements *reqs)
+{
+ struct dpu_rm_hw_blk *lm[MAX_BLOCKS];
+ struct dpu_rm_hw_blk *pp[MAX_BLOCKS];
+ struct dpu_rm_hw_iter iter_i, iter_j;
+ int lm_count = 0;
+ int i, rc = 0;
+
+ if (!reqs->topology->num_lm) {
+ DPU_ERROR("invalid number of lm: %d\n", reqs->topology->num_lm);
+ return -EINVAL;
+ }
+
+ /* Find a primary mixer */
+ dpu_rm_init_hw_iter(&iter_i, 0, DPU_HW_BLK_LM);
+ while (lm_count != reqs->topology->num_lm &&
+ _dpu_rm_get_hw_locked(rm, &iter_i)) {
+ memset(&lm, 0, sizeof(lm));
+ memset(&pp, 0, sizeof(pp));
+
+ lm_count = 0;
+ lm[lm_count] = iter_i.blk;
+
+ if (!_dpu_rm_check_lm_and_get_connected_blks(
+ rm, rsvp, reqs, lm[lm_count],
+ &pp[lm_count], NULL))
+ continue;
+
+ ++lm_count;
+
+ /* Valid primary mixer found, find matching peers */
+ dpu_rm_init_hw_iter(&iter_j, 0, DPU_HW_BLK_LM);
+
+ while (lm_count != reqs->topology->num_lm &&
+ _dpu_rm_get_hw_locked(rm, &iter_j)) {
+ if (iter_i.blk == iter_j.blk)
+ continue;
+
+ if (!_dpu_rm_check_lm_and_get_connected_blks(
+ rm, rsvp, reqs, iter_j.blk,
+ &pp[lm_count], iter_i.blk))
+ continue;
+
+ lm[lm_count] = iter_j.blk;
+ ++lm_count;
+ }
+ }
+
+ if (lm_count != reqs->topology->num_lm) {
+ DPU_DEBUG("unable to find appropriate mixers\n");
+ return -ENAVAIL;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(lm); i++) {
+ if (!lm[i])
+ break;
+
+ lm[i]->rsvp_nxt = rsvp;
+ pp[i]->rsvp_nxt = rsvp;
+
+ trace_dpu_rm_reserve_lms(lm[i]->id, lm[i]->type, rsvp->enc_id,
+ pp[i]->id);
+ }
+
+ return rc;
+}
+
+static int _dpu_rm_reserve_ctls(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ const struct dpu_rm_topology_def *top)
+{
+ struct dpu_rm_hw_blk *ctls[MAX_BLOCKS];
+ struct dpu_rm_hw_iter iter;
+ int i = 0;
+
+ memset(&ctls, 0, sizeof(ctls));
+
+ dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CTL);
+ while (_dpu_rm_get_hw_locked(rm, &iter)) {
+ const struct dpu_hw_ctl *ctl = to_dpu_hw_ctl(iter.blk->hw);
+ unsigned long features = ctl->caps->features;
+ bool has_split_display;
+
+ if (RESERVED_BY_OTHER(iter.blk, rsvp))
+ continue;
+
+ has_split_display = BIT(DPU_CTL_SPLIT_DISPLAY) & features;
+
+ DPU_DEBUG("ctl %d caps 0x%lX\n", iter.blk->id, features);
+
+ if (top->needs_split_display != has_split_display)
+ continue;
+
+ ctls[i] = iter.blk;
+ DPU_DEBUG("ctl %d match\n", iter.blk->id);
+
+ if (++i == top->num_ctl)
+ break;
+ }
+
+ if (i != top->num_ctl)
+ return -ENAVAIL;
+
+ for (i = 0; i < ARRAY_SIZE(ctls) && i < top->num_ctl; i++) {
+ ctls[i]->rsvp_nxt = rsvp;
+ trace_dpu_rm_reserve_ctls(ctls[i]->id, ctls[i]->type,
+ rsvp->enc_id);
+ }
+
+ return 0;
+}
+
+static int _dpu_rm_reserve_cdm(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ uint32_t id,
+ enum dpu_hw_blk_type type)
+{
+ struct dpu_rm_hw_iter iter;
+
+ DRM_DEBUG_KMS("type %d id %d\n", type, id);
+
+ dpu_rm_init_hw_iter(&iter, 0, DPU_HW_BLK_CDM);
+ while (_dpu_rm_get_hw_locked(rm, &iter)) {
+ const struct dpu_hw_cdm *cdm = to_dpu_hw_cdm(iter.blk->hw);
+ const struct dpu_cdm_cfg *caps = cdm->caps;
+ bool match = false;
+
+ if (RESERVED_BY_OTHER(iter.blk, rsvp))
+ continue;
+
+ if (type == DPU_HW_BLK_INTF && id != INTF_MAX)
+ match = test_bit(id, &caps->intf_connect);
+
+ DRM_DEBUG_KMS("iter: type:%d id:%d enc:%d cdm:%lu match:%d\n",
+ iter.blk->type, iter.blk->id, rsvp->enc_id,
+ caps->intf_connect, match);
+
+ if (!match)
+ continue;
+
+ trace_dpu_rm_reserve_cdm(iter.blk->id, iter.blk->type,
+ rsvp->enc_id);
+ iter.blk->rsvp_nxt = rsvp;
+ break;
+ }
+
+ if (!iter.hw) {
+ DPU_ERROR("couldn't reserve cdm for type %d id %d\n", type, id);
+ return -ENAVAIL;
+ }
+
+ return 0;
+}
+
+static int _dpu_rm_reserve_intf(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ uint32_t id,
+ enum dpu_hw_blk_type type,
+ bool needs_cdm)
+{
+ struct dpu_rm_hw_iter iter;
+ int ret = 0;
+
+ /* Find the block entry in the rm, and note the reservation */
+ dpu_rm_init_hw_iter(&iter, 0, type);
+ while (_dpu_rm_get_hw_locked(rm, &iter)) {
+ if (iter.blk->id != id)
+ continue;
+
+ if (RESERVED_BY_OTHER(iter.blk, rsvp)) {
+ DPU_ERROR("type %d id %d already reserved\n", type, id);
+ return -ENAVAIL;
+ }
+
+ iter.blk->rsvp_nxt = rsvp;
+ trace_dpu_rm_reserve_intf(iter.blk->id, iter.blk->type,
+ rsvp->enc_id);
+ break;
+ }
+
+ /* Shouldn't happen since intfs are fixed at probe */
+ if (!iter.hw) {
+ DPU_ERROR("couldn't find type %d id %d\n", type, id);
+ return -EINVAL;
+ }
+
+ if (needs_cdm)
+ ret = _dpu_rm_reserve_cdm(rm, rsvp, id, type);
+
+ return ret;
+}
+
+static int _dpu_rm_reserve_intf_related_hw(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ struct dpu_encoder_hw_resources *hw_res)
+{
+ int i, ret = 0;
+ u32 id;
+
+ for (i = 0; i < ARRAY_SIZE(hw_res->intfs); i++) {
+ if (hw_res->intfs[i] == INTF_MODE_NONE)
+ continue;
+ id = i + INTF_0;
+ ret = _dpu_rm_reserve_intf(rm, rsvp, id,
+ DPU_HW_BLK_INTF, hw_res->needs_cdm);
+ if (ret)
+ return ret;
+ }
+
+ return ret;
+}
+
+static int _dpu_rm_make_next_rsvp(
+ struct dpu_rm *rm,
+ struct drm_encoder *enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct dpu_rm_rsvp *rsvp,
+ struct dpu_rm_requirements *reqs)
+{
+ int ret;
+ struct dpu_rm_topology_def topology;
+
+ /* Create reservation info, tag reserved blocks with it as we go */
+ rsvp->seq = ++rm->rsvp_next_seq;
+ rsvp->enc_id = enc->base.id;
+ rsvp->topology = reqs->topology->top_name;
+ list_add_tail(&rsvp->list, &rm->rsvps);
+
+ ret = _dpu_rm_reserve_lms(rm, rsvp, reqs);
+ if (ret) {
+ DPU_ERROR("unable to find appropriate mixers\n");
+ return ret;
+ }
+
+ /*
+ * Do assignment preferring to give away low-resource CTLs first:
+ * - Check CTLs without split-display capability first
+ * - Only then allow to grab from CTLs with split-display capability
+ */
+ ret = _dpu_rm_reserve_ctls(rm, rsvp, reqs->topology);
+ if (ret && !reqs->topology->needs_split_display) {
+ memcpy(&topology, reqs->topology, sizeof(topology));
+ topology.needs_split_display = true;
+ ret = _dpu_rm_reserve_ctls(rm, rsvp, &topology);
+ }
+ if (ret) {
+ DPU_ERROR("unable to find appropriate CTL\n");
+ return ret;
+ }
+
+ /* Assign INTFs and blks whose usage is tied to them: CTL & CDM */
+ ret = _dpu_rm_reserve_intf_related_hw(rm, rsvp, &reqs->hw_res);
+ if (ret)
+ return ret;
+
+ return ret;
+}
+
+static int _dpu_rm_populate_requirements(
+ struct dpu_rm *rm,
+ struct drm_encoder *enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct dpu_rm_requirements *reqs,
+ struct msm_display_topology req_topology)
+{
+ int i;
+
+ memset(reqs, 0, sizeof(*reqs));
+
+ dpu_encoder_get_hw_resources(enc, &reqs->hw_res, conn_state);
+
+ for (i = 0; i < DPU_RM_TOPOLOGY_MAX; i++) {
+ if (RM_IS_TOPOLOGY_MATCH(g_top_table[i],
+ req_topology)) {
+ reqs->topology = &g_top_table[i];
+ break;
+ }
+ }
+
+ if (!reqs->topology) {
+ DPU_ERROR("invalid topology for the display\n");
+ return -EINVAL;
+ }
+
+ /*
+ * Set the requirement based on caps if not set from user space.
+ * This ensures that an LM tied to the DS blocks is selected.
+ * Currently, DS blocks are tied to LM 0 and LM 1 (primary display).
+ */
+ if (!RM_RQ_DS(reqs) && rm->hw_mdp->caps->has_dest_scaler &&
+ conn_state->connector->connector_type == DRM_MODE_CONNECTOR_DSI)
+ reqs->top_ctrl |= BIT(DPU_RM_TOPCTL_DS);
+
+ DRM_DEBUG_KMS("top_ctrl: 0x%llX num_h_tiles: %d\n", reqs->top_ctrl,
+ reqs->hw_res.display_num_of_h_tiles);
+ DRM_DEBUG_KMS("num_lm: %d num_ctl: %d topology: %d split_display: %d\n",
+ reqs->topology->num_lm, reqs->topology->num_ctl,
+ reqs->topology->top_name,
+ reqs->topology->needs_split_display);
+
+ return 0;
+}
+
+static struct dpu_rm_rsvp *_dpu_rm_get_rsvp(
+ struct dpu_rm *rm,
+ struct drm_encoder *enc)
+{
+ struct dpu_rm_rsvp *i;
+
+ if (!rm || !enc) {
+ DPU_ERROR("invalid params\n");
+ return NULL;
+ }
+
+ if (list_empty(&rm->rsvps))
+ return NULL;
+
+ list_for_each_entry(i, &rm->rsvps, list)
+ if (i->enc_id == enc->base.id)
+ return i;
+
+ return NULL;
+}
+
+static struct drm_connector *_dpu_rm_get_connector(
+ struct drm_encoder *enc)
+{
+ struct drm_connector *conn = NULL;
+ struct list_head *connector_list =
+ &enc->dev->mode_config.connector_list;
+
+ list_for_each_entry(conn, connector_list, head)
+ if (conn->encoder == enc)
+ return conn;
+
+ return NULL;
+}
+
+/**
+ * _dpu_rm_release_rsvp - release a reservation and the resources it holds
+ * @rm: DPU Resource Manager handle
+ * @rsvp: RSVP pointer to release resources for
+ * @conn: connector the reservation belongs to
+ */
+static void _dpu_rm_release_rsvp(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ struct drm_connector *conn)
+{
+ struct dpu_rm_rsvp *rsvp_c, *rsvp_n;
+ struct dpu_rm_hw_blk *blk;
+ enum dpu_hw_blk_type type;
+
+ if (!rsvp)
+ return;
+
+ DPU_DEBUG("rel rsvp %d enc %d\n", rsvp->seq, rsvp->enc_id);
+
+ list_for_each_entry_safe(rsvp_c, rsvp_n, &rm->rsvps, list) {
+ if (rsvp == rsvp_c) {
+ list_del(&rsvp_c->list);
+ break;
+ }
+ }
+
+ for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+ list_for_each_entry(blk, &rm->hw_blks[type], list) {
+ if (blk->rsvp == rsvp) {
+ blk->rsvp = NULL;
+ DPU_DEBUG("rel rsvp %d enc %d %d %d\n",
+ rsvp->seq, rsvp->enc_id,
+ blk->type, blk->id);
+ }
+ if (blk->rsvp_nxt == rsvp) {
+ blk->rsvp_nxt = NULL;
+ DPU_DEBUG("rel rsvp_nxt %d enc %d %d %d\n",
+ rsvp->seq, rsvp->enc_id,
+ blk->type, blk->id);
+ }
+ }
+ }
+
+ kfree(rsvp);
+}
+
+void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc)
+{
+ struct dpu_rm_rsvp *rsvp;
+ struct drm_connector *conn;
+
+ if (!rm || !enc) {
+ DPU_ERROR("invalid params\n");
+ return;
+ }
+
+ mutex_lock(&rm->rm_lock);
+
+ rsvp = _dpu_rm_get_rsvp(rm, enc);
+ if (!rsvp) {
+ DPU_ERROR("failed to find rsvp for enc %d\n", enc->base.id);
+ goto end;
+ }
+
+ conn = _dpu_rm_get_connector(enc);
+ if (!conn) {
+ DPU_ERROR("failed to get connector for enc %d\n", enc->base.id);
+ goto end;
+ }
+
+ _dpu_rm_release_rsvp(rm, rsvp, conn);
+end:
+ mutex_unlock(&rm->rm_lock);
+}
+
+static int _dpu_rm_commit_rsvp(
+ struct dpu_rm *rm,
+ struct dpu_rm_rsvp *rsvp,
+ struct drm_connector_state *conn_state)
+{
+ struct dpu_rm_hw_blk *blk;
+ enum dpu_hw_blk_type type;
+ int ret = 0;
+
+ /* Swap next rsvp to be the active */
+ for (type = 0; type < DPU_HW_BLK_MAX; type++) {
+ list_for_each_entry(blk, &rm->hw_blks[type], list) {
+ if (blk->rsvp_nxt) {
+ blk->rsvp = blk->rsvp_nxt;
+ blk->rsvp_nxt = NULL;
+ }
+ }
+ }
+
+ if (!ret)
+ DRM_DEBUG_KMS("rsrv enc %d topology %d\n", rsvp->enc_id,
+ rsvp->topology);
+
+ return ret;
+}
+
+int dpu_rm_reserve(
+ struct dpu_rm *rm,
+ struct drm_encoder *enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct msm_display_topology topology,
+ bool test_only)
+{
+ struct dpu_rm_rsvp *rsvp_cur, *rsvp_nxt;
+ struct dpu_rm_requirements reqs;
+ int ret;
+
+ if (!rm || !enc || !crtc_state || !conn_state) {
+ DPU_ERROR("invalid arguments\n");
+ return -EINVAL;
+ }
+
+ /* Check if this is just a page-flip */
+ if (!drm_atomic_crtc_needs_modeset(crtc_state))
+ return 0;
+
+ DRM_DEBUG_KMS("reserving hw for conn %d enc %d crtc %d test_only %d\n",
+ conn_state->connector->base.id, enc->base.id,
+ crtc_state->crtc->base.id, test_only);
+
+ mutex_lock(&rm->rm_lock);
+
+ _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_BEGIN);
+
+ ret = _dpu_rm_populate_requirements(rm, enc, crtc_state,
+ conn_state, &reqs, topology);
+ if (ret) {
+ DPU_ERROR("failed to populate hw requirements\n");
+ goto end;
+ }
+
+ /*
+ * We only support one active reservation per-hw-block. But to implement
+ * transactional semantics for test-only, and for allowing failure while
+ * modifying your existing reservation, over the course of this
+ * function we can have two reservations:
+ * Current: Existing reservation
+ * Next: Proposed reservation. The proposed reservation may fail, or may
+ * be discarded if in test-only mode.
+ * If reservation is successful, and we're not in test-only, then we
+ * replace the current with the next.
+ */
+ rsvp_nxt = kzalloc(sizeof(*rsvp_nxt), GFP_KERNEL);
+ if (!rsvp_nxt) {
+ ret = -ENOMEM;
+ goto end;
+ }
+
+ rsvp_cur = _dpu_rm_get_rsvp(rm, enc);
+
+ /*
+ * User can request that we clear out any reservation during the
+ * atomic_check phase by using this CLEAR bit
+ */
+ if (rsvp_cur && test_only && RM_RQ_CLEAR(&reqs)) {
+ DPU_DEBUG("test_only & CLEAR: clear rsvp[s%de%d]\n",
+ rsvp_cur->seq, rsvp_cur->enc_id);
+ _dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
+ rsvp_cur = NULL;
+ _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_CLEAR);
+ }
+
+ /* Check the proposed reservation, store it in hw's "next" field */
+ ret = _dpu_rm_make_next_rsvp(rm, enc, crtc_state, conn_state,
+ rsvp_nxt, &reqs);
+
+ _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_AFTER_RSVPNEXT);
+
+ if (ret) {
+ DPU_ERROR("failed to reserve hw resources: %d\n", ret);
+ _dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
+ } else if (test_only && !RM_RQ_LOCK(&reqs)) {
+ /*
+ * Normally, if test_only, test the reservation and then undo
+ * However, if the user requests LOCK, then keep the reservation
+ * made during the atomic_check phase.
+ */
+ DPU_DEBUG("test_only: discard test rsvp[s%de%d]\n",
+ rsvp_nxt->seq, rsvp_nxt->enc_id);
+ _dpu_rm_release_rsvp(rm, rsvp_nxt, conn_state->connector);
+ } else {
+ if (test_only && RM_RQ_LOCK(&reqs))
+ DPU_DEBUG("test_only & LOCK: lock rsvp[s%de%d]\n",
+ rsvp_nxt->seq, rsvp_nxt->enc_id);
+
+ _dpu_rm_release_rsvp(rm, rsvp_cur, conn_state->connector);
+
+ ret = _dpu_rm_commit_rsvp(rm, rsvp_nxt, conn_state);
+ }
+
+ _dpu_rm_print_rsvps(rm, DPU_RM_STAGE_FINAL);
+
+end:
+ mutex_unlock(&rm->rm_lock);
+
+ return ret;
+}
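A hedged sketch of how an atomic path might drive this entry point; in atomic_check the caller passes test_only=true so the proposed reservation is validated and then discarded (unless the LOCK bit is set), while the commit-time call passes test_only=false.

/* Sketch; surrounding variable names are assumptions. */
ret = dpu_rm_reserve(&dpu_kms->rm, drm_enc, crtc_state, conn_state,
		     topology, true /* test_only: atomic_check phase */);
if (ret)
	return ret;	/* the proposed state does not fit the hardware */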
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
new file mode 100644
index 000000000000..ffd1841a6067
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_rm.h
@@ -0,0 +1,199 @@
+/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ *
+ */
+
+#ifndef __DPU_RM_H__
+#define __DPU_RM_H__
+
+#include <linux/list.h>
+
+#include "msm_kms.h"
+#include "dpu_hw_top.h"
+
+/**
+ * enum dpu_rm_topology_name - HW resource use case in use by connector
+ * @DPU_RM_TOPOLOGY_NONE: No topology in use currently
+ * @DPU_RM_TOPOLOGY_SINGLEPIPE: 1 LM, 1 PP, 1 INTF/WB
+ * @DPU_RM_TOPOLOGY_DUALPIPE: 2 LM, 2 PP, 2 INTF/WB
+ * @DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE: 2 LM, 2 PP, 3DMux, 1 INTF/WB
+ */
+enum dpu_rm_topology_name {
+ DPU_RM_TOPOLOGY_NONE = 0,
+ DPU_RM_TOPOLOGY_SINGLEPIPE,
+ DPU_RM_TOPOLOGY_DUALPIPE,
+ DPU_RM_TOPOLOGY_DUALPIPE_3DMERGE,
+ DPU_RM_TOPOLOGY_MAX,
+};
+
+/**
+ * enum dpu_rm_topology_control - topology control preferences for reservation
+ * @DPU_RM_TOPCTL_RESERVE_LOCK: If set, in AtomicTest phase, after a successful
+ * test, reserve the resources for this display.
+ * Normal behavior would not impact the reservation
+ * list during the AtomicTest phase.
+ * @DPU_RM_TOPCTL_RESERVE_CLEAR: If set, in AtomicTest phase, before testing,
+ * release any reservation held by this display.
+ * Normal behavior would not impact the
+ * reservation list during the AtomicTest phase.
+ * @DPU_RM_TOPCTL_DS : Require layer mixers with DS capabilities
+ */
+enum dpu_rm_topology_control {
+ DPU_RM_TOPCTL_RESERVE_LOCK,
+ DPU_RM_TOPCTL_RESERVE_CLEAR,
+ DPU_RM_TOPCTL_DS,
+};
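These enumerators are bit positions within the 64-bit top_ctrl mask (see struct dpu_rm_requirements in dpu_rm.c); a brief sketch of composing and testing the mask, mirroring the RM_RQ_* macros there.

/* Compose a request mask from the bit positions above. */
uint64_t top_ctrl = BIT(DPU_RM_TOPCTL_RESERVE_LOCK) | BIT(DPU_RM_TOPCTL_DS);

/* Consumers test individual bits the same way, cf. RM_RQ_DS() in dpu_rm.c. */
if (top_ctrl & BIT(DPU_RM_TOPCTL_DS))
	/* require layer mixers with destination-scaler support */;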
+
+/**
+ * struct dpu_rm - DPU dynamic hardware resource manager
+ * @dev: device handle for event logging purposes
+ * @rsvps: list of hardware reservations by each crtc->encoder->connector
+ * @hw_blks: array of lists of hardware resources present in the system, one
+ * list per type of hardware block
+ * @hw_mdp: hardware object for mdp_top
+ * @lm_max_width: cached layer mixer maximum width
+ * @rsvp_next_seq: sequence number for next reservation for debugging purposes
+ * @rm_lock: resource manager mutex
+ */
+struct dpu_rm {
+ struct drm_device *dev;
+ struct list_head rsvps;
+ struct list_head hw_blks[DPU_HW_BLK_MAX];
+ struct dpu_hw_mdp *hw_mdp;
+ uint32_t lm_max_width;
+ uint32_t rsvp_next_seq;
+ struct mutex rm_lock;
+};
+
+/**
+ * struct dpu_rm_hw_blk - resource manager internal structure
+ * forward declaration for single iterator definition without void pointer
+ */
+struct dpu_rm_hw_blk;
+
+/**
+ * struct dpu_rm_hw_iter - iterator for use with dpu_rm
+ * @hw: dpu_hw object requested, or NULL on failure
+ * @blk: dpu_rm internal block representation. Clients ignore. Used as iterator.
+ * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
+ * @type: Hardware Block Type client wishes to search for.
+ */
+struct dpu_rm_hw_iter {
+ void *hw;
+ struct dpu_rm_hw_blk *blk;
+ uint32_t enc_id;
+ enum dpu_hw_blk_type type;
+};
+
+/**
+ * dpu_rm_init - Read hardware catalog and create reservation tracking objects
+ * for all HW blocks.
+ * @rm: DPU Resource Manager handle
+ * @cat: Pointer to hardware catalog
+ * @mmio: mapped register io address of MDP
+ * @dev: device handle for event logging purposes
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_init(struct dpu_rm *rm,
+ struct dpu_mdss_cfg *cat,
+ void __iomem *mmio,
+ struct drm_device *dev);
+
+/**
+ * dpu_rm_destroy - Free all memory allocated by dpu_rm_init
+ * @rm: DPU Resource Manager handle
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_destroy(struct dpu_rm *rm);
+
+/**
+ * dpu_rm_reserve - Given a CRTC->Encoder->Connector display chain, analyze
+ * the display's connections and user requirements, specified through related
+ * topology control properties, and reserve hardware blocks to that
+ * display chain.
+ * HW blocks can then be accessed through dpu_rm_get_* functions.
+ * HW Reservations should be released via dpu_rm_release.
+ * @rm: DPU Resource Manager handle
+ * @drm_enc: DRM Encoder handle
+ * @crtc_state: Proposed Atomic DRM CRTC State handle
+ * @conn_state: Proposed Atomic DRM Connector State handle
+ * @topology: topology info for the display
+ * @test_only: Atomic-Test phase, discard results (unless property overrides)
+ * @Return: 0 on Success otherwise -ERROR
+ */
+int dpu_rm_reserve(struct dpu_rm *rm,
+ struct drm_encoder *drm_enc,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state,
+ struct msm_display_topology topology,
+ bool test_only);
+
+/**
+ * dpu_rm_release - Given the encoder for the display chain, release any
+ * HW blocks previously reserved for that use case.
+ * @rm: DPU Resource Manager handle
+ * @enc: DRM Encoder handle
+ */
+void dpu_rm_release(struct dpu_rm *rm, struct drm_encoder *enc);
+
+/**
+ * dpu_rm_get_mdp - Retrieve HW block for MDP TOP.
+ * This is never reserved, and is usable by any display.
+ * @rm: DPU Resource Manager handle
+ * @Return: Pointer to hw block or NULL
+ */
+struct dpu_hw_mdp *dpu_rm_get_mdp(struct dpu_rm *rm);
+
+/**
+ * dpu_rm_init_hw_iter - setup given iterator for new iteration over hw list
+ * using dpu_rm_get_hw
+ * @iter: iter object to initialize
+ * @enc_id: DRM ID of Encoder client wishes to search for, or 0 for Any Encoder
+ * @type: Hardware Block Type client wishes to search for.
+ */
+void dpu_rm_init_hw_iter(
+ struct dpu_rm_hw_iter *iter,
+ uint32_t enc_id,
+ enum dpu_hw_blk_type type);
+
+/**
+ * dpu_rm_get_hw - retrieve reserved hw object given encoder and hw type
+ * Meant to do a single pass through the hardware list to iteratively
+ * retrieve hardware blocks of a given type for a given encoder.
+ * Initialize an iterator object.
+ * Set hw block type of interest. Set encoder id of interest, 0 for any.
+ * Function returns first hw of type for that encoder.
+ * Subsequent calls will return the next reserved hw of that type in-order.
+ * Iterator HW pointer will be null on failure to find hw.
+ * @rm: DPU Resource Manager handle
+ * @iter: iterator object
+ * @Return: true on match found, false on no match found
+ */
+bool dpu_rm_get_hw(struct dpu_rm *rm, struct dpu_rm_hw_iter *iter);
+
+/**
+ * dpu_rm_check_property_topctl - validate property bitmask before it is set
+ * @val: user's proposed topology control bitmask
+ * @Return: 0 on success or error
+ */
+int dpu_rm_check_property_topctl(uint64_t val);
+
+/**
+ * dpu_rm_get_topology_name - returns the name of the given topology
+ * definition
+ * @topology: topology definition
+ * @Return: name of the topology
+ */
+enum dpu_rm_topology_name
+dpu_rm_get_topology_name(struct msm_display_topology topology);
+
+#endif /* __DPU_RM_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
new file mode 100644
index 000000000000..ae0ca5076238
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_trace.h
@@ -0,0 +1,1007 @@
+/* Copyright (c) 2014-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#if !defined(_DPU_TRACE_H_) || defined(TRACE_HEADER_MULTI_READ)
+#define _DPU_TRACE_H_
+
+#include <linux/stringify.h>
+#include <linux/types.h>
+#include <linux/tracepoint.h>
+
+#include <drm/drm_rect.h>
+#include "dpu_crtc.h"
+#include "dpu_encoder_phys.h"
+#include "dpu_hw_mdss.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_plane.h"
+
+#undef TRACE_SYSTEM
+#define TRACE_SYSTEM dpu
+#undef TRACE_INCLUDE_FILE
+#define TRACE_INCLUDE_FILE dpu_trace
+
+TRACE_EVENT(dpu_perf_set_qos_luts,
+ TP_PROTO(u32 pnum, u32 fmt, bool rt, u32 fl,
+ u32 lut, u32 lut_usage),
+ TP_ARGS(pnum, fmt, rt, fl, lut, lut_usage),
+ TP_STRUCT__entry(
+ __field(u32, pnum)
+ __field(u32, fmt)
+ __field(bool, rt)
+ __field(u32, fl)
+ __field(u64, lut)
+ __field(u32, lut_usage)
+ ),
+ TP_fast_assign(
+ __entry->pnum = pnum;
+ __entry->fmt = fmt;
+ __entry->rt = rt;
+ __entry->fl = fl;
+ __entry->lut = lut;
+ __entry->lut_usage = lut_usage;
+ ),
+ TP_printk("pnum=%d fmt=%x rt=%d fl=%d lut=0x%llx lut_usage=%d",
+ __entry->pnum, __entry->fmt,
+ __entry->rt, __entry->fl,
+ __entry->lut, __entry->lut_usage)
+);
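Each TRACE_EVENT() definition here generates a trace_<name>() static call for driver code and a matching control file under tracefs. A hedged sketch of emitting the event above; all argument values are illustrative.

/* Emitting the tracepoint from driver code; the variables are illustrative. */
trace_dpu_perf_set_qos_luts(pnum, fmt, is_realtime, fill_level,
			    lut, lut_usage);

/*
 * At runtime the event is typically enabled via tracefs, e.g.:
 *   echo 1 > /sys/kernel/tracing/events/dpu/dpu_perf_set_qos_luts/enable
 */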
+
+TRACE_EVENT(dpu_perf_set_danger_luts,
+ TP_PROTO(u32 pnum, u32 fmt, u32 mode, u32 danger_lut,
+ u32 safe_lut),
+ TP_ARGS(pnum, fmt, mode, danger_lut, safe_lut),
+ TP_STRUCT__entry(
+ __field(u32, pnum)
+ __field(u32, fmt)
+ __field(u32, mode)
+ __field(u32, danger_lut)
+ __field(u32, safe_lut)
+ ),
+ TP_fast_assign(
+ __entry->pnum = pnum;
+ __entry->fmt = fmt;
+ __entry->mode = mode;
+ __entry->danger_lut = danger_lut;
+ __entry->safe_lut = safe_lut;
+ ),
+ TP_printk("pnum=%d fmt=%x mode=%d luts[0x%x, 0x%x]",
+ __entry->pnum, __entry->fmt,
+ __entry->mode, __entry->danger_lut,
+ __entry->safe_lut)
+);
+
+TRACE_EVENT(dpu_perf_set_ot,
+ TP_PROTO(u32 pnum, u32 xin_id, u32 rd_lim, u32 vbif_idx),
+ TP_ARGS(pnum, xin_id, rd_lim, vbif_idx),
+ TP_STRUCT__entry(
+ __field(u32, pnum)
+ __field(u32, xin_id)
+ __field(u32, rd_lim)
+ __field(u32, vbif_idx)
+ ),
+ TP_fast_assign(
+ __entry->pnum = pnum;
+ __entry->xin_id = xin_id;
+ __entry->rd_lim = rd_lim;
+ __entry->vbif_idx = vbif_idx;
+ ),
+ TP_printk("pnum:%d xin_id:%d ot:%d vbif:%d",
+ __entry->pnum, __entry->xin_id, __entry->rd_lim,
+ __entry->vbif_idx)
+)
+
+TRACE_EVENT(dpu_perf_update_bus,
+ TP_PROTO(int client, unsigned long long ab_quota,
+ unsigned long long ib_quota),
+ TP_ARGS(client, ab_quota, ib_quota),
+ TP_STRUCT__entry(
+ __field(int, client)
+ __field(u64, ab_quota)
+ __field(u64, ib_quota)
+ ),
+ TP_fast_assign(
+ __entry->client = client;
+ __entry->ab_quota = ab_quota;
+ __entry->ib_quota = ib_quota;
+ ),
+ TP_printk("Request client:%d ab=%llu ib=%llu",
+ __entry->client,
+ __entry->ab_quota,
+ __entry->ib_quota)
+)
+
+
+TRACE_EVENT(dpu_cmd_release_bw,
+ TP_PROTO(u32 crtc_id),
+ TP_ARGS(crtc_id),
+ TP_STRUCT__entry(
+ __field(u32, crtc_id)
+ ),
+ TP_fast_assign(
+ __entry->crtc_id = crtc_id;
+ ),
+ TP_printk("crtc:%d", __entry->crtc_id)
+);
+
+TRACE_EVENT(tracing_mark_write,
+ TP_PROTO(int pid, const char *name, bool trace_begin),
+ TP_ARGS(pid, name, trace_begin),
+ TP_STRUCT__entry(
+ __field(int, pid)
+ __string(trace_name, name)
+ __field(bool, trace_begin)
+ ),
+ TP_fast_assign(
+ __entry->pid = pid;
+ __assign_str(trace_name, name);
+ __entry->trace_begin = trace_begin;
+ ),
+ TP_printk("%s|%d|%s", __entry->trace_begin ? "B" : "E",
+ __entry->pid, __get_str(trace_name))
+)
+
+TRACE_EVENT(dpu_trace_counter,
+ TP_PROTO(int pid, char *name, int value),
+ TP_ARGS(pid, name, value),
+ TP_STRUCT__entry(
+ __field(int, pid)
+ __string(counter_name, name)
+ __field(int, value)
+ ),
+ TP_fast_assign(
+ __entry->pid = current->tgid;
+ __assign_str(counter_name, name);
+ __entry->value = value;
+ ),
+ TP_printk("%d|%s|%d", __entry->pid,
+ __get_str(counter_name), __entry->value)
+)
+
+TRACE_EVENT(dpu_perf_crtc_update,
+ TP_PROTO(u32 crtc, u64 bw_ctl_mnoc, u64 bw_ctl_llcc,
+ u64 bw_ctl_ebi, u32 core_clk_rate,
+ bool stop_req, u32 update_bus, u32 update_clk),
+ TP_ARGS(crtc, bw_ctl_mnoc, bw_ctl_llcc, bw_ctl_ebi, core_clk_rate,
+ stop_req, update_bus, update_clk),
+ TP_STRUCT__entry(
+ __field(u32, crtc)
+ __field(u64, bw_ctl_mnoc)
+ __field(u64, bw_ctl_llcc)
+ __field(u64, bw_ctl_ebi)
+ __field(u32, core_clk_rate)
+ __field(bool, stop_req)
+ __field(u32, update_bus)
+ __field(u32, update_clk)
+ ),
+ TP_fast_assign(
+ __entry->crtc = crtc;
+ __entry->bw_ctl_mnoc = bw_ctl_mnoc;
+ __entry->bw_ctl_llcc = bw_ctl_llcc;
+ __entry->bw_ctl_ebi = bw_ctl_ebi;
+ __entry->core_clk_rate = core_clk_rate;
+ __entry->stop_req = stop_req;
+ __entry->update_bus = update_bus;
+ __entry->update_clk = update_clk;
+ ),
+ TP_printk(
+ "crtc=%d bw_mnoc=%llu bw_llcc=%llu bw_ebi=%llu clk_rate=%u stop_req=%d u_bus=%d u_clk=%d",
+ __entry->crtc,
+ __entry->bw_ctl_mnoc,
+ __entry->bw_ctl_llcc,
+ __entry->bw_ctl_ebi,
+ __entry->core_clk_rate,
+ __entry->stop_req,
+ __entry->update_bus,
+ __entry->update_clk)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_irq_template,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ int irq_idx),
+ TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intr_idx, intr_idx )
+ __field( int, hw_idx )
+ __field( int, irq_idx )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intr_idx = intr_idx;
+ __entry->hw_idx = hw_idx;
+ __entry->irq_idx = irq_idx;
+ ),
+ TP_printk("id=%u, intr=%d, hw=%d, irq=%d",
+ __entry->drm_id, __entry->intr_idx, __entry->hw_idx,
+ __entry->irq_idx)
+);
+DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_register_success,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ int irq_idx),
+ TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx)
+);
+DEFINE_EVENT(dpu_enc_irq_template, dpu_enc_irq_unregister_success,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ int irq_idx),
+ TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx)
+);
+
+TRACE_EVENT(dpu_enc_irq_wait_success,
+ TP_PROTO(uint32_t drm_id, enum dpu_intr_idx intr_idx, int hw_idx,
+ int irq_idx, enum dpu_pingpong pp_idx, int atomic_cnt),
+ TP_ARGS(drm_id, intr_idx, hw_idx, irq_idx, pp_idx, atomic_cnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intr_idx, intr_idx )
+ __field( int, hw_idx )
+ __field( int, irq_idx )
+ __field( enum dpu_pingpong, pp_idx )
+ __field( int, atomic_cnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intr_idx = intr_idx;
+ __entry->hw_idx = hw_idx;
+ __entry->irq_idx = irq_idx;
+ __entry->pp_idx = pp_idx;
+ __entry->atomic_cnt = atomic_cnt;
+ ),
+ TP_printk("id=%u, intr=%d, hw=%d, irq=%d, pp=%d, atomic_cnt=%d",
+ __entry->drm_id, __entry->intr_idx, __entry->hw_idx,
+ __entry->irq_idx, __entry->pp_idx, __entry->atomic_cnt)
+);
+
+DECLARE_EVENT_CLASS(dpu_drm_obj_template,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ ),
+ TP_printk("id=%u", __entry->drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_atomic_check,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_mode_set,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_disable,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_kickoff,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_prepare_kickoff,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_enc_prepare_kickoff_reset,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_complete_flip,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_vblank_cb,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_crtc_complete_commit,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_enc_enable,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_commit,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
+DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_wait_for_commit_done,
+ TP_PROTO(uint32_t drm_id),
+ TP_ARGS(drm_id)
+);
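The DECLARE_EVENT_CLASS/DEFINE_EVENT split above lets all single-id events share one entry/print layout; adding another costs only a DEFINE_EVENT stanza. A sketch with a hypothetical event name:

/* Hypothetical: one more id-only event reusing dpu_drm_obj_template. */
DEFINE_EVENT(dpu_drm_obj_template, dpu_kms_example_event,
	TP_PROTO(uint32_t drm_id),
	TP_ARGS(drm_id)
);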
+
+TRACE_EVENT(dpu_enc_enable,
+ TP_PROTO(uint32_t drm_id, int hdisplay, int vdisplay),
+ TP_ARGS(drm_id, hdisplay, vdisplay),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, hdisplay )
+ __field( int, vdisplay )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->hdisplay = hdisplay;
+ __entry->vdisplay = vdisplay;
+ ),
+ TP_printk("id=%u, mode=%dx%d",
+ __entry->drm_id, __entry->hdisplay, __entry->vdisplay)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_keyval_template,
+ TP_PROTO(uint32_t drm_id, int val),
+ TP_ARGS(drm_id, val),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, val )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->val = val;
+ ),
+ TP_printk("id=%u, val=%d", __entry->drm_id, __entry->val)
+);
+DEFINE_EVENT(dpu_enc_keyval_template, dpu_enc_underrun_cb,
+ TP_PROTO(uint32_t drm_id, int count),
+ TP_ARGS(drm_id, count)
+);
+DEFINE_EVENT(dpu_enc_keyval_template, dpu_enc_trigger_start,
+ TP_PROTO(uint32_t drm_id, int ctl_idx),
+ TP_ARGS(drm_id, ctl_idx)
+);
+
+TRACE_EVENT(dpu_enc_atomic_check_flags,
+ TP_PROTO(uint32_t drm_id, unsigned int flags, int private_flags),
+ TP_ARGS(drm_id, flags, private_flags),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( unsigned int, flags )
+ __field( int, private_flags )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->flags = flags;
+ __entry->private_flags = private_flags;
+ ),
+ TP_printk("id=%u, flags=%u, private_flags=%d",
+ __entry->drm_id, __entry->flags, __entry->private_flags)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_id_enable_template,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( bool, enable )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->enable = enable;
+ ),
+ TP_printk("id=%u, enable=%s",
+ __entry->drm_id, __entry->enable ? "true" : "false")
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_rc_helper,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_vblank_cb,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_frame_event_cb,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+DEFINE_EVENT(dpu_enc_id_enable_template, dpu_enc_phys_cmd_connect_te,
+ TP_PROTO(uint32_t drm_id, bool enable),
+ TP_ARGS(drm_id, enable)
+);
+
+TRACE_EVENT(dpu_enc_rc,
+ TP_PROTO(uint32_t drm_id, u32 sw_event, bool idle_pc_supported,
+ int rc_state, const char *stage),
+ TP_ARGS(drm_id, sw_event, idle_pc_supported, rc_state, stage),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( u32, sw_event )
+ __field( bool, idle_pc_supported )
+ __field( int, rc_state )
+ __string( stage_str, stage )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->sw_event = sw_event;
+ __entry->idle_pc_supported = idle_pc_supported;
+ __entry->rc_state = rc_state;
+ __assign_str(stage_str, stage);
+ ),
+ TP_printk("%s: id:%u, sw_event:%d, idle_pc_supported:%s, rc_state:%d\n",
+ __get_str(stage_str), __entry->drm_id, __entry->sw_event,
+ __entry->idle_pc_supported ? "true" : "false",
+ __entry->rc_state)
+);
+
+TRACE_EVENT(dpu_enc_frame_done_cb_not_busy,
+ TP_PROTO(uint32_t drm_id, u32 event, enum dpu_intf intf_idx),
+ TP_ARGS(drm_id, event, intf_idx),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( u32, event )
+ __field( enum dpu_intf, intf_idx )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->event = event;
+ __entry->intf_idx = intf_idx;
+ ),
+ TP_printk("id=%u, event=%u, intf=%d", __entry->drm_id, __entry->event,
+ __entry->intf_idx)
+);
+
+TRACE_EVENT(dpu_enc_frame_done_cb,
+ TP_PROTO(uint32_t drm_id, unsigned int idx,
+ unsigned long frame_busy_mask),
+ TP_ARGS(drm_id, idx, frame_busy_mask),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( unsigned int, idx )
+ __field( unsigned long, frame_busy_mask )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->idx = idx;
+ __entry->frame_busy_mask = frame_busy_mask;
+ ),
+ TP_printk("id=%u, idx=%u, frame_busy_mask=%lx", __entry->drm_id,
+ __entry->idx, __entry->frame_busy_mask)
+);
+
+TRACE_EVENT(dpu_enc_trigger_flush,
+ TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx,
+ int pending_kickoff_cnt, int ctl_idx, u32 pending_flush_ret),
+ TP_ARGS(drm_id, intf_idx, pending_kickoff_cnt, ctl_idx,
+ pending_flush_ret),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intf, intf_idx )
+ __field( int, pending_kickoff_cnt )
+ __field( int, ctl_idx )
+ __field( u32, pending_flush_ret )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intf_idx = intf_idx;
+ __entry->pending_kickoff_cnt = pending_kickoff_cnt;
+ __entry->ctl_idx = ctl_idx;
+ __entry->pending_flush_ret = pending_flush_ret;
+ ),
+ TP_printk("id=%u, intf_idx=%d, pending_kickoff_cnt=%d ctl_idx=%d "
+ "pending_flush_ret=%u", __entry->drm_id,
+ __entry->intf_idx, __entry->pending_kickoff_cnt,
+ __entry->ctl_idx, __entry->pending_flush_ret)
+);
+
+DECLARE_EVENT_CLASS(dpu_enc_ktime_template,
+ TP_PROTO(uint32_t drm_id, ktime_t time),
+ TP_ARGS(drm_id, time),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( ktime_t, time )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->time = time;
+ ),
+ TP_printk("id=%u, time=%lld", __entry->drm_id,
+ ktime_to_ms(__entry->time))
+);
+DEFINE_EVENT(dpu_enc_ktime_template, dpu_enc_vsync_event_work,
+ TP_PROTO(uint32_t drm_id, ktime_t time),
+ TP_ARGS(drm_id, time)
+);
+DEFINE_EVENT(dpu_enc_ktime_template, dpu_enc_early_kickoff,
+ TP_PROTO(uint32_t drm_id, ktime_t time),
+ TP_ARGS(drm_id, time)
+);
+
+DECLARE_EVENT_CLASS(dpu_id_event_template,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( u32, event )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->event = event;
+ ),
+ TP_printk("id=%u, event=%u", __entry->drm_id, __entry->event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_enc_frame_done_timeout,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_cb,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_handle_power_event,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_done,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+DEFINE_EVENT(dpu_id_event_template, dpu_crtc_frame_event_more_pending,
+ TP_PROTO(uint32_t drm_id, u32 event),
+ TP_ARGS(drm_id, event)
+);
+
+TRACE_EVENT(dpu_enc_wait_event_timeout,
+ TP_PROTO(uint32_t drm_id, int32_t hw_id, int rc, s64 time,
+ s64 expected_time, int atomic_cnt),
+ TP_ARGS(drm_id, hw_id, rc, time, expected_time, atomic_cnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int32_t, hw_id )
+ __field( int, rc )
+ __field( s64, time )
+ __field( s64, expected_time )
+ __field( int, atomic_cnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->hw_id = hw_id;
+ __entry->rc = rc;
+ __entry->time = time;
+ __entry->expected_time = expected_time;
+ __entry->atomic_cnt = atomic_cnt;
+ ),
+ TP_printk("id=%u, hw_id=%d, rc=%d, time=%lld, expected=%lld cnt=%d",
+ __entry->drm_id, __entry->hw_id, __entry->rc, __entry->time,
+ __entry->expected_time, __entry->atomic_cnt)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_irq_ctrl,
+ TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, bool enable,
+ int refcnt),
+ TP_ARGS(drm_id, pp, enable, refcnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_pingpong, pp )
+ __field( bool, enable )
+ __field( int, refcnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->pp = pp;
+ __entry->enable = enable;
+ __entry->refcnt = refcnt;
+ ),
+ TP_printk("id=%u, pp=%d, enable=%s, refcnt=%d", __entry->drm_id,
+ __entry->pp, __entry->enable ? "true" : "false",
+ __entry->refcnt)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_pp_tx_done,
+ TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, int new_count,
+ u32 event),
+ TP_ARGS(drm_id, pp, new_count, event),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_pingpong, pp )
+ __field( int, new_count )
+ __field( u32, event )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->pp = pp;
+ __entry->new_count = new_count;
+ __entry->event = event;
+ ),
+ TP_printk("id=%u, pp=%d, new_count=%d, event=%u", __entry->drm_id,
+ __entry->pp, __entry->new_count, __entry->event)
+);
+
+TRACE_EVENT(dpu_enc_phys_cmd_pdone_timeout,
+ TP_PROTO(uint32_t drm_id, enum dpu_pingpong pp, int timeout_count,
+ int kickoff_count, u32 event),
+ TP_ARGS(drm_id, pp, timeout_count, kickoff_count, event),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_pingpong, pp )
+ __field( int, timeout_count )
+ __field( int, kickoff_count )
+ __field( u32, event )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->pp = pp;
+ __entry->timeout_count = timeout_count;
+ __entry->kickoff_count = kickoff_count;
+ __entry->event = event;
+ ),
+ TP_printk("id=%u, pp=%d, timeout_count=%d, kickoff_count=%d, event=%u",
+ __entry->drm_id, __entry->pp, __entry->timeout_count,
+ __entry->kickoff_count, __entry->event)
+);
+
+TRACE_EVENT(dpu_enc_phys_vid_post_kickoff,
+ TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx),
+ TP_ARGS(drm_id, intf_idx),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intf, intf_idx )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intf_idx = intf_idx;
+ ),
+ TP_printk("id=%u, intf_idx=%d", __entry->drm_id, __entry->intf_idx)
+);
+
+TRACE_EVENT(dpu_enc_phys_vid_irq_ctrl,
+ TP_PROTO(uint32_t drm_id, enum dpu_intf intf_idx, bool enable,
+ int refcnt),
+ TP_ARGS(drm_id, intf_idx, enable, refcnt),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( enum dpu_intf, intf_idx )
+ __field( bool, enable )
+ __field( int, refcnt )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->intf_idx = intf_idx;
+ __entry->enable = enable;
+ __entry->refcnt = refcnt;
+ ),
+ TP_printk("id=%u, intf_idx=%d enable=%s refcnt=%d", __entry->drm_id,
+ __entry->intf_idx, __entry->enable ? "true" : "false",
+ __entry->refcnt)
+);
+
+TRACE_EVENT(dpu_crtc_setup_mixer,
+ TP_PROTO(uint32_t crtc_id, uint32_t plane_id,
+ struct drm_plane_state *state, struct dpu_plane_state *pstate,
+ uint32_t stage_idx, enum dpu_sspp sspp, uint32_t pixel_format,
+ uint64_t modifier),
+ TP_ARGS(crtc_id, plane_id, state, pstate, stage_idx, sspp,
+ pixel_format, modifier),
+ TP_STRUCT__entry(
+ __field( uint32_t, crtc_id )
+ __field( uint32_t, plane_id )
+ __field( struct drm_plane_state*,state )
+ __field( struct dpu_plane_state*,pstate )
+ __field( uint32_t, stage_idx )
+ __field( enum dpu_sspp, sspp )
+ __field( uint32_t, pixel_format )
+ __field( uint64_t, modifier )
+ ),
+ TP_fast_assign(
+ __entry->crtc_id = crtc_id;
+ __entry->plane_id = plane_id;
+ __entry->state = state;
+ __entry->pstate = pstate;
+ __entry->stage_idx = stage_idx;
+ __entry->sspp = sspp;
+ __entry->pixel_format = pixel_format;
+ __entry->modifier = modifier;
+ ),
+ TP_printk("crtc_id:%u plane_id:%u fb_id:%u src:{%ux%u+%ux%u} "
+ "dst:{%ux%u+%ux%u} stage_idx:%u stage:%d, sspp:%d "
+ "multirect_index:%d multirect_mode:%u pix_format:%u "
+ "modifier:%llu",
+ __entry->crtc_id, __entry->plane_id,
+ __entry->state->fb ? __entry->state->fb->base.id : -1,
+ __entry->state->src_w >> 16, __entry->state->src_h >> 16,
+ __entry->state->src_x >> 16, __entry->state->src_y >> 16,
+ __entry->state->crtc_w, __entry->state->crtc_h,
+ __entry->state->crtc_x, __entry->state->crtc_y,
+ __entry->stage_idx, __entry->pstate->stage, __entry->sspp,
+ __entry->pstate->multirect_index,
+ __entry->pstate->multirect_mode, __entry->pixel_format,
+ __entry->modifier)
+);
+
+TRACE_EVENT(dpu_crtc_setup_lm_bounds,
+ TP_PROTO(uint32_t drm_id, int mixer, struct drm_rect *bounds),
+ TP_ARGS(drm_id, mixer, bounds),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, mixer )
+ __field( struct drm_rect *, bounds )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->mixer = mixer;
+ __entry->bounds = bounds;
+ ),
+ TP_printk("id:%u mixer:%d bounds:" DRM_RECT_FMT, __entry->drm_id,
+ __entry->mixer, DRM_RECT_ARG(__entry->bounds))
+);
+
+TRACE_EVENT(dpu_crtc_vblank_enable,
+ TP_PROTO(uint32_t drm_id, uint32_t enc_id, bool enable,
+ struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enc_id, enable, crtc),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( uint32_t, enc_id )
+ __field( bool, enable )
+ __field( struct dpu_crtc *, crtc )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->enc_id = enc_id;
+ __entry->enable = enable;
+ __entry->crtc = crtc;
+ ),
+ TP_printk("id:%u encoder:%u enable:%s state{enabled:%s suspend:%s "
+ "vblank_req:%s}",
+ __entry->drm_id, __entry->enc_id,
+ __entry->enable ? "true" : "false",
+ __entry->crtc->enabled ? "true" : "false",
+ __entry->crtc->suspend ? "true" : "false",
+ __entry->crtc->vblank_requested ? "true" : "false")
+);
+
+DECLARE_EVENT_CLASS(dpu_crtc_enable_template,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( bool, enable )
+ __field( struct dpu_crtc *, crtc )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->enable = enable;
+ __entry->crtc = crtc;
+ ),
+ TP_printk("id:%u enable:%s state{enabled:%s suspend:%s vblank_req:%s}",
+ __entry->drm_id, __entry->enable ? "true" : "false",
+ __entry->crtc->enabled ? "true" : "false",
+ __entry->crtc->suspend ? "true" : "false",
+ __entry->crtc->vblank_requested ? "true" : "false")
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_set_suspend,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_enable,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_disable,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc)
+);
+DEFINE_EVENT(dpu_crtc_enable_template, dpu_crtc_vblank,
+ TP_PROTO(uint32_t drm_id, bool enable, struct dpu_crtc *crtc),
+ TP_ARGS(drm_id, enable, crtc)
+);
+
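+/*
+ * Each DEFINE_EVENT() above stamps out the shared
+ * dpu_crtc_enable_template layout under its own name, so callers get
+ * trace_dpu_crtc_enable(), trace_dpu_crtc_disable(), etc. with
+ * identical arguments but individually controllable tracefs knobs.
+ */
+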
+TRACE_EVENT(dpu_crtc_disable_frame_pending,
+ TP_PROTO(uint32_t drm_id, int frame_pending),
+ TP_ARGS(drm_id, frame_pending),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( int, frame_pending )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->frame_pending = frame_pending;
+ ),
+ TP_printk("id:%u frame_pending:%d", __entry->drm_id,
+ __entry->frame_pending)
+);
+
+TRACE_EVENT(dpu_plane_set_scanout,
+ TP_PROTO(enum dpu_sspp index, struct dpu_hw_fmt_layout *layout,
+ enum dpu_sspp_multirect_index multirect_index),
+ TP_ARGS(index, layout, multirect_index),
+ TP_STRUCT__entry(
+ __field( enum dpu_sspp, index )
+ __field( struct dpu_hw_fmt_layout*, layout )
+ __field( enum dpu_sspp_multirect_index, multirect_index)
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->layout = layout;
+ __entry->multirect_index = multirect_index;
+ ),
+ TP_printk("index:%d layout:{%ux%u @ [%u/%u, %u/%u, %u/%u, %u/%u]} "
+ "multirect_index:%d", __entry->index, __entry->layout->width,
+ __entry->layout->height, __entry->layout->plane_addr[0],
+ __entry->layout->plane_size[0],
+ __entry->layout->plane_addr[1],
+ __entry->layout->plane_size[1],
+ __entry->layout->plane_addr[2],
+ __entry->layout->plane_size[2],
+ __entry->layout->plane_addr[3],
+ __entry->layout->plane_size[3], __entry->multirect_index)
+);
+
+TRACE_EVENT(dpu_plane_disable,
+ TP_PROTO(uint32_t drm_id, bool is_virtual, uint32_t multirect_mode),
+ TP_ARGS(drm_id, is_virtual, multirect_mode),
+ TP_STRUCT__entry(
+ __field( uint32_t, drm_id )
+ __field( bool, is_virtual )
+ __field( uint32_t, multirect_mode )
+ ),
+ TP_fast_assign(
+ __entry->drm_id = drm_id;
+ __entry->is_virtual = is_virtual;
+ __entry->multirect_mode = multirect_mode;
+ ),
+ TP_printk("id:%u is_virtual:%s multirect_mode:%u", __entry->drm_id,
+ __entry->is_virtual ? "true" : "false",
+ __entry->multirect_mode)
+);
+
+DECLARE_EVENT_CLASS(dpu_rm_iter_template,
+ TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+ TP_ARGS(id, type, enc_id),
+ TP_STRUCT__entry(
+ __field( uint32_t, id )
+ __field( enum dpu_hw_blk_type, type )
+ __field( uint32_t, enc_id )
+ ),
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->type = type;
+ __entry->enc_id = enc_id;
+ ),
+	TP_printk("id:%u type:%d enc_id:%u", __entry->id, __entry->type,
+ __entry->enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_cdm,
+ TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+ TP_ARGS(id, type, enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_intf,
+ TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+ TP_ARGS(id, type, enc_id)
+);
+DEFINE_EVENT(dpu_rm_iter_template, dpu_rm_reserve_ctls,
+ TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id),
+ TP_ARGS(id, type, enc_id)
+);
+
+TRACE_EVENT(dpu_rm_reserve_lms,
+ TP_PROTO(uint32_t id, enum dpu_hw_blk_type type, uint32_t enc_id,
+ uint32_t pp_id),
+ TP_ARGS(id, type, enc_id, pp_id),
+ TP_STRUCT__entry(
+ __field( uint32_t, id )
+ __field( enum dpu_hw_blk_type, type )
+ __field( uint32_t, enc_id )
+ __field( uint32_t, pp_id )
+ ),
+ TP_fast_assign(
+ __entry->id = id;
+ __entry->type = type;
+ __entry->enc_id = enc_id;
+ __entry->pp_id = pp_id;
+ ),
+	TP_printk("id:%u type:%d enc_id:%u pp_id:%u", __entry->id,
+ __entry->type, __entry->enc_id, __entry->pp_id)
+);
+
+TRACE_EVENT(dpu_vbif_wait_xin_halt_fail,
+ TP_PROTO(enum dpu_vbif index, u32 xin_id),
+ TP_ARGS(index, xin_id),
+ TP_STRUCT__entry(
+ __field( enum dpu_vbif, index )
+ __field( u32, xin_id )
+ ),
+ TP_fast_assign(
+ __entry->index = index;
+ __entry->xin_id = xin_id;
+ ),
+ TP_printk("index:%d xin_id:%u", __entry->index, __entry->xin_id)
+);
+
+TRACE_EVENT(dpu_pp_connect_ext_te,
+ TP_PROTO(enum dpu_pingpong pp, u32 cfg),
+ TP_ARGS(pp, cfg),
+ TP_STRUCT__entry(
+ __field( enum dpu_pingpong, pp )
+ __field( u32, cfg )
+ ),
+ TP_fast_assign(
+ __entry->pp = pp;
+ __entry->cfg = cfg;
+ ),
+ TP_printk("pp:%d cfg:%u", __entry->pp, __entry->cfg)
+);
+
+DECLARE_EVENT_CLASS(dpu_core_irq_idx_cnt_template,
+ TP_PROTO(int irq_idx, int enable_count),
+ TP_ARGS(irq_idx, enable_count),
+ TP_STRUCT__entry(
+ __field( int, irq_idx )
+ __field( int, enable_count )
+ ),
+ TP_fast_assign(
+ __entry->irq_idx = irq_idx;
+ __entry->enable_count = enable_count;
+ ),
+	TP_printk("irq_idx:%d enable_count:%d", __entry->irq_idx,
+ __entry->enable_count)
+);
+DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_enable_idx,
+ TP_PROTO(int irq_idx, int enable_count),
+ TP_ARGS(irq_idx, enable_count)
+);
+DEFINE_EVENT(dpu_core_irq_idx_cnt_template, dpu_core_irq_disable_idx,
+ TP_PROTO(int irq_idx, int enable_count),
+ TP_ARGS(irq_idx, enable_count)
+);
+
+DECLARE_EVENT_CLASS(dpu_core_irq_callback_template,
+ TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+ TP_ARGS(irq_idx, callback),
+ TP_STRUCT__entry(
+ __field( int, irq_idx )
+ __field( struct dpu_irq_callback *, callback)
+ ),
+ TP_fast_assign(
+ __entry->irq_idx = irq_idx;
+ __entry->callback = callback;
+ ),
+ TP_printk("irq_idx:%d callback:%pK", __entry->irq_idx,
+ __entry->callback)
+);
+DEFINE_EVENT(dpu_core_irq_callback_template, dpu_core_irq_register_callback,
+ TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+ TP_ARGS(irq_idx, callback)
+);
+DEFINE_EVENT(dpu_core_irq_callback_template, dpu_core_irq_unregister_callback,
+ TP_PROTO(int irq_idx, struct dpu_irq_callback *callback),
+ TP_ARGS(irq_idx, callback)
+);
+
+TRACE_EVENT(dpu_core_perf_update_clk,
+ TP_PROTO(struct drm_device *dev, bool stop_req, u64 clk_rate),
+ TP_ARGS(dev, stop_req, clk_rate),
+ TP_STRUCT__entry(
+ __field( struct drm_device *, dev )
+ __field( bool, stop_req )
+ __field( u64, clk_rate )
+ ),
+ TP_fast_assign(
+ __entry->dev = dev;
+ __entry->stop_req = stop_req;
+ __entry->clk_rate = clk_rate;
+ ),
+ TP_printk("dev:%s stop_req:%s clk_rate:%llu", __entry->dev->unique,
+ __entry->stop_req ? "true" : "false", __entry->clk_rate)
+);
+
+#define DPU_ATRACE_END(name) trace_tracing_mark_write(current->tgid, name, 0)
+#define DPU_ATRACE_BEGIN(name) trace_tracing_mark_write(current->tgid, name, 1)
+#define DPU_ATRACE_FUNC() DPU_ATRACE_BEGIN(__func__)
+
+#define DPU_ATRACE_INT(name, value) \
+ trace_dpu_trace_counter(current->tgid, name, value)
+
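+/*
+ * Usage sketch (illustrative only): the ATRACE helpers emit
+ * Android-systrace-style begin/end and counter records around a
+ * critical section:
+ *
+ *	DPU_ATRACE_BEGIN("kickoff");
+ *	... work to be profiled ...
+ *	DPU_ATRACE_END("kickoff");
+ *	DPU_ATRACE_INT("frame_pending", frame_pending);
+ */
+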
+#endif /* _DPU_TRACE_H_ */
+
+/* This part must be outside protection */
+#undef TRACE_INCLUDE_PATH
+#define TRACE_INCLUDE_PATH .
+#include <trace/define_trace.h>
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
new file mode 100644
index 000000000000..295528292296
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.c
@@ -0,0 +1,384 @@
+/* Copyright (c) 2015-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#define pr_fmt(fmt) "[drm:%s:%d] " fmt, __func__, __LINE__
+
+#include <linux/debugfs.h>
+
+#include "dpu_vbif.h"
+#include "dpu_hw_vbif.h"
+#include "dpu_trace.h"
+
+/**
+ * _dpu_vbif_wait_for_xin_halt - wait for the xin to halt
+ * @vbif: Pointer to hardware vbif driver
+ * @xin_id: Client interface identifier
+ * Return: 0 on success; error code otherwise
+ */
+static int _dpu_vbif_wait_for_xin_halt(struct dpu_hw_vbif *vbif, u32 xin_id)
+{
+ ktime_t timeout;
+ bool status;
+ int rc;
+
+ if (!vbif || !vbif->cap || !vbif->ops.get_halt_ctrl) {
+		DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
+ return -EINVAL;
+ }
+
+ timeout = ktime_add_us(ktime_get(), vbif->cap->xin_halt_timeout);
+ for (;;) {
+ status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+ if (status)
+ break;
+ if (ktime_compare_safe(ktime_get(), timeout) > 0) {
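+			/* re-sample status in case the halt raced the timeout */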
+ status = vbif->ops.get_halt_ctrl(vbif, xin_id);
+ break;
+ }
+ usleep_range(501, 1000);
+ }
+
+ if (!status) {
+ rc = -ETIMEDOUT;
+ DPU_ERROR("VBIF %d client %d not halting. TIMEDOUT.\n",
+ vbif->idx - VBIF_0, xin_id);
+ } else {
+ rc = 0;
+ DPU_DEBUG("VBIF %d client %d is halted\n",
+ vbif->idx - VBIF_0, xin_id);
+ }
+
+ return rc;
+}
+
+/**
+ * _dpu_vbif_apply_dynamic_ot_limit - determine OT based on usecase parameters
+ * @vbif: Pointer to hardware vbif driver
+ * @ot_lim: Pointer to OT limit to be modified
+ * @params: Pointer to usecase parameters
+ */
+static void _dpu_vbif_apply_dynamic_ot_limit(struct dpu_hw_vbif *vbif,
+ u32 *ot_lim, struct dpu_vbif_set_ot_params *params)
+{
+ u64 pps;
+ const struct dpu_vbif_dynamic_ot_tbl *tbl;
+ u32 i;
+
+ if (!vbif || !(vbif->cap->features & BIT(DPU_VBIF_QOS_OTLIM)))
+ return;
+
+ /* Dynamic OT setting done only for WFD */
+ if (!params->is_wfd)
+ return;
+
+ pps = params->frame_rate;
+ pps *= params->width;
+ pps *= params->height;
+
+ tbl = params->rd ? &vbif->cap->dynamic_ot_rd_tbl :
+ &vbif->cap->dynamic_ot_wr_tbl;
+
+ for (i = 0; i < tbl->count; i++) {
+ if (pps <= tbl->cfg[i].pps) {
+ *ot_lim = tbl->cfg[i].ot_limit;
+ break;
+ }
+ }
+
+ DPU_DEBUG("vbif:%d xin:%d w:%d h:%d fps:%d pps:%llu ot:%u\n",
+ vbif->idx - VBIF_0, params->xin_id,
+ params->width, params->height, params->frame_rate,
+ pps, *ot_lim);
+}
+
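+/*
+ * Worked example (illustrative only): for a 1080p60 WFD session,
+ * pps = 60 * 1920 * 1080 = 124416000; the first table entry with
+ * cfg[i].pps >= 124416000 supplies the OT limit, and if no entry
+ * matches, the default limit already in *ot_lim is left untouched.
+ */
+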
+/**
+ * _dpu_vbif_get_ot_limit - get OT based on usecase & configuration parameters
+ * @vbif: Pointer to hardware vbif driver
+ * @params: Pointer to usecase parameters
+ * Return: OT limit, or 0 if no limit should be programmed
+ */
+static u32 _dpu_vbif_get_ot_limit(struct dpu_hw_vbif *vbif,
+ struct dpu_vbif_set_ot_params *params)
+{
+ u32 ot_lim = 0;
+ u32 val;
+
+ if (!vbif || !vbif->cap) {
+		DPU_ERROR("invalid arguments vbif %d\n", vbif != NULL);
+		return 0; /* u32 return: 0 means no OT limit to program */
+ }
+
+ if (vbif->cap->default_ot_wr_limit && !params->rd)
+ ot_lim = vbif->cap->default_ot_wr_limit;
+ else if (vbif->cap->default_ot_rd_limit && params->rd)
+ ot_lim = vbif->cap->default_ot_rd_limit;
+
+ /*
+ * If default ot is not set from dt/catalog,
+ * then do not configure it.
+ */
+ if (ot_lim == 0)
+ goto exit;
+
+ /* Modify the limits if the target and the use case requires it */
+ _dpu_vbif_apply_dynamic_ot_limit(vbif, &ot_lim, params);
+
+	if (vbif->ops.get_limit_conf) {
+ val = vbif->ops.get_limit_conf(vbif,
+ params->xin_id, params->rd);
+ if (val == ot_lim)
+ ot_lim = 0;
+ }
+
+exit:
+ DPU_DEBUG("vbif:%d xin:%d ot_lim:%d\n",
+ vbif->idx - VBIF_0, params->xin_id, ot_lim);
+ return ot_lim;
+}
+
+/**
+ * dpu_vbif_set_ot_limit - set OT based on usecase & configuration parameters
+ * @dpu_kms: DPU handler
+ * @params: Pointer to usecase parameters
+ *
+ * Note: this function blocks while waiting for the bus to halt.
+ */
+void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_ot_params *params)
+{
+ struct dpu_hw_vbif *vbif = NULL;
+ struct dpu_hw_mdp *mdp;
+ bool forced_on = false;
+ u32 ot_lim;
+ int ret, i;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+ mdp = dpu_kms->hw_mdp;
+
+	for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+		if (dpu_kms->hw_vbif[i] &&
+		    dpu_kms->hw_vbif[i]->idx == params->vbif_idx) {
+			vbif = dpu_kms->hw_vbif[i];
+			break;
+		}
+	}
+
+ if (!vbif || !mdp) {
+ DPU_DEBUG("invalid arguments vbif %d mdp %d\n",
+			  vbif != NULL, mdp != NULL);
+ return;
+ }
+
+ if (!mdp->ops.setup_clk_force_ctrl ||
+ !vbif->ops.set_limit_conf ||
+ !vbif->ops.set_halt_ctrl)
+ return;
+
+ /* set write_gather_en for all write clients */
+ if (vbif->ops.set_write_gather_en && !params->rd)
+ vbif->ops.set_write_gather_en(vbif, params->xin_id);
+
+ ot_lim = _dpu_vbif_get_ot_limit(vbif, params) & 0xFF;
+
+ if (ot_lim == 0)
+ goto exit;
+
+ trace_dpu_perf_set_ot(params->num, params->xin_id, ot_lim,
+ params->vbif_idx);
+
+ forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+ vbif->ops.set_limit_conf(vbif, params->xin_id, params->rd, ot_lim);
+
+ vbif->ops.set_halt_ctrl(vbif, params->xin_id, true);
+
+ ret = _dpu_vbif_wait_for_xin_halt(vbif, params->xin_id);
+ if (ret)
+ trace_dpu_vbif_wait_xin_halt_fail(vbif->idx, params->xin_id);
+
+ vbif->ops.set_halt_ctrl(vbif, params->xin_id, false);
+
+ if (forced_on)
+ mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+exit:
+ return;
+}
+
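+/*
+ * Usage sketch (illustrative only, field values hypothetical): a read
+ * client fills dpu_vbif_set_ot_params before kickoff and lets the
+ * helper pick the limit:
+ *
+ *	struct dpu_vbif_set_ot_params ot_params = {
+ *		.xin_id = 0,
+ *		.num = 0,
+ *		.width = 1920,
+ *		.height = 1080,
+ *		.frame_rate = 60,
+ *		.rd = true,
+ *		.is_wfd = false,
+ *		.vbif_idx = VBIF_0,
+ *		.clk_ctrl = clk_ctrl,
+ *	};
+ *
+ *	dpu_vbif_set_ot_limit(dpu_kms, &ot_params);
+ */
+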
+void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_qos_params *params)
+{
+ struct dpu_hw_vbif *vbif = NULL;
+ struct dpu_hw_mdp *mdp;
+ bool forced_on = false;
+ const struct dpu_vbif_qos_tbl *qos_tbl;
+ int i;
+
+ if (!dpu_kms || !params || !dpu_kms->hw_mdp) {
+ DPU_ERROR("invalid arguments\n");
+ return;
+ }
+ mdp = dpu_kms->hw_mdp;
+
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ if (dpu_kms->hw_vbif[i] &&
+ dpu_kms->hw_vbif[i]->idx == params->vbif_idx) {
+ vbif = dpu_kms->hw_vbif[i];
+ break;
+ }
+ }
+
+ if (!vbif || !vbif->cap) {
+ DPU_ERROR("invalid vbif %d\n", params->vbif_idx);
+ return;
+ }
+
+ if (!vbif->ops.set_qos_remap || !mdp->ops.setup_clk_force_ctrl) {
+ DPU_DEBUG("qos remap not supported\n");
+ return;
+ }
+
+ qos_tbl = params->is_rt ? &vbif->cap->qos_rt_tbl :
+ &vbif->cap->qos_nrt_tbl;
+
+ if (!qos_tbl->npriority_lvl || !qos_tbl->priority_lvl) {
+ DPU_DEBUG("qos tbl not defined\n");
+ return;
+ }
+
+ forced_on = mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, true);
+
+ for (i = 0; i < qos_tbl->npriority_lvl; i++) {
+ DPU_DEBUG("vbif:%d xin:%d lvl:%d/%d\n",
+ params->vbif_idx, params->xin_id, i,
+ qos_tbl->priority_lvl[i]);
+ vbif->ops.set_qos_remap(vbif, params->xin_id, i,
+ qos_tbl->priority_lvl[i]);
+ }
+
+ if (forced_on)
+ mdp->ops.setup_clk_force_ctrl(mdp, params->clk_ctrl, false);
+}
+
+void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms)
+{
+ struct dpu_hw_vbif *vbif;
+ u32 i, pnd, src;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid argument\n");
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ vbif = dpu_kms->hw_vbif[i];
+ if (vbif && vbif->ops.clear_errors) {
+ vbif->ops.clear_errors(vbif, &pnd, &src);
+ if (pnd || src) {
+ DRM_DEBUG_KMS("VBIF %d: pnd 0x%X, src 0x%X\n",
+ vbif->idx - VBIF_0, pnd, src);
+ }
+ }
+ }
+}
+
+void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms)
+{
+ struct dpu_hw_vbif *vbif;
+ int i, j;
+
+ if (!dpu_kms) {
+ DPU_ERROR("invalid argument\n");
+ return;
+ }
+
+ for (i = 0; i < ARRAY_SIZE(dpu_kms->hw_vbif); i++) {
+ vbif = dpu_kms->hw_vbif[i];
+ if (vbif && vbif->cap && vbif->ops.set_mem_type) {
+ for (j = 0; j < vbif->cap->memtype_count; j++)
+ vbif->ops.set_mem_type(
+ vbif, j, vbif->cap->memtype[j]);
+ }
+ }
+}
+
+#ifdef CONFIG_DEBUG_FS
+void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
+{
+ debugfs_remove_recursive(dpu_kms->debugfs_vbif);
+ dpu_kms->debugfs_vbif = NULL;
+}
+
+int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root)
+{
+ char vbif_name[32];
+ struct dentry *debugfs_vbif;
+ int i, j;
+
+ dpu_kms->debugfs_vbif = debugfs_create_dir("vbif", debugfs_root);
+ if (!dpu_kms->debugfs_vbif) {
+ DPU_ERROR("failed to create vbif debugfs\n");
+ return -EINVAL;
+ }
+
+ for (i = 0; i < dpu_kms->catalog->vbif_count; i++) {
+ struct dpu_vbif_cfg *vbif = &dpu_kms->catalog->vbif[i];
+
+ snprintf(vbif_name, sizeof(vbif_name), "%d", vbif->id);
+
+ debugfs_vbif = debugfs_create_dir(vbif_name,
+ dpu_kms->debugfs_vbif);
+
+ debugfs_create_u32("features", 0600, debugfs_vbif,
+ (u32 *)&vbif->features);
+
+ debugfs_create_u32("xin_halt_timeout", 0400, debugfs_vbif,
+ (u32 *)&vbif->xin_halt_timeout);
+
+ debugfs_create_u32("default_rd_ot_limit", 0400, debugfs_vbif,
+ (u32 *)&vbif->default_ot_rd_limit);
+
+ debugfs_create_u32("default_wr_ot_limit", 0400, debugfs_vbif,
+ (u32 *)&vbif->default_ot_wr_limit);
+
+ for (j = 0; j < vbif->dynamic_ot_rd_tbl.count; j++) {
+ struct dpu_vbif_dynamic_ot_cfg *cfg =
+ &vbif->dynamic_ot_rd_tbl.cfg[j];
+
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_rd_%d_pps", j);
+ debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
+ (u64 *)&cfg->pps);
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_rd_%d_ot_limit", j);
+ debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
+ (u32 *)&cfg->ot_limit);
+ }
+
+ for (j = 0; j < vbif->dynamic_ot_wr_tbl.count; j++) {
+ struct dpu_vbif_dynamic_ot_cfg *cfg =
+ &vbif->dynamic_ot_wr_tbl.cfg[j];
+
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_wr_%d_pps", j);
+ debugfs_create_u64(vbif_name, 0400, debugfs_vbif,
+ (u64 *)&cfg->pps);
+ snprintf(vbif_name, sizeof(vbif_name),
+ "dynamic_ot_wr_%d_ot_limit", j);
+ debugfs_create_u32(vbif_name, 0400, debugfs_vbif,
+ (u32 *)&cfg->ot_limit);
+ }
+ }
+
+ return 0;
+}
+#endif
diff --git a/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
new file mode 100644
index 000000000000..f17af52dbbd5
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/dpu_vbif.h
@@ -0,0 +1,94 @@
+/* Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
+ *
+ * This program is free software; you can redistribute it and/or modify
+ * it under the terms of the GNU General Public License version 2 and
+ * only version 2 as published by the Free Software Foundation.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifndef __DPU_VBIF_H__
+#define __DPU_VBIF_H__
+
+#include "dpu_kms.h"
+
+struct dpu_vbif_set_ot_params {
+ u32 xin_id;
+ u32 num;
+ u32 width;
+ u32 height;
+ u32 frame_rate;
+ bool rd;
+ bool is_wfd;
+ u32 vbif_idx;
+ u32 clk_ctrl;
+};
+
+struct dpu_vbif_set_memtype_params {
+ u32 xin_id;
+ u32 vbif_idx;
+ u32 clk_ctrl;
+ bool is_cacheable;
+};
+
+/**
+ * struct dpu_vbif_set_qos_params - QoS remapper parameter
+ * @vbif_idx: vbif identifier
+ * @xin_id: client interface identifier
+ * @clk_ctrl: clock control identifier of the xin
+ * @num: pipe identifier (debug only)
+ * @is_rt: true if pipe is used in real-time use case
+ */
+struct dpu_vbif_set_qos_params {
+ u32 vbif_idx;
+ u32 xin_id;
+ u32 clk_ctrl;
+ u32 num;
+ bool is_rt;
+};
+
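+/*
+ * Usage sketch (illustrative only, field values hypothetical):
+ *
+ *	struct dpu_vbif_set_qos_params qos_params = {
+ *		.vbif_idx = VBIF_0,
+ *		.xin_id = 0,
+ *		.clk_ctrl = clk_ctrl,
+ *		.num = 0,
+ *		.is_rt = true,
+ *	};
+ *
+ *	dpu_vbif_set_qos_remap(dpu_kms, &qos_params);
+ */
+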
+/**
+ * dpu_vbif_set_ot_limit - set OT limit for vbif client
+ * @dpu_kms: DPU handler
+ * @params: Pointer to OT configuration parameters
+ */
+void dpu_vbif_set_ot_limit(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_ot_params *params);
+
+/**
+ * dpu_vbif_set_qos_remap - set QoS priority level remap
+ * @dpu_kms: DPU handler
+ * @params: Pointer to QoS configuration parameters
+ */
+void dpu_vbif_set_qos_remap(struct dpu_kms *dpu_kms,
+ struct dpu_vbif_set_qos_params *params);
+
+/**
+ * dpu_vbif_clear_errors - clear any vbif errors
+ * @dpu_kms: DPU handler
+ */
+void dpu_vbif_clear_errors(struct dpu_kms *dpu_kms);
+
+/**
+ * dpu_vbif_init_memtypes - initialize xin memory types for vbif
+ * @dpu_kms: DPU handler
+ */
+void dpu_vbif_init_memtypes(struct dpu_kms *dpu_kms);
+
+#ifdef CONFIG_DEBUG_FS
+int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms, struct dentry *debugfs_root);
+void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms);
+#else
+static inline int dpu_debugfs_vbif_init(struct dpu_kms *dpu_kms,
+ struct dentry *debugfs_root)
+{
+ return 0;
+}
+static inline void dpu_debugfs_vbif_destroy(struct dpu_kms *dpu_kms)
+{
+}
+#endif
+#endif /* __DPU_VBIF_H__ */
diff --git a/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
new file mode 100644
index 000000000000..4f12e5c534c8
--- /dev/null
+++ b/drivers/gpu/drm/msm/disp/dpu1/msm_media_info.h
@@ -0,0 +1,1376 @@
+#ifndef __MEDIA_INFO_H__
+#define __MEDIA_INFO_H__
+
+#ifndef MSM_MEDIA_ALIGN
+#define MSM_MEDIA_ALIGN(__sz, __align) (((__align) & ((__align) - 1)) ?\
+ ((((__sz) + (__align) - 1) / (__align)) * (__align)) :\
+ (((__sz) + (__align) - 1) & (~((__align) - 1))))
+#endif
+
+#ifndef MSM_MEDIA_ROUNDUP
+#define MSM_MEDIA_ROUNDUP(__sz, __r) (((__sz) + ((__r) - 1)) / (__r))
+#endif
+
+#ifndef MSM_MEDIA_MAX
+#define MSM_MEDIA_MAX(__a, __b) ((__a) > (__b)?(__a):(__b))
+#endif
+
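+/*
+ * Worked examples (illustrative only): MSM_MEDIA_ALIGN rounds __sz up
+ * to a multiple of __align, taking the cheap mask path when __align is
+ * a power of two and the divide/multiply path otherwise:
+ *
+ *	MSM_MEDIA_ALIGN(1080, 32)   = 1088  (mask path)
+ *	MSM_MEDIA_ALIGN(1280, 192)  = 1344  (divide path)
+ *	MSM_MEDIA_ROUNDUP(1080, 2)  = 540   (ceiling division)
+ */
+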
+enum color_fmts {
+ /* Venus NV12:
+ * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+ * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * U V U V U V U V U V U V . . . . ^
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . --> Buffer size alignment
+ *
+ * Y_Stride : Width aligned to 128
+ * UV_Stride : Width aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * Extradata: Arbitrary (software-imposed) padding
+ *    Total size = align(Y_Stride * Y_Scanlines
+ *             + UV_Stride * UV_Scanlines
+ *             + max(Extradata, Y_Stride * 8), 4096)
+ */
+ COLOR_FMT_NV12,
+
+ /* Venus NV21:
+ * YUV 4:2:0 image with a plane of 8 bit Y samples followed
+ * by an interleaved V/U plane containing 8 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * V U V U V U V U V U V U . . . . ^
+ * V U V U V U V U V U V U . . . . |
+ * V U V U V U V U V U V U . . . . |
+ * V U V U V U V U V U V U . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . --> Padding & Buffer size alignment
+ *
+ * Y_Stride : Width aligned to 128
+ * UV_Stride : Width aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * Extradata: Arbitrary (software-imposed) padding
+ *    Total size = align(Y_Stride * Y_Scanlines
+ *             + UV_Stride * UV_Scanlines
+ *             + max(Extradata, Y_Stride * 8), 4096)
+ */
+ COLOR_FMT_NV21,
+ /* Venus NV12_MVTB:
+ * Two YUV 4:2:0 images/views one after the other
+ * in a top-bottom layout, same as NV12
+ * with a plane of 8 bit Y samples followed
+ * by an interleaved U/V plane containing 8 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | |
+ * . . . . . . . . . . . . . . . . | View_1
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V |
+ * U V U V U V U V U V U V . . . . ^ |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . UV_Scanlines |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V V
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V | |
+ * . . . . . . . . . . . . . . . . | View_2
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V |
+ * U V U V U V U V U V U V . . . . ^ |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . | |
+ * U V U V U V U V U V U V . . . . UV_Scanlines |
+ * . . . . . . . . . . . . . . . . | |
+ * . . . . . . . . . . . . . . . . V V
+ * . . . . . . . . . . . . . . . . --> Buffer size alignment
+ *
+ * Y_Stride : Width aligned to 128
+ * UV_Stride : Width aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * View_1 begin at: 0 (zero)
+ * View_2 begin at: Y_Stride * Y_Scanlines + UV_Stride * UV_Scanlines
+ * Extradata: Arbitrary (software-imposed) padding
+ * Total size = align((2*(Y_Stride * Y_Scanlines)
+ * + 2*(UV_Stride * UV_Scanlines) + Extradata), 4096)
+ */
+ COLOR_FMT_NV12_MVTB,
+ /*
+ * The buffer can be of 2 types:
+ * (1) Venus NV12 UBWC Progressive
+ * (2) Venus NV12 UBWC Interlaced
+ *
+ * (1) Venus NV12 UBWC Progressive Buffer Format:
+ * Compressed Macro-tile format for NV12.
+ * Contains 4 planes in the following order -
+ * (A) Y_Meta_Plane
+ * (B) Y_UBWC_Plane
+ * (C) UV_Meta_Plane
+ * (D) UV_UBWC_Plane
+ *
+ * Y_Meta_Plane consists of meta information to decode compressed
+ * tile data in Y_UBWC_Plane.
+ * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+ * UBWC decoder block will use the Y_Meta_Plane data together with
+ * Y_UBWC_Plane data to produce loss-less uncompressed 8 bit Y samples.
+ *
+ * UV_Meta_Plane consists of meta information to decode compressed
+ * tile data in UV_UBWC_Plane.
+ * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+ * UBWC decoder block will use UV_Meta_Plane data together with
+ * UV_UBWC_Plane data to produce loss-less uncompressed 8 bit 2x2
+ * subsampled color difference samples.
+ *
+ * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+ * and randomly accessible. There is no dependency between tiles.
+ *
+ * <----- Y_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_Y_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <--Compressed tile Y Stride--->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----- UV_Meta_Stride ---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <--Compressed tile UV Stride--->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ * Y_Stride = align(Width, 128)
+ * UV_Stride = align(Width, 128)
+ * Y_Scanlines = align(Height, 32)
+ * UV_Scanlines = align(Height/2, 16)
+ * Y_UBWC_Plane_size = align(Y_Stride * Y_Scanlines, 4096)
+ * UV_UBWC_Plane_size = align(UV_Stride * UV_Scanlines, 4096)
+ * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+ * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+ * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+ * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align( Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+ * Y_Meta_Plane_size + UV_Meta_Plane_size
+ * + max(Extradata, Y_Stride * 48), 4096)
+ *
+ *
+ * (2) Venus NV12 UBWC Interlaced Buffer Format:
+ * Compressed Macro-tile format for NV12 interlaced.
+ * Contains 8 planes in the following order -
+ * (A) Y_Meta_Top_Field_Plane
+ * (B) Y_UBWC_Top_Field_Plane
+ * (C) UV_Meta_Top_Field_Plane
+ * (D) UV_UBWC_Top_Field_Plane
+ * (E) Y_Meta_Bottom_Field_Plane
+ * (F) Y_UBWC_Bottom_Field_Plane
+ * (G) UV_Meta_Bottom_Field_Plane
+ * (H) UV_UBWC_Bottom_Field_Plane
+ * Y_Meta_Top_Field_Plane consists of meta information to decode
+ * compressed tile data for Y_UBWC_Top_Field_Plane.
+ * Y_UBWC_Top_Field_Plane consists of Y data in compressed macro-tile
+ * format for top field of an interlaced frame.
+ * UBWC decoder block will use the Y_Meta_Top_Field_Plane data together
+ * with Y_UBWC_Top_Field_Plane data to produce loss-less uncompressed
+ * 8 bit Y samples for top field of an interlaced frame.
+ *
+ * UV_Meta_Top_Field_Plane consists of meta information to decode
+ * compressed tile data in UV_UBWC_Top_Field_Plane.
+ * UV_UBWC_Top_Field_Plane consists of UV data in compressed macro-tile
+ * format for top field of an interlaced frame.
+ * UBWC decoder block will use UV_Meta_Top_Field_Plane data together
+ * with UV_UBWC_Top_Field_Plane data to produce loss-less uncompressed
+ * 8 bit subsampled color difference samples for top field of an
+ * interlaced frame.
+ *
+ * Each tile in Y_UBWC_Top_Field_Plane/UV_UBWC_Top_Field_Plane is
+ * independently decodable and randomly accessible. There is no
+ * dependency between tiles.
+ *
+ * Y_Meta_Bottom_Field_Plane consists of meta information to decode
+ * compressed tile data for Y_UBWC_Bottom_Field_Plane.
+ * Y_UBWC_Bottom_Field_Plane consists of Y data in compressed macro-tile
+ * format for bottom field of an interlaced frame.
+ * UBWC decoder block will use the Y_Meta_Bottom_Field_Plane data
+ * together with Y_UBWC_Bottom_Field_Plane data to produce loss-less
+ * uncompressed 8 bit Y samples for bottom field of an interlaced frame.
+ *
+ * UV_Meta_Bottom_Field_Plane consists of meta information to decode
+ * compressed tile data in UV_UBWC_Bottom_Field_Plane.
+ * UV_UBWC_Bottom_Field_Plane consists of UV data in compressed
+ * macro-tile format for bottom field of an interlaced frame.
+ * UBWC decoder block will use UV_Meta_Bottom_Field_Plane data together
+ * with UV_UBWC_Bottom_Field_Plane data to produce loss-less
+ * uncompressed 8 bit subsampled color difference samples for bottom
+ * field of an interlaced frame.
+ *
+ * Each tile in Y_UBWC_Bottom_Field_Plane/UV_UBWC_Bottom_Field_Plane is
+ * independently decodable and randomly accessible. There is no
+ * dependency between tiles.
+ *
+ * <-----Y_TF_Meta_Stride---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Half_height |
+ * M M M M M M M M M M M M . . | Meta_Y_TF_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-Compressed tile Y_TF Stride->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_TF_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----UV_TF_Meta_Stride---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_TF_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <-Compressed tile UV_TF Stride->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_TF_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <-----Y_BF_Meta_Stride---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Half_height |
+ * M M M M M M M M M M M M . . | Meta_Y_BF_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-Compressed tile Y_BF Stride->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Half_height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_BF_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----UV_BF_Meta_Stride---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_BF_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <-Compressed tile UV_BF Stride->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_BF_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ * Half_height = (Height+1)>>1
+ * Y_TF_Stride = align(Width, 128)
+ * UV_TF_Stride = align(Width, 128)
+ * Y_TF_Scanlines = align(Half_height, 32)
+ * UV_TF_Scanlines = align((Half_height+1)/2, 32)
+ * Y_UBWC_TF_Plane_size = align(Y_TF_Stride * Y_TF_Scanlines, 4096)
+ * UV_UBWC_TF_Plane_size = align(UV_TF_Stride * UV_TF_Scanlines, 4096)
+ * Y_TF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_TF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+ * Y_TF_Meta_Plane_size =
+ * align(Y_TF_Meta_Stride * Y_TF_Meta_Scanlines, 4096)
+ * UV_TF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_TF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+ * UV_TF_Meta_Plane_size =
+ * align(UV_TF_Meta_Stride * UV_TF_Meta_Scanlines, 4096)
+ * Y_BF_Stride = align(Width, 128)
+ * UV_BF_Stride = align(Width, 128)
+ * Y_BF_Scanlines = align(Half_height, 32)
+ * UV_BF_Scanlines = align((Half_height+1)/2, 32)
+ * Y_UBWC_BF_Plane_size = align(Y_BF_Stride * Y_BF_Scanlines, 4096)
+ * UV_UBWC_BF_Plane_size = align(UV_BF_Stride * UV_BF_Scanlines, 4096)
+ * Y_BF_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_BF_Meta_Scanlines = align(roundup(Half_height, Y_TileHeight), 16)
+ * Y_BF_Meta_Plane_size =
+ * align(Y_BF_Meta_Stride * Y_BF_Meta_Scanlines, 4096)
+ * UV_BF_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_BF_Meta_Scanlines = align(roundup(Half_height, UV_TileHeight), 16)
+ * UV_BF_Meta_Plane_size =
+ * align(UV_BF_Meta_Stride * UV_BF_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align( Y_UBWC_TF_Plane_size + UV_UBWC_TF_Plane_size +
+ * Y_TF_Meta_Plane_size + UV_TF_Meta_Plane_size +
+ * Y_UBWC_BF_Plane_size + UV_UBWC_BF_Plane_size +
+ *		Y_BF_Meta_Plane_size + UV_BF_Meta_Plane_size
+ * + max(Extradata, Y_TF_Stride * 48), 4096)
+ */
+ COLOR_FMT_NV12_UBWC,
+ /* Venus NV12 10-bit UBWC:
+	 * Compressed Macro-tile format for 10-bit NV12.
+ * Contains 4 planes in the following order -
+ * (A) Y_Meta_Plane
+ * (B) Y_UBWC_Plane
+ * (C) UV_Meta_Plane
+ * (D) UV_UBWC_Plane
+ *
+ * Y_Meta_Plane consists of meta information to decode compressed
+ * tile data in Y_UBWC_Plane.
+ * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+ * UBWC decoder block will use the Y_Meta_Plane data together with
+ * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+ *
+ * UV_Meta_Plane consists of meta information to decode compressed
+ * tile data in UV_UBWC_Plane.
+ * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+ * UBWC decoder block will use UV_Meta_Plane data together with
+ * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+ * subsampled color difference samples.
+ *
+ * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+ * and randomly accessible. There is no dependency between tiles.
+ *
+ * <----- Y_Meta_Stride ----->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_Y_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <--Compressed tile Y Stride--->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----- UV_Meta_Stride ---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <--Compressed tile UV Stride--->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ *
+ *	Y_Stride = align(align(Width, 192) * 4/3, 256)
+ *	UV_Stride = align(align(Width, 192) * 4/3, 256)
+ * Y_Scanlines = align(Height, 32)
+ * UV_Scanlines = align(Height/2, 16)
+ * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+ * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+ * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+ * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+ * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+ * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+ * Y_Meta_Plane_size + UV_Meta_Plane_size
+ * + max(Extradata, Y_Stride * 48), 4096)
+ */
+ COLOR_FMT_NV12_BPP10_UBWC,
+ /* Venus RGBA8888 format:
+ * Contains 1 plane in the following order -
+ * (A) RGBA plane
+ *
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 4, 128)
+ * RGB_Scanlines = align(Height, 32)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Plane_size + Extradata, 4096)
+ */
+ COLOR_FMT_RGBA8888,
+ /* Venus RGBA8888 UBWC format:
+ * Contains 2 planes in the following order -
+ * (A) Meta plane
+ * (B) RGBA plane
+ *
+ * <--- RGB_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_RGB_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 4, 128)
+ * RGB_Scanlines = align(Height, 32)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+ * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16)
+ * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+ * RGB_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+ * Extradata, 4096)
+ */
+ COLOR_FMT_RGBA8888_UBWC,
+ /* Venus RGBA1010102 UBWC format:
+ * Contains 2 planes in the following order -
+ * (A) Meta plane
+ * (B) RGBA plane
+ *
+ * <--- RGB_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_RGB_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 4, 256)
+ * RGB_Scanlines = align(Height, 16)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+ * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16)
+ * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+ * RGB_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+ * Extradata, 4096)
+ */
+ COLOR_FMT_RGBA1010102_UBWC,
+ /* Venus RGB565 UBWC format:
+ * Contains 2 planes in the following order -
+ * (A) Meta plane
+ * (B) RGB plane
+ *
+ * <--- RGB_Meta_Stride ---->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_RGB_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <-------- RGB_Stride -------->
+ * <------- Width ------->
+ * R R R R R R R R R R R R . . . . ^ ^
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . Height |
+ * R R R R R R R R R R R R . . . . | RGB_Scanlines
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . | |
+ * R R R R R R R R R R R R . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ *
+ * RGB_Stride = align(Width * 2, 128)
+ * RGB_Scanlines = align(Height, 16)
+ * RGB_Plane_size = align(RGB_Stride * RGB_Scanlines, 4096)
+ * RGB_Meta_Stride = align(roundup(Width, RGB_TileWidth), 64)
+ * RGB_Meta_Scanline = align(roundup(Height, RGB_TileHeight), 16)
+ * RGB_Meta_Plane_size = align(RGB_Meta_Stride *
+ * RGB_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(RGB_Meta_Plane_size + RGB_Plane_size +
+ * Extradata, 4096)
+ */
+ COLOR_FMT_RGB565_UBWC,
+ /* P010 UBWC:
+	 * Compressed Macro-tile format for P010.
+ * Contains 4 planes in the following order -
+ * (A) Y_Meta_Plane
+ * (B) Y_UBWC_Plane
+ * (C) UV_Meta_Plane
+ * (D) UV_UBWC_Plane
+ *
+ * Y_Meta_Plane consists of meta information to decode compressed
+ * tile data in Y_UBWC_Plane.
+ * Y_UBWC_Plane consists of Y data in compressed macro-tile format.
+ * UBWC decoder block will use the Y_Meta_Plane data together with
+ * Y_UBWC_Plane data to produce loss-less uncompressed 10 bit Y samples.
+ *
+ * UV_Meta_Plane consists of meta information to decode compressed
+ * tile data in UV_UBWC_Plane.
+ * UV_UBWC_Plane consists of UV data in compressed macro-tile format.
+ * UBWC decoder block will use UV_Meta_Plane data together with
+ * UV_UBWC_Plane data to produce loss-less uncompressed 10 bit 2x2
+ * subsampled color difference samples.
+ *
+ * Each tile in Y_UBWC_Plane/UV_UBWC_Plane is independently decodable
+ * and randomly accessible. There is no dependency between tiles.
+ *
+ * <----- Y_Meta_Stride ----->
+ * <-------- Width ------>
+ * M M M M M M M M M M M M . . ^ ^
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . Height |
+ * M M M M M M M M M M M M . . | Meta_Y_Scanlines
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . | |
+ * M M M M M M M M M M M M . . V |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . V
+ * <--Compressed tile Y Stride--->
+ * <------- Width ------->
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . ^ ^
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . Height |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | Macro_tile_Y_Scanlines
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . | |
+ * Y* Y* Y* Y* Y* Y* Y* Y* . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * . . . . . . . . . . . . . . . . V
+ * <----- UV_Meta_Stride ---->
+ * M M M M M M M M M M M M . . ^
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . |
+ * M M M M M M M M M M M M . . M_UV_Scanlines
+ * . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ * <--Compressed tile UV Stride--->
+ * U* V* U* V* U* V* U* V* . . . . ^
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . |
+ * U* V* U* V* U* V* U* V* . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . -------> Buffer size aligned to 4k
+ *
+ *
+ * Y_Stride = align(Width * 2, 256)
+ * UV_Stride = align(Width * 2, 256)
+ * Y_Scanlines = align(Height, 16)
+ * UV_Scanlines = align(Height/2, 16)
+ * Y_UBWC_Plane_Size = align(Y_Stride * Y_Scanlines, 4096)
+ * UV_UBWC_Plane_Size = align(UV_Stride * UV_Scanlines, 4096)
+ * Y_Meta_Stride = align(roundup(Width, Y_TileWidth), 64)
+ * Y_Meta_Scanlines = align(roundup(Height, Y_TileHeight), 16)
+ * Y_Meta_Plane_size = align(Y_Meta_Stride * Y_Meta_Scanlines, 4096)
+ * UV_Meta_Stride = align(roundup(Width, UV_TileWidth), 64)
+ * UV_Meta_Scanlines = align(roundup(Height, UV_TileHeight), 16)
+ * UV_Meta_Plane_size = align(UV_Meta_Stride * UV_Meta_Scanlines, 4096)
+ * Extradata = 8k
+ *
+ * Total size = align(Y_UBWC_Plane_size + UV_UBWC_Plane_size +
+ * Y_Meta_Plane_size + UV_Meta_Plane_size
+ * + max(Extradata, Y_Stride * 48), 4096)
+ */
+ COLOR_FMT_P010_UBWC,
+ /* Venus P010:
+ * YUV 4:2:0 image with a plane of 10 bit Y samples followed
+ * by an interleaved U/V plane containing 10 bit 2x2 subsampled
+ * colour difference samples.
+ *
+ * <-------- Y/UV_Stride -------->
+ * <------- Width ------->
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . ^ ^
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . Height |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | Y_Scanlines
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . | |
+ * Y Y Y Y Y Y Y Y Y Y Y Y . . . . V |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * U V U V U V U V U V U V . . . . ^
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . |
+ * U V U V U V U V U V U V . . . . UV_Scanlines
+ * . . . . . . . . . . . . . . . . |
+ * . . . . . . . . . . . . . . . . V
+ * . . . . . . . . . . . . . . . . --> Buffer size alignment
+ *
+ * Y_Stride : Width * 2 aligned to 128
+ * UV_Stride : Width * 2 aligned to 128
+ * Y_Scanlines: Height aligned to 32
+ * UV_Scanlines: Height/2 aligned to 16
+ * Extradata: Arbitrary (software-imposed) padding
+ *    Total size = align(Y_Stride * Y_Scanlines
+ *             + UV_Stride * UV_Scanlines
+ *             + max(Extradata, Y_Stride * 8), 4096)
+ */
+ COLOR_FMT_P010,
+};
+
+#define COLOR_FMT_RGBA1010102_UBWC COLOR_FMT_RGBA1010102_UBWC
+#define COLOR_FMT_RGB565_UBWC COLOR_FMT_RGB565_UBWC
+#define COLOR_FMT_P010_UBWC COLOR_FMT_P010_UBWC
+#define COLOR_FMT_P010 COLOR_FMT_P010
+
+static inline unsigned int VENUS_EXTRADATA_SIZE(int width, int height)
+{
+ (void)height;
+ (void)width;
+
+ /*
+ * In the future, calculate the size based on the w/h but just
+ * hardcode it for now since 16K satisfies all current usecases.
+ */
+ return 16 * 1024;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_Y_STRIDE(int color_fmt, int width)
+{
+ unsigned int alignment, stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_UBWC:
+ alignment = 128;
+ stride = MSM_MEDIA_ALIGN(width, alignment);
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ alignment = 256;
+ stride = MSM_MEDIA_ALIGN(width, 192);
+ stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+ break;
+ case COLOR_FMT_P010_UBWC:
+ alignment = 256;
+ stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+ break;
+ case COLOR_FMT_P010:
+ alignment = 128;
+ stride = MSM_MEDIA_ALIGN(width*2, alignment);
+ break;
+ default:
+ break;
+ }
+invalid_input:
+ return stride;
+}
+
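+/*
+ * Worked example (illustrative only): for COLOR_FMT_NV12_BPP10_UBWC at
+ * width 1280, the width is first grouped to 192 pixels and the 10-bit
+ * samples then expand 3:4 into bytes:
+ *
+ *	MSM_MEDIA_ALIGN(1280, 192)        = 1344
+ *	MSM_MEDIA_ALIGN(1344 * 4/3, 256)  = 1792
+ */
+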
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_UV_STRIDE(int color_fmt, int width)
+{
+ unsigned int alignment, stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_UBWC:
+ alignment = 128;
+ stride = MSM_MEDIA_ALIGN(width, alignment);
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ alignment = 256;
+ stride = MSM_MEDIA_ALIGN(width, 192);
+ stride = MSM_MEDIA_ALIGN(stride * 4/3, alignment);
+ break;
+ case COLOR_FMT_P010_UBWC:
+ alignment = 256;
+ stride = MSM_MEDIA_ALIGN(width * 2, alignment);
+ break;
+ case COLOR_FMT_P010:
+ alignment = 128;
+ stride = MSM_MEDIA_ALIGN(width*2, alignment);
+ break;
+ default:
+ break;
+ }
+invalid_input:
+ return stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_Y_SCANLINES(int color_fmt, int height)
+{
+ unsigned int alignment, sclines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_UBWC:
+ case COLOR_FMT_P010:
+ alignment = 32;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ alignment = 16;
+ break;
+ default:
+ return 0;
+ }
+ sclines = MSM_MEDIA_ALIGN(height, alignment);
+invalid_input:
+ return sclines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_UV_SCANLINES(int color_fmt, int height)
+{
+ unsigned int alignment, sclines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_NV12_MVTB:
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ case COLOR_FMT_P010:
+ alignment = 16;
+ break;
+ case COLOR_FMT_NV12_UBWC:
+ alignment = 32;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ sclines = MSM_MEDIA_ALIGN((height+1)>>1, alignment);
+
+invalid_input:
+ return sclines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_Y_META_STRIDE(int color_fmt, int width)
+{
+ int y_tile_width = 0, y_meta_stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ y_tile_width = 32;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ y_tile_width = 48;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ y_meta_stride = MSM_MEDIA_ROUNDUP(width, y_tile_width);
+ y_meta_stride = MSM_MEDIA_ALIGN(y_meta_stride, 64);
+
+invalid_input:
+ return y_meta_stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_Y_META_SCANLINES(int color_fmt, int height)
+{
+ int y_tile_height = 0, y_meta_scanlines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ y_tile_height = 8;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ y_tile_height = 4;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ y_meta_scanlines = MSM_MEDIA_ROUNDUP(height, y_tile_height);
+ y_meta_scanlines = MSM_MEDIA_ALIGN(y_meta_scanlines, 16);
+
+invalid_input:
+ return y_meta_scanlines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ */
+static inline unsigned int VENUS_UV_META_STRIDE(int color_fmt, int width)
+{
+ int uv_tile_width = 0, uv_meta_stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ uv_tile_width = 16;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ uv_tile_width = 24;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ uv_meta_stride = MSM_MEDIA_ROUNDUP((width+1)>>1, uv_tile_width);
+ uv_meta_stride = MSM_MEDIA_ALIGN(uv_meta_stride, 64);
+
+invalid_input:
+ return uv_meta_stride;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @height
+ * Progressive: height
+ * Interlaced: (height+1)>>1
+ */
+static inline unsigned int VENUS_UV_META_SCANLINES(int color_fmt, int height)
+{
+ int uv_tile_height = 0, uv_meta_scanlines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_UBWC:
+ uv_tile_height = 8;
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ case COLOR_FMT_P010_UBWC:
+ uv_tile_height = 4;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ uv_meta_scanlines = MSM_MEDIA_ROUNDUP((height+1)>>1, uv_tile_height);
+ uv_meta_scanlines = MSM_MEDIA_ALIGN(uv_meta_scanlines, 16);
+
+invalid_input:
+ return uv_meta_scanlines;
+}
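
A worked example for the meta-plane helpers above, for NV12_UBWC at
1920x1080: Y meta stride = align(roundup(1920, 32), 64) = align(60, 64)
= 64, and Y meta scanlines = align(roundup(1080, 8), 16) =
align(135, 16) = 144; for the chroma plane, UV meta stride =
align(roundup(960, 16), 64) = 64 and UV meta scanlines =
align(roundup(540, 8), 16) = align(68, 16) = 80.
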
+
+static inline unsigned int VENUS_RGB_STRIDE(int color_fmt, int width)
+{
+ unsigned int alignment = 0, stride = 0, bpp = 4;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888:
+ alignment = 128;
+ break;
+ case COLOR_FMT_RGB565_UBWC:
+ alignment = 256;
+ bpp = 2;
+ break;
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ alignment = 256;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ stride = MSM_MEDIA_ALIGN(width * bpp, alignment);
+
+invalid_input:
+ return stride;
+}
+
+static inline unsigned int VENUS_RGB_SCANLINES(int color_fmt, int height)
+{
+ unsigned int alignment = 0, scanlines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888:
+ alignment = 32;
+ break;
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ alignment = 16;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ scanlines = MSM_MEDIA_ALIGN(height, alignment);
+
+invalid_input:
+ return scanlines;
+}
+
+static inline unsigned int VENUS_RGB_META_STRIDE(int color_fmt, int width)
+{
+ int rgb_tile_width = 0, rgb_meta_stride = 0;
+
+ if (!width)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ rgb_tile_width = 16;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ rgb_meta_stride = MSM_MEDIA_ROUNDUP(width, rgb_tile_width);
+ rgb_meta_stride = MSM_MEDIA_ALIGN(rgb_meta_stride, 64);
+
+invalid_input:
+ return rgb_meta_stride;
+}
+
+static inline unsigned int VENUS_RGB_META_SCANLINES(int color_fmt, int height)
+{
+ int rgb_tile_height = 0, rgb_meta_scanlines = 0;
+
+ if (!height)
+ goto invalid_input;
+
+ switch (color_fmt) {
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ rgb_tile_height = 4;
+ break;
+ default:
+ goto invalid_input;
+ }
+
+ rgb_meta_scanlines = MSM_MEDIA_ROUNDUP(height, rgb_tile_height);
+ rgb_meta_scanlines = MSM_MEDIA_ALIGN(rgb_meta_scanlines, 16);
+
+invalid_input:
+ return rgb_meta_scanlines;
+}
+
+/*
+ * Function arguments:
+ * @color_fmt
+ * @width
+ * Progressive: width
+ * Interlaced: width
+ * @height
+ * Progressive: height
+ * Interlaced: height
+ */
+static inline unsigned int VENUS_BUFFER_SIZE(
+ int color_fmt, int width, int height)
+{
+ const unsigned int extra_size = VENUS_EXTRADATA_SIZE(width, height);
+ unsigned int uv_alignment = 0, size = 0;
+ unsigned int y_plane, uv_plane, y_stride,
+ uv_stride, y_sclines, uv_sclines;
+ unsigned int y_ubwc_plane = 0, uv_ubwc_plane = 0;
+ unsigned int y_meta_stride = 0, y_meta_scanlines = 0;
+ unsigned int uv_meta_stride = 0, uv_meta_scanlines = 0;
+ unsigned int y_meta_plane = 0, uv_meta_plane = 0;
+ unsigned int rgb_stride = 0, rgb_scanlines = 0;
+ unsigned int rgb_plane = 0, rgb_ubwc_plane = 0, rgb_meta_plane = 0;
+ unsigned int rgb_meta_stride = 0, rgb_meta_scanlines = 0;
+
+ if (!width || !height)
+ goto invalid_input;
+
+ y_stride = VENUS_Y_STRIDE(color_fmt, width);
+ uv_stride = VENUS_UV_STRIDE(color_fmt, width);
+ y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
+ uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
+ rgb_stride = VENUS_RGB_STRIDE(color_fmt, width);
+ rgb_scanlines = VENUS_RGB_SCANLINES(color_fmt, height);
+
+ switch (color_fmt) {
+ case COLOR_FMT_NV21:
+ case COLOR_FMT_NV12:
+ case COLOR_FMT_P010:
+ uv_alignment = 4096;
+ y_plane = y_stride * y_sclines;
+ uv_plane = uv_stride * uv_sclines + uv_alignment;
+ size = y_plane + uv_plane +
+ MSM_MEDIA_MAX(extra_size, 8 * y_stride);
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_NV12_MVTB:
+ uv_alignment = 4096;
+ y_plane = y_stride * y_sclines;
+ uv_plane = uv_stride * uv_sclines + uv_alignment;
+ size = y_plane + uv_plane;
+ size = 2 * size + extra_size;
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_NV12_UBWC:
+ y_sclines = VENUS_Y_SCANLINES(color_fmt, (height+1)>>1);
+ y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+ uv_sclines = VENUS_UV_SCANLINES(color_fmt, (height+1)>>1);
+ uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+ y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+ y_meta_scanlines =
+ VENUS_Y_META_SCANLINES(color_fmt, (height+1)>>1);
+ y_meta_plane = MSM_MEDIA_ALIGN(
+ y_meta_stride * y_meta_scanlines, 4096);
+ uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+ uv_meta_scanlines =
+ VENUS_UV_META_SCANLINES(color_fmt, (height+1)>>1);
+ uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+ uv_meta_scanlines, 4096);
+
+ size = (y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+ uv_meta_plane)*2 +
+ MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_NV12_BPP10_UBWC:
+ y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+ uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+ y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+ y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
+ y_meta_plane = MSM_MEDIA_ALIGN(
+ y_meta_stride * y_meta_scanlines, 4096);
+ uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+ uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
+ uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+ uv_meta_scanlines, 4096);
+
+ size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+ uv_meta_plane +
+ MSM_MEDIA_MAX(extra_size + 8192, 48 * y_stride);
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_P010_UBWC:
+ y_ubwc_plane = MSM_MEDIA_ALIGN(y_stride * y_sclines, 4096);
+ uv_ubwc_plane = MSM_MEDIA_ALIGN(uv_stride * uv_sclines, 4096);
+ y_meta_stride = VENUS_Y_META_STRIDE(color_fmt, width);
+ y_meta_scanlines = VENUS_Y_META_SCANLINES(color_fmt, height);
+ y_meta_plane = MSM_MEDIA_ALIGN(
+ y_meta_stride * y_meta_scanlines, 4096);
+ uv_meta_stride = VENUS_UV_META_STRIDE(color_fmt, width);
+ uv_meta_scanlines = VENUS_UV_META_SCANLINES(color_fmt, height);
+ uv_meta_plane = MSM_MEDIA_ALIGN(uv_meta_stride *
+ uv_meta_scanlines, 4096);
+
+ size = y_ubwc_plane + uv_ubwc_plane + y_meta_plane +
+ uv_meta_plane;
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_RGBA8888:
+ rgb_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines, 4096);
+ size = rgb_plane;
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ case COLOR_FMT_RGBA8888_UBWC:
+ case COLOR_FMT_RGBA1010102_UBWC:
+ case COLOR_FMT_RGB565_UBWC:
+ rgb_ubwc_plane = MSM_MEDIA_ALIGN(rgb_stride * rgb_scanlines,
+ 4096);
+ rgb_meta_stride = VENUS_RGB_META_STRIDE(color_fmt, width);
+ rgb_meta_scanlines = VENUS_RGB_META_SCANLINES(color_fmt,
+ height);
+ rgb_meta_plane = MSM_MEDIA_ALIGN(rgb_meta_stride *
+ rgb_meta_scanlines, 4096);
+ size = rgb_ubwc_plane + rgb_meta_plane;
+ size = MSM_MEDIA_ALIGN(size, 4096);
+ break;
+ default:
+ break;
+ }
+invalid_input:
+ return size;
+}
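
A standalone sanity check of the COLOR_FMT_P010 branch above, as a
minimal sketch that reimplements the alignment locally. It assumes
MSM_MEDIA_ALIGN() rounds up to a multiple of a power-of-two alignment,
per its definition earlier in this header; names are illustrative:

	#include <stdio.h>

	/* assumed equivalent of MSM_MEDIA_ALIGN() for power-of-two alignments */
	static unsigned int align_up(unsigned int sz, unsigned int a)
	{
		return (sz + a - 1) & ~(a - 1);
	}

	int main(void)
	{
		unsigned int w = 1280, h = 720;
		unsigned int y_stride  = align_up(w * 2, 128);       /* 2560 */
		unsigned int uv_stride = align_up(w * 2, 128);       /* 2560 */
		unsigned int y_scl     = align_up(h, 32);            /* 736 */
		unsigned int uv_scl    = align_up((h + 1) >> 1, 16); /* 368 */
		unsigned int extra     = 16 * 1024;  /* VENUS_EXTRADATA_SIZE */
		unsigned int size;

		/* mirrors the COLOR_FMT_P010 case: uv_alignment is 4096 */
		size = y_stride * y_scl + (uv_stride * uv_scl + 4096);
		size += extra > 8 * y_stride ? extra : 8 * y_stride;
		size = align_up(size, 4096);
		printf("P010 %ux%u -> %u bytes\n", w, h, size);  /* 2850816 */
		return 0;
	}
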
+
+static inline unsigned int VENUS_VIEW2_OFFSET(
+ int color_fmt, int width, int height)
+{
+ unsigned int offset = 0;
+ unsigned int y_plane, uv_plane, y_stride,
+ uv_stride, y_sclines, uv_sclines;
+ if (!width || !height)
+ goto invalid_input;
+
+ y_stride = VENUS_Y_STRIDE(color_fmt, width);
+ uv_stride = VENUS_UV_STRIDE(color_fmt, width);
+ y_sclines = VENUS_Y_SCANLINES(color_fmt, height);
+ uv_sclines = VENUS_UV_SCANLINES(color_fmt, height);
+ switch (color_fmt) {
+ case COLOR_FMT_NV12_MVTB:
+ y_plane = y_stride * y_sclines;
+ uv_plane = uv_stride * uv_sclines;
+ offset = y_plane + uv_plane;
+ break;
+ default:
+ break;
+ }
+invalid_input:
+ return offset;
+}
+
+#endif
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
index 576cea30d391..4b36b8954bae 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
index 4b646bf9c214..44d1cda56974 100644
--- a/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp4/mdp4_kms.c
@@ -125,6 +125,8 @@ static void mdp4_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
struct drm_crtc *crtc;
struct drm_crtc_state *crtc_state;
+ drm_atomic_helper_wait_for_vblanks(mdp4_kms->dev, state);
+
/* see 119ecb7fd */
for_each_new_crtc_in_state(state, crtc, crtc_state, i)
drm_crtc_vblank_put(crtc);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
index d9c10e02ee41..784d98989e3a 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
index 24e00274844b..b1da9ce54379 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_crtc.c
@@ -65,7 +65,7 @@ struct mdp5_crtc {
struct drm_gem_object *scanout_bo;
uint64_t iova;
uint32_t width, height;
- uint32_t x, y;
+ int x, y;
} cursor;
};
#define to_mdp5_crtc(x) container_of(x, struct mdp5_crtc, base)
@@ -760,20 +760,31 @@ static void get_roi(struct drm_crtc *crtc, uint32_t *roi_w, uint32_t *roi_h)
* Cursor Region Of Interest (ROI) is a plane read from cursor
* buffer to render. The ROI region is determined by the visibility of
* the cursor point. In the default Cursor image the cursor point will
- * be at the top left of the cursor image, unless it is specified
- * otherwise using hotspot feature.
+ * be at the top left of the cursor image.
*
+ * Without rotation:
* If the cursor point reaches the right (xres - x < cursor.width) or
* bottom (yres - y < cursor.height) boundary of the screen, then ROI
* width and ROI height need to be evaluated to crop the cursor image
* accordingly.
* (xres-x) will be new cursor width when x > (xres - cursor.width)
* (yres-y) will be new cursor height when y > (yres - cursor.height)
+ *
+ * With rotation:
+ * We get negative x and/or y coordinates.
+ * (cursor.width - abs(x)) will be new cursor width when x < 0
+ * (cursor.height - abs(y)) will be new cursor height when y < 0
*/
- *roi_w = min(mdp5_crtc->cursor.width, xres -
+ if (mdp5_crtc->cursor.x >= 0)
+ *roi_w = min(mdp5_crtc->cursor.width, xres -
mdp5_crtc->cursor.x);
- *roi_h = min(mdp5_crtc->cursor.height, yres -
+ else
+ *roi_w = mdp5_crtc->cursor.width - abs(mdp5_crtc->cursor.x);
+ if (mdp5_crtc->cursor.y >= 0)
+ *roi_h = min(mdp5_crtc->cursor.height, yres -
mdp5_crtc->cursor.y);
+ else
+ *roi_h = mdp5_crtc->cursor.height - abs(mdp5_crtc->cursor.y);
}
static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
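
The two branches above reduce to a compact clamping rule. A minimal
standalone sketch (illustrative names, not driver code), which relies
on the mdp5_crtc_cursor_move() clamp further below to guarantee
x >= -cursor.width and y >= -cursor.height:

	static void cursor_roi(int x, int y, int cur_w, int cur_h,
			       int xres, int yres, int *roi_w, int *roi_h)
	{
		if (x >= 0)	/* on-screen, possibly cropped at the right edge */
			*roi_w = cur_w < xres - x ? cur_w : xres - x;
		else		/* x < 0: drop the part hanging off the left edge */
			*roi_w = cur_w + x;	/* == cur_w - abs(x) */

		if (y >= 0)	/* on-screen, possibly cropped at the bottom edge */
			*roi_h = cur_h < yres - y ? cur_h : yres - y;
		else		/* y < 0: drop the part hanging off the top edge */
			*roi_h = cur_h + y;	/* == cur_h - abs(y) */
	}
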
@@ -783,7 +794,7 @@ static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
struct mdp5_kms *mdp5_kms = get_kms(crtc);
const enum mdp5_cursor_alpha cur_alpha = CURSOR_ALPHA_PER_PIXEL;
uint32_t blendcfg, stride;
- uint32_t x, y, width, height;
+ uint32_t x, y, src_x, src_y, width, height;
uint32_t roi_w, roi_h;
int lm;
@@ -800,6 +811,26 @@ static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
get_roi(crtc, &roi_w, &roi_h);
+ /* If the cursor buffer overlaps the upper or left screen
+ * border due to rotation, the pixel offset of the ROI
+ * inside the cursor buffer is the positive overlap
+ * distance.
+ */
+ if (mdp5_crtc->cursor.x < 0) {
+ src_x = abs(mdp5_crtc->cursor.x);
+ x = 0;
+ } else {
+ src_x = 0;
+ }
+ if (mdp5_crtc->cursor.y < 0) {
+ src_y = abs(mdp5_crtc->cursor.y);
+ y = 0;
+ } else {
+ src_y = 0;
+ }
+ DBG("%s: x=%d, y=%d roi_w=%d roi_h=%d src_x=%d src_y=%d",
+ crtc->name, x, y, roi_w, roi_h, src_x, src_y);
+
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_STRIDE(lm), stride);
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_FORMAT(lm),
MDP5_LM_CURSOR_FORMAT_FORMAT(CURSOR_FMT_ARGB8888));
@@ -812,6 +843,9 @@ static void mdp5_crtc_restore_cursor(struct drm_crtc *crtc)
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_START_XY(lm),
MDP5_LM_CURSOR_START_XY_Y_START(y) |
MDP5_LM_CURSOR_START_XY_X_START(x));
+ mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_XY(lm),
+ MDP5_LM_CURSOR_XY_SRC_Y(src_y) |
+ MDP5_LM_CURSOR_XY_SRC_X(src_x));
mdp5_write(mdp5_kms, REG_MDP5_LM_CURSOR_BASE_ADDR(lm),
mdp5_crtc->cursor.iova);
@@ -932,8 +966,9 @@ static int mdp5_crtc_cursor_move(struct drm_crtc *crtc, int x, int y)
if (unlikely(!crtc->state->enable))
return 0;
- mdp5_crtc->cursor.x = x = max(x, 0);
- mdp5_crtc->cursor.y = y = max(y, 0);
+ /* accept negative x/y coordinates up to maximum cursor overlap */
+ mdp5_crtc->cursor.x = x = max(x, -(int)mdp5_crtc->cursor.width);
+ mdp5_crtc->cursor.y = y = max(y, -(int)mdp5_crtc->cursor.height);
get_roi(crtc, &roi_w, &roi_h);
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
index 9af94e35f678..fcd44d1d1068 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_encoder.c
@@ -319,7 +319,17 @@ static int mdp5_encoder_atomic_check(struct drm_encoder *encoder,
mdp5_cstate->ctl = ctl;
mdp5_cstate->pipeline.intf = intf;
- mdp5_cstate->defer_start = true;
+
+ /*
+ * This is a bit awkward, but we want to flush the CTL and hit the
+ * START bit at most once for an atomic update. In the non-full-
+ * modeset case, this is done from crtc->atomic_flush(), but that
+ * is too early in the case of full modeset, in which case we
+ * defer to encoder->enable(). But we need to *know* whether
+ * encoder->enable() will be called to do this:
+ */
+ if (drm_atomic_crtc_needs_modeset(crtc_state))
+ mdp5_cstate->defer_start = true;
return 0;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
index 6e12e275deba..bddd625ab91b 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_kms.c
@@ -170,6 +170,8 @@ static void mdp5_complete_commit(struct msm_kms *kms, struct drm_atomic_state *s
struct device *dev = &mdp5_kms->pdev->dev;
struct mdp5_global_state *global_state;
+ drm_atomic_helper_wait_for_vblanks(mdp5_kms->dev, state);
+
global_state = mdp5_get_existing_global_state(mdp5_kms);
if (mdp5_kms->smp)
diff --git a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
index f2a0db7a8a03..1cc4e57f0226 100644
--- a/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
+++ b/drivers/gpu/drm/msm/disp/mdp5/mdp5_mdss.c
@@ -20,12 +20,10 @@
#include "msm_drv.h"
#include "mdp5_kms.h"
-/*
- * If needed, this can become more specific: something like struct mdp5_mdss,
- * which contains a 'struct msm_mdss base' member.
- */
-struct msm_mdss {
- struct drm_device *dev;
+#define to_mdp5_mdss(x) container_of(x, struct mdp5_mdss, base)
+
+struct mdp5_mdss {
+ struct msm_mdss base;
void __iomem *mmio, *vbif;
@@ -41,22 +39,22 @@ struct msm_mdss {
} irqcontroller;
};
-static inline void mdss_write(struct msm_mdss *mdss, u32 reg, u32 data)
+static inline void mdss_write(struct mdp5_mdss *mdp5_mdss, u32 reg, u32 data)
{
- msm_writel(data, mdss->mmio + reg);
+ msm_writel(data, mdp5_mdss->mmio + reg);
}
-static inline u32 mdss_read(struct msm_mdss *mdss, u32 reg)
+static inline u32 mdss_read(struct mdp5_mdss *mdp5_mdss, u32 reg)
{
- return msm_readl(mdss->mmio + reg);
+ return msm_readl(mdp5_mdss->mmio + reg);
}
static irqreturn_t mdss_irq(int irq, void *arg)
{
- struct msm_mdss *mdss = arg;
+ struct mdp5_mdss *mdp5_mdss = arg;
u32 intr;
- intr = mdss_read(mdss, REG_MDSS_HW_INTR_STATUS);
+ intr = mdss_read(mdp5_mdss, REG_MDSS_HW_INTR_STATUS);
VERB("intr=%08x", intr);
@@ -64,7 +62,7 @@ static irqreturn_t mdss_irq(int irq, void *arg)
irq_hw_number_t hwirq = fls(intr) - 1;
generic_handle_irq(irq_find_mapping(
- mdss->irqcontroller.domain, hwirq));
+ mdp5_mdss->irqcontroller.domain, hwirq));
intr &= ~(1 << hwirq);
}
@@ -84,19 +82,19 @@ static irqreturn_t mdss_irq(int irq, void *arg)
static void mdss_hw_mask_irq(struct irq_data *irqd)
{
- struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);
+ struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);
smp_mb__before_atomic();
- clear_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
+ clear_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
smp_mb__after_atomic();
}
static void mdss_hw_unmask_irq(struct irq_data *irqd)
{
- struct msm_mdss *mdss = irq_data_get_irq_chip_data(irqd);
+ struct mdp5_mdss *mdp5_mdss = irq_data_get_irq_chip_data(irqd);
smp_mb__before_atomic();
- set_bit(irqd->hwirq, &mdss->irqcontroller.enabled_mask);
+ set_bit(irqd->hwirq, &mdp5_mdss->irqcontroller.enabled_mask);
smp_mb__after_atomic();
}
@@ -109,13 +107,13 @@ static struct irq_chip mdss_hw_irq_chip = {
static int mdss_hw_irqdomain_map(struct irq_domain *d, unsigned int irq,
irq_hw_number_t hwirq)
{
- struct msm_mdss *mdss = d->host_data;
+ struct mdp5_mdss *mdp5_mdss = d->host_data;
if (!(VALID_IRQS & (1 << hwirq)))
return -EPERM;
irq_set_chip_and_handler(irq, &mdss_hw_irq_chip, handle_level_irq);
- irq_set_chip_data(irq, mdss);
+ irq_set_chip_data(irq, mdp5_mdss);
return 0;
}
@@ -126,90 +124,99 @@ static const struct irq_domain_ops mdss_hw_irqdomain_ops = {
};
-static int mdss_irq_domain_init(struct msm_mdss *mdss)
+static int mdss_irq_domain_init(struct mdp5_mdss *mdp5_mdss)
{
- struct device *dev = mdss->dev->dev;
+ struct device *dev = mdp5_mdss->base.dev->dev;
struct irq_domain *d;
d = irq_domain_add_linear(dev->of_node, 32, &mdss_hw_irqdomain_ops,
- mdss);
+ mdp5_mdss);
if (!d) {
dev_err(dev, "mdss irq domain add failed\n");
return -ENXIO;
}
- mdss->irqcontroller.enabled_mask = 0;
- mdss->irqcontroller.domain = d;
+ mdp5_mdss->irqcontroller.enabled_mask = 0;
+ mdp5_mdss->irqcontroller.domain = d;
return 0;
}
-int msm_mdss_enable(struct msm_mdss *mdss)
+static int mdp5_mdss_enable(struct msm_mdss *mdss)
{
+ struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
DBG("");
- clk_prepare_enable(mdss->ahb_clk);
- if (mdss->axi_clk)
- clk_prepare_enable(mdss->axi_clk);
- if (mdss->vsync_clk)
- clk_prepare_enable(mdss->vsync_clk);
+ clk_prepare_enable(mdp5_mdss->ahb_clk);
+ if (mdp5_mdss->axi_clk)
+ clk_prepare_enable(mdp5_mdss->axi_clk);
+ if (mdp5_mdss->vsync_clk)
+ clk_prepare_enable(mdp5_mdss->vsync_clk);
return 0;
}
-int msm_mdss_disable(struct msm_mdss *mdss)
+static int mdp5_mdss_disable(struct msm_mdss *mdss)
{
+ struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(mdss);
DBG("");
- if (mdss->vsync_clk)
- clk_disable_unprepare(mdss->vsync_clk);
- if (mdss->axi_clk)
- clk_disable_unprepare(mdss->axi_clk);
- clk_disable_unprepare(mdss->ahb_clk);
+ if (mdp5_mdss->vsync_clk)
+ clk_disable_unprepare(mdp5_mdss->vsync_clk);
+ if (mdp5_mdss->axi_clk)
+ clk_disable_unprepare(mdp5_mdss->axi_clk);
+ clk_disable_unprepare(mdp5_mdss->ahb_clk);
return 0;
}
-static int msm_mdss_get_clocks(struct msm_mdss *mdss)
+static int msm_mdss_get_clocks(struct mdp5_mdss *mdp5_mdss)
{
- struct platform_device *pdev = to_platform_device(mdss->dev->dev);
+ struct platform_device *pdev =
+ to_platform_device(mdp5_mdss->base.dev->dev);
- mdss->ahb_clk = msm_clk_get(pdev, "iface");
- if (IS_ERR(mdss->ahb_clk))
- mdss->ahb_clk = NULL;
+ mdp5_mdss->ahb_clk = msm_clk_get(pdev, "iface");
+ if (IS_ERR(mdp5_mdss->ahb_clk))
+ mdp5_mdss->ahb_clk = NULL;
- mdss->axi_clk = msm_clk_get(pdev, "bus");
- if (IS_ERR(mdss->axi_clk))
- mdss->axi_clk = NULL;
+ mdp5_mdss->axi_clk = msm_clk_get(pdev, "bus");
+ if (IS_ERR(mdp5_mdss->axi_clk))
+ mdp5_mdss->axi_clk = NULL;
- mdss->vsync_clk = msm_clk_get(pdev, "vsync");
- if (IS_ERR(mdss->vsync_clk))
- mdss->vsync_clk = NULL;
+ mdp5_mdss->vsync_clk = msm_clk_get(pdev, "vsync");
+ if (IS_ERR(mdp5_mdss->vsync_clk))
+ mdp5_mdss->vsync_clk = NULL;
return 0;
}
-void msm_mdss_destroy(struct drm_device *dev)
+static void mdp5_mdss_destroy(struct drm_device *dev)
{
struct msm_drm_private *priv = dev->dev_private;
- struct msm_mdss *mdss = priv->mdss;
+ struct mdp5_mdss *mdp5_mdss = to_mdp5_mdss(priv->mdss);
- if (!mdss)
+ if (!mdp5_mdss)
return;
- irq_domain_remove(mdss->irqcontroller.domain);
- mdss->irqcontroller.domain = NULL;
+ irq_domain_remove(mdp5_mdss->irqcontroller.domain);
+ mdp5_mdss->irqcontroller.domain = NULL;
- regulator_disable(mdss->vdd);
+ regulator_disable(mdp5_mdss->vdd);
pm_runtime_disable(dev->dev);
}
-int msm_mdss_init(struct drm_device *dev)
+static const struct msm_mdss_funcs mdss_funcs = {
+ .enable = mdp5_mdss_enable,
+ .disable = mdp5_mdss_disable,
+ .destroy = mdp5_mdss_destroy,
+};
+
+int mdp5_mdss_init(struct drm_device *dev)
{
struct platform_device *pdev = to_platform_device(dev->dev);
struct msm_drm_private *priv = dev->dev_private;
- struct msm_mdss *mdss;
+ struct mdp5_mdss *mdp5_mdss;
int ret;
DBG("");
@@ -217,40 +224,40 @@ int msm_mdss_init(struct drm_device *dev)
if (!of_device_is_compatible(dev->dev->of_node, "qcom,mdss"))
return 0;
- mdss = devm_kzalloc(dev->dev, sizeof(*mdss), GFP_KERNEL);
- if (!mdss) {
+ mdp5_mdss = devm_kzalloc(dev->dev, sizeof(*mdp5_mdss), GFP_KERNEL);
+ if (!mdp5_mdss) {
ret = -ENOMEM;
goto fail;
}
- mdss->dev = dev;
+ mdp5_mdss->base.dev = dev;
- mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
- if (IS_ERR(mdss->mmio)) {
- ret = PTR_ERR(mdss->mmio);
+ mdp5_mdss->mmio = msm_ioremap(pdev, "mdss_phys", "MDSS");
+ if (IS_ERR(mdp5_mdss->mmio)) {
+ ret = PTR_ERR(mdp5_mdss->mmio);
goto fail;
}
- mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
- if (IS_ERR(mdss->vbif)) {
- ret = PTR_ERR(mdss->vbif);
+ mdp5_mdss->vbif = msm_ioremap(pdev, "vbif_phys", "VBIF");
+ if (IS_ERR(mdp5_mdss->vbif)) {
+ ret = PTR_ERR(mdp5_mdss->vbif);
goto fail;
}
- ret = msm_mdss_get_clocks(mdss);
+ ret = msm_mdss_get_clocks(mdp5_mdss);
if (ret) {
dev_err(dev->dev, "failed to get clocks: %d\n", ret);
goto fail;
}
/* Regulator to enable GDSCs in downstream kernels */
- mdss->vdd = devm_regulator_get(dev->dev, "vdd");
- if (IS_ERR(mdss->vdd)) {
- ret = PTR_ERR(mdss->vdd);
+ mdp5_mdss->vdd = devm_regulator_get(dev->dev, "vdd");
+ if (IS_ERR(mdp5_mdss->vdd)) {
+ ret = PTR_ERR(mdp5_mdss->vdd);
goto fail;
}
- ret = regulator_enable(mdss->vdd);
+ ret = regulator_enable(mdp5_mdss->vdd);
if (ret) {
dev_err(dev->dev, "failed to enable regulator vdd: %d\n",
ret);
@@ -258,25 +265,26 @@ int msm_mdss_init(struct drm_device *dev)
}
ret = devm_request_irq(dev->dev, platform_get_irq(pdev, 0),
- mdss_irq, 0, "mdss_isr", mdss);
+ mdss_irq, 0, "mdss_isr", mdp5_mdss);
if (ret) {
dev_err(dev->dev, "failed to init irq: %d\n", ret);
goto fail_irq;
}
- ret = mdss_irq_domain_init(mdss);
+ ret = mdss_irq_domain_init(mdp5_mdss);
if (ret) {
dev_err(dev->dev, "failed to init sub-block irqs: %d\n", ret);
goto fail_irq;
}
- priv->mdss = mdss;
+ mdp5_mdss->base.funcs = &mdss_funcs;
+ priv->mdss = &mdp5_mdss->base;
pm_runtime_enable(dev->dev);
return 0;
fail_irq:
- regulator_disable(mdss->vdd);
+ regulator_disable(mdp5_mdss->vdd);
fail:
return ret;
}
diff --git a/drivers/gpu/drm/msm/disp/mdp_common.xml.h b/drivers/gpu/drm/msm/disp/mdp_common.xml.h
index 1494c407be44..d420c8044e23 100644
--- a/drivers/gpu/drm/msm/disp/mdp_common.xml.h
+++ b/drivers/gpu/drm/msm/disp/mdp_common.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/dsi/dsi.c b/drivers/gpu/drm/msm/dsi/dsi.c
index b744bcc7d8ad..ff8164cc6738 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.c
+++ b/drivers/gpu/drm/msm/dsi/dsi.c
@@ -208,6 +208,9 @@ int msm_dsi_modeset_init(struct msm_dsi *msm_dsi, struct drm_device *dev,
goto fail;
}
+ if (!msm_dsi_manager_validate_current_config(msm_dsi->id))
+ goto fail;
+
msm_dsi->encoder = encoder;
msm_dsi->bridge = msm_dsi_manager_bridge_init(msm_dsi->id);
diff --git a/drivers/gpu/drm/msm/dsi/dsi.h b/drivers/gpu/drm/msm/dsi/dsi.h
index 70d9a9a47acd..08f3fc6771b7 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.h
@@ -100,6 +100,7 @@ bool msm_dsi_manager_cmd_xfer_trigger(int id, u32 dma_base, u32 len);
void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags);
int msm_dsi_manager_register(struct msm_dsi *msm_dsi);
void msm_dsi_manager_unregister(struct msm_dsi *msm_dsi);
+bool msm_dsi_manager_validate_current_config(u8 id);
/* msm dsi */
static inline bool msm_dsi_device_connected(struct msm_dsi *msm_dsi)
@@ -149,6 +150,7 @@ static inline int msm_dsi_pll_set_usecase(struct msm_dsi_pll *pll,
#endif
/* dsi host */
+struct msm_dsi_host;
int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg);
void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
@@ -162,7 +164,8 @@ void msm_dsi_host_cmd_xfer_commit(struct mipi_dsi_host *host,
int msm_dsi_host_enable(struct mipi_dsi_host *host);
int msm_dsi_host_disable(struct mipi_dsi_host *host);
int msm_dsi_host_power_on(struct mipi_dsi_host *host,
- struct msm_dsi_phy_shared_timings *phy_shared_timings);
+ struct msm_dsi_phy_shared_timings *phy_shared_timings,
+ bool is_dual_dsi);
int msm_dsi_host_power_off(struct mipi_dsi_host *host);
int msm_dsi_host_set_display_mode(struct mipi_dsi_host *host,
struct drm_display_mode *mode);
@@ -175,13 +178,29 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
struct msm_dsi_pll *src_pll);
void msm_dsi_host_reset_phy(struct mipi_dsi_host *host);
void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
- struct msm_dsi_phy_clk_request *clk_req);
+ struct msm_dsi_phy_clk_request *clk_req,
+ bool is_dual_dsi);
void msm_dsi_host_destroy(struct mipi_dsi_host *host);
int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
struct drm_device *dev);
int msm_dsi_host_init(struct msm_dsi *msm_dsi);
int msm_dsi_runtime_suspend(struct device *dev);
int msm_dsi_runtime_resume(struct device *dev);
+int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host);
+int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host);
+void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host);
+void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host);
+int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size);
+int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size);
+void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host);
+void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host);
+void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host);
+int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *iova);
+int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *iova);
+int dsi_clk_init_v2(struct msm_dsi_host *msm_host);
+int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host);
+int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi);
+int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi);
/* dsi phy */
struct msm_dsi_phy;
diff --git a/drivers/gpu/drm/msm/dsi/dsi.xml.h b/drivers/gpu/drm/msm/dsi/dsi.xml.h
index f6a9471b70c8..21f489a737d7 100644
--- a/drivers/gpu/drm/msm/dsi/dsi.xml.h
+++ b/drivers/gpu/drm/msm/dsi/dsi.xml.h
@@ -8,8 +8,17 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /local/mnt/workspace/source_trees/envytools/rnndb/../rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-01-12 09:09:22)
-- /local/mnt/workspace/source_trees/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2016-05-09 06:32:54)
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.c b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
index 0327bb54b01b..dcdfb1bb54f9 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.c
@@ -136,20 +136,58 @@ static const struct msm_dsi_config sdm845_dsi_cfg = {
.num_dsi = 2,
};
+static const struct msm_dsi_host_cfg_ops msm_dsi_v2_host_ops = {
+ .link_clk_enable = dsi_link_clk_enable_v2,
+ .link_clk_disable = dsi_link_clk_disable_v2,
+ .clk_init_ver = dsi_clk_init_v2,
+ .tx_buf_alloc = dsi_tx_buf_alloc_v2,
+ .tx_buf_get = dsi_tx_buf_get_v2,
+ .tx_buf_put = NULL,
+ .dma_base_get = dsi_dma_base_get_v2,
+ .calc_clk_rate = dsi_calc_clk_rate_v2,
+};
+
+static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
+ .link_clk_enable = dsi_link_clk_enable_6g,
+ .link_clk_disable = dsi_link_clk_disable_6g,
+ .clk_init_ver = NULL,
+ .tx_buf_alloc = dsi_tx_buf_alloc_6g,
+ .tx_buf_get = dsi_tx_buf_get_6g,
+ .tx_buf_put = dsi_tx_buf_put_6g,
+ .dma_base_get = dsi_dma_base_get_6g,
+ .calc_clk_rate = dsi_calc_clk_rate_6g,
+};
+
+static const struct msm_dsi_host_cfg_ops msm_dsi_6g_v2_host_ops = {
+ .link_clk_enable = dsi_link_clk_enable_6g,
+ .link_clk_disable = dsi_link_clk_disable_6g,
+ .clk_init_ver = dsi_clk_init_6g_v2,
+ .tx_buf_alloc = dsi_tx_buf_alloc_6g,
+ .tx_buf_get = dsi_tx_buf_get_6g,
+ .tx_buf_put = dsi_tx_buf_put_6g,
+ .dma_base_get = dsi_dma_base_get_6g,
+ .calc_clk_rate = dsi_calc_clk_rate_6g,
+};
+
static const struct msm_dsi_cfg_handler dsi_cfg_handlers[] = {
- {MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064, &apq8064_dsi_cfg},
+ {MSM_DSI_VER_MAJOR_V2, MSM_DSI_V2_VER_MINOR_8064,
+ &apq8064_dsi_cfg, &msm_dsi_v2_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_0,
- &msm8974_apq8084_dsi_cfg},
+ &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1,
- &msm8974_apq8084_dsi_cfg},
+ &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_1_1,
- &msm8974_apq8084_dsi_cfg},
+ &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
{MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_2,
- &msm8974_apq8084_dsi_cfg},
- {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3, &msm8994_dsi_cfg},
- {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1, &msm8916_dsi_cfg},
- {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1, &msm8996_dsi_cfg},
- {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1, &sdm845_dsi_cfg},
+ &msm8974_apq8084_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3,
+ &msm8994_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_3_1,
+ &msm8916_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V1_4_1,
+ &msm8996_dsi_cfg, &msm_dsi_6g_host_ops},
+ {MSM_DSI_VER_MAJOR_6G, MSM_DSI_6G_VER_MINOR_V2_2_1,
+ &sdm845_dsi_cfg, &msm_dsi_6g_v2_host_ops},
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor)
diff --git a/drivers/gpu/drm/msm/dsi/dsi_cfg.h b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
index 9cfdcf1c95d5..16c507911110 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_cfg.h
+++ b/drivers/gpu/drm/msm/dsi/dsi_cfg.h
@@ -40,10 +40,22 @@ struct msm_dsi_config {
const int num_dsi;
};
+struct msm_dsi_host_cfg_ops {
+ int (*link_clk_enable)(struct msm_dsi_host *msm_host);
+ void (*link_clk_disable)(struct msm_dsi_host *msm_host);
+ int (*clk_init_ver)(struct msm_dsi_host *msm_host);
+ int (*tx_buf_alloc)(struct msm_dsi_host *msm_host, int size);
+ void* (*tx_buf_get)(struct msm_dsi_host *msm_host);
+ void (*tx_buf_put)(struct msm_dsi_host *msm_host);
+ int (*dma_base_get)(struct msm_dsi_host *msm_host, uint64_t *iova);
+ int (*calc_clk_rate)(struct msm_dsi_host *msm_host, bool is_dual_dsi);
+};
+
struct msm_dsi_cfg_handler {
u32 major;
u32 minor;
const struct msm_dsi_config *cfg;
+ const struct msm_dsi_host_cfg_ops *ops;
};
const struct msm_dsi_cfg_handler *msm_dsi_cfg_get(u32 major, u32 minor);
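
With this table, per-version branches collapse into indirect calls.
Mandatory hooks are populated in every handler; optional ones
(clk_init_ver, tx_buf_put) can be NULL and must be checked, as the
tables in dsi_cfg.c show. A minimal sketch of the calling pattern
(illustrative function name, not from the patch):

	static int example_host_init(struct msm_dsi_host *msm_host,
				     const struct msm_dsi_cfg_handler *cfg_hnd)
	{
		int ret;

		/* mandatory hook: present in every handler */
		ret = cfg_hnd->ops->tx_buf_alloc(msm_host, SZ_4K);
		if (ret)
			return ret;

		/* optional hook: NULL for the plain 6G handlers */
		if (cfg_hnd->ops->clk_init_ver)
			ret = cfg_hnd->ops->clk_init_ver(msm_host);

		return ret;
	}
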
diff --git a/drivers/gpu/drm/msm/dsi/dsi_host.c b/drivers/gpu/drm/msm/dsi/dsi_host.c
index 29841f440111..96fb5f635314 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_host.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_host.c
@@ -118,6 +118,7 @@ struct msm_dsi_host {
struct clk *byte_intf_clk;
u32 byte_clk_rate;
+ u32 pixel_clk_rate;
u32 esc_clk_rate;
/* DSI v2 specific clocks */
@@ -332,6 +333,54 @@ static int dsi_regulator_init(struct msm_dsi_host *msm_host)
return 0;
}
+int dsi_clk_init_v2(struct msm_dsi_host *msm_host)
+{
+ struct platform_device *pdev = msm_host->pdev;
+ int ret = 0;
+
+ msm_host->src_clk = msm_clk_get(pdev, "src");
+
+ if (IS_ERR(msm_host->src_clk)) {
+ ret = PTR_ERR(msm_host->src_clk);
+ pr_err("%s: can't find src clock. ret=%d\n",
+ __func__, ret);
+ msm_host->src_clk = NULL;
+ return ret;
+ }
+
+ msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
+ if (!msm_host->esc_clk_src) {
+ ret = -ENODEV;
+ pr_err("%s: can't get esc clock parent. ret=%d\n",
+ __func__, ret);
+ return ret;
+ }
+
+ msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
+ if (!msm_host->dsi_clk_src) {
+ ret = -ENODEV;
+ pr_err("%s: can't get src clock parent. ret=%d\n",
+ __func__, ret);
+ }
+
+ return ret;
+}
+
+int dsi_clk_init_6g_v2(struct msm_dsi_host *msm_host)
+{
+ struct platform_device *pdev = msm_host->pdev;
+ int ret = 0;
+
+ msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf");
+ if (IS_ERR(msm_host->byte_intf_clk)) {
+ ret = PTR_ERR(msm_host->byte_intf_clk);
+ pr_err("%s: can't find byte_intf clock. ret=%d\n",
+ __func__, ret);
+ }
+
+ return ret;
+}
+
static int dsi_clk_init(struct msm_dsi_host *msm_host)
{
struct platform_device *pdev = msm_host->pdev;
@@ -379,19 +428,6 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
goto exit;
}
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G &&
- cfg_hnd->minor >= MSM_DSI_6G_VER_MINOR_V2_2_1) {
- msm_host->byte_intf_clk = msm_clk_get(pdev, "byte_intf");
- if (IS_ERR(msm_host->byte_intf_clk)) {
- ret = PTR_ERR(msm_host->byte_intf_clk);
- pr_err("%s: can't find byte_intf clock. ret=%d\n",
- __func__, ret);
- goto exit;
- }
- } else {
- msm_host->byte_intf_clk = NULL;
- }
-
msm_host->byte_clk_src = clk_get_parent(msm_host->byte_clk);
if (!msm_host->byte_clk_src) {
ret = -ENODEV;
@@ -406,31 +442,8 @@ static int dsi_clk_init(struct msm_dsi_host *msm_host)
goto exit;
}
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
- msm_host->src_clk = msm_clk_get(pdev, "src");
- if (IS_ERR(msm_host->src_clk)) {
- ret = PTR_ERR(msm_host->src_clk);
- pr_err("%s: can't find src clock. ret=%d\n",
- __func__, ret);
- msm_host->src_clk = NULL;
- goto exit;
- }
-
- msm_host->esc_clk_src = clk_get_parent(msm_host->esc_clk);
- if (!msm_host->esc_clk_src) {
- ret = -ENODEV;
- pr_err("%s: can't get esc clock parent. ret=%d\n",
- __func__, ret);
- goto exit;
- }
-
- msm_host->dsi_clk_src = clk_get_parent(msm_host->src_clk);
- if (!msm_host->dsi_clk_src) {
- ret = -ENODEV;
- pr_err("%s: can't get src clock parent. ret=%d\n",
- __func__, ret);
- }
- }
+ if (cfg_hnd->ops->clk_init_ver)
+ ret = cfg_hnd->ops->clk_init_ver(msm_host);
exit:
return ret;
}
@@ -498,7 +511,7 @@ int msm_dsi_runtime_resume(struct device *dev)
return dsi_bus_clk_enable(msm_host);
}
-static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
+int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
{
int ret;
@@ -511,7 +524,7 @@ static int dsi_link_clk_enable_6g(struct msm_dsi_host *msm_host)
goto error;
}
- ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
+ ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
goto error;
@@ -566,7 +579,7 @@ error:
return ret;
}
-static int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
+int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
{
int ret;
@@ -592,7 +605,7 @@ static int dsi_link_clk_enable_v2(struct msm_dsi_host *msm_host)
goto error;
}
- ret = clk_set_rate(msm_host->pixel_clk, msm_host->mode->clock * 1000);
+ ret = clk_set_rate(msm_host->pixel_clk, msm_host->pixel_clk_rate);
if (ret) {
pr_err("%s: Failed to set rate pixel clk, %d\n", __func__, ret);
goto error;
@@ -634,98 +647,121 @@ error:
return ret;
}
-static int dsi_link_clk_enable(struct msm_dsi_host *msm_host)
+void dsi_link_clk_disable_6g(struct msm_dsi_host *msm_host)
{
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+ clk_disable_unprepare(msm_host->esc_clk);
+ clk_disable_unprepare(msm_host->pixel_clk);
+ if (msm_host->byte_intf_clk)
+ clk_disable_unprepare(msm_host->byte_intf_clk);
+ clk_disable_unprepare(msm_host->byte_clk);
+}
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
- return dsi_link_clk_enable_6g(msm_host);
- else
- return dsi_link_clk_enable_v2(msm_host);
+void dsi_link_clk_disable_v2(struct msm_dsi_host *msm_host)
+{
+ clk_disable_unprepare(msm_host->pixel_clk);
+ clk_disable_unprepare(msm_host->src_clk);
+ clk_disable_unprepare(msm_host->esc_clk);
+ clk_disable_unprepare(msm_host->byte_clk);
}
-static void dsi_link_clk_disable(struct msm_dsi_host *msm_host)
+static u32 dsi_get_pclk_rate(struct msm_dsi_host *msm_host, bool is_dual_dsi)
{
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
+ struct drm_display_mode *mode = msm_host->mode;
+ u32 pclk_rate;
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
- clk_disable_unprepare(msm_host->esc_clk);
- clk_disable_unprepare(msm_host->pixel_clk);
- if (msm_host->byte_intf_clk)
- clk_disable_unprepare(msm_host->byte_intf_clk);
- clk_disable_unprepare(msm_host->byte_clk);
- } else {
- clk_disable_unprepare(msm_host->pixel_clk);
- clk_disable_unprepare(msm_host->src_clk);
- clk_disable_unprepare(msm_host->esc_clk);
- clk_disable_unprepare(msm_host->byte_clk);
- }
+ pclk_rate = mode->clock * 1000;
+
+ /*
+ * For dual DSI mode, the current DRM mode has the complete width of the
+ * panel. Since the complete panel is driven by two DSI controllers,
+ * the clock rates have to be split between them. Adjust the byte and
+ * pixel clock rates for each DSI host accordingly.
+ */
+ if (is_dual_dsi)
+ pclk_rate /= 2;
+
+ return pclk_rate;
}
-static int dsi_calc_clk_rate(struct msm_dsi_host *msm_host)
+static void dsi_calc_pclk(struct msm_dsi_host *msm_host, bool is_dual_dsi)
{
- struct drm_display_mode *mode = msm_host->mode;
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
u8 lanes = msm_host->lanes;
u32 bpp = dsi_get_bpp(msm_host->format);
- u32 pclk_rate;
+ u32 pclk_rate = dsi_get_pclk_rate(msm_host, is_dual_dsi);
+ u64 pclk_bpp = (u64)pclk_rate * bpp;
- if (!mode) {
- pr_err("%s: mode not set\n", __func__);
- return -EINVAL;
- }
-
- pclk_rate = mode->clock * 1000;
- if (lanes > 0) {
- msm_host->byte_clk_rate = (pclk_rate * bpp) / (8 * lanes);
- } else {
+ if (lanes == 0) {
pr_err("%s: forcing mdss_dsi lanes to 1\n", __func__);
- msm_host->byte_clk_rate = (pclk_rate * bpp) / 8;
+ lanes = 1;
}
- DBG("pclk=%d, bclk=%d", pclk_rate, msm_host->byte_clk_rate);
+ do_div(pclk_bpp, (8 * lanes));
- msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
+ msm_host->pixel_clk_rate = pclk_rate;
+ msm_host->byte_clk_rate = pclk_bpp;
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
- unsigned int esc_mhz, esc_div;
- unsigned long byte_mhz;
+ DBG("pclk=%d, bclk=%d", msm_host->pixel_clk_rate,
+ msm_host->byte_clk_rate);
- msm_host->src_clk_rate = (pclk_rate * bpp) / 8;
+}
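
As a worked example: a 1080p panel with mode->clock = 148500 gives
pclk_rate = 148.5 MHz; in dual-DSI mode each host gets 74.25 MHz, and
with 24 bpp across 4 lanes dsi_calc_pclk() then sets byte_clk_rate =
74,250,000 * 24 / 32 = 55,687,500 Hz per host.
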
- /*
- * esc clock is byte clock followed by a 4 bit divider,
- * we need to find an escape clock frequency within the
- * mipi DSI spec range within the maximum divider limit
- * We iterate here between an escape clock frequencey
- * between 20 Mhz to 5 Mhz and pick up the first one
- * that can be supported by our divider
- */
+int dsi_calc_clk_rate_6g(struct msm_dsi_host *msm_host, bool is_dual_dsi)
+{
+ if (!msm_host->mode) {
+ pr_err("%s: mode not set\n", __func__);
+ return -EINVAL;
+ }
+
+ dsi_calc_pclk(msm_host, is_dual_dsi);
+ msm_host->esc_clk_rate = clk_get_rate(msm_host->esc_clk);
+ return 0;
+}
- byte_mhz = msm_host->byte_clk_rate / 1000000;
+int dsi_calc_clk_rate_v2(struct msm_dsi_host *msm_host, bool is_dual_dsi)
+{
+ u32 bpp = dsi_get_bpp(msm_host->format);
+ u64 pclk_bpp;
+ unsigned int esc_mhz, esc_div;
+ unsigned long byte_mhz;
- for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
- esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);
+ dsi_calc_pclk(msm_host, is_dual_dsi);
- /*
- * TODO: Ideally, we shouldn't know what sort of divider
- * is available in mmss_cc, we're just assuming that
- * it'll always be a 4 bit divider. Need to come up with
- * a better way here.
- */
- if (esc_div >= 1 && esc_div <= 16)
- break;
- }
+ pclk_bpp = (u64)dsi_get_pclk_rate(msm_host, is_dual_dsi) * bpp;
+ do_div(pclk_bpp, 8);
+ msm_host->src_clk_rate = pclk_bpp;
- if (esc_mhz < 5)
- return -EINVAL;
+ /*
+ * The esc clock is the byte clock followed by a 4-bit divider,
+ * so we need to find an escape clock frequency that is within
+ * the MIPI DSI spec range and within the maximum divider limit.
+ * We iterate over escape clock frequencies from 20 MHz down to
+ * 5 MHz and pick the first one our divider can support.
+ */
+
+ byte_mhz = msm_host->byte_clk_rate / 1000000;
- msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;
+ for (esc_mhz = 20; esc_mhz >= 5; esc_mhz--) {
+ esc_div = DIV_ROUND_UP(byte_mhz, esc_mhz);
- DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
- msm_host->src_clk_rate);
+ /*
+ * TODO: Ideally, we shouldn't know what sort of divider
+ * is available in mmss_cc, we're just assuming that
+ * it'll always be a 4 bit divider. Need to come up with
+ * a better way here.
+ */
+ if (esc_div >= 1 && esc_div <= 16)
+ break;
}
+ if (esc_mhz < 5)
+ return -EINVAL;
+
+ msm_host->esc_clk_rate = msm_host->byte_clk_rate / esc_div;
+
+ DBG("esc=%d, src=%d", msm_host->esc_clk_rate,
+ msm_host->src_clk_rate);
+
return 0;
}
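
Continuing the byte-clock numbers above: byte_mhz = 55, so the first
candidate esc_mhz = 20 yields esc_div = DIV_ROUND_UP(55, 20) = 3,
which fits the assumed 4-bit divider range (1..16), giving
esc_clk_rate = 55,687,500 / 3 = 18,562,500 Hz, comfortably inside the
5-20 MHz window the loop targets.
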
@@ -885,7 +921,7 @@ static void dsi_ctrl_config(struct msm_dsi_host *msm_host, bool enable,
dsi_write(msm_host, REG_DSI_CTRL, data);
}
-static void dsi_timing_setup(struct msm_dsi_host *msm_host)
+static void dsi_timing_setup(struct msm_dsi_host *msm_host, bool is_dual_dsi)
{
struct drm_display_mode *mode = msm_host->mode;
u32 hs_start = 0, vs_start = 0; /* take sync start as 0 */
@@ -897,10 +933,26 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host)
u32 ha_end = ha_start + mode->hdisplay;
u32 va_start = v_total - mode->vsync_start;
u32 va_end = va_start + mode->vdisplay;
+ u32 hdisplay = mode->hdisplay;
u32 wc;
DBG("");
+ /*
+ * For dual DSI mode, the current DRM mode has
+ * the complete width of the panel. Since the complete
+ * panel is driven by two DSI controllers, the horizontal
+ * timings have to be split between the two DSI controllers.
+ * Adjust the DSI host timing values accordingly.
+ */
+ if (is_dual_dsi) {
+ h_total /= 2;
+ hs_end /= 2;
+ ha_start /= 2;
+ ha_end /= 2;
+ hdisplay /= 2;
+ }
+
if (msm_host->mode_flags & MIPI_DSI_MODE_VIDEO) {
dsi_write(msm_host, REG_DSI_ACTIVE_H,
DSI_ACTIVE_H_START(ha_start) |
@@ -921,7 +973,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host)
DSI_ACTIVE_VSYNC_VPOS_END(vs_end));
} else { /* command mode */
/* image data and 1 byte write_memory_start cmd */
- wc = mode->hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
+ wc = hdisplay * dsi_get_bpp(msm_host->format) / 8 + 1;
dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_CTRL,
DSI_CMD_MDP_STREAM_CTRL_WORD_COUNT(wc) |
@@ -931,7 +983,7 @@ static void dsi_timing_setup(struct msm_dsi_host *msm_host)
MIPI_DSI_DCS_LONG_WRITE));
dsi_write(msm_host, REG_DSI_CMD_MDP_STREAM_TOTAL,
- DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(mode->hdisplay) |
+ DSI_CMD_MDP_STREAM_TOTAL_H_TOTAL(hdisplay) |
DSI_CMD_MDP_STREAM_TOTAL_V_TOTAL(mode->vdisplay));
}
}
@@ -1015,50 +1067,37 @@ static void dsi_wait4video_eng_busy(struct msm_dsi_host *msm_host)
}
}
-/* dsi_cmd */
-static int dsi_tx_buf_alloc(struct msm_dsi_host *msm_host, int size)
+int dsi_tx_buf_alloc_6g(struct msm_dsi_host *msm_host, int size)
{
struct drm_device *dev = msm_host->dev;
struct msm_drm_private *priv = dev->dev_private;
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
- int ret;
uint64_t iova;
+ u8 *data;
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
- msm_host->tx_gem_obj = msm_gem_new(dev, size, MSM_BO_UNCACHED);
- if (IS_ERR(msm_host->tx_gem_obj)) {
- ret = PTR_ERR(msm_host->tx_gem_obj);
- pr_err("%s: failed to allocate gem, %d\n",
- __func__, ret);
- msm_host->tx_gem_obj = NULL;
- return ret;
- }
+ data = msm_gem_kernel_new(dev, size, MSM_BO_UNCACHED,
+ priv->kms->aspace,
+ &msm_host->tx_gem_obj, &iova);
- ret = msm_gem_get_iova(msm_host->tx_gem_obj,
- priv->kms->aspace, &iova);
- if (ret) {
- pr_err("%s: failed to get iova, %d\n", __func__, ret);
- return ret;
- }
+ if (IS_ERR(data)) {
+ msm_host->tx_gem_obj = NULL;
+ return PTR_ERR(data);
+ }
- if (iova & 0x07) {
- pr_err("%s: buf NOT 8 bytes aligned\n", __func__);
- return -EINVAL;
- }
+ msm_host->tx_size = msm_host->tx_gem_obj->size;
- msm_host->tx_size = msm_host->tx_gem_obj->size;
- } else {
- msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
+ return 0;
+}
+
+int dsi_tx_buf_alloc_v2(struct msm_dsi_host *msm_host, int size)
+{
+ struct drm_device *dev = msm_host->dev;
+
+ msm_host->tx_buf = dma_alloc_coherent(dev->dev, size,
&msm_host->tx_buf_paddr, GFP_KERNEL);
- if (!msm_host->tx_buf) {
- ret = -ENOMEM;
- pr_err("%s: failed to allocate tx buf, %d\n",
- __func__, ret);
- return ret;
- }
+ if (!msm_host->tx_buf)
+ return -ENOMEM;
- msm_host->tx_size = size;
- }
+ msm_host->tx_size = size;
return 0;
}
@@ -1089,6 +1128,21 @@ static void dsi_tx_buf_free(struct msm_dsi_host *msm_host)
msm_host->tx_buf_paddr);
}
+void *dsi_tx_buf_get_6g(struct msm_dsi_host *msm_host)
+{
+ return msm_gem_get_vaddr(msm_host->tx_gem_obj);
+}
+
+void *dsi_tx_buf_get_v2(struct msm_dsi_host *msm_host)
+{
+ return msm_host->tx_buf;
+}
+
+void dsi_tx_buf_put_6g(struct msm_dsi_host *msm_host)
+{
+ msm_gem_put_vaddr(msm_host->tx_gem_obj);
+}
+
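The cfg_hnd->ops calls used throughout these hunks assume a per-version ops table attached to the config handler. A sketch of what that structure presumably carries, with member names and signatures inferred purely from the call sites in this patch (the authoritative definition would live in dsi_cfg.h):

	/* Inferred from the call sites in this patch; the real structure
	 * may carry additional members. */
	struct msm_dsi_host_cfg_ops {
		int (*link_clk_enable)(struct msm_dsi_host *msm_host);
		void (*link_clk_disable)(struct msm_dsi_host *msm_host);
		int (*tx_buf_alloc)(struct msm_dsi_host *msm_host, int size);
		void *(*tx_buf_get)(struct msm_dsi_host *msm_host);
		void (*tx_buf_put)(struct msm_dsi_host *msm_host);
		int (*dma_base_get)(struct msm_dsi_host *msm_host,
				    uint64_t *dma_base);
		int (*calc_clk_rate)(struct msm_dsi_host *msm_host,
				     bool is_dual_dsi);
	};

	/* e.g. a 6G handler would wire up the _6g variants defined above: */
	static const struct msm_dsi_host_cfg_ops msm_dsi_6g_host_ops = {
		.tx_buf_alloc = dsi_tx_buf_alloc_6g,
		.tx_buf_get = dsi_tx_buf_get_6g,
		.tx_buf_put = dsi_tx_buf_put_6g,
		.dma_base_get = dsi_dma_base_get_6g,
		/* link_clk_* and calc_clk_rate hooks omitted here */
	};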
/*
* prepare cmd buffer to be txed
*/
@@ -1113,15 +1167,11 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
return -EINVAL;
}
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
- data = msm_gem_get_vaddr(msm_host->tx_gem_obj);
- if (IS_ERR(data)) {
- ret = PTR_ERR(data);
- pr_err("%s: get vaddr failed, %d\n", __func__, ret);
- return ret;
- }
- } else {
- data = msm_host->tx_buf;
+ data = cfg_hnd->ops->tx_buf_get(msm_host);
+ if (IS_ERR(data)) {
+ ret = PTR_ERR(data);
+ pr_err("%s: get vaddr failed, %d\n", __func__, ret);
+ return ret;
}
/* MSM specific command format in memory */
@@ -1142,8 +1192,8 @@ static int dsi_cmd_dma_add(struct msm_dsi_host *msm_host,
if (packet.size < len)
memset(data + packet.size, 0xff, len - packet.size);
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G)
- msm_gem_put_vaddr(msm_host->tx_gem_obj);
+ if (cfg_hnd->ops->tx_buf_put)
+ cfg_hnd->ops->tx_buf_put(msm_host);
return len;
}
@@ -1190,24 +1240,38 @@ static int dsi_long_read_resp(u8 *buf, const struct mipi_dsi_msg *msg)
return msg->rx_len;
}
-static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
+int dsi_dma_base_get_6g(struct msm_dsi_host *msm_host, uint64_t *dma_base)
{
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
struct drm_device *dev = msm_host->dev;
struct msm_drm_private *priv = dev->dev_private;
+
+ if (!dma_base)
+ return -EINVAL;
+
+ return msm_gem_get_iova(msm_host->tx_gem_obj,
+ priv->kms->aspace, dma_base);
+}
+
+int dsi_dma_base_get_v2(struct msm_dsi_host *msm_host, uint64_t *dma_base)
+{
+ if (!dma_base)
+ return -EINVAL;
+
+ *dma_base = msm_host->tx_buf_paddr;
+ return 0;
+}
+
+static int dsi_cmd_dma_tx(struct msm_dsi_host *msm_host, int len)
+{
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret;
uint64_t dma_base;
bool triggered;
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_6G) {
- ret = msm_gem_get_iova(msm_host->tx_gem_obj,
- priv->kms->aspace, &dma_base);
- if (ret) {
- pr_err("%s: failed to get iova: %d\n", __func__, ret);
- return ret;
- }
- } else {
- dma_base = msm_host->tx_buf_paddr;
+ ret = cfg_hnd->ops->dma_base_get(msm_host, &dma_base);
+ if (ret) {
+ pr_err("%s: failed to get iova: %d\n", __func__, ret);
+ return ret;
}
reinit_completion(&msm_host->dma_comp);
@@ -1845,6 +1909,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
struct drm_device *dev)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
struct platform_device *pdev = msm_host->pdev;
int ret;
@@ -1865,7 +1930,7 @@ int msm_dsi_host_modeset_init(struct mipi_dsi_host *host,
}
msm_host->dev = dev;
- ret = dsi_tx_buf_alloc(msm_host, SZ_4K);
+ ret = cfg_hnd->ops->tx_buf_alloc(msm_host, SZ_4K);
if (ret) {
pr_err("%s: alloc tx gem obj failed, %d\n", __func__, ret);
return ret;
@@ -1923,6 +1988,7 @@ int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
/* TODO: make sure dsi_cmd_mdp is idle.
* Since DSI6G v1.2.0, we can set DSI_TRIG_CTRL.BLOCK_DMA_WITHIN_FRAME
@@ -1935,7 +2001,7 @@ int msm_dsi_host_xfer_prepare(struct mipi_dsi_host *host,
* mdp clock need to be enabled to receive dsi interrupt
*/
pm_runtime_get_sync(&msm_host->pdev->dev);
- dsi_link_clk_enable(msm_host);
+ cfg_hnd->ops->link_clk_enable(msm_host);
/* TODO: vote for bus bandwidth */
@@ -1956,6 +2022,7 @@ void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
const struct mipi_dsi_msg *msg)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
dsi_intr_ctrl(msm_host, DSI_IRQ_MASK_CMD_DMA_DONE, 0);
dsi_write(msm_host, REG_DSI_CTRL, msm_host->dma_cmd_ctrl_restore);
@@ -1965,7 +2032,7 @@ void msm_dsi_host_xfer_restore(struct mipi_dsi_host *host,
/* TODO: unvote for bus bandwidth */
- dsi_link_clk_disable(msm_host);
+ cfg_hnd->ops->link_clk_disable(msm_host);
pm_runtime_put_autosuspend(&msm_host->pdev->dev);
}
@@ -2129,7 +2196,6 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
struct msm_dsi_pll *src_pll)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
- const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
struct clk *byte_clk_provider, *pixel_clk_provider;
int ret;
@@ -2155,14 +2221,16 @@ int msm_dsi_host_set_src_pll(struct mipi_dsi_host *host,
goto exit;
}
- if (cfg_hnd->major == MSM_DSI_VER_MAJOR_V2) {
+ if (msm_host->dsi_clk_src) {
ret = clk_set_parent(msm_host->dsi_clk_src, pixel_clk_provider);
if (ret) {
pr_err("%s: can't set parent to dsi_clk_src. ret=%d\n",
__func__, ret);
goto exit;
}
+ }
+ if (msm_host->esc_clk_src) {
ret = clk_set_parent(msm_host->esc_clk_src, byte_clk_provider);
if (ret) {
pr_err("%s: can't set parent to esc_clk_src. ret=%d\n",
@@ -2189,12 +2257,14 @@ void msm_dsi_host_reset_phy(struct mipi_dsi_host *host)
}
void msm_dsi_host_get_phy_clk_req(struct mipi_dsi_host *host,
- struct msm_dsi_phy_clk_request *clk_req)
+ struct msm_dsi_phy_clk_request *clk_req,
+ bool is_dual_dsi)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret;
- ret = dsi_calc_clk_rate(msm_host);
+ ret = cfg_hnd->ops->calc_clk_rate(msm_host, is_dual_dsi);
if (ret) {
pr_err("%s: unable to calc clk rate, %d\n", __func__, ret);
return;
@@ -2256,9 +2326,11 @@ static void msm_dsi_sfpb_config(struct msm_dsi_host *msm_host, bool enable)
}
int msm_dsi_host_power_on(struct mipi_dsi_host *host,
- struct msm_dsi_phy_shared_timings *phy_shared_timings)
+ struct msm_dsi_phy_shared_timings *phy_shared_timings,
+ bool is_dual_dsi)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
int ret = 0;
mutex_lock(&msm_host->dev_mutex);
@@ -2277,7 +2349,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
}
pm_runtime_get_sync(&msm_host->pdev->dev);
- ret = dsi_link_clk_enable(msm_host);
+ ret = cfg_hnd->ops->link_clk_enable(msm_host);
if (ret) {
pr_err("%s: failed to enable link clocks. ret=%d\n",
__func__, ret);
@@ -2291,7 +2363,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
goto fail_disable_clk;
}
- dsi_timing_setup(msm_host);
+ dsi_timing_setup(msm_host, is_dual_dsi);
dsi_sw_reset(msm_host);
dsi_ctrl_config(msm_host, true, phy_shared_timings);
@@ -2304,7 +2376,7 @@ int msm_dsi_host_power_on(struct mipi_dsi_host *host,
return 0;
fail_disable_clk:
- dsi_link_clk_disable(msm_host);
+ cfg_hnd->ops->link_clk_disable(msm_host);
pm_runtime_put_autosuspend(&msm_host->pdev->dev);
fail_disable_reg:
dsi_host_regulator_disable(msm_host);
@@ -2316,6 +2388,7 @@ unlock_ret:
int msm_dsi_host_power_off(struct mipi_dsi_host *host)
{
struct msm_dsi_host *msm_host = to_msm_dsi_host(host);
+ const struct msm_dsi_cfg_handler *cfg_hnd = msm_host->cfg_hnd;
mutex_lock(&msm_host->dev_mutex);
if (!msm_host->power_on) {
@@ -2330,7 +2403,7 @@ int msm_dsi_host_power_off(struct mipi_dsi_host *host)
pinctrl_pm_select_sleep_state(&msm_host->pdev->dev);
- dsi_link_clk_disable(msm_host);
+ cfg_hnd->ops->link_clk_disable(msm_host);
pm_runtime_put_autosuspend(&msm_host->pdev->dev);
dsi_host_regulator_disable(msm_host);
diff --git a/drivers/gpu/drm/msm/dsi/dsi_manager.c b/drivers/gpu/drm/msm/dsi/dsi_manager.c
index d5006d6923e0..5224010d90e4 100644
--- a/drivers/gpu/drm/msm/dsi/dsi_manager.c
+++ b/drivers/gpu/drm/msm/dsi/dsi_manager.c
@@ -134,8 +134,9 @@ static int enable_phy(struct msm_dsi *msm_dsi, int src_pll_id,
{
struct msm_dsi_phy_clk_request clk_req;
int ret;
+ bool is_dual_dsi = IS_DUAL_DSI();
- msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req);
+ msm_dsi_host_get_phy_clk_req(msm_dsi->host, &clk_req, is_dual_dsi);
ret = msm_dsi_phy_enable(msm_dsi->phy, src_pll_id, &clk_req);
msm_dsi_phy_get_shared_timings(msm_dsi->phy, shared_timings);
@@ -305,102 +306,25 @@ static void dsi_mgr_connector_destroy(struct drm_connector *connector)
kfree(dsi_connector);
}
-static void dsi_dual_connector_fix_modes(struct drm_connector *connector)
-{
- struct drm_display_mode *mode, *m;
-
- /* Only support left-right mode */
- list_for_each_entry_safe(mode, m, &connector->probed_modes, head) {
- mode->clock >>= 1;
- mode->hdisplay >>= 1;
- mode->hsync_start >>= 1;
- mode->hsync_end >>= 1;
- mode->htotal >>= 1;
- drm_mode_set_name(mode);
- }
-}
-
-static int dsi_dual_connector_tile_init(
- struct drm_connector *connector, int id)
-{
- struct drm_display_mode *mode;
- /* Fake topology id */
- char topo_id[8] = {'M', 'S', 'M', 'D', 'U', 'D', 'S', 'I'};
-
- if (connector->tile_group) {
- DBG("Tile property has been initialized");
- return 0;
- }
-
- /* Use the first mode only for now */
- mode = list_first_entry(&connector->probed_modes,
- struct drm_display_mode,
- head);
- if (!mode)
- return -EINVAL;
-
- connector->tile_group = drm_mode_get_tile_group(
- connector->dev, topo_id);
- if (!connector->tile_group)
- connector->tile_group = drm_mode_create_tile_group(
- connector->dev, topo_id);
- if (!connector->tile_group) {
- pr_err("%s: failed to create tile group\n", __func__);
- return -ENOMEM;
- }
-
- connector->has_tile = true;
- connector->tile_is_single_monitor = true;
-
- /* mode has been fixed */
- connector->tile_h_size = mode->hdisplay;
- connector->tile_v_size = mode->vdisplay;
-
- /* Only support left-right mode */
- connector->num_h_tile = 2;
- connector->num_v_tile = 1;
-
- connector->tile_v_loc = 0;
- connector->tile_h_loc = (id == DSI_RIGHT) ? 1 : 0;
-
- return 0;
-}
-
static int dsi_mgr_connector_get_modes(struct drm_connector *connector)
{
int id = dsi_mgr_connector_get_id(connector);
struct msm_dsi *msm_dsi = dsi_mgr_get_dsi(id);
struct drm_panel *panel = msm_dsi->panel;
- int ret, num;
+ int num;
if (!panel)
return 0;
- /* Since we have 2 connectors, but only 1 drm_panel in dual DSI mode,
- * panel should not attach to any connector.
- * Only temporarily attach panel to the current connector here,
- * to let panel set mode to this connector.
+ /*
+ * In dual DSI mode, we have one connector that can be
+ * attached to the drm_panel.
*/
drm_panel_attach(panel, connector);
num = drm_panel_get_modes(panel);
- drm_panel_detach(panel);
if (!num)
return 0;
- if (IS_DUAL_DSI()) {
- /* report half resolution to user */
- dsi_dual_connector_fix_modes(connector);
- ret = dsi_dual_connector_tile_init(connector, id);
- if (ret)
- return ret;
- ret = drm_connector_set_tile_property(connector);
- if (ret) {
- pr_err("%s: set tile property failed, %d\n",
- __func__, ret);
- return ret;
- }
- }
-
return num;
}
@@ -454,11 +378,11 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
if (ret)
goto phy_en_fail;
- /* Do nothing with the host if it is DSI 1 in case of dual DSI */
- if (is_dual_dsi && (DSI_1 == id))
+ /* Do nothing with the host if it is slave-DSI in case of dual DSI */
+ if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
return;
- ret = msm_dsi_host_power_on(host, &phy_shared_timings[id]);
+ ret = msm_dsi_host_power_on(host, &phy_shared_timings[id], is_dual_dsi);
if (ret) {
pr_err("%s: power on host %d failed, %d\n", __func__, id, ret);
goto host_on_fail;
@@ -466,7 +390,7 @@ static void dsi_mgr_bridge_pre_enable(struct drm_bridge *bridge)
if (is_dual_dsi && msm_dsi1) {
ret = msm_dsi_host_power_on(msm_dsi1->host,
- &phy_shared_timings[DSI_1]);
+ &phy_shared_timings[DSI_1], is_dual_dsi);
if (ret) {
pr_err("%s: power on host1 failed, %d\n",
__func__, ret);
@@ -556,11 +480,11 @@ static void dsi_mgr_bridge_post_disable(struct drm_bridge *bridge)
return;
/*
- * Do nothing with the host if it is DSI 1 in case of dual DSI.
+ * Do nothing with the host if it is slave-DSI in case of dual DSI.
* It is safe to call dsi_mgr_phy_disable() here because a single PHY
	 * won't be disabled until both PHYs request disable.
*/
- if (is_dual_dsi && (DSI_1 == id))
+ if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
goto disable_phy;
if (panel) {
@@ -621,7 +545,7 @@ static void dsi_mgr_bridge_mode_set(struct drm_bridge *bridge,
mode->vsync_end, mode->vtotal,
mode->type, mode->flags);
- if (is_dual_dsi && (DSI_1 == id))
+ if (is_dual_dsi && !IS_MASTER_DSI_LINK(id))
return;
msm_dsi_host_set_display_mode(host, adjusted_mode);
@@ -689,6 +613,23 @@ struct drm_connector *msm_dsi_manager_connector_init(u8 id)
return connector;
}
+bool msm_dsi_manager_validate_current_config(u8 id)
+{
+ bool is_dual_dsi = IS_DUAL_DSI();
+
+ /*
+	 * For dual DSI, we have only one drm_panel, so we register
+	 * only one bridge/connector. Skip bridge/connector
+	 * initialisation if this is the slave DSI in a dual DSI
+	 * configuration.
+ */
+ if (is_dual_dsi && !IS_MASTER_DSI_LINK(id)) {
+ DBG("Skip bridge registration for slave DSI->id: %d\n", id);
+ return false;
+ }
+ return true;
+}
+
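A hedged sketch of the expected caller side of this new helper (the actual call site is presumably in msm_dsi_modeset_init(), which is not part of this hunk):

	/* Hypothetical caller: bail out of connector/bridge setup for
	 * the slave DSI host in a dual DSI configuration. */
	if (!msm_dsi_manager_validate_current_config(msm_dsi->id))
		return 0;	/* nothing to register for the slave */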
/* initialize bridge */
struct drm_bridge *msm_dsi_manager_bridge_init(u8 id)
{
@@ -832,6 +773,7 @@ void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags)
struct msm_drm_private *priv;
struct msm_kms *kms;
struct drm_encoder *encoder;
+ bool cmd_mode;
/*
* drm_device pointer is assigned to msm_dsi only in the modeset_init
@@ -846,10 +788,11 @@ void msm_dsi_manager_attach_dsi_device(int id, u32 device_flags)
priv = dev->dev_private;
kms = priv->kms;
encoder = msm_dsi_get_encoder(msm_dsi);
+ cmd_mode = !(device_flags &
+ MIPI_DSI_MODE_VIDEO);
if (encoder && kms->funcs->set_encoder_mode)
- if (!(device_flags & MIPI_DSI_MODE_VIDEO))
- kms->funcs->set_encoder_mode(kms, encoder, true);
+ kms->funcs->set_encoder_mode(kms, encoder, cmd_mode);
}
int msm_dsi_manager_register(struct msm_dsi *msm_dsi)
diff --git a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
index 57cf7fa7f1c4..874265314413 100644
--- a/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
+++ b/drivers/gpu/drm/msm/dsi/mmss_cc.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
index c4c37a7df637..4c03f0b7343e 100644
--- a/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
+++ b/drivers/gpu/drm/msm/dsi/pll/dsi_pll_10nm.c
@@ -798,6 +798,8 @@ struct msm_dsi_pll *msm_dsi_pll_10nm_init(struct platform_device *pdev, int id)
return ERR_PTR(-ENOMEM);
}
+ spin_lock_init(&pll_10nm->postdiv_lock);
+
pll = &pll_10nm->base;
pll->min_rate = 1000000000UL;
pll->max_rate = 3500000000UL;
diff --git a/drivers/gpu/drm/msm/dsi/sfpb.xml.h b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
index 9d4d1feaefd7..07c48ddb5301 100644
--- a/drivers/gpu/drm/msm/dsi/sfpb.xml.h
+++ b/drivers/gpu/drm/msm/dsi/sfpb.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/edp/edp.xml.h b/drivers/gpu/drm/msm/edp/edp.xml.h
index f150d4a47707..9cb6e6fe9810 100644
--- a/drivers/gpu/drm/msm/edp/edp.xml.h
+++ b/drivers/gpu/drm/msm/edp/edp.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
index ecebf8b623ab..3eff3ea3b271 100644
--- a/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/hdmi.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
index da646deedf4b..7717d4269662 100644
--- a/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
+++ b/drivers/gpu/drm/msm/hdmi/qfprom.xml.h
@@ -8,19 +8,19 @@ http://github.com/freedreno/envytools/
git clone https://github.com/freedreno/envytools.git
The rules-ng-ng source files this header was generated from are:
-- /home/robclark/src/freedreno/envytools/rnndb/msm.xml ( 676 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/dsi.xml ( 33004 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2017-05-17 13:21:27)
-- /home/robclark/src/freedreno/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2017-06-16 12:32:42)
-- /home/robclark/src/freedreno/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2017-05-17 13:21:27)
-
-Copyright (C) 2013-2017 by the following authors:
+- /home/robclark/src/envytools/rnndb/msm.xml ( 676 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/freedreno_copyright.xml ( 1572 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp4.xml ( 20915 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp_common.xml ( 2849 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/mdp/mdp5.xml ( 37411 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/dsi.xml ( 37239 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/sfpb.xml ( 602 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/dsi/mmss_cc.xml ( 1686 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/qfprom.xml ( 600 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/hdmi/hdmi.xml ( 41799 bytes, from 2018-07-03 19:37:13)
+- /home/robclark/src/envytools/rnndb/edp/edp.xml ( 10416 bytes, from 2018-07-03 19:37:13)
+
+Copyright (C) 2013-2018 by the following authors:
- Rob Clark <robdclark@gmail.com> (robclark)
- Ilia Mirkin <imirkin@alum.mit.edu> (imirkin)
diff --git a/drivers/gpu/drm/msm/msm_atomic.c b/drivers/gpu/drm/msm/msm_atomic.c
index f0635c3da7f4..c1f1779c980f 100644
--- a/drivers/gpu/drm/msm/msm_atomic.c
+++ b/drivers/gpu/drm/msm/msm_atomic.c
@@ -71,12 +71,15 @@ void msm_atomic_commit_tail(struct drm_atomic_state *state)
drm_atomic_helper_commit_modeset_enables(dev, state);
+ if (kms->funcs->commit) {
+ DRM_DEBUG_ATOMIC("triggering commit\n");
+ kms->funcs->commit(kms, state);
+ }
+
msm_atomic_wait_for_commit_done(dev, state);
kms->funcs->complete_commit(kms, state);
- drm_atomic_helper_wait_for_vblanks(dev, state);
-
drm_atomic_helper_commit_hw_done(state);
drm_atomic_helper_cleanup_planes(dev, state);
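The new kms->funcs->commit call implies a new optional hook in the KMS function table; its inferred signature (see msm_kms.h for the authoritative definition):

	/* Inferred addition to struct msm_kms_funcs; optional, hence the
	 * NULL check in msm_atomic_commit_tail() above: */
	void (*commit)(struct msm_kms *kms, struct drm_atomic_state *state);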
diff --git a/drivers/gpu/drm/msm/msm_debugfs.c b/drivers/gpu/drm/msm/msm_debugfs.c
index 1ff3fda245d1..f0da0d3c8a80 100644
--- a/drivers/gpu/drm/msm/msm_debugfs.c
+++ b/drivers/gpu/drm/msm/msm_debugfs.c
@@ -16,26 +16,101 @@
*/
#ifdef CONFIG_DEBUG_FS
+#include <linux/debugfs.h>
#include "msm_drv.h"
#include "msm_gpu.h"
#include "msm_kms.h"
#include "msm_debugfs.h"
-static int msm_gpu_show(struct drm_device *dev, struct seq_file *m)
+struct msm_gpu_show_priv {
+ struct msm_gpu_state *state;
+ struct drm_device *dev;
+};
+
+static int msm_gpu_show(struct seq_file *m, void *arg)
+{
+ struct drm_printer p = drm_seq_file_printer(m);
+ struct msm_gpu_show_priv *show_priv = m->private;
+ struct msm_drm_private *priv = show_priv->dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ int ret;
+
+ ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ drm_printf(&p, "%s Status:\n", gpu->name);
+ gpu->funcs->show(gpu, show_priv->state, &p);
+
+ mutex_unlock(&show_priv->dev->struct_mutex);
+
+ return 0;
+}
+
+static int msm_gpu_release(struct inode *inode, struct file *file)
{
+ struct seq_file *m = file->private_data;
+ struct msm_gpu_show_priv *show_priv = m->private;
+ struct msm_drm_private *priv = show_priv->dev->dev_private;
+ struct msm_gpu *gpu = priv->gpu;
+ int ret;
+
+ ret = mutex_lock_interruptible(&show_priv->dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ gpu->funcs->gpu_state_put(show_priv->state);
+ mutex_unlock(&show_priv->dev->struct_mutex);
+
+ kfree(show_priv);
+
+ return single_release(inode, file);
+}
+
+static int msm_gpu_open(struct inode *inode, struct file *file)
+{
+ struct drm_device *dev = inode->i_private;
struct msm_drm_private *priv = dev->dev_private;
struct msm_gpu *gpu = priv->gpu;
+ struct msm_gpu_show_priv *show_priv;
+ int ret;
- if (gpu) {
- seq_printf(m, "%s Status:\n", gpu->name);
- pm_runtime_get_sync(&gpu->pdev->dev);
- gpu->funcs->show(gpu, m);
- pm_runtime_put_sync(&gpu->pdev->dev);
+ if (!gpu)
+ return -ENODEV;
+
+ show_priv = kmalloc(sizeof(*show_priv), GFP_KERNEL);
+ if (!show_priv)
+ return -ENOMEM;
+
+ ret = mutex_lock_interruptible(&dev->struct_mutex);
+ if (ret)
+ return ret;
+
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ show_priv->state = gpu->funcs->gpu_state_get(gpu);
+ pm_runtime_put_sync(&gpu->pdev->dev);
+
+ mutex_unlock(&dev->struct_mutex);
+
+ if (IS_ERR(show_priv->state)) {
+ ret = PTR_ERR(show_priv->state);
+ kfree(show_priv);
+ return ret;
}
- return 0;
+ show_priv->dev = dev;
+
+ return single_open(file, msm_gpu_show, show_priv);
}
+static const struct file_operations msm_gpu_fops = {
+ .owner = THIS_MODULE,
+ .open = msm_gpu_open,
+ .read = seq_read,
+ .llseek = seq_lseek,
+ .release = msm_gpu_release,
+};
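The fops above lean on two new per-GPU callbacks that this hunk only calls; their assumed shape in struct msm_gpu_funcs, inferred from the call sites (see msm_gpu.h for the real definition):

	/* Inferred additions to struct msm_gpu_funcs: */
	struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
	int (*gpu_state_put)(struct msm_gpu_state *state);
	/* ->show() now prints a captured state through a drm_printer: */
	void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
		     struct drm_printer *p);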
+
static int msm_gem_show(struct drm_device *dev, struct seq_file *m)
{
struct msm_drm_private *priv = dev->dev_private;
@@ -105,7 +180,6 @@ static int show_locked(struct seq_file *m, void *arg)
}
static struct drm_info_list msm_debugfs_list[] = {
- {"gpu", show_locked, 0, msm_gpu_show},
{"gem", show_locked, 0, msm_gem_show},
{ "mm", show_locked, 0, msm_mm_show },
{ "fb", show_locked, 0, msm_fb_show },
@@ -158,6 +232,9 @@ int msm_debugfs_init(struct drm_minor *minor)
return ret;
}
+ debugfs_create_file("gpu", S_IRUSR, minor->debugfs_root,
+ dev, &msm_gpu_fops);
+
if (priv->kms->funcs->debugfs_init) {
ret = priv->kms->funcs->debugfs_init(priv->kms, minor);
if (ret)
diff --git a/drivers/gpu/drm/msm/msm_drv.c b/drivers/gpu/drm/msm/msm_drv.c
index 021a0b6f9a59..c1abad8a8612 100644
--- a/drivers/gpu/drm/msm/msm_drv.c
+++ b/drivers/gpu/drm/msm/msm_drv.c
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -15,6 +16,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/kthread.h>
+#include <uapi/linux/sched/types.h>
#include <drm/drm_of.h>
#include "msm_drv.h"
@@ -78,6 +81,63 @@ module_param(modeset, bool, 0600);
* Util/helpers:
*/
+int msm_clk_bulk_get(struct device *dev, struct clk_bulk_data **bulk)
+{
+ struct property *prop;
+ const char *name;
+ struct clk_bulk_data *local;
+ int i = 0, ret, count;
+
+ count = of_property_count_strings(dev->of_node, "clock-names");
+ if (count < 1)
+ return 0;
+
+	local = devm_kcalloc(dev, count, sizeof(struct clk_bulk_data),
+			GFP_KERNEL);
+ if (!local)
+ return -ENOMEM;
+
+ of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
+ local[i].id = devm_kstrdup(dev, name, GFP_KERNEL);
+ if (!local[i].id) {
+ devm_kfree(dev, local);
+ return -ENOMEM;
+ }
+
+ i++;
+ }
+
+ ret = devm_clk_bulk_get(dev, count, local);
+
+ if (ret) {
+ for (i = 0; i < count; i++)
+ devm_kfree(dev, (void *) local[i].id);
+ devm_kfree(dev, local);
+
+ return ret;
+ }
+
+ *bulk = local;
+ return count;
+}
+
+struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
+ const char *name)
+{
+ int i;
+ char n[32];
+
+ snprintf(n, sizeof(n), "%s_clk", name);
+
+ for (i = 0; bulk && i < count; i++) {
+ if (!strcmp(bulk[i].id, name) || !strcmp(bulk[i].id, n))
+ return bulk[i].clk;
+ }
+
+ return NULL;
+}
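A hedged usage sketch of the two helpers above (hypothetical caller; "iface" is an example clock name, not taken from this patch):

	struct clk_bulk_data *bulk;
	struct clk *iface;
	int nclks;

	nclks = msm_clk_bulk_get(&pdev->dev, &bulk);
	if (nclks < 0)
		return nclks;

	/* matches a DT entry named either "iface" or legacy "iface_clk" */
	iface = msm_clk_bulk_get_clock(bulk, nclks, "iface");
	if (!iface)
		return -ENODEV;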
+
struct clk *msm_clk_get(struct platform_device *pdev, const char *name)
{
struct clk *clk;
@@ -149,7 +209,7 @@ struct vblank_event {
bool enable;
};
-static void vblank_ctrl_worker(struct work_struct *work)
+static void vblank_ctrl_worker(struct kthread_work *work)
{
struct msm_vblank_ctrl *vbl_ctrl = container_of(work,
struct msm_vblank_ctrl, work);
@@ -197,7 +257,8 @@ static int vblank_ctrl_queue_work(struct msm_drm_private *priv,
list_add_tail(&vbl_ev->node, &vbl_ctrl->event_list);
spin_unlock_irqrestore(&vbl_ctrl->lock, flags);
- queue_work(priv->wq, &vbl_ctrl->work);
+ kthread_queue_work(&priv->disp_thread[crtc_id].worker,
+ &vbl_ctrl->work);
return 0;
}
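The conversion from a workqueue to per-CRTC kthread workers follows the standard kthread_worker pattern: work runs on a known task that can be given a realtime scheduling policy, which a shared workqueue cannot. A self-contained sketch of that pattern (generic, not msm-specific):

	#include <linux/kthread.h>

	static struct kthread_worker worker;
	static struct task_struct *task;
	static struct kthread_work work;

	static void work_fn(struct kthread_work *w)
	{
		/* ... handle one queued item ... */
	}

	static int start_worker(void)
	{
		kthread_init_worker(&worker);
		kthread_init_work(&work, work_fn);
		task = kthread_run(kthread_worker_fn, &worker, "my-worker");
		if (IS_ERR(task))
			return PTR_ERR(task);
		kthread_queue_work(&worker, &work);	/* hand off work */
		return 0;
	}

	static void stop_worker(void)
	{
		kthread_flush_worker(&worker);	/* drain pending work */
		kthread_stop(task);
	}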
@@ -208,19 +269,36 @@ static int msm_drm_uninit(struct device *dev)
struct drm_device *ddev = platform_get_drvdata(pdev);
struct msm_drm_private *priv = ddev->dev_private;
struct msm_kms *kms = priv->kms;
+ struct msm_mdss *mdss = priv->mdss;
struct msm_vblank_ctrl *vbl_ctrl = &priv->vblank_ctrl;
struct vblank_event *vbl_ev, *tmp;
+ int i;
/* We must cancel and cleanup any pending vblank enable/disable
* work before drm_irq_uninstall() to avoid work re-enabling an
* irq after uninstall has disabled it.
*/
- cancel_work_sync(&vbl_ctrl->work);
+ kthread_flush_work(&vbl_ctrl->work);
list_for_each_entry_safe(vbl_ev, tmp, &vbl_ctrl->event_list, node) {
list_del(&vbl_ev->node);
kfree(vbl_ev);
}
+ /* clean up display commit/event worker threads */
+ for (i = 0; i < priv->num_crtcs; i++) {
+ if (priv->disp_thread[i].thread) {
+ kthread_flush_worker(&priv->disp_thread[i].worker);
+ kthread_stop(priv->disp_thread[i].thread);
+ priv->disp_thread[i].thread = NULL;
+ }
+
+ if (priv->event_thread[i].thread) {
+ kthread_flush_worker(&priv->event_thread[i].worker);
+ kthread_stop(priv->event_thread[i].thread);
+ priv->event_thread[i].thread = NULL;
+ }
+ }
+
msm_gem_shrinker_cleanup(ddev);
drm_kms_helper_poll_fini(ddev);
@@ -243,9 +321,6 @@ static int msm_drm_uninit(struct device *dev)
flush_workqueue(priv->wq);
destroy_workqueue(priv->wq);
- flush_workqueue(priv->atomic_wq);
- destroy_workqueue(priv->atomic_wq);
-
if (kms && kms->funcs)
kms->funcs->destroy(kms);
@@ -258,7 +333,8 @@ static int msm_drm_uninit(struct device *dev)
component_unbind_all(dev, ddev);
- msm_mdss_destroy(ddev);
+ if (mdss && mdss->funcs)
+ mdss->funcs->destroy(ddev);
ddev->dev_private = NULL;
drm_dev_unref(ddev);
@@ -268,6 +344,10 @@ static int msm_drm_uninit(struct device *dev)
return 0;
}
+#define KMS_MDP4 4
+#define KMS_MDP5 5
+#define KMS_DPU 3
+
static int get_mdp_ver(struct platform_device *pdev)
{
struct device *dev = &pdev->dev;
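The rest of get_mdp_ver() is elided by the hunk context; given that the dt_match table further down stores the KMS version in .data, the body presumably reduces to of_device_get_match_data(). A sketch under that assumption:

	/* Presumed remainder, inferred from the dt_match .data values below: */
	return (int) (unsigned long) of_device_get_match_data(dev);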
@@ -357,7 +437,9 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
struct drm_device *ddev;
struct msm_drm_private *priv;
struct msm_kms *kms;
- int ret;
+ struct msm_mdss *mdss;
+ int ret, i;
+ struct sched_param param;
ddev = drm_dev_alloc(drv, dev);
if (IS_ERR(ddev)) {
@@ -369,53 +451,61 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
priv = kzalloc(sizeof(*priv), GFP_KERNEL);
if (!priv) {
- drm_dev_unref(ddev);
- return -ENOMEM;
+ ret = -ENOMEM;
+ goto err_unref_drm_dev;
}
ddev->dev_private = priv;
priv->dev = ddev;
- ret = msm_mdss_init(ddev);
- if (ret) {
- kfree(priv);
- drm_dev_unref(ddev);
- return ret;
+ switch (get_mdp_ver(pdev)) {
+ case KMS_MDP5:
+ ret = mdp5_mdss_init(ddev);
+ break;
+ case KMS_DPU:
+ ret = dpu_mdss_init(ddev);
+ break;
+ default:
+ ret = 0;
+ break;
}
+ if (ret)
+ goto err_free_priv;
+
+ mdss = priv->mdss;
priv->wq = alloc_ordered_workqueue("msm", 0);
- priv->atomic_wq = alloc_ordered_workqueue("msm:atomic", 0);
INIT_LIST_HEAD(&priv->inactive_list);
INIT_LIST_HEAD(&priv->vblank_ctrl.event_list);
- INIT_WORK(&priv->vblank_ctrl.work, vblank_ctrl_worker);
+ kthread_init_work(&priv->vblank_ctrl.work, vblank_ctrl_worker);
spin_lock_init(&priv->vblank_ctrl.lock);
drm_mode_config_init(ddev);
/* Bind all our sub-components: */
ret = component_bind_all(dev, ddev);
- if (ret) {
- msm_mdss_destroy(ddev);
- kfree(priv);
- drm_dev_unref(ddev);
- return ret;
- }
+ if (ret)
+ goto err_destroy_mdss;
ret = msm_init_vram(ddev);
if (ret)
- goto fail;
+ goto err_msm_uninit;
msm_gem_shrinker_init(ddev);
switch (get_mdp_ver(pdev)) {
- case 4:
+ case KMS_MDP4:
kms = mdp4_kms_init(ddev);
priv->kms = kms;
break;
- case 5:
+ case KMS_MDP5:
kms = mdp5_kms_init(ddev);
break;
+ case KMS_DPU:
+ kms = dpu_kms_init(ddev);
+ priv->kms = kms;
+ break;
default:
kms = ERR_PTR(-ENODEV);
break;
@@ -430,24 +520,100 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
*/
dev_err(dev, "failed to load kms\n");
ret = PTR_ERR(kms);
- goto fail;
+ goto err_msm_uninit;
}
+ /* Enable normalization of plane zpos */
+ ddev->mode_config.normalize_zpos = true;
+
if (kms) {
ret = kms->funcs->hw_init(kms);
if (ret) {
dev_err(dev, "kms hw init failed: %d\n", ret);
- goto fail;
+ goto err_msm_uninit;
}
}
ddev->mode_config.funcs = &mode_config_funcs;
ddev->mode_config.helper_private = &mode_config_helper_funcs;
+	/*
+	 * This priority was found during empirical testing to provide
+	 * appropriate realtime scheduling for display updates while
+	 * interacting with other realtime and normal-priority tasks.
+	 */
+ param.sched_priority = 16;
+ for (i = 0; i < priv->num_crtcs; i++) {
+
+ /* initialize display thread */
+ priv->disp_thread[i].crtc_id = priv->crtcs[i]->base.id;
+ kthread_init_worker(&priv->disp_thread[i].worker);
+ priv->disp_thread[i].dev = ddev;
+ priv->disp_thread[i].thread =
+ kthread_run(kthread_worker_fn,
+ &priv->disp_thread[i].worker,
+ "crtc_commit:%d", priv->disp_thread[i].crtc_id);
+	if (IS_ERR(priv->disp_thread[i].thread)) {
+		dev_err(dev, "failed to create crtc_commit kthread\n");
+		priv->disp_thread[i].thread = NULL;
+	} else {
+		ret = sched_setscheduler(priv->disp_thread[i].thread,
+			SCHED_FIFO, &param);
+		if (ret)
+			pr_warn("display thread priority update failed: %d\n",
+				ret);
+	}
+
+ /* initialize event thread */
+ priv->event_thread[i].crtc_id = priv->crtcs[i]->base.id;
+ kthread_init_worker(&priv->event_thread[i].worker);
+ priv->event_thread[i].dev = ddev;
+ priv->event_thread[i].thread =
+ kthread_run(kthread_worker_fn,
+ &priv->event_thread[i].worker,
+ "crtc_event:%d", priv->event_thread[i].crtc_id);
+	/*
+	 * The event thread should run at the same priority as disp_thread
+	 * because it handles frame_done events. A lower priority event
+	 * thread combined with a higher priority disp_thread can push the
+	 * frame_pending counter beyond 2, which can lead to commit
+	 * failures at the CRTC commit level.
+	 */
+	if (IS_ERR(priv->event_thread[i].thread)) {
+		dev_err(dev, "failed to create crtc_event kthread\n");
+		priv->event_thread[i].thread = NULL;
+	} else {
+		ret = sched_setscheduler(priv->event_thread[i].thread,
+			SCHED_FIFO, &param);
+		if (ret)
+			pr_warn("display event thread priority update failed: %d\n",
+				ret);
+	}
+
+ if ((!priv->disp_thread[i].thread) ||
+ !priv->event_thread[i].thread) {
+ /* clean up previously created threads if any */
+ for ( ; i >= 0; i--) {
+ if (priv->disp_thread[i].thread) {
+ kthread_stop(
+ priv->disp_thread[i].thread);
+ priv->disp_thread[i].thread = NULL;
+ }
+
+ if (priv->event_thread[i].thread) {
+ kthread_stop(
+ priv->event_thread[i].thread);
+ priv->event_thread[i].thread = NULL;
+ }
+ }
+ goto err_msm_uninit;
+ }
+ }
+
ret = drm_vblank_init(ddev, priv->num_crtcs);
if (ret < 0) {
dev_err(dev, "failed to initialize vblank\n");
- goto fail;
+ goto err_msm_uninit;
}
if (kms) {
@@ -456,13 +622,13 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
pm_runtime_put_sync(dev);
if (ret < 0) {
dev_err(dev, "failed to install IRQ handler\n");
- goto fail;
+ goto err_msm_uninit;
}
}
ret = drm_dev_register(ddev, 0);
if (ret)
- goto fail;
+ goto err_msm_uninit;
drm_mode_config_reset(ddev);
@@ -473,15 +639,23 @@ static int msm_drm_init(struct device *dev, struct drm_driver *drv)
ret = msm_debugfs_late_init(ddev);
if (ret)
- goto fail;
+ goto err_msm_uninit;
drm_kms_helper_poll_init(ddev);
return 0;
-fail:
+err_msm_uninit:
msm_drm_uninit(dev);
return ret;
+err_destroy_mdss:
+ if (mdss && mdss->funcs)
+ mdss->funcs->destroy(ddev);
+err_free_priv:
+ kfree(priv);
+err_unref_drm_dev:
+ drm_dev_unref(ddev);
+ return ret;
}
/*
@@ -894,16 +1068,35 @@ static struct drm_driver msm_driver = {
static int msm_pm_suspend(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
+ struct msm_drm_private *priv = ddev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ /* TODO: Use atomic helper suspend/resume */
+ if (kms && kms->funcs && kms->funcs->pm_suspend)
+ return kms->funcs->pm_suspend(dev);
drm_kms_helper_poll_disable(ddev);
+ priv->pm_state = drm_atomic_helper_suspend(ddev);
+ if (IS_ERR(priv->pm_state)) {
+ drm_kms_helper_poll_enable(ddev);
+ return PTR_ERR(priv->pm_state);
+ }
+
return 0;
}
static int msm_pm_resume(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
+ struct msm_drm_private *priv = ddev->dev_private;
+ struct msm_kms *kms = priv->kms;
+
+ /* TODO: Use atomic helper suspend/resume */
+ if (kms && kms->funcs && kms->funcs->pm_resume)
+ return kms->funcs->pm_resume(dev);
+ drm_atomic_helper_resume(ddev, priv->pm_state);
drm_kms_helper_poll_enable(ddev);
return 0;
@@ -915,11 +1108,12 @@ static int msm_runtime_suspend(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct msm_drm_private *priv = ddev->dev_private;
+ struct msm_mdss *mdss = priv->mdss;
DBG("");
- if (priv->mdss)
- return msm_mdss_disable(priv->mdss);
+ if (mdss && mdss->funcs)
+ return mdss->funcs->disable(mdss);
return 0;
}
@@ -928,11 +1122,12 @@ static int msm_runtime_resume(struct device *dev)
{
struct drm_device *ddev = dev_get_drvdata(dev);
struct msm_drm_private *priv = ddev->dev_private;
+ struct msm_mdss *mdss = priv->mdss;
DBG("");
- if (priv->mdss)
- return msm_mdss_enable(priv->mdss);
+ if (mdss && mdss->funcs)
+ return mdss->funcs->enable(mdss);
return 0;
}
@@ -1031,12 +1226,13 @@ static int add_display_components(struct device *dev,
int ret;
/*
- * MDP5 based devices don't have a flat hierarchy. There is a top level
- * parent: MDSS, and children: MDP5, DSI, HDMI, eDP etc. Populate the
- * children devices, find the MDP5 node, and then add the interfaces
- * to our components list.
+ * MDP5/DPU based devices don't have a flat hierarchy. There is a top
+ * level parent: MDSS, and children: MDP5/DPU, DSI, HDMI, eDP etc.
+ * Populate the children devices, find the MDP5/DPU node, and then add
+ * the interfaces to our components list.
*/
- if (of_device_is_compatible(dev->of_node, "qcom,mdss")) {
+ if (of_device_is_compatible(dev->of_node, "qcom,mdss") ||
+ of_device_is_compatible(dev->of_node, "qcom,sdm845-mdss")) {
ret = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (ret) {
dev_err(dev, "failed to populate children devices\n");
@@ -1146,8 +1342,9 @@ static int msm_pdev_remove(struct platform_device *pdev)
}
static const struct of_device_id dt_match[] = {
- { .compatible = "qcom,mdp4", .data = (void *)4 }, /* MDP4 */
- { .compatible = "qcom,mdss", .data = (void *)5 }, /* MDP5 MDSS */
+ { .compatible = "qcom,mdp4", .data = (void *)KMS_MDP4 },
+ { .compatible = "qcom,mdss", .data = (void *)KMS_MDP5 },
+ { .compatible = "qcom,sdm845-mdss", .data = (void *)KMS_DPU },
{}
};
MODULE_DEVICE_TABLE(of, dt_match);
@@ -1169,6 +1366,7 @@ static int __init msm_drm_register(void)
DBG("init");
msm_mdp_register();
+ msm_dpu_register();
msm_dsi_register();
msm_edp_register();
msm_hdmi_register();
@@ -1185,6 +1383,7 @@ static void __exit msm_drm_unregister(void)
msm_edp_unregister();
msm_dsi_unregister();
msm_mdp_unregister();
+ msm_dpu_unregister();
}
module_init(msm_drm_register);
diff --git a/drivers/gpu/drm/msm/msm_drv.h b/drivers/gpu/drm/msm/msm_drv.h
index b2da1fbf81e0..8e510d5c758a 100644
--- a/drivers/gpu/drm/msm/msm_drv.h
+++ b/drivers/gpu/drm/msm/msm_drv.h
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -33,6 +34,7 @@
#include <linux/of_graph.h>
#include <linux/of_device.h>
#include <asm/sizes.h>
+#include <linux/kthread.h>
#include <drm/drmP.h>
#include <drm/drm_atomic.h>
@@ -54,6 +56,12 @@ struct msm_fence_context;
struct msm_gem_address_space;
struct msm_gem_vma;
+#define MAX_CRTCS 8
+#define MAX_PLANES 20
+#define MAX_ENCODERS 8
+#define MAX_BRIDGES 8
+#define MAX_CONNECTORS 8
+
struct msm_file_private {
rwlock_t queuelock;
struct list_head submitqueues;
@@ -68,12 +76,77 @@ enum msm_mdp_plane_property {
};
struct msm_vblank_ctrl {
- struct work_struct work;
+ struct kthread_work work;
struct list_head event_list;
spinlock_t lock;
};
#define MSM_GPU_MAX_RINGS 4
+#define MAX_H_TILES_PER_DISPLAY 2
+
+/**
+ * enum msm_display_caps - features/capabilities supported by displays
+ * @MSM_DISPLAY_CAP_VID_MODE: Video or "active" mode supported
+ * @MSM_DISPLAY_CAP_CMD_MODE: Command mode supported
+ * @MSM_DISPLAY_CAP_HOT_PLUG: Hot plug detection supported
+ * @MSM_DISPLAY_CAP_EDID: EDID supported
+ */
+enum msm_display_caps {
+ MSM_DISPLAY_CAP_VID_MODE = BIT(0),
+ MSM_DISPLAY_CAP_CMD_MODE = BIT(1),
+ MSM_DISPLAY_CAP_HOT_PLUG = BIT(2),
+ MSM_DISPLAY_CAP_EDID = BIT(3),
+};
+
+/**
+ * enum msm_event_wait - type of HW events to wait for
+ * @MSM_ENC_COMMIT_DONE - wait for the driver to flush the registers to HW
+ * @MSM_ENC_TX_COMPLETE - wait for the HW to transfer the frame to panel
+ * @MSM_ENC_VBLANK - wait for the HW VBLANK event (for driver-internal waiters)
+ */
+enum msm_event_wait {
+ MSM_ENC_COMMIT_DONE = 0,
+ MSM_ENC_TX_COMPLETE,
+ MSM_ENC_VBLANK,
+};
+
+/**
+ * struct msm_display_topology - defines a display topology pipeline
+ * @num_lm: number of layer mixers used
+ * @num_enc: number of compression encoder blocks used
+ * @num_intf: number of interfaces the panel is mounted on
+ */
+struct msm_display_topology {
+ u32 num_lm;
+ u32 num_enc;
+ u32 num_intf;
+};
+
+/**
+ * struct msm_display_info - defines display properties
+ * @intf_type: DRM_MODE_CONNECTOR_ display type
+ * @capabilities: Bitmask of display flags
+ * @num_of_h_tiles: Number of horizontal tiles in case of split interface
+ * @h_tile_instance: Controller instance used per tile. Number of elements is
+ * based on num_of_h_tiles
+ * @is_te_using_watchdog_timer: Boolean to indicate watchdog TE is
+ * used instead of panel TE in cmd mode panels
+ */
+struct msm_display_info {
+ int intf_type;
+ uint32_t capabilities;
+ uint32_t num_of_h_tiles;
+ uint32_t h_tile_instance[MAX_H_TILES_PER_DISPLAY];
+ bool is_te_using_watchdog_timer;
+};
+
+/* Commit/Event thread specific structure */
+struct msm_drm_thread {
+ struct drm_device *dev;
+ struct task_struct *thread;
+ unsigned int crtc_id;
+ struct kthread_worker worker;
+};
struct msm_drm_private {
@@ -84,7 +157,7 @@ struct msm_drm_private {
/* subordinate devices, if present: */
struct platform_device *gpu_pdev;
- /* top level MDSS wrapper device (for MDP5 only) */
+ /* top level MDSS wrapper device (for MDP5/DPU only) */
struct msm_mdss *mdss;
/* possibly this should be in the kms component, but it is
@@ -115,22 +188,24 @@ struct msm_drm_private {
struct list_head inactive_list;
struct workqueue_struct *wq;
- struct workqueue_struct *atomic_wq;
unsigned int num_planes;
- struct drm_plane *planes[16];
+ struct drm_plane *planes[MAX_PLANES];
unsigned int num_crtcs;
- struct drm_crtc *crtcs[8];
+ struct drm_crtc *crtcs[MAX_CRTCS];
+
+ struct msm_drm_thread disp_thread[MAX_CRTCS];
+ struct msm_drm_thread event_thread[MAX_CRTCS];
unsigned int num_encoders;
- struct drm_encoder *encoders[8];
+ struct drm_encoder *encoders[MAX_ENCODERS];
unsigned int num_bridges;
- struct drm_bridge *bridges[8];
+ struct drm_bridge *bridges[MAX_BRIDGES];
unsigned int num_connectors;
- struct drm_connector *connectors[8];
+ struct drm_connector *connectors[MAX_CONNECTORS];
/* Properties */
struct drm_property *plane_property[PLANE_PROP_MAX_NUM];
@@ -150,6 +225,7 @@ struct msm_drm_private {
struct shrinker shrinker;
struct msm_vblank_ctrl vblank_ctrl;
+ struct drm_atomic_state *pm_state;
};
struct msm_format {
@@ -174,6 +250,9 @@ struct msm_gem_address_space *
msm_gem_address_space_create(struct device *dev, struct iommu_domain *domain,
const char *name);
+int msm_register_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+void msm_unregister_mmu(struct drm_device *dev, struct msm_mmu *mmu);
+
void msm_gem_submit_free(struct msm_gem_submit *submit);
int msm_ioctl_gem_submit(struct drm_device *dev, void *data,
struct drm_file *file);
@@ -184,7 +263,7 @@ void msm_gem_shrinker_cleanup(struct drm_device *dev);
int msm_gem_mmap_obj(struct drm_gem_object *obj,
struct vm_area_struct *vma);
int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma);
-int msm_gem_fault(struct vm_fault *vmf);
+vm_fault_t msm_gem_fault(struct vm_fault *vmf);
uint64_t msm_gem_mmap_offset(struct drm_gem_object *obj);
int msm_gem_get_iova(struct drm_gem_object *obj,
struct msm_gem_address_space *aspace, uint64_t *iova);
@@ -285,6 +364,8 @@ static inline int msm_dsi_modeset_init(struct msm_dsi *msm_dsi,
void __init msm_mdp_register(void);
void __exit msm_mdp_unregister(void);
+void __init msm_dpu_register(void);
+void __exit msm_dpu_unregister(void);
#ifdef CONFIG_DEBUG_FS
void msm_gem_describe(struct drm_gem_object *obj, struct seq_file *m);
@@ -306,6 +387,10 @@ static inline void msm_perf_debugfs_cleanup(struct msm_drm_private *priv) {}
#endif
struct clk *msm_clk_get(struct platform_device *pdev, const char *name);
+int msm_clk_bulk_get(struct device *dev, struct clk_bulk_data **bulk);
+
+struct clk *msm_clk_bulk_get_clock(struct clk_bulk_data *bulk, int count,
+ const char *name);
void __iomem *msm_ioremap(struct platform_device *pdev, const char *name,
const char *dbgname);
void msm_writel(u32 data, void __iomem *addr);
diff --git a/drivers/gpu/drm/msm/msm_fence.c b/drivers/gpu/drm/msm/msm_fence.c
index 349c12f670eb..77263cf97b20 100644
--- a/drivers/gpu/drm/msm/msm_fence.c
+++ b/drivers/gpu/drm/msm/msm_fence.c
@@ -119,11 +119,6 @@ static const char *msm_fence_get_timeline_name(struct dma_fence *fence)
return f->fctx->name;
}
-static bool msm_fence_enable_signaling(struct dma_fence *fence)
-{
- return true;
-}
-
static bool msm_fence_signaled(struct dma_fence *fence)
{
struct msm_fence *f = to_msm_fence(fence);
@@ -133,10 +128,7 @@ static bool msm_fence_signaled(struct dma_fence *fence)
static const struct dma_fence_ops msm_fence_ops = {
.get_driver_name = msm_fence_get_driver_name,
.get_timeline_name = msm_fence_get_timeline_name,
- .enable_signaling = msm_fence_enable_signaling,
.signaled = msm_fence_signaled,
- .wait = dma_fence_default_wait,
- .release = dma_fence_free,
};
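The removed callbacks are optional in the dma_fence core by this point: a NULL .wait falls back to dma_fence_default_wait(), a NULL .release falls back to dma_fence_free(), and a NULL .enable_signaling is treated as always succeeding. A sketch of that last fallback (paraphrased, assuming ~4.18 core semantics; see drivers/dma-buf/dma-fence.c):

	/* Paraphrased core behavior, not code from this patch: */
	static bool fence_enable_signaling(struct dma_fence *fence)
	{
		return !fence->ops->enable_signaling ||
		       fence->ops->enable_signaling(fence);
	}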
struct dma_fence *
diff --git a/drivers/gpu/drm/msm/msm_gem.c b/drivers/gpu/drm/msm/msm_gem.c
index f583bb4222f9..f59ca27a4a35 100644
--- a/drivers/gpu/drm/msm/msm_gem.c
+++ b/drivers/gpu/drm/msm/msm_gem.c
@@ -219,7 +219,7 @@ int msm_gem_mmap(struct file *filp, struct vm_area_struct *vma)
return msm_gem_mmap_obj(vma->vm_private_data, vma);
}
-int msm_gem_fault(struct vm_fault *vmf)
+vm_fault_t msm_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct drm_gem_object *obj = vma->vm_private_data;
@@ -227,15 +227,18 @@ int msm_gem_fault(struct vm_fault *vmf)
struct page **pages;
unsigned long pfn;
pgoff_t pgoff;
- int ret;
+ int err;
+ vm_fault_t ret;
/*
* vm_ops.open/drm_gem_mmap_obj and close get and put
* a reference on obj. So, we dont need to hold one here.
*/
- ret = mutex_lock_interruptible(&msm_obj->lock);
- if (ret)
+ err = mutex_lock_interruptible(&msm_obj->lock);
+ if (err) {
+ ret = VM_FAULT_NOPAGE;
goto out;
+ }
if (WARN_ON(msm_obj->madv != MSM_MADV_WILLNEED)) {
mutex_unlock(&msm_obj->lock);
@@ -245,7 +248,7 @@ int msm_gem_fault(struct vm_fault *vmf)
/* make sure we have pages attached now */
pages = get_pages(obj);
if (IS_ERR(pages)) {
- ret = PTR_ERR(pages);
+ ret = vmf_error(PTR_ERR(pages));
goto out_unlock;
}
@@ -257,27 +260,11 @@ int msm_gem_fault(struct vm_fault *vmf)
VERB("Inserting %p pfn %lx, pa %lx", (void *)vmf->address,
pfn, pfn << PAGE_SHIFT);
- ret = vm_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
-
+ ret = vmf_insert_mixed(vma, vmf->address, __pfn_to_pfn_t(pfn, PFN_DEV));
out_unlock:
mutex_unlock(&msm_obj->lock);
out:
- switch (ret) {
- case -EAGAIN:
- case 0:
- case -ERESTARTSYS:
- case -EINTR:
- case -EBUSY:
- /*
- * EBUSY is ok: this just means that another thread
- * already did the job.
- */
- return VM_FAULT_NOPAGE;
- case -ENOMEM:
- return VM_FAULT_OOM;
- default:
- return VM_FAULT_SIGBUS;
- }
+ return ret;
}
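vmf_error() collapses the old hand-rolled errno switch; its behavior at the time this was written (a reference sketch of include/linux/mm.h, not code from this patch) is roughly:

	/* Only -ENOMEM gets a dedicated fault code; everything else
	 * becomes SIGBUS. */
	static inline vm_fault_t vmf_error(int err)
	{
		if (err == -ENOMEM)
			return VM_FAULT_OOM;
		return VM_FAULT_SIGBUS;
	}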
/** get mmap offset */
diff --git a/drivers/gpu/drm/msm/msm_gpu.c b/drivers/gpu/drm/msm/msm_gpu.c
index 1c09acfb4028..5e808cfec345 100644
--- a/drivers/gpu/drm/msm/msm_gpu.c
+++ b/drivers/gpu/drm/msm/msm_gpu.c
@@ -20,10 +20,11 @@
#include "msm_mmu.h"
#include "msm_fence.h"
+#include <generated/utsrelease.h>
#include <linux/string_helpers.h>
#include <linux/pm_opp.h>
#include <linux/devfreq.h>
-
+#include <linux/devcoredump.h>
/*
* Power Management:
@@ -87,7 +88,7 @@ static struct devfreq_dev_profile msm_devfreq_profile = {
static void msm_devfreq_init(struct msm_gpu *gpu)
{
/* We need target support to do devfreq */
- if (!gpu->funcs->gpu_busy)
+ if (!gpu->funcs->gpu_busy || !gpu->core_clk)
return;
msm_devfreq_profile.initial_freq = gpu->fast_rate;
@@ -141,8 +142,6 @@ static int disable_pwrrail(struct msm_gpu *gpu)
static int enable_clk(struct msm_gpu *gpu)
{
- int i;
-
if (gpu->core_clk && gpu->fast_rate)
clk_set_rate(gpu->core_clk, gpu->fast_rate);
@@ -150,28 +149,12 @@ static int enable_clk(struct msm_gpu *gpu)
if (gpu->rbbmtimer_clk)
clk_set_rate(gpu->rbbmtimer_clk, 19200000);
- for (i = gpu->nr_clocks - 1; i >= 0; i--)
- if (gpu->grp_clks[i])
- clk_prepare(gpu->grp_clks[i]);
-
- for (i = gpu->nr_clocks - 1; i >= 0; i--)
- if (gpu->grp_clks[i])
- clk_enable(gpu->grp_clks[i]);
-
- return 0;
+ return clk_bulk_prepare_enable(gpu->nr_clocks, gpu->grp_clks);
}
static int disable_clk(struct msm_gpu *gpu)
{
- int i;
-
- for (i = gpu->nr_clocks - 1; i >= 0; i--)
- if (gpu->grp_clks[i])
- clk_disable(gpu->grp_clks[i]);
-
- for (i = gpu->nr_clocks - 1; i >= 0; i--)
- if (gpu->grp_clks[i])
- clk_unprepare(gpu->grp_clks[i]);
+ clk_bulk_disable_unprepare(gpu->nr_clocks, gpu->grp_clks);
/*
* Set the clock to a deliberately low rate. On older targets the clock
@@ -273,6 +256,123 @@ int msm_gpu_hw_init(struct msm_gpu *gpu)
return ret;
}
+#ifdef CONFIG_DEV_COREDUMP
+static ssize_t msm_gpu_devcoredump_read(char *buffer, loff_t offset,
+ size_t count, void *data, size_t datalen)
+{
+ struct msm_gpu *gpu = data;
+ struct drm_print_iterator iter;
+ struct drm_printer p;
+ struct msm_gpu_state *state;
+
+ state = msm_gpu_crashstate_get(gpu);
+ if (!state)
+ return 0;
+
+ iter.data = buffer;
+ iter.offset = 0;
+ iter.start = offset;
+ iter.remain = count;
+
+ p = drm_coredump_printer(&iter);
+
+ drm_printf(&p, "---\n");
+ drm_printf(&p, "kernel: " UTS_RELEASE "\n");
+ drm_printf(&p, "module: " KBUILD_MODNAME "\n");
+ drm_printf(&p, "time: %lld.%09ld\n",
+ state->time.tv_sec, state->time.tv_nsec);
+ if (state->comm)
+ drm_printf(&p, "comm: %s\n", state->comm);
+ if (state->cmd)
+ drm_printf(&p, "cmdline: %s\n", state->cmd);
+
+ gpu->funcs->show(gpu, state, &p);
+
+ msm_gpu_crashstate_put(gpu);
+
+ return count - iter.remain;
+}
+
+static void msm_gpu_devcoredump_free(void *data)
+{
+ struct msm_gpu *gpu = data;
+
+ msm_gpu_crashstate_put(gpu);
+}
+
+static void msm_gpu_crashstate_get_bo(struct msm_gpu_state *state,
+ struct msm_gem_object *obj, u64 iova, u32 flags)
+{
+ struct msm_gpu_state_bo *state_bo = &state->bos[state->nr_bos];
+
+ /* Don't record write only objects */
+
+ state_bo->size = obj->base.size;
+ state_bo->iova = iova;
+
+ /* Only store the data for buffer objects marked for read */
+ if ((flags & MSM_SUBMIT_BO_READ)) {
+ void *ptr;
+
+ state_bo->data = kvmalloc(obj->base.size, GFP_KERNEL);
+ if (!state_bo->data)
+ return;
+
+ ptr = msm_gem_get_vaddr_active(&obj->base);
+ if (IS_ERR(ptr)) {
+ kvfree(state_bo->data);
+ return;
+ }
+
+ memcpy(state_bo->data, ptr, obj->base.size);
+ msm_gem_put_vaddr(&obj->base);
+ }
+
+ state->nr_bos++;
+}
+
+static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
+ struct msm_gem_submit *submit, char *comm, char *cmd)
+{
+ struct msm_gpu_state *state;
+
+ /* Only save one crash state at a time */
+ if (gpu->crashstate)
+ return;
+
+ state = gpu->funcs->gpu_state_get(gpu);
+ if (IS_ERR_OR_NULL(state))
+ return;
+
+ /* Fill in the additional crash state information */
+ state->comm = kstrdup(comm, GFP_KERNEL);
+ state->cmd = kstrdup(cmd, GFP_KERNEL);
+
+ if (submit) {
+ int i;
+
+ state->bos = kcalloc(submit->nr_bos,
+ sizeof(struct msm_gpu_state_bo), GFP_KERNEL);
+
+ for (i = 0; state->bos && i < submit->nr_bos; i++)
+ msm_gpu_crashstate_get_bo(state, submit->bos[i].obj,
+ submit->bos[i].iova, submit->bos[i].flags);
+ }
+
+ /* Set the active crash state to be dumped on failure */
+ gpu->crashstate = state;
+
+ /* FIXME: Release the crashstate if this errors out? */
+ dev_coredumpm(gpu->dev->dev, THIS_MODULE, gpu, 0, GFP_KERNEL,
+ msm_gpu_devcoredump_read, msm_gpu_devcoredump_free);
+}
+#else
+static void msm_gpu_crashstate_capture(struct msm_gpu *gpu,
+ struct msm_gem_submit *submit, char *comm, char *cmd)
+{
+}
+#endif
+
/*
* Hangcheck detection for locked gpu:
*/
@@ -314,6 +414,7 @@ static void recover_worker(struct work_struct *work)
struct msm_drm_private *priv = dev->dev_private;
struct msm_gem_submit *submit;
struct msm_ringbuffer *cur_ring = gpu->funcs->active_ring(gpu);
+ char *comm = NULL, *cmd = NULL;
int i;
mutex_lock(&dev->struct_mutex);
@@ -327,7 +428,7 @@ static void recover_worker(struct work_struct *work)
rcu_read_lock();
task = pid_task(submit->pid, PIDTYPE_PID);
if (task) {
- char *cmd;
+ comm = kstrdup(task->comm, GFP_ATOMIC);
/*
* So slightly annoying, in other paths like
@@ -340,22 +441,28 @@ static void recover_worker(struct work_struct *work)
* about the submit going away.
*/
mutex_unlock(&dev->struct_mutex);
- cmd = kstrdup_quotable_cmdline(task, GFP_KERNEL);
+ cmd = kstrdup_quotable_cmdline(task, GFP_ATOMIC);
mutex_lock(&dev->struct_mutex);
+ }
+ rcu_read_unlock();
+ if (comm && cmd) {
dev_err(dev->dev, "%s: offending task: %s (%s)\n",
- gpu->name, task->comm, cmd);
+ gpu->name, comm, cmd);
msm_rd_dump_submit(priv->hangrd, submit,
- "offending task: %s (%s)", task->comm, cmd);
-
- kfree(cmd);
- } else {
+ "offending task: %s (%s)", comm, cmd);
+ } else
msm_rd_dump_submit(priv->hangrd, submit, NULL);
- }
- rcu_read_unlock();
}
+ /* Record the crash state */
+ pm_runtime_get_sync(&gpu->pdev->dev);
+ msm_gpu_crashstate_capture(gpu, submit, comm, cmd);
+ pm_runtime_put_sync(&gpu->pdev->dev);
+
+ kfree(cmd);
+ kfree(comm);
/*
* Update all the rings with the latest and greatest fence.. this
@@ -660,44 +767,22 @@ static irqreturn_t irq_handler(int irq, void *data)
return gpu->funcs->irq(gpu);
}
-static struct clk *get_clock(struct device *dev, const char *name)
-{
- struct clk *clk = devm_clk_get(dev, name);
-
- return IS_ERR(clk) ? NULL : clk;
-}
-
static int get_clocks(struct platform_device *pdev, struct msm_gpu *gpu)
{
- struct device *dev = &pdev->dev;
- struct property *prop;
- const char *name;
- int i = 0;
+ int ret = msm_clk_bulk_get(&pdev->dev, &gpu->grp_clks);
- gpu->nr_clocks = of_property_count_strings(dev->of_node, "clock-names");
- if (gpu->nr_clocks < 1) {
+ if (ret < 1) {
gpu->nr_clocks = 0;
- return 0;
- }
-
- gpu->grp_clks = devm_kcalloc(dev, sizeof(struct clk *), gpu->nr_clocks,
- GFP_KERNEL);
- if (!gpu->grp_clks) {
- gpu->nr_clocks = 0;
- return -ENOMEM;
+ return ret;
}
- of_property_for_each_string(dev->of_node, "clock-names", prop, name) {
- gpu->grp_clks[i] = get_clock(dev, name);
+ gpu->nr_clocks = ret;
- /* Remember the key clocks that we need to control later */
- if (!strcmp(name, "core") || !strcmp(name, "core_clk"))
- gpu->core_clk = gpu->grp_clks[i];
- else if (!strcmp(name, "rbbmtimer") || !strcmp(name, "rbbmtimer_clk"))
- gpu->rbbmtimer_clk = gpu->grp_clks[i];
+ gpu->core_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
+ gpu->nr_clocks, "core");
- ++i;
- }
+ gpu->rbbmtimer_clk = msm_clk_bulk_get_clock(gpu->grp_clks,
+ gpu->nr_clocks, "rbbmtimer");
return 0;
}
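[Note: get_clocks(), enable_clk() and disable_clk() above now lean on the clk-bulk API instead of open-coded loops over grp_clks (msm_clk_bulk_get() is a driver-local wrapper around it). A hedged sketch of the bare clk-bulk pattern, with hypothetical clock names:

    #include <linux/clk.h>
    #include <linux/kernel.h>

    static struct clk_bulk_data demo_clks[] = {
    	{ .id = "core" },
    	{ .id = "iface" },	/* clock names here are hypothetical */
    };

    static int demo_clocks_on(struct device *dev)
    {
    	int ret;

    	/* Looks up every entry by .id and fills in .clk */
    	ret = clk_bulk_get(dev, ARRAY_SIZE(demo_clks), demo_clks);
    	if (ret)
    		return ret;

    	/* Prepares and enables all clocks, unwinding internally on error */
    	return clk_bulk_prepare_enable(ARRAY_SIZE(demo_clks), demo_clks);
    }

    static void demo_clocks_off(void)
    {
    	clk_bulk_disable_unprepare(ARRAY_SIZE(demo_clks), demo_clks);
    	clk_bulk_put(ARRAY_SIZE(demo_clks), demo_clks);
    }
]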
diff --git a/drivers/gpu/drm/msm/msm_gpu.h b/drivers/gpu/drm/msm/msm_gpu.h
index b8241179175a..9122ee6e55e4 100644
--- a/drivers/gpu/drm/msm/msm_gpu.h
+++ b/drivers/gpu/drm/msm/msm_gpu.h
@@ -27,6 +27,7 @@
struct msm_gem_submit;
struct msm_gpu_perfcntr;
+struct msm_gpu_state;
struct msm_gpu_config {
const char *ioname;
@@ -64,11 +65,14 @@ struct msm_gpu_funcs {
void (*destroy)(struct msm_gpu *gpu);
+ /* show GPU status in debugfs or a devcoredump: */
+ void (*show)(struct msm_gpu *gpu, struct msm_gpu_state *state,
+ struct drm_printer *p);
#ifdef CONFIG_DEBUG_FS
- /* show GPU status in debugfs: */
- void (*show)(struct msm_gpu *gpu, struct seq_file *m);
/* for generation specific debugfs: */
int (*debugfs_init)(struct msm_gpu *gpu, struct drm_minor *minor);
#endif
int (*gpu_busy)(struct msm_gpu *gpu, uint64_t *value);
+ struct msm_gpu_state *(*gpu_state_get)(struct msm_gpu *gpu);
+ int (*gpu_state_put)(struct msm_gpu_state *state);
};
struct msm_gpu {
@@ -108,7 +112,7 @@ struct msm_gpu {
/* Power Control: */
struct regulator *gpu_reg, *gpu_cx;
- struct clk **grp_clks;
+ struct clk_bulk_data *grp_clks;
int nr_clocks;
struct clk *ebi1_clk, *core_clk, *rbbmtimer_clk;
uint32_t fast_rate;
@@ -129,6 +133,8 @@ struct msm_gpu {
u64 busy_cycles;
ktime_t time;
} devfreq;
+
+ struct msm_gpu_state *crashstate;
};
/* It turns out that all targets use the same ringbuffer size */
@@ -175,6 +181,38 @@ struct msm_gpu_submitqueue {
struct kref ref;
};
+struct msm_gpu_state_bo {
+ u64 iova;
+ size_t size;
+ void *data;
+};
+
+struct msm_gpu_state {
+ struct kref ref;
+ struct timespec64 time;
+
+ struct {
+ u64 iova;
+ u32 fence;
+ u32 seqno;
+ u32 rptr;
+ u32 wptr;
+ void *data;
+ int data_size;
+ } ring[MSM_GPU_MAX_RINGS];
+
+ int nr_registers;
+ u32 *registers;
+
+ u32 rbbm_status;
+
+ char *comm;
+ char *cmd;
+
+ int nr_bos;
+ struct msm_gpu_state_bo *bos;
+};
+
static inline void gpu_write(struct msm_gpu *gpu, u32 reg, u32 data)
{
msm_writel(data, gpu->mmio + (reg << 2));
@@ -254,4 +292,32 @@ static inline void msm_submitqueue_put(struct msm_gpu_submitqueue *queue)
kref_put(&queue->ref, msm_submitqueue_destroy);
}
+static inline struct msm_gpu_state *msm_gpu_crashstate_get(struct msm_gpu *gpu)
+{
+ struct msm_gpu_state *state = NULL;
+
+ mutex_lock(&gpu->dev->struct_mutex);
+
+ if (gpu->crashstate) {
+ kref_get(&gpu->crashstate->ref);
+ state = gpu->crashstate;
+ }
+
+ mutex_unlock(&gpu->dev->struct_mutex);
+
+ return state;
+}
+
+static inline void msm_gpu_crashstate_put(struct msm_gpu *gpu)
+{
+ mutex_lock(&gpu->dev->struct_mutex);
+
+ if (gpu->crashstate) {
+ if (gpu->funcs->gpu_state_put(gpu->crashstate))
+ gpu->crashstate = NULL;
+ }
+
+ mutex_unlock(&gpu->dev->struct_mutex);
+}
+
#endif /* __MSM_GPU_H__ */
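[Note: the crashstate get/put helpers above hand a kref'd state to the devcoredump file registered with dev_coredumpm() in msm_gpu.c. That read callback is invoked repeatedly with an (offset, count) window into the dump, and drm_coredump_printer() clips drm_printf() output to exactly that window, so the whole state can be re-formatted on every call. A rough sketch of such a read callback, with the surrounding state handling elided:

    #include <drm/drm_print.h>
    #include <generated/utsrelease.h>

    static ssize_t demo_coredump_read(char *buffer, loff_t offset,
    		size_t count, void *data, size_t datalen)
    {
    	struct drm_print_iterator iter = {
    		.data	= buffer,	/* window supplied by devcoredump */
    		.start	= offset,	/* first dump byte the caller wants */
    		.offset	= 0,
    		.remain	= count,
    	};
    	struct drm_printer p = drm_coredump_printer(&iter);

    	/* Output falling outside [offset, offset + count) is discarded */
    	drm_printf(&p, "---\n");
    	drm_printf(&p, "kernel: " UTS_RELEASE "\n");

    	return count - iter.remain;	/* bytes actually produced */
    }
]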
diff --git a/drivers/gpu/drm/msm/msm_kms.h b/drivers/gpu/drm/msm/msm_kms.h
index dfd92947de2c..fd88cebb6adb 100644
--- a/drivers/gpu/drm/msm/msm_kms.h
+++ b/drivers/gpu/drm/msm/msm_kms.h
@@ -1,4 +1,5 @@
/*
+ * Copyright (c) 2016-2018, The Linux Foundation. All rights reserved.
* Copyright (C) 2013 Red Hat
* Author: Rob Clark <robdclark@gmail.com>
*
@@ -42,6 +43,7 @@ struct msm_kms_funcs {
void (*disable_vblank)(struct msm_kms *kms, struct drm_crtc *crtc);
/* modeset, bracketing atomic_commit(): */
void (*prepare_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
+ void (*commit)(struct msm_kms *kms, struct drm_atomic_state *state);
void (*complete_commit)(struct msm_kms *kms, struct drm_atomic_state *state);
/* functions to wait for atomic commit completed on each CRTC */
void (*wait_for_crtc_commit_done)(struct msm_kms *kms,
@@ -50,6 +52,11 @@ struct msm_kms_funcs {
const struct msm_format *(*get_format)(struct msm_kms *kms,
const uint32_t format,
const uint64_t modifiers);
+ /* do format checking on format modified through fb_cmd2 modifiers */
+ int (*check_modified_format)(const struct msm_kms *kms,
+ const struct msm_format *msm_fmt,
+ const struct drm_mode_fb_cmd2 *cmd,
+ struct drm_gem_object **bos);
/* misc: */
long (*round_pixclk)(struct msm_kms *kms, unsigned long rate,
struct drm_encoder *encoder);
@@ -60,6 +67,9 @@ struct msm_kms_funcs {
void (*set_encoder_mode)(struct msm_kms *kms,
struct drm_encoder *encoder,
bool cmd_mode);
+ /* pm suspend/resume hooks */
+ int (*pm_suspend)(struct device *dev);
+ int (*pm_resume)(struct device *dev);
/* cleanup: */
void (*destroy)(struct msm_kms *kms);
#ifdef CONFIG_DEBUG_FS
@@ -86,9 +96,20 @@ static inline void msm_kms_init(struct msm_kms *kms,
struct msm_kms *mdp4_kms_init(struct drm_device *dev);
struct msm_kms *mdp5_kms_init(struct drm_device *dev);
-int msm_mdss_init(struct drm_device *dev);
-void msm_mdss_destroy(struct drm_device *dev);
-int msm_mdss_enable(struct msm_mdss *mdss);
-int msm_mdss_disable(struct msm_mdss *mdss);
+struct msm_kms *dpu_kms_init(struct drm_device *dev);
+
+struct msm_mdss_funcs {
+ int (*enable)(struct msm_mdss *mdss);
+ int (*disable)(struct msm_mdss *mdss);
+ void (*destroy)(struct drm_device *dev);
+};
+
+struct msm_mdss {
+ struct drm_device *dev;
+ const struct msm_mdss_funcs *funcs;
+};
+
+int mdp5_mdss_init(struct drm_device *dev);
+int dpu_mdss_init(struct drm_device *dev);
#endif /* __MSM_KMS_H__ */
diff --git a/drivers/gpu/drm/nouveau/dispnv04/disp.c b/drivers/gpu/drm/nouveau/dispnv04/disp.c
index 501d2d290e9c..70dce544984e 100644
--- a/drivers/gpu/drm/nouveau/dispnv04/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv04/disp.c
@@ -55,6 +55,9 @@ nv04_display_create(struct drm_device *dev)
nouveau_display(dev)->init = nv04_display_init;
nouveau_display(dev)->fini = nv04_display_fini;
+ /* Pre-nv50 doesn't support atomic, so don't expose the ioctls */
+ dev->driver->driver_features &= ~DRIVER_ATOMIC;
+
nouveau_hw_save_vga_fonts(dev, 1);
nv04_crtc_create(dev, 0);
diff --git a/drivers/gpu/drm/nouveau/dispnv50/disp.c b/drivers/gpu/drm/nouveau/dispnv50/disp.c
index 0190377b02a6..8412119bd940 100644
--- a/drivers/gpu/drm/nouveau/dispnv50/disp.c
+++ b/drivers/gpu/drm/nouveau/dispnv50/disp.c
@@ -1610,8 +1610,9 @@ nv50_pior_create(struct drm_connector *connector, struct dcb_output *dcbe)
*****************************************************************************/
static void
-nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 *interlock)
+nv50_disp_atomic_commit_core(struct drm_atomic_state *state, u32 *interlock)
{
+ struct nouveau_drm *drm = nouveau_drm(state->dev);
struct nv50_disp *disp = nv50_disp(drm->dev);
struct nv50_core *core = disp->core;
struct nv50_mstm *mstm;
@@ -1643,6 +1644,22 @@ nv50_disp_atomic_commit_core(struct nouveau_drm *drm, u32 *interlock)
}
static void
+nv50_disp_atomic_commit_wndw(struct drm_atomic_state *state, u32 *interlock)
+{
+ struct drm_plane_state *new_plane_state;
+ struct drm_plane *plane;
+ int i;
+
+ for_each_new_plane_in_state(state, plane, new_plane_state, i) {
+ struct nv50_wndw *wndw = nv50_wndw(plane);
+ if (interlock[wndw->interlock.type] & wndw->interlock.data) {
+ if (wndw->func->update)
+ wndw->func->update(wndw, interlock);
+ }
+ }
+}
+
+static void
nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
{
struct drm_device *dev = state->dev;
@@ -1709,7 +1726,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
help->disable(encoder);
interlock[NV50_DISP_INTERLOCK_CORE] |= 1;
if (outp->flush_disable) {
- nv50_disp_atomic_commit_core(drm, interlock);
+ nv50_disp_atomic_commit_wndw(state, interlock);
+ nv50_disp_atomic_commit_core(state, interlock);
memset(interlock, 0x00, sizeof(interlock));
}
}
@@ -1718,15 +1736,8 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
/* Flush disable. */
if (interlock[NV50_DISP_INTERLOCK_CORE]) {
if (atom->flush_disable) {
- for_each_new_plane_in_state(state, plane, new_plane_state, i) {
- struct nv50_wndw *wndw = nv50_wndw(plane);
- if (interlock[wndw->interlock.type] & wndw->interlock.data) {
- if (wndw->func->update)
- wndw->func->update(wndw, interlock);
- }
- }
-
- nv50_disp_atomic_commit_core(drm, interlock);
+ nv50_disp_atomic_commit_wndw(state, interlock);
+ nv50_disp_atomic_commit_core(state, interlock);
memset(interlock, 0x00, sizeof(interlock));
}
}
@@ -1787,18 +1798,14 @@ nv50_disp_atomic_commit_tail(struct drm_atomic_state *state)
}
/* Flush update. */
- for_each_new_plane_in_state(state, plane, new_plane_state, i) {
- struct nv50_wndw *wndw = nv50_wndw(plane);
- if (interlock[wndw->interlock.type] & wndw->interlock.data) {
- if (wndw->func->update)
- wndw->func->update(wndw, interlock);
- }
- }
+ nv50_disp_atomic_commit_wndw(state, interlock);
if (interlock[NV50_DISP_INTERLOCK_CORE]) {
if (interlock[NV50_DISP_INTERLOCK_BASE] ||
+ interlock[NV50_DISP_INTERLOCK_OVLY] ||
+ interlock[NV50_DISP_INTERLOCK_WNDW] ||
!atom->state.legacy_cursor_update)
- nv50_disp_atomic_commit_core(drm, interlock);
+ nv50_disp_atomic_commit_core(state, interlock);
else
disp->core->func->update(disp->core, interlock, false);
}
@@ -1896,7 +1903,7 @@ nv50_disp_atomic_commit(struct drm_device *dev,
nv50_disp_atomic_commit_tail(state);
drm_for_each_crtc(crtc, dev) {
- if (crtc->state->enable) {
+ if (crtc->state->active) {
if (!drm->have_disp_power_ref) {
drm->have_disp_power_ref = true;
return 0;
@@ -2144,10 +2151,6 @@ nv50_display_destroy(struct drm_device *dev)
kfree(disp);
}
-MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
-static int nouveau_atomic = 0;
-module_param_named(atomic, nouveau_atomic, int, 0400);
-
int
nv50_display_create(struct drm_device *dev)
{
@@ -2172,8 +2175,6 @@ nv50_display_create(struct drm_device *dev)
disp->disp = &nouveau_display(dev)->disp;
dev->mode_config.funcs = &nv50_disp_func;
dev->driver->driver_features |= DRIVER_PREFER_XBGR_30BPP;
- if (nouveau_atomic)
- dev->driver->driver_features |= DRIVER_ATOMIC;
/* small shared memory area we use for notifiers and semaphores */
ret = nouveau_bo_new(&drm->client, 4096, 0x1000, TTM_PL_FLAG_VRAM,
diff --git a/drivers/gpu/drm/nouveau/nouveau_backlight.c b/drivers/gpu/drm/nouveau/nouveau_backlight.c
index debbbf0fd4bd..408b955e5c39 100644
--- a/drivers/gpu/drm/nouveau/nouveau_backlight.c
+++ b/drivers/gpu/drm/nouveau/nouveau_backlight.c
@@ -267,6 +267,7 @@ nouveau_backlight_init(struct drm_device *dev)
struct nouveau_drm *drm = nouveau_drm(dev);
struct nvif_device *device = &drm->client.device;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
INIT_LIST_HEAD(&drm->bl_connectors);
@@ -275,7 +276,8 @@ nouveau_backlight_init(struct drm_device *dev)
return 0;
}
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ drm_for_each_connector_iter(connector, &conn_iter) {
if (connector->connector_type != DRM_MODE_CONNECTOR_LVDS &&
connector->connector_type != DRM_MODE_CONNECTOR_eDP)
continue;
@@ -292,7 +294,7 @@ nouveau_backlight_init(struct drm_device *dev)
break;
}
}
-
+ drm_connector_list_iter_end(&conn_iter);
return 0;
}
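[Note: nouveau_backlight.c above switches from walking mode_config.connector_list by hand to the drm_connector_list_iter API, which takes the connector list lock internally and holds a reference on each connector it yields, making the walk safe against concurrent connector hotplug (e.g. DP MST). The general shape of the iterator:

    #include <drm/drm_connector.h>

    static void demo_walk_connectors(struct drm_device *dev)
    {
    	struct drm_connector *connector;
    	struct drm_connector_list_iter conn_iter;

    	drm_connector_list_iter_begin(dev, &conn_iter);
    	drm_for_each_connector_iter(connector, &conn_iter) {
    		/* Each connector is referenced while the iterator holds it */
    		if (connector->connector_type == DRM_MODE_CONNECTOR_eDP)
    			break;	/* breaking out early is fine ... */
    	}
    	/* ... as long as _end() always runs to drop the last reference */
    	drm_connector_list_iter_end(&conn_iter);
    }
]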
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.c b/drivers/gpu/drm/nouveau/nouveau_connector.c
index 22a15478d23d..eb4f766b5958 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.c
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.c
@@ -400,8 +400,10 @@ nouveau_connector_destroy(struct drm_connector *connector)
kfree(nv_connector->edid);
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
- if (nv_connector->aux.transfer)
+ if (nv_connector->aux.transfer) {
+ drm_dp_cec_unregister_connector(&nv_connector->aux);
drm_dp_aux_unregister(&nv_connector->aux);
+ }
kfree(connector);
}
@@ -608,6 +610,7 @@ nouveau_connector_detect(struct drm_connector *connector, bool force)
nouveau_connector_set_encoder(connector, nv_encoder);
conn_status = connector_status_connected;
+ drm_dp_cec_set_edid(&nv_connector->aux, nv_connector->edid);
goto out;
}
@@ -1108,11 +1111,14 @@ nouveau_connector_hotplug(struct nvif_notify *notify)
if (rep->mask & NVIF_NOTIFY_CONN_V0_IRQ) {
NV_DEBUG(drm, "service %s\n", name);
+ drm_dp_cec_irq(&nv_connector->aux);
if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP)))
nv50_mstm_service(nv_encoder->dp.mstm);
} else {
bool plugged = (rep->mask != NVIF_NOTIFY_CONN_V0_UNPLUG);
+ if (!plugged)
+ drm_dp_cec_unset_edid(&nv_connector->aux);
NV_DEBUG(drm, "%splugged %s\n", plugged ? "" : "un", name);
if ((nv_encoder = find_encoder(connector, DCB_OUTPUT_DP))) {
if (!plugged)
@@ -1193,14 +1199,19 @@ nouveau_connector_create(struct drm_device *dev, int index)
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_connector *nv_connector = NULL;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
int type, ret = 0;
bool dummy;
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
nv_connector = nouveau_connector(connector);
- if (nv_connector->index == index)
+ if (nv_connector->index == index) {
+ drm_connector_list_iter_end(&conn_iter);
return connector;
+ }
}
+ drm_connector_list_iter_end(&conn_iter);
nv_connector = kzalloc(sizeof(*nv_connector), GFP_KERNEL);
if (!nv_connector)
@@ -1297,7 +1308,6 @@ nouveau_connector_create(struct drm_device *dev, int index)
kfree(nv_connector);
return ERR_PTR(ret);
}
-
funcs = &nouveau_connector_funcs;
break;
default:
@@ -1351,6 +1361,14 @@ nouveau_connector_create(struct drm_device *dev, int index)
break;
}
+ switch (type) {
+ case DRM_MODE_CONNECTOR_DisplayPort:
+ case DRM_MODE_CONNECTOR_eDP:
+ drm_dp_cec_register_connector(&nv_connector->aux,
+ connector->name, dev->dev);
+ break;
+ }
+
ret = nvif_notify_init(&disp->disp.object, nouveau_connector_hotplug,
true, NV04_DISP_NTFY_CONN,
&(struct nvif_notify_conn_req_v0) {
diff --git a/drivers/gpu/drm/nouveau/nouveau_connector.h b/drivers/gpu/drm/nouveau/nouveau_connector.h
index a4d1a059bd3d..dc7454e7f19a 100644
--- a/drivers/gpu/drm/nouveau/nouveau_connector.h
+++ b/drivers/gpu/drm/nouveau/nouveau_connector.h
@@ -33,6 +33,7 @@
#include <drm/drm_encoder.h>
#include <drm/drm_dp_helper.h>
#include "nouveau_crtc.h"
+#include "nouveau_encoder.h"
struct nvkm_i2c_port;
@@ -60,19 +61,46 @@ static inline struct nouveau_connector *nouveau_connector(
return container_of(con, struct nouveau_connector, base);
}
+static inline bool
+nouveau_connector_is_mst(struct drm_connector *connector)
+{
+ const struct nouveau_encoder *nv_encoder;
+ const struct drm_encoder *encoder;
+
+ if (connector->connector_type != DRM_MODE_CONNECTOR_DisplayPort)
+ return false;
+
+ nv_encoder = find_encoder(connector, DCB_OUTPUT_ANY);
+ if (!nv_encoder)
+ return false;
+
+ encoder = &nv_encoder->base.base;
+ return encoder->encoder_type == DRM_MODE_ENCODER_DPMST;
+}
+
+#define nouveau_for_each_non_mst_connector_iter(connector, iter) \
+ drm_for_each_connector_iter(connector, iter) \
+ for_each_if(!nouveau_connector_is_mst(connector))
+
static inline struct nouveau_connector *
nouveau_crtc_connector_get(struct nouveau_crtc *nv_crtc)
{
struct drm_device *dev = nv_crtc->base.dev;
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
+ struct nouveau_connector *nv_connector = NULL;
struct drm_crtc *crtc = to_drm_crtc(nv_crtc);
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
- if (connector->encoder && connector->encoder->crtc == crtc)
- return nouveau_connector(connector);
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
+ if (connector->encoder && connector->encoder->crtc == crtc) {
+ nv_connector = nouveau_connector(connector);
+ break;
+ }
}
+ drm_connector_list_iter_end(&conn_iter);
- return NULL;
+ return nv_connector;
}
struct drm_connector *
diff --git a/drivers/gpu/drm/nouveau/nouveau_display.c b/drivers/gpu/drm/nouveau/nouveau_display.c
index dfa236370726..139368b31916 100644
--- a/drivers/gpu/drm/nouveau/nouveau_display.c
+++ b/drivers/gpu/drm/nouveau/nouveau_display.c
@@ -404,6 +404,7 @@ nouveau_display_init(struct drm_device *dev)
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
int ret;
ret = disp->init(dev);
@@ -411,10 +412,12 @@ nouveau_display_init(struct drm_device *dev)
return ret;
/* enable hotplug interrupts */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
struct nouveau_connector *conn = nouveau_connector(connector);
nvif_notify_get(&conn->hpd);
}
+ drm_connector_list_iter_end(&conn_iter);
/* enable flip completion events */
nvif_notify_get(&drm->flip);
@@ -427,6 +430,7 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
struct nouveau_display *disp = nouveau_display(dev);
struct nouveau_drm *drm = nouveau_drm(dev);
struct drm_connector *connector;
+ struct drm_connector_list_iter conn_iter;
if (!suspend) {
if (drm_drv_uses_atomic_modeset(dev))
@@ -439,10 +443,12 @@ nouveau_display_fini(struct drm_device *dev, bool suspend)
nvif_notify_put(&drm->flip);
/* disable hotplug interrupts */
- list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
+ drm_connector_list_iter_begin(dev, &conn_iter);
+ nouveau_for_each_non_mst_connector_iter(connector, &conn_iter) {
struct nouveau_connector *conn = nouveau_connector(connector);
nvif_notify_put(&conn->hpd);
}
+ drm_connector_list_iter_end(&conn_iter);
drm_kms_helper_poll_disable(dev);
disp->fini(dev);
diff --git a/drivers/gpu/drm/nouveau/nouveau_drm.c b/drivers/gpu/drm/nouveau/nouveau_drm.c
index c779ee3c665b..c7ec86d6c3c9 100644
--- a/drivers/gpu/drm/nouveau/nouveau_drm.c
+++ b/drivers/gpu/drm/nouveau/nouveau_drm.c
@@ -81,6 +81,10 @@ MODULE_PARM_DESC(modeset, "enable driver (default: auto, "
int nouveau_modeset = -1;
module_param_named(modeset, nouveau_modeset, int, 0400);
+MODULE_PARM_DESC(atomic, "Expose atomic ioctl (default: disabled)");
+static int nouveau_atomic = 0;
+module_param_named(atomic, nouveau_atomic, int, 0400);
+
MODULE_PARM_DESC(runpm, "disable (0), force enable (1), optimus only default (-1)");
static int nouveau_runtime_pm = -1;
module_param_named(runpm, nouveau_runtime_pm, int, 0400);
@@ -509,6 +513,9 @@ static int nouveau_drm_probe(struct pci_dev *pdev,
pci_set_master(pdev);
+ if (nouveau_atomic)
+ driver_pci.driver_features |= DRIVER_ATOMIC;
+
ret = drm_get_pci_dev(pdev, pent, &driver_pci);
if (ret) {
nvkm_device_del(&device);
@@ -874,22 +881,11 @@ nouveau_pmops_runtime_resume(struct device *dev)
static int
nouveau_pmops_runtime_idle(struct device *dev)
{
- struct pci_dev *pdev = to_pci_dev(dev);
- struct drm_device *drm_dev = pci_get_drvdata(pdev);
- struct nouveau_drm *drm = nouveau_drm(drm_dev);
- struct drm_crtc *crtc;
-
if (!nouveau_pmops_runtime()) {
pm_runtime_forbid(dev);
return -EBUSY;
}
- list_for_each_entry(crtc, &drm->dev->mode_config.crtc_list, head) {
- if (crtc->enabled) {
- DRM_DEBUG_DRIVER("failing to power off - crtc active\n");
- return -EBUSY;
- }
- }
pm_runtime_mark_last_busy(dev);
pm_runtime_autosuspend(dev);
/* we don't want the main rpm_idle to call suspend - we want to autosuspend */
diff --git a/drivers/gpu/drm/nouveau/nouveau_fence.c b/drivers/gpu/drm/nouveau/nouveau_fence.c
index 412d49bc6e56..99be61ddeb75 100644
--- a/drivers/gpu/drm/nouveau/nouveau_fence.c
+++ b/drivers/gpu/drm/nouveau/nouveau_fence.c
@@ -526,6 +526,5 @@ static const struct dma_fence_ops nouveau_fence_ops_uevent = {
.get_timeline_name = nouveau_fence_get_timeline_name,
.enable_signaling = nouveau_fence_enable_signaling,
.signaled = nouveau_fence_is_signaled,
- .wait = dma_fence_default_wait,
.release = nouveau_fence_release
};
diff --git a/drivers/gpu/drm/nouveau/nouveau_gem.c b/drivers/gpu/drm/nouveau/nouveau_gem.c
index df73bec354e8..b56524d343c3 100644
--- a/drivers/gpu/drm/nouveau/nouveau_gem.c
+++ b/drivers/gpu/drm/nouveau/nouveau_gem.c
@@ -616,7 +616,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
struct nouveau_bo *nvbo;
uint32_t data;
- if (unlikely(r->bo_index > req->nr_buffers)) {
+ if (unlikely(r->bo_index >= req->nr_buffers)) {
NV_PRINTK(err, cli, "reloc bo index invalid\n");
ret = -EINVAL;
break;
@@ -626,7 +626,7 @@ nouveau_gem_pushbuf_reloc_apply(struct nouveau_cli *cli,
if (b->presumed.valid)
continue;
- if (unlikely(r->reloc_bo_index > req->nr_buffers)) {
+ if (unlikely(r->reloc_bo_index >= req->nr_buffers)) {
NV_PRINTK(err, cli, "reloc container bo index invalid\n");
ret = -EINVAL;
break;
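[Note: the two nouveau_gem.c hunks above are off-by-one fixes: bo_index and reloc_bo_index are 0-based indices into an array of nr_buffers entries, so the last valid value is nr_buffers - 1 and the guard must reject index >= nr_buffers, not only index > nr_buffers. In miniature:

    /* idx is 0-based; buf[nr - 1] is the last valid element */
    if (unlikely(idx >= nr))
    	return -EINVAL;
]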
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
index 73b5d46104bd..434d2fc5bb1c 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/base.c
@@ -140,6 +140,9 @@ nvkm_fb_init(struct nvkm_subdev *subdev)
if (fb->func->init)
fb->func->init(fb);
+ if (fb->func->init_remapper)
+ fb->func->init_remapper(fb);
+
if (fb->func->init_page) {
ret = fb->func->init_page(fb);
if (WARN_ON(ret))
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
index dffe1f5e1071..8205ce436b3e 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp100.c
@@ -37,6 +37,14 @@ gp100_fb_init_unkn(struct nvkm_fb *base)
}
void
+gp100_fb_init_remapper(struct nvkm_fb *fb)
+{
+ struct nvkm_device *device = fb->subdev.device;
+ /* Disable address remapper. */
+ nvkm_mask(device, 0x100c14, 0x00040000, 0x00000000);
+}
+
+void
gp100_fb_init(struct nvkm_fb *base)
{
struct gf100_fb *fb = gf100_fb(base);
@@ -56,6 +64,7 @@ gp100_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gp100_fb_init,
+ .init_remapper = gp100_fb_init_remapper,
.init_page = gm200_fb_init_page,
.init_unkn = gp100_fb_init_unkn,
.ram_new = gp100_ram_new,
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
index b84b9861ef26..b4d74e815674 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/gp102.c
@@ -31,6 +31,7 @@ gp102_fb = {
.dtor = gf100_fb_dtor,
.oneinit = gf100_fb_oneinit,
.init = gp100_fb_init,
+ .init_remapper = gp100_fb_init_remapper,
.init_page = gm200_fb_init_page,
.ram_new = gp100_ram_new,
};
diff --git a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
index 2857f31466bf..1e4ad61c19e1 100644
--- a/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
+++ b/drivers/gpu/drm/nouveau/nvkm/subdev/fb/priv.h
@@ -11,6 +11,7 @@ struct nvkm_fb_func {
u32 (*tags)(struct nvkm_fb *);
int (*oneinit)(struct nvkm_fb *);
void (*init)(struct nvkm_fb *);
+ void (*init_remapper)(struct nvkm_fb *);
int (*init_page)(struct nvkm_fb *);
void (*init_unkn)(struct nvkm_fb *);
void (*intr)(struct nvkm_fb *);
@@ -69,5 +70,6 @@ int gf100_fb_init_page(struct nvkm_fb *);
int gm200_fb_init_page(struct nvkm_fb *);
+void gp100_fb_init_remapper(struct nvkm_fb *);
void gp100_fb_init_unkn(struct nvkm_fb *);
#endif
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
index 9eabd7201a12..28a3ce8f88d2 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-analog-tv.c
@@ -18,77 +18,27 @@
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
struct device *dev;
-
- struct videomode vm;
-};
-
-static const struct videomode tvc_pal_vm = {
- .hactive = 720,
- .vactive = 574,
- .pixelclock = 13500000,
- .hsync_len = 64,
- .hfront_porch = 12,
- .hback_porch = 68,
- .vsync_len = 5,
- .vfront_porch = 5,
- .vback_porch = 41,
-
- .flags = DISPLAY_FLAGS_INTERLACED | DISPLAY_FLAGS_HSYNC_LOW |
- DISPLAY_FLAGS_VSYNC_LOW,
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-static int tvc_connect(struct omap_dss_device *dssdev)
+static int tvc_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in;
- int r;
-
- dev_dbg(ddata->dev, "connect\n");
-
- if (omapdss_device_is_connected(dssdev))
- return 0;
-
- in = omapdss_of_find_source_for_first_ep(ddata->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(ddata->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.atv->connect(in, dssdev);
- if (r) {
- omap_dss_put_device(in);
- return r;
- }
-
- ddata->in = in;
return 0;
}
-static void tvc_disconnect(struct omap_dss_device *dssdev)
+static void tvc_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- dev_dbg(ddata->dev, "disconnect\n");
-
- if (!omapdss_device_is_connected(dssdev))
- return;
-
- in->ops.atv->disconnect(in, dssdev);
-
- omap_dss_put_device(in);
- ddata->in = NULL;
}
static int tvc_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
dev_dbg(ddata->dev, "enable\n");
@@ -99,9 +49,7 @@ static int tvc_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.atv->set_timings(in, &ddata->vm);
-
- r = in->ops.atv->enable(in);
+ r = src->ops->enable(src);
if (r)
return r;
@@ -113,83 +61,30 @@ static int tvc_enable(struct omap_dss_device *dssdev)
static void tvc_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
dev_dbg(ddata->dev, "disable\n");
if (!omapdss_device_is_enabled(dssdev))
return;
- in->ops.atv->disable(in);
+ src->ops->disable(src);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void tvc_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- ddata->vm = *vm;
- dssdev->panel.vm = *vm;
-
- in->ops.atv->set_timings(in, vm);
-}
-
-static void tvc_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- *vm = ddata->vm;
-}
-
-static int tvc_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.atv->check_timings(in, vm);
-}
-
-static u32 tvc_get_wss(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.atv->get_wss(in);
-}
-
-static int tvc_set_wss(struct omap_dss_device *dssdev, u32 wss)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.atv->set_wss(in, wss);
-}
-
-static struct omap_dss_driver tvc_driver = {
+static const struct omap_dss_device_ops tvc_ops = {
.connect = tvc_connect,
.disconnect = tvc_disconnect,
.enable = tvc_enable,
.disable = tvc_disable,
-
- .set_timings = tvc_set_timings,
- .get_timings = tvc_get_timings,
- .check_timings = tvc_check_timings,
-
- .get_wss = tvc_get_wss,
- .set_wss = tvc_set_wss,
};
static int tvc_probe(struct platform_device *pdev)
{
struct panel_drv_data *ddata;
struct omap_dss_device *dssdev;
- int r;
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
@@ -198,20 +93,15 @@ static int tvc_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
ddata->dev = &pdev->dev;
- ddata->vm = tvc_pal_vm;
-
dssdev = &ddata->dssdev;
- dssdev->driver = &tvc_driver;
+ dssdev->ops = &tvc_ops;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_VENC;
dssdev->owner = THIS_MODULE;
- dssdev->panel.vm = tvc_pal_vm;
+ dssdev->of_ports = BIT(0);
- r = omapdss_register_display(dssdev);
- if (r) {
- dev_err(&pdev->dev, "Failed to register panel\n");
- return r;
- }
+ omapdss_display_init(dssdev);
+ omapdss_device_register(dssdev);
return 0;
}
@@ -221,10 +111,9 @@ static int __exit tvc_remove(struct platform_device *pdev)
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- omapdss_unregister_display(&ddata->dssdev);
+ omapdss_device_unregister(&ddata->dssdev);
tvc_disable(dssdev);
- tvc_disconnect(dssdev);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
index 6d8cbd9e2110..24b14f44248e 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-dvi.c
@@ -19,30 +19,8 @@
#include "../dss/omapdss.h"
-static const struct videomode dvic_default_vm = {
- .hactive = 640,
- .vactive = 480,
-
- .pixelclock = 23500000,
-
- .hfront_porch = 48,
- .hsync_len = 32,
- .hback_porch = 80,
-
- .vfront_porch = 3,
- .vsync_len = 4,
- .vback_porch = 7,
-
- .flags = DISPLAY_FLAGS_HSYNC_HIGH | DISPLAY_FLAGS_VSYNC_HIGH |
- DISPLAY_FLAGS_SYNC_NEGEDGE | DISPLAY_FLAGS_DE_HIGH |
- DISPLAY_FLAGS_PIXDATA_POSEDGE,
-};
-
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
-
- struct videomode vm;
struct i2c_adapter *i2c_adapter;
@@ -57,49 +35,20 @@ struct panel_drv_data {
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-static int dvic_connect(struct omap_dss_device *dssdev)
+static int dvic_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in;
- int r;
-
- if (omapdss_device_is_connected(dssdev))
- return 0;
-
- in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(dssdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.dvi->connect(in, dssdev);
- if (r) {
- omap_dss_put_device(in);
- return r;
- }
-
- ddata->in = in;
return 0;
}
-static void dvic_disconnect(struct omap_dss_device *dssdev)
+static void dvic_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (!omapdss_device_is_connected(dssdev))
- return;
-
- in->ops.dvi->disconnect(in, dssdev);
-
- omap_dss_put_device(in);
- ddata->in = NULL;
}
static int dvic_enable(struct omap_dss_device *dssdev)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
if (!omapdss_device_is_connected(dssdev))
@@ -108,9 +57,7 @@ static int dvic_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.dvi->set_timings(in, &ddata->vm);
-
- r = in->ops.dvi->enable(in);
+ r = src->ops->enable(src);
if (r)
return r;
@@ -121,46 +68,16 @@ static int dvic_enable(struct omap_dss_device *dssdev)
static void dvic_disable(struct omap_dss_device *dssdev)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
if (!omapdss_device_is_enabled(dssdev))
return;
- in->ops.dvi->disable(in);
+ src->ops->disable(src);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void dvic_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- ddata->vm = *vm;
- dssdev->panel.vm = *vm;
-
- in->ops.dvi->set_timings(in, vm);
-}
-
-static void dvic_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- *vm = ddata->vm;
-}
-
-static int dvic_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.dvi->check_timings(in, vm);
-}
-
static int dvic_ddc_read(struct i2c_adapter *adapter,
unsigned char *buf, u16 count, u8 offset)
{
@@ -198,12 +115,6 @@ static int dvic_read_edid(struct omap_dss_device *dssdev,
struct panel_drv_data *ddata = to_panel_data(dssdev);
int r, l, bytes_read;
- if (ddata->hpd_gpio && !gpiod_get_value_cansleep(ddata->hpd_gpio))
- return -ENODEV;
-
- if (!ddata->i2c_adapter)
- return -ENODEV;
-
l = min(EDID_LENGTH, len);
r = dvic_ddc_read(ddata->i2c_adapter, edid, l, 0);
if (r)
@@ -243,78 +154,41 @@ static bool dvic_detect(struct omap_dss_device *dssdev)
return r == 0;
}
-static int dvic_register_hpd_cb(struct omap_dss_device *dssdev,
+static void dvic_register_hpd_cb(struct omap_dss_device *dssdev,
void (*cb)(void *cb_data,
enum drm_connector_status status),
void *cb_data)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- if (!ddata->hpd_gpio)
- return -ENOTSUPP;
-
mutex_lock(&ddata->hpd_lock);
ddata->hpd_cb = cb;
ddata->hpd_cb_data = cb_data;
mutex_unlock(&ddata->hpd_lock);
- return 0;
}
static void dvic_unregister_hpd_cb(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- if (!ddata->hpd_gpio)
- return;
-
mutex_lock(&ddata->hpd_lock);
ddata->hpd_cb = NULL;
ddata->hpd_cb_data = NULL;
mutex_unlock(&ddata->hpd_lock);
}
-static void dvic_enable_hpd(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- if (!ddata->hpd_gpio)
- return;
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_enabled = true;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static void dvic_disable_hpd(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- if (!ddata->hpd_gpio)
- return;
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_enabled = false;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static struct omap_dss_driver dvic_driver = {
+static const struct omap_dss_device_ops dvic_ops = {
.connect = dvic_connect,
.disconnect = dvic_disconnect,
.enable = dvic_enable,
.disable = dvic_disable,
- .set_timings = dvic_set_timings,
- .get_timings = dvic_get_timings,
- .check_timings = dvic_check_timings,
-
.read_edid = dvic_read_edid,
.detect = dvic_detect,
.register_hpd_cb = dvic_register_hpd_cb,
.unregister_hpd_cb = dvic_unregister_hpd_cb,
- .enable_hpd = dvic_enable_hpd,
- .disable_hpd = dvic_disable_hpd,
};
static irqreturn_t dvic_hpd_isr(int irq, void *data)
@@ -396,28 +270,24 @@ static int dvic_probe(struct platform_device *pdev)
if (r)
return r;
- ddata->vm = dvic_default_vm;
-
dssdev = &ddata->dssdev;
- dssdev->driver = &dvic_driver;
+ dssdev->ops = &dvic_ops;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_DVI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.vm = dvic_default_vm;
+ dssdev->of_ports = BIT(0);
- r = omapdss_register_display(dssdev);
- if (r) {
- dev_err(&pdev->dev, "Failed to register panel\n");
- goto err_reg;
- }
-
- return 0;
+ if (ddata->hpd_gpio)
+ dssdev->ops_flags |= OMAP_DSS_DEVICE_OP_DETECT
+ | OMAP_DSS_DEVICE_OP_HPD;
+ if (ddata->i2c_adapter)
+ dssdev->ops_flags |= OMAP_DSS_DEVICE_OP_DETECT
+ | OMAP_DSS_DEVICE_OP_EDID;
-err_reg:
- i2c_put_adapter(ddata->i2c_adapter);
- mutex_destroy(&ddata->hpd_lock);
+ omapdss_display_init(dssdev);
+ omapdss_device_register(dssdev);
- return r;
+ return 0;
}
static int __exit dvic_remove(struct platform_device *pdev)
@@ -425,10 +295,9 @@ static int __exit dvic_remove(struct platform_device *pdev)
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- omapdss_unregister_display(&ddata->dssdev);
+ omapdss_device_unregister(&ddata->dssdev);
dvic_disable(dssdev);
- dvic_disconnect(dssdev);
i2c_put_adapter(ddata->i2c_adapter);
diff --git a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
index ca30ed9da7eb..e602fa4a50a4 100644
--- a/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
+++ b/drivers/gpu/drm/omapdrm/displays/connector-hdmi.c
@@ -10,95 +10,41 @@
*/
#include <linux/gpio/consumer.h>
-#include <linux/slab.h>
#include <linux/module.h>
-#include <linux/platform_device.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
#include <linux/mutex.h>
-
-#include <drm/drm_edid.h>
+#include <linux/platform_device.h>
+#include <linux/slab.h>
#include "../dss/omapdss.h"
-static const struct videomode hdmic_default_vm = {
- .hactive = 640,
- .vactive = 480,
- .pixelclock = 25175000,
- .hsync_len = 96,
- .hfront_porch = 16,
- .hback_porch = 48,
- .vsync_len = 2,
- .vfront_porch = 11,
- .vback_porch = 31,
-
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
-};
-
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
void (*hpd_cb)(void *cb_data, enum drm_connector_status status);
void *hpd_cb_data;
- bool hpd_enabled;
struct mutex hpd_lock;
struct device *dev;
- struct videomode vm;
-
- int hpd_gpio;
+ struct gpio_desc *hpd_gpio;
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-static int hdmic_connect(struct omap_dss_device *dssdev)
+static int hdmic_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in;
- int r;
-
- dev_dbg(ddata->dev, "connect\n");
-
- if (omapdss_device_is_connected(dssdev))
- return 0;
-
- in = omapdss_of_find_source_for_first_ep(ddata->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(ddata->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.hdmi->connect(in, dssdev);
- if (r) {
- omap_dss_put_device(in);
- return r;
- }
-
- ddata->in = in;
return 0;
}
-static void hdmic_disconnect(struct omap_dss_device *dssdev)
+static void hdmic_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- dev_dbg(ddata->dev, "disconnect\n");
-
- if (!omapdss_device_is_connected(dssdev))
- return;
-
- in->ops.hdmi->disconnect(in, dssdev);
-
- omap_dss_put_device(in);
- ddata->in = NULL;
}
static int hdmic_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
dev_dbg(ddata->dev, "enable\n");
@@ -109,9 +55,7 @@ static int hdmic_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.hdmi->set_timings(in, &ddata->vm);
-
- r = in->ops.hdmi->enable(in);
+ r = src->ops->enable(src);
if (r)
return r;
@@ -123,171 +67,58 @@ static int hdmic_enable(struct omap_dss_device *dssdev)
static void hdmic_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
dev_dbg(ddata->dev, "disable\n");
if (!omapdss_device_is_enabled(dssdev))
return;
- in->ops.hdmi->disable(in);
+ src->ops->disable(src);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void hdmic_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- ddata->vm = *vm;
- dssdev->panel.vm = *vm;
-
- in->ops.hdmi->set_timings(in, vm);
-}
-
-static void hdmic_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- *vm = ddata->vm;
-}
-
-static int hdmic_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.hdmi->check_timings(in, vm);
-}
-
-static int hdmic_read_edid(struct omap_dss_device *dssdev,
- u8 *edid, int len)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.hdmi->read_edid(in, edid, len);
-}
-
static bool hdmic_detect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
- bool connected;
-
- if (gpio_is_valid(ddata->hpd_gpio))
- connected = gpio_get_value_cansleep(ddata->hpd_gpio);
- else
- connected = in->ops.hdmi->detect(in);
- if (!connected && in->ops.hdmi->lost_hotplug)
- in->ops.hdmi->lost_hotplug(in);
- return connected;
+
+ return gpiod_get_value_cansleep(ddata->hpd_gpio);
}
-static int hdmic_register_hpd_cb(struct omap_dss_device *dssdev,
- void (*cb)(void *cb_data,
+static void hdmic_register_hpd_cb(struct omap_dss_device *dssdev,
+ void (*cb)(void *cb_data,
enum drm_connector_status status),
- void *cb_data)
+ void *cb_data)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (gpio_is_valid(ddata->hpd_gpio)) {
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_cb = cb;
- ddata->hpd_cb_data = cb_data;
- mutex_unlock(&ddata->hpd_lock);
- return 0;
- } else if (in->ops.hdmi->register_hpd_cb) {
- return in->ops.hdmi->register_hpd_cb(in, cb, cb_data);
- }
- return -ENOTSUPP;
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_cb = cb;
+ ddata->hpd_cb_data = cb_data;
+ mutex_unlock(&ddata->hpd_lock);
}
static void hdmic_unregister_hpd_cb(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (gpio_is_valid(ddata->hpd_gpio)) {
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_cb = NULL;
- ddata->hpd_cb_data = NULL;
- mutex_unlock(&ddata->hpd_lock);
- } else if (in->ops.hdmi->unregister_hpd_cb) {
- in->ops.hdmi->unregister_hpd_cb(in);
- }
-}
-
-static void hdmic_enable_hpd(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (gpio_is_valid(ddata->hpd_gpio)) {
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_enabled = true;
- mutex_unlock(&ddata->hpd_lock);
- } else if (in->ops.hdmi->enable_hpd) {
- in->ops.hdmi->enable_hpd(in);
- }
-}
-
-static void hdmic_disable_hpd(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (gpio_is_valid(ddata->hpd_gpio)) {
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_enabled = false;
- mutex_unlock(&ddata->hpd_lock);
- } else if (in->ops.hdmi->disable_hpd) {
- in->ops.hdmi->disable_hpd(in);
- }
-}
-
-static int hdmic_set_hdmi_mode(struct omap_dss_device *dssdev, bool hdmi_mode)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.hdmi->set_hdmi_mode(in, hdmi_mode);
-}
-
-static int hdmic_set_infoframe(struct omap_dss_device *dssdev,
- const struct hdmi_avi_infoframe *avi)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
- return in->ops.hdmi->set_infoframe(in, avi);
+ mutex_lock(&ddata->hpd_lock);
+ ddata->hpd_cb = NULL;
+ ddata->hpd_cb_data = NULL;
+ mutex_unlock(&ddata->hpd_lock);
}
-static struct omap_dss_driver hdmic_driver = {
+static const struct omap_dss_device_ops hdmic_ops = {
.connect = hdmic_connect,
.disconnect = hdmic_disconnect,
.enable = hdmic_enable,
.disable = hdmic_disable,
- .set_timings = hdmic_set_timings,
- .get_timings = hdmic_get_timings,
- .check_timings = hdmic_check_timings,
-
- .read_edid = hdmic_read_edid,
.detect = hdmic_detect,
.register_hpd_cb = hdmic_register_hpd_cb,
.unregister_hpd_cb = hdmic_unregister_hpd_cb,
- .enable_hpd = hdmic_enable_hpd,
- .disable_hpd = hdmic_disable_hpd,
- .set_hdmi_mode = hdmic_set_hdmi_mode,
- .set_hdmi_infoframe = hdmic_set_infoframe,
};
static irqreturn_t hdmic_hpd_isr(int irq, void *data)
@@ -295,7 +126,7 @@ static irqreturn_t hdmic_hpd_isr(int irq, void *data)
struct panel_drv_data *ddata = data;
mutex_lock(&ddata->hpd_lock);
- if (ddata->hpd_enabled && ddata->hpd_cb) {
+ if (ddata->hpd_cb) {
enum drm_connector_status status;
if (hdmic_detect(&ddata->dssdev))
@@ -310,26 +141,11 @@ static irqreturn_t hdmic_hpd_isr(int irq, void *data)
return IRQ_HANDLED;
}
-static int hdmic_probe_of(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct device_node *node = pdev->dev.of_node;
- int gpio;
-
- /* HPD GPIO */
- gpio = of_get_named_gpio(node, "hpd-gpios", 0);
- if (gpio_is_valid(gpio))
- ddata->hpd_gpio = gpio;
- else
- ddata->hpd_gpio = -ENODEV;
-
- return 0;
-}
-
static int hdmic_probe(struct platform_device *pdev)
{
struct panel_drv_data *ddata;
struct omap_dss_device *dssdev;
+ struct gpio_desc *gpio;
int r;
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
@@ -339,20 +155,20 @@ static int hdmic_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
ddata->dev = &pdev->dev;
- r = hdmic_probe_of(pdev);
- if (r)
- return r;
-
mutex_init(&ddata->hpd_lock);
- if (gpio_is_valid(ddata->hpd_gpio)) {
- r = devm_gpio_request_one(&pdev->dev, ddata->hpd_gpio,
- GPIOF_DIR_IN, "hdmi_hpd");
- if (r)
- return r;
+ /* HPD GPIO */
+ gpio = devm_gpiod_get_optional(&pdev->dev, "hpd", GPIOD_IN);
+ if (IS_ERR(gpio)) {
+ dev_err(&pdev->dev, "failed to parse HPD gpio\n");
+ return PTR_ERR(gpio);
+ }
+
+ ddata->hpd_gpio = gpio;
+ if (ddata->hpd_gpio) {
r = devm_request_threaded_irq(&pdev->dev,
- gpio_to_irq(ddata->hpd_gpio),
+ gpiod_to_irq(ddata->hpd_gpio),
NULL, hdmic_hpd_isr,
IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
IRQF_ONESHOT,
@@ -361,20 +177,18 @@ static int hdmic_probe(struct platform_device *pdev)
return r;
}
- ddata->vm = hdmic_default_vm;
-
dssdev = &ddata->dssdev;
- dssdev->driver = &hdmic_driver;
+ dssdev->ops = &hdmic_ops;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.vm = hdmic_default_vm;
+ dssdev->of_ports = BIT(0);
+ dssdev->ops_flags = ddata->hpd_gpio
+ ? OMAP_DSS_DEVICE_OP_DETECT | OMAP_DSS_DEVICE_OP_HPD
+ : 0;
- r = omapdss_register_display(dssdev);
- if (r) {
- dev_err(&pdev->dev, "Failed to register panel\n");
- return r;
- }
+ omapdss_display_init(dssdev);
+ omapdss_device_register(dssdev);
return 0;
}
@@ -384,10 +198,9 @@ static int __exit hdmic_remove(struct platform_device *pdev)
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- omapdss_unregister_display(&ddata->dssdev);
+ omapdss_device_unregister(&ddata->dssdev);
hdmic_disable(dssdev);
- hdmic_disconnect(dssdev);
return 0;
}
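[Note: connector-hdmi.c above drops the legacy integer-GPIO calls (of_get_named_gpio(), gpio_is_valid(), gpio_to_irq()) in favour of GPIO descriptors: devm_gpiod_get_optional() returns NULL rather than an error when the hpd-gpios property is absent, so presence becomes a plain pointer test, and polarity is handled from the firmware flags. A sketch of the descriptor-based HPD setup, with the IRQ thread handler name hypothetical:

    #include <linux/gpio/consumer.h>
    #include <linux/interrupt.h>

    static int demo_hpd_init(struct device *dev, irq_handler_t thread_fn)
    {
    	struct gpio_desc *hpd;
    	int irq;

    	/* NULL (not an error) when no "hpd-gpios" property exists */
    	hpd = devm_gpiod_get_optional(dev, "hpd", GPIOD_IN);
    	if (IS_ERR(hpd))
    		return PTR_ERR(hpd);
    	if (!hpd)
    		return 0;	/* hot-plug detect simply not wired up */

    	irq = gpiod_to_irq(hpd);
    	if (irq < 0)
    		return irq;

    	return devm_request_threaded_irq(dev, irq, NULL, thread_fn,
    			IRQF_TRIGGER_RISING | IRQF_TRIGGER_FALLING |
    			IRQF_ONESHOT, "demo-hpd", NULL);
    }
]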
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
index afee1b8b457a..4fefd80f53bb 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-opa362.c
@@ -23,75 +23,28 @@
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
struct gpio_desc *enable_gpio;
-
- struct videomode vm;
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-static int opa362_connect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static int opa362_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in;
- int r;
-
- dev_dbg(dssdev->dev, "connect\n");
-
- if (omapdss_device_is_connected(dssdev))
- return -EBUSY;
-
- in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(dssdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.atv->connect(in, dssdev);
- if (r) {
- omap_dss_put_device(in);
- return r;
- }
-
- dst->src = dssdev;
- dssdev->dst = dst;
-
- ddata->in = in;
- return 0;
+ return omapdss_device_connect(dst->dss, dst, dst->next);
}
-static void opa362_disconnect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static void opa362_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- dev_dbg(dssdev->dev, "disconnect\n");
-
- WARN_ON(!omapdss_device_is_connected(dssdev));
- if (!omapdss_device_is_connected(dssdev))
- return;
-
- WARN_ON(dst != dssdev->dst);
- if (dst != dssdev->dst)
- return;
-
- dst->src = NULL;
- dssdev->dst = NULL;
-
- in->ops.atv->disconnect(in, &ddata->dssdev);
-
- omap_dss_put_device(in);
- ddata->in = NULL;
+ omapdss_device_disconnect(dst, dst->next);
}
static int opa362_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
dev_dbg(dssdev->dev, "enable\n");
@@ -102,9 +55,7 @@ static int opa362_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.atv->set_timings(in, &ddata->vm);
-
- r = in->ops.atv->enable(in);
+ r = src->ops->enable(src);
if (r)
return r;
@@ -119,7 +70,7 @@ static int opa362_enable(struct omap_dss_device *dssdev)
static void opa362_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
dev_dbg(dssdev->dev, "disable\n");
@@ -129,56 +80,16 @@ static void opa362_disable(struct omap_dss_device *dssdev)
if (ddata->enable_gpio)
gpiod_set_value_cansleep(ddata->enable_gpio, 0);
- in->ops.atv->disable(in);
+ src->ops->disable(src);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void opa362_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- dev_dbg(dssdev->dev, "set_timings\n");
-
- ddata->vm = *vm;
- dssdev->panel.vm = *vm;
-
- in->ops.atv->set_timings(in, vm);
-}
-
-static void opa362_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- dev_dbg(dssdev->dev, "get_timings\n");
-
- *vm = ddata->vm;
-}
-
-static int opa362_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- dev_dbg(dssdev->dev, "check_timings\n");
-
- return in->ops.atv->check_timings(in, vm);
-}
-
-static const struct omapdss_atv_ops opa362_atv_ops = {
+static const struct omap_dss_device_ops opa362_ops = {
.connect = opa362_connect,
.disconnect = opa362_disconnect,
-
.enable = opa362_enable,
.disable = opa362_disable,
-
- .check_timings = opa362_check_timings,
- .set_timings = opa362_set_timings,
- .get_timings = opa362_get_timings,
};
static int opa362_probe(struct platform_device *pdev)
@@ -186,7 +97,6 @@ static int opa362_probe(struct platform_device *pdev)
struct panel_drv_data *ddata;
struct omap_dss_device *dssdev;
struct gpio_desc *gpio;
- int r;
dev_dbg(&pdev->dev, "probe\n");
@@ -203,18 +113,22 @@ static int opa362_probe(struct platform_device *pdev)
ddata->enable_gpio = gpio;
dssdev = &ddata->dssdev;
- dssdev->ops.atv = &opa362_atv_ops;
+ dssdev->ops = &opa362_ops;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_VENC;
dssdev->output_type = OMAP_DISPLAY_TYPE_VENC;
dssdev->owner = THIS_MODULE;
+ dssdev->of_ports = BIT(1) | BIT(0);
- r = omapdss_register_output(dssdev);
- if (r) {
- dev_err(&pdev->dev, "Failed to register output\n");
- return r;
+ dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1);
+ if (IS_ERR(dssdev->next)) {
+ if (PTR_ERR(dssdev->next) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to find video sink\n");
+ return PTR_ERR(dssdev->next);
}
+ omapdss_device_register(dssdev);
+
return 0;
}
@@ -223,7 +137,9 @@ static int __exit opa362_remove(struct platform_device *pdev)
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- omapdss_unregister_output(&ddata->dssdev);
+ if (dssdev->next)
+ omapdss_device_put(dssdev->next);
+ omapdss_device_unregister(&ddata->dssdev);
WARN_ON(omapdss_device_is_enabled(dssdev));
if (omapdss_device_is_enabled(dssdev))
@@ -231,7 +147,7 @@ static int __exit opa362_remove(struct platform_device *pdev)
WARN_ON(omapdss_device_is_connected(dssdev));
if (omapdss_device_is_connected(dssdev))
- opa362_disconnect(dssdev, dssdev->dst);
+ omapdss_device_disconnect(NULL, dssdev);
return 0;
}
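The opa362 changes above are the template for every encoder conversion in this patch: the bus-specific omapdss_atv_ops give way to the generic omap_dss_device_ops, and connect/disconnect shrink to a plain hand-off along the display pipeline. A minimal sketch of that shape, assuming omapdss_device_connect() and omapdss_device_disconnect() behave as they are used here (the example_* names are hypothetical):

static int example_connect(struct omap_dss_device *src,
			   struct omap_dss_device *dst)
{
	/* Forward the connection to the next device in the pipeline. */
	return omapdss_device_connect(dst->dss, dst, dst->next);
}

static void example_disconnect(struct omap_dss_device *src,
			       struct omap_dss_device *dst)
{
	/* Tear the link down in the same direction. */
	omapdss_device_disconnect(dst, dst->next);
}

The reference counting and src/dst pointer bookkeeping that each driver used to do by hand now lives behind those two helpers.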
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
index ed7ae384c3ed..f1a748353279 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tfp410.c
@@ -13,77 +13,33 @@
#include <linux/module.h>
#include <linux/platform_device.h>
#include <linux/slab.h>
-#include <linux/of_gpio.h>
#include "../dss/omapdss.h"
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
- int pd_gpio;
-
- struct videomode vm;
+ struct gpio_desc *pd_gpio;
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-static int tfp410_connect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static int tfp410_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in;
- int r;
-
- if (omapdss_device_is_connected(dssdev))
- return -EBUSY;
-
- in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(dssdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.dpi->connect(in, dssdev);
- if (r) {
- omap_dss_put_device(in);
- return r;
- }
-
- dst->src = dssdev;
- dssdev->dst = dst;
-
- ddata->in = in;
- return 0;
+ return omapdss_device_connect(dst->dss, dst, dst->next);
}
-static void tfp410_disconnect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static void tfp410_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- WARN_ON(!omapdss_device_is_connected(dssdev));
- if (!omapdss_device_is_connected(dssdev))
- return;
-
- WARN_ON(dst != dssdev->dst);
- if (dst != dssdev->dst)
- return;
-
- dst->src = NULL;
- dssdev->dst = NULL;
-
- in->ops.dpi->disconnect(in, &ddata->dssdev);
-
- omap_dss_put_device(in);
- ddata->in = NULL;
+ omapdss_device_disconnect(dst, dst->next);
}
static int tfp410_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
if (!omapdss_device_is_connected(dssdev))
@@ -92,14 +48,12 @@ static int tfp410_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.dpi->set_timings(in, &ddata->vm);
-
- r = in->ops.dpi->enable(in);
+ r = src->ops->enable(src);
if (r)
return r;
- if (gpio_is_valid(ddata->pd_gpio))
- gpio_set_value_cansleep(ddata->pd_gpio, 1);
+ if (ddata->pd_gpio)
+ gpiod_set_value_cansleep(ddata->pd_gpio, 0);
dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
@@ -109,94 +63,31 @@ static int tfp410_enable(struct omap_dss_device *dssdev)
static void tfp410_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
if (!omapdss_device_is_enabled(dssdev))
return;
- if (gpio_is_valid(ddata->pd_gpio))
- gpio_set_value_cansleep(ddata->pd_gpio, 0);
+ if (ddata->pd_gpio)
+ gpiod_set_value_cansleep(ddata->pd_gpio, 1);
- in->ops.dpi->disable(in);
+ src->ops->disable(src);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void tfp410_fix_timings(struct videomode *vm)
-{
- vm->flags |= DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_PIXDATA_POSEDGE |
- DISPLAY_FLAGS_SYNC_POSEDGE;
-}
-
-static void tfp410_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- tfp410_fix_timings(vm);
-
- ddata->vm = *vm;
- dssdev->panel.vm = *vm;
-
- in->ops.dpi->set_timings(in, vm);
-}
-
-static void tfp410_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- *vm = ddata->vm;
-}
-
-static int tfp410_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- tfp410_fix_timings(vm);
-
- return in->ops.dpi->check_timings(in, vm);
-}
-
-static const struct omapdss_dvi_ops tfp410_dvi_ops = {
+static const struct omap_dss_device_ops tfp410_ops = {
.connect = tfp410_connect,
.disconnect = tfp410_disconnect,
-
.enable = tfp410_enable,
.disable = tfp410_disable,
-
- .check_timings = tfp410_check_timings,
- .set_timings = tfp410_set_timings,
- .get_timings = tfp410_get_timings,
};
-static int tfp410_probe_of(struct platform_device *pdev)
-{
- struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct device_node *node = pdev->dev.of_node;
- int gpio;
-
- gpio = of_get_named_gpio(node, "powerdown-gpios", 0);
-
- if (gpio_is_valid(gpio) || gpio == -ENOENT) {
- ddata->pd_gpio = gpio;
- } else {
- if (gpio != -EPROBE_DEFER)
- dev_err(&pdev->dev, "failed to parse PD gpio\n");
- return gpio;
- }
-
- return 0;
-}
-
static int tfp410_probe(struct platform_device *pdev)
{
struct panel_drv_data *ddata;
struct omap_dss_device *dssdev;
- int r;
+ struct gpio_desc *gpio;
ddata = devm_kzalloc(&pdev->dev, sizeof(*ddata), GFP_KERNEL);
if (!ddata)
@@ -204,34 +95,34 @@ static int tfp410_probe(struct platform_device *pdev)
platform_set_drvdata(pdev, ddata);
- r = tfp410_probe_of(pdev);
- if (r)
- return r;
-
- if (gpio_is_valid(ddata->pd_gpio)) {
- r = devm_gpio_request_one(&pdev->dev, ddata->pd_gpio,
- GPIOF_OUT_INIT_LOW, "tfp410 PD");
- if (r) {
- dev_err(&pdev->dev, "Failed to request PD GPIO %d\n",
- ddata->pd_gpio);
- return r;
- }
+ /* Powerdown GPIO */
+ gpio = devm_gpiod_get_optional(&pdev->dev, "powerdown", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpio)) {
+ dev_err(&pdev->dev, "failed to parse powerdown gpio\n");
+ return PTR_ERR(gpio);
}
+ ddata->pd_gpio = gpio;
+
dssdev = &ddata->dssdev;
- dssdev->ops.dvi = &tfp410_dvi_ops;
+ dssdev->ops = &tfp410_ops;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->output_type = OMAP_DISPLAY_TYPE_DVI;
dssdev->owner = THIS_MODULE;
- dssdev->port_num = 1;
-
- r = omapdss_register_output(dssdev);
- if (r) {
- dev_err(&pdev->dev, "Failed to register output\n");
- return r;
+ dssdev->of_ports = BIT(1) | BIT(0);
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE
+ | DRM_BUS_FLAG_PIXDATA_POSEDGE;
+
+ dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1);
+ if (IS_ERR(dssdev->next)) {
+ if (PTR_ERR(dssdev->next) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to find video sink\n");
+ return PTR_ERR(dssdev->next);
}
+ omapdss_device_register(dssdev);
+
return 0;
}
@@ -240,7 +131,9 @@ static int __exit tfp410_remove(struct platform_device *pdev)
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- omapdss_unregister_output(&ddata->dssdev);
+ if (dssdev->next)
+ omapdss_device_put(dssdev->next);
+ omapdss_device_unregister(&ddata->dssdev);
WARN_ON(omapdss_device_is_enabled(dssdev));
if (omapdss_device_is_enabled(dssdev))
@@ -248,7 +141,7 @@ static int __exit tfp410_remove(struct platform_device *pdev)
WARN_ON(omapdss_device_is_connected(dssdev));
if (omapdss_device_is_connected(dssdev))
- tfp410_disconnect(dssdev, dssdev->dst);
+ omapdss_device_disconnect(NULL, dssdev);
return 0;
}
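Besides the ops conversion, the tfp410 hunks complete the move from legacy GPIO numbers (of_get_named_gpio()/gpio_set_value_cansleep()) to GPIO descriptors: the powerdown line is requested once with devm_gpiod_get_optional() and driven with logical levels, so the active-low inversion is described by the device tree flags rather than hard-coded raw values. A sketch of the probe-side idiom, assuming a "powerdown" GPIO property as in this patch (example_* is hypothetical):

static int example_request_powerdown(struct platform_device *pdev,
				     struct gpio_desc **pd)
{
	/* Request asserted: the encoder starts powered down. A missing
	 * optional property yields NULL, not an error. */
	*pd = devm_gpiod_get_optional(&pdev->dev, "powerdown",
				      GPIOD_OUT_HIGH);
	return PTR_ERR_OR_ZERO(*pd);
}

Enabling the output then deasserts the line with gpiod_set_value_cansleep(pd, 0), and disabling asserts it again with a logical 1.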
diff --git a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
index d275bf152da5..94de55fd8884 100644
--- a/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
+++ b/drivers/gpu/drm/omapdrm/displays/encoder-tpd12s015.c
@@ -21,42 +21,26 @@
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
void (*hpd_cb)(void *cb_data, enum drm_connector_status status);
void *hpd_cb_data;
- bool hpd_enabled;
struct mutex hpd_lock;
struct gpio_desc *ct_cp_hpd_gpio;
struct gpio_desc *ls_oe_gpio;
struct gpio_desc *hpd_gpio;
-
- struct videomode vm;
};
#define to_panel_data(x) container_of(x, struct panel_drv_data, dssdev)
-static int tpd_connect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static int tpd_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in;
+ struct panel_drv_data *ddata = to_panel_data(dst);
int r;
- in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(dssdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.hdmi->connect(in, dssdev);
- if (r) {
- omap_dss_put_device(in);
+ r = omapdss_device_connect(dst->dss, dst, dst->next);
+ if (r)
return r;
- }
-
- dst->src = dssdev;
- dssdev->dst = dst;
gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 1);
gpiod_set_value_cansleep(ddata->ls_oe_gpio, 1);
@@ -64,45 +48,29 @@ static int tpd_connect(struct omap_dss_device *dssdev,
/* DC-DC converter needs at most 300us to get to 90% of 5V */
udelay(300);
- ddata->in = in;
return 0;
}
-static void tpd_disconnect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static void tpd_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- WARN_ON(dst != dssdev->dst);
-
- if (dst != dssdev->dst)
- return;
+ struct panel_drv_data *ddata = to_panel_data(dst);
gpiod_set_value_cansleep(ddata->ct_cp_hpd_gpio, 0);
gpiod_set_value_cansleep(ddata->ls_oe_gpio, 0);
- dst->src = NULL;
- dssdev->dst = NULL;
-
- in->ops.hdmi->disconnect(in, &ddata->dssdev);
-
- omap_dss_put_device(in);
- ddata->in = NULL;
+ omapdss_device_disconnect(dst, dst->next);
}
static int tpd_enable(struct omap_dss_device *dssdev)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
return 0;
- in->ops.hdmi->set_timings(in, &ddata->vm);
-
- r = in->ops.hdmi->enable(in);
+ r = src->ops->enable(src);
if (r)
return r;
@@ -113,76 +81,27 @@ static int tpd_enable(struct omap_dss_device *dssdev)
static void tpd_disable(struct omap_dss_device *dssdev)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
if (dssdev->state != OMAP_DSS_DISPLAY_ACTIVE)
return;
- in->ops.hdmi->disable(in);
+ src->ops->disable(src);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void tpd_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- ddata->vm = *vm;
- dssdev->panel.vm = *vm;
-
- in->ops.hdmi->set_timings(in, vm);
-}
-
-static void tpd_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- *vm = ddata->vm;
-}
-
-static int tpd_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
- int r;
-
- r = in->ops.hdmi->check_timings(in, vm);
-
- return r;
-}
-
-static int tpd_read_edid(struct omap_dss_device *dssdev,
- u8 *edid, int len)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (!gpiod_get_value_cansleep(ddata->hpd_gpio))
- return -ENODEV;
-
- return in->ops.hdmi->read_edid(in, edid, len);
-}
-
static bool tpd_detect(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
- bool connected = gpiod_get_value_cansleep(ddata->hpd_gpio);
- if (!connected && in->ops.hdmi->lost_hotplug)
- in->ops.hdmi->lost_hotplug(in);
- return connected;
+ return gpiod_get_value_cansleep(ddata->hpd_gpio);
}
-static int tpd_register_hpd_cb(struct omap_dss_device *dssdev,
- void (*cb)(void *cb_data,
+static void tpd_register_hpd_cb(struct omap_dss_device *dssdev,
+ void (*cb)(void *cb_data,
enum drm_connector_status status),
- void *cb_data)
+ void *cb_data)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
@@ -190,8 +109,6 @@ static int tpd_register_hpd_cb(struct omap_dss_device *dssdev,
ddata->hpd_cb = cb;
ddata->hpd_cb_data = cb_data;
mutex_unlock(&ddata->hpd_lock);
-
- return 0;
}
static void tpd_unregister_hpd_cb(struct omap_dss_device *dssdev)
@@ -204,61 +121,14 @@ static void tpd_unregister_hpd_cb(struct omap_dss_device *dssdev)
mutex_unlock(&ddata->hpd_lock);
}
-static void tpd_enable_hpd(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_enabled = true;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static void tpd_disable_hpd(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
-
- mutex_lock(&ddata->hpd_lock);
- ddata->hpd_enabled = false;
- mutex_unlock(&ddata->hpd_lock);
-}
-
-static int tpd_set_infoframe(struct omap_dss_device *dssdev,
- const struct hdmi_avi_infoframe *avi)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.hdmi->set_infoframe(in, avi);
-}
-
-static int tpd_set_hdmi_mode(struct omap_dss_device *dssdev,
- bool hdmi_mode)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.hdmi->set_hdmi_mode(in, hdmi_mode);
-}
-
-static const struct omapdss_hdmi_ops tpd_hdmi_ops = {
+static const struct omap_dss_device_ops tpd_ops = {
.connect = tpd_connect,
.disconnect = tpd_disconnect,
-
.enable = tpd_enable,
.disable = tpd_disable,
-
- .check_timings = tpd_check_timings,
- .set_timings = tpd_set_timings,
- .get_timings = tpd_get_timings,
-
- .read_edid = tpd_read_edid,
.detect = tpd_detect,
.register_hpd_cb = tpd_register_hpd_cb,
.unregister_hpd_cb = tpd_unregister_hpd_cb,
- .enable_hpd = tpd_enable_hpd,
- .disable_hpd = tpd_disable_hpd,
- .set_infoframe = tpd_set_infoframe,
- .set_hdmi_mode = tpd_set_hdmi_mode,
};
static irqreturn_t tpd_hpd_isr(int irq, void *data)
@@ -266,7 +136,7 @@ static irqreturn_t tpd_hpd_isr(int irq, void *data)
struct panel_drv_data *ddata = data;
mutex_lock(&ddata->hpd_lock);
- if (ddata->hpd_enabled && ddata->hpd_cb) {
+ if (ddata->hpd_cb) {
enum drm_connector_status status;
if (tpd_detect(&ddata->dssdev))
@@ -283,7 +153,7 @@ static irqreturn_t tpd_hpd_isr(int irq, void *data)
static int tpd_probe(struct platform_device *pdev)
{
- struct omap_dss_device *in, *dssdev;
+ struct omap_dss_device *dssdev;
struct panel_drv_data *ddata;
int r;
struct gpio_desc *gpio;
@@ -325,21 +195,24 @@ static int tpd_probe(struct platform_device *pdev)
return r;
dssdev = &ddata->dssdev;
- dssdev->ops.hdmi = &tpd_hdmi_ops;
+ dssdev->ops = &tpd_ops;
dssdev->dev = &pdev->dev;
dssdev->type = OMAP_DISPLAY_TYPE_HDMI;
dssdev->output_type = OMAP_DISPLAY_TYPE_HDMI;
dssdev->owner = THIS_MODULE;
- dssdev->port_num = 1;
-
- in = ddata->in;
-
- r = omapdss_register_output(dssdev);
- if (r) {
- dev_err(&pdev->dev, "Failed to register output\n");
- return r;
+ dssdev->of_ports = BIT(1) | BIT(0);
+ dssdev->ops_flags = OMAP_DSS_DEVICE_OP_DETECT
+ | OMAP_DSS_DEVICE_OP_HPD;
+
+ dssdev->next = omapdss_of_find_connected_device(pdev->dev.of_node, 1);
+ if (IS_ERR(dssdev->next)) {
+ if (PTR_ERR(dssdev->next) != -EPROBE_DEFER)
+ dev_err(&pdev->dev, "failed to find video sink\n");
+ return PTR_ERR(dssdev->next);
}
+ omapdss_device_register(dssdev);
+
return 0;
}
@@ -348,7 +221,9 @@ static int __exit tpd_remove(struct platform_device *pdev)
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- omapdss_unregister_output(&ddata->dssdev);
+ if (dssdev->next)
+ omapdss_device_put(dssdev->next);
+ omapdss_device_unregister(&ddata->dssdev);
WARN_ON(omapdss_device_is_enabled(dssdev));
if (omapdss_device_is_enabled(dssdev))
@@ -356,7 +231,7 @@ static int __exit tpd_remove(struct platform_device *pdev)
WARN_ON(omapdss_device_is_connected(dssdev));
if (omapdss_device_is_connected(dssdev))
- tpd_disconnect(dssdev, dssdev->dst);
+ omapdss_device_disconnect(NULL, dssdev);
return 0;
}
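With the hpd_enabled gate gone, the tpd12s015 interrupt handler fires the hotplug callback whenever one is registered, serialized only by hpd_lock. A minimal sketch of that handoff, assuming the IRQ is requested as a threaded handler (which the mutex in the original handler implies; example_* is hypothetical):

static irqreturn_t example_hpd_isr(int irq, void *data)
{
	struct panel_drv_data *ddata = data;

	/* Safe only in a threaded handler: mutexes may sleep. */
	mutex_lock(&ddata->hpd_lock);
	if (ddata->hpd_cb) {
		enum drm_connector_status status;

		status = gpiod_get_value_cansleep(ddata->hpd_gpio)
		       ? connector_status_connected
		       : connector_status_disconnected;
		ddata->hpd_cb(ddata->hpd_cb_data, status);
	}
	mutex_unlock(&ddata->hpd_lock);

	return IRQ_HANDLED;
}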
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
index 6cbf570d6727..1f8161b041be 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dpi.c
@@ -23,7 +23,6 @@
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
struct videomode vm;
@@ -35,49 +34,21 @@ struct panel_drv_data {
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
-static int panel_dpi_connect(struct omap_dss_device *dssdev)
+static int panel_dpi_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in;
- int r;
-
- if (omapdss_device_is_connected(dssdev))
- return 0;
-
- in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(dssdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.dpi->connect(in, dssdev);
- if (r) {
- omap_dss_put_device(in);
- return r;
- }
-
- ddata->in = in;
return 0;
}
-static void panel_dpi_disconnect(struct omap_dss_device *dssdev)
+static void panel_dpi_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (!omapdss_device_is_connected(dssdev))
- return;
-
- in->ops.dpi->disconnect(in, dssdev);
-
- omap_dss_put_device(in);
- ddata->in = NULL;
}
static int panel_dpi_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
if (!omapdss_device_is_connected(dssdev))
@@ -86,15 +57,13 @@ static int panel_dpi_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.dpi->set_timings(in, &ddata->vm);
-
- r = in->ops.dpi->enable(in);
+ r = src->ops->enable(src);
if (r)
return r;
r = regulator_enable(ddata->vcc_supply);
if (r) {
- in->ops.dpi->disable(in);
+ src->ops->disable(src);
return r;
}
@@ -109,7 +78,7 @@ static int panel_dpi_enable(struct omap_dss_device *dssdev)
static void panel_dpi_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
if (!omapdss_device_is_enabled(dssdev))
return;
@@ -119,23 +88,11 @@ static void panel_dpi_disable(struct omap_dss_device *dssdev)
gpiod_set_value_cansleep(ddata->enable_gpio, 0);
regulator_disable(ddata->vcc_supply);
- in->ops.dpi->disable(in);
+ src->ops->disable(src);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void panel_dpi_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- ddata->vm = *vm;
- dssdev->panel.vm = *vm;
-
- in->ops.dpi->set_timings(in, vm);
-}
-
static void panel_dpi_get_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
@@ -144,25 +101,14 @@ static void panel_dpi_get_timings(struct omap_dss_device *dssdev,
*vm = ddata->vm;
}
-static int panel_dpi_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.dpi->check_timings(in, vm);
-}
-
-static struct omap_dss_driver panel_dpi_ops = {
+static const struct omap_dss_device_ops panel_dpi_ops = {
.connect = panel_dpi_connect,
.disconnect = panel_dpi_disconnect,
.enable = panel_dpi_enable,
.disable = panel_dpi_disable,
- .set_timings = panel_dpi_set_timings,
.get_timings = panel_dpi_get_timings,
- .check_timings = panel_dpi_check_timings,
};
static int panel_dpi_probe_of(struct platform_device *pdev)
@@ -227,16 +173,13 @@ static int panel_dpi_probe(struct platform_device *pdev)
dssdev = &ddata->dssdev;
dssdev->dev = &pdev->dev;
- dssdev->driver = &panel_dpi_ops;
+ dssdev->ops = &panel_dpi_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.vm = ddata->vm;
+ dssdev->of_ports = BIT(0);
- r = omapdss_register_display(dssdev);
- if (r) {
- dev_err(&pdev->dev, "Failed to register panel\n");
- return r;
- }
+ omapdss_display_init(dssdev);
+ omapdss_device_register(dssdev);
return 0;
}
@@ -246,10 +189,9 @@ static int __exit panel_dpi_remove(struct platform_device *pdev)
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- omapdss_unregister_display(dssdev);
+ omapdss_device_unregister(dssdev);
panel_dpi_disable(dssdev);
- panel_dpi_disconnect(dssdev);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
index 428de90fced1..29692a5217c5 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-dsi-cm.c
@@ -41,7 +41,6 @@
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
struct videomode vm;
@@ -142,11 +141,11 @@ static void hw_guard_wait(struct panel_drv_data *ddata)
static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data)
{
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
int r;
u8 buf[1];
- r = in->ops.dsi->dcs_read(in, ddata->channel, dcs_cmd, buf, 1);
+ r = src->ops->dsi.dcs_read(src, ddata->channel, dcs_cmd, buf, 1);
if (r < 0)
return r;
@@ -158,29 +157,30 @@ static int dsicm_dcs_read_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 *data)
static int dsicm_dcs_write_0(struct panel_drv_data *ddata, u8 dcs_cmd)
{
- struct omap_dss_device *in = ddata->in;
- return in->ops.dsi->dcs_write(in, ddata->channel, &dcs_cmd, 1);
+ struct omap_dss_device *src = ddata->dssdev.src;
+
+ return src->ops->dsi.dcs_write(src, ddata->channel, &dcs_cmd, 1);
}
static int dsicm_dcs_write_1(struct panel_drv_data *ddata, u8 dcs_cmd, u8 param)
{
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
u8 buf[2] = { dcs_cmd, param };
- return in->ops.dsi->dcs_write(in, ddata->channel, buf, 2);
+ return src->ops->dsi.dcs_write(src, ddata->channel, buf, 2);
}
static int dsicm_sleep_in(struct panel_drv_data *ddata)
{
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
u8 cmd;
int r;
hw_guard_wait(ddata);
cmd = MIPI_DCS_ENTER_SLEEP_MODE;
- r = in->ops.dsi->dcs_write_nosync(in, ddata->channel, &cmd, 1);
+ r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, &cmd, 1);
if (r)
return r;
@@ -228,7 +228,7 @@ static int dsicm_get_id(struct panel_drv_data *ddata, u8 *id1, u8 *id2, u8 *id3)
static int dsicm_set_update_window(struct panel_drv_data *ddata,
u16 x, u16 y, u16 w, u16 h)
{
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
int r;
u16 x1 = x;
u16 x2 = x + w - 1;
@@ -242,7 +242,7 @@ static int dsicm_set_update_window(struct panel_drv_data *ddata,
buf[3] = (x2 >> 8) & 0xff;
buf[4] = (x2 >> 0) & 0xff;
- r = in->ops.dsi->dcs_write_nosync(in, ddata->channel, buf, sizeof(buf));
+ r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, buf, sizeof(buf));
if (r)
return r;
@@ -252,11 +252,11 @@ static int dsicm_set_update_window(struct panel_drv_data *ddata,
buf[3] = (y2 >> 8) & 0xff;
buf[4] = (y2 >> 0) & 0xff;
- r = in->ops.dsi->dcs_write_nosync(in, ddata->channel, buf, sizeof(buf));
+ r = src->ops->dsi.dcs_write_nosync(src, ddata->channel, buf, sizeof(buf));
if (r)
return r;
- in->ops.dsi->bta_sync(in, ddata->channel);
+ src->ops->dsi.bta_sync(src, ddata->channel);
return r;
}
@@ -275,7 +275,7 @@ static void dsicm_cancel_ulps_work(struct panel_drv_data *ddata)
static int dsicm_enter_ulps(struct panel_drv_data *ddata)
{
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
int r;
if (ddata->ulps_enabled)
@@ -290,7 +290,7 @@ static int dsicm_enter_ulps(struct panel_drv_data *ddata)
if (ddata->ext_te_gpio)
disable_irq(gpiod_to_irq(ddata->ext_te_gpio));
- in->ops.dsi->disable(in, false, true);
+ src->ops->dsi.disable(src, false, true);
ddata->ulps_enabled = true;
@@ -309,19 +309,19 @@ err:
static int dsicm_exit_ulps(struct panel_drv_data *ddata)
{
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
int r;
if (!ddata->ulps_enabled)
return 0;
- r = in->ops.dsi->enable(in);
+ r = src->ops->enable(src);
if (r) {
dev_err(&ddata->pdev->dev, "failed to enable DSI\n");
goto err1;
}
- in->ops.dsi->enable_hs(in, ddata->channel, true);
+ src->ops->dsi.enable_hs(src, ddata->channel, true);
r = _dsicm_enable_te(ddata, true);
if (r) {
@@ -366,7 +366,7 @@ static int dsicm_wake_up(struct panel_drv_data *ddata)
static int dsicm_bl_update_status(struct backlight_device *dev)
{
struct panel_drv_data *ddata = dev_get_drvdata(&dev->dev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
int r = 0;
int level;
@@ -381,13 +381,13 @@ static int dsicm_bl_update_status(struct backlight_device *dev)
mutex_lock(&ddata->lock);
if (ddata->enabled) {
- in->ops.dsi->bus_lock(in);
+ src->ops->dsi.bus_lock(src);
r = dsicm_wake_up(ddata);
if (!r)
r = dsicm_dcs_write_1(ddata, DCS_BRIGHTNESS, level);
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
}
mutex_unlock(&ddata->lock);
@@ -414,21 +414,21 @@ static ssize_t dsicm_num_errors_show(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
u8 errors = 0;
int r;
mutex_lock(&ddata->lock);
if (ddata->enabled) {
- in->ops.dsi->bus_lock(in);
+ src->ops->dsi.bus_lock(src);
r = dsicm_wake_up(ddata);
if (!r)
r = dsicm_dcs_read_1(ddata, DCS_READ_NUM_ERRORS,
&errors);
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
} else {
r = -ENODEV;
}
@@ -446,20 +446,20 @@ static ssize_t dsicm_hw_revision_show(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
u8 id1, id2, id3;
int r;
mutex_lock(&ddata->lock);
if (ddata->enabled) {
- in->ops.dsi->bus_lock(in);
+ src->ops->dsi.bus_lock(src);
r = dsicm_wake_up(ddata);
if (!r)
r = dsicm_get_id(ddata, &id1, &id2, &id3);
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
} else {
r = -ENODEV;
}
@@ -478,7 +478,7 @@ static ssize_t dsicm_store_ulps(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
unsigned long t;
int r;
@@ -489,14 +489,14 @@ static ssize_t dsicm_store_ulps(struct device *dev,
mutex_lock(&ddata->lock);
if (ddata->enabled) {
- in->ops.dsi->bus_lock(in);
+ src->ops->dsi.bus_lock(src);
if (t)
r = dsicm_enter_ulps(ddata);
else
r = dsicm_wake_up(ddata);
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
}
mutex_unlock(&ddata->lock);
@@ -528,7 +528,7 @@ static ssize_t dsicm_store_ulps_timeout(struct device *dev,
{
struct platform_device *pdev = to_platform_device(dev);
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
unsigned long t;
int r;
@@ -541,9 +541,9 @@ static ssize_t dsicm_store_ulps_timeout(struct device *dev,
if (ddata->enabled) {
/* dsicm_wake_up will restart the timer */
- in->ops.dsi->bus_lock(in);
+ src->ops->dsi.bus_lock(src);
r = dsicm_wake_up(ddata);
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
}
mutex_unlock(&ddata->lock);
@@ -603,7 +603,7 @@ static void dsicm_hw_reset(struct panel_drv_data *ddata)
static int dsicm_power_on(struct panel_drv_data *ddata)
{
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
u8 id1, id2, id3;
int r;
struct omap_dss_dsi_config dsi_config = {
@@ -635,7 +635,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
}
if (ddata->pin_config.num_pins > 0) {
- r = in->ops.dsi->configure_pins(in, &ddata->pin_config);
+ r = src->ops->dsi.configure_pins(src, &ddata->pin_config);
if (r) {
dev_err(&ddata->pdev->dev,
"failed to configure DSI pins\n");
@@ -643,13 +643,13 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
}
}
- r = in->ops.dsi->set_config(in, &dsi_config);
+ r = src->ops->dsi.set_config(src, &dsi_config);
if (r) {
dev_err(&ddata->pdev->dev, "failed to configure DSI\n");
goto err_vddi;
}
- r = in->ops.dsi->enable(in);
+ r = src->ops->enable(src);
if (r) {
dev_err(&ddata->pdev->dev, "failed to enable DSI\n");
goto err_vddi;
@@ -657,7 +657,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
dsicm_hw_reset(ddata);
- in->ops.dsi->enable_hs(in, ddata->channel, false);
+ src->ops->dsi.enable_hs(src, ddata->channel, false);
r = dsicm_sleep_out(ddata);
if (r)
@@ -689,7 +689,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
if (r)
goto err;
- r = in->ops.dsi->enable_video_output(in, ddata->channel);
+ r = src->ops->dsi.enable_video_output(src, ddata->channel);
if (r)
goto err;
@@ -701,7 +701,7 @@ static int dsicm_power_on(struct panel_drv_data *ddata)
ddata->intro_printed = true;
}
- in->ops.dsi->enable_hs(in, ddata->channel, true);
+ src->ops->dsi.enable_hs(src, ddata->channel, true);
return 0;
err:
@@ -709,7 +709,7 @@ err:
dsicm_hw_reset(ddata);
- in->ops.dsi->disable(in, true, false);
+ src->ops->dsi.disable(src, true, false);
err_vddi:
if (ddata->vddi)
regulator_disable(ddata->vddi);
@@ -722,10 +722,10 @@ err_vpnl:
static void dsicm_power_off(struct panel_drv_data *ddata)
{
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
int r;
- in->ops.dsi->disable_video_output(in, ddata->channel);
+ src->ops->dsi.disable_video_output(src, ddata->channel);
r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_DISPLAY_OFF);
if (!r)
@@ -737,7 +737,7 @@ static void dsicm_power_off(struct panel_drv_data *ddata)
dsicm_hw_reset(ddata);
}
- in->ops.dsi->disable(in, true, false);
+ src->ops->dsi.disable(src, true, false);
if (ddata->vddi)
regulator_disable(ddata->vddi);
@@ -756,71 +756,41 @@ static int dsicm_panel_reset(struct panel_drv_data *ddata)
return dsicm_power_on(ddata);
}
-static int dsicm_connect(struct omap_dss_device *dssdev)
+static int dsicm_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
+ struct panel_drv_data *ddata = to_panel_data(dst);
struct device *dev = &ddata->pdev->dev;
- struct omap_dss_device *in;
int r;
- if (omapdss_device_is_connected(dssdev))
- return 0;
-
- in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(dssdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.dsi->connect(in, dssdev);
- if (r) {
- dev_err(dev, "Failed to connect to video source\n");
- goto err_connect;
- }
-
- r = in->ops.dsi->request_vc(in, &ddata->channel);
+ r = src->ops->dsi.request_vc(src, &ddata->channel);
if (r) {
dev_err(dev, "failed to get virtual channel\n");
- goto err_req_vc;
+ return r;
}
- r = in->ops.dsi->set_vc_id(in, ddata->channel, TCH);
+ r = src->ops->dsi.set_vc_id(src, ddata->channel, TCH);
if (r) {
dev_err(dev, "failed to set VC_ID\n");
- goto err_vc_id;
+ src->ops->dsi.release_vc(src, ddata->channel);
+ return r;
}
- ddata->in = in;
return 0;
-
-err_vc_id:
- in->ops.dsi->release_vc(in, ddata->channel);
-err_req_vc:
- in->ops.dsi->disconnect(in, dssdev);
-err_connect:
- omap_dss_put_device(in);
- return r;
}
-static void dsicm_disconnect(struct omap_dss_device *dssdev)
+static void dsicm_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (!omapdss_device_is_connected(dssdev))
- return;
-
- in->ops.dsi->release_vc(in, ddata->channel);
- in->ops.dsi->disconnect(in, dssdev);
+ struct panel_drv_data *ddata = to_panel_data(dst);
- omap_dss_put_device(in);
- ddata->in = NULL;
+ src->ops->dsi.release_vc(src, ddata->channel);
}
static int dsicm_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
dev_dbg(&ddata->pdev->dev, "enable\n");
@@ -837,11 +807,11 @@ static int dsicm_enable(struct omap_dss_device *dssdev)
goto err;
}
- in->ops.dsi->bus_lock(in);
+ src->ops->dsi.bus_lock(src);
r = dsicm_power_on(ddata);
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
if (r)
goto err;
@@ -862,7 +832,7 @@ err:
static void dsicm_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
dev_dbg(&ddata->pdev->dev, "disable\n");
@@ -873,7 +843,7 @@ static void dsicm_disable(struct omap_dss_device *dssdev)
dsicm_cancel_ulps_work(ddata);
- in->ops.dsi->bus_lock(in);
+ src->ops->dsi.bus_lock(src);
if (omapdss_device_is_enabled(dssdev)) {
r = dsicm_wake_up(ddata);
@@ -881,7 +851,7 @@ static void dsicm_disable(struct omap_dss_device *dssdev)
dsicm_power_off(ddata);
}
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
@@ -891,16 +861,16 @@ static void dsicm_disable(struct omap_dss_device *dssdev)
static void dsicm_framedone_cb(int err, void *data)
{
struct panel_drv_data *ddata = data;
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
dev_dbg(&ddata->pdev->dev, "framedone, err %d\n", err);
- in->ops.dsi->bus_unlock(ddata->in);
+ src->ops->dsi.bus_unlock(src);
}
static irqreturn_t dsicm_te_isr(int irq, void *data)
{
struct panel_drv_data *ddata = data;
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
int old;
int r;
@@ -909,7 +879,7 @@ static irqreturn_t dsicm_te_isr(int irq, void *data)
if (old) {
cancel_delayed_work(&ddata->te_timeout_work);
- r = in->ops.dsi->update(in, ddata->channel, dsicm_framedone_cb,
+ r = src->ops->dsi.update(src, ddata->channel, dsicm_framedone_cb,
ddata);
if (r)
goto err;
@@ -918,7 +888,7 @@ static irqreturn_t dsicm_te_isr(int irq, void *data)
return IRQ_HANDLED;
err:
dev_err(&ddata->pdev->dev, "start update failed\n");
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
return IRQ_HANDLED;
}
@@ -926,25 +896,25 @@ static void dsicm_te_timeout_work_callback(struct work_struct *work)
{
struct panel_drv_data *ddata = container_of(work, struct panel_drv_data,
te_timeout_work.work);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
dev_err(&ddata->pdev->dev, "TE not received for 250ms!\n");
atomic_set(&ddata->do_update, 0);
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
}
static int dsicm_update(struct omap_dss_device *dssdev,
u16 x, u16 y, u16 w, u16 h)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
dev_dbg(&ddata->pdev->dev, "update %d, %d, %d x %d\n", x, y, w, h);
mutex_lock(&ddata->lock);
- in->ops.dsi->bus_lock(in);
+ src->ops->dsi.bus_lock(src);
r = dsicm_wake_up(ddata);
if (r)
@@ -956,9 +926,8 @@ static int dsicm_update(struct omap_dss_device *dssdev,
}
/* XXX no need to send this every frame, but DSI breaks if not done */
- r = dsicm_set_update_window(ddata, 0, 0,
- dssdev->panel.vm.hactive,
- dssdev->panel.vm.vactive);
+ r = dsicm_set_update_window(ddata, 0, 0, ddata->vm.hactive,
+ ddata->vm.vactive);
if (r)
goto err;
@@ -967,17 +936,17 @@ static int dsicm_update(struct omap_dss_device *dssdev,
msecs_to_jiffies(250));
atomic_set(&ddata->do_update, 1);
} else {
- r = in->ops.dsi->update(in, ddata->channel, dsicm_framedone_cb,
+ r = src->ops->dsi.update(src, ddata->channel, dsicm_framedone_cb,
ddata);
if (r)
goto err;
}
/* note: no bus_unlock here. unlock is in framedone_cb */
mutex_unlock(&ddata->lock);
return 0;
err:
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
mutex_unlock(&ddata->lock);
return r;
}
@@ -985,13 +954,13 @@ err:
static int dsicm_sync(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
dev_dbg(&ddata->pdev->dev, "sync\n");
mutex_lock(&ddata->lock);
- in->ops.dsi->bus_lock(in);
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_lock(src);
+ src->ops->dsi.bus_unlock(src);
mutex_unlock(&ddata->lock);
dev_dbg(&ddata->pdev->dev, "sync done\n");
@@ -1001,7 +970,7 @@ static int dsicm_sync(struct omap_dss_device *dssdev)
static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable)
{
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = ddata->dssdev.src;
int r;
if (enable)
@@ -1010,7 +979,7 @@ static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable)
r = dsicm_dcs_write_0(ddata, MIPI_DCS_SET_TEAR_OFF);
if (!ddata->ext_te_gpio)
- in->ops.dsi->enable_te(in, enable);
+ src->ops->dsi.enable_te(src, enable);
/* possible panel bug */
msleep(100);
@@ -1021,7 +990,7 @@ static int _dsicm_enable_te(struct panel_drv_data *ddata, bool enable)
static int dsicm_enable_te(struct omap_dss_device *dssdev, bool enable)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
mutex_lock(&ddata->lock);
@@ -1029,7 +998,7 @@ static int dsicm_enable_te(struct omap_dss_device *dssdev, bool enable)
if (ddata->te_enabled == enable)
goto end;
- in->ops.dsi->bus_lock(in);
+ src->ops->dsi.bus_lock(src);
if (ddata->enabled) {
r = dsicm_wake_up(ddata);
@@ -1043,13 +1012,13 @@ static int dsicm_enable_te(struct omap_dss_device *dssdev, bool enable)
ddata->te_enabled = enable;
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
end:
mutex_unlock(&ddata->lock);
return 0;
err:
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
mutex_unlock(&ddata->lock);
return r;
@@ -1072,7 +1041,7 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev,
u16 x, u16 y, u16 w, u16 h)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
int first = 1;
int plen;
@@ -1089,9 +1058,9 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev,
}
size = min((u32)w * h * 3,
- dssdev->panel.vm.hactive * dssdev->panel.vm.vactive * 3);
+ ddata->vm.hactive * ddata->vm.vactive * 3);
- in->ops.dsi->bus_lock(in);
+ src->ops->dsi.bus_lock(src);
r = dsicm_wake_up(ddata);
if (r)
@@ -1107,7 +1076,7 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev,
dsicm_set_update_window(ddata, x, y, w, h);
- r = in->ops.dsi->set_max_rx_packet_size(in, ddata->channel, plen);
+ r = src->ops->dsi.set_max_rx_packet_size(src, ddata->channel, plen);
if (r)
goto err2;
@@ -1115,7 +1084,7 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev,
u8 dcs_cmd = first ? 0x2e : 0x3e;
first = 0;
- r = in->ops.dsi->dcs_read(in, ddata->channel, dcs_cmd,
+ r = src->ops->dsi.dcs_read(src, ddata->channel, dcs_cmd,
buf + buf_used, size - buf_used);
if (r < 0) {
@@ -1141,9 +1110,9 @@ static int dsicm_memory_read(struct omap_dss_device *dssdev,
r = buf_used;
err3:
- in->ops.dsi->set_max_rx_packet_size(in, ddata->channel, 1);
+ src->ops->dsi.set_max_rx_packet_size(src, ddata->channel, 1);
err2:
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
err1:
mutex_unlock(&ddata->lock);
return r;
@@ -1154,7 +1123,7 @@ static void dsicm_ulps_work(struct work_struct *work)
struct panel_drv_data *ddata = container_of(work, struct panel_drv_data,
ulps_work.work);
struct omap_dss_device *dssdev = &ddata->dssdev;
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
mutex_lock(&ddata->lock);
@@ -1163,11 +1132,11 @@ static void dsicm_ulps_work(struct work_struct *work)
return;
}
- in->ops.dsi->bus_lock(in);
+ src->ops->dsi.bus_lock(src);
dsicm_enter_ulps(ddata);
- in->ops.dsi->bus_unlock(in);
+ src->ops->dsi.bus_unlock(src);
mutex_unlock(&ddata->lock);
}
@@ -1210,18 +1179,21 @@ static void dsicm_get_size(struct omap_dss_device *dssdev,
*height = ddata->height_mm;
}
-static struct omap_dss_driver dsicm_ops = {
+static const struct omap_dss_device_ops dsicm_ops = {
.connect = dsicm_connect,
.disconnect = dsicm_disconnect,
.enable = dsicm_enable,
.disable = dsicm_disable,
+ .get_timings = dsicm_get_timings,
+ .check_timings = dsicm_check_timings,
+};
+
+static const struct omap_dss_driver dsicm_dss_driver = {
.update = dsicm_update,
.sync = dsicm_sync,
- .get_timings = dsicm_get_timings,
- .check_timings = dsicm_check_timings,
.get_size = dsicm_get_size,
.enable_te = dsicm_enable_te,
@@ -1330,20 +1302,17 @@ static int dsicm_probe(struct platform_device *pdev)
dssdev = &ddata->dssdev;
dssdev->dev = dev;
- dssdev->driver = &dsicm_ops;
- dssdev->panel.vm = ddata->vm;
+ dssdev->ops = &dsicm_ops;
+ dssdev->driver = &dsicm_dss_driver;
dssdev->type = OMAP_DISPLAY_TYPE_DSI;
dssdev->owner = THIS_MODULE;
+ dssdev->of_ports = BIT(0);
- dssdev->panel.dsi_pix_fmt = OMAP_DSS_DSI_FMT_RGB888;
dssdev->caps = OMAP_DSS_DISPLAY_CAP_MANUAL_UPDATE |
OMAP_DSS_DISPLAY_CAP_TEAR_ELIM;
- r = omapdss_register_display(dssdev);
- if (r) {
- dev_err(dev, "Failed to register panel\n");
- goto err_reg;
- }
+ omapdss_display_init(dssdev);
+ omapdss_device_register(dssdev);
mutex_init(&ddata->lock);
@@ -1414,10 +1383,10 @@ static int __exit dsicm_remove(struct platform_device *pdev)
dev_dbg(&pdev->dev, "remove\n");
- omapdss_unregister_display(dssdev);
+ omapdss_device_unregister(dssdev);
dsicm_disable(dssdev);
- dsicm_disconnect(dssdev);
+ omapdss_device_disconnect(dssdev->src, dssdev);
sysfs_remove_group(&pdev->dev.kobj, &dsicm_attr_group);
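The panel-dsi-cm conversion is mechanical — every in->ops.dsi->foo(in, ...) becomes src->ops->dsi.foo(src, ...) with src taken from dssdev->src — but the locking convention it preserves is worth spelling out: every command-mode access brackets the shared DSI bus with bus_lock()/bus_unlock(), except a frame update, which keeps the bus locked until the frame-done callback releases it. A minimal sketch of the bracketed case, using only ops that appear in this patch (example_* is hypothetical):

static int example_locked_dcs_write(struct panel_drv_data *ddata, u8 cmd)
{
	struct omap_dss_device *src = ddata->dssdev.src;
	int r;

	src->ops->dsi.bus_lock(src);
	r = src->ops->dsi.dcs_write(src, ddata->channel, &cmd, 1);
	src->ops->dsi.bus_unlock(src);

	return r;
}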
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
index 754197099440..f6ef8ff964dd 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-lgphilips-lb035q02.c
@@ -33,19 +33,11 @@ static const struct videomode lb035q02_vm = {
.vfront_porch = 4,
.vback_porch = 18,
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
- DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE |
- DISPLAY_FLAGS_PIXDATA_POSEDGE,
- /*
- * Note: According to the panel documentation:
- * DE is active LOW
- * DATA needs to be driven on the FALLING edge
- */
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
struct spi_device *spi;
@@ -116,51 +108,25 @@ static void init_lb035q02_panel(struct spi_device *spi)
lb035q02_write_reg(spi, 0x3b, 0x0806);
}
-static int lb035q02_connect(struct omap_dss_device *dssdev)
+static int lb035q02_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in;
- int r;
-
- if (omapdss_device_is_connected(dssdev))
- return 0;
-
- in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(dssdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.dpi->connect(in, dssdev);
- if (r) {
- omap_dss_put_device(in);
- return r;
- }
+ struct panel_drv_data *ddata = to_panel_data(dst);
init_lb035q02_panel(ddata->spi);
- ddata->in = in;
return 0;
}
-static void lb035q02_disconnect(struct omap_dss_device *dssdev)
+static void lb035q02_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (!omapdss_device_is_connected(dssdev))
- return;
-
- in->ops.dpi->disconnect(in, dssdev);
-
- omap_dss_put_device(in);
- ddata->in = NULL;
}
static int lb035q02_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
if (!omapdss_device_is_connected(dssdev))
@@ -169,9 +135,7 @@ static int lb035q02_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.dpi->set_timings(in, &ddata->vm);
-
- r = in->ops.dpi->enable(in);
+ r = src->ops->enable(src);
if (r)
return r;
@@ -186,7 +150,7 @@ static int lb035q02_enable(struct omap_dss_device *dssdev)
static void lb035q02_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
if (!omapdss_device_is_enabled(dssdev))
return;
@@ -194,23 +158,11 @@ static void lb035q02_disable(struct omap_dss_device *dssdev)
if (ddata->enable_gpio)
gpiod_set_value_cansleep(ddata->enable_gpio, 0);
- in->ops.dpi->disable(in);
+ src->ops->disable(src);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void lb035q02_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- ddata->vm = *vm;
- dssdev->panel.vm = *vm;
-
- in->ops.dpi->set_timings(in, vm);
-}
-
static void lb035q02_get_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
@@ -219,25 +171,14 @@ static void lb035q02_get_timings(struct omap_dss_device *dssdev,
*vm = ddata->vm;
}
-static int lb035q02_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.dpi->check_timings(in, vm);
-}
-
-static struct omap_dss_driver lb035q02_ops = {
+static const struct omap_dss_device_ops lb035q02_ops = {
.connect = lb035q02_connect,
.disconnect = lb035q02_disconnect,
.enable = lb035q02_enable,
.disable = lb035q02_disable,
- .set_timings = lb035q02_set_timings,
.get_timings = lb035q02_get_timings,
- .check_timings = lb035q02_check_timings,
};
static int lb035q02_probe_of(struct spi_device *spi)
@@ -278,16 +219,21 @@ static int lb035q02_panel_spi_probe(struct spi_device *spi)
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
- dssdev->driver = &lb035q02_ops;
+ dssdev->ops = &lb035q02_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.vm = ddata->vm;
+ dssdev->of_ports = BIT(0);
- r = omapdss_register_display(dssdev);
- if (r) {
- dev_err(&spi->dev, "Failed to register panel\n");
- return r;
- }
+ /*
+ * Note: According to the panel documentation:
+ * DE is active LOW
+ * DATA needs to be driven on the FALLING edge
+ */
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE
+ | DRM_BUS_FLAG_PIXDATA_POSEDGE;
+
+ omapdss_display_init(dssdev);
+ omapdss_device_register(dssdev);
return 0;
}
@@ -297,10 +243,9 @@ static int lb035q02_panel_spi_remove(struct spi_device *spi)
struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- omapdss_unregister_display(dssdev);
+ omapdss_device_unregister(dssdev);
lb035q02_disable(dssdev);
- lb035q02_disconnect(dssdev);
return 0;
}
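The lb035q02 hunks (and the nec-nl8048 and sharp-ls ones below) also split the old videomode flags in two: the sync polarities stay in struct videomode, while the DE level and clock-edge sampling move to dssdev->bus_flags in DRM terms. A sketch of the resulting split, mirroring the values in this patch:

	/* struct videomode keeps only the sync polarities... */
	vm.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW;

	/* ...while bus sampling behaviour is advertised separately. */
	dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH
			  | DRM_BUS_FLAG_SYNC_NEGEDGE
			  | DRM_BUS_FLAG_PIXDATA_POSEDGE;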
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
index 9a3b27fa5cb5..f445de6369f7 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-nec-nl8048hl11.c
@@ -11,22 +11,19 @@
* (at your option) any later version.
*/
-#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/spi/spi.h>
#include <linux/gpio/consumer.h>
-#include <linux/of_gpio.h>
+#include <linux/module.h>
+#include <linux/spi/spi.h>
#include "../dss/omapdss.h"
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
struct videomode vm;
- int res_gpio;
- int qvga_gpio;
+ struct gpio_desc *res_gpio;
struct spi_device *spi;
};
@@ -74,9 +71,7 @@ static const struct videomode nec_8048_panel_vm = {
.vsync_len = 1,
.vback_porch = 4,
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
- DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE |
- DISPLAY_FLAGS_PIXDATA_POSEDGE,
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -112,49 +107,21 @@ static int init_nec_8048_wvga_lcd(struct spi_device *spi)
return 0;
}
-static int nec_8048_connect(struct omap_dss_device *dssdev)
+static int nec_8048_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in;
- int r;
-
- if (omapdss_device_is_connected(dssdev))
- return 0;
-
- in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(dssdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.dpi->connect(in, dssdev);
- if (r) {
- omap_dss_put_device(in);
- return r;
- }
-
- ddata->in = in;
return 0;
}
-static void nec_8048_disconnect(struct omap_dss_device *dssdev)
+static void nec_8048_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (!omapdss_device_is_connected(dssdev))
- return;
-
- in->ops.dpi->disconnect(in, dssdev);
-
- omap_dss_put_device(in);
- ddata->in = NULL;
}
static int nec_8048_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
if (!omapdss_device_is_connected(dssdev))
@@ -163,14 +130,11 @@ static int nec_8048_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.dpi->set_timings(in, &ddata->vm);
-
- r = in->ops.dpi->enable(in);
+ r = src->ops->enable(src);
if (r)
return r;
- if (gpio_is_valid(ddata->res_gpio))
- gpio_set_value_cansleep(ddata->res_gpio, 1);
+ gpiod_set_value_cansleep(ddata->res_gpio, 1);
dssdev->state = OMAP_DSS_DISPLAY_ACTIVE;
@@ -180,31 +144,18 @@ static int nec_8048_enable(struct omap_dss_device *dssdev)
static void nec_8048_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
if (!omapdss_device_is_enabled(dssdev))
return;
- if (gpio_is_valid(ddata->res_gpio))
- gpio_set_value_cansleep(ddata->res_gpio, 0);
+ gpiod_set_value_cansleep(ddata->res_gpio, 0);
- in->ops.dpi->disable(in);
+ src->ops->disable(src);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void nec_8048_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- ddata->vm = *vm;
- dssdev->panel.vm = *vm;
-
- in->ops.dpi->set_timings(in, vm);
-}
-
static void nec_8048_get_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
@@ -213,50 +164,21 @@ static void nec_8048_get_timings(struct omap_dss_device *dssdev,
*vm = ddata->vm;
}
-static int nec_8048_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.dpi->check_timings(in, vm);
-}
-
-static struct omap_dss_driver nec_8048_ops = {
+static const struct omap_dss_device_ops nec_8048_ops = {
.connect = nec_8048_connect,
.disconnect = nec_8048_disconnect,
.enable = nec_8048_enable,
.disable = nec_8048_disable,
- .set_timings = nec_8048_set_timings,
.get_timings = nec_8048_get_timings,
- .check_timings = nec_8048_check_timings,
};
-static int nec_8048_probe_of(struct spi_device *spi)
-{
- struct device_node *node = spi->dev.of_node;
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- int gpio;
-
- gpio = of_get_named_gpio(node, "reset-gpios", 0);
- if (!gpio_is_valid(gpio)) {
- dev_err(&spi->dev, "failed to parse enable gpio\n");
- return gpio;
- }
- ddata->res_gpio = gpio;
-
- /* XXX the panel spec doesn't mention any QVGA pin?? */
- ddata->qvga_gpio = -ENOENT;
-
- return 0;
-}
-
static int nec_8048_probe(struct spi_device *spi)
{
struct panel_drv_data *ddata;
struct omap_dss_device *dssdev;
+ struct gpio_desc *gpio;
int r;
dev_dbg(&spi->dev, "%s\n", __func__);
@@ -280,38 +202,27 @@ static int nec_8048_probe(struct spi_device *spi)
ddata->spi = spi;
- r = nec_8048_probe_of(spi);
- if (r)
- return r;
-
- if (gpio_is_valid(ddata->qvga_gpio)) {
- r = devm_gpio_request_one(&spi->dev, ddata->qvga_gpio,
- GPIOF_OUT_INIT_HIGH, "lcd QVGA");
- if (r)
- return r;
+ gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio)) {
+ dev_err(&spi->dev, "failed to get reset gpio\n");
+ return PTR_ERR(gpio);
}
- if (gpio_is_valid(ddata->res_gpio)) {
- r = devm_gpio_request_one(&spi->dev, ddata->res_gpio,
- GPIOF_OUT_INIT_LOW, "lcd RES");
- if (r)
- return r;
- }
+ ddata->res_gpio = gpio;
ddata->vm = nec_8048_panel_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
- dssdev->driver = &nec_8048_ops;
+ dssdev->ops = &nec_8048_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.vm = ddata->vm;
+ dssdev->of_ports = BIT(0);
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE
+ | DRM_BUS_FLAG_PIXDATA_POSEDGE;
- r = omapdss_register_display(dssdev);
- if (r) {
- dev_err(&spi->dev, "Failed to register panel\n");
- return r;
- }
+ omapdss_display_init(dssdev);
+ omapdss_device_register(dssdev);
return 0;
}
@@ -323,10 +234,9 @@ static int nec_8048_remove(struct spi_device *spi)
dev_dbg(&ddata->spi->dev, "%s\n", __func__);
- omapdss_unregister_display(dssdev);
+ omapdss_device_unregister(dssdev);
nec_8048_disable(dssdev);
- nec_8048_disconnect(dssdev);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
index bb5b680cabfe..64b1369cb274 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sharp-ls037v7dw01.c
@@ -21,7 +21,6 @@
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
struct regulator *vcc;
struct videomode vm;
@@ -47,60 +46,26 @@ static const struct videomode sharp_ls_vm = {
.vfront_porch = 1,
.vback_porch = 1,
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
- DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE |
- DISPLAY_FLAGS_PIXDATA_POSEDGE,
- /*
- * Note: According to the panel documentation:
- * DATA needs to be driven on the FALLING edge
- */
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
-static int sharp_ls_connect(struct omap_dss_device *dssdev)
+static int sharp_ls_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in;
- int r;
-
- if (omapdss_device_is_connected(dssdev))
- return 0;
-
- in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(dssdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.dpi->connect(in, dssdev);
- if (r) {
- omap_dss_put_device(in);
- return r;
- }
-
- ddata->in = in;
return 0;
}
-static void sharp_ls_disconnect(struct omap_dss_device *dssdev)
+static void sharp_ls_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (!omapdss_device_is_connected(dssdev))
- return;
-
- in->ops.dpi->disconnect(in, dssdev);
-
- omap_dss_put_device(in);
- ddata->in = NULL;
}
static int sharp_ls_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
if (!omapdss_device_is_connected(dssdev))
@@ -109,15 +74,13 @@ static int sharp_ls_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.dpi->set_timings(in, &ddata->vm);
-
if (ddata->vcc) {
r = regulator_enable(ddata->vcc);
if (r != 0)
return r;
}
- r = in->ops.dpi->enable(in);
+ r = src->ops->enable(src);
if (r) {
regulator_disable(ddata->vcc);
return r;
@@ -140,7 +103,7 @@ static int sharp_ls_enable(struct omap_dss_device *dssdev)
static void sharp_ls_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
if (!omapdss_device_is_enabled(dssdev))
return;
@@ -155,7 +118,7 @@ static void sharp_ls_disable(struct omap_dss_device *dssdev)
msleep(100);
- in->ops.dpi->disable(in);
+ src->ops->disable(src);
if (ddata->vcc)
regulator_disable(ddata->vcc);
@@ -163,18 +126,6 @@ static void sharp_ls_disable(struct omap_dss_device *dssdev)
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void sharp_ls_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- ddata->vm = *vm;
- dssdev->panel.vm = *vm;
-
- in->ops.dpi->set_timings(in, vm);
-}
-
static void sharp_ls_get_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
@@ -183,25 +134,14 @@ static void sharp_ls_get_timings(struct omap_dss_device *dssdev,
*vm = ddata->vm;
}
-static int sharp_ls_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.dpi->check_timings(in, vm);
-}
-
-static struct omap_dss_driver sharp_ls_ops = {
+static const struct omap_dss_device_ops sharp_ls_ops = {
.connect = sharp_ls_connect,
.disconnect = sharp_ls_disconnect,
.enable = sharp_ls_enable,
.disable = sharp_ls_disable,
- .set_timings = sharp_ls_set_timings,
.get_timings = sharp_ls_get_timings,
- .check_timings = sharp_ls_check_timings,
};
static int sharp_ls_get_gpio_of(struct device *dev, int index, int val,
@@ -278,16 +218,20 @@ static int sharp_ls_probe(struct platform_device *pdev)
dssdev = &ddata->dssdev;
dssdev->dev = &pdev->dev;
- dssdev->driver = &sharp_ls_ops;
+ dssdev->ops = &sharp_ls_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.vm = ddata->vm;
+ dssdev->of_ports = BIT(0);
- r = omapdss_register_display(dssdev);
- if (r) {
- dev_err(&pdev->dev, "Failed to register panel\n");
- return r;
- }
+ /*
+ * Note: According to the panel documentation:
+ * DATA needs to be driven on the FALLING edge
+ */
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE
+ | DRM_BUS_FLAG_PIXDATA_POSEDGE;
+
+ omapdss_display_init(dssdev);
+ omapdss_device_register(dssdev);
return 0;
}
@@ -297,10 +241,9 @@ static int __exit sharp_ls_remove(struct platform_device *pdev)
struct panel_drv_data *ddata = platform_get_drvdata(pdev);
struct omap_dss_device *dssdev = &ddata->dssdev;
- omapdss_unregister_display(dssdev);
+ omapdss_device_unregister(dssdev);
sharp_ls_disable(dssdev);
- sharp_ls_disconnect(dssdev);
return 0;
}
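
As in the nec-nl8048 change above, the DE level and latching-edge flags move
out of the videomode and into dssdev->bus_flags. A sketch of the mapping,
assuming a panel that keeps DE high and drives data on the rising pixel-clock
edge (names are illustrative):

/* Before: everything was carried in the videomode flags. */
static const struct videomode example_vm_old = {
	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
		 DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE |
		 DISPLAY_FLAGS_PIXDATA_POSEDGE,
};

/* After: only the sync polarities stay in the videomode... */
static const struct videomode example_vm_new = {
	.flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};

/* ...and the rest becomes DRM bus flags on the display device. */
static void example_init(struct omap_dss_device *dssdev)
{
	dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE
			  | DRM_BUS_FLAG_PIXDATA_POSEDGE;
}
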
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
index f34c06bb5bd7..e04663856b31 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-sony-acx565akm.c
@@ -20,17 +20,15 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/backlight.h>
+#include <linux/delay.h>
+#include <linux/gpio/consumer.h>
+#include <linux/jiffies.h>
#include <linux/kernel.h>
#include <linux/module.h>
#include <linux/platform_device.h>
-#include <linux/delay.h>
-#include <linux/spi/spi.h>
-#include <linux/jiffies.h>
#include <linux/sched.h>
-#include <linux/backlight.h>
-#include <linux/gpio/consumer.h>
-#include <linux/of.h>
-#include <linux/of_gpio.h>
+#include <linux/spi/spi.h>
#include "../dss/omapdss.h"
@@ -64,9 +62,8 @@
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
- int reset_gpio;
+ struct gpio_desc *reset_gpio;
struct videomode vm;
@@ -100,9 +97,7 @@ static const struct videomode acx565akm_panel_vm = {
.vsync_len = 3,
.vback_porch = 4,
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
- DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_NEGEDGE |
- DISPLAY_FLAGS_PIXDATA_POSEDGE,
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -507,56 +502,26 @@ static const struct attribute_group bldev_attr_group = {
.attrs = bldev_attrs,
};
-static int acx565akm_connect(struct omap_dss_device *dssdev)
+static int acx565akm_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in;
- int r;
-
- if (omapdss_device_is_connected(dssdev))
- return 0;
-
- in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(dssdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.sdi->connect(in, dssdev);
- if (r) {
- omap_dss_put_device(in);
- return r;
- }
-
- ddata->in = in;
return 0;
}
-static void acx565akm_disconnect(struct omap_dss_device *dssdev)
+static void acx565akm_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (!omapdss_device_is_connected(dssdev))
- return;
-
- in->ops.sdi->disconnect(in, dssdev);
-
- omap_dss_put_device(in);
- ddata->in = NULL;
}
static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
dev_dbg(&ddata->spi->dev, "%s\n", __func__);
- in->ops.sdi->set_timings(in, &ddata->vm);
-
- r = in->ops.sdi->enable(in);
+ r = src->ops->enable(src);
if (r) {
pr_err("%s sdi enable failed\n", __func__);
return r;
@@ -565,8 +530,8 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
/*FIXME tweak me */
msleep(50);
- if (gpio_is_valid(ddata->reset_gpio))
- gpio_set_value(ddata->reset_gpio, 1);
+ if (ddata->reset_gpio)
+ gpiod_set_value(ddata->reset_gpio, 1);
if (ddata->enabled) {
dev_dbg(&ddata->spi->dev, "panel already enabled\n");
@@ -597,7 +562,7 @@ static int acx565akm_panel_power_on(struct omap_dss_device *dssdev)
static void acx565akm_panel_power_off(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
dev_dbg(dssdev->dev, "%s\n", __func__);
@@ -615,13 +580,13 @@ static void acx565akm_panel_power_off(struct omap_dss_device *dssdev)
*/
msleep(50);
- if (gpio_is_valid(ddata->reset_gpio))
- gpio_set_value(ddata->reset_gpio, 0);
+ if (ddata->reset_gpio)
+ gpiod_set_value(ddata->reset_gpio, 0);
/* FIXME need to tweak this delay */
msleep(100);
- in->ops.sdi->disable(in);
+ src->ops->disable(src);
}
static int acx565akm_enable(struct omap_dss_device *dssdev)
@@ -664,18 +629,6 @@ static void acx565akm_disable(struct omap_dss_device *dssdev)
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void acx565akm_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- ddata->vm = *vm;
- dssdev->panel.vm = *vm;
-
- in->ops.sdi->set_timings(in, vm);
-}
-
static void acx565akm_get_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
@@ -684,37 +637,16 @@ static void acx565akm_get_timings(struct omap_dss_device *dssdev,
*vm = ddata->vm;
}
-static int acx565akm_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.sdi->check_timings(in, vm);
-}
-
-static struct omap_dss_driver acx565akm_ops = {
+static const struct omap_dss_device_ops acx565akm_ops = {
.connect = acx565akm_connect,
.disconnect = acx565akm_disconnect,
.enable = acx565akm_enable,
.disable = acx565akm_disable,
- .set_timings = acx565akm_set_timings,
.get_timings = acx565akm_get_timings,
- .check_timings = acx565akm_check_timings,
};
-static int acx565akm_probe_of(struct spi_device *spi)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- struct device_node *np = spi->dev.of_node;
-
- ddata->reset_gpio = of_get_named_gpio(np, "reset-gpios", 0);
-
- return 0;
-}
-
static int acx565akm_probe(struct spi_device *spi)
{
struct panel_drv_data *ddata;
@@ -722,6 +654,7 @@ static int acx565akm_probe(struct spi_device *spi)
struct backlight_device *bldev;
int max_brightness, brightness;
struct backlight_properties props;
+ struct gpio_desc *gpio;
int r;
dev_dbg(&spi->dev, "%s\n", __func__);
@@ -738,19 +671,16 @@ static int acx565akm_probe(struct spi_device *spi)
mutex_init(&ddata->mutex);
- r = acx565akm_probe_of(spi);
- if (r)
- return r;
-
- if (gpio_is_valid(ddata->reset_gpio)) {
- r = devm_gpio_request_one(&spi->dev, ddata->reset_gpio,
- GPIOF_OUT_INIT_LOW, "lcd reset");
- if (r)
- goto err_gpio;
+ gpio = devm_gpiod_get_optional(&spi->dev, "reset", GPIOD_OUT_LOW);
+ if (IS_ERR(gpio)) {
+ dev_err(&spi->dev, "failed to parse reset gpio\n");
+ return PTR_ERR(gpio);
}
- if (gpio_is_valid(ddata->reset_gpio))
- gpio_set_value(ddata->reset_gpio, 1);
+ ddata->reset_gpio = gpio;
+
+ if (ddata->reset_gpio)
+ gpiod_set_value(ddata->reset_gpio, 1);
/*
* After reset we have to wait 5 msec before the first
@@ -762,12 +692,12 @@ static int acx565akm_probe(struct spi_device *spi)
r = panel_detect(ddata);
- if (!ddata->enabled && gpio_is_valid(ddata->reset_gpio))
- gpio_set_value(ddata->reset_gpio, 0);
+ if (!ddata->enabled && ddata->reset_gpio)
+ gpiod_set_value(ddata->reset_gpio, 0);
if (r) {
dev_err(&spi->dev, "%s panel detect error\n", __func__);
- goto err_detect;
+ return r;
}
memset(&props, 0, sizeof(props));
@@ -777,17 +707,15 @@ static int acx565akm_probe(struct spi_device *spi)
bldev = backlight_device_register("acx565akm", &ddata->spi->dev,
ddata, &acx565akm_bl_ops, &props);
- if (IS_ERR(bldev)) {
- r = PTR_ERR(bldev);
- goto err_reg_bl;
- }
+ if (IS_ERR(bldev))
+ return PTR_ERR(bldev);
ddata->bl_dev = bldev;
if (ddata->has_cabc) {
r = sysfs_create_group(&bldev->dev.kobj, &bldev_attr_group);
if (r) {
dev_err(&bldev->dev,
"%s failed to create sysfs files\n", __func__);
- goto err_sysfs;
+ goto err_backlight_unregister;
}
ddata->cabc_mode = get_hw_cabc_mode(ddata);
}
@@ -809,26 +737,20 @@ static int acx565akm_probe(struct spi_device *spi)
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
- dssdev->driver = &acx565akm_ops;
+ dssdev->ops = &acx565akm_ops;
dssdev->type = OMAP_DISPLAY_TYPE_SDI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.vm = ddata->vm;
+ dssdev->of_ports = BIT(0);
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_NEGEDGE
+ | DRM_BUS_FLAG_PIXDATA_POSEDGE;
- r = omapdss_register_display(dssdev);
- if (r) {
- dev_err(&spi->dev, "Failed to register panel\n");
- goto err_reg;
- }
+ omapdss_display_init(dssdev);
+ omapdss_device_register(dssdev);
return 0;
-err_reg:
- sysfs_remove_group(&bldev->dev.kobj, &bldev_attr_group);
-err_sysfs:
+err_backlight_unregister:
backlight_device_unregister(bldev);
-err_reg_bl:
-err_detect:
-err_gpio:
return r;
}
@@ -842,10 +764,9 @@ static int acx565akm_remove(struct spi_device *spi)
sysfs_remove_group(&ddata->bl_dev->dev.kobj, &bldev_attr_group);
backlight_device_unregister(ddata->bl_dev);
- omapdss_unregister_display(dssdev);
+ omapdss_device_unregister(dssdev);
acx565akm_disable(dssdev);
- acx565akm_disconnect(dssdev);
return 0;
}
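
The acx565akm reset line may legitimately be absent, which is why the
conversion above uses the _optional variant. A sketch of the distinction,
with illustrative names:

static int example_probe(struct device *dev)
{
	struct gpio_desc *reset;

	reset = devm_gpiod_get_optional(dev, "reset", GPIOD_OUT_LOW);
	if (IS_ERR(reset))
		return PTR_ERR(reset);	/* real failure, e.g. -EPROBE_DEFER */

	/* A NULL descriptor simply means no reset-gpios property exists. */
	if (reset)
		gpiod_set_value(reset, 1);

	return 0;
}
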
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
index a1f1dc18407a..7ddc8c574a61 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td028ttec1.c
@@ -27,13 +27,11 @@
#include <linux/module.h>
#include <linux/delay.h>
#include <linux/spi/spi.h>
-#include <linux/gpio.h>
#include "../dss/omapdss.h"
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
struct videomode vm;
@@ -51,13 +49,7 @@ static const struct videomode td028ttec1_panel_vm = {
.vsync_len = 2,
.vback_porch = 2,
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
- DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE |
- DISPLAY_FLAGS_PIXDATA_NEGEDGE,
- /*
- * Note: According to the panel documentation:
- * SYNC needs to be driven on the FALLING edge
- */
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};
#define JBT_COMMAND 0x000
@@ -166,49 +158,21 @@ enum jbt_register {
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
-static int td028ttec1_panel_connect(struct omap_dss_device *dssdev)
+static int td028ttec1_panel_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in;
- int r;
-
- if (omapdss_device_is_connected(dssdev))
- return 0;
-
- in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(dssdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.dpi->connect(in, dssdev);
- if (r) {
- omap_dss_put_device(in);
- return r;
- }
-
- ddata->in = in;
return 0;
}
-static void td028ttec1_panel_disconnect(struct omap_dss_device *dssdev)
+static void td028ttec1_panel_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (!omapdss_device_is_connected(dssdev))
- return;
-
- in->ops.dpi->disconnect(in, dssdev);
-
- omap_dss_put_device(in);
- ddata->in = NULL;
}
static int td028ttec1_panel_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
if (!omapdss_device_is_connected(dssdev))
@@ -217,9 +181,7 @@ static int td028ttec1_panel_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.dpi->set_timings(in, &ddata->vm);
-
- r = in->ops.dpi->enable(in);
+ r = src->ops->enable(src);
if (r)
return r;
@@ -316,7 +278,7 @@ transfer_err:
static void td028ttec1_panel_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
if (!omapdss_device_is_enabled(dssdev))
return;
@@ -328,23 +290,11 @@ static void td028ttec1_panel_disable(struct omap_dss_device *dssdev)
jbt_ret_write_0(ddata, JBT_REG_SLEEP_IN);
jbt_reg_write_1(ddata, JBT_REG_POWER_ON_OFF, 0x00);
- in->ops.dpi->disable(in);
+ src->ops->disable(src);
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void td028ttec1_panel_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- ddata->vm = *vm;
- dssdev->panel.vm = *vm;
-
- in->ops.dpi->set_timings(in, vm);
-}
-
static void td028ttec1_panel_get_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
@@ -353,25 +303,14 @@ static void td028ttec1_panel_get_timings(struct omap_dss_device *dssdev,
*vm = ddata->vm;
}
-static int td028ttec1_panel_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.dpi->check_timings(in, vm);
-}
-
-static struct omap_dss_driver td028ttec1_ops = {
+static const struct omap_dss_device_ops td028ttec1_ops = {
.connect = td028ttec1_panel_connect,
.disconnect = td028ttec1_panel_disconnect,
.enable = td028ttec1_panel_enable,
.disable = td028ttec1_panel_disable,
- .set_timings = td028ttec1_panel_set_timings,
.get_timings = td028ttec1_panel_get_timings,
- .check_timings = td028ttec1_panel_check_timings,
};
static int td028ttec1_panel_probe(struct spi_device *spi)
@@ -403,16 +342,20 @@ static int td028ttec1_panel_probe(struct spi_device *spi)
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
- dssdev->driver = &td028ttec1_ops;
+ dssdev->ops = &td028ttec1_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.vm = ddata->vm;
+ dssdev->of_ports = BIT(0);
- r = omapdss_register_display(dssdev);
- if (r) {
- dev_err(&spi->dev, "Failed to register panel\n");
- return r;
- }
+ /*
+ * Note: According to the panel documentation:
+ * SYNC needs to be driven on the FALLING edge
+ */
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE
+ | DRM_BUS_FLAG_PIXDATA_NEGEDGE;
+
+ omapdss_display_init(dssdev);
+ omapdss_device_register(dssdev);
return 0;
}
@@ -424,10 +367,9 @@ static int td028ttec1_panel_remove(struct spi_device *spi)
dev_dbg(&ddata->spi_dev->dev, "%s\n", __func__);
- omapdss_unregister_display(dssdev);
+ omapdss_device_unregister(dssdev);
td028ttec1_panel_disable(dssdev);
- td028ttec1_panel_disconnect(dssdev);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
index c08e22b43447..8440fcb744d9 100644
--- a/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
+++ b/drivers/gpu/drm/omapdrm/displays/panel-tpo-td043mtea1.c
@@ -10,14 +10,13 @@
* (at your option) any later version.
*/
-#include <linux/module.h>
#include <linux/delay.h>
-#include <linux/spi/spi.h>
-#include <linux/regulator/consumer.h>
-#include <linux/gpio/consumer.h>
#include <linux/err.h>
+#include <linux/gpio/consumer.h>
+#include <linux/module.h>
+#include <linux/regulator/consumer.h>
#include <linux/slab.h>
-#include <linux/of_gpio.h>
+#include <linux/spi/spi.h>
#include "../dss/omapdss.h"
@@ -54,16 +53,14 @@ static const u16 tpo_td043_def_gamma[12] = {
struct panel_drv_data {
struct omap_dss_device dssdev;
- struct omap_dss_device *in;
struct videomode vm;
struct spi_device *spi;
struct regulator *vcc_reg;
- int nreset_gpio;
+ struct gpio_desc *reset_gpio;
u16 gamma[12];
u32 mode;
- u32 hmirror:1;
u32 vmirror:1;
u32 powered_on:1;
u32 spi_suspended:1;
@@ -84,13 +81,7 @@ static const struct videomode tpo_td043_vm = {
.vfront_porch = 39,
.vback_porch = 34,
- .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW |
- DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_SYNC_POSEDGE |
- DISPLAY_FLAGS_PIXDATA_NEGEDGE,
- /*
- * Note: According to the panel documentation:
- * SYNC needs to be driven on the FALLING edge
- */
+ .flags = DISPLAY_FLAGS_HSYNC_LOW | DISPLAY_FLAGS_VSYNC_LOW,
};
#define to_panel_data(p) container_of(p, struct panel_drv_data, dssdev)
@@ -152,22 +143,6 @@ static int tpo_td043_write_mirror(struct spi_device *spi, bool h, bool v)
return tpo_td043_write(spi, 4, reg4);
}
-static int tpo_td043_set_hmirror(struct omap_dss_device *dssdev, bool enable)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev);
-
- ddata->hmirror = enable;
- return tpo_td043_write_mirror(ddata->spi, ddata->hmirror,
- ddata->vmirror);
-}
-
-static bool tpo_td043_get_hmirror(struct omap_dss_device *dssdev)
-{
- struct panel_drv_data *ddata = dev_get_drvdata(dssdev->dev);
-
- return ddata->hmirror;
-}
-
static ssize_t tpo_td043_vmirror_show(struct device *dev,
struct device_attribute *attr, char *buf)
{
@@ -189,7 +164,7 @@ static ssize_t tpo_td043_vmirror_store(struct device *dev,
val = !!val;
- ret = tpo_td043_write_mirror(ddata->spi, ddata->hmirror, val);
+ ret = tpo_td043_write_mirror(ddata->spi, false, val);
if (ret < 0)
return ret;
@@ -300,16 +275,14 @@ static int tpo_td043_power_on(struct panel_drv_data *ddata)
/* wait for panel to stabilize */
msleep(160);
- if (gpio_is_valid(ddata->nreset_gpio))
- gpio_set_value(ddata->nreset_gpio, 1);
+ gpiod_set_value(ddata->reset_gpio, 0);
tpo_td043_write(ddata->spi, 2,
TPO_R02_MODE(ddata->mode) | TPO_R02_NCLK_RISING);
tpo_td043_write(ddata->spi, 3, TPO_R03_VAL_NORMAL);
tpo_td043_write(ddata->spi, 0x20, 0xf0);
tpo_td043_write(ddata->spi, 0x21, 0xf0);
- tpo_td043_write_mirror(ddata->spi, ddata->hmirror,
- ddata->vmirror);
+ tpo_td043_write_mirror(ddata->spi, false, ddata->vmirror);
tpo_td043_write_gamma(ddata->spi, ddata->gamma);
ddata->powered_on = 1;
@@ -324,8 +297,7 @@ static void tpo_td043_power_off(struct panel_drv_data *ddata)
tpo_td043_write(ddata->spi, 3,
TPO_R03_VAL_STANDBY | TPO_R03_EN_PWM);
- if (gpio_is_valid(ddata->nreset_gpio))
- gpio_set_value(ddata->nreset_gpio, 0);
+ gpiod_set_value(ddata->reset_gpio, 1);
/* wait for at least 2 vsyncs before cutting off power */
msleep(50);
@@ -337,49 +309,21 @@ static void tpo_td043_power_off(struct panel_drv_data *ddata)
ddata->powered_on = 0;
}
-static int tpo_td043_connect(struct omap_dss_device *dssdev)
+static int tpo_td043_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in;
- int r;
-
- if (omapdss_device_is_connected(dssdev))
- return 0;
-
- in = omapdss_of_find_source_for_first_ep(dssdev->dev->of_node);
- if (IS_ERR(in)) {
- dev_err(dssdev->dev, "failed to find video source\n");
- return PTR_ERR(in);
- }
-
- r = in->ops.dpi->connect(in, dssdev);
- if (r) {
- omap_dss_put_device(in);
- return r;
- }
-
- ddata->in = in;
return 0;
}
-static void tpo_td043_disconnect(struct omap_dss_device *dssdev)
+static void tpo_td043_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- if (!omapdss_device_is_connected(dssdev))
- return;
-
- in->ops.dpi->disconnect(in, dssdev);
-
- omap_dss_put_device(in);
- ddata->in = NULL;
}
static int tpo_td043_enable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
int r;
if (!omapdss_device_is_connected(dssdev))
@@ -388,9 +332,7 @@ static int tpo_td043_enable(struct omap_dss_device *dssdev)
if (omapdss_device_is_enabled(dssdev))
return 0;
- in->ops.dpi->set_timings(in, &ddata->vm);
-
- r = in->ops.dpi->enable(in);
+ r = src->ops->enable(src);
if (r)
return r;
@@ -401,7 +343,7 @@ static int tpo_td043_enable(struct omap_dss_device *dssdev)
if (!ddata->spi_suspended) {
r = tpo_td043_power_on(ddata);
if (r) {
- in->ops.dpi->disable(in);
+ src->ops->disable(src);
return r;
}
}
@@ -414,12 +356,12 @@ static int tpo_td043_enable(struct omap_dss_device *dssdev)
static void tpo_td043_disable(struct omap_dss_device *dssdev)
{
struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
+ struct omap_dss_device *src = dssdev->src;
if (!omapdss_device_is_enabled(dssdev))
return;
- in->ops.dpi->disable(in);
+ src->ops->disable(src);
if (!ddata->spi_suspended)
tpo_td043_power_off(ddata);
@@ -427,18 +369,6 @@ static void tpo_td043_disable(struct omap_dss_device *dssdev)
dssdev->state = OMAP_DSS_DISPLAY_DISABLED;
}
-static void tpo_td043_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- ddata->vm = *vm;
- dssdev->panel.vm = *vm;
-
- in->ops.dpi->set_timings(in, vm);
-}
-
static void tpo_td043_get_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
@@ -447,50 +377,21 @@ static void tpo_td043_get_timings(struct omap_dss_device *dssdev,
*vm = ddata->vm;
}
-static int tpo_td043_check_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct panel_drv_data *ddata = to_panel_data(dssdev);
- struct omap_dss_device *in = ddata->in;
-
- return in->ops.dpi->check_timings(in, vm);
-}
-
-static struct omap_dss_driver tpo_td043_ops = {
+static const struct omap_dss_device_ops tpo_td043_ops = {
.connect = tpo_td043_connect,
.disconnect = tpo_td043_disconnect,
.enable = tpo_td043_enable,
.disable = tpo_td043_disable,
- .set_timings = tpo_td043_set_timings,
.get_timings = tpo_td043_get_timings,
- .check_timings = tpo_td043_check_timings,
-
- .set_mirror = tpo_td043_set_hmirror,
- .get_mirror = tpo_td043_get_hmirror,
};
-static int tpo_td043_probe_of(struct spi_device *spi)
-{
- struct device_node *node = spi->dev.of_node;
- struct panel_drv_data *ddata = dev_get_drvdata(&spi->dev);
- int gpio;
-
- gpio = of_get_named_gpio(node, "reset-gpios", 0);
- if (!gpio_is_valid(gpio)) {
- dev_err(&spi->dev, "failed to parse enable gpio\n");
- return gpio;
- }
- ddata->nreset_gpio = gpio;
-
- return 0;
-}
-
static int tpo_td043_probe(struct spi_device *spi)
{
struct panel_drv_data *ddata;
struct omap_dss_device *dssdev;
+ struct gpio_desc *gpio;
int r;
dev_dbg(&spi->dev, "%s\n", __func__);
@@ -512,59 +413,49 @@ static int tpo_td043_probe(struct spi_device *spi)
ddata->spi = spi;
- r = tpo_td043_probe_of(spi);
- if (r)
- return r;
-
ddata->mode = TPO_R02_MODE_800x480;
memcpy(ddata->gamma, tpo_td043_def_gamma, sizeof(ddata->gamma));
ddata->vcc_reg = devm_regulator_get(&spi->dev, "vcc");
if (IS_ERR(ddata->vcc_reg)) {
dev_err(&spi->dev, "failed to get LCD VCC regulator\n");
- r = PTR_ERR(ddata->vcc_reg);
- goto err_regulator;
+ return PTR_ERR(ddata->vcc_reg);
}
- if (gpio_is_valid(ddata->nreset_gpio)) {
- r = devm_gpio_request_one(&spi->dev,
- ddata->nreset_gpio, GPIOF_OUT_INIT_LOW,
- "lcd reset");
- if (r < 0) {
- dev_err(&spi->dev, "couldn't request reset GPIO\n");
- goto err_gpio_req;
- }
+ gpio = devm_gpiod_get(&spi->dev, "reset", GPIOD_OUT_HIGH);
+ if (IS_ERR(gpio)) {
+ dev_err(&spi->dev, "failed to get reset gpio\n");
+ return PTR_ERR(gpio);
}
+ ddata->reset_gpio = gpio;
+
r = sysfs_create_group(&spi->dev.kobj, &tpo_td043_attr_group);
if (r) {
dev_err(&spi->dev, "failed to create sysfs files\n");
- goto err_sysfs;
+ return r;
}
ddata->vm = tpo_td043_vm;
dssdev = &ddata->dssdev;
dssdev->dev = &spi->dev;
- dssdev->driver = &tpo_td043_ops;
+ dssdev->ops = &tpo_td043_ops;
dssdev->type = OMAP_DISPLAY_TYPE_DPI;
dssdev->owner = THIS_MODULE;
- dssdev->panel.vm = ddata->vm;
+ dssdev->of_ports = BIT(0);
- r = omapdss_register_display(dssdev);
- if (r) {
- dev_err(&spi->dev, "Failed to register panel\n");
- goto err_reg;
- }
+ /*
+ * Note: According to the panel documentation:
+ * SYNC needs to be driven on the FALLING edge
+ */
+ dssdev->bus_flags = DRM_BUS_FLAG_DE_HIGH | DRM_BUS_FLAG_SYNC_POSEDGE
+ | DRM_BUS_FLAG_PIXDATA_NEGEDGE;
- return 0;
+ omapdss_display_init(dssdev);
+ omapdss_device_register(dssdev);
-err_reg:
- sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group);
-err_sysfs:
-err_gpio_req:
-err_regulator:
- return r;
+ return 0;
}
static int tpo_td043_remove(struct spi_device *spi)
@@ -574,10 +465,9 @@ static int tpo_td043_remove(struct spi_device *spi)
dev_dbg(&ddata->spi->dev, "%s\n", __func__);
- omapdss_unregister_display(dssdev);
+ omapdss_device_unregister(dssdev);
tpo_td043_disable(dssdev);
- tpo_td043_disconnect(dssdev);
sysfs_remove_group(&spi->dev.kobj, &tpo_td043_attr_group);
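
Note the logic inversion in the tpo-td043 hunks above: the old code set the
physical nreset line high to release the panel, while gpiod_set_value() takes
a logical value, with the active-low inversion described once in the device
tree. A sketch under an assumed DT fragment:

/* DT (assumed): reset-gpios = <&gpio1 23 GPIO_ACTIVE_LOW>; */
static void example_reset_cycle(struct gpio_desc *reset)
{
	gpiod_set_value(reset, 1);	/* assert reset: line driven low  */
	gpiod_set_value(reset, 0);	/* release reset: line driven high */
}

This is also why the probe above requests the descriptor with GPIOD_OUT_HIGH:
the panel starts out held in reset.
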
diff --git a/drivers/gpu/drm/omapdrm/dss/base.c b/drivers/gpu/drm/omapdrm/dss/base.c
index 99e8cb8dc65b..472f56e3de70 100644
--- a/drivers/gpu/drm/omapdrm/dss/base.c
+++ b/drivers/gpu/drm/omapdrm/dss/base.c
@@ -14,24 +14,17 @@
*/
#include <linux/kernel.h>
+#include <linux/list.h>
#include <linux/module.h>
+#include <linux/mutex.h>
#include <linux/of.h>
#include <linux/of_graph.h>
-#include <linux/list.h>
#include "dss.h"
#include "omapdss.h"
static struct dss_device *dss_device;
-static struct list_head omapdss_comp_list;
-
-struct omapdss_comp_node {
- struct list_head list;
- struct device_node *node;
- bool dss_core_component;
-};
-
struct dss_device *omapdss_get_dss(void)
{
return dss_device;
@@ -56,6 +49,208 @@ const struct dispc_ops *dispc_get_ops(struct dss_device *dss)
}
EXPORT_SYMBOL(dispc_get_ops);
+
+/* -----------------------------------------------------------------------------
+ * OMAP DSS Devices Handling
+ */
+
+static LIST_HEAD(omapdss_devices_list);
+static DEFINE_MUTEX(omapdss_devices_lock);
+
+void omapdss_device_register(struct omap_dss_device *dssdev)
+{
+ mutex_lock(&omapdss_devices_lock);
+ list_add_tail(&dssdev->list, &omapdss_devices_list);
+ mutex_unlock(&omapdss_devices_lock);
+}
+EXPORT_SYMBOL_GPL(omapdss_device_register);
+
+void omapdss_device_unregister(struct omap_dss_device *dssdev)
+{
+ mutex_lock(&omapdss_devices_lock);
+ list_del(&dssdev->list);
+ mutex_unlock(&omapdss_devices_lock);
+}
+EXPORT_SYMBOL_GPL(omapdss_device_unregister);
+
+static bool omapdss_device_is_registered(struct device_node *node)
+{
+ struct omap_dss_device *dssdev;
+ bool found = false;
+
+ mutex_lock(&omapdss_devices_lock);
+
+ list_for_each_entry(dssdev, &omapdss_devices_list, list) {
+ if (dssdev->dev->of_node == node) {
+ found = true;
+ break;
+ }
+ }
+
+ mutex_unlock(&omapdss_devices_lock);
+ return found;
+}
+
+struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev)
+{
+ if (!try_module_get(dssdev->owner))
+ return NULL;
+
+ if (get_device(dssdev->dev) == NULL) {
+ module_put(dssdev->owner);
+ return NULL;
+ }
+
+ return dssdev;
+}
+EXPORT_SYMBOL(omapdss_device_get);
+
+void omapdss_device_put(struct omap_dss_device *dssdev)
+{
+ put_device(dssdev->dev);
+ module_put(dssdev->owner);
+}
+EXPORT_SYMBOL(omapdss_device_put);
+
+struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src,
+ unsigned int port)
+{
+ struct omap_dss_device *dssdev;
+
+ list_for_each_entry(dssdev, &omapdss_devices_list, list) {
+ if (dssdev->dev->of_node == src && dssdev->of_ports & BIT(port))
+ return omapdss_device_get(dssdev);
+ }
+
+ return NULL;
+}
+
+/*
+ * Search for the next device starting at @from. The type argument specifies
+ * which device types to consider when searching. Searching for multiple types
+ * is supported by OR'ing their type flags. The function releases the
+ * reference to the @from device and acquires a reference to the returned
+ * device if found.
+ */
+struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from,
+ enum omap_dss_device_type type)
+{
+ struct omap_dss_device *dssdev;
+ struct list_head *list;
+
+ mutex_lock(&omapdss_devices_lock);
+
+ if (list_empty(&omapdss_devices_list)) {
+ dssdev = NULL;
+ goto done;
+ }
+
+ /*
+ * Start from the @from entry if given, or from the head of
+ * omapdss_devices_list otherwise.
+ */
+ list = from ? &from->list : &omapdss_devices_list;
+
+ list_for_each_entry(dssdev, list, list) {
+ /*
+ * Stop if we reach omapdss_devices_list; that's the end of
+ * the list.
+ */
+ if (&dssdev->list == &omapdss_devices_list) {
+ dssdev = NULL;
+ goto done;
+ }
+
+ /*
+ * Accept display entities if the display type is requested,
+ * and output entities if the output type is requested.
+ */
+ if ((type & OMAP_DSS_DEVICE_TYPE_DISPLAY) &&
+ !dssdev->output_type)
+ goto done;
+ if ((type & OMAP_DSS_DEVICE_TYPE_OUTPUT) && dssdev->id &&
+ dssdev->next)
+ goto done;
+ }
+
+ dssdev = NULL;
+
+done:
+ if (from)
+ omapdss_device_put(from);
+ if (dssdev)
+ omapdss_device_get(dssdev);
+
+ mutex_unlock(&omapdss_devices_lock);
+ return dssdev;
+}
+EXPORT_SYMBOL(omapdss_device_get_next);
+
+int omapdss_device_connect(struct dss_device *dss,
+ struct omap_dss_device *src,
+ struct omap_dss_device *dst)
+{
+ int ret;
+
+ dev_dbg(dst->dev, "connect\n");
+
+ if (omapdss_device_is_connected(dst))
+ return -EBUSY;
+
+ dst->dss = dss;
+
+ ret = dst->ops->connect(src, dst);
+ if (ret < 0) {
+ dst->dss = NULL;
+ return ret;
+ }
+
+ if (src) {
+ WARN_ON(src->dst);
+ dst->src = src;
+ src->dst = dst;
+ }
+
+ return 0;
+}
+EXPORT_SYMBOL_GPL(omapdss_device_connect);
+
+void omapdss_device_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
+{
+ dev_dbg(dst->dev, "disconnect\n");
+
+ if (!dst->id && !omapdss_device_is_connected(dst)) {
+ WARN_ON(dst->output_type);
+ return;
+ }
+
+ if (src) {
+ if (WARN_ON(dst != src->dst))
+ return;
+
+ dst->src = NULL;
+ src->dst = NULL;
+ }
+
+ WARN_ON(dst->state != OMAP_DSS_DISPLAY_DISABLED);
+
+ dst->ops->disconnect(src, dst);
+ dst->dss = NULL;
+}
+EXPORT_SYMBOL_GPL(omapdss_device_disconnect);
+
+/* -----------------------------------------------------------------------------
+ * Components Handling
+ */
+
+static struct list_head omapdss_comp_list;
+
+struct omapdss_comp_node {
+ struct list_head list;
+ struct device_node *node;
+ bool dss_core_component;
+};
+
static bool omapdss_list_contains(const struct device_node *node)
{
struct omapdss_comp_node *comp;
@@ -130,9 +325,7 @@ static bool omapdss_component_is_loaded(struct omapdss_comp_node *comp)
{
if (comp->dss_core_component)
return true;
- if (omapdss_component_is_display(comp->node))
- return true;
- if (omapdss_component_is_output(comp->node))
+ if (omapdss_device_is_registered(comp->node))
return true;
return false;
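
A sketch of how a consumer might walk the new global device list; the loop
shape follows the reference-counting rule in the comment above (this caller
is illustrative, not part of the patch):

static void example_list_displays(void)
{
	struct omap_dss_device *dssdev = NULL;

	/*
	 * omapdss_device_get_next() drops the reference on its 'from'
	 * argument and takes one on the device it returns, so the loop
	 * holds exactly one reference at any time and none at exit.
	 */
	while ((dssdev = omapdss_device_get_next(dssdev,
					OMAP_DSS_DEVICE_TYPE_DISPLAY)))
		pr_info("found display %s\n", dssdev->name);
}
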
diff --git a/drivers/gpu/drm/omapdrm/dss/core.c b/drivers/gpu/drm/omapdrm/dss/core.c
index 07d00a186f15..a2edabc9f6b3 100644
--- a/drivers/gpu/drm/omapdrm/dss/core.c
+++ b/drivers/gpu/drm/omapdrm/dss/core.c
@@ -45,36 +45,14 @@ static struct platform_driver * const omap_dss_drivers[] = {
#endif
};
-static struct platform_device *omap_drm_device;
-
static int __init omap_dss_init(void)
{
- int r;
-
- r = platform_register_drivers(omap_dss_drivers,
- ARRAY_SIZE(omap_dss_drivers));
- if (r)
- goto err_reg;
-
- omap_drm_device = platform_device_register_simple("omapdrm", 0, NULL, 0);
- if (IS_ERR(omap_drm_device)) {
- r = PTR_ERR(omap_drm_device);
- goto err_reg;
- }
-
- return 0;
-
-err_reg:
- platform_unregister_drivers(omap_dss_drivers,
- ARRAY_SIZE(omap_dss_drivers));
-
- return r;
+ return platform_register_drivers(omap_dss_drivers,
+ ARRAY_SIZE(omap_dss_drivers));
}
static void __exit omap_dss_exit(void)
{
- platform_device_unregister(omap_drm_device);
-
platform_unregister_drivers(omap_dss_drivers,
ARRAY_SIZE(omap_dss_drivers));
}
diff --git a/drivers/gpu/drm/omapdrm/dss/dispc.c b/drivers/gpu/drm/omapdrm/dss/dispc.c
index 84f274c4a4cb..e61a9592a650 100644
--- a/drivers/gpu/drm/omapdrm/dss/dispc.c
+++ b/drivers/gpu/drm/omapdrm/dss/dispc.c
@@ -2904,13 +2904,6 @@ static int dispc_ovl_enable(struct dispc_device *dispc,
return 0;
}
-static enum omap_dss_output_id
-dispc_mgr_get_supported_outputs(struct dispc_device *dispc,
- enum omap_channel channel)
-{
- return dss_get_supported_outputs(dispc->dss, channel);
-}
-
static void dispc_lcd_enable_signal_polarity(struct dispc_device *dispc,
bool act_high)
{
@@ -3120,28 +3113,29 @@ static bool _dispc_mgr_pclk_ok(struct dispc_device *dispc,
return pclk <= dispc->feat->max_tv_pclk;
}
-bool dispc_mgr_timings_ok(struct dispc_device *dispc, enum omap_channel channel,
- const struct videomode *vm)
+static int dispc_mgr_check_timings(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct videomode *vm)
{
if (!_dispc_mgr_size_ok(dispc, vm->hactive, vm->vactive))
- return false;
+ return MODE_BAD;
if (!_dispc_mgr_pclk_ok(dispc, channel, vm->pixelclock))
- return false;
+ return MODE_BAD;
if (dss_mgr_is_lcd(channel)) {
/* TODO: OMAP4+ supports interlace for LCD outputs */
if (vm->flags & DISPLAY_FLAGS_INTERLACED)
- return false;
+ return MODE_BAD;
if (!_dispc_lcd_timings_ok(dispc, vm->hsync_len,
vm->hfront_porch, vm->hback_porch,
vm->vsync_len, vm->vfront_porch,
vm->vback_porch))
- return false;
+ return MODE_BAD;
}
- return true;
+ return MODE_OK;
}
static void _dispc_mgr_set_lcd_timings(struct dispc_device *dispc,
@@ -3243,7 +3237,7 @@ static void dispc_mgr_set_timings(struct dispc_device *dispc,
DSSDBG("channel %d xres %u yres %u\n", channel, t.hactive, t.vactive);
- if (!dispc_mgr_timings_ok(dispc, channel, &t)) {
+ if (dispc_mgr_check_timings(dispc, channel, &t)) {
BUG();
return;
}
@@ -4740,9 +4734,9 @@ static const struct dispc_ops dispc_ops = {
.mgr_go_busy = dispc_mgr_go_busy,
.mgr_go = dispc_mgr_go,
.mgr_set_lcd_config = dispc_mgr_set_lcd_config,
+ .mgr_check_timings = dispc_mgr_check_timings,
.mgr_set_timings = dispc_mgr_set_timings,
.mgr_setup = dispc_mgr_setup,
- .mgr_get_supported_outputs = dispc_mgr_get_supported_outputs,
.mgr_gamma_size = dispc_mgr_gamma_size,
.mgr_set_gamma = dispc_mgr_set_gamma,
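
dispc_mgr_timings_ok() returning a bool is replaced here by a
.mgr_check_timings operation that follows the DRM enum drm_mode_status
convention: MODE_OK is 0 and any nonzero MODE_* value names the failure.
A sketch of a caller under the new convention (illustrative only):

static bool example_timings_ok(struct dispc_device *dispc,
			       const struct dispc_ops *ops,
			       enum omap_channel channel,
			       const struct videomode *vm)
{
	/* MODE_OK is zero, so any nonzero return means "reject". */
	return ops->mgr_check_timings(dispc, channel, vm) == MODE_OK;
}
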
diff --git a/drivers/gpu/drm/omapdrm/dss/display.c b/drivers/gpu/drm/omapdrm/dss/display.c
index 9e7fcbd57e52..34b2a4ef63a4 100644
--- a/drivers/gpu/drm/omapdrm/dss/display.c
+++ b/drivers/gpu/drm/omapdrm/dss/display.c
@@ -21,27 +21,14 @@
#define DSS_SUBSYS_NAME "DISPLAY"
#include <linux/kernel.h>
-#include <linux/module.h>
-#include <linux/jiffies.h>
-#include <linux/platform_device.h>
#include <linux/of.h>
#include "omapdss.h"
-static void omapdss_default_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- *vm = dssdev->panel.vm;
-}
-
-static LIST_HEAD(panel_list);
-static DEFINE_MUTEX(panel_list_mutex);
static int disp_num_counter;
-int omapdss_register_display(struct omap_dss_device *dssdev)
+void omapdss_display_init(struct omap_dss_device *dssdev)
{
- struct omap_dss_driver *drv = dssdev->driver;
- struct list_head *cur;
int id;
/*
@@ -52,123 +39,22 @@ int omapdss_register_display(struct omap_dss_device *dssdev)
if (id < 0)
id = disp_num_counter++;
- snprintf(dssdev->alias, sizeof(dssdev->alias), "display%d", id);
+ dssdev->alias_id = id;
/* Use 'label' property for name, if it exists */
of_property_read_string(dssdev->dev->of_node, "label", &dssdev->name);
if (dssdev->name == NULL)
- dssdev->name = dssdev->alias;
-
- if (drv && drv->get_timings == NULL)
- drv->get_timings = omapdss_default_get_timings;
-
- mutex_lock(&panel_list_mutex);
- list_for_each(cur, &panel_list) {
- struct omap_dss_device *ldev = list_entry(cur,
- struct omap_dss_device,
- panel_list);
- if (strcmp(ldev->alias, dssdev->alias) > 0)
- break;
- }
- list_add_tail(&dssdev->panel_list, cur);
- mutex_unlock(&panel_list_mutex);
- return 0;
-}
-EXPORT_SYMBOL(omapdss_register_display);
-
-void omapdss_unregister_display(struct omap_dss_device *dssdev)
-{
- mutex_lock(&panel_list_mutex);
- list_del(&dssdev->panel_list);
- mutex_unlock(&panel_list_mutex);
+ dssdev->name = devm_kasprintf(dssdev->dev, GFP_KERNEL,
+ "display%u", id);
}
-EXPORT_SYMBOL(omapdss_unregister_display);
+EXPORT_SYMBOL_GPL(omapdss_display_init);
-bool omapdss_component_is_display(struct device_node *node)
+struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output)
{
- struct omap_dss_device *dssdev;
- bool found = false;
-
- mutex_lock(&panel_list_mutex);
- list_for_each_entry(dssdev, &panel_list, panel_list) {
- if (dssdev->dev->of_node == node) {
- found = true;
- goto out;
- }
- }
-out:
- mutex_unlock(&panel_list_mutex);
- return found;
-}
-EXPORT_SYMBOL(omapdss_component_is_display);
-
-struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev)
-{
- if (!try_module_get(dssdev->owner))
- return NULL;
-
- if (get_device(dssdev->dev) == NULL) {
- module_put(dssdev->owner);
- return NULL;
- }
-
- return dssdev;
-}
-EXPORT_SYMBOL(omap_dss_get_device);
-
-void omap_dss_put_device(struct omap_dss_device *dssdev)
-{
- put_device(dssdev->dev);
- module_put(dssdev->owner);
-}
-EXPORT_SYMBOL(omap_dss_put_device);
-
-/*
- * ref count of the found device is incremented.
- * ref count of from-device is decremented.
- */
-struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from)
-{
- struct list_head *l;
- struct omap_dss_device *dssdev;
-
- mutex_lock(&panel_list_mutex);
-
- if (list_empty(&panel_list)) {
- dssdev = NULL;
- goto out;
- }
-
- if (from == NULL) {
- dssdev = list_first_entry(&panel_list, struct omap_dss_device,
- panel_list);
- omap_dss_get_device(dssdev);
- goto out;
- }
-
- omap_dss_put_device(from);
-
- list_for_each(l, &panel_list) {
- dssdev = list_entry(l, struct omap_dss_device, panel_list);
- if (dssdev == from) {
- if (list_is_last(l, &panel_list)) {
- dssdev = NULL;
- goto out;
- }
-
- dssdev = list_entry(l->next, struct omap_dss_device,
- panel_list);
- omap_dss_get_device(dssdev);
- goto out;
- }
- }
-
- WARN(1, "'from' dssdev not found\n");
+ while (output->next)
+ output = output->next;
- dssdev = NULL;
-out:
- mutex_unlock(&panel_list_mutex);
- return dssdev;
+ return omapdss_device_get(output);
}
-EXPORT_SYMBOL(omap_dss_get_next_device);
+EXPORT_SYMBOL_GPL(omapdss_display_get);
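
With outputs and displays now chained through src/next pointers, locating the
panel at the end of a pipeline is a plain walk, as omapdss_display_get()
shows. An illustrative caller (dropping the reference is the caller's job):

static void example_use_display(struct omap_dss_device *output)
{
	struct omap_dss_device *display = omapdss_display_get(output);

	pr_info("pipeline ends at %s\n", display->name);

	/* omapdss_display_get() took a reference; put it when done. */
	omapdss_device_put(display);
}
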
diff --git a/drivers/gpu/drm/omapdrm/dss/dpi.c b/drivers/gpu/drm/omapdrm/dss/dpi.c
index 9fcc50217133..ca4f3c4c6318 100644
--- a/drivers/gpu/drm/omapdrm/dss/dpi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dpi.c
@@ -39,6 +39,7 @@ struct dpi_data {
struct platform_device *pdev;
enum dss_model dss_model;
struct dss_device *dss;
+ unsigned int id;
struct regulator *vdds_dsi_reg;
enum dss_clk_source clk_src;
@@ -346,10 +347,9 @@ static int dpi_set_dispc_clk(struct dpi_data *dpi, unsigned long pck_req,
static int dpi_set_mode(struct dpi_data *dpi)
{
- struct videomode *vm = &dpi->vm;
+ const struct videomode *vm = &dpi->vm;
int lck_div = 0, pck_div = 0;
unsigned long fck = 0;
- unsigned long pck;
int r = 0;
if (dpi->pll)
@@ -361,17 +361,6 @@ static int dpi_set_mode(struct dpi_data *dpi)
if (r)
return r;
- pck = fck / lck_div / pck_div;
-
- if (pck != vm->pixelclock) {
- DSSWARN("Could not find exact pixel clock. Requested %lu Hz, got %lu Hz\n",
- vm->pixelclock, pck);
-
- vm->pixelclock = pck;
- }
-
- dss_mgr_set_timings(&dpi->output, vm);
-
return 0;
}
@@ -413,7 +402,7 @@ static int dpi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err_get_dispc;
- r = dss_dpi_select_source(dpi->dss, out->port_num, out->dispc_channel);
+ r = dss_dpi_select_source(dpi->dss, dpi->id, out->dispc_channel);
if (r)
goto err_src_sel;
@@ -478,7 +467,7 @@ static void dpi_display_disable(struct omap_dss_device *dssdev)
}
static void dpi_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+ const struct videomode *vm)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
@@ -491,23 +480,10 @@ static void dpi_set_timings(struct omap_dss_device *dssdev,
mutex_unlock(&dpi->lock);
}
-static void dpi_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
-
- mutex_lock(&dpi->lock);
-
- *vm = dpi->vm;
-
- mutex_unlock(&dpi->lock);
-}
-
static int dpi_check_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
- enum omap_channel channel = dpi->output.dispc_channel;
int lck_div, pck_div;
unsigned long fck;
unsigned long pck;
@@ -517,9 +493,6 @@ static int dpi_check_timings(struct omap_dss_device *dssdev,
if (vm->hactive % 8 != 0)
return -EINVAL;
- if (!dispc_mgr_timings_ok(dpi->dss->dispc, channel, vm))
- return -EINVAL;
-
if (vm->pixelclock == 0)
return -EINVAL;
@@ -562,38 +535,6 @@ static int dpi_verify_pll(struct dss_pll *pll)
return 0;
}
-static const struct soc_device_attribute dpi_soc_devices[] = {
- { .machine = "OMAP3[456]*" },
- { .machine = "[AD]M37*" },
- { /* sentinel */ }
-};
-
-static int dpi_init_regulator(struct dpi_data *dpi)
-{
- struct regulator *vdds_dsi;
-
- /*
- * The DPI uses the DSI VDDS on OMAP34xx, OMAP35xx, OMAP36xx, AM37xx and
- * DM37xx only.
- */
- if (!soc_device_match(dpi_soc_devices))
- return 0;
-
- if (dpi->vdds_dsi_reg)
- return 0;
-
- vdds_dsi = devm_regulator_get(&dpi->pdev->dev, "vdds_dsi");
- if (IS_ERR(vdds_dsi)) {
- if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER)
- DSSERR("can't get VDDS_DSI regulator\n");
- return PTR_ERR(vdds_dsi);
- }
-
- dpi->vdds_dsi_reg = vdds_dsi;
-
- return 0;
-}
-
static void dpi_init_pll(struct dpi_data *dpi)
{
struct dss_pll *pll;
@@ -621,7 +562,7 @@ static void dpi_init_pll(struct dpi_data *dpi)
* the channel in some more dynamic manner, or get the channel as a user
* parameter.
*/
-static enum omap_channel dpi_get_channel(struct dpi_data *dpi, int port_num)
+static enum omap_channel dpi_get_channel(struct dpi_data *dpi)
{
switch (dpi->dss_model) {
case DSS_MODEL_OMAP2:
@@ -629,7 +570,7 @@ static enum omap_channel dpi_get_channel(struct dpi_data *dpi, int port_num)
return OMAP_DSS_CHANNEL_LCD;
case DSS_MODEL_DRA7:
- switch (port_num) {
+ switch (dpi->id) {
case 2:
return OMAP_DSS_CHANNEL_LCD3;
case 1:
@@ -651,49 +592,31 @@ static enum omap_channel dpi_get_channel(struct dpi_data *dpi, int port_num)
}
}
-static int dpi_connect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static int dpi_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
+ struct dpi_data *dpi = dpi_get_data_from_dssdev(dst);
int r;
- r = dpi_init_regulator(dpi);
- if (r)
- return r;
-
dpi_init_pll(dpi);
- r = dss_mgr_connect(&dpi->output, dssdev);
+ r = omapdss_device_connect(dst->dss, dst, dst->next);
if (r)
return r;
- r = omapdss_output_set_device(dssdev, dst);
- if (r) {
- DSSERR("failed to connect output to new device: %s\n",
- dst->name);
- dss_mgr_disconnect(&dpi->output, dssdev);
- return r;
- }
-
+ dst->dispc_channel_connected = true;
return 0;
}
-static void dpi_disconnect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static void dpi_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct dpi_data *dpi = dpi_get_data_from_dssdev(dssdev);
-
- WARN_ON(dst != dssdev->dst);
+ dst->dispc_channel_connected = false;
- if (dst != dssdev->dst)
- return;
-
- omapdss_output_unset_device(dssdev);
-
- dss_mgr_disconnect(&dpi->output, dssdev);
+ omapdss_device_disconnect(dst, dst->next);
}
-static const struct omapdss_dpi_ops dpi_ops = {
+static const struct omap_dss_device_ops dpi_ops = {
.connect = dpi_connect,
.disconnect = dpi_disconnect,
@@ -702,18 +625,16 @@ static const struct omapdss_dpi_ops dpi_ops = {
.check_timings = dpi_check_timings,
.set_timings = dpi_set_timings,
- .get_timings = dpi_get_timings,
};
-static void dpi_init_output_port(struct dpi_data *dpi, struct device_node *port)
+static int dpi_init_output_port(struct dpi_data *dpi, struct device_node *port)
{
struct omap_dss_device *out = &dpi->output;
+ u32 port_num = 0;
int r;
- u32 port_num;
- r = of_property_read_u32(port, "reg", &port_num);
- if (r)
- port_num = 0;
+ of_property_read_u32(port, "reg", &port_num);
+ dpi->id = port_num <= 2 ? port_num : 0;
switch (port_num) {
case 2:
@@ -731,12 +652,28 @@ static void dpi_init_output_port(struct dpi_data *dpi, struct device_node *port)
out->dev = &dpi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_DPI;
out->output_type = OMAP_DISPLAY_TYPE_DPI;
- out->dispc_channel = dpi_get_channel(dpi, port_num);
- out->port_num = port_num;
- out->ops.dpi = &dpi_ops;
+ out->dispc_channel = dpi_get_channel(dpi);
+ out->of_ports = BIT(port_num);
+ out->ops = &dpi_ops;
out->owner = THIS_MODULE;
- omapdss_register_output(out);
+ out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
+ if (IS_ERR(out->next)) {
+ if (PTR_ERR(out->next) != -EPROBE_DEFER)
+ dev_err(out->dev, "failed to find video sink\n");
+ return PTR_ERR(out->next);
+ }
+
+ r = omapdss_output_validate(out);
+ if (r) {
+ omapdss_device_put(out->next);
+ out->next = NULL;
+ return r;
+ }
+
+ omapdss_device_register(out);
+
+ return 0;
}
static void dpi_uninit_output_port(struct device_node *port)
@@ -744,7 +681,38 @@ static void dpi_uninit_output_port(struct device_node *port)
struct dpi_data *dpi = port->data;
struct omap_dss_device *out = &dpi->output;
- omapdss_unregister_output(out);
+ if (out->next)
+ omapdss_device_put(out->next);
+ omapdss_device_unregister(out);
+}
+
+static const struct soc_device_attribute dpi_soc_devices[] = {
+ { .machine = "OMAP3[456]*" },
+ { .machine = "[AD]M37*" },
+ { /* sentinel */ }
+};
+
+static int dpi_init_regulator(struct dpi_data *dpi)
+{
+ struct regulator *vdds_dsi;
+
+ /*
+ * The DPI uses the DSI VDDS on OMAP34xx, OMAP35xx, OMAP36xx, AM37xx and
+ * DM37xx only.
+ */
+ if (!soc_device_match(dpi_soc_devices))
+ return 0;
+
+ vdds_dsi = devm_regulator_get(&dpi->pdev->dev, "vdds_dsi");
+ if (IS_ERR(vdds_dsi)) {
+ if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER)
+ DSSERR("can't get VDDS_DSI regulator\n");
+ return PTR_ERR(vdds_dsi);
+ }
+
+ dpi->vdds_dsi_reg = vdds_dsi;
+
+ return 0;
}
int dpi_init_port(struct dss_device *dss, struct platform_device *pdev,
@@ -764,15 +732,14 @@ int dpi_init_port(struct dss_device *dss, struct platform_device *pdev,
return 0;
r = of_property_read_u32(ep, "data-lines", &datalines);
+ of_node_put(ep);
if (r) {
DSSERR("failed to parse datalines\n");
- goto err_datalines;
+ return r;
}
dpi->data_lines = datalines;
- of_node_put(ep);
-
dpi->pdev = pdev;
dpi->dss_model = dss_model;
dpi->dss = dss;
@@ -780,14 +747,11 @@ int dpi_init_port(struct dss_device *dss, struct platform_device *pdev,
mutex_init(&dpi->lock);
- dpi_init_output_port(dpi, port);
-
- return 0;
-
-err_datalines:
- of_node_put(ep);
+ r = dpi_init_regulator(dpi);
+ if (r)
+ return r;
- return r;
+ return dpi_init_output_port(dpi, port);
}
void dpi_uninit_port(struct device_node *port)
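
The DPI output now resolves its downstream device at probe time instead of at
connect time. A condensed sketch of the pattern introduced above, using the
helper names shown in the hunk:

static int example_init_output(struct omap_dss_device *out)
{
	int r;

	/* Find the first device connected to port 0 in the OF graph. */
	out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
	if (IS_ERR(out->next))
		return PTR_ERR(out->next);	/* may be -EPROBE_DEFER */

	r = omapdss_output_validate(out);
	if (r) {
		omapdss_device_put(out->next);
		out->next = NULL;
		return r;
	}

	omapdss_device_register(out);
	return 0;
}
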
diff --git a/drivers/gpu/drm/omapdrm/dss/dsi.c b/drivers/gpu/drm/omapdrm/dss/dsi.c
index 74467b308721..394c129cfb3b 100644
--- a/drivers/gpu/drm/omapdrm/dss/dsi.c
+++ b/drivers/gpu/drm/omapdrm/dss/dsi.c
@@ -403,6 +403,7 @@ struct dsi_data {
struct {
struct dss_debugfs_entry *irqs;
struct dss_debugfs_entry *regs;
+ struct dss_debugfs_entry *clks;
} debugfs;
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
@@ -442,27 +443,6 @@ static inline struct dsi_data *to_dsi_data(struct omap_dss_device *dssdev)
return dev_get_drvdata(dssdev->dev);
}
-static struct dsi_data *dsi_get_dsi_from_id(int module)
-{
- struct omap_dss_device *out;
- enum omap_dss_output_id id;
-
- switch (module) {
- case 0:
- id = OMAP_DSS_OUTPUT_DSI1;
- break;
- case 1:
- id = OMAP_DSS_OUTPUT_DSI2;
- break;
- default:
- return NULL;
- }
-
- out = omap_dss_get_output(id);
-
- return out ? to_dsi_data(out) : NULL;
-}
-
static inline void dsi_write_reg(struct dsi_data *dsi,
const struct dsi_reg idx, u32 val)
{
@@ -1157,26 +1137,6 @@ static void dsi_runtime_put(struct dsi_data *dsi)
WARN_ON(r < 0 && r != -ENOSYS);
}
-static int dsi_regulator_init(struct dsi_data *dsi)
-{
- struct regulator *vdds_dsi;
-
- if (dsi->vdds_dsi_reg != NULL)
- return 0;
-
- vdds_dsi = devm_regulator_get(dsi->dev, "vdd");
-
- if (IS_ERR(vdds_dsi)) {
- if (PTR_ERR(vdds_dsi) != -EPROBE_DEFER)
- DSSERR("can't get DSI VDD regulator\n");
- return PTR_ERR(vdds_dsi);
- }
-
- dsi->vdds_dsi_reg = vdds_dsi;
-
- return 0;
-}
-
static void _dsi_print_reset_status(struct dsi_data *dsi)
{
u32 l;
@@ -1373,10 +1333,6 @@ static int dsi_pll_enable(struct dss_pll *pll)
DSSDBG("PLL init\n");
- r = dsi_regulator_init(dsi);
- if (r)
- return r;
-
r = dsi_runtime_get(dsi);
if (r)
return r;
@@ -1448,8 +1404,9 @@ static void dsi_pll_disable(struct dss_pll *pll)
dsi_pll_uninit(dsi, true);
}
-static void dsi_dump_dsi_clocks(struct dsi_data *dsi, struct seq_file *s)
+static int dsi_dump_dsi_clocks(struct seq_file *s, void *p)
{
+ struct dsi_data *dsi = p;
struct dss_pll_clock_info *cinfo = &dsi->pll.cinfo;
enum dss_clk_source dispc_clk_src, dsi_clk_src;
int dsi_module = dsi->module_id;
@@ -1459,7 +1416,7 @@ static void dsi_dump_dsi_clocks(struct dsi_data *dsi, struct seq_file *s)
dsi_clk_src = dss_get_dsi_clk_source(dsi->dss, dsi_module);
if (dsi_runtime_get(dsi))
- return;
+ return 0;
seq_printf(s, "- DSI%d PLL -\n", dsi_module + 1);
@@ -1503,23 +1460,14 @@ static void dsi_dump_dsi_clocks(struct dsi_data *dsi, struct seq_file *s)
seq_printf(s, "LP_CLK\t\t%lu\n", dsi->current_lp_cinfo.lp_clk);
dsi_runtime_put(dsi);
-}
-
-void dsi_dump_clocks(struct seq_file *s)
-{
- struct dsi_data *dsi;
- int i;
- for (i = 0; i < MAX_NUM_DSI; i++) {
- dsi = dsi_get_dsi_from_id(i);
- if (dsi)
- dsi_dump_dsi_clocks(dsi, s);
- }
+ return 0;
}
#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
-static void dsi_dump_dsi_irqs(struct dsi_data *dsi, struct seq_file *s)
+static int dsi_dump_dsi_irqs(struct seq_file *s, void *p)
{
+ struct dsi_data *dsi = p;
unsigned long flags;
struct dsi_irq_stats stats;
@@ -1603,33 +1551,20 @@ static void dsi_dump_dsi_irqs(struct dsi_data *dsi, struct seq_file *s)
PIS(ULPSACTIVENOT_ALL0);
PIS(ULPSACTIVENOT_ALL1);
#undef PIS
-}
-static int dsi1_dump_irqs(struct seq_file *s, void *p)
-{
- struct dsi_data *dsi = dsi_get_dsi_from_id(0);
-
- dsi_dump_dsi_irqs(dsi, s);
- return 0;
-}
-
-static int dsi2_dump_irqs(struct seq_file *s, void *p)
-{
- struct dsi_data *dsi = dsi_get_dsi_from_id(1);
-
- dsi_dump_dsi_irqs(dsi, s);
return 0;
}
#endif
-static void dsi_dump_dsi_regs(struct dsi_data *dsi, struct seq_file *s)
+static int dsi_dump_dsi_regs(struct seq_file *s, void *p)
{
-#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsi, r))
+ struct dsi_data *dsi = p;
if (dsi_runtime_get(dsi))
- return;
+ return 0;
dsi_enable_scp_clk(dsi);
+#define DUMPREG(r) seq_printf(s, "%-35s %08x\n", #r, dsi_read_reg(dsi, r))
DUMPREG(DSI_REVISION);
DUMPREG(DSI_SYSCONFIG);
DUMPREG(DSI_SYSSTATUS);
@@ -1699,25 +1634,11 @@ static void dsi_dump_dsi_regs(struct dsi_data *dsi, struct seq_file *s)
DUMPREG(DSI_PLL_GO);
DUMPREG(DSI_PLL_CONFIGURATION1);
DUMPREG(DSI_PLL_CONFIGURATION2);
+#undef DUMPREG
dsi_disable_scp_clk(dsi);
dsi_runtime_put(dsi);
-#undef DUMPREG
-}
-
-static int dsi1_dump_regs(struct seq_file *s, void *p)
-{
- struct dsi_data *dsi = dsi_get_dsi_from_id(0);
-
- dsi_dump_dsi_regs(dsi, s);
- return 0;
-}
-
-static int dsi2_dump_regs(struct seq_file *s, void *p)
-{
- struct dsi_data *dsi = dsi_get_dsi_from_id(1);
- dsi_dump_dsi_regs(dsi, s);
return 0;
}
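
The per-module dsi1/dsi2 trampolines go away: each debugfs file is registered
with its own dsi_data pointer, and the dump functions take the standard
seq_file show() shape. A sketch of that shape (the registration call is not
visible in this hunk and is assumed):

static int example_dump(struct seq_file *s, void *p)
{
	struct dsi_data *dsi = p;	/* per-instance data from registration */

	seq_printf(s, "- DSI%d -\n", dsi->module_id + 1);
	return 0;
}
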
@@ -3344,7 +3265,7 @@ static void dsi_config_vp_num_line_buffers(struct dsi_data *dsi)
if (dsi->mode == OMAP_DSS_DSI_VIDEO_MODE) {
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
- struct videomode *vm = &dsi->vm;
+ const struct videomode *vm = &dsi->vm;
/*
* Don't use line buffers if width is greater than the video
* port's line buffer size
@@ -3473,7 +3394,7 @@ static void dsi_config_cmd_mode_interleaving(struct dsi_data *dsi)
int ddr_clk_pre, ddr_clk_post, enter_hs_mode_lat, exit_hs_mode_lat;
int tclk_trail, ths_exit, exiths_clk;
bool ddr_alwon;
- struct videomode *vm = &dsi->vm;
+ const struct videomode *vm = &dsi->vm;
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
int ndl = dsi->num_lanes_used - 1;
int dsi_fclk_hsdiv = dsi->user_dsi_cinfo.mX[HSDIV_DSI] + 1;
@@ -3723,7 +3644,7 @@ static void dsi_proto_timings(struct dsi_data *dsi)
int vbp = dsi->vm_timings.vbp;
int window_sync = dsi->vm_timings.window_sync;
bool hsync_end;
- struct videomode *vm = &dsi->vm;
+ const struct videomode *vm = &dsi->vm;
int bpp = dsi_get_pixel_size(dsi->pix_fmt);
int tl, t_he, width_bytes;
@@ -3980,8 +3901,6 @@ static void dsi_update_screen_dispc(struct dsi_data *dsi)
msecs_to_jiffies(250));
BUG_ON(r == 0);
- dss_mgr_set_timings(&dsi->output, &dsi->vm);
-
dss_mgr_start_update(&dsi->output);
if (dsi->te_enabled) {
@@ -4123,24 +4042,6 @@ static int dsi_display_init_dispc(struct dsi_data *dsi)
dsi->mgr_config.fifohandcheck = false;
}
- /*
- * override interlace, logic level and edge related parameters in
- * videomode with default values
- */
- dsi->vm.flags &= ~DISPLAY_FLAGS_INTERLACED;
- dsi->vm.flags &= ~DISPLAY_FLAGS_HSYNC_LOW;
- dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
- dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
- dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
- dsi->vm.flags &= ~DISPLAY_FLAGS_PIXDATA_NEGEDGE;
- dsi->vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
- dsi->vm.flags &= ~DISPLAY_FLAGS_DE_LOW;
- dsi->vm.flags |= DISPLAY_FLAGS_DE_HIGH;
- dsi->vm.flags &= ~DISPLAY_FLAGS_SYNC_POSEDGE;
- dsi->vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
-
- dss_mgr_set_timings(&dsi->output, &dsi->vm);
-
r = dsi_configure_dispc_clocks(dsi);
if (r)
goto err1;
@@ -4840,6 +4741,19 @@ static int dsi_set_config(struct omap_dss_device *dssdev,
dsi->user_dispc_cinfo = ctx.dispc_cinfo;
dsi->vm = ctx.vm;
+
+ /*
+ * override interlace, logic level and edge related parameters in
+ * videomode with default values
+ */
+ dsi->vm.flags &= ~DISPLAY_FLAGS_INTERLACED;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_HSYNC_LOW;
+ dsi->vm.flags |= DISPLAY_FLAGS_HSYNC_HIGH;
+ dsi->vm.flags &= ~DISPLAY_FLAGS_VSYNC_LOW;
+ dsi->vm.flags |= DISPLAY_FLAGS_VSYNC_HIGH;
+
+ dss_mgr_set_timings(&dsi->output, &dsi->vm);
+
dsi->vm_timings = ctx.dsi_vm;
mutex_unlock(&dsi->lock);
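Each pair of DISPLAY_FLAGS_*_LOW/_HIGH bits encodes a single polarity, so the idiom in the hunk above is always clear one bit, then set its complement. A small helper capturing the pattern — an illustrative sketch, not code from this patch:

	#include <video/videomode.h>

	/* Hypothetical helper: force one polarity of a videomode flag pair. */
	static inline void vm_force_flag(struct videomode *vm,
					 enum display_flags clear,
					 enum display_flags set)
	{
		vm->flags &= ~clear;
		vm->flags |= set;
	}

	/* e.g. vm_force_flag(&dsi->vm, DISPLAY_FLAGS_HSYNC_LOW,
	 *		      DISPLAY_FLAGS_HSYNC_HIGH); */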
@@ -4960,163 +4874,71 @@ static int dsi_get_clocks(struct dsi_data *dsi)
return 0;
}
-static int dsi_connect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static int dsi_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
int r;
- r = dsi_regulator_init(dsi);
- if (r)
- return r;
-
- r = dss_mgr_connect(&dsi->output, dssdev);
+ r = omapdss_device_connect(dst->dss, dst, dst->next);
if (r)
return r;
- r = omapdss_output_set_device(dssdev, dst);
- if (r) {
- DSSERR("failed to connect output to new device: %s\n",
- dssdev->name);
- dss_mgr_disconnect(&dsi->output, dssdev);
- return r;
- }
-
+ dst->dispc_channel_connected = true;
return 0;
}
-static void dsi_disconnect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static void dsi_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct dsi_data *dsi = to_dsi_data(dssdev);
+ dst->dispc_channel_connected = false;
- WARN_ON(dst != dssdev->dst);
-
- if (dst != dssdev->dst)
- return;
-
- omapdss_output_unset_device(dssdev);
-
- dss_mgr_disconnect(&dsi->output, dssdev);
+ omapdss_device_disconnect(dst, dst->next);
}
-static const struct omapdss_dsi_ops dsi_ops = {
+static const struct omap_dss_device_ops dsi_ops = {
.connect = dsi_connect,
.disconnect = dsi_disconnect,
-
- .bus_lock = dsi_bus_lock,
- .bus_unlock = dsi_bus_unlock,
-
.enable = dsi_display_enable,
- .disable = dsi_display_disable,
-
- .enable_hs = dsi_vc_enable_hs,
-
- .configure_pins = dsi_configure_pins,
- .set_config = dsi_set_config,
-
- .enable_video_output = dsi_enable_video_output,
- .disable_video_output = dsi_disable_video_output,
-
- .update = dsi_update,
-
- .enable_te = dsi_enable_te,
-
- .request_vc = dsi_request_vc,
- .set_vc_id = dsi_set_vc_id,
- .release_vc = dsi_release_vc,
-
- .dcs_write = dsi_vc_dcs_write,
- .dcs_write_nosync = dsi_vc_dcs_write_nosync,
- .dcs_read = dsi_vc_dcs_read,
-
- .gen_write = dsi_vc_generic_write,
- .gen_write_nosync = dsi_vc_generic_write_nosync,
- .gen_read = dsi_vc_generic_read,
-
- .bta_sync = dsi_vc_send_bta_sync,
-
- .set_max_rx_packet_size = dsi_vc_set_max_rx_packet_size,
-};
-
-static void dsi_init_output(struct dsi_data *dsi)
-{
- struct omap_dss_device *out = &dsi->output;
-
- out->dev = dsi->dev;
- out->id = dsi->module_id == 0 ?
- OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
-
- out->output_type = OMAP_DISPLAY_TYPE_DSI;
- out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1";
- out->dispc_channel = dsi_get_channel(dsi);
- out->ops.dsi = &dsi_ops;
- out->owner = THIS_MODULE;
- omapdss_register_output(out);
-}
+ .dsi = {
+ .bus_lock = dsi_bus_lock,
+ .bus_unlock = dsi_bus_unlock,
-static void dsi_uninit_output(struct dsi_data *dsi)
-{
- struct omap_dss_device *out = &dsi->output;
+ .disable = dsi_display_disable,
- omapdss_unregister_output(out);
-}
+ .enable_hs = dsi_vc_enable_hs,
-static int dsi_probe_of(struct dsi_data *dsi)
-{
- struct device_node *node = dsi->dev->of_node;
- struct property *prop;
- u32 lane_arr[10];
- int len, num_pins;
- int r, i;
- struct device_node *ep;
- struct omap_dsi_pin_config pin_cfg;
-
- ep = of_graph_get_endpoint_by_regs(node, 0, 0);
- if (!ep)
- return 0;
+ .configure_pins = dsi_configure_pins,
+ .set_config = dsi_set_config,
- prop = of_find_property(ep, "lanes", &len);
- if (prop == NULL) {
- dev_err(dsi->dev, "failed to find lane data\n");
- r = -EINVAL;
- goto err;
- }
+ .enable_video_output = dsi_enable_video_output,
+ .disable_video_output = dsi_disable_video_output,
- num_pins = len / sizeof(u32);
+ .update = dsi_update,
- if (num_pins < 4 || num_pins % 2 != 0 ||
- num_pins > dsi->num_lanes_supported * 2) {
- dev_err(dsi->dev, "bad number of lanes\n");
- r = -EINVAL;
- goto err;
- }
+ .enable_te = dsi_enable_te,
- r = of_property_read_u32_array(ep, "lanes", lane_arr, num_pins);
- if (r) {
- dev_err(dsi->dev, "failed to read lane data\n");
- goto err;
- }
+ .request_vc = dsi_request_vc,
+ .set_vc_id = dsi_set_vc_id,
+ .release_vc = dsi_release_vc,
- pin_cfg.num_pins = num_pins;
- for (i = 0; i < num_pins; ++i)
- pin_cfg.pins[i] = (int)lane_arr[i];
+ .dcs_write = dsi_vc_dcs_write,
+ .dcs_write_nosync = dsi_vc_dcs_write_nosync,
+ .dcs_read = dsi_vc_dcs_read,
- r = dsi_configure_pins(&dsi->output, &pin_cfg);
- if (r) {
- dev_err(dsi->dev, "failed to configure pins");
- goto err;
- }
+ .gen_write = dsi_vc_generic_write,
+ .gen_write_nosync = dsi_vc_generic_write_nosync,
+ .gen_read = dsi_vc_generic_read,
- of_node_put(ep);
+ .bta_sync = dsi_vc_send_bta_sync,
- return 0;
+ .set_max_rx_packet_size = dsi_vc_set_max_rx_packet_size,
+ },
+};
-err:
- of_node_put(ep);
- return r;
-}
+/* -----------------------------------------------------------------------------
+ * PLL
+ */
static const struct dss_pll_ops dsi_pll_ops = {
.enable = dsi_pll_enable,
@@ -5231,7 +5053,175 @@ static int dsi_init_pll_data(struct dss_device *dss, struct dsi_data *dsi)
return 0;
}
-/* DSI1 HW IP initialisation */
+/* -----------------------------------------------------------------------------
+ * Component Bind & Unbind
+ */
+
+static int dsi_bind(struct device *dev, struct device *master, void *data)
+{
+ struct dss_device *dss = dss_get_device(master);
+ struct dsi_data *dsi = dev_get_drvdata(dev);
+ char name[10];
+ u32 rev;
+ int r;
+
+ dsi->dss = dss;
+
+ dsi_init_pll_data(dss, dsi);
+
+ r = dsi_runtime_get(dsi);
+ if (r)
+ return r;
+
+ rev = dsi_read_reg(dsi, DSI_REVISION);
+ dev_dbg(dev, "OMAP DSI rev %d.%d\n",
+ FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
+
+ dsi->line_buffer_size = dsi_get_line_buf_size(dsi);
+
+ dsi_runtime_put(dsi);
+
+ snprintf(name, sizeof(name), "dsi%u_regs", dsi->module_id + 1);
+ dsi->debugfs.regs = dss_debugfs_create_file(dss, name,
+ dsi_dump_dsi_regs, dsi);
+#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
+ snprintf(name, sizeof(name), "dsi%u_irqs", dsi->module_id + 1);
+ dsi->debugfs.irqs = dss_debugfs_create_file(dss, name,
+ dsi_dump_dsi_irqs, dsi);
+#endif
+ snprintf(name, sizeof(name), "dsi%u_clks", dsi->module_id + 1);
+ dsi->debugfs.clks = dss_debugfs_create_file(dss, name,
+ dsi_dump_dsi_clocks, dsi);
+
+ return 0;
+}
+
+static void dsi_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct dsi_data *dsi = dev_get_drvdata(dev);
+
+ dss_debugfs_remove_file(dsi->debugfs.clks);
+ dss_debugfs_remove_file(dsi->debugfs.irqs);
+ dss_debugfs_remove_file(dsi->debugfs.regs);
+
+ of_platform_depopulate(dev);
+
+ WARN_ON(dsi->scp_clk_refcount > 0);
+
+ dss_pll_unregister(&dsi->pll);
+}
+
+static const struct component_ops dsi_component_ops = {
+ .bind = dsi_bind,
+ .unbind = dsi_unbind,
+};
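The DSI driver becomes a conventional component here: master-independent setup (resources, regulators, outputs) moves to probe(), while work that needs the DSS master — PLL registration, debugfs files — waits for bind(). A skeletal sketch of that split, with foo_* placeholder names and error unwinding trimmed:

	#include <linux/component.h>
	#include <linux/platform_device.h>

	static int foo_bind(struct device *dev, struct device *master, void *data)
	{
		/* master-dependent setup only */
		return 0;
	}

	static void foo_unbind(struct device *dev, struct device *master, void *data)
	{
		/* undo exactly what foo_bind() did */
	}

	static const struct component_ops foo_component_ops = {
		.bind	= foo_bind,
		.unbind	= foo_unbind,
	};

	static int foo_probe(struct platform_device *pdev)
	{
		/* master-independent setup first, then join the aggregate */
		return component_add(&pdev->dev, &foo_component_ops);
	}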
+
+/* -----------------------------------------------------------------------------
+ * Probe & Remove, Suspend & Resume
+ */
+
+static int dsi_init_output(struct dsi_data *dsi)
+{
+ struct omap_dss_device *out = &dsi->output;
+ int r;
+
+ out->dev = dsi->dev;
+ out->id = dsi->module_id == 0 ?
+ OMAP_DSS_OUTPUT_DSI1 : OMAP_DSS_OUTPUT_DSI2;
+
+ out->output_type = OMAP_DISPLAY_TYPE_DSI;
+ out->name = dsi->module_id == 0 ? "dsi.0" : "dsi.1";
+ out->dispc_channel = dsi_get_channel(dsi);
+ out->ops = &dsi_ops;
+ out->owner = THIS_MODULE;
+ out->of_ports = BIT(0);
+ out->bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE
+ | DRM_BUS_FLAG_DE_HIGH
+ | DRM_BUS_FLAG_SYNC_NEGEDGE;
+
+ out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
+ if (IS_ERR(out->next)) {
+ if (PTR_ERR(out->next) != -EPROBE_DEFER)
+ dev_err(out->dev, "failed to find video sink\n");
+ return PTR_ERR(out->next);
+ }
+
+ r = omapdss_output_validate(out);
+ if (r) {
+ omapdss_device_put(out->next);
+ out->next = NULL;
+ return r;
+ }
+
+ omapdss_device_register(out);
+
+ return 0;
+}
+
+static void dsi_uninit_output(struct dsi_data *dsi)
+{
+ struct omap_dss_device *out = &dsi->output;
+
+ if (out->next)
+ omapdss_device_put(out->next);
+ omapdss_device_unregister(out);
+}
+
+static int dsi_probe_of(struct dsi_data *dsi)
+{
+ struct device_node *node = dsi->dev->of_node;
+ struct property *prop;
+ u32 lane_arr[10];
+ int len, num_pins;
+ int r, i;
+ struct device_node *ep;
+ struct omap_dsi_pin_config pin_cfg;
+
+ ep = of_graph_get_endpoint_by_regs(node, 0, 0);
+ if (!ep)
+ return 0;
+
+ prop = of_find_property(ep, "lanes", &len);
+ if (prop == NULL) {
+ dev_err(dsi->dev, "failed to find lane data\n");
+ r = -EINVAL;
+ goto err;
+ }
+
+ num_pins = len / sizeof(u32);
+
+ if (num_pins < 4 || num_pins % 2 != 0 ||
+ num_pins > dsi->num_lanes_supported * 2) {
+ dev_err(dsi->dev, "bad number of lanes\n");
+ r = -EINVAL;
+ goto err;
+ }
+
+ r = of_property_read_u32_array(ep, "lanes", lane_arr, num_pins);
+ if (r) {
+ dev_err(dsi->dev, "failed to read lane data\n");
+ goto err;
+ }
+
+ pin_cfg.num_pins = num_pins;
+ for (i = 0; i < num_pins; ++i)
+ pin_cfg.pins[i] = (int)lane_arr[i];
+
+ r = dsi_configure_pins(&dsi->output, &pin_cfg);
+ if (r) {
+ dev_err(dsi->dev, "failed to configure pins");
+ goto err;
+ }
+
+ of_node_put(ep);
+
+ return 0;
+
+err:
+ of_node_put(ep);
+ return r;
+}
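dsi_probe_of() keeps parsing the existing binding, where the endpoint's "lanes" property lists pin numbers in positive/negative pairs (CLK+, CLK-, DATA0+, DATA0-, ...), hence the checks for an even count of at least four entries. An illustrative endpoint, shown as a comment since the pin numbers are made up rather than taken from a board file:

	/*
	 *	port {
	 *		endpoint {
	 *			remote-endpoint = <&lcd_in>;
	 *			lanes = <0 1	// CLK+, CLK-
	 *				 2 3	// DATA0+, DATA0-
	 *				 4 5>;	// DATA1+, DATA1-
	 *		};
	 *	};
	 */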
+
static const struct dsi_of_data dsi_of_data_omap34xx = {
.model = DSI_MODEL_OMAP3,
.pll_hw = &dss_omap3_dsi_pll_hw,
@@ -5297,23 +5287,21 @@ static const struct soc_device_attribute dsi_soc_devices[] = {
{ /* sentinel */ }
};
-static int dsi_bind(struct device *dev, struct device *master, void *data)
+static int dsi_probe(struct platform_device *pdev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct dss_device *dss = dss_get_device(master);
const struct soc_device_attribute *soc;
const struct dsi_module_id_data *d;
- u32 rev;
- int r, i;
+ struct device *dev = &pdev->dev;
struct dsi_data *dsi;
struct resource *dsi_mem;
struct resource *res;
+ unsigned int i;
+ int r;
dsi = devm_kzalloc(dev, sizeof(*dsi), GFP_KERNEL);
if (!dsi)
return -ENOMEM;
- dsi->dss = dss;
dsi->dev = dev;
dev_set_drvdata(dev, dsi);
@@ -5364,6 +5352,13 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
return r;
}
+ dsi->vdds_dsi_reg = devm_regulator_get(dev, "vdd");
+ if (IS_ERR(dsi->vdds_dsi_reg)) {
+ if (PTR_ERR(dsi->vdds_dsi_reg) != -EPROBE_DEFER)
+ DSSERR("can't get DSI VDD regulator\n");
+ return PTR_ERR(dsi->vdds_dsi_reg);
+ }
+
soc = soc_device_match(dsi_soc_devices);
if (soc)
dsi->data = soc->data;
@@ -5410,18 +5405,8 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
if (r)
return r;
- dsi_init_pll_data(dss, dsi);
-
pm_runtime_enable(dev);
- r = dsi_runtime_get(dsi);
- if (r)
- goto err_runtime_get;
-
- rev = dsi_read_reg(dsi, DSI_REVISION);
- dev_dbg(dev, "OMAP DSI rev %d.%d\n",
- FLD_GET(rev, 7, 4), FLD_GET(rev, 3, 0));
-
/* DSI on OMAP3 doesn't have register DSI_GNQ, set number
* of data lanes to 3 by default */
if (dsi->data->quirks & DSI_QUIRK_GNQ)
@@ -5430,88 +5415,48 @@ static int dsi_bind(struct device *dev, struct device *master, void *data)
else
dsi->num_lanes_supported = 3;
- dsi->line_buffer_size = dsi_get_line_buf_size(dsi);
-
- dsi_init_output(dsi);
+ r = dsi_init_output(dsi);
+ if (r)
+ goto err_pm_disable;
r = dsi_probe_of(dsi);
if (r) {
DSSERR("Invalid DSI DT data\n");
- goto err_probe_of;
+ goto err_uninit_output;
}
r = of_platform_populate(dev->of_node, NULL, NULL, dev);
if (r)
DSSERR("Failed to populate DSI child devices: %d\n", r);
- dsi_runtime_put(dsi);
-
- if (dsi->module_id == 0)
- dsi->debugfs.regs = dss_debugfs_create_file(dss, "dsi1_regs",
- dsi1_dump_regs,
- &dsi);
- else
- dsi->debugfs.regs = dss_debugfs_create_file(dss, "dsi2_regs",
- dsi2_dump_regs,
- &dsi);
-#ifdef CONFIG_OMAP2_DSS_COLLECT_IRQ_STATS
- if (dsi->module_id == 0)
- dsi->debugfs.irqs = dss_debugfs_create_file(dss, "dsi1_irqs",
- dsi1_dump_irqs,
- &dsi);
- else
- dsi->debugfs.irqs = dss_debugfs_create_file(dss, "dsi2_irqs",
- dsi2_dump_irqs,
- &dsi);
-#endif
+ r = component_add(&pdev->dev, &dsi_component_ops);
+ if (r)
+ goto err_uninit_output;
return 0;
-err_probe_of:
+err_uninit_output:
dsi_uninit_output(dsi);
- dsi_runtime_put(dsi);
-
-err_runtime_get:
+err_pm_disable:
pm_runtime_disable(dev);
return r;
}
-static void dsi_unbind(struct device *dev, struct device *master, void *data)
+static int dsi_remove(struct platform_device *pdev)
{
- struct dsi_data *dsi = dev_get_drvdata(dev);
+ struct dsi_data *dsi = platform_get_drvdata(pdev);
- dss_debugfs_remove_file(dsi->debugfs.irqs);
- dss_debugfs_remove_file(dsi->debugfs.regs);
-
- of_platform_depopulate(dev);
-
- WARN_ON(dsi->scp_clk_refcount > 0);
-
- dss_pll_unregister(&dsi->pll);
+ component_del(&pdev->dev, &dsi_component_ops);
dsi_uninit_output(dsi);
- pm_runtime_disable(dev);
+ pm_runtime_disable(&pdev->dev);
if (dsi->vdds_dsi_reg != NULL && dsi->vdds_dsi_enabled) {
regulator_disable(dsi->vdds_dsi_reg);
dsi->vdds_dsi_enabled = false;
}
-}
-static const struct component_ops dsi_component_ops = {
- .bind = dsi_bind,
- .unbind = dsi_unbind,
-};
-
-static int dsi_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &dsi_component_ops);
-}
-
-static int dsi_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &dsi_component_ops);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/dss/dss-of.c b/drivers/gpu/drm/omapdrm/dss/dss-of.c
index 4602a79c6c44..0422597ac6b0 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss-of.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss-of.c
@@ -21,7 +21,8 @@
#include "omapdss.h"
-struct device_node *dss_of_port_get_parent_device(struct device_node *port)
+static struct device_node *
+dss_of_port_get_parent_device(struct device_node *port)
{
struct device_node *np;
int i;
@@ -45,41 +46,37 @@ struct device_node *dss_of_port_get_parent_device(struct device_node *port)
return NULL;
}
-u32 dss_of_port_get_port_number(struct device_node *port)
-{
- int r;
- u32 reg;
-
- r = of_property_read_u32(port, "reg", &reg);
- if (r)
- reg = 0;
-
- return reg;
-}
-
struct omap_dss_device *
-omapdss_of_find_source_for_first_ep(struct device_node *node)
+omapdss_of_find_connected_device(struct device_node *node, unsigned int port)
{
- struct device_node *ep;
+ struct device_node *src_node;
struct device_node *src_port;
+ struct device_node *ep;
struct omap_dss_device *src;
+ u32 port_number = 0;
- ep = of_graph_get_endpoint_by_regs(node, 0, 0);
+ /* Get the endpoint... */
+ ep = of_graph_get_endpoint_by_regs(node, port, 0);
if (!ep)
- return ERR_PTR(-EINVAL);
+ return NULL;
+ /* ... and its remote port... */
src_port = of_graph_get_remote_port(ep);
- if (!src_port) {
- of_node_put(ep);
- return ERR_PTR(-EINVAL);
- }
-
of_node_put(ep);
+ if (!src_port)
+ return NULL;
- src = omap_dss_find_output_by_port_node(src_port);
-
+ /* ... and the remote port's number and parent... */
+ of_property_read_u32(src_port, "reg", &port_number);
+ src_node = dss_of_port_get_parent_device(src_port);
of_node_put(src_port);
+ if (!src_node)
+ return ERR_PTR(-EINVAL);
+
+ /* ... and finally the connected device. */
+ src = omapdss_find_device_by_port(src_node, port_number);
+ of_node_put(src_node);
return src ? src : ERR_PTR(-EPROBE_DEFER);
}
-EXPORT_SYMBOL_GPL(omapdss_of_find_source_for_first_ep);
+EXPORT_SYMBOL_GPL(omapdss_of_find_connected_device);
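The rewritten lookup has a three-way return contract: NULL when the port simply has no remote endpoint in DT, ERR_PTR(-EINVAL) when the graph is malformed, and ERR_PTR(-EPROBE_DEFER) when the remote device exists but has not registered yet. A caller-side fragment, mirroring the *_init_output() users above:

	struct omap_dss_device *next;

	next = omapdss_of_find_connected_device(node, 0);
	if (IS_ERR(next))
		return PTR_ERR(next);	/* -EPROBE_DEFER retries probe later */
	if (!next)
		return 0;		/* port unconnected: fine if optional */
	/* success: 'next' holds a reference; omapdss_device_put() when done */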
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.c b/drivers/gpu/drm/omapdrm/dss/dss.c
index cb80ddaa19d2..19fc4dfc429e 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.c
+++ b/drivers/gpu/drm/omapdrm/dss/dss.c
@@ -394,9 +394,6 @@ static int dss_debug_dump_clocks(struct seq_file *s, void *p)
dss_dump_clocks(dss, s);
dispc_dump_clocks(dss->dispc, s);
-#ifdef CONFIG_OMAP2_DSS_DSI
- dsi_dump_clocks(s);
-#endif
return 0;
}
@@ -681,12 +678,6 @@ unsigned long dss_get_max_fck_rate(struct dss_device *dss)
return dss->feat->fck_freq_max;
}
-enum omap_dss_output_id dss_get_supported_outputs(struct dss_device *dss,
- enum omap_channel channel)
-{
- return dss->feat->outputs[channel];
-}
-
static int dss_setup_default_clock(struct dss_device *dss)
{
unsigned long max_dss_fck, prate;
@@ -1183,7 +1174,8 @@ static int dss_init_ports(struct dss_device *dss)
struct platform_device *pdev = dss->pdev;
struct device_node *parent = pdev->dev.of_node;
struct device_node *port;
- int i;
+ unsigned int i;
+ int r;
for (i = 0; i < dss->feat->num_ports; i++) {
port = of_graph_get_port_by_id(parent, i);
@@ -1192,11 +1184,17 @@ static int dss_init_ports(struct dss_device *dss)
switch (dss->feat->ports[i]) {
case OMAP_DISPLAY_TYPE_DPI:
- dpi_init_port(dss, pdev, port, dss->feat->model);
+ r = dpi_init_port(dss, pdev, port, dss->feat->model);
+ if (r)
+ return r;
break;
+
case OMAP_DISPLAY_TYPE_SDI:
- sdi_init_port(dss, pdev, port);
+ r = sdi_init_port(dss, pdev, port);
+ if (r)
+ return r;
break;
+
default:
break;
}
@@ -1315,6 +1313,7 @@ static const struct soc_device_attribute dss_soc_devices[] = {
static int dss_bind(struct device *dev)
{
struct dss_device *dss = dev_get_drvdata(dev);
+ struct platform_device *drm_pdev;
int r;
r = component_bind_all(dev, NULL);
@@ -1323,14 +1322,25 @@ static int dss_bind(struct device *dev)
pm_set_vt_switch(0);
- omapdss_gather_components(dev);
omapdss_set_dss(dss);
+ drm_pdev = platform_device_register_simple("omapdrm", 0, NULL, 0);
+ if (IS_ERR(drm_pdev)) {
+ component_unbind_all(dev, NULL);
+ return PTR_ERR(drm_pdev);
+ }
+
+ dss->drm_pdev = drm_pdev;
+
return 0;
}
static void dss_unbind(struct device *dev)
{
+ struct dss_device *dss = dev_get_drvdata(dev);
+
+ platform_device_unregister(dss->drm_pdev);
+
omapdss_set_dss(NULL);
component_unbind_all(dev, NULL);
@@ -1474,6 +1484,8 @@ static int dss_probe(struct platform_device *pdev)
dss);
/* Add all the child devices as components. */
+ omapdss_gather_components(&pdev->dev);
+
device_for_each_child(&pdev->dev, &match, dss_add_child_component);
r = component_master_add_with_match(&pdev->dev, &dss_component_ops, match);
@@ -1539,12 +1551,9 @@ static void dss_shutdown(struct platform_device *pdev)
DSSDBG("shutdown\n");
- for_each_dss_dev(dssdev) {
- if (!dssdev->driver)
- continue;
-
+ for_each_dss_display(dssdev) {
if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE)
- dssdev->driver->disable(dssdev);
+ dssdev->ops->disable(dssdev);
}
}
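for_each_dss_display(), added to omapdss.h later in this patch, is a thin wrapper around omapdss_device_get_next(); the cursor must start as NULL so the walk begins at the head of the device list. Equivalent expansion of the loop above:

	struct omap_dss_device *dssdev = NULL;

	while ((dssdev = omapdss_device_get_next(dssdev,
				OMAP_DSS_DEVICE_TYPE_DISPLAY)) != NULL) {
		/* loop body, e.g. the disable call above */
	}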
diff --git a/drivers/gpu/drm/omapdrm/dss/dss.h b/drivers/gpu/drm/omapdrm/dss/dss.h
index 38302631b64b..37790c363128 100644
--- a/drivers/gpu/drm/omapdrm/dss/dss.h
+++ b/drivers/gpu/drm/omapdrm/dss/dss.h
@@ -238,6 +238,8 @@ struct dss_device {
struct regmap *syscon_pll_ctrl;
u32 syscon_pll_ctrl_offset;
+ struct platform_device *drm_pdev;
+
struct clk *parent_clk;
struct clk *dss_clk;
unsigned long dss_clk_rate;
@@ -267,6 +269,8 @@ struct dss_device {
struct dispc_device *dispc;
const struct dispc_ops *dispc_ops;
+ const struct dss_mgr_ops *mgr_ops;
+ struct omap_drm_private *mgr_ops_priv;
};
/* core */
@@ -313,8 +317,6 @@ void dss_runtime_put(struct dss_device *dss);
unsigned long dss_get_dispc_clk_rate(struct dss_device *dss);
unsigned long dss_get_max_fck_rate(struct dss_device *dss);
-enum omap_dss_output_id dss_get_supported_outputs(struct dss_device *dss,
- enum omap_channel channel);
int dss_dpi_select_source(struct dss_device *dss, int port,
enum omap_channel channel);
void dss_select_hdmi_venc_clk_source(struct dss_device *dss,
@@ -374,8 +376,6 @@ static inline void sdi_uninit_port(struct device_node *port)
#ifdef CONFIG_OMAP2_DSS_DSI
-void dsi_dump_clocks(struct seq_file *s);
-
void dsi_irq_handler(void);
#endif
@@ -417,9 +417,6 @@ bool dispc_div_calc(struct dispc_device *dispc, unsigned long dispc_freq,
unsigned long pck_min, unsigned long pck_max,
dispc_div_calc_func func, void *data);
-bool dispc_mgr_timings_ok(struct dispc_device *dispc,
- enum omap_channel channel,
- const struct videomode *vm);
int dispc_calc_clock_rates(struct dispc_device *dispc,
unsigned long dispc_fclk_rate,
struct dispc_clock_info *cinfo);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi.h b/drivers/gpu/drm/omapdrm/dss/hdmi.h
index 3aeb4cabd59f..7f0dc490a31d 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi.h
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi.h
@@ -313,13 +313,13 @@ void hdmi_wp_clear_irqenable(struct hdmi_wp_data *wp, u32 mask);
int hdmi_wp_set_phy_pwr(struct hdmi_wp_data *wp, enum hdmi_phy_pwr val);
int hdmi_wp_set_pll_pwr(struct hdmi_wp_data *wp, enum hdmi_pll_pwr val);
void hdmi_wp_video_config_format(struct hdmi_wp_data *wp,
- struct hdmi_video_format *video_fmt);
+ const struct hdmi_video_format *video_fmt);
void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp,
- struct videomode *vm);
+ const struct videomode *vm);
void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
- struct videomode *vm);
+ const struct videomode *vm);
void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
- struct videomode *vm, struct hdmi_config *param);
+ struct videomode *vm, const struct hdmi_config *param);
int hdmi_wp_init(struct platform_device *pdev, struct hdmi_wp_data *wp,
unsigned int version);
phys_addr_t hdmi_wp_get_audio_dma_addr(struct hdmi_wp_data *wp);
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi4.c b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
index 5879f45f6fc9..cf6230eac31a 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi4.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi4.c
@@ -108,26 +108,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int hdmi_init_regulator(struct omap_hdmi *hdmi)
-{
- struct regulator *reg;
-
- if (hdmi->vdda_reg != NULL)
- return 0;
-
- reg = devm_regulator_get(&hdmi->pdev->dev, "vdda");
-
- if (IS_ERR(reg)) {
- if (PTR_ERR(reg) != -EPROBE_DEFER)
- DSSERR("can't get VDDA regulator\n");
- return PTR_ERR(reg);
- }
-
- hdmi->vdda_reg = reg;
-
- return 0;
-}
-
static int hdmi_power_on_core(struct omap_hdmi *hdmi)
{
int r;
@@ -174,7 +154,7 @@ static void hdmi_power_off_core(struct omap_hdmi *hdmi)
static int hdmi_power_on_full(struct omap_hdmi *hdmi)
{
int r;
- struct videomode *vm;
+ const struct videomode *vm;
struct hdmi_wp_data *wp = &hdmi->wp;
struct dss_pll_clock_info hdmi_cinfo = { 0 };
unsigned int pc;
@@ -227,9 +207,6 @@ static int hdmi_power_on_full(struct omap_hdmi *hdmi)
hdmi4_configure(&hdmi->core, &hdmi->wp, &hdmi->cfg);
- /* tv size */
- dss_mgr_set_timings(&hdmi->output, vm);
-
r = dss_mgr_enable(&hdmi->output);
if (r)
goto err_mgr_enable;
@@ -271,19 +248,8 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi)
hdmi_power_off_core(hdmi);
}
-static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
- if (!dispc_mgr_timings_ok(hdmi->dss->dispc, dssdev->dispc_channel, vm))
- return -EINVAL;
-
- return 0;
-}
-
-static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static void hdmi_display_set_timings(struct omap_dss_device *dssdev,
+ const struct videomode *vm)
{
struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
@@ -296,14 +262,6 @@ static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
mutex_unlock(&hdmi->lock);
}
-static void hdmi_display_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
- *vm = hdmi->cfg.vm;
-}
-
static int hdmi_dump_regs(struct seq_file *s, void *p)
{
struct omap_hdmi *hdmi = s->private;
@@ -456,44 +414,25 @@ void hdmi4_core_disable(struct hdmi_core_data *core)
mutex_unlock(&hdmi->lock);
}
-static int hdmi_connect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static int hdmi_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
int r;
- r = hdmi_init_regulator(hdmi);
+ r = omapdss_device_connect(dst->dss, dst, dst->next);
if (r)
return r;
- r = dss_mgr_connect(&hdmi->output, dssdev);
- if (r)
- return r;
-
- r = omapdss_output_set_device(dssdev, dst);
- if (r) {
- DSSERR("failed to connect output to new device: %s\n",
- dst->name);
- dss_mgr_disconnect(&hdmi->output, dssdev);
- return r;
- }
-
+ dst->dispc_channel_connected = true;
return 0;
}
-static void hdmi_disconnect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static void hdmi_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
- WARN_ON(dst != dssdev->dst);
+ dst->dispc_channel_connected = false;
- if (dst != dssdev->dst)
- return;
-
- omapdss_output_unset_device(dssdev);
-
- dss_mgr_disconnect(&hdmi->output, dssdev);
+ omapdss_device_disconnect(dst, dst->next);
}
static int hdmi_read_edid(struct omap_dss_device *dssdev,
@@ -548,69 +487,28 @@ static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev,
return 0;
}
-static const struct omapdss_hdmi_ops hdmi_ops = {
+static const struct omap_dss_device_ops hdmi_ops = {
.connect = hdmi_connect,
.disconnect = hdmi_disconnect,
.enable = hdmi_display_enable,
.disable = hdmi_display_disable,
- .check_timings = hdmi_display_check_timing,
- .set_timings = hdmi_display_set_timing,
- .get_timings = hdmi_display_get_timings,
+ .set_timings = hdmi_display_set_timings,
.read_edid = hdmi_read_edid,
- .lost_hotplug = hdmi_lost_hotplug,
- .set_infoframe = hdmi_set_infoframe,
- .set_hdmi_mode = hdmi_set_hdmi_mode,
-};
-
-static void hdmi_init_output(struct omap_hdmi *hdmi)
-{
- struct omap_dss_device *out = &hdmi->output;
-
- out->dev = &hdmi->pdev->dev;
- out->id = OMAP_DSS_OUTPUT_HDMI;
- out->output_type = OMAP_DISPLAY_TYPE_HDMI;
- out->name = "hdmi.0";
- out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->ops.hdmi = &hdmi_ops;
- out->owner = THIS_MODULE;
-
- omapdss_register_output(out);
-}
-
-static void hdmi_uninit_output(struct omap_hdmi *hdmi)
-{
- struct omap_dss_device *out = &hdmi->output;
-
- omapdss_unregister_output(out);
-}
-
-static int hdmi_probe_of(struct omap_hdmi *hdmi)
-{
- struct platform_device *pdev = hdmi->pdev;
- struct device_node *node = pdev->dev.of_node;
- struct device_node *ep;
- int r;
-
- ep = of_graph_get_endpoint_by_regs(node, 0, 0);
- if (!ep)
- return 0;
-
- r = hdmi_parse_lanes_of(pdev, ep, &hdmi->phy);
- if (r)
- goto err;
- of_node_put(ep);
- return 0;
+ .hdmi = {
+ .lost_hotplug = hdmi_lost_hotplug,
+ .set_infoframe = hdmi_set_infoframe,
+ .set_hdmi_mode = hdmi_set_hdmi_mode,
+ },
+};
-err:
- of_node_put(ep);
- return r;
-}
+/* -----------------------------------------------------------------------------
+ * Audio Callbacks
+ */
-/* Audio callbacks */
static int hdmi_audio_startup(struct device *dev,
void (*abort_cb)(struct device *dev))
{
@@ -725,27 +623,143 @@ static int hdmi_audio_register(struct omap_hdmi *hdmi)
return 0;
}
-/* HDMI HW IP initialisation */
+/* -----------------------------------------------------------------------------
+ * Component Bind & Unbind
+ */
+
static int hdmi4_bind(struct device *dev, struct device *master, void *data)
{
- struct platform_device *pdev = to_platform_device(dev);
struct dss_device *dss = dss_get_device(master);
- struct omap_hdmi *hdmi;
+ struct omap_hdmi *hdmi = dev_get_drvdata(dev);
+ int r;
+
+ hdmi->dss = dss;
+
+ r = hdmi_pll_init(dss, hdmi->pdev, &hdmi->pll, &hdmi->wp);
+ if (r)
+ return r;
+
+ r = hdmi4_cec_init(hdmi->pdev, &hdmi->core, &hdmi->wp);
+ if (r)
+ goto err_pll_uninit;
+
+ r = hdmi_audio_register(hdmi);
+ if (r) {
+ DSSERR("Registering HDMI audio failed\n");
+ goto err_cec_uninit;
+ }
+
+ hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs,
+ hdmi);
+
+ return 0;
+
+err_cec_uninit:
+ hdmi4_cec_uninit(&hdmi->core);
+err_pll_uninit:
+ hdmi_pll_uninit(&hdmi->pll);
+ return r;
+}
+
+static void hdmi4_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct omap_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dss_debugfs_remove_file(hdmi->debugfs);
+
+ if (hdmi->audio_pdev)
+ platform_device_unregister(hdmi->audio_pdev);
+
+ hdmi4_cec_uninit(&hdmi->core);
+ hdmi_pll_uninit(&hdmi->pll);
+}
+
+static const struct component_ops hdmi4_component_ops = {
+ .bind = hdmi4_bind,
+ .unbind = hdmi4_unbind,
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe & Remove, Suspend & Resume
+ */
+
+static int hdmi4_init_output(struct omap_hdmi *hdmi)
+{
+ struct omap_dss_device *out = &hdmi->output;
int r;
+
+ out->dev = &hdmi->pdev->dev;
+ out->id = OMAP_DSS_OUTPUT_HDMI;
+ out->output_type = OMAP_DISPLAY_TYPE_HDMI;
+ out->name = "hdmi.0";
+ out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
+ out->ops = &hdmi_ops;
+ out->owner = THIS_MODULE;
+ out->of_ports = BIT(0);
+ out->ops_flags = OMAP_DSS_DEVICE_OP_EDID;
+
+ out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
+ if (IS_ERR(out->next)) {
+ if (PTR_ERR(out->next) != -EPROBE_DEFER)
+ dev_err(out->dev, "failed to find video sink\n");
+ return PTR_ERR(out->next);
+ }
+
+ r = omapdss_output_validate(out);
+ if (r) {
+ omapdss_device_put(out->next);
+ out->next = NULL;
+ return r;
+ }
+
+ omapdss_device_register(out);
+
+ return 0;
+}
+
+static void hdmi4_uninit_output(struct omap_hdmi *hdmi)
+{
+ struct omap_dss_device *out = &hdmi->output;
+
+ if (out->next)
+ omapdss_device_put(out->next);
+ omapdss_device_unregister(out);
+}
+
+static int hdmi4_probe_of(struct omap_hdmi *hdmi)
+{
+ struct platform_device *pdev = hdmi->pdev;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *ep;
+ int r;
+
+ ep = of_graph_get_endpoint_by_regs(node, 0, 0);
+ if (!ep)
+ return 0;
+
+ r = hdmi_parse_lanes_of(pdev, ep, &hdmi->phy);
+ of_node_put(ep);
+ return r;
+}
+
+static int hdmi4_probe(struct platform_device *pdev)
+{
+ struct omap_hdmi *hdmi;
int irq;
+ int r;
hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
return -ENOMEM;
hdmi->pdev = pdev;
- hdmi->dss = dss;
+
dev_set_drvdata(&pdev->dev, hdmi);
mutex_init(&hdmi->lock);
spin_lock_init(&hdmi->audio_playing_lock);
- r = hdmi_probe_of(hdmi);
+ r = hdmi4_probe_of(hdmi);
if (r)
goto err_free;
@@ -753,27 +767,19 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
if (r)
goto err_free;
- r = hdmi_pll_init(dss, pdev, &hdmi->pll, &hdmi->wp);
- if (r)
- goto err_free;
-
r = hdmi_phy_init(pdev, &hdmi->phy, 4);
if (r)
- goto err_pll;
+ goto err_free;
r = hdmi4_core_init(pdev, &hdmi->core);
if (r)
- goto err_pll;
-
- r = hdmi4_cec_init(pdev, &hdmi->core, &hdmi->wp);
- if (r)
- goto err_pll;
+ goto err_free;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
DSSERR("platform_get_irq failed\n");
r = -ENODEV;
- goto err_pll;
+ goto err_free;
}
r = devm_request_threaded_irq(&pdev->dev, irq,
@@ -781,66 +787,49 @@ static int hdmi4_bind(struct device *dev, struct device *master, void *data)
IRQF_ONESHOT, "OMAP HDMI", hdmi);
if (r) {
DSSERR("HDMI IRQ request failed\n");
- goto err_pll;
+ goto err_free;
}
- pm_runtime_enable(&pdev->dev);
+ hdmi->vdda_reg = devm_regulator_get(&pdev->dev, "vdda");
+ if (IS_ERR(hdmi->vdda_reg)) {
+ r = PTR_ERR(hdmi->vdda_reg);
+ if (r != -EPROBE_DEFER)
+ DSSERR("can't get VDDA regulator\n");
+ goto err_free;
+ }
- hdmi_init_output(hdmi);
+ pm_runtime_enable(&pdev->dev);
- r = hdmi_audio_register(hdmi);
- if (r) {
- DSSERR("Registering HDMI audio failed\n");
- hdmi_uninit_output(hdmi);
- pm_runtime_disable(&pdev->dev);
- return r;
- }
+ r = hdmi4_init_output(hdmi);
+ if (r)
+ goto err_pm_disable;
- hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs,
- hdmi);
+ r = component_add(&pdev->dev, &hdmi4_component_ops);
+ if (r)
+ goto err_uninit_output;
return 0;
-err_pll:
- hdmi_pll_uninit(&hdmi->pll);
+err_uninit_output:
+ hdmi4_uninit_output(hdmi);
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
err_free:
kfree(hdmi);
return r;
}
-static void hdmi4_unbind(struct device *dev, struct device *master, void *data)
+static int hdmi4_remove(struct platform_device *pdev)
{
- struct omap_hdmi *hdmi = dev_get_drvdata(dev);
-
- dss_debugfs_remove_file(hdmi->debugfs);
-
- if (hdmi->audio_pdev)
- platform_device_unregister(hdmi->audio_pdev);
+ struct omap_hdmi *hdmi = platform_get_drvdata(pdev);
- hdmi_uninit_output(hdmi);
-
- hdmi4_cec_uninit(&hdmi->core);
+ component_del(&pdev->dev, &hdmi4_component_ops);
- hdmi_pll_uninit(&hdmi->pll);
+ hdmi4_uninit_output(hdmi);
- pm_runtime_disable(dev);
+ pm_runtime_disable(&pdev->dev);
kfree(hdmi);
-}
-
-static const struct component_ops hdmi4_component_ops = {
- .bind = hdmi4_bind,
- .unbind = hdmi4_unbind,
-};
-
-static int hdmi4_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &hdmi4_component_ops);
-}
-
-static int hdmi4_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &hdmi4_component_ops);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5.c b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
index ae1a001d1b83..b0e4a7463f8c 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5.c
@@ -117,24 +117,6 @@ static irqreturn_t hdmi_irq_handler(int irq, void *data)
return IRQ_HANDLED;
}
-static int hdmi_init_regulator(struct omap_hdmi *hdmi)
-{
- struct regulator *reg;
-
- if (hdmi->vdda_reg != NULL)
- return 0;
-
- reg = devm_regulator_get(&hdmi->pdev->dev, "vdda");
- if (IS_ERR(reg)) {
- DSSERR("can't get VDDA regulator\n");
- return PTR_ERR(reg);
- }
-
- hdmi->vdda_reg = reg;
-
- return 0;
-}
-
static int hdmi_power_on_core(struct omap_hdmi *hdmi)
{
int r;
@@ -171,7 +153,7 @@ static void hdmi_power_off_core(struct omap_hdmi *hdmi)
static int hdmi_power_on_full(struct omap_hdmi *hdmi)
{
int r;
- struct videomode *vm;
+ const struct videomode *vm;
struct dss_pll_clock_info hdmi_cinfo = { 0 };
unsigned int pc;
@@ -224,9 +206,6 @@ static int hdmi_power_on_full(struct omap_hdmi *hdmi)
hdmi5_configure(&hdmi->core, &hdmi->wp, &hdmi->cfg);
- /* tv size */
- dss_mgr_set_timings(&hdmi->output, vm);
-
r = dss_mgr_enable(&hdmi->output);
if (r)
goto err_mgr_enable;
@@ -268,19 +247,8 @@ static void hdmi_power_off_full(struct omap_hdmi *hdmi)
hdmi_power_off_core(hdmi);
}
-static int hdmi_display_check_timing(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
- if (!dispc_mgr_timings_ok(hdmi->dss->dispc, dssdev->dispc_channel, vm))
- return -EINVAL;
-
- return 0;
-}
-
-static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
- struct videomode *vm)
+static void hdmi_display_set_timings(struct omap_dss_device *dssdev,
+ const struct videomode *vm)
{
struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
@@ -293,14 +261,6 @@ static void hdmi_display_set_timing(struct omap_dss_device *dssdev,
mutex_unlock(&hdmi->lock);
}
-static void hdmi_display_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
- *vm = hdmi->cfg.vm;
-}
-
static int hdmi_dump_regs(struct seq_file *s, void *p)
{
struct omap_hdmi *hdmi = s->private;
@@ -459,44 +419,25 @@ static void hdmi_core_disable(struct omap_hdmi *hdmi)
mutex_unlock(&hdmi->lock);
}
-static int hdmi_connect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static int hdmi_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
int r;
- r = hdmi_init_regulator(hdmi);
- if (r)
- return r;
-
- r = dss_mgr_connect(&hdmi->output, dssdev);
+ r = omapdss_device_connect(dst->dss, dst, dst->next);
if (r)
return r;
- r = omapdss_output_set_device(dssdev, dst);
- if (r) {
- DSSERR("failed to connect output to new device: %s\n",
- dst->name);
- dss_mgr_disconnect(&hdmi->output, dssdev);
- return r;
- }
-
+ dst->dispc_channel_connected = true;
return 0;
}
-static void hdmi_disconnect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static void hdmi_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct omap_hdmi *hdmi = dssdev_to_hdmi(dssdev);
-
- WARN_ON(dst != dssdev->dst);
+ dst->dispc_channel_connected = false;
- if (dst != dssdev->dst)
- return;
-
- omapdss_output_unset_device(dssdev);
-
- dss_mgr_disconnect(&hdmi->output, dssdev);
+ omapdss_device_disconnect(dst, dst->next);
}
static int hdmi_read_edid(struct omap_dss_device *dssdev,
@@ -540,68 +481,27 @@ static int hdmi_set_hdmi_mode(struct omap_dss_device *dssdev,
return 0;
}
-static const struct omapdss_hdmi_ops hdmi_ops = {
+static const struct omap_dss_device_ops hdmi_ops = {
.connect = hdmi_connect,
.disconnect = hdmi_disconnect,
.enable = hdmi_display_enable,
.disable = hdmi_display_disable,
- .check_timings = hdmi_display_check_timing,
- .set_timings = hdmi_display_set_timing,
- .get_timings = hdmi_display_get_timings,
+ .set_timings = hdmi_display_set_timings,
.read_edid = hdmi_read_edid,
- .set_infoframe = hdmi_set_infoframe,
- .set_hdmi_mode = hdmi_set_hdmi_mode,
-};
-
-static void hdmi_init_output(struct omap_hdmi *hdmi)
-{
- struct omap_dss_device *out = &hdmi->output;
-
- out->dev = &hdmi->pdev->dev;
- out->id = OMAP_DSS_OUTPUT_HDMI;
- out->output_type = OMAP_DISPLAY_TYPE_HDMI;
- out->name = "hdmi.0";
- out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->ops.hdmi = &hdmi_ops;
- out->owner = THIS_MODULE;
-
- omapdss_register_output(out);
-}
-
-static void hdmi_uninit_output(struct omap_hdmi *hdmi)
-{
- struct omap_dss_device *out = &hdmi->output;
-
- omapdss_unregister_output(out);
-}
-
-static int hdmi_probe_of(struct omap_hdmi *hdmi)
-{
- struct platform_device *pdev = hdmi->pdev;
- struct device_node *node = pdev->dev.of_node;
- struct device_node *ep;
- int r;
-
- ep = of_graph_get_endpoint_by_regs(node, 0, 0);
- if (!ep)
- return 0;
-
- r = hdmi_parse_lanes_of(pdev, ep, &hdmi->phy);
- if (r)
- goto err;
- of_node_put(ep);
- return 0;
+ .hdmi = {
+ .set_infoframe = hdmi_set_infoframe,
+ .set_hdmi_mode = hdmi_set_hdmi_mode,
+ },
+};
-err:
- of_node_put(ep);
- return r;
-}
+/* -----------------------------------------------------------------------------
+ * Audio Callbacks
+ */
-/* Audio callbacks */
static int hdmi_audio_startup(struct device *dev,
void (*abort_cb)(struct device *dev))
{
@@ -722,27 +622,136 @@ static int hdmi_audio_register(struct omap_hdmi *hdmi)
return 0;
}
-/* HDMI HW IP initialisation */
+/* -----------------------------------------------------------------------------
+ * Component Bind & Unbind
+ */
+
static int hdmi5_bind(struct device *dev, struct device *master, void *data)
{
- struct platform_device *pdev = to_platform_device(dev);
struct dss_device *dss = dss_get_device(master);
- struct omap_hdmi *hdmi;
+ struct omap_hdmi *hdmi = dev_get_drvdata(dev);
+ int r;
+
+ hdmi->dss = dss;
+
+ r = hdmi_pll_init(dss, hdmi->pdev, &hdmi->pll, &hdmi->wp);
+ if (r)
+ return r;
+
+ r = hdmi_audio_register(hdmi);
+ if (r) {
+ DSSERR("Registering HDMI audio failed %d\n", r);
+ goto err_pll_uninit;
+ }
+
+ hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs,
+ hdmi);
+
+ return 0;
+
+err_pll_uninit:
+ hdmi_pll_uninit(&hdmi->pll);
+ return r;
+}
+
+static void hdmi5_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct omap_hdmi *hdmi = dev_get_drvdata(dev);
+
+ dss_debugfs_remove_file(hdmi->debugfs);
+
+ if (hdmi->audio_pdev)
+ platform_device_unregister(hdmi->audio_pdev);
+
+ hdmi_pll_uninit(&hdmi->pll);
+}
+
+static const struct component_ops hdmi5_component_ops = {
+ .bind = hdmi5_bind,
+ .unbind = hdmi5_unbind,
+};
+
+/* -----------------------------------------------------------------------------
+ * Probe & Remove, Suspend & Resume
+ */
+
+static int hdmi5_init_output(struct omap_hdmi *hdmi)
+{
+ struct omap_dss_device *out = &hdmi->output;
+ int r;
+
+ out->dev = &hdmi->pdev->dev;
+ out->id = OMAP_DSS_OUTPUT_HDMI;
+ out->output_type = OMAP_DISPLAY_TYPE_HDMI;
+ out->name = "hdmi.0";
+ out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
+ out->ops = &hdmi_ops;
+ out->owner = THIS_MODULE;
+ out->of_ports = BIT(0);
+ out->ops_flags = OMAP_DSS_DEVICE_OP_EDID;
+
+ out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
+ if (IS_ERR(out->next)) {
+ if (PTR_ERR(out->next) != -EPROBE_DEFER)
+ dev_err(out->dev, "failed to find video sink\n");
+ return PTR_ERR(out->next);
+ }
+
+ r = omapdss_output_validate(out);
+ if (r) {
+ omapdss_device_put(out->next);
+ out->next = NULL;
+ return r;
+ }
+
+ omapdss_device_register(out);
+
+ return 0;
+}
+
+static void hdmi5_uninit_output(struct omap_hdmi *hdmi)
+{
+ struct omap_dss_device *out = &hdmi->output;
+
+ if (out->next)
+ omapdss_device_put(out->next);
+ omapdss_device_unregister(out);
+}
+
+static int hdmi5_probe_of(struct omap_hdmi *hdmi)
+{
+ struct platform_device *pdev = hdmi->pdev;
+ struct device_node *node = pdev->dev.of_node;
+ struct device_node *ep;
int r;
+
+ ep = of_graph_get_endpoint_by_regs(node, 0, 0);
+ if (!ep)
+ return 0;
+
+ r = hdmi_parse_lanes_of(pdev, ep, &hdmi->phy);
+ of_node_put(ep);
+ return r;
+}
+
+static int hdmi5_probe(struct platform_device *pdev)
+{
+ struct omap_hdmi *hdmi;
int irq;
+ int r;
hdmi = kzalloc(sizeof(*hdmi), GFP_KERNEL);
if (!hdmi)
return -ENOMEM;
hdmi->pdev = pdev;
- hdmi->dss = dss;
+
dev_set_drvdata(&pdev->dev, hdmi);
mutex_init(&hdmi->lock);
spin_lock_init(&hdmi->audio_playing_lock);
- r = hdmi_probe_of(hdmi);
+ r = hdmi5_probe_of(hdmi);
if (r)
goto err_free;
@@ -750,23 +759,19 @@ static int hdmi5_bind(struct device *dev, struct device *master, void *data)
if (r)
goto err_free;
- r = hdmi_pll_init(dss, pdev, &hdmi->pll, &hdmi->wp);
- if (r)
- goto err_free;
-
r = hdmi_phy_init(pdev, &hdmi->phy, 5);
if (r)
- goto err_pll;
+ goto err_free;
r = hdmi5_core_init(pdev, &hdmi->core);
if (r)
- goto err_pll;
+ goto err_free;
irq = platform_get_irq(pdev, 0);
if (irq < 0) {
DSSERR("platform_get_irq failed\n");
r = -ENODEV;
- goto err_pll;
+ goto err_free;
}
r = devm_request_threaded_irq(&pdev->dev, irq,
@@ -774,64 +779,49 @@ static int hdmi5_bind(struct device *dev, struct device *master, void *data)
IRQF_ONESHOT, "OMAP HDMI", hdmi);
if (r) {
DSSERR("HDMI IRQ request failed\n");
- goto err_pll;
+ goto err_free;
}
- pm_runtime_enable(&pdev->dev);
+ hdmi->vdda_reg = devm_regulator_get(&pdev->dev, "vdda");
+ if (IS_ERR(hdmi->vdda_reg)) {
+ r = PTR_ERR(hdmi->vdda_reg);
+ if (r != -EPROBE_DEFER)
+ DSSERR("can't get VDDA regulator\n");
+ goto err_free;
+ }
- hdmi_init_output(hdmi);
+ pm_runtime_enable(&pdev->dev);
- r = hdmi_audio_register(hdmi);
- if (r) {
- DSSERR("Registering HDMI audio failed %d\n", r);
- hdmi_uninit_output(hdmi);
- pm_runtime_disable(&pdev->dev);
- return r;
- }
+ r = hdmi5_init_output(hdmi);
+ if (r)
+ goto err_pm_disable;
- hdmi->debugfs = dss_debugfs_create_file(dss, "hdmi", hdmi_dump_regs,
- hdmi);
+ r = component_add(&pdev->dev, &hdmi5_component_ops);
+ if (r)
+ goto err_uninit_output;
return 0;
-err_pll:
- hdmi_pll_uninit(&hdmi->pll);
+err_uninit_output:
+ hdmi5_uninit_output(hdmi);
+err_pm_disable:
+ pm_runtime_disable(&pdev->dev);
err_free:
kfree(hdmi);
return r;
}
-static void hdmi5_unbind(struct device *dev, struct device *master, void *data)
+static int hdmi5_remove(struct platform_device *pdev)
{
- struct omap_hdmi *hdmi = dev_get_drvdata(dev);
-
- dss_debugfs_remove_file(hdmi->debugfs);
+ struct omap_hdmi *hdmi = platform_get_drvdata(pdev);
- if (hdmi->audio_pdev)
- platform_device_unregister(hdmi->audio_pdev);
-
- hdmi_uninit_output(hdmi);
+ component_del(&pdev->dev, &hdmi5_component_ops);
- hdmi_pll_uninit(&hdmi->pll);
+ hdmi5_uninit_output(hdmi);
- pm_runtime_disable(dev);
+ pm_runtime_disable(&pdev->dev);
kfree(hdmi);
-}
-
-static const struct component_ops hdmi5_component_ops = {
- .bind = hdmi5_bind,
- .unbind = hdmi5_unbind,
-};
-
-static int hdmi5_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &hdmi5_component_ops);
-}
-
-static int hdmi5_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &hdmi5_component_ops);
return 0;
}
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
index 2282e48574c6..02efabc7ed76 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi5_core.c
@@ -287,7 +287,7 @@ void hdmi5_core_dump(struct hdmi_core_data *core, struct seq_file *s)
}
static void hdmi_core_init(struct hdmi_core_vid_config *video_cfg,
- struct hdmi_config *cfg)
+ const struct hdmi_config *cfg)
{
DSSDBG("hdmi_core_init\n");
@@ -325,10 +325,10 @@ static void hdmi_core_init(struct hdmi_core_vid_config *video_cfg,
/* DSS_HDMI_CORE_VIDEO_CONFIG */
static void hdmi_core_video_config(struct hdmi_core_data *core,
- struct hdmi_core_vid_config *cfg)
+ const struct hdmi_core_vid_config *cfg)
{
void __iomem *base = core->base;
- struct videomode *vm = &cfg->v_fc_config.vm;
+ const struct videomode *vm = &cfg->v_fc_config.vm;
unsigned char r = 0;
bool vsync_pol, hsync_pol;
diff --git a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
index 53bc5f78050c..100efb9f08c6 100644
--- a/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
+++ b/drivers/gpu/drm/omapdrm/dss/hdmi_wp.c
@@ -131,7 +131,7 @@ void hdmi_wp_video_stop(struct hdmi_wp_data *wp)
}
void hdmi_wp_video_config_format(struct hdmi_wp_data *wp,
- struct hdmi_video_format *video_fmt)
+ const struct hdmi_video_format *video_fmt)
{
u32 l = 0;
@@ -144,7 +144,7 @@ void hdmi_wp_video_config_format(struct hdmi_wp_data *wp,
}
void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp,
- struct videomode *vm)
+ const struct videomode *vm)
{
u32 r;
bool vsync_inv, hsync_inv;
@@ -164,7 +164,7 @@ void hdmi_wp_video_config_interface(struct hdmi_wp_data *wp,
}
void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
- struct videomode *vm)
+ const struct videomode *vm)
{
u32 timing_h = 0;
u32 timing_v = 0;
@@ -193,7 +193,7 @@ void hdmi_wp_video_config_timing(struct hdmi_wp_data *wp,
}
void hdmi_wp_init_vid_fmt_timings(struct hdmi_video_format *video_fmt,
- struct videomode *vm, struct hdmi_config *param)
+ struct videomode *vm, const struct hdmi_config *param)
{
DSSDBG("Enter hdmi_wp_video_init_format\n");
diff --git a/drivers/gpu/drm/omapdrm/dss/omapdss.h b/drivers/gpu/drm/omapdrm/dss/omapdss.h
index 14d74adb13fb..1f698a95a94a 100644
--- a/drivers/gpu/drm/omapdrm/dss/omapdss.h
+++ b/drivers/gpu/drm/omapdrm/dss/omapdss.h
@@ -296,117 +296,14 @@ struct omap_dss_writeback_info {
u8 pre_mult_alpha;
};
-struct omapdss_dpi_ops {
- int (*connect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
- void (*disconnect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
-
- int (*enable)(struct omap_dss_device *dssdev);
- void (*disable)(struct omap_dss_device *dssdev);
-
- int (*check_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
- void (*set_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
- void (*get_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
-};
-
-struct omapdss_sdi_ops {
- int (*connect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
- void (*disconnect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
-
- int (*enable)(struct omap_dss_device *dssdev);
- void (*disable)(struct omap_dss_device *dssdev);
-
- int (*check_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
- void (*set_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
- void (*get_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
-};
-
-struct omapdss_dvi_ops {
- int (*connect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
- void (*disconnect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
-
- int (*enable)(struct omap_dss_device *dssdev);
- void (*disable)(struct omap_dss_device *dssdev);
-
- int (*check_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
- void (*set_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
- void (*get_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
-};
-
-struct omapdss_atv_ops {
- int (*connect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
- void (*disconnect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
-
- int (*enable)(struct omap_dss_device *dssdev);
- void (*disable)(struct omap_dss_device *dssdev);
-
- int (*check_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
- void (*set_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
- void (*get_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
-
- int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
- u32 (*get_wss)(struct omap_dss_device *dssdev);
-};
-
struct omapdss_hdmi_ops {
- int (*connect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
- void (*disconnect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
-
- int (*enable)(struct omap_dss_device *dssdev);
- void (*disable)(struct omap_dss_device *dssdev);
-
- int (*check_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
- void (*set_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
- void (*get_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
-
- int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
void (*lost_hotplug)(struct omap_dss_device *dssdev);
- bool (*detect)(struct omap_dss_device *dssdev);
-
- int (*register_hpd_cb)(struct omap_dss_device *dssdev,
- void (*cb)(void *cb_data,
- enum drm_connector_status status),
- void *cb_data);
- void (*unregister_hpd_cb)(struct omap_dss_device *dssdev);
- void (*enable_hpd)(struct omap_dss_device *dssdev);
- void (*disable_hpd)(struct omap_dss_device *dssdev);
-
int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
int (*set_infoframe)(struct omap_dss_device *dssdev,
const struct hdmi_avi_infoframe *avi);
};
struct omapdss_dsi_ops {
- int (*connect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
- void (*disconnect)(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
-
- int (*enable)(struct omap_dss_device *dssdev);
void (*disable)(struct omap_dss_device *dssdev, bool disconnect_lanes,
bool enter_ulps);
@@ -457,53 +354,95 @@ struct omapdss_dsi_ops {
int channel, u16 plen);
};
+struct omap_dss_device_ops {
+ int (*connect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+ void (*disconnect)(struct omap_dss_device *dssdev,
+ struct omap_dss_device *dst);
+
+ int (*enable)(struct omap_dss_device *dssdev);
+ void (*disable)(struct omap_dss_device *dssdev);
+
+ int (*check_timings)(struct omap_dss_device *dssdev,
+ struct videomode *vm);
+ void (*get_timings)(struct omap_dss_device *dssdev,
+ struct videomode *vm);
+ void (*set_timings)(struct omap_dss_device *dssdev,
+ const struct videomode *vm);
+
+ bool (*detect)(struct omap_dss_device *dssdev);
+
+ void (*register_hpd_cb)(struct omap_dss_device *dssdev,
+ void (*cb)(void *cb_data,
+ enum drm_connector_status status),
+ void *cb_data);
+ void (*unregister_hpd_cb)(struct omap_dss_device *dssdev);
+
+ int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
+
+ union {
+ const struct omapdss_hdmi_ops hdmi;
+ const struct omapdss_dsi_ops dsi;
+ };
+};
+
+/**
+ * enum omap_dss_device_ops_flag - Indicates which device ops are supported
+ * @OMAP_DSS_DEVICE_OP_DETECT: The device supports output connection detection
+ * @OMAP_DSS_DEVICE_OP_HPD: The device supports all hot-plug-related operations
+ * @OMAP_DSS_DEVICE_OP_EDID: The device supports reading EDID
+ */
+enum omap_dss_device_ops_flag {
+ OMAP_DSS_DEVICE_OP_DETECT = BIT(0),
+ OMAP_DSS_DEVICE_OP_HPD = BIT(1),
+ OMAP_DSS_DEVICE_OP_EDID = BIT(2),
+};
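A single omap_dss_device_ops now replaces the per-bus op tables: common operations sit at the top level, the bus-specific extensions live in the trailing union, and optional capabilities are advertised through ops_flags. A hedged dispatch sketch — foo_get_edid and the errno choice are illustrative:

	static int foo_get_edid(struct omap_dss_device *dssdev, u8 *buf, int len)
	{
		if (!(dssdev->ops_flags & OMAP_DSS_DEVICE_OP_EDID))
			return -EOPNOTSUPP;

		return dssdev->ops->read_edid(dssdev, buf, len);
	}

	/* Bus-specific ops are reached through the union, e.g. for DSI:
	 *	dssdev->ops->dsi.bus_lock(dssdev);
	 */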
+
+enum omap_dss_device_type {
+ OMAP_DSS_DEVICE_TYPE_OUTPUT = (1 << 0),
+ OMAP_DSS_DEVICE_TYPE_DISPLAY = (1 << 1),
+};
+
struct omap_dss_device {
struct kobject kobj;
struct device *dev;
struct module *owner;
- struct list_head panel_list;
+ struct dss_device *dss;
+ struct omap_dss_device *src;
+ struct omap_dss_device *dst;
+ struct omap_dss_device *next;
+
+ struct list_head list;
- /* alias in the form of "display%d" */
- char alias[16];
+ unsigned int alias_id;
enum omap_display_type type;
+ /*
+ * DSS output type that this device generates (for DSS internal devices)
+ * or requires (for external encoders). Must be OMAP_DISPLAY_TYPE_NONE
+ * for display devices (connectors and panels) and set to a non-zero
+ * value for all other devices.
+ */
enum omap_display_type output_type;
- struct {
- struct videomode vm;
-
- enum omap_dss_dsi_pixel_format dsi_pix_fmt;
- enum omap_dss_dsi_mode dsi_mode;
- } panel;
-
const char *name;
- struct omap_dss_driver *driver;
-
- union {
- const struct omapdss_dpi_ops *dpi;
- const struct omapdss_sdi_ops *sdi;
- const struct omapdss_dvi_ops *dvi;
- const struct omapdss_hdmi_ops *hdmi;
- const struct omapdss_atv_ops *atv;
- const struct omapdss_dsi_ops *dsi;
- } ops;
+ const struct omap_dss_driver *driver;
+ const struct omap_dss_device_ops *ops;
+ unsigned long ops_flags;
+ unsigned long bus_flags;
/* helper variable for driver suspend/resume */
bool activate_after_resume;
enum omap_display_caps caps;
- struct omap_dss_device *src;
-
enum omap_dss_display_state state;
/* OMAP DSS output specific fields */
- struct list_head list;
-
/* DISPC channel for this output */
enum omap_channel dispc_channel;
bool dispc_channel_connected;
@@ -511,24 +450,11 @@ struct omap_dss_device {
/* output instance */
enum omap_dss_output_id id;
- /* the port number in the DT node */
- int port_num;
-
- /* dynamic fields */
- struct omap_dss_device *dst;
+ /* bitmask of port numbers in DT */
+ unsigned int of_ports;
};
struct omap_dss_driver {
- int (*probe)(struct omap_dss_device *);
- void (*remove)(struct omap_dss_device *);
-
- int (*connect)(struct omap_dss_device *dssdev);
- void (*disconnect)(struct omap_dss_device *dssdev);
-
- int (*enable)(struct omap_dss_device *display);
- void (*disable)(struct omap_dss_device *display);
- int (*run_test)(struct omap_dss_device *display, int test);
-
int (*update)(struct omap_dss_device *dssdev,
u16 x, u16 y, u16 w, u16 h);
int (*sync)(struct omap_dss_device *dssdev);
@@ -536,42 +462,12 @@ struct omap_dss_driver {
int (*enable_te)(struct omap_dss_device *dssdev, bool enable);
int (*get_te)(struct omap_dss_device *dssdev);
- u8 (*get_rotate)(struct omap_dss_device *dssdev);
- int (*set_rotate)(struct omap_dss_device *dssdev, u8 rotate);
-
- bool (*get_mirror)(struct omap_dss_device *dssdev);
- int (*set_mirror)(struct omap_dss_device *dssdev, bool enable);
-
int (*memory_read)(struct omap_dss_device *dssdev,
void *buf, size_t size,
u16 x, u16 y, u16 w, u16 h);
- int (*check_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
- void (*set_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
- void (*get_timings)(struct omap_dss_device *dssdev,
- struct videomode *vm);
void (*get_size)(struct omap_dss_device *dssdev,
unsigned int *width, unsigned int *height);
-
- int (*set_wss)(struct omap_dss_device *dssdev, u32 wss);
- u32 (*get_wss)(struct omap_dss_device *dssdev);
-
- int (*read_edid)(struct omap_dss_device *dssdev, u8 *buf, int len);
- bool (*detect)(struct omap_dss_device *dssdev);
-
- int (*register_hpd_cb)(struct omap_dss_device *dssdev,
- void (*cb)(void *cb_data,
- enum drm_connector_status status),
- void *cb_data);
- void (*unregister_hpd_cb)(struct omap_dss_device *dssdev);
- void (*enable_hpd)(struct omap_dss_device *dssdev);
- void (*disable_hpd)(struct omap_dss_device *dssdev);
-
- int (*set_hdmi_mode)(struct omap_dss_device *dssdev, bool hdmi_mode);
- int (*set_hdmi_infoframe)(struct omap_dss_device *dssdev,
- const struct hdmi_avi_infoframe *avi);
};
struct dss_device *omapdss_get_dss(void);
@@ -581,27 +477,32 @@ static inline bool omapdss_is_initialized(void)
return !!omapdss_get_dss();
}
-int omapdss_register_display(struct omap_dss_device *dssdev);
-void omapdss_unregister_display(struct omap_dss_device *dssdev);
-
-struct omap_dss_device *omap_dss_get_device(struct omap_dss_device *dssdev);
-void omap_dss_put_device(struct omap_dss_device *dssdev);
-#define for_each_dss_dev(d) while ((d = omap_dss_get_next_device(d)) != NULL)
-struct omap_dss_device *omap_dss_get_next_device(struct omap_dss_device *from);
+#define for_each_dss_display(d) \
+ while ((d = omapdss_device_get_next(d, OMAP_DSS_DEVICE_TYPE_DISPLAY)) != NULL)
+void omapdss_display_init(struct omap_dss_device *dssdev);
+struct omap_dss_device *omapdss_display_get(struct omap_dss_device *output);
+
+void omapdss_device_register(struct omap_dss_device *dssdev);
+void omapdss_device_unregister(struct omap_dss_device *dssdev);
+struct omap_dss_device *omapdss_device_get(struct omap_dss_device *dssdev);
+void omapdss_device_put(struct omap_dss_device *dssdev);
+struct omap_dss_device *omapdss_find_device_by_port(struct device_node *src,
+ unsigned int port);
+struct omap_dss_device *omapdss_device_get_next(struct omap_dss_device *from,
+ enum omap_dss_device_type type);
+int omapdss_device_connect(struct dss_device *dss,
+ struct omap_dss_device *src,
+ struct omap_dss_device *dst);
+void omapdss_device_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst);
int omap_dss_get_num_overlay_managers(void);
int omap_dss_get_num_overlays(void);
-int omapdss_register_output(struct omap_dss_device *output);
-void omapdss_unregister_output(struct omap_dss_device *output);
-struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id);
-struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port);
-int omapdss_output_set_device(struct omap_dss_device *out,
- struct omap_dss_device *dssdev);
-int omapdss_output_unset_device(struct omap_dss_device *out);
-
-struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev);
+#define for_each_dss_output(d) \
+ while ((d = omapdss_device_get_next(d, OMAP_DSS_DEVICE_TYPE_OUTPUT)) != NULL)
+int omapdss_output_validate(struct omap_dss_device *out);
typedef void (*omap_dispc_isr_t) (void *arg, u32 mask);
int omap_dispc_register_isr(omap_dispc_isr_t isr, void *arg, u32 mask);
@@ -621,10 +522,7 @@ static inline bool omapdss_device_is_enabled(struct omap_dss_device *dssdev)
}
struct omap_dss_device *
-omapdss_of_find_source_for_first_ep(struct device_node *node);
-
-struct device_node *dss_of_port_get_parent_device(struct device_node *port);
-u32 dss_of_port_get_port_number(struct device_node *port);
+omapdss_of_find_connected_device(struct device_node *node, unsigned int port);
enum dss_writeback_channel {
DSS_WB_LCD1_MGR = 0,
@@ -638,13 +536,6 @@ enum dss_writeback_channel {
};
struct dss_mgr_ops {
- int (*connect)(struct omap_drm_private *priv,
- enum omap_channel channel,
- struct omap_dss_device *dst);
- void (*disconnect)(struct omap_drm_private *priv,
- enum omap_channel channel,
- struct omap_dss_device *dst);
-
void (*start_update)(struct omap_drm_private *priv,
enum omap_channel channel);
int (*enable)(struct omap_drm_private *priv,
@@ -665,14 +556,11 @@ struct dss_mgr_ops {
void (*handler)(void *), void *data);
};
-int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops,
+int dss_install_mgr_ops(struct dss_device *dss,
+ const struct dss_mgr_ops *mgr_ops,
struct omap_drm_private *priv);
-void dss_uninstall_mgr_ops(void);
+void dss_uninstall_mgr_ops(struct dss_device *dss);
-int dss_mgr_connect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
-void dss_mgr_disconnect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst);
void dss_mgr_set_timings(struct omap_dss_device *dssdev,
const struct videomode *vm);
void dss_mgr_set_lcd_config(struct omap_dss_device *dssdev,
@@ -720,13 +608,14 @@ struct dispc_ops {
void (*mgr_set_lcd_config)(struct dispc_device *dispc,
enum omap_channel channel,
const struct dss_lcd_mgr_config *config);
+ int (*mgr_check_timings)(struct dispc_device *dispc,
+ enum omap_channel channel,
+ const struct videomode *vm);
void (*mgr_set_timings)(struct dispc_device *dispc,
enum omap_channel channel,
const struct videomode *vm);
void (*mgr_setup)(struct dispc_device *dispc, enum omap_channel channel,
const struct omap_overlay_manager_info *info);
- enum omap_dss_output_id (*mgr_get_supported_outputs)(
- struct dispc_device *dispc, enum omap_channel channel);
u32 (*mgr_gamma_size)(struct dispc_device *dispc,
enum omap_channel channel);
void (*mgr_set_gamma)(struct dispc_device *dispc,
@@ -757,9 +646,6 @@ struct dispc_ops {
struct dispc_device *dispc_get_dispc(struct dss_device *dss);
const struct dispc_ops *dispc_get_ops(struct dss_device *dss);
-bool omapdss_component_is_display(struct device_node *node);
-bool omapdss_component_is_output(struct device_node *node);
-
bool omapdss_stack_is_ready(void);
void omapdss_gather_components(struct device *dev);
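
A note on the iteration helpers declared above: omapdss_device_get_next() releases the reference it was handed in 'from' and takes a new one on the device it returns, so a for_each_dss_output() loop that runs to completion (NULL) leaves no reference outstanding, while any early exit leaves the caller holding one that must be dropped explicitly. A hedged usage sketch of that convention, modelled on the omap_drv.c hunk later in this patch (the function name is hypothetical):

static int example_connect_all_outputs(struct dss_device *dss)
{
	struct omap_dss_device *output = NULL;
	int r;

	for_each_dss_output(output) {
		r = omapdss_device_connect(dss, NULL, output);
		if (r == -EPROBE_DEFER) {
			/*
			 * Early exit: the iterator still holds a reference
			 * on 'output' that the next omapdss_device_get_next()
			 * call would otherwise drop, so release it by hand.
			 */
			omapdss_device_put(output);
			return r;
		}
	}

	return 0;	/* loop ended at NULL: nothing left to release */
}
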
diff --git a/drivers/gpu/drm/omapdrm/dss/output.c b/drivers/gpu/drm/omapdrm/dss/output.c
index 96b9d4cd505f..18505bc70f7e 100644
--- a/drivers/gpu/drm/omapdrm/dss/output.c
+++ b/drivers/gpu/drm/omapdrm/dss/output.c
@@ -21,238 +21,96 @@
#include <linux/slab.h>
#include <linux/of.h>
+#include "dss.h"
#include "omapdss.h"
-static LIST_HEAD(output_list);
-static DEFINE_MUTEX(output_lock);
-
-int omapdss_output_set_device(struct omap_dss_device *out,
- struct omap_dss_device *dssdev)
+int omapdss_output_validate(struct omap_dss_device *out)
{
- int r;
-
- mutex_lock(&output_lock);
-
- if (out->dst) {
- dev_err(out->dev,
- "output already has device %s connected to it\n",
- out->dst->name);
- r = -EINVAL;
- goto err;
- }
-
- if (out->output_type != dssdev->type) {
+ if (out->next && out->output_type != out->next->type) {
dev_err(out->dev, "output type and display type don't match\n");
- r = -EINVAL;
- goto err;
- }
-
- out->dst = dssdev;
- dssdev->src = out;
-
- mutex_unlock(&output_lock);
-
- return 0;
-err:
- mutex_unlock(&output_lock);
-
- return r;
-}
-EXPORT_SYMBOL(omapdss_output_set_device);
-
-int omapdss_output_unset_device(struct omap_dss_device *out)
-{
- int r;
-
- mutex_lock(&output_lock);
-
- if (!out->dst) {
- dev_err(out->dev,
- "output doesn't have a device connected to it\n");
- r = -EINVAL;
- goto err;
+ return -EINVAL;
}
- if (out->dst->state != OMAP_DSS_DISPLAY_DISABLED) {
- dev_err(out->dev,
- "device %s is not disabled, cannot unset device\n",
- out->dst->name);
- r = -EINVAL;
- goto err;
- }
-
- out->dst->src = NULL;
- out->dst = NULL;
-
- mutex_unlock(&output_lock);
-
- return 0;
-err:
- mutex_unlock(&output_lock);
-
- return r;
-}
-EXPORT_SYMBOL(omapdss_output_unset_device);
-
-int omapdss_register_output(struct omap_dss_device *out)
-{
- list_add_tail(&out->list, &output_list);
return 0;
}
-EXPORT_SYMBOL(omapdss_register_output);
-
-void omapdss_unregister_output(struct omap_dss_device *out)
-{
- list_del(&out->list);
-}
-EXPORT_SYMBOL(omapdss_unregister_output);
-
-bool omapdss_component_is_output(struct device_node *node)
-{
- struct omap_dss_device *out;
-
- list_for_each_entry(out, &output_list, list) {
- if (out->dev->of_node == node)
- return true;
- }
+EXPORT_SYMBOL(omapdss_output_validate);
- return false;
-}
-EXPORT_SYMBOL(omapdss_component_is_output);
-
-struct omap_dss_device *omap_dss_get_output(enum omap_dss_output_id id)
-{
- struct omap_dss_device *out;
-
- list_for_each_entry(out, &output_list, list) {
- if (out->id == id)
- return out;
- }
-
- return NULL;
-}
-EXPORT_SYMBOL(omap_dss_get_output);
-
-struct omap_dss_device *omap_dss_find_output_by_port_node(struct device_node *port)
-{
- struct device_node *src_node;
- struct omap_dss_device *out;
- u32 reg;
-
- src_node = dss_of_port_get_parent_device(port);
- if (!src_node)
- return NULL;
-
- reg = dss_of_port_get_port_number(port);
-
- list_for_each_entry(out, &output_list, list) {
- if (out->dev->of_node == src_node && out->port_num == reg) {
- of_node_put(src_node);
- return omap_dss_get_device(out);
- }
- }
-
- of_node_put(src_node);
-
- return NULL;
-}
-
-struct omap_dss_device *omapdss_find_output_from_display(struct omap_dss_device *dssdev)
-{
- while (dssdev->src)
- dssdev = dssdev->src;
-
- if (dssdev->id != 0)
- return omap_dss_get_device(dssdev);
-
- return NULL;
-}
-EXPORT_SYMBOL(omapdss_find_output_from_display);
-
-static const struct dss_mgr_ops *dss_mgr_ops;
-static struct omap_drm_private *dss_mgr_ops_priv;
-
-int dss_install_mgr_ops(const struct dss_mgr_ops *mgr_ops,
+int dss_install_mgr_ops(struct dss_device *dss,
+ const struct dss_mgr_ops *mgr_ops,
struct omap_drm_private *priv)
{
- if (dss_mgr_ops)
+ if (dss->mgr_ops)
return -EBUSY;
- dss_mgr_ops = mgr_ops;
- dss_mgr_ops_priv = priv;
+ dss->mgr_ops = mgr_ops;
+ dss->mgr_ops_priv = priv;
return 0;
}
EXPORT_SYMBOL(dss_install_mgr_ops);
-void dss_uninstall_mgr_ops(void)
+void dss_uninstall_mgr_ops(struct dss_device *dss)
{
- dss_mgr_ops = NULL;
- dss_mgr_ops_priv = NULL;
+ dss->mgr_ops = NULL;
+ dss->mgr_ops_priv = NULL;
}
EXPORT_SYMBOL(dss_uninstall_mgr_ops);
-int dss_mgr_connect(struct omap_dss_device *dssdev, struct omap_dss_device *dst)
-{
- return dss_mgr_ops->connect(dss_mgr_ops_priv,
- dssdev->dispc_channel, dst);
-}
-EXPORT_SYMBOL(dss_mgr_connect);
-
-void dss_mgr_disconnect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
-{
- dss_mgr_ops->disconnect(dss_mgr_ops_priv, dssdev->dispc_channel, dst);
-}
-EXPORT_SYMBOL(dss_mgr_disconnect);
-
void dss_mgr_set_timings(struct omap_dss_device *dssdev,
const struct videomode *vm)
{
- dss_mgr_ops->set_timings(dss_mgr_ops_priv, dssdev->dispc_channel, vm);
+ dssdev->dss->mgr_ops->set_timings(dssdev->dss->mgr_ops_priv,
+ dssdev->dispc_channel, vm);
}
EXPORT_SYMBOL(dss_mgr_set_timings);
void dss_mgr_set_lcd_config(struct omap_dss_device *dssdev,
const struct dss_lcd_mgr_config *config)
{
- dss_mgr_ops->set_lcd_config(dss_mgr_ops_priv,
- dssdev->dispc_channel, config);
+ dssdev->dss->mgr_ops->set_lcd_config(dssdev->dss->mgr_ops_priv,
+ dssdev->dispc_channel, config);
}
EXPORT_SYMBOL(dss_mgr_set_lcd_config);
int dss_mgr_enable(struct omap_dss_device *dssdev)
{
- return dss_mgr_ops->enable(dss_mgr_ops_priv, dssdev->dispc_channel);
+ return dssdev->dss->mgr_ops->enable(dssdev->dss->mgr_ops_priv,
+ dssdev->dispc_channel);
}
EXPORT_SYMBOL(dss_mgr_enable);
void dss_mgr_disable(struct omap_dss_device *dssdev)
{
- dss_mgr_ops->disable(dss_mgr_ops_priv, dssdev->dispc_channel);
+ dssdev->dss->mgr_ops->disable(dssdev->dss->mgr_ops_priv,
+ dssdev->dispc_channel);
}
EXPORT_SYMBOL(dss_mgr_disable);
void dss_mgr_start_update(struct omap_dss_device *dssdev)
{
- dss_mgr_ops->start_update(dss_mgr_ops_priv, dssdev->dispc_channel);
+ dssdev->dss->mgr_ops->start_update(dssdev->dss->mgr_ops_priv,
+ dssdev->dispc_channel);
}
EXPORT_SYMBOL(dss_mgr_start_update);
int dss_mgr_register_framedone_handler(struct omap_dss_device *dssdev,
void (*handler)(void *), void *data)
{
- return dss_mgr_ops->register_framedone_handler(dss_mgr_ops_priv,
- dssdev->dispc_channel,
- handler, data);
+ struct dss_device *dss = dssdev->dss;
+
+ return dss->mgr_ops->register_framedone_handler(dss->mgr_ops_priv,
+ dssdev->dispc_channel,
+ handler, data);
}
EXPORT_SYMBOL(dss_mgr_register_framedone_handler);
void dss_mgr_unregister_framedone_handler(struct omap_dss_device *dssdev,
void (*handler)(void *), void *data)
{
- dss_mgr_ops->unregister_framedone_handler(dss_mgr_ops_priv,
- dssdev->dispc_channel,
- handler, data);
+ struct dss_device *dss = dssdev->dss;
+
+ dss->mgr_ops->unregister_framedone_handler(dss->mgr_ops_priv,
+ dssdev->dispc_channel,
+ handler, data);
}
EXPORT_SYMBOL(dss_mgr_unregister_framedone_handler);
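
The net effect of the output.c rework is easiest to see in isolation: the dss_mgr_ops / dss_mgr_ops_priv file-scope statics become fields of struct dss_device, and every dss_mgr_* wrapper dispatches through the device it is handed rather than through globals, so multiple DSS instances can coexist. A minimal standalone sketch of the pattern, with stand-in types (not the kernel structures):

#include <errno.h>

struct mgr_ops {
	int (*enable)(void *priv, int channel);
};

struct dss_dev {
	const struct mgr_ops *mgr_ops;	/* previously a file-scope static */
	void *mgr_ops_priv;		/* previously a file-scope static */
};

static int install_mgr_ops(struct dss_dev *dss, const struct mgr_ops *ops,
			   void *priv)
{
	if (dss->mgr_ops)
		return -EBUSY;		/* still one set of ops per device */

	dss->mgr_ops = ops;
	dss->mgr_ops_priv = priv;
	return 0;
}

static int mgr_enable(struct dss_dev *dss, int channel)
{
	/* Each wrapper reaches the ops through the device, not a global. */
	return dss->mgr_ops->enable(dss->mgr_ops_priv, channel);
}
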
diff --git a/drivers/gpu/drm/omapdrm/dss/sdi.c b/drivers/gpu/drm/omapdrm/dss/sdi.c
index 69c3b7a3d5c7..b2fe2387037a 100644
--- a/drivers/gpu/drm/omapdrm/dss/sdi.c
+++ b/drivers/gpu/drm/omapdrm/dss/sdi.c
@@ -132,10 +132,8 @@ static void sdi_config_lcd_manager(struct sdi_device *sdi)
static int sdi_display_enable(struct omap_dss_device *dssdev)
{
struct sdi_device *sdi = dssdev_to_sdi(dssdev);
- struct videomode *vm = &sdi->vm;
- unsigned long fck;
struct dispc_clock_info dispc_cinfo;
- unsigned long pck;
+ unsigned long fck;
int r;
if (!sdi->output.dispc_channel_connected) {
@@ -151,27 +149,12 @@ static int sdi_display_enable(struct omap_dss_device *dssdev)
if (r)
goto err_get_dispc;
- /* 15.5.9.1.2 */
- vm->flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE | DISPLAY_FLAGS_SYNC_POSEDGE;
-
- r = sdi_calc_clock_div(sdi, vm->pixelclock, &fck, &dispc_cinfo);
+ r = sdi_calc_clock_div(sdi, sdi->vm.pixelclock, &fck, &dispc_cinfo);
if (r)
goto err_calc_clock_div;
sdi->mgr_config.clock_info = dispc_cinfo;
- pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div;
-
- if (pck != vm->pixelclock) {
- DSSWARN("Could not find exact pixel clock. Requested %lu Hz, got %lu Hz\n",
- vm->pixelclock, pck);
-
- vm->pixelclock = pck;
- }
-
-
- dss_mgr_set_timings(&sdi->output, vm);
-
r = dss_set_fck_rate(sdi->dss, fck);
if (r)
goto err_set_dss_clock_div;
@@ -230,96 +213,63 @@ static void sdi_display_disable(struct omap_dss_device *dssdev)
}
static void sdi_set_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
+ const struct videomode *vm)
{
struct sdi_device *sdi = dssdev_to_sdi(dssdev);
sdi->vm = *vm;
}
-static void sdi_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct sdi_device *sdi = dssdev_to_sdi(dssdev);
-
- *vm = sdi->vm;
-}
-
static int sdi_check_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
struct sdi_device *sdi = dssdev_to_sdi(dssdev);
- enum omap_channel channel = dssdev->dispc_channel;
-
- if (!dispc_mgr_timings_ok(sdi->dss->dispc, channel, vm))
- return -EINVAL;
+ struct dispc_clock_info dispc_cinfo;
+ unsigned long fck;
+ unsigned long pck;
+ int r;
if (vm->pixelclock == 0)
return -EINVAL;
- return 0;
-}
+ r = sdi_calc_clock_div(sdi, vm->pixelclock, &fck, &dispc_cinfo);
+ if (r)
+ return r;
-static int sdi_init_regulator(struct sdi_device *sdi)
-{
- struct regulator *vdds_sdi;
+ pck = fck / dispc_cinfo.lck_div / dispc_cinfo.pck_div;
- if (sdi->vdds_sdi_reg)
- return 0;
+ if (pck != vm->pixelclock) {
+ DSSWARN("Pixel clock adjusted from %lu Hz to %lu Hz\n",
+ vm->pixelclock, pck);
- vdds_sdi = devm_regulator_get(&sdi->pdev->dev, "vdds_sdi");
- if (IS_ERR(vdds_sdi)) {
- if (PTR_ERR(vdds_sdi) != -EPROBE_DEFER)
- DSSERR("can't get VDDS_SDI regulator\n");
- return PTR_ERR(vdds_sdi);
+ vm->pixelclock = pck;
}
- sdi->vdds_sdi_reg = vdds_sdi;
-
return 0;
}
-static int sdi_connect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static int sdi_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct sdi_device *sdi = dssdev_to_sdi(dssdev);
int r;
- r = sdi_init_regulator(sdi);
+ r = omapdss_device_connect(dst->dss, dst, dst->next);
if (r)
return r;
- r = dss_mgr_connect(&sdi->output, dssdev);
- if (r)
- return r;
-
- r = omapdss_output_set_device(dssdev, dst);
- if (r) {
- DSSERR("failed to connect output to new device: %s\n",
- dst->name);
- dss_mgr_disconnect(&sdi->output, dssdev);
- return r;
- }
-
+ dst->dispc_channel_connected = true;
return 0;
}
-static void sdi_disconnect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static void sdi_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct sdi_device *sdi = dssdev_to_sdi(dssdev);
+ dst->dispc_channel_connected = false;
- WARN_ON(dst != dssdev->dst);
-
- if (dst != dssdev->dst)
- return;
-
- omapdss_output_unset_device(dssdev);
-
- dss_mgr_disconnect(&sdi->output, dssdev);
+ omapdss_device_disconnect(dst, dst->next);
}
-static const struct omapdss_sdi_ops sdi_ops = {
+static const struct omap_dss_device_ops sdi_ops = {
.connect = sdi_connect,
.disconnect = sdi_disconnect,
@@ -328,12 +278,12 @@ static const struct omapdss_sdi_ops sdi_ops = {
.check_timings = sdi_check_timings,
.set_timings = sdi_set_timings,
- .get_timings = sdi_get_timings,
};
-static void sdi_init_output(struct sdi_device *sdi)
+static int sdi_init_output(struct sdi_device *sdi)
{
struct omap_dss_device *out = &sdi->output;
+ int r;
out->dev = &sdi->pdev->dev;
out->id = OMAP_DSS_OUTPUT_SDI;
@@ -341,16 +291,36 @@ static void sdi_init_output(struct sdi_device *sdi)
out->name = "sdi.0";
out->dispc_channel = OMAP_DSS_CHANNEL_LCD;
/* We have SDI only on OMAP3, where it's on port 1 */
- out->port_num = 1;
- out->ops.sdi = &sdi_ops;
+ out->of_ports = BIT(1);
+ out->ops = &sdi_ops;
out->owner = THIS_MODULE;
+ out->bus_flags = DRM_BUS_FLAG_PIXDATA_POSEDGE /* 15.5.9.1.2 */
+ | DRM_BUS_FLAG_SYNC_POSEDGE;
+
+ out->next = omapdss_of_find_connected_device(out->dev->of_node, 1);
+ if (IS_ERR(out->next)) {
+ if (PTR_ERR(out->next) != -EPROBE_DEFER)
+ dev_err(out->dev, "failed to find video sink\n");
+ return PTR_ERR(out->next);
+ }
- omapdss_register_output(out);
+ r = omapdss_output_validate(out);
+ if (r) {
+ omapdss_device_put(out->next);
+ out->next = NULL;
+ return r;
+ }
+
+ omapdss_device_register(out);
+
+ return 0;
}
static void sdi_uninit_output(struct sdi_device *sdi)
{
- omapdss_unregister_output(&sdi->output);
+ if (sdi->output.next)
+ omapdss_device_put(sdi->output.next);
+ omapdss_device_unregister(&sdi->output);
}
int sdi_init_port(struct dss_device *dss, struct platform_device *pdev,
@@ -372,25 +342,32 @@ int sdi_init_port(struct dss_device *dss, struct platform_device *pdev,
}
r = of_property_read_u32(ep, "datapairs", &datapairs);
+ of_node_put(ep);
if (r) {
DSSERR("failed to parse datapairs\n");
- goto err_datapairs;
+ goto err_free;
}
sdi->datapairs = datapairs;
sdi->dss = dss;
- of_node_put(ep);
-
sdi->pdev = pdev;
port->data = sdi;
- sdi_init_output(sdi);
+ sdi->vdds_sdi_reg = devm_regulator_get(&pdev->dev, "vdds_sdi");
+ if (IS_ERR(sdi->vdds_sdi_reg)) {
+ r = PTR_ERR(sdi->vdds_sdi_reg);
+ if (r != -EPROBE_DEFER)
+ DSSERR("can't get VDDS_SDI regulator\n");
+ goto err_free;
+ }
+
+ r = sdi_init_output(sdi);
+ if (r)
+ goto err_free;
return 0;
-err_datapairs:
- of_node_put(ep);
err_free:
kfree(sdi);
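
Taken together, the sdi.c hunks switch the driver to the new connection model: the output resolves its sink once at init time (out->next), and the connect handler only asks the core to wire up the rest of the chain before flagging its DISPC channel as connected. A condensed sketch of that handler pair, using the helpers this series introduces (assumes the driver's omapdss headers; error handling trimmed):

static int example_connect(struct omap_dss_device *src,
			   struct omap_dss_device *dst)
{
	int r;

	/* Connect everything downstream of this output first... */
	r = omapdss_device_connect(dst->dss, dst, dst->next);
	if (r)
		return r;

	/* ...then record that the output now drives a DISPC channel. */
	dst->dispc_channel_connected = true;
	return 0;
}

static void example_disconnect(struct omap_dss_device *src,
			       struct omap_dss_device *dst)
{
	/* Tear down in the opposite order. */
	dst->dispc_channel_connected = false;
	omapdss_device_disconnect(dst, dst->next);
}

Note that src goes unused in both handlers, exactly as in the sdi.c version above and the venc.c version that follows; the chain is walked through dst.
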
diff --git a/drivers/gpu/drm/omapdrm/dss/venc.c b/drivers/gpu/drm/omapdrm/dss/venc.c
index ac01907dcc34..ff0b18c8e4ac 100644
--- a/drivers/gpu/drm/omapdrm/dss/venc.c
+++ b/drivers/gpu/drm/omapdrm/dss/venc.c
@@ -452,7 +452,7 @@ static void venc_runtime_put(struct venc_device *venc)
WARN_ON(r < 0 && r != -ENOSYS);
}
-static const struct venc_config *venc_timings_to_config(struct videomode *vm)
+static const struct venc_config *venc_timings_to_config(const struct videomode *vm)
{
switch (venc_get_videomode(vm)) {
default:
@@ -491,8 +491,6 @@ static int venc_power_on(struct venc_device *venc)
venc_write_reg(venc, VENC_OUTPUT_CONTROL, l);
- dss_mgr_set_timings(&venc->output, &venc->vm);
-
r = regulator_enable(venc->vdda_dac_reg);
if (r)
goto err1;
@@ -568,32 +566,30 @@ static void venc_display_disable(struct omap_dss_device *dssdev)
mutex_unlock(&venc->venc_lock);
}
-static void venc_set_timings(struct omap_dss_device *dssdev,
+static void venc_get_timings(struct omap_dss_device *dssdev,
struct videomode *vm)
{
struct venc_device *venc = dssdev_to_venc(dssdev);
- struct videomode actual_vm;
+
+ mutex_lock(&venc->venc_lock);
+ *vm = venc->vm;
+ mutex_unlock(&venc->venc_lock);
+}
+
+static void venc_set_timings(struct omap_dss_device *dssdev,
+ const struct videomode *vm)
+{
+ struct venc_device *venc = dssdev_to_venc(dssdev);
DSSDBG("venc_set_timings\n");
mutex_lock(&venc->venc_lock);
- switch (venc_get_videomode(vm)) {
- default:
- WARN_ON_ONCE(1);
- case VENC_MODE_PAL:
- actual_vm = omap_dss_pal_vm;
- break;
- case VENC_MODE_NTSC:
- actual_vm = omap_dss_ntsc_vm;
- break;
- }
-
/* Reset WSS data when the TV standard changes. */
- if (memcmp(&venc->vm, &actual_vm, sizeof(actual_vm)))
+ if (memcmp(&venc->vm, vm, sizeof(*vm)))
venc->wss_data = 0;
- venc->vm = actual_vm;
+ venc->vm = *vm;
dispc_set_tv_pclk(venc->dss->dispc, 13500000);
@@ -607,80 +603,16 @@ static int venc_check_timings(struct omap_dss_device *dssdev,
switch (venc_get_videomode(vm)) {
case VENC_MODE_PAL:
- case VENC_MODE_NTSC:
+ *vm = omap_dss_pal_vm;
return 0;
- default:
- return -EINVAL;
- }
-}
-
-static void venc_get_timings(struct omap_dss_device *dssdev,
- struct videomode *vm)
-{
- struct venc_device *venc = dssdev_to_venc(dssdev);
-
- mutex_lock(&venc->venc_lock);
-
- *vm = venc->vm;
-
- mutex_unlock(&venc->venc_lock);
-}
-
-static u32 venc_get_wss(struct omap_dss_device *dssdev)
-{
- struct venc_device *venc = dssdev_to_venc(dssdev);
-
- /* Invert due to VENC_L21_WC_CTL:INV=1 */
- return (venc->wss_data >> 8) ^ 0xfffff;
-}
-
-static int venc_set_wss(struct omap_dss_device *dssdev, u32 wss)
-{
- struct venc_device *venc = dssdev_to_venc(dssdev);
- const struct venc_config *config;
- int r;
- DSSDBG("venc_set_wss\n");
-
- mutex_lock(&venc->venc_lock);
-
- config = venc_timings_to_config(&venc->vm);
-
- /* Invert due to VENC_L21_WC_CTL:INV=1 */
- venc->wss_data = (wss ^ 0xfffff) << 8;
-
- r = venc_runtime_get(venc);
- if (r)
- goto err;
-
- venc_write_reg(venc, VENC_BSTAMP_WSS_DATA, config->bstamp_wss_data |
- venc->wss_data);
-
- venc_runtime_put(venc);
-
-err:
- mutex_unlock(&venc->venc_lock);
-
- return r;
-}
-
-static int venc_init_regulator(struct venc_device *venc)
-{
- struct regulator *vdda_dac;
-
- if (venc->vdda_dac_reg != NULL)
+ case VENC_MODE_NTSC:
+ *vm = omap_dss_ntsc_vm;
return 0;
- vdda_dac = devm_regulator_get(&venc->pdev->dev, "vdda");
- if (IS_ERR(vdda_dac)) {
- if (PTR_ERR(vdda_dac) != -EPROBE_DEFER)
- DSSERR("can't get VDDA_DAC regulator\n");
- return PTR_ERR(vdda_dac);
+ default:
+ return -EINVAL;
}
-
- venc->vdda_dac_reg = vdda_dac;
-
- return 0;
}
static int venc_dump_regs(struct seq_file *s, void *p)
@@ -760,47 +692,28 @@ static int venc_get_clocks(struct venc_device *venc)
return 0;
}
-static int venc_connect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static int venc_connect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct venc_device *venc = dssdev_to_venc(dssdev);
int r;
- r = venc_init_regulator(venc);
- if (r)
- return r;
-
- r = dss_mgr_connect(&venc->output, dssdev);
+ r = omapdss_device_connect(dst->dss, dst, dst->next);
if (r)
return r;
- r = omapdss_output_set_device(dssdev, dst);
- if (r) {
- DSSERR("failed to connect output to new device: %s\n",
- dst->name);
- dss_mgr_disconnect(&venc->output, dssdev);
- return r;
- }
-
+ dst->dispc_channel_connected = true;
return 0;
}
-static void venc_disconnect(struct omap_dss_device *dssdev,
- struct omap_dss_device *dst)
+static void venc_disconnect(struct omap_dss_device *src,
+ struct omap_dss_device *dst)
{
- struct venc_device *venc = dssdev_to_venc(dssdev);
-
- WARN_ON(dst != dssdev->dst);
-
- if (dst != dssdev->dst)
- return;
-
- omapdss_output_unset_device(dssdev);
+ dst->dispc_channel_connected = false;
- dss_mgr_disconnect(&venc->output, dssdev);
+ omapdss_device_disconnect(dst, dst->next);
}
-static const struct omapdss_atv_ops venc_ops = {
+static const struct omap_dss_device_ops venc_ops = {
.connect = venc_connect,
.disconnect = venc_disconnect,
@@ -808,31 +721,92 @@ static const struct omapdss_atv_ops venc_ops = {
.disable = venc_display_disable,
.check_timings = venc_check_timings,
- .set_timings = venc_set_timings,
.get_timings = venc_get_timings,
+ .set_timings = venc_set_timings,
+};
+
+/* -----------------------------------------------------------------------------
+ * Component Bind & Unbind
+ */
- .set_wss = venc_set_wss,
- .get_wss = venc_get_wss,
+static int venc_bind(struct device *dev, struct device *master, void *data)
+{
+ struct dss_device *dss = dss_get_device(master);
+ struct venc_device *venc = dev_get_drvdata(dev);
+ u8 rev_id;
+ int r;
+
+ venc->dss = dss;
+
+ r = venc_runtime_get(venc);
+ if (r)
+ return r;
+
+ rev_id = (u8)(venc_read_reg(venc, VENC_REV_ID) & 0xff);
+ dev_dbg(dev, "OMAP VENC rev %d\n", rev_id);
+
+ venc_runtime_put(venc);
+
+ venc->debugfs = dss_debugfs_create_file(dss, "venc", venc_dump_regs,
+ venc);
+
+ return 0;
+}
+
+static void venc_unbind(struct device *dev, struct device *master, void *data)
+{
+ struct venc_device *venc = dev_get_drvdata(dev);
+
+ dss_debugfs_remove_file(venc->debugfs);
+}
+
+static const struct component_ops venc_component_ops = {
+ .bind = venc_bind,
+ .unbind = venc_unbind,
};
-static void venc_init_output(struct venc_device *venc)
+/* -----------------------------------------------------------------------------
+ * Probe & Remove, Suspend & Resume
+ */
+
+static int venc_init_output(struct venc_device *venc)
{
struct omap_dss_device *out = &venc->output;
+ int r;
out->dev = &venc->pdev->dev;
out->id = OMAP_DSS_OUTPUT_VENC;
out->output_type = OMAP_DISPLAY_TYPE_VENC;
out->name = "venc.0";
out->dispc_channel = OMAP_DSS_CHANNEL_DIGIT;
- out->ops.atv = &venc_ops;
+ out->ops = &venc_ops;
out->owner = THIS_MODULE;
+ out->of_ports = BIT(0);
+
+ out->next = omapdss_of_find_connected_device(out->dev->of_node, 0);
+ if (IS_ERR(out->next)) {
+ if (PTR_ERR(out->next) != -EPROBE_DEFER)
+ dev_err(out->dev, "failed to find video sink\n");
+ return PTR_ERR(out->next);
+ }
- omapdss_register_output(out);
+ r = omapdss_output_validate(out);
+ if (r) {
+ omapdss_device_put(out->next);
+ out->next = NULL;
+ return r;
+ }
+
+ omapdss_device_register(out);
+
+ return 0;
}
static void venc_uninit_output(struct venc_device *venc)
{
- omapdss_unregister_output(&venc->output);
+ if (venc->output.next)
+ omapdss_device_put(venc->output.next);
+ omapdss_device_unregister(&venc->output);
}
static int venc_probe_of(struct venc_device *venc)
@@ -878,19 +852,15 @@ err:
return r;
}
-/* VENC HW IP initialisation */
static const struct soc_device_attribute venc_soc_devices[] = {
{ .machine = "OMAP3[45]*" },
{ .machine = "AM35*" },
{ /* sentinel */ }
};
-static int venc_bind(struct device *dev, struct device *master, void *data)
+static int venc_probe(struct platform_device *pdev)
{
- struct platform_device *pdev = to_platform_device(dev);
- struct dss_device *dss = dss_get_device(master);
struct venc_device *venc;
- u8 rev_id;
struct resource *venc_mem;
int r;
@@ -899,8 +869,8 @@ static int venc_bind(struct device *dev, struct device *master, void *data)
return -ENOMEM;
venc->pdev = pdev;
- venc->dss = dss;
- dev_set_drvdata(dev, venc);
+
+ platform_set_drvdata(pdev, venc);
/* The OMAP34xx, OMAP35xx and AM35xx VENC require the TV DAC clock. */
if (soc_device_match(venc_soc_devices))
@@ -909,6 +879,7 @@ static int venc_bind(struct device *dev, struct device *master, void *data)
mutex_init(&venc->venc_lock);
venc->wss_data = 0;
+ venc->vm = omap_dss_pal_vm;
venc_mem = platform_get_resource(venc->pdev, IORESOURCE_MEM, 0);
venc->base = devm_ioremap_resource(&pdev->dev, venc_mem);
@@ -917,68 +888,54 @@ static int venc_bind(struct device *dev, struct device *master, void *data)
goto err_free;
}
+ venc->vdda_dac_reg = devm_regulator_get(&pdev->dev, "vdda");
+ if (IS_ERR(venc->vdda_dac_reg)) {
+ r = PTR_ERR(venc->vdda_dac_reg);
+ if (r != -EPROBE_DEFER)
+ DSSERR("can't get VDDA_DAC regulator\n");
+ goto err_free;
+ }
+
r = venc_get_clocks(venc);
if (r)
goto err_free;
- pm_runtime_enable(&pdev->dev);
-
- r = venc_runtime_get(venc);
+ r = venc_probe_of(venc);
if (r)
- goto err_runtime_get;
-
- rev_id = (u8)(venc_read_reg(venc, VENC_REV_ID) & 0xff);
- dev_dbg(&pdev->dev, "OMAP VENC rev %d\n", rev_id);
-
- venc_runtime_put(venc);
+ goto err_free;
- r = venc_probe_of(venc);
- if (r) {
- DSSERR("Invalid DT data\n");
- goto err_probe_of;
- }
+ pm_runtime_enable(&pdev->dev);
- venc->debugfs = dss_debugfs_create_file(dss, "venc", venc_dump_regs,
- venc);
+ r = venc_init_output(venc);
+ if (r)
+ goto err_pm_disable;
- venc_init_output(venc);
+ r = component_add(&pdev->dev, &venc_component_ops);
+ if (r)
+ goto err_uninit_output;
return 0;
-err_probe_of:
-err_runtime_get:
+err_uninit_output:
+ venc_uninit_output(venc);
+err_pm_disable:
pm_runtime_disable(&pdev->dev);
err_free:
kfree(venc);
return r;
}
-static void venc_unbind(struct device *dev, struct device *master, void *data)
+static int venc_remove(struct platform_device *pdev)
{
- struct venc_device *venc = dev_get_drvdata(dev);
+ struct venc_device *venc = platform_get_drvdata(pdev);
- dss_debugfs_remove_file(venc->debugfs);
+ component_del(&pdev->dev, &venc_component_ops);
venc_uninit_output(venc);
- pm_runtime_disable(dev);
+ pm_runtime_disable(&pdev->dev);
kfree(venc);
-}
-
-static const struct component_ops venc_component_ops = {
- .bind = venc_bind,
- .unbind = venc_unbind,
-};
-
-static int venc_probe(struct platform_device *pdev)
-{
- return component_add(&pdev->dev, &venc_component_ops);
-}
-
-static int venc_remove(struct platform_device *pdev)
-{
- component_del(&pdev->dev, &venc_component_ops);
return 0;
}
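
The venc.c changes fold two refactorings together: venc_init_regulator() collapses into an unconditional devm_regulator_get() at probe time, and the driver is split so that probe() owns every resource that needs only the platform device while bind() keeps the work that needs the DSS master. A skeletal sketch of that component split (device-specific details elided; dss_get_device() is the omapdrm helper the patch uses, the rest is the stock component API):

#include <linux/component.h>
#include <linux/platform_device.h>
#include <linux/slab.h>

struct dss_device;
struct dss_device *dss_get_device(struct device *dev);	/* from dss.h */

struct example_dev {
	struct dss_device *dss;
};

static int example_bind(struct device *dev, struct device *master, void *data)
{
	struct example_dev *edev = dev_get_drvdata(dev);

	/* The DSS master is only reachable once all components have probed. */
	edev->dss = dss_get_device(master);
	return 0;
}

static void example_unbind(struct device *dev, struct device *master,
			   void *data)
{
	/* Undo bind-time work (debugfs, in the VENC case). */
}

static const struct component_ops example_component_ops = {
	.bind = example_bind,
	.unbind = example_unbind,
};

static int example_probe(struct platform_device *pdev)
{
	struct example_dev *edev;

	edev = devm_kzalloc(&pdev->dev, sizeof(*edev), GFP_KERNEL);
	if (!edev)
		return -ENOMEM;

	platform_set_drvdata(pdev, edev);

	/* ioremap, clocks, regulators, DT parsing: platform-only work. */

	return component_add(&pdev->dev, &example_component_ops);
}

static int example_remove(struct platform_device *pdev)
{
	component_del(&pdev->dev, &example_component_ops);
	return 0;
}

The payoff shows in the error paths: regulator and clock acquisition now happen before the output is registered and before component_add(), so a probe deferral no longer tears down a half-initialized output.
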
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.c b/drivers/gpu/drm/omapdrm/omap_connector.c
index 2ddb856666c4..98f5ca29444a 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.c
+++ b/drivers/gpu/drm/omapdrm/omap_connector.c
@@ -29,10 +29,28 @@
struct omap_connector {
struct drm_connector base;
- struct omap_dss_device *dssdev;
+ struct omap_dss_device *output;
+ struct omap_dss_device *display;
+ struct omap_dss_device *hpd;
bool hdmi_mode;
};
+static void omap_connector_hpd_notify(struct drm_connector *connector,
+ struct omap_dss_device *src,
+ enum drm_connector_status status)
+{
+ if (status == connector_status_disconnected) {
+ /*
+ * If the source is an HDMI encoder, notify it of disconnection.
+ * This is required to let the HDMI encoder reset any internal
+ * state related to connection status, such as the CEC address.
+ */
+ if (src && src->type == OMAP_DISPLAY_TYPE_HDMI &&
+ src->ops->hdmi.lost_hotplug)
+ src->ops->hdmi.lost_hotplug(src);
+ }
+}
+
static void omap_connector_hpd_cb(void *cb_data,
enum drm_connector_status status)
{
@@ -46,8 +64,31 @@ static void omap_connector_hpd_cb(void *cb_data,
connector->status = status;
mutex_unlock(&dev->mode_config.mutex);
- if (old_status != status)
- drm_kms_helper_hotplug_event(dev);
+ if (old_status == status)
+ return;
+
+ omap_connector_hpd_notify(connector, omap_connector->hpd, status);
+
+ drm_kms_helper_hotplug_event(dev);
+}
+
+void omap_connector_enable_hpd(struct drm_connector *connector)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *hpd = omap_connector->hpd;
+
+ if (hpd)
+ hpd->ops->register_hpd_cb(hpd, omap_connector_hpd_cb,
+ omap_connector);
+}
+
+void omap_connector_disable_hpd(struct drm_connector *connector)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *hpd = omap_connector->hpd;
+
+ if (hpd)
+ hpd->ops->unregister_hpd_cb(hpd);
}
bool omap_connector_get_hdmi_mode(struct drm_connector *connector)
@@ -57,120 +98,179 @@ bool omap_connector_get_hdmi_mode(struct drm_connector *connector)
return omap_connector->hdmi_mode;
}
+static struct omap_dss_device *
+omap_connector_find_device(struct drm_connector *connector,
+ enum omap_dss_device_ops_flag op)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev;
+
+ for (dssdev = omap_connector->display; dssdev; dssdev = dssdev->src) {
+ if (dssdev->ops_flags & op)
+ return dssdev;
+ }
+
+ return NULL;
+}
+
static enum drm_connector_status omap_connector_detect(
struct drm_connector *connector, bool force)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev = omap_connector->dssdev;
- struct omap_dss_driver *dssdrv = dssdev->driver;
- enum drm_connector_status ret;
-
- if (dssdrv->detect) {
- if (dssdrv->detect(dssdev))
- ret = connector_status_connected;
- else
- ret = connector_status_disconnected;
- } else if (dssdev->type == OMAP_DISPLAY_TYPE_DPI ||
- dssdev->type == OMAP_DISPLAY_TYPE_DBI ||
- dssdev->type == OMAP_DISPLAY_TYPE_SDI ||
- dssdev->type == OMAP_DISPLAY_TYPE_DSI) {
- ret = connector_status_connected;
+ struct omap_dss_device *dssdev;
+ enum drm_connector_status status;
+
+ dssdev = omap_connector_find_device(connector,
+ OMAP_DSS_DEVICE_OP_DETECT);
+
+ if (dssdev) {
+ status = dssdev->ops->detect(dssdev)
+ ? connector_status_connected
+ : connector_status_disconnected;
+
+ omap_connector_hpd_notify(connector, dssdev->src, status);
} else {
- ret = connector_status_unknown;
+ switch (omap_connector->display->type) {
+ case OMAP_DISPLAY_TYPE_DPI:
+ case OMAP_DISPLAY_TYPE_DBI:
+ case OMAP_DISPLAY_TYPE_SDI:
+ case OMAP_DISPLAY_TYPE_DSI:
+ status = connector_status_connected;
+ break;
+ default:
+ status = connector_status_unknown;
+ break;
+ }
}
- VERB("%s: %d (force=%d)", omap_connector->dssdev->name, ret, force);
+ VERB("%s: %d (force=%d)", omap_connector->display->name, status, force);
- return ret;
+ return status;
}
static void omap_connector_destroy(struct drm_connector *connector)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev = omap_connector->dssdev;
- DBG("%s", omap_connector->dssdev->name);
- if (connector->polled == DRM_CONNECTOR_POLL_HPD &&
- dssdev->driver->unregister_hpd_cb) {
- dssdev->driver->unregister_hpd_cb(dssdev);
+ DBG("%s", omap_connector->display->name);
+
+ if (omap_connector->hpd) {
+ struct omap_dss_device *hpd = omap_connector->hpd;
+
+ hpd->ops->unregister_hpd_cb(hpd);
+ omapdss_device_put(hpd);
+ omap_connector->hpd = NULL;
}
+
drm_connector_unregister(connector);
drm_connector_cleanup(connector);
kfree(omap_connector);
- omap_dss_put_device(dssdev);
+ omapdss_device_put(omap_connector->output);
+ omapdss_device_put(omap_connector->display);
}
#define MAX_EDID 512
-static int omap_connector_get_modes(struct drm_connector *connector)
+static int omap_connector_get_modes_edid(struct drm_connector *connector,
+ struct omap_dss_device *dssdev)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev = omap_connector->dssdev;
- struct omap_dss_driver *dssdrv = dssdev->driver;
- struct drm_device *dev = connector->dev;
- int n = 0;
+ enum drm_connector_status status;
+ void *edid;
+ int n;
- DBG("%s", omap_connector->dssdev->name);
+ status = omap_connector_detect(connector, false);
+ if (status != connector_status_connected)
+ goto no_edid;
- /* if display exposes EDID, then we parse that in the normal way to
- * build table of supported modes.. otherwise (ie. fixed resolution
- * LCD panels) we just return a single mode corresponding to the
- * currently configured timings:
- */
- if (dssdrv->read_edid) {
- void *edid = kzalloc(MAX_EDID, GFP_KERNEL);
-
- if (!edid)
- return 0;
-
- if ((dssdrv->read_edid(dssdev, edid, MAX_EDID) > 0) &&
- drm_edid_is_valid(edid)) {
- drm_connector_update_edid_property(
- connector, edid);
- n = drm_add_edid_modes(connector, edid);
-
- omap_connector->hdmi_mode =
- drm_detect_hdmi_monitor(edid);
- } else {
- drm_connector_update_edid_property(
- connector, NULL);
- }
+ edid = kzalloc(MAX_EDID, GFP_KERNEL);
+ if (!edid)
+ goto no_edid;
+ if (dssdev->ops->read_edid(dssdev, edid, MAX_EDID) <= 0 ||
+ !drm_edid_is_valid(edid)) {
kfree(edid);
- } else {
- struct drm_display_mode *mode = drm_mode_create(dev);
- struct videomode vm = {0};
+ goto no_edid;
+ }
- if (!mode)
- return 0;
+ drm_connector_update_edid_property(connector, edid);
+ n = drm_add_edid_modes(connector, edid);
- dssdrv->get_timings(dssdev, &vm);
+ omap_connector->hdmi_mode = drm_detect_hdmi_monitor(edid);
- drm_display_mode_from_videomode(&vm, mode);
+ kfree(edid);
+ return n;
- mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
- drm_mode_set_name(mode);
- drm_mode_probed_add(connector, mode);
+no_edid:
+ drm_connector_update_edid_property(connector, NULL);
+ return 0;
+}
- if (dssdrv->get_size) {
- dssdrv->get_size(dssdev,
+static int omap_connector_get_modes(struct drm_connector *connector)
+{
+ struct omap_connector *omap_connector = to_omap_connector(connector);
+ struct omap_dss_device *dssdev;
+ struct drm_display_mode *mode;
+ struct videomode vm = {0};
+
+ DBG("%s", omap_connector->display->name);
+
+ /*
+	 * If the display exposes EDID, we parse it in the normal way to build
+	 * the table of supported modes.
+ */
+ dssdev = omap_connector_find_device(connector,
+ OMAP_DSS_DEVICE_OP_EDID);
+ if (dssdev)
+ return omap_connector_get_modes_edid(connector, dssdev);
+
+ /*
+ * Otherwise we have either a fixed resolution panel or an output that
+ * doesn't support modes discovery (e.g. DVI or VGA with the DDC bus
+ * unconnected, or analog TV). Start by querying the size.
+ */
+ dssdev = omap_connector->display;
+ if (dssdev->driver && dssdev->driver->get_size)
+ dssdev->driver->get_size(dssdev,
&connector->display_info.width_mm,
&connector->display_info.height_mm);
- }
- n = 1;
+ /*
+ * Iterate over the pipeline to find the first device that can provide
+ * timing information. If we can't find any, we just let the KMS core
+ * add the default modes.
+ */
+ for (dssdev = omap_connector->display; dssdev; dssdev = dssdev->src) {
+ if (dssdev->ops->get_timings)
+ break;
}
+ if (!dssdev)
+ return 0;
- return n;
+ /* Add a single mode corresponding to the fixed panel timings. */
+ mode = drm_mode_create(connector->dev);
+ if (!mode)
+ return 0;
+
+ dssdev->ops->get_timings(dssdev, &vm);
+
+ drm_display_mode_from_videomode(&vm, mode);
+
+ mode->type = DRM_MODE_TYPE_DRIVER | DRM_MODE_TYPE_PREFERRED;
+ drm_mode_set_name(mode);
+ drm_mode_probed_add(connector, mode);
+
+ return 1;
}
static int omap_connector_mode_valid(struct drm_connector *connector,
struct drm_display_mode *mode)
{
struct omap_connector *omap_connector = to_omap_connector(connector);
- struct omap_dss_device *dssdev = omap_connector->dssdev;
- struct omap_dss_driver *dssdrv = dssdev->driver;
+ enum omap_channel channel = omap_connector->output->dispc_channel;
+ struct omap_drm_private *priv = connector->dev->dev_private;
+ struct omap_dss_device *dssdev;
struct videomode vm = {0};
struct drm_device *dev = connector->dev;
struct drm_display_mode *new_mode;
@@ -179,44 +279,31 @@ static int omap_connector_mode_valid(struct drm_connector *connector,
drm_display_mode_to_videomode(mode, &vm);
mode->vrefresh = drm_mode_vrefresh(mode);
- /*
- * if the panel driver doesn't have a check_timings, it's most likely
- * a fixed resolution panel, check if the timings match with the
- * panel's timings
- */
- if (dssdrv->check_timings) {
- r = dssdrv->check_timings(dssdev, &vm);
- } else {
- struct videomode t = {0};
+ r = priv->dispc_ops->mgr_check_timings(priv->dispc, channel, &vm);
+ if (r)
+ goto done;
- dssdrv->get_timings(dssdev, &t);
+ for (dssdev = omap_connector->output; dssdev; dssdev = dssdev->next) {
+ if (!dssdev->ops->check_timings)
+ continue;
- /*
- * Ignore the flags, as we don't get them from
- * drm_display_mode_to_videomode.
- */
- t.flags = 0;
-
- if (memcmp(&vm, &t, sizeof(vm)))
- r = -EINVAL;
- else
- r = 0;
+ r = dssdev->ops->check_timings(dssdev, &vm);
+ if (r)
+ goto done;
}
- if (!r) {
- /* check if vrefresh is still valid */
- new_mode = drm_mode_duplicate(dev, mode);
+ /* check if vrefresh is still valid */
+ new_mode = drm_mode_duplicate(dev, mode);
+ if (!new_mode)
+ return MODE_BAD;
- if (!new_mode)
- return MODE_BAD;
-
- new_mode->clock = vm.pixelclock / 1000;
- new_mode->vrefresh = 0;
- if (mode->vrefresh == drm_mode_vrefresh(new_mode))
- ret = MODE_OK;
- drm_mode_destroy(dev, new_mode);
- }
+ new_mode->clock = vm.pixelclock / 1000;
+ new_mode->vrefresh = 0;
+ if (mode->vrefresh == drm_mode_vrefresh(new_mode))
+ ret = MODE_OK;
+ drm_mode_destroy(dev, new_mode);
+done:
DBG("connector: mode %s: "
"%d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
(ret == MODE_OK) ? "valid" : "invalid",
@@ -243,52 +330,72 @@ static const struct drm_connector_helper_funcs omap_connector_helper_funcs = {
.mode_valid = omap_connector_mode_valid,
};
+static int omap_connector_get_type(struct omap_dss_device *display)
+{
+ switch (display->type) {
+ case OMAP_DISPLAY_TYPE_HDMI:
+ return DRM_MODE_CONNECTOR_HDMIA;
+ case OMAP_DISPLAY_TYPE_DVI:
+ return DRM_MODE_CONNECTOR_DVID;
+ case OMAP_DISPLAY_TYPE_DSI:
+ return DRM_MODE_CONNECTOR_DSI;
+ case OMAP_DISPLAY_TYPE_DPI:
+ case OMAP_DISPLAY_TYPE_DBI:
+ return DRM_MODE_CONNECTOR_DPI;
+ case OMAP_DISPLAY_TYPE_VENC:
+ /* TODO: This could also be composite */
+ return DRM_MODE_CONNECTOR_SVIDEO;
+ case OMAP_DISPLAY_TYPE_SDI:
+ return DRM_MODE_CONNECTOR_LVDS;
+ default:
+ return DRM_MODE_CONNECTOR_Unknown;
+ }
+}
+
/* initialize connector */
struct drm_connector *omap_connector_init(struct drm_device *dev,
- int connector_type, struct omap_dss_device *dssdev,
- struct drm_encoder *encoder)
+ struct omap_dss_device *output,
+ struct omap_dss_device *display,
+ struct drm_encoder *encoder)
{
struct drm_connector *connector = NULL;
struct omap_connector *omap_connector;
- bool hpd_supported = false;
-
- DBG("%s", dssdev->name);
+ struct omap_dss_device *dssdev;
- omap_dss_get_device(dssdev);
+ DBG("%s", display->name);
omap_connector = kzalloc(sizeof(*omap_connector), GFP_KERNEL);
if (!omap_connector)
goto fail;
- omap_connector->dssdev = dssdev;
+ omap_connector->output = omapdss_device_get(output);
+ omap_connector->display = omapdss_device_get(display);
connector = &omap_connector->base;
+ connector->interlace_allowed = 1;
+ connector->doublescan_allowed = 0;
drm_connector_init(dev, connector, &omap_connector_funcs,
- connector_type);
+ omap_connector_get_type(display));
drm_connector_helper_add(connector, &omap_connector_helper_funcs);
- if (dssdev->driver->register_hpd_cb) {
- int ret = dssdev->driver->register_hpd_cb(dssdev,
- omap_connector_hpd_cb,
- omap_connector);
- if (!ret)
- hpd_supported = true;
- else if (ret != -ENOTSUPP)
- DBG("%s: Failed to register HPD callback (%d).",
- dssdev->name, ret);
- }
-
- if (hpd_supported)
+ /*
+ * Initialize connector status handling. First try to find a device that
+	 * supports hot-plug reporting. If that fails, fall back to a device that
+	 * supports polling. If that fails too, we don't support hot-plug
+ * detection at all.
+ */
+ dssdev = omap_connector_find_device(connector, OMAP_DSS_DEVICE_OP_HPD);
+ if (dssdev) {
+ omap_connector->hpd = omapdss_device_get(dssdev);
connector->polled = DRM_CONNECTOR_POLL_HPD;
- else if (dssdev->driver->detect)
- connector->polled = DRM_CONNECTOR_POLL_CONNECT |
- DRM_CONNECTOR_POLL_DISCONNECT;
- else
- connector->polled = 0;
-
- connector->interlace_allowed = 1;
- connector->doublescan_allowed = 0;
+ } else {
+ dssdev = omap_connector_find_device(connector,
+ OMAP_DSS_DEVICE_OP_DETECT);
+ if (dssdev)
+ connector->polled = DRM_CONNECTOR_POLL_CONNECT |
+ DRM_CONNECTOR_POLL_DISCONNECT;
+ }
return connector;
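
The walk in omap_connector_find_device() is the connector-side half of a contract: each device in the pipeline advertises the operations it implements through ops_flags, and the connector picks the first advertiser whenever it needs detect, EDID, or HPD support. The supplier side, sketched for a hypothetical bridge that can probe its DDC bus (driver names are made up; the flag, the ops structure, and the walk are the ones this patch uses):

static bool my_bridge_detect(struct omap_dss_device *dssdev)
{
	/* e.g. probe the DDC bus or sample a hot-plug GPIO */
	return true;
}

static const struct omap_dss_device_ops my_bridge_ops = {
	.detect = my_bridge_detect,
	/* .connect, .disconnect, ... elided */
};

static void my_bridge_setup(struct omap_dss_device *dssdev)
{
	dssdev->ops = &my_bridge_ops;
	/* Advertise the op so omap_connector_find_device() selects us. */
	dssdev->ops_flags = OMAP_DSS_DEVICE_OP_DETECT;
}

With the flag set, omap_connector_detect() lands on this device and the connector is polled; without any advertiser, the built-in panel types fall through to the always-connected cases.
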
diff --git a/drivers/gpu/drm/omapdrm/omap_connector.h b/drivers/gpu/drm/omapdrm/omap_connector.h
index 98bbc779b302..854099801649 100644
--- a/drivers/gpu/drm/omapdrm/omap_connector.h
+++ b/drivers/gpu/drm/omapdrm/omap_connector.h
@@ -28,10 +28,13 @@ struct drm_encoder;
struct omap_dss_device;
struct drm_connector *omap_connector_init(struct drm_device *dev,
- int connector_type, struct omap_dss_device *dssdev,
- struct drm_encoder *encoder);
+ struct omap_dss_device *output,
+ struct omap_dss_device *display,
+ struct drm_encoder *encoder);
struct drm_encoder *omap_connector_attached_encoder(
struct drm_connector *connector);
bool omap_connector_get_hdmi_mode(struct drm_connector *connector);
+void omap_connector_enable_hpd(struct drm_connector *connector);
+void omap_connector_disable_hpd(struct drm_connector *connector);
#endif /* __OMAPDRM_CONNECTOR_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.c b/drivers/gpu/drm/omapdrm/omap_crtc.c
index 6c4d40b824e4..62928ec0e7db 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.c
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.c
@@ -41,6 +41,7 @@ struct omap_crtc {
struct drm_crtc base;
const char *name;
+ struct omap_drm_pipeline *pipe;
enum omap_channel channel;
struct videomode vm;
@@ -108,38 +109,7 @@ int omap_crtc_wait_pending(struct drm_crtc *crtc)
* job of sequencing the setup of the video pipe in the proper order
*/
-/* ovl-mgr-id -> crtc */
-static struct omap_crtc *omap_crtcs[8];
-static struct omap_dss_device *omap_crtc_output[8];
-
/* we can probably ignore these until we support command-mode panels: */
-static int omap_crtc_dss_connect(struct omap_drm_private *priv,
- enum omap_channel channel,
- struct omap_dss_device *dst)
-{
- const struct dispc_ops *dispc_ops = priv->dispc_ops;
- struct dispc_device *dispc = priv->dispc;
-
- if (omap_crtc_output[channel])
- return -EINVAL;
-
- if (!(dispc_ops->mgr_get_supported_outputs(dispc, channel) & dst->id))
- return -EINVAL;
-
- omap_crtc_output[channel] = dst;
- dst->dispc_channel_connected = true;
-
- return 0;
-}
-
-static void omap_crtc_dss_disconnect(struct omap_drm_private *priv,
- enum omap_channel channel,
- struct omap_dss_device *dst)
-{
- omap_crtc_output[channel] = NULL;
- dst->dispc_channel_connected = false;
-}
-
static void omap_crtc_dss_start_update(struct omap_drm_private *priv,
enum omap_channel channel)
{
@@ -159,7 +129,7 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
if (WARN_ON(omap_crtc->enabled == enable))
return;
- if (omap_crtc_output[channel]->output_type == OMAP_DISPLAY_TYPE_HDMI) {
+ if (omap_crtc->pipe->output->output_type == OMAP_DISPLAY_TYPE_HDMI) {
priv->dispc_ops->mgr_enable(priv->dispc, channel, enable);
omap_crtc->enabled = enable;
return;
@@ -215,7 +185,8 @@ static void omap_crtc_set_enabled(struct drm_crtc *crtc, bool enable)
static int omap_crtc_dss_enable(struct omap_drm_private *priv,
enum omap_channel channel)
{
- struct omap_crtc *omap_crtc = omap_crtcs[channel];
+ struct drm_crtc *crtc = priv->channels[channel]->crtc;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
priv->dispc_ops->mgr_set_timings(priv->dispc, omap_crtc->channel,
&omap_crtc->vm);
@@ -227,7 +198,8 @@ static int omap_crtc_dss_enable(struct omap_drm_private *priv,
static void omap_crtc_dss_disable(struct omap_drm_private *priv,
enum omap_channel channel)
{
- struct omap_crtc *omap_crtc = omap_crtcs[channel];
+ struct drm_crtc *crtc = priv->channels[channel]->crtc;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
omap_crtc_set_enabled(&omap_crtc->base, false);
}
@@ -236,7 +208,9 @@ static void omap_crtc_dss_set_timings(struct omap_drm_private *priv,
enum omap_channel channel,
const struct videomode *vm)
{
- struct omap_crtc *omap_crtc = omap_crtcs[channel];
+ struct drm_crtc *crtc = priv->channels[channel]->crtc;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
+
DBG("%s", omap_crtc->name);
omap_crtc->vm = *vm;
}
@@ -245,7 +219,8 @@ static void omap_crtc_dss_set_lcd_config(struct omap_drm_private *priv,
enum omap_channel channel,
const struct dss_lcd_mgr_config *config)
{
- struct omap_crtc *omap_crtc = omap_crtcs[channel];
+ struct drm_crtc *crtc = priv->channels[channel]->crtc;
+ struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
DBG("%s", omap_crtc->name);
priv->dispc_ops->mgr_set_lcd_config(priv->dispc, omap_crtc->channel,
@@ -266,8 +241,6 @@ static void omap_crtc_dss_unregister_framedone(
}
static const struct dss_mgr_ops mgr_ops = {
- .connect = omap_crtc_dss_connect,
- .disconnect = omap_crtc_dss_disconnect,
.start_update = omap_crtc_dss_start_update,
.enable = omap_crtc_dss_enable,
.disable = omap_crtc_dss_disable,
@@ -447,11 +420,6 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
{
struct omap_crtc *omap_crtc = to_omap_crtc(crtc);
struct drm_display_mode *mode = &crtc->state->adjusted_mode;
- struct omap_drm_private *priv = crtc->dev->dev_private;
- const u32 flags_mask = DISPLAY_FLAGS_DE_HIGH | DISPLAY_FLAGS_DE_LOW |
- DISPLAY_FLAGS_PIXDATA_POSEDGE | DISPLAY_FLAGS_PIXDATA_NEGEDGE |
- DISPLAY_FLAGS_SYNC_POSEDGE | DISPLAY_FLAGS_SYNC_NEGEDGE;
- unsigned int i;
DBG("%s: set mode: %d:\"%s\" %d %d %d %d %d %d %d %d %d %d 0x%x 0x%x",
omap_crtc->name, mode->base.id, mode->name,
@@ -461,38 +429,6 @@ static void omap_crtc_mode_set_nofb(struct drm_crtc *crtc)
mode->type, mode->flags);
drm_display_mode_to_videomode(mode, &omap_crtc->vm);
-
- /*
- * HACK: This fixes the vm flags.
- * struct drm_display_mode does not contain the VSYNC/HSYNC/DE flags
- * and they get lost when converting back and forth between
- * struct drm_display_mode and struct videomode. The hack below
- * goes and fetches the missing flags from the panel drivers.
- *
- * Correct solution would be to use DRM's bus-flags, but that's not
- * easily possible before the omapdrm's panel/encoder driver model
- * has been changed to the DRM model.
- */
-
- for (i = 0; i < priv->num_encoders; ++i) {
- struct drm_encoder *encoder = priv->encoders[i];
-
- if (encoder->crtc == crtc) {
- struct omap_dss_device *dssdev;
-
- dssdev = omap_encoder_get_dssdev(encoder);
-
- if (dssdev) {
- struct videomode vm = {0};
-
- dssdev->driver->get_timings(dssdev, &vm);
-
- omap_crtc->vm.flags |= vm.flags & flags_mask;
- }
-
- break;
- }
- }
}
static int omap_crtc_atomic_check(struct drm_crtc *crtc,
@@ -681,37 +617,29 @@ static const char *channel_names[] = {
void omap_crtc_pre_init(struct omap_drm_private *priv)
{
- memset(omap_crtcs, 0, sizeof(omap_crtcs));
-
- dss_install_mgr_ops(&mgr_ops, priv);
+ dss_install_mgr_ops(priv->dss, &mgr_ops, priv);
}
-void omap_crtc_pre_uninit(void)
+void omap_crtc_pre_uninit(struct omap_drm_private *priv)
{
- dss_uninstall_mgr_ops();
+ dss_uninstall_mgr_ops(priv->dss);
}
/* initialize crtc */
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
- struct drm_plane *plane, struct omap_dss_device *dssdev)
+ struct omap_drm_pipeline *pipe,
+ struct drm_plane *plane)
{
struct omap_drm_private *priv = dev->dev_private;
struct drm_crtc *crtc = NULL;
struct omap_crtc *omap_crtc;
enum omap_channel channel;
- struct omap_dss_device *out;
int ret;
- out = omapdss_find_output_from_display(dssdev);
- channel = out->dispc_channel;
- omap_dss_put_device(out);
+ channel = pipe->output->dispc_channel;
DBG("%s", channel_names[channel]);
- /* Multiple displays on same channel is not allowed */
- if (WARN_ON(omap_crtcs[channel] != NULL))
- return ERR_PTR(-EINVAL);
-
omap_crtc = kzalloc(sizeof(*omap_crtc), GFP_KERNEL);
if (!omap_crtc)
return ERR_PTR(-ENOMEM);
@@ -720,6 +648,7 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
init_waitqueue_head(&omap_crtc->pending_wait);
+ omap_crtc->pipe = pipe;
omap_crtc->channel = channel;
omap_crtc->name = channel_names[channel];
@@ -727,7 +656,7 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
&omap_crtc_funcs, NULL);
if (ret < 0) {
dev_err(dev->dev, "%s(): could not init crtc for: %s\n",
- __func__, dssdev->name);
+ __func__, pipe->display->name);
kfree(omap_crtc);
return ERR_PTR(ret);
}
@@ -750,7 +679,5 @@ struct drm_crtc *omap_crtc_init(struct drm_device *dev,
omap_plane_install_properties(crtc->primary, &crtc->base);
- omap_crtcs[channel] = omap_crtc;
-
return crtc;
}
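
Every reworked dss_mgr callback above recovers its CRTC the same way: channel, then priv->channels[channel] (the per-channel pipeline table that omap_connect_pipelines() fills in over in omap_drv.c), then pipe->crtc, then to_omap_crtc(). Folded into one hypothetical helper for clarity (the patch open-codes the lookup in each callback):

static struct omap_crtc *omap_crtc_for_channel(struct omap_drm_private *priv,
					       enum omap_channel channel)
{
	struct drm_crtc *crtc = priv->channels[channel]->crtc;

	return to_omap_crtc(crtc);
}

Compared with the old static omap_crtcs[8] array, the lookup is now scoped to the driver instance, which is what lets omap_crtc_pre_init()/pre_uninit() drop their global state.
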
diff --git a/drivers/gpu/drm/omapdrm/omap_crtc.h b/drivers/gpu/drm/omapdrm/omap_crtc.h
index eaab2d7f0324..d9de437ba9dd 100644
--- a/drivers/gpu/drm/omapdrm/omap_crtc.h
+++ b/drivers/gpu/drm/omapdrm/omap_crtc.h
@@ -27,15 +27,17 @@ enum omap_channel;
struct drm_crtc;
struct drm_device;
struct drm_plane;
+struct omap_drm_pipeline;
struct omap_dss_device;
struct videomode;
struct videomode *omap_crtc_timings(struct drm_crtc *crtc);
enum omap_channel omap_crtc_channel(struct drm_crtc *crtc);
void omap_crtc_pre_init(struct omap_drm_private *priv);
-void omap_crtc_pre_uninit(void);
+void omap_crtc_pre_uninit(struct omap_drm_private *priv);
struct drm_crtc *omap_crtc_init(struct drm_device *dev,
- struct drm_plane *plane, struct omap_dss_device *dssdev);
+ struct omap_drm_pipeline *pipe,
+ struct drm_plane *plane);
int omap_crtc_wait_pending(struct drm_crtc *crtc);
void omap_crtc_error_irq(struct drm_crtc *crtc, u32 irqstatus);
void omap_crtc_vblank_irq(struct drm_crtc *crtc);
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.c b/drivers/gpu/drm/omapdrm/omap_drv.c
index 1b6601e9b107..5f98506ac2c5 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.c
+++ b/drivers/gpu/drm/omapdrm/omap_drv.c
@@ -15,6 +15,8 @@
* this program. If not, see <http://www.gnu.org/licenses/>.
*/
+#include <linux/of.h>
+#include <linux/sort.h>
#include <linux/sys_soc.h>
#include <drm/drm_atomic.h>
@@ -127,55 +129,92 @@ static const struct drm_mode_config_funcs omap_mode_config_funcs = {
.atomic_commit = drm_atomic_helper_commit,
};
-static int get_connector_type(struct omap_dss_device *dssdev)
+static void omap_disconnect_pipelines(struct drm_device *ddev)
{
- switch (dssdev->type) {
- case OMAP_DISPLAY_TYPE_HDMI:
- return DRM_MODE_CONNECTOR_HDMIA;
- case OMAP_DISPLAY_TYPE_DVI:
- return DRM_MODE_CONNECTOR_DVID;
- case OMAP_DISPLAY_TYPE_DSI:
- return DRM_MODE_CONNECTOR_DSI;
- case OMAP_DISPLAY_TYPE_DPI:
- case OMAP_DISPLAY_TYPE_DBI:
- return DRM_MODE_CONNECTOR_DPI;
- case OMAP_DISPLAY_TYPE_VENC:
- /* TODO: This could also be composite */
- return DRM_MODE_CONNECTOR_SVIDEO;
- case OMAP_DISPLAY_TYPE_SDI:
- return DRM_MODE_CONNECTOR_LVDS;
- default:
- return DRM_MODE_CONNECTOR_Unknown;
+ struct omap_drm_private *priv = ddev->dev_private;
+ unsigned int i;
+
+ for (i = 0; i < priv->num_pipes; i++) {
+ struct omap_drm_pipeline *pipe = &priv->pipes[i];
+
+ omapdss_device_disconnect(NULL, pipe->output);
+
+ omapdss_device_put(pipe->output);
+ omapdss_device_put(pipe->display);
+ pipe->output = NULL;
+ pipe->display = NULL;
}
+
+ memset(&priv->channels, 0, sizeof(priv->channels));
+
+ priv->num_pipes = 0;
}
-static void omap_disconnect_dssdevs(void)
+static int omap_compare_pipes(const void *a, const void *b)
{
- struct omap_dss_device *dssdev = NULL;
+ const struct omap_drm_pipeline *pipe1 = a;
+ const struct omap_drm_pipeline *pipe2 = b;
- for_each_dss_dev(dssdev)
- dssdev->driver->disconnect(dssdev);
+ if (pipe1->display->alias_id > pipe2->display->alias_id)
+ return 1;
+ else if (pipe1->display->alias_id < pipe2->display->alias_id)
+ return -1;
+ return 0;
}
-static int omap_connect_dssdevs(void)
+static int omap_connect_pipelines(struct drm_device *ddev)
{
+ struct omap_drm_private *priv = ddev->dev_private;
+ struct omap_dss_device *output = NULL;
+ unsigned int i;
int r;
- struct omap_dss_device *dssdev = NULL;
if (!omapdss_stack_is_ready())
return -EPROBE_DEFER;
- for_each_dss_dev(dssdev) {
- r = dssdev->driver->connect(dssdev);
+ for_each_dss_output(output) {
+ r = omapdss_device_connect(priv->dss, NULL, output);
if (r == -EPROBE_DEFER) {
- omap_dss_put_device(dssdev);
+ omapdss_device_put(output);
goto cleanup;
} else if (r) {
- dev_warn(dssdev->dev, "could not connect display: %s\n",
- dssdev->name);
+ dev_warn(output->dev, "could not connect output %s\n",
+ output->name);
+ } else {
+ struct omap_drm_pipeline *pipe;
+
+ pipe = &priv->pipes[priv->num_pipes++];
+ pipe->output = omapdss_device_get(output);
+ pipe->display = omapdss_display_get(output);
+
+ if (priv->num_pipes == ARRAY_SIZE(priv->pipes)) {
+ /* To balance the 'for_each_dss_output' loop */
+ omapdss_device_put(output);
+ break;
+ }
}
}
+ /* Sort the list by DT aliases */
+ sort(priv->pipes, priv->num_pipes, sizeof(priv->pipes[0]),
+ omap_compare_pipes, NULL);
+
+ /*
+ * Populate the pipeline lookup table by DISPC channel. Only one display
+ * is allowed per channel.
+ */
+ for (i = 0; i < priv->num_pipes; ++i) {
+ struct omap_drm_pipeline *pipe = &priv->pipes[i];
+ enum omap_channel channel = pipe->output->dispc_channel;
+
+ if (WARN_ON(priv->channels[channel] != NULL)) {
+ r = -EINVAL;
+ goto cleanup;
+ }
+
+ priv->channels[channel] = pipe;
+ }
+
return 0;
cleanup:
@@ -183,7 +222,7 @@ cleanup:
* if we are deferring probe, we disconnect the devices we previously
* connected
*/
- omap_disconnect_dssdevs();
+ omap_disconnect_pipelines(ddev);
return r;
}
@@ -204,10 +243,9 @@ static int omap_modeset_init_properties(struct drm_device *dev)
static int omap_modeset_init(struct drm_device *dev)
{
struct omap_drm_private *priv = dev->dev_private;
- struct omap_dss_device *dssdev = NULL;
int num_ovls = priv->dispc_ops->get_num_ovls(priv->dispc);
int num_mgrs = priv->dispc_ops->get_num_mgrs(priv->dispc);
- int num_crtcs, crtc_idx, plane_idx;
+ unsigned int i;
int ret;
u32 plane_crtc_mask;
@@ -225,87 +263,62 @@ static int omap_modeset_init(struct drm_device *dev)
* configuration does not match the expectations or exceeds
* the available resources, the configuration is rejected.
*/
- num_crtcs = 0;
- for_each_dss_dev(dssdev)
- if (omapdss_device_is_connected(dssdev))
- num_crtcs++;
-
- if (num_crtcs > num_mgrs || num_crtcs > num_ovls ||
- num_crtcs > ARRAY_SIZE(priv->crtcs) ||
- num_crtcs > ARRAY_SIZE(priv->planes) ||
- num_crtcs > ARRAY_SIZE(priv->encoders) ||
- num_crtcs > ARRAY_SIZE(priv->connectors)) {
+ if (priv->num_pipes > num_mgrs || priv->num_pipes > num_ovls) {
dev_err(dev->dev, "%s(): Too many connected displays\n",
__func__);
return -EINVAL;
}
- /* All planes can be put to any CRTC */
- plane_crtc_mask = (1 << num_crtcs) - 1;
+ /* Create all planes first. They can all be put to any CRTC. */
+ plane_crtc_mask = (1 << priv->num_pipes) - 1;
+
+ for (i = 0; i < num_ovls; i++) {
+ enum drm_plane_type type = i < priv->num_pipes
+ ? DRM_PLANE_TYPE_PRIMARY
+ : DRM_PLANE_TYPE_OVERLAY;
+ struct drm_plane *plane;
+
+ if (WARN_ON(priv->num_planes >= ARRAY_SIZE(priv->planes)))
+ return -EINVAL;
+
+ plane = omap_plane_init(dev, i, type, plane_crtc_mask);
+ if (IS_ERR(plane))
+ return PTR_ERR(plane);
- dssdev = NULL;
+ priv->planes[priv->num_planes++] = plane;
+ }
- crtc_idx = 0;
- plane_idx = 0;
- for_each_dss_dev(dssdev) {
+ /* Create the CRTCs, encoders and connectors. */
+ for (i = 0; i < priv->num_pipes; i++) {
+ struct omap_drm_pipeline *pipe = &priv->pipes[i];
+ struct omap_dss_device *display = pipe->display;
struct drm_connector *connector;
struct drm_encoder *encoder;
- struct drm_plane *plane;
struct drm_crtc *crtc;
- if (!omapdss_device_is_connected(dssdev))
- continue;
-
- encoder = omap_encoder_init(dev, dssdev);
+ encoder = omap_encoder_init(dev, pipe->output, display);
if (!encoder)
return -ENOMEM;
- connector = omap_connector_init(dev,
- get_connector_type(dssdev), dssdev, encoder);
+ connector = omap_connector_init(dev, pipe->output, display,
+ encoder);
if (!connector)
return -ENOMEM;
- plane = omap_plane_init(dev, plane_idx, DRM_PLANE_TYPE_PRIMARY,
- plane_crtc_mask);
- if (IS_ERR(plane))
- return PTR_ERR(plane);
-
- crtc = omap_crtc_init(dev, plane, dssdev);
+ crtc = omap_crtc_init(dev, pipe, priv->planes[i]);
if (IS_ERR(crtc))
return PTR_ERR(crtc);
drm_connector_attach_encoder(connector, encoder);
- encoder->possible_crtcs = (1 << crtc_idx);
-
- priv->crtcs[priv->num_crtcs++] = crtc;
- priv->planes[priv->num_planes++] = plane;
- priv->encoders[priv->num_encoders++] = encoder;
- priv->connectors[priv->num_connectors++] = connector;
+ encoder->possible_crtcs = 1 << i;
- plane_idx++;
- crtc_idx++;
+ pipe->crtc = crtc;
+ pipe->encoder = encoder;
+ pipe->connector = connector;
}
- /*
- * Create normal planes for the remaining overlays:
- */
- for (; plane_idx < num_ovls; plane_idx++) {
- struct drm_plane *plane;
-
- if (WARN_ON(priv->num_planes >= ARRAY_SIZE(priv->planes)))
- return -EINVAL;
-
- plane = omap_plane_init(dev, plane_idx, DRM_PLANE_TYPE_OVERLAY,
- plane_crtc_mask);
- if (IS_ERR(plane))
- return PTR_ERR(plane);
-
- priv->planes[priv->num_planes++] = plane;
- }
-
- DBG("registered %d planes, %d crtcs, %d encoders and %d connectors\n",
- priv->num_planes, priv->num_crtcs, priv->num_encoders,
- priv->num_connectors);
+ DBG("registered %u planes, %u crtcs/encoders/connectors\n",
+ priv->num_planes, priv->num_pipes);
dev->mode_config.min_width = 8;
dev->mode_config.min_height = 2;
@@ -335,27 +348,25 @@ static int omap_modeset_init(struct drm_device *dev)
/*
* Enable the HPD in external components if supported
*/
-static void omap_modeset_enable_external_hpd(void)
+static void omap_modeset_enable_external_hpd(struct drm_device *ddev)
{
- struct omap_dss_device *dssdev = NULL;
+ struct omap_drm_private *priv = ddev->dev_private;
+ int i;
- for_each_dss_dev(dssdev) {
- if (dssdev->driver->enable_hpd)
- dssdev->driver->enable_hpd(dssdev);
- }
+ for (i = 0; i < priv->num_pipes; i++)
+ omap_connector_enable_hpd(priv->pipes[i].connector);
}
/*
* Disable the HPD in external components if supported
*/
-static void omap_modeset_disable_external_hpd(void)
+static void omap_modeset_disable_external_hpd(struct drm_device *ddev)
{
- struct omap_dss_device *dssdev = NULL;
+ struct omap_drm_private *priv = ddev->dev_private;
+ int i;
- for_each_dss_dev(dssdev) {
- if (dssdev->driver->disable_hpd)
- dssdev->driver->disable_hpd(dssdev);
- }
+ for (i = 0; i < priv->num_pipes; i++)
+ omap_connector_disable_hpd(priv->pipes[i].connector);
}
/*
@@ -525,6 +536,14 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
DBG("%s", dev_name(dev));
+ /* Allocate and initialize the DRM device. */
+ ddev = drm_dev_alloc(&omap_drm_driver, dev);
+ if (IS_ERR(ddev))
+ return PTR_ERR(ddev);
+
+ priv->ddev = ddev;
+ ddev->dev_private = priv;
+
priv->dev = dev;
priv->dss = omapdss_get_dss();
priv->dispc = dispc_get_dispc(priv->dss);
@@ -532,7 +551,7 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
omap_crtc_pre_init(priv);
- ret = omap_connect_dssdevs();
+ ret = omap_connect_pipelines(ddev);
if (ret)
goto err_crtc_uninit;
@@ -543,16 +562,6 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
mutex_init(&priv->list_lock);
INIT_LIST_HEAD(&priv->obj_list);
- /* Allocate and initialize the DRM device. */
- ddev = drm_dev_alloc(&omap_drm_driver, priv->dev);
- if (IS_ERR(ddev)) {
- ret = PTR_ERR(ddev);
- goto err_destroy_wq;
- }
-
- priv->ddev = ddev;
- ddev->dev_private = priv;
-
/* Get memory bandwidth limits */
if (priv->dispc_ops->get_memory_bandwidth_limit)
priv->max_bandwidth =
@@ -563,23 +572,23 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
ret = omap_modeset_init(ddev);
if (ret) {
dev_err(priv->dev, "omap_modeset_init failed: ret=%d\n", ret);
- goto err_free_drm_dev;
+ goto err_gem_deinit;
}
/* Initialize vblank handling, start with all CRTCs disabled. */
- ret = drm_vblank_init(ddev, priv->num_crtcs);
+ ret = drm_vblank_init(ddev, priv->num_pipes);
if (ret) {
dev_err(priv->dev, "could not init vblank\n");
goto err_cleanup_modeset;
}
- for (i = 0; i < priv->num_crtcs; i++)
- drm_crtc_vblank_off(priv->crtcs[i]);
+ for (i = 0; i < priv->num_pipes; i++)
+ drm_crtc_vblank_off(priv->pipes[i].crtc);
omap_fbdev_init(ddev);
drm_kms_helper_poll_init(ddev);
- omap_modeset_enable_external_hpd();
+ omap_modeset_enable_external_hpd(ddev);
/*
* Register the DRM device with the core and the connectors with
@@ -592,21 +601,20 @@ static int omapdrm_init(struct omap_drm_private *priv, struct device *dev)
return 0;
err_cleanup_helpers:
- omap_modeset_disable_external_hpd();
+ omap_modeset_disable_external_hpd(ddev);
drm_kms_helper_poll_fini(ddev);
omap_fbdev_fini(ddev);
err_cleanup_modeset:
drm_mode_config_cleanup(ddev);
omap_drm_irq_uninstall(ddev);
-err_free_drm_dev:
+err_gem_deinit:
omap_gem_deinit(ddev);
- drm_dev_unref(ddev);
-err_destroy_wq:
destroy_workqueue(priv->wq);
- omap_disconnect_dssdevs();
+ omap_disconnect_pipelines(ddev);
err_crtc_uninit:
- omap_crtc_pre_uninit();
+ omap_crtc_pre_uninit(priv);
+ drm_dev_unref(ddev);
return ret;
}
@@ -618,7 +626,7 @@ static void omapdrm_cleanup(struct omap_drm_private *priv)
drm_dev_unregister(ddev);
- omap_modeset_disable_external_hpd();
+ omap_modeset_disable_external_hpd(ddev);
drm_kms_helper_poll_fini(ddev);
omap_fbdev_fini(ddev);
@@ -630,12 +638,12 @@ static void omapdrm_cleanup(struct omap_drm_private *priv)
omap_drm_irq_uninstall(ddev);
omap_gem_deinit(ddev);
- drm_dev_unref(ddev);
-
destroy_workqueue(priv->wq);
- omap_disconnect_dssdevs();
- omap_crtc_pre_uninit();
+ omap_disconnect_pipelines(ddev);
+ omap_crtc_pre_uninit(priv);
+
+ drm_dev_unref(ddev);
}
static int pdev_probe(struct platform_device *pdev)
@@ -677,36 +685,36 @@ static int pdev_remove(struct platform_device *pdev)
}
#ifdef CONFIG_PM_SLEEP
-static int omap_drm_suspend_all_displays(void)
+static int omap_drm_suspend_all_displays(struct drm_device *ddev)
{
- struct omap_dss_device *dssdev = NULL;
+ struct omap_drm_private *priv = ddev->dev_private;
+ int i;
- for_each_dss_dev(dssdev) {
- if (!dssdev->driver)
- continue;
+ for (i = 0; i < priv->num_pipes; i++) {
+ struct omap_dss_device *display = priv->pipes[i].display;
- if (dssdev->state == OMAP_DSS_DISPLAY_ACTIVE) {
- dssdev->driver->disable(dssdev);
- dssdev->activate_after_resume = true;
+ if (display->state == OMAP_DSS_DISPLAY_ACTIVE) {
+ display->ops->disable(display);
+ display->activate_after_resume = true;
} else {
- dssdev->activate_after_resume = false;
+ display->activate_after_resume = false;
}
}
return 0;
}
-static int omap_drm_resume_all_displays(void)
+static int omap_drm_resume_all_displays(struct drm_device *ddev)
{
- struct omap_dss_device *dssdev = NULL;
+ struct omap_drm_private *priv = ddev->dev_private;
+ int i;
- for_each_dss_dev(dssdev) {
- if (!dssdev->driver)
- continue;
+ for (i = 0; i < priv->num_pipes; i++) {
+ struct omap_dss_device *display = priv->pipes[i].display;
- if (dssdev->activate_after_resume) {
- dssdev->driver->enable(dssdev);
- dssdev->activate_after_resume = false;
+ if (display->activate_after_resume) {
+ display->ops->enable(display);
+ display->activate_after_resume = false;
}
}
@@ -721,7 +729,7 @@ static int omap_drm_suspend(struct device *dev)
drm_kms_helper_poll_disable(drm_dev);
drm_modeset_lock_all(drm_dev);
- omap_drm_suspend_all_displays();
+ omap_drm_suspend_all_displays(drm_dev);
drm_modeset_unlock_all(drm_dev);
return 0;
@@ -733,7 +741,7 @@ static int omap_drm_resume(struct device *dev)
struct drm_device *drm_dev = priv->ddev;
drm_modeset_lock_all(drm_dev);
- omap_drm_resume_all_displays();
+ omap_drm_resume_all_displays(drm_dev);
drm_modeset_unlock_all(drm_dev);
drm_kms_helper_poll_enable(drm_dev);
diff --git a/drivers/gpu/drm/omapdrm/omap_drv.h b/drivers/gpu/drm/omapdrm/omap_drv.h
index f27c8e216adf..bd7f2c227a25 100644
--- a/drivers/gpu/drm/omapdrm/omap_drv.h
+++ b/drivers/gpu/drm/omapdrm/omap_drv.h
@@ -45,6 +45,14 @@
struct omap_drm_usergart;
+struct omap_drm_pipeline {
+ struct drm_crtc *crtc;
+ struct drm_encoder *encoder;
+ struct drm_connector *connector;
+ struct omap_dss_device *output;
+ struct omap_dss_device *display;
+};
+
struct omap_drm_private {
struct drm_device *ddev;
struct device *dev;
@@ -54,18 +62,13 @@ struct omap_drm_private {
struct dispc_device *dispc;
const struct dispc_ops *dispc_ops;
- unsigned int num_crtcs;
- struct drm_crtc *crtcs[8];
+ unsigned int num_pipes;
+ struct omap_drm_pipeline pipes[8];
+ struct omap_drm_pipeline *channels[8];
unsigned int num_planes;
struct drm_plane *planes[8];
- unsigned int num_encoders;
- struct drm_encoder *encoders[8];
-
- unsigned int num_connectors;
- struct drm_connector *connectors[8];
-
struct drm_fb_helper *fbdev;
struct workqueue_struct *wq;
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.c b/drivers/gpu/drm/omapdrm/omap_encoder.c
index fcdf4b0a8eec..452e625f6ce3 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.c
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.c
@@ -36,16 +36,10 @@
*/
struct omap_encoder {
struct drm_encoder base;
- struct omap_dss_device *dssdev;
+ struct omap_dss_device *output;
+ struct omap_dss_device *display;
};
-struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder)
-{
- struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
-
- return omap_encoder->dssdev;
-}
-
static void omap_encoder_destroy(struct drm_encoder *encoder)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
@@ -59,16 +53,65 @@ static const struct drm_encoder_funcs omap_encoder_funcs = {
};
static void omap_encoder_mode_set(struct drm_encoder *encoder,
- struct drm_display_mode *mode,
- struct drm_display_mode *adjusted_mode)
+ struct drm_display_mode *mode,
+ struct drm_display_mode *adjusted_mode)
{
struct drm_device *dev = encoder->dev;
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct omap_dss_device *dssdev = omap_encoder->dssdev;
struct drm_connector *connector;
+ struct omap_dss_device *dssdev;
+ struct videomode vm = { 0 };
bool hdmi_mode;
int r;
+ drm_display_mode_to_videomode(adjusted_mode, &vm);
+
+ /*
+ * HACK: This fixes the vm flags.
+ * struct drm_display_mode does not contain the VSYNC/HSYNC/DE flags and
+ * they get lost when converting back and forth between struct
+ * drm_display_mode and struct videomode. The hack below goes and
+ * fetches the missing flags.
+ *
+ * A better solution is to use DRM's bus-flags through the whole driver.
+ */
+ for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) {
+ unsigned long bus_flags = dssdev->bus_flags;
+
+ if (!(vm.flags & (DISPLAY_FLAGS_DE_LOW |
+ DISPLAY_FLAGS_DE_HIGH))) {
+ if (bus_flags & DRM_BUS_FLAG_DE_LOW)
+ vm.flags |= DISPLAY_FLAGS_DE_LOW;
+ else if (bus_flags & DRM_BUS_FLAG_DE_HIGH)
+ vm.flags |= DISPLAY_FLAGS_DE_HIGH;
+ }
+
+ if (!(vm.flags & (DISPLAY_FLAGS_PIXDATA_POSEDGE |
+ DISPLAY_FLAGS_PIXDATA_NEGEDGE))) {
+ if (bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
+ vm.flags |= DISPLAY_FLAGS_PIXDATA_POSEDGE;
+ else if (bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+ vm.flags |= DISPLAY_FLAGS_PIXDATA_NEGEDGE;
+ }
+
+ if (!(vm.flags & (DISPLAY_FLAGS_SYNC_POSEDGE |
+ DISPLAY_FLAGS_SYNC_NEGEDGE))) {
+ if (bus_flags & DRM_BUS_FLAG_SYNC_POSEDGE)
+ vm.flags |= DISPLAY_FLAGS_SYNC_POSEDGE;
+ else if (bus_flags & DRM_BUS_FLAG_SYNC_NEGEDGE)
+ vm.flags |= DISPLAY_FLAGS_SYNC_NEGEDGE;
+ }
+ }
+
+ /* Set timings for all devices in the display pipeline. */
+ dss_mgr_set_timings(omap_encoder->output, &vm);
+
+ for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) {
+ if (dssdev->ops->set_timings)
+ dssdev->ops->set_timings(dssdev, &vm);
+ }
+
+ /* Set the HDMI mode and HDMI infoframe if applicable. */
hdmi_mode = false;
list_for_each_entry(connector, &dev->mode_config.connector_list, head) {
if (connector->encoder == encoder) {
@@ -77,73 +120,36 @@ static void omap_encoder_mode_set(struct drm_encoder *encoder,
}
}
- if (dssdev->driver->set_hdmi_mode)
- dssdev->driver->set_hdmi_mode(dssdev, hdmi_mode);
+ dssdev = omap_encoder->output;
+
+ if (dssdev->ops->hdmi.set_hdmi_mode)
+ dssdev->ops->hdmi.set_hdmi_mode(dssdev, hdmi_mode);
- if (hdmi_mode && dssdev->driver->set_hdmi_infoframe) {
+ if (hdmi_mode && dssdev->ops->hdmi.set_infoframe) {
struct hdmi_avi_infoframe avi;
r = drm_hdmi_avi_infoframe_from_display_mode(&avi, adjusted_mode,
false);
if (r == 0)
- dssdev->driver->set_hdmi_infoframe(dssdev, &avi);
+ dssdev->ops->hdmi.set_infoframe(dssdev, &avi);
}
}
static void omap_encoder_disable(struct drm_encoder *encoder)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct omap_dss_device *dssdev = omap_encoder->dssdev;
- struct omap_dss_driver *dssdrv = dssdev->driver;
+ struct omap_dss_device *dssdev = omap_encoder->display;
- dssdrv->disable(dssdev);
-}
-
-static int omap_encoder_update(struct drm_encoder *encoder,
- enum omap_channel channel,
- struct videomode *vm)
-{
- struct drm_device *dev = encoder->dev;
- struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct omap_dss_device *dssdev = omap_encoder->dssdev;
- struct omap_dss_driver *dssdrv = dssdev->driver;
- int ret;
-
- if (dssdrv->check_timings) {
- ret = dssdrv->check_timings(dssdev, vm);
- } else {
- struct videomode t = {0};
-
- dssdrv->get_timings(dssdev, &t);
-
- if (memcmp(vm, &t, sizeof(*vm)))
- ret = -EINVAL;
- else
- ret = 0;
- }
-
- if (ret) {
- dev_err(dev->dev, "could not set timings: %d\n", ret);
- return ret;
- }
-
- if (dssdrv->set_timings)
- dssdrv->set_timings(dssdev, vm);
-
- return 0;
+ dssdev->ops->disable(dssdev);
}
static void omap_encoder_enable(struct drm_encoder *encoder)
{
struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
- struct omap_dss_device *dssdev = omap_encoder->dssdev;
- struct omap_dss_driver *dssdrv = dssdev->driver;
+ struct omap_dss_device *dssdev = omap_encoder->display;
int r;
- omap_encoder_update(encoder, omap_crtc_channel(encoder->crtc),
- omap_crtc_timings(encoder->crtc));
-
- r = dssdrv->enable(dssdev);
+ r = dssdev->ops->enable(dssdev);
if (r)
dev_err(encoder->dev->dev,
"Failed to enable display '%s': %d\n",
@@ -154,7 +160,36 @@ static int omap_encoder_atomic_check(struct drm_encoder *encoder,
struct drm_crtc_state *crtc_state,
struct drm_connector_state *conn_state)
{
- return 0;
+ struct omap_encoder *omap_encoder = to_omap_encoder(encoder);
+ enum omap_channel channel = omap_encoder->output->dispc_channel;
+ struct drm_device *dev = encoder->dev;
+ struct omap_drm_private *priv = dev->dev_private;
+ struct omap_dss_device *dssdev;
+ struct videomode vm = { 0 };
+ int ret;
+
+ drm_display_mode_to_videomode(&crtc_state->mode, &vm);
+
+ ret = priv->dispc_ops->mgr_check_timings(priv->dispc, channel, &vm);
+ if (ret)
+ goto done;
+
+ for (dssdev = omap_encoder->output; dssdev; dssdev = dssdev->next) {
+ if (!dssdev->ops->check_timings)
+ continue;
+
+ ret = dssdev->ops->check_timings(dssdev, &vm);
+ if (ret)
+ goto done;
+ }
+
+ drm_display_mode_from_videomode(&vm, &crtc_state->adjusted_mode);
+
+done:
+ if (ret)
+ dev_err(dev->dev, "invalid timings: %d\n", ret);
+
+ return ret;
}
static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
@@ -166,7 +201,8 @@ static const struct drm_encoder_helper_funcs omap_encoder_helper_funcs = {
/* initialize encoder */
struct drm_encoder *omap_encoder_init(struct drm_device *dev,
- struct omap_dss_device *dssdev)
+ struct omap_dss_device *output,
+ struct omap_dss_device *display)
{
struct drm_encoder *encoder = NULL;
struct omap_encoder *omap_encoder;
@@ -175,7 +211,8 @@ struct drm_encoder *omap_encoder_init(struct drm_device *dev,
if (!omap_encoder)
goto fail;
- omap_encoder->dssdev = dssdev;
+ omap_encoder->output = output;
+ omap_encoder->display = display;
encoder = &omap_encoder->base;
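
[Editor's note] With the omapdss rework, an output-to-display pipeline is a singly linked chain of omap_dss_device instances reached through ->next, and both mode_set and atomic_check above simply walk that chain. The distilled loop shape, as used for timing validation (a sketch of the pattern, not new driver code):

	for (dssdev = output; dssdev; dssdev = dssdev->next) {
		if (!dssdev->ops->check_timings)
			continue;	/* device accepts any timings */

		ret = dssdev->ops->check_timings(dssdev, &vm);
		if (ret)
			return ret;	/* first rejection wins */
	}
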
diff --git a/drivers/gpu/drm/omapdrm/omap_encoder.h b/drivers/gpu/drm/omapdrm/omap_encoder.h
index d2f308bec494..a7b5dde63ecb 100644
--- a/drivers/gpu/drm/omapdrm/omap_encoder.h
+++ b/drivers/gpu/drm/omapdrm/omap_encoder.h
@@ -25,9 +25,7 @@ struct drm_encoder;
struct omap_dss_device;
struct drm_encoder *omap_encoder_init(struct drm_device *dev,
- struct omap_dss_device *dssdev);
-
-/* map crtc to vblank mask */
-struct omap_dss_device *omap_encoder_get_dssdev(struct drm_encoder *encoder);
+ struct omap_dss_device *output,
+ struct omap_dss_device *display);
#endif /* __OMAPDRM_ENCODER_H__ */
diff --git a/drivers/gpu/drm/omapdrm/omap_fbdev.c b/drivers/gpu/drm/omapdrm/omap_fbdev.c
index d958cc813a94..b445309b0143 100644
--- a/drivers/gpu/drm/omapdrm/omap_fbdev.c
+++ b/drivers/gpu/drm/omapdrm/omap_fbdev.c
@@ -243,7 +243,7 @@ void omap_fbdev_init(struct drm_device *dev)
struct drm_fb_helper *helper;
int ret = 0;
- if (!priv->num_crtcs || !priv->num_connectors)
+ if (!priv->num_pipes)
return;
fbdev = kzalloc(sizeof(*fbdev), GFP_KERNEL);
@@ -256,7 +256,7 @@ void omap_fbdev_init(struct drm_device *dev)
drm_fb_helper_prepare(dev, helper, &omap_fb_helper_funcs);
- ret = drm_fb_helper_init(dev, helper, priv->num_connectors);
+ ret = drm_fb_helper_init(dev, helper, priv->num_pipes);
if (ret)
goto fail;
diff --git a/drivers/gpu/drm/omapdrm/omap_irq.c b/drivers/gpu/drm/omapdrm/omap_irq.c
index c85115049f86..329ad26d6d50 100644
--- a/drivers/gpu/drm/omapdrm/omap_irq.c
+++ b/drivers/gpu/drm/omapdrm/omap_irq.c
@@ -206,8 +206,8 @@ static irqreturn_t omap_irq_handler(int irq, void *arg)
VERB("irqs: %08x", irqstatus);
- for (id = 0; id < priv->num_crtcs; id++) {
- struct drm_crtc *crtc = priv->crtcs[id];
+ for (id = 0; id < priv->num_pipes; id++) {
+ struct drm_crtc *crtc = priv->pipes[id].crtc;
enum omap_channel channel = omap_crtc_channel(crtc);
if (irqstatus & priv->dispc_ops->mgr_get_vsync_irq(priv->dispc, channel)) {
diff --git a/drivers/gpu/drm/panel/panel-simple.c b/drivers/gpu/drm/panel/panel-simple.c
index 5b5d0a24e713..97964f7f2ace 100644
--- a/drivers/gpu/drm/panel/panel-simple.c
+++ b/drivers/gpu/drm/panel/panel-simple.c
@@ -1385,6 +1385,9 @@ static const struct panel_desc innolux_tv123wam = {
.width = 259,
.height = 173,
},
+ .delay = {
+ .unprepare = 500,
+ },
};
static const struct drm_display_mode innolux_zj070na_01p_mode = {
diff --git a/drivers/gpu/drm/qxl/qxl_display.c b/drivers/gpu/drm/qxl/qxl_display.c
index 0570c6826bff..01704a7f07cb 100644
--- a/drivers/gpu/drm/qxl/qxl_display.c
+++ b/drivers/gpu/drm/qxl/qxl_display.c
@@ -37,7 +37,8 @@ static bool qxl_head_enabled(struct qxl_head *head)
return head->width && head->height;
}
-static void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned count)
+static int qxl_alloc_client_monitors_config(struct qxl_device *qdev,
+ unsigned int count)
{
if (qdev->client_monitors_config &&
count > qdev->client_monitors_config->count) {
@@ -49,15 +50,17 @@ static void qxl_alloc_client_monitors_config(struct qxl_device *qdev, unsigned c
sizeof(struct qxl_monitors_config) +
sizeof(struct qxl_head) * count, GFP_KERNEL);
if (!qdev->client_monitors_config)
- return;
+ return -ENOMEM;
}
qdev->client_monitors_config->count = count;
+ return 0;
}
enum {
MONITORS_CONFIG_MODIFIED,
MONITORS_CONFIG_UNCHANGED,
MONITORS_CONFIG_BAD_CRC,
+ MONITORS_CONFIG_ERROR,
};
static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
@@ -87,7 +90,10 @@ static int qxl_display_copy_rom_client_monitors_config(struct qxl_device *qdev)
&& (num_monitors != qdev->client_monitors_config->count)) {
status = MONITORS_CONFIG_MODIFIED;
}
- qxl_alloc_client_monitors_config(qdev, num_monitors);
+ if (qxl_alloc_client_monitors_config(qdev, num_monitors)) {
+ status = MONITORS_CONFIG_ERROR;
+ return status;
+ }
/* we copy max from the client but it isn't used */
qdev->client_monitors_config->max_allowed =
qdev->monitors_config->max_allowed;
@@ -161,6 +167,10 @@ void qxl_display_read_client_monitors_config(struct qxl_device *qdev)
break;
udelay(5);
}
+ if (status == MONITORS_CONFIG_ERROR) {
+ DRM_DEBUG_KMS("ignoring client monitors config: error");
+ return;
+ }
if (status == MONITORS_CONFIG_BAD_CRC) {
DRM_DEBUG_KMS("ignoring client monitors config: bad crc");
return;
diff --git a/drivers/gpu/drm/qxl/qxl_drv.c b/drivers/gpu/drm/qxl/qxl_drv.c
index 2445e75cf7ea..13c8a662f9b4 100644
--- a/drivers/gpu/drm/qxl/qxl_drv.c
+++ b/drivers/gpu/drm/qxl/qxl_drv.c
@@ -119,7 +119,7 @@ qxl_pci_remove(struct pci_dev *pdev)
dev->dev_private = NULL;
kfree(qdev);
- drm_dev_unref(dev);
+ drm_dev_put(dev);
}
static const struct file_operations qxl_fops = {
@@ -136,20 +136,11 @@ static int qxl_drm_freeze(struct drm_device *dev)
{
struct pci_dev *pdev = dev->pdev;
struct qxl_device *qdev = dev->dev_private;
- struct drm_crtc *crtc;
-
- drm_kms_helper_poll_disable(dev);
-
- console_lock();
- qxl_fbdev_set_suspend(qdev, 1);
- console_unlock();
+ int ret;
- /* unpin the front buffers */
- list_for_each_entry(crtc, &dev->mode_config.crtc_list, head) {
- const struct drm_crtc_helper_funcs *crtc_funcs = crtc->helper_private;
- if (crtc->enabled)
- (*crtc_funcs->disable)(crtc);
- }
+ ret = drm_mode_config_helper_suspend(dev);
+ if (ret)
+ return ret;
qxl_destroy_monitors_object(qdev);
qxl_surf_evict(qdev);
@@ -175,14 +166,7 @@ static int qxl_drm_resume(struct drm_device *dev, bool thaw)
}
qxl_create_monitors_object(qdev);
- drm_helper_resume_force_mode(dev);
-
- console_lock();
- qxl_fbdev_set_suspend(qdev, 0);
- console_unlock();
-
- drm_kms_helper_poll_enable(dev);
- return 0;
+ return drm_mode_config_helper_resume(dev);
}
static int qxl_pm_suspend(struct device *dev)
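
[Editor's note] qxl_drm_freeze()/qxl_drm_resume() now delegate to the generic suspend helpers, which bundle the steps the old code open-coded. Roughly what drm_mode_config_helper_suspend() does, simplified from the DRM modeset helper (the saved state is parked in dev->mode_config.suspend_state):

int drm_mode_config_helper_suspend(struct drm_device *dev)
{
	struct drm_atomic_state *state;

	drm_kms_helper_poll_disable(dev);
	drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 1);

	state = drm_atomic_helper_suspend(dev);
	if (IS_ERR(state)) {
		/* roll back so the device keeps working */
		drm_fb_helper_set_suspend_unlocked(dev->fb_helper, 0);
		drm_kms_helper_poll_enable(dev);
		return PTR_ERR(state);
	}

	dev->mode_config.suspend_state = state;
	return 0;
}
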
diff --git a/drivers/gpu/drm/qxl/qxl_gem.c b/drivers/gpu/drm/qxl/qxl_gem.c
index f5c1e7872e92..89606c819d82 100644
--- a/drivers/gpu/drm/qxl/qxl_gem.c
+++ b/drivers/gpu/drm/qxl/qxl_gem.c
@@ -40,7 +40,7 @@ void qxl_gem_object_free(struct drm_gem_object *gobj)
qxl_surface_evict(qdev, qobj, false);
tbo = &qobj->tbo;
- ttm_bo_unref(&tbo);
+ ttm_bo_put(tbo);
}
int qxl_gem_object_create(struct qxl_device *qdev, int size,
diff --git a/drivers/gpu/drm/qxl/qxl_kms.c b/drivers/gpu/drm/qxl/qxl_kms.c
index 771250aed78d..e25c589d5f50 100644
--- a/drivers/gpu/drm/qxl/qxl_kms.c
+++ b/drivers/gpu/drm/qxl/qxl_kms.c
@@ -102,8 +102,10 @@ int qxl_device_init(struct qxl_device *qdev,
int r, sb;
r = drm_dev_init(&qdev->ddev, drv, &pdev->dev);
- if (r)
- return r;
+ if (r) {
+ pr_err("Unable to init drm dev");
+ goto error;
+ }
qdev->ddev.pdev = pdev;
pci_set_drvdata(pdev, &qdev->ddev);
@@ -121,6 +123,11 @@ int qxl_device_init(struct qxl_device *qdev,
qdev->io_base = pci_resource_start(pdev, 3);
qdev->vram_mapping = io_mapping_create_wc(qdev->vram_base, pci_resource_len(pdev, 0));
+ if (!qdev->vram_mapping) {
+ pr_err("Unable to create vram_mapping");
+ r = -ENOMEM;
+ goto error;
+ }
if (pci_resource_len(pdev, 4) > 0) {
/* 64bit surface bar present */
@@ -139,6 +146,11 @@ int qxl_device_init(struct qxl_device *qdev,
qdev->surface_mapping =
io_mapping_create_wc(qdev->surfaceram_base,
qdev->surfaceram_size);
+ if (!qdev->surface_mapping) {
+ pr_err("Unable to create surface_mapping");
+ r = -ENOMEM;
+ goto vram_mapping_free;
+ }
}
DRM_DEBUG_KMS("qxl: vram %llx-%llx(%dM %dk), surface %llx-%llx(%dM %dk, %s)\n",
@@ -155,20 +167,29 @@ int qxl_device_init(struct qxl_device *qdev,
qdev->rom = ioremap(qdev->rom_base, qdev->rom_size);
if (!qdev->rom) {
pr_err("Unable to ioremap ROM\n");
- return -ENOMEM;
+ r = -ENOMEM;
+ goto surface_mapping_free;
}
- qxl_check_device(qdev);
+ if (!qxl_check_device(qdev)) {
+ r = -ENODEV;
+ goto surface_mapping_free;
+ }
r = qxl_bo_init(qdev);
if (r) {
DRM_ERROR("bo init failed %d\n", r);
- return r;
+ goto rom_unmap;
}
qdev->ram_header = ioremap(qdev->vram_base +
qdev->rom->ram_header_offset,
sizeof(*qdev->ram_header));
+ if (!qdev->ram_header) {
+ DRM_ERROR("Unable to ioremap RAM header\n");
+ r = -ENOMEM;
+ goto bo_fini;
+ }
qdev->command_ring = qxl_ring_create(&(qdev->ram_header->cmd_ring_hdr),
sizeof(struct qxl_command),
@@ -176,6 +197,11 @@ int qxl_device_init(struct qxl_device *qdev,
qdev->io_base + QXL_IO_NOTIFY_CMD,
false,
&qdev->display_event);
+ if (!qdev->command_ring) {
+ DRM_ERROR("Unable to create command ring\n");
+ r = -ENOMEM;
+ goto ram_header_unmap;
+ }
qdev->cursor_ring = qxl_ring_create(
&(qdev->ram_header->cursor_ring_hdr),
@@ -185,12 +211,23 @@ int qxl_device_init(struct qxl_device *qdev,
false,
&qdev->cursor_event);
+ if (!qdev->cursor_ring) {
+ DRM_ERROR("Unable to create cursor ring\n");
+ r = -ENOMEM;
+ goto command_ring_free;
+ }
+
qdev->release_ring = qxl_ring_create(
&(qdev->ram_header->release_ring_hdr),
sizeof(uint64_t),
QXL_RELEASE_RING_SIZE, 0, true,
NULL);
+ if (!qdev->release_ring) {
+ DRM_ERROR("Unable to create release ring\n");
+ r = -ENOMEM;
+ goto cursor_ring_free;
+ }
/* TODO - slot initialization should happen on reset. where is our
* reset handler? */
qdev->n_mem_slots = qdev->rom->slots_end;
@@ -203,6 +240,12 @@ int qxl_device_init(struct qxl_device *qdev,
kmalloc_array(qdev->n_mem_slots, sizeof(struct qxl_memslot),
GFP_KERNEL);
+ if (!qdev->mem_slots) {
+ DRM_ERROR("Unable to alloc mem slots\n");
+ r = -ENOMEM;
+ goto release_ring_free;
+ }
+
idr_init(&qdev->release_idr);
spin_lock_init(&qdev->release_idr_lock);
spin_lock_init(&qdev->release_lock);
@@ -218,8 +261,10 @@ int qxl_device_init(struct qxl_device *qdev,
/* must initialize irq before first async io - slot creation */
r = qxl_irq_init(qdev);
- if (r)
- return r;
+ if (r) {
+ DRM_ERROR("Unable to init qxl irq\n");
+ goto mem_slots_free;
+ }
/*
* Note that virtual is surface0. We rely on the single ioremap done
@@ -243,6 +288,27 @@ int qxl_device_init(struct qxl_device *qdev,
INIT_WORK(&qdev->gc_work, qxl_gc_work);
return 0;
+
+mem_slots_free:
+ kfree(qdev->mem_slots);
+release_ring_free:
+ qxl_ring_free(qdev->release_ring);
+cursor_ring_free:
+ qxl_ring_free(qdev->cursor_ring);
+command_ring_free:
+ qxl_ring_free(qdev->command_ring);
+ram_header_unmap:
+ iounmap(qdev->ram_header);
+bo_fini:
+ qxl_bo_fini(qdev);
+rom_unmap:
+ iounmap(qdev->rom);
+surface_mapping_free:
+ io_mapping_free(qdev->surface_mapping);
+vram_mapping_free:
+ io_mapping_free(qdev->vram_mapping);
+error:
+ return r;
}
void qxl_device_fini(struct qxl_device *qdev)
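
[Editor's note] The error ladder added to qxl_device_init() follows the standard kernel unwind pattern: every acquired resource gets a label, and a failure jumps to the label that releases everything acquired so far, in reverse order. A condensed, self-contained illustration (the struct and names are generic, not qxl's):

struct example {
	void *a;
	void *b;
};

static int example_init(struct example *ex)
{
	int r;

	ex->a = kmalloc(16, GFP_KERNEL);
	if (!ex->a)
		return -ENOMEM;		/* nothing to unwind yet */

	ex->b = kmalloc(16, GFP_KERNEL);
	if (!ex->b) {
		r = -ENOMEM;
		goto a_free;		/* undo only what succeeded */
	}

	return 0;

a_free:
	kfree(ex->a);
	return r;
}
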
diff --git a/drivers/gpu/drm/radeon/radeon_drv.c b/drivers/gpu/drm/radeon/radeon_drv.c
index 2a7977a23b31..99c63eeb2866 100644
--- a/drivers/gpu/drm/radeon/radeon_drv.c
+++ b/drivers/gpu/drm/radeon/radeon_drv.c
@@ -316,27 +316,6 @@ static struct drm_driver kms_driver;
bool radeon_device_is_virtual(void);
-static int radeon_kick_out_firmware_fb(struct pci_dev *pdev)
-{
- struct apertures_struct *ap;
- bool primary = false;
-
- ap = alloc_apertures(1);
- if (!ap)
- return -ENOMEM;
-
- ap->ranges[0].base = pci_resource_start(pdev, 0);
- ap->ranges[0].size = pci_resource_len(pdev, 0);
-
-#ifdef CONFIG_X86
- primary = pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW;
-#endif
- drm_fb_helper_remove_conflicting_framebuffers(ap, "radeondrmfb", primary);
- kfree(ap);
-
- return 0;
-}
-
static int radeon_pci_probe(struct pci_dev *pdev,
const struct pci_device_id *ent)
{
@@ -346,7 +325,7 @@ static int radeon_pci_probe(struct pci_dev *pdev,
return -EPROBE_DEFER;
/* Get rid of things like offb */
- ret = radeon_kick_out_firmware_fb(pdev);
+ ret = drm_fb_helper_remove_conflicting_pci_framebuffers(pdev, 0, "radeondrmfb");
if (ret)
return ret;
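
[Editor's note] drm_fb_helper_remove_conflicting_pci_framebuffers() absorbs the aperture bookkeeping deleted above, so PCI drivers no longer open-code it. Roughly what the helper does with its arguments (simplified; the real implementation lives in the DRM fb-helper code):

	struct apertures_struct *ap = alloc_apertures(1);

	if (!ap)
		return -ENOMEM;

	ap->ranges[0].base = pci_resource_start(pdev, resource_id);
	ap->ranges[0].size = pci_resource_len(pdev, resource_id);
	drm_fb_helper_remove_conflicting_framebuffers(ap, name,
		pdev->resource[PCI_ROM_RESOURCE].flags & IORESOURCE_ROM_SHADOW);
	kfree(ap);
	return 0;
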
diff --git a/drivers/gpu/drm/radeon/radeon_mn.c b/drivers/gpu/drm/radeon/radeon_mn.c
index abd24975c9b1..f8b35df44c60 100644
--- a/drivers/gpu/drm/radeon/radeon_mn.c
+++ b/drivers/gpu/drm/radeon/radeon_mn.c
@@ -118,19 +118,27 @@ static void radeon_mn_release(struct mmu_notifier *mn,
* We block for all BOs between start and end to be idle and
* unmap them by move them into system domain again.
*/
-static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
+static int radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
struct mm_struct *mm,
unsigned long start,
- unsigned long end)
+ unsigned long end,
+ bool blockable)
{
struct radeon_mn *rmn = container_of(mn, struct radeon_mn, mn);
struct ttm_operation_ctx ctx = { false, false };
struct interval_tree_node *it;
+ int ret = 0;
/* notification is exclusive, but interval is inclusive */
end -= 1;
- mutex_lock(&rmn->lock);
+ /* TODO we should be able to split locking for interval tree and
+ * the tear down.
+ */
+ if (blockable)
+ mutex_lock(&rmn->lock);
+ else if (!mutex_trylock(&rmn->lock))
+ return -EAGAIN;
it = interval_tree_iter_first(&rmn->objects, start, end);
while (it) {
@@ -138,6 +146,11 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
struct radeon_bo *bo;
long r;
+ if (!blockable) {
+ ret = -EAGAIN;
+ goto out_unlock;
+ }
+
node = container_of(it, struct radeon_mn_node, it);
it = interval_tree_iter_next(it, start, end);
@@ -166,7 +179,10 @@ static void radeon_mn_invalidate_range_start(struct mmu_notifier *mn,
}
}
+out_unlock:
mutex_unlock(&rmn->lock);
+
+ return ret;
}
static const struct mmu_notifier_ops radeon_mn_ops = {
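
[Editor's note] The new 'blockable' parameter tells MMU notifier callbacks whether they may sleep; when it is false, the callback must either finish without blocking or bail out with -EAGAIN. The locking idiom this hunk adopts, reduced to its core:

static int example_invalidate_range_start(struct mutex *lock, bool blockable)
{
	if (blockable)
		mutex_lock(lock);
	else if (!mutex_trylock(lock))
		return -EAGAIN;		/* caller retries when it may sleep */

	/* ... tear down mappings ... */

	mutex_unlock(lock);
	return 0;
}
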
diff --git a/drivers/gpu/drm/radeon/radeon_object.c b/drivers/gpu/drm/radeon/radeon_object.c
index edbb4cd519fd..ba2fd295697f 100644
--- a/drivers/gpu/drm/radeon/radeon_object.c
+++ b/drivers/gpu/drm/radeon/radeon_object.c
@@ -307,7 +307,7 @@ struct radeon_bo *radeon_bo_ref(struct radeon_bo *bo)
if (bo == NULL)
return NULL;
- ttm_bo_reference(&bo->tbo);
+ ttm_bo_get(&bo->tbo);
return bo;
}
@@ -320,9 +320,8 @@ void radeon_bo_unref(struct radeon_bo **bo)
return;
rdev = (*bo)->rdev;
tbo = &((*bo)->tbo);
- ttm_bo_unref(&tbo);
- if (tbo == NULL)
- *bo = NULL;
+ ttm_bo_put(tbo);
+ *bo = NULL;
}
int radeon_bo_pin_restricted(struct radeon_bo *bo, u32 domain, u64 max_offset,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
index 15dc9caa128b..8a9e5e6f16b4 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.c
@@ -691,6 +691,65 @@ static const struct drm_crtc_helper_funcs crtc_helper_funcs = {
.atomic_disable = rcar_du_crtc_atomic_disable,
};
+static void rcar_du_crtc_crc_init(struct rcar_du_crtc *rcrtc)
+{
+ struct rcar_du_device *rcdu = rcrtc->group->dev;
+ const char **sources;
+ unsigned int count;
+ int i = -1;
+
+ /* CRC available only on Gen3 HW. */
+ if (rcdu->info->gen < 3)
+ return;
+
+ /* Reserve 1 for "auto" source. */
+ count = rcrtc->vsp->num_planes + 1;
+
+ sources = kmalloc_array(count, sizeof(*sources), GFP_KERNEL);
+ if (!sources)
+ return;
+
+ sources[0] = kstrdup("auto", GFP_KERNEL);
+ if (!sources[0])
+ goto error;
+
+ for (i = 0; i < rcrtc->vsp->num_planes; ++i) {
+ struct drm_plane *plane = &rcrtc->vsp->planes[i].plane;
+ char name[16];
+
+ sprintf(name, "plane%u", plane->base.id);
+ sources[i + 1] = kstrdup(name, GFP_KERNEL);
+ if (!sources[i + 1])
+ goto error;
+ }
+
+ rcrtc->sources = sources;
+ rcrtc->sources_count = count;
+ return;
+
+error:
+ while (i >= 0) {
+ kfree(sources[i]);
+ i--;
+ }
+ kfree(sources);
+}
+
+static void rcar_du_crtc_crc_cleanup(struct rcar_du_crtc *rcrtc)
+{
+ unsigned int i;
+
+ if (!rcrtc->sources)
+ return;
+
+ for (i = 0; i < rcrtc->sources_count; i++)
+ kfree(rcrtc->sources[i]);
+ kfree(rcrtc->sources);
+
+ rcrtc->sources = NULL;
+ rcrtc->sources_count = 0;
+}
+
static struct drm_crtc_state *
rcar_du_crtc_atomic_duplicate_state(struct drm_crtc *crtc)
{
@@ -717,6 +776,15 @@ static void rcar_du_crtc_atomic_destroy_state(struct drm_crtc *crtc,
kfree(to_rcar_crtc_state(state));
}
+static void rcar_du_crtc_cleanup(struct drm_crtc *crtc)
+{
+ struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+
+ rcar_du_crtc_crc_cleanup(rcrtc);
+
+ return drm_crtc_cleanup(crtc);
+}
+
static void rcar_du_crtc_reset(struct drm_crtc *crtc)
{
struct rcar_du_crtc_state *state;
@@ -756,17 +824,11 @@ static void rcar_du_crtc_disable_vblank(struct drm_crtc *crtc)
rcrtc->vblank_enable = false;
}
-static int rcar_du_crtc_set_crc_source(struct drm_crtc *crtc,
- const char *source_name,
- size_t *values_cnt)
+static int rcar_du_crtc_parse_crc_source(struct rcar_du_crtc *rcrtc,
+ const char *source_name,
+ enum vsp1_du_crc_source *source)
{
- struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
- struct drm_modeset_acquire_ctx ctx;
- struct drm_crtc_state *crtc_state;
- struct drm_atomic_state *state;
- enum vsp1_du_crc_source source;
- unsigned int index = 0;
- unsigned int i;
+ unsigned int index;
int ret;
/*
@@ -774,31 +836,72 @@ static int rcar_du_crtc_set_crc_source(struct drm_crtc *crtc,
* CRC on an input plane (%u is the plane ID), and "auto" to compute the
* CRC on the composer (VSP) output.
*/
+
if (!source_name) {
- source = VSP1_DU_CRC_NONE;
+ *source = VSP1_DU_CRC_NONE;
+ return 0;
} else if (!strcmp(source_name, "auto")) {
- source = VSP1_DU_CRC_OUTPUT;
+ *source = VSP1_DU_CRC_OUTPUT;
+ return 0;
} else if (strstarts(source_name, "plane")) {
- source = VSP1_DU_CRC_PLANE;
+ unsigned int i;
+
+ *source = VSP1_DU_CRC_PLANE;
ret = kstrtouint(source_name + strlen("plane"), 10, &index);
if (ret < 0)
return ret;
for (i = 0; i < rcrtc->vsp->num_planes; ++i) {
- if (index == rcrtc->vsp->planes[i].plane.base.id) {
- index = i;
- break;
- }
+ if (index == rcrtc->vsp->planes[i].plane.base.id)
+ return i;
}
+ }
- if (i >= rcrtc->vsp->num_planes)
- return -EINVAL;
- } else {
+ return -EINVAL;
+}
+
+static int rcar_du_crtc_verify_crc_source(struct drm_crtc *crtc,
+ const char *source_name,
+ size_t *values_cnt)
+{
+ struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+ enum vsp1_du_crc_source source;
+
+ if (rcar_du_crtc_parse_crc_source(rcrtc, source_name, &source) < 0) {
+ DRM_DEBUG_DRIVER("unknown source %s\n", source_name);
return -EINVAL;
}
*values_cnt = 1;
+ return 0;
+}
+
+static const char *const *rcar_du_crtc_get_crc_sources(struct drm_crtc *crtc,
+ size_t *count)
+{
+ struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+
+ *count = rcrtc->sources_count;
+ return rcrtc->sources;
+}
+
+static int rcar_du_crtc_set_crc_source(struct drm_crtc *crtc,
+ const char *source_name)
+{
+ struct rcar_du_crtc *rcrtc = to_rcar_crtc(crtc);
+ struct drm_modeset_acquire_ctx ctx;
+ struct drm_crtc_state *crtc_state;
+ struct drm_atomic_state *state;
+ enum vsp1_du_crc_source source;
+ unsigned int index;
+ int ret;
+
+ ret = rcar_du_crtc_parse_crc_source(rcrtc, source_name, &source);
+ if (ret < 0)
+ return ret;
+
+ index = ret;
/* Perform an atomic commit to set the CRC source. */
drm_modeset_acquire_init(&ctx, 0);
@@ -853,7 +956,7 @@ static const struct drm_crtc_funcs crtc_funcs_gen2 = {
static const struct drm_crtc_funcs crtc_funcs_gen3 = {
.reset = rcar_du_crtc_reset,
- .destroy = drm_crtc_cleanup,
+ .destroy = rcar_du_crtc_cleanup,
.set_config = drm_atomic_helper_set_config,
.page_flip = drm_atomic_helper_page_flip,
.atomic_duplicate_state = rcar_du_crtc_atomic_duplicate_state,
@@ -861,6 +964,8 @@ static const struct drm_crtc_funcs crtc_funcs_gen3 = {
.enable_vblank = rcar_du_crtc_enable_vblank,
.disable_vblank = rcar_du_crtc_disable_vblank,
.set_crc_source = rcar_du_crtc_set_crc_source,
+ .verify_crc_source = rcar_du_crtc_verify_crc_source,
+ .get_crc_sources = rcar_du_crtc_get_crc_sources,
};
/* -----------------------------------------------------------------------------
@@ -999,5 +1104,7 @@ int rcar_du_crtc_create(struct rcar_du_group *rgrp, unsigned int swindex,
return ret;
}
+ rcar_du_crtc_crc_init(rcrtc);
+
return 0;
}
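
[Editor's note] rcar_du_crtc_crc_init() builds each CRC source name with a stack sprintf() plus kstrdup(); kasprintf() would fold the two steps into a single allocation. A sketch of that alternative (a design option, not what the patch does):

	sources[i + 1] = kasprintf(GFP_KERNEL, "plane%u", plane->base.id);
	if (!sources[i + 1])
		goto error;	/* frees sources[0..i] as above */
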
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
index 7680cb2636c8..592c79993e08 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
+++ b/drivers/gpu/drm/rcar-du/rcar_du_crtc.h
@@ -67,6 +67,9 @@ struct rcar_du_crtc {
struct rcar_du_group *group;
struct rcar_du_vsp *vsp;
unsigned int vsp_pipe;
+
+ const char *const *sources;
+ unsigned int sources_count;
};
#define to_rcar_crtc(c) container_of(c, struct rcar_du_crtc, crtc)
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_plane.c b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
index c20f7ed48c8d..8861e715c248 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_plane.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_plane.c
@@ -690,14 +690,12 @@ static void rcar_du_plane_reset(struct drm_plane *plane)
if (state == NULL)
return;
+ __drm_atomic_helper_plane_reset(plane, &state->state);
+
state->hwindex = -1;
state->source = RCAR_DU_PLANE_MEMORY;
state->colorkey = RCAR_DU_COLORKEY_NONE;
state->state.zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1;
-
- plane->state = &state->state;
- plane->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
- plane->state->plane = plane;
}
static int rcar_du_plane_atomic_set_property(struct drm_plane *plane,
diff --git a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
index 72eebeda518e..45eb777a16a4 100644
--- a/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
+++ b/drivers/gpu/drm/rcar-du/rcar_du_vsp.c
@@ -346,11 +346,8 @@ static void rcar_du_vsp_plane_reset(struct drm_plane *plane)
if (state == NULL)
return;
- state->state.alpha = DRM_BLEND_ALPHA_OPAQUE;
+ __drm_atomic_helper_plane_reset(plane, &state->state);
state->state.zpos = plane->type == DRM_PLANE_TYPE_PRIMARY ? 0 : 1;
-
- plane->state = &state->state;
- plane->state->plane = plane;
}
static const struct drm_plane_funcs rcar_du_vsp_plane_funcs = {
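
[Editor's note] Both plane-reset hunks above switch to __drm_atomic_helper_plane_reset(), which links the fresh state to the plane and applies the generic defaults the drivers used to set by hand. Roughly, simplified from the DRM atomic helpers:

void __drm_atomic_helper_plane_reset(struct drm_plane *plane,
				     struct drm_plane_state *state)
{
	state->plane = plane;
	state->rotation = DRM_MODE_ROTATE_0;	/* among other defaults */
	state->alpha = DRM_BLEND_ALPHA_OPAQUE;

	plane->state = state;
}
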
diff --git a/drivers/gpu/drm/rockchip/Kconfig b/drivers/gpu/drm/rockchip/Kconfig
index 0ccc76217ee4..26438d45732b 100644
--- a/drivers/gpu/drm/rockchip/Kconfig
+++ b/drivers/gpu/drm/rockchip/Kconfig
@@ -8,6 +8,7 @@ config DRM_ROCKCHIP
select DRM_ANALOGIX_DP if ROCKCHIP_ANALOGIX_DP
select DRM_DW_HDMI if ROCKCHIP_DW_HDMI
select DRM_MIPI_DSI if ROCKCHIP_DW_MIPI_DSI
+ select DRM_RGB if ROCKCHIP_RGB
select SND_SOC_HDMI_CODEC if ROCKCHIP_CDN_DP && SND_SOC
help
Choose this option if you have a Rockchip soc chipset.
@@ -23,7 +24,7 @@ config ROCKCHIP_ANALOGIX_DP
help
This selects support for Rockchip SoC specific extensions
for the Analogix Core DP driver. If you want to enable DP
- on RK3288 based SoC, you should selet this option.
+ on RK3288 or RK3399 based SoC, you should select this option.
config ROCKCHIP_CDN_DP
bool "Rockchip cdn DP"
@@ -39,16 +40,16 @@ config ROCKCHIP_DW_HDMI
help
This selects support for Rockchip SoC specific extensions
for the Synopsys DesignWare HDMI driver. If you want to
- enable HDMI on RK3288 based SoC, you should selet this
- option.
+ enable HDMI on RK3288 or RK3399 based SoC, you should select
+ this option.
config ROCKCHIP_DW_MIPI_DSI
bool "Rockchip specific extensions for Synopsys DW MIPI DSI"
help
- This selects support for Rockchip SoC specific extensions
- for the Synopsys DesignWare HDMI driver. If you want to
- enable MIPI DSI on RK3288 based SoC, you should selet this
- option.
+ This selects support for Rockchip SoC specific extensions
+ for the Synopsys DesignWare HDMI driver. If you want to
+ enable MIPI DSI on RK3288 or RK3399 based SoC, you should
+ select this option.
config ROCKCHIP_INNO_HDMI
bool "Rockchip specific extensions for Innosilicon HDMI"
@@ -66,4 +67,14 @@ config ROCKCHIP_LVDS
Rockchip rk3288 SoC has LVDS TX Controller can be used, and it
support LVDS, rgb, dual LVDS output mode. say Y to enable its
driver.
+
+config ROCKCHIP_RGB
+ bool "Rockchip RGB support"
+ depends on DRM_ROCKCHIP
+ depends on PINCTRL
+ help
+ Choose this option to enable support for Rockchip RGB output.
+ Some Rockchip CRTCs, like the rv1108, can output parallel and
+ serial RGB directly to a panel or to a conversion chip. Say Y
+ to enable this driver.
endif
diff --git a/drivers/gpu/drm/rockchip/Makefile b/drivers/gpu/drm/rockchip/Makefile
index a314e2109e76..868263ff0302 100644
--- a/drivers/gpu/drm/rockchip/Makefile
+++ b/drivers/gpu/drm/rockchip/Makefile
@@ -14,5 +14,6 @@ rockchipdrm-$(CONFIG_ROCKCHIP_DW_HDMI) += dw_hdmi-rockchip.o
rockchipdrm-$(CONFIG_ROCKCHIP_DW_MIPI_DSI) += dw-mipi-dsi.o
rockchipdrm-$(CONFIG_ROCKCHIP_INNO_HDMI) += inno_hdmi.o
rockchipdrm-$(CONFIG_ROCKCHIP_LVDS) += rockchip_lvds.o
+rockchipdrm-$(CONFIG_ROCKCHIP_RGB) += rockchip_rgb.o
obj-$(CONFIG_DRM_ROCKCHIP) += rockchipdrm.o
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
index f814d37b1db2..5864cb452c5c 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.c
@@ -24,6 +24,7 @@
#include <linux/pm_runtime.h>
#include <linux/module.h>
#include <linux/of_graph.h>
+#include <linux/of_platform.h>
#include <linux/component.h>
#include <linux/console.h>
#include <linux/iommu.h>
@@ -184,7 +185,7 @@ err_mode_config_cleanup:
err_free:
drm_dev->dev_private = NULL;
dev_set_drvdata(dev, NULL);
- drm_dev_unref(drm_dev);
+ drm_dev_put(drm_dev);
return ret;
}
@@ -204,7 +205,7 @@ static void rockchip_drm_unbind(struct device *dev)
drm_dev->dev_private = NULL;
dev_set_drvdata(dev, NULL);
- drm_dev_unref(drm_dev);
+ drm_dev_put(drm_dev);
}
static const struct file_operations rockchip_drm_driver_fops = {
@@ -243,60 +244,18 @@ static struct drm_driver rockchip_drm_driver = {
};
#ifdef CONFIG_PM_SLEEP
-static void rockchip_drm_fb_suspend(struct drm_device *drm)
-{
- struct rockchip_drm_private *priv = drm->dev_private;
-
- console_lock();
- drm_fb_helper_set_suspend(&priv->fbdev_helper, 1);
- console_unlock();
-}
-
-static void rockchip_drm_fb_resume(struct drm_device *drm)
-{
- struct rockchip_drm_private *priv = drm->dev_private;
-
- console_lock();
- drm_fb_helper_set_suspend(&priv->fbdev_helper, 0);
- console_unlock();
-}
-
static int rockchip_drm_sys_suspend(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct rockchip_drm_private *priv;
-
- if (!drm)
- return 0;
- drm_kms_helper_poll_disable(drm);
- rockchip_drm_fb_suspend(drm);
-
- priv = drm->dev_private;
- priv->state = drm_atomic_helper_suspend(drm);
- if (IS_ERR(priv->state)) {
- rockchip_drm_fb_resume(drm);
- drm_kms_helper_poll_enable(drm);
- return PTR_ERR(priv->state);
- }
-
- return 0;
+ return drm_mode_config_helper_suspend(drm);
}
static int rockchip_drm_sys_resume(struct device *dev)
{
struct drm_device *drm = dev_get_drvdata(dev);
- struct rockchip_drm_private *priv;
-
- if (!drm)
- return 0;
-
- priv = drm->dev_private;
- drm_atomic_helper_resume(drm, priv->state);
- rockchip_drm_fb_resume(drm);
- drm_kms_helper_poll_enable(drm);
- return 0;
+ return drm_mode_config_helper_resume(drm);
}
#endif
@@ -309,6 +268,53 @@ static const struct dev_pm_ops rockchip_drm_pm_ops = {
static struct platform_driver *rockchip_sub_drivers[MAX_ROCKCHIP_SUB_DRIVERS];
static int num_rockchip_sub_drivers;
+/*
+ * Check whether a vop endpoint leads to a rockchip subdriver or an
+ * external bridge. Should be called from the drivers' component bind
+ * stage to ensure that all subdrivers have been probed.
+ *
+ * @ep: endpoint of a rockchip vop
+ *
+ * Returns true if the endpoint is handled by a rockchip subdriver,
+ * false if it connects to an external bridge, and -ENODEV if the
+ * remote port does not contain a device.
+ */
+int rockchip_drm_endpoint_is_subdriver(struct device_node *ep)
+{
+ struct device_node *node = of_graph_get_remote_port_parent(ep);
+ struct platform_device *pdev;
+ struct device_driver *drv;
+ int i;
+
+ if (!node)
+ return -ENODEV;
+
+ /* a "disabled" status property prevents creation of the platform device */
+ pdev = of_find_device_by_node(node);
+ of_node_put(node);
+ if (!pdev)
+ return -ENODEV;
+
+ /*
+ * All rockchip subdrivers have probed at this point, so
+ * any device not having a driver now is an external bridge.
+ */
+ drv = pdev->dev.driver;
+ if (!drv) {
+ platform_device_put(pdev);
+ return false;
+ }
+
+ for (i = 0; i < num_rockchip_sub_drivers; i++) {
+ if (rockchip_sub_drivers[i] == to_platform_driver(drv)) {
+ platform_device_put(pdev);
+ return true;
+ }
+ }
+
+ platform_device_put(pdev);
+ return false;
+}
+
static int compare_dev(struct device *dev, void *data)
{
return dev == (struct device *)data;
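
[Editor's note] Note the tri-state return of rockchip_drm_endpoint_is_subdriver(): callers must not treat the result as a plain boolean. The rockchip_rgb.c code added below skips only endpoints positively identified as subdrivers:

	/* <0: no device behind the endpoint, 0: external bridge */
	if (rockchip_drm_endpoint_is_subdriver(endpoint) > 0)
		continue;
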
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
index 3a6ebfc26036..21a023a97bb8 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_drv.h
@@ -51,7 +51,6 @@ struct rockchip_crtc_state {
struct rockchip_drm_private {
struct drm_fb_helper fbdev_helper;
struct drm_gem_object *fbdev_bo;
- struct drm_atomic_state *state;
struct iommu_domain *domain;
struct mutex mm_lock;
struct drm_mm mm;
@@ -65,6 +64,7 @@ void rockchip_drm_dma_detach_device(struct drm_device *drm_dev,
struct device *dev);
int rockchip_drm_wait_vact_end(struct drm_crtc *crtc, unsigned int mstimeout);
+int rockchip_drm_endpoint_is_subdriver(struct device_node *ep);
extern struct platform_driver cdn_dp_driver;
extern struct platform_driver dw_hdmi_rockchip_pltfm_driver;
extern struct platform_driver dw_mipi_dsi_driver;
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
index 1359e5c773e4..0c35a88e33dd 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.c
@@ -32,6 +32,7 @@
#include <linux/of_device.h>
#include <linux/pm_runtime.h>
#include <linux/component.h>
+#include <linux/overflow.h>
#include <linux/reset.h>
#include <linux/delay.h>
@@ -41,6 +42,7 @@
#include "rockchip_drm_fb.h"
#include "rockchip_drm_psr.h"
#include "rockchip_drm_vop.h"
+#include "rockchip_rgb.h"
#define VOP_WIN_SET(x, win, name, v) \
vop_reg_set(vop, &win->phy->name, win->base, ~0, v, #name)
@@ -92,6 +94,7 @@ struct vop_win {
struct vop *vop;
};
+struct rockchip_rgb;
struct vop {
struct drm_crtc crtc;
struct device *dev;
@@ -135,6 +138,9 @@ struct vop {
/* vop dclk reset */
struct reset_control *dclk_rst;
+ /* optional internal rgb encoder */
+ struct rockchip_rgb *rgb;
+
struct vop_win win[];
};
@@ -1111,7 +1117,7 @@ static struct drm_connector *vop_get_edp_connector(struct vop *vop)
}
static int vop_crtc_set_crc_source(struct drm_crtc *crtc,
- const char *source_name, size_t *values_cnt)
+ const char *source_name)
{
struct vop *vop = to_vop(crtc);
struct drm_connector *connector;
@@ -1121,8 +1127,6 @@ static int vop_crtc_set_crc_source(struct drm_crtc *crtc,
if (!connector)
return -EINVAL;
- *values_cnt = 3;
-
if (source_name && strcmp(source_name, "auto") == 0)
ret = analogix_dp_start_crc(connector);
else if (!source_name)
@@ -1132,9 +1136,28 @@ static int vop_crtc_set_crc_source(struct drm_crtc *crtc,
return ret;
}
+
+static int
+vop_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
+ size_t *values_cnt)
+{
+ if (source_name && strcmp(source_name, "auto") != 0)
+ return -EINVAL;
+
+ *values_cnt = 3;
+ return 0;
+}
+
#else
static int vop_crtc_set_crc_source(struct drm_crtc *crtc,
- const char *source_name, size_t *values_cnt)
+ const char *source_name)
+{
+ return -ENODEV;
+}
+
+static int
+vop_crtc_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
+ size_t *values_cnt)
{
return -ENODEV;
}
@@ -1150,6 +1173,7 @@ static const struct drm_crtc_funcs vop_crtc_funcs = {
.enable_vblank = vop_crtc_enable_vblank,
.disable_vblank = vop_crtc_disable_vblank,
.set_crc_source = vop_crtc_set_crc_source,
+ .verify_crc_source = vop_crtc_verify_crc_source,
};
static void vop_fb_unref_worker(struct drm_flip_work *work, void *val)
@@ -1561,7 +1585,6 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
struct drm_device *drm_dev = data;
struct vop *vop;
struct resource *res;
- size_t alloc_size;
int ret, irq;
vop_data = of_device_get_match_data(dev);
@@ -1569,8 +1592,8 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
return -ENODEV;
/* Allocate vop struct and its vop_win array */
- alloc_size = sizeof(*vop) + sizeof(*vop->win) * vop_data->win_size;
- vop = devm_kzalloc(dev, alloc_size, GFP_KERNEL);
+ vop = devm_kzalloc(dev, struct_size(vop, win, vop_data->win_size),
+ GFP_KERNEL);
if (!vop)
return -ENOMEM;
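
[Editor's note] struct_size() from <linux/overflow.h> computes sizeof(*vop) plus win_size trailing elements while saturating to SIZE_MAX on overflow, so a corrupt win_size makes the allocation fail cleanly instead of wrapping to a short buffer. The general shape, for any struct ending in a flexible array member:

	struct vop *vop;

	/* equivalent to sizeof(*vop) + n * sizeof(vop->win[0]), overflow-checked */
	vop = devm_kzalloc(dev, struct_size(vop, win, n), GFP_KERNEL);
	if (!vop)
		return -ENOMEM;
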
@@ -1620,6 +1643,14 @@ static int vop_bind(struct device *dev, struct device *master, void *data)
if (ret)
goto err_disable_pm_runtime;
+ if (vop->data->feature & VOP_FEATURE_INTERNAL_RGB) {
+ vop->rgb = rockchip_rgb_init(dev, &vop->crtc, vop->drm_dev);
+ if (IS_ERR(vop->rgb)) {
+ ret = PTR_ERR(vop->rgb);
+ goto err_disable_pm_runtime;
+ }
+ }
+
return 0;
err_disable_pm_runtime:
@@ -1632,6 +1663,9 @@ static void vop_unbind(struct device *dev, struct device *master, void *data)
{
struct vop *vop = dev_get_drvdata(dev);
+ if (vop->rgb)
+ rockchip_rgb_fini(vop->rgb);
+
pm_runtime_disable(dev);
vop_destroy_crtc(vop);
diff --git a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
index fcb91041a666..fd5765dfd637 100644
--- a/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
+++ b/drivers/gpu/drm/rockchip/rockchip_drm_vop.h
@@ -162,6 +162,7 @@ struct vop_data {
unsigned int win_size;
#define VOP_FEATURE_OUTPUT_RGB10 BIT(0)
+#define VOP_FEATURE_INTERNAL_RGB BIT(1)
u64 feature;
};
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.c b/drivers/gpu/drm/rockchip/rockchip_rgb.c
new file mode 100644
index 000000000000..96ac1458a59c
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.c
@@ -0,0 +1,173 @@
+// SPDX-License-Identifier: GPL-2.0+
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:
+ * Sandy Huang <hjc@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#include <drm/drmP.h>
+#include <drm/drm_atomic_helper.h>
+#include <drm/drm_crtc_helper.h>
+#include <drm/drm_dp_helper.h>
+#include <drm/drm_panel.h>
+#include <drm/drm_of.h>
+
+#include <linux/component.h>
+#include <linux/of_graph.h>
+
+#include "rockchip_drm_drv.h"
+#include "rockchip_drm_vop.h"
+
+#define encoder_to_rgb(c) container_of(c, struct rockchip_rgb, encoder)
+
+struct rockchip_rgb {
+ struct device *dev;
+ struct drm_device *drm_dev;
+ struct drm_bridge *bridge;
+ struct drm_encoder encoder;
+ int output_mode;
+};
+
+static int
+rockchip_rgb_encoder_atomic_check(struct drm_encoder *encoder,
+ struct drm_crtc_state *crtc_state,
+ struct drm_connector_state *conn_state)
+{
+ struct rockchip_crtc_state *s = to_rockchip_crtc_state(crtc_state);
+ struct drm_connector *connector = conn_state->connector;
+ struct drm_display_info *info = &connector->display_info;
+ u32 bus_format;
+
+ if (info->num_bus_formats)
+ bus_format = info->bus_formats[0];
+ else
+ bus_format = MEDIA_BUS_FMT_RGB888_1X24;
+
+ switch (bus_format) {
+ case MEDIA_BUS_FMT_RGB666_1X18:
+ s->output_mode = ROCKCHIP_OUT_MODE_P666;
+ break;
+ case MEDIA_BUS_FMT_RGB565_1X16:
+ s->output_mode = ROCKCHIP_OUT_MODE_P565;
+ break;
+ case MEDIA_BUS_FMT_RGB888_1X24:
+ case MEDIA_BUS_FMT_RGB666_1X24_CPADHI:
+ default:
+ s->output_mode = ROCKCHIP_OUT_MODE_P888;
+ break;
+ }
+
+ s->output_type = DRM_MODE_CONNECTOR_LVDS;
+
+ return 0;
+}
+
+static const
+struct drm_encoder_helper_funcs rockchip_rgb_encoder_helper_funcs = {
+ .atomic_check = rockchip_rgb_encoder_atomic_check,
+};
+
+static const struct drm_encoder_funcs rockchip_rgb_encoder_funcs = {
+ .destroy = drm_encoder_cleanup,
+};
+
+struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
+ struct drm_crtc *crtc,
+ struct drm_device *drm_dev)
+{
+ struct rockchip_rgb *rgb;
+ struct drm_encoder *encoder;
+ struct device_node *port, *endpoint;
+ u32 endpoint_id;
+ int ret = 0, child_count = 0;
+ struct drm_panel *panel;
+ struct drm_bridge *bridge;
+
+ rgb = devm_kzalloc(dev, sizeof(*rgb), GFP_KERNEL);
+ if (!rgb)
+ return ERR_PTR(-ENOMEM);
+
+ rgb->dev = dev;
+ rgb->drm_dev = drm_dev;
+
+ port = of_graph_get_port_by_id(dev->of_node, 0);
+ if (!port)
+ return ERR_PTR(-EINVAL);
+
+ for_each_child_of_node(port, endpoint) {
+ if (of_property_read_u32(endpoint, "reg", &endpoint_id))
+ endpoint_id = 0;
+
+ if (rockchip_drm_endpoint_is_subdriver(endpoint) > 0)
+ continue;
+
+ child_count++;
+ ret = drm_of_find_panel_or_bridge(dev->of_node, 0, endpoint_id,
+ &panel, &bridge);
+ if (!ret)
+ break;
+ }
+
+ of_node_put(port);
+
+ /* if the rgb output is not connected to anything, just return */
+ if (!child_count)
+ return NULL;
+
+ if (ret < 0) {
+ if (ret != -EPROBE_DEFER)
+ DRM_DEV_ERROR(dev, "failed to find panel or bridge %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ encoder = &rgb->encoder;
+ encoder->possible_crtcs = drm_crtc_mask(crtc);
+
+ ret = drm_encoder_init(drm_dev, encoder, &rockchip_rgb_encoder_funcs,
+ DRM_MODE_ENCODER_NONE, NULL);
+ if (ret < 0) {
+ DRM_DEV_ERROR(drm_dev->dev,
+ "failed to initialize encoder: %d\n", ret);
+ return ERR_PTR(ret);
+ }
+
+ drm_encoder_helper_add(encoder, &rockchip_rgb_encoder_helper_funcs);
+
+ if (panel) {
+ bridge = drm_panel_bridge_add(panel, DRM_MODE_CONNECTOR_LVDS);
+ if (IS_ERR(bridge)) {
+ ret = PTR_ERR(bridge);
+ goto err_free_encoder; /* don't leak the initialized encoder */
+ }
+ }
+
+ rgb->bridge = bridge;
+
+ ret = drm_bridge_attach(encoder, rgb->bridge, NULL);
+ if (ret) {
+ DRM_DEV_ERROR(drm_dev->dev,
+ "failed to attach bridge: %d\n", ret);
+ goto err_free_encoder;
+ }
+
+ return rgb;
+
+err_free_encoder:
+ drm_encoder_cleanup(encoder);
+ return ERR_PTR(ret);
+}
+EXPORT_SYMBOL_GPL(rockchip_rgb_init);
+
+void rockchip_rgb_fini(struct rockchip_rgb *rgb)
+{
+ drm_panel_bridge_remove(rgb->bridge);
+ drm_encoder_cleanup(&rgb->encoder);
+}
+EXPORT_SYMBOL_GPL(rockchip_rgb_fini);
diff --git a/drivers/gpu/drm/rockchip/rockchip_rgb.h b/drivers/gpu/drm/rockchip/rockchip_rgb.h
new file mode 100644
index 000000000000..38b52e63b2b0
--- /dev/null
+++ b/drivers/gpu/drm/rockchip/rockchip_rgb.h
@@ -0,0 +1,33 @@
+/* SPDX-License-Identifier: GPL-2.0+ */
+/*
+ * Copyright (C) Fuzhou Rockchip Electronics Co.Ltd
+ * Author:
+ * Sandy Huang <hjc@rock-chips.com>
+ *
+ * This software is licensed under the terms of the GNU General Public
+ * License version 2, as published by the Free Software Foundation, and
+ * may be copied, distributed, and modified under those terms.
+ *
+ * This program is distributed in the hope that it will be useful,
+ * but WITHOUT ANY WARRANTY; without even the implied warranty of
+ * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+ * GNU General Public License for more details.
+ */
+
+#ifdef CONFIG_ROCKCHIP_RGB
+struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
+ struct drm_crtc *crtc,
+ struct drm_device *drm_dev);
+void rockchip_rgb_fini(struct rockchip_rgb *rgb);
+#else
+static inline struct rockchip_rgb *rockchip_rgb_init(struct device *dev,
+ struct drm_crtc *crtc,
+ struct drm_device *drm_dev)
+{
+ return NULL;
+}
+
+static inline void rockchip_rgb_fini(struct rockchip_rgb *rgb)
+{
+}
+#endif
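
[Editor's note] Callers of rockchip_rgb_init() must distinguish three outcomes: NULL (no RGB output wired up, or CONFIG_ROCKCHIP_RGB disabled via the stub above), an ERR_PTR() such as -EPROBE_DEFER, or a valid handle. A minimal caller, mirroring the vop_bind() hunk earlier:

	struct rockchip_rgb *rgb = rockchip_rgb_init(dev, crtc, drm_dev);

	if (IS_ERR(rgb))
		return PTR_ERR(rgb);	/* may be -EPROBE_DEFER */
	if (rgb)
		vop->rgb = rgb;		/* remember for rockchip_rgb_fini() */
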
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
index 08023d3ecb76..a6db3cd5544b 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.c
@@ -177,6 +177,215 @@ static const struct vop_data rk3126_vop = {
.win_size = ARRAY_SIZE(rk3126_vop_win_data),
};
+static const int px30_vop_intrs[] = {
+ FS_INTR,
+ 0, 0,
+ LINE_FLAG_INTR,
+ 0,
+ BUS_ERROR_INTR,
+ 0, 0,
+ DSP_HOLD_VALID_INTR,
+};
+
+static const struct vop_intr px30_intr = {
+ .intrs = px30_vop_intrs,
+ .nintrs = ARRAY_SIZE(px30_vop_intrs),
+ .line_flag_num[0] = VOP_REG(PX30_LINE_FLAG, 0xfff, 0),
+ .status = VOP_REG_MASK_SYNC(PX30_INTR_STATUS, 0xffff, 0),
+ .enable = VOP_REG_MASK_SYNC(PX30_INTR_EN, 0xffff, 0),
+ .clear = VOP_REG_MASK_SYNC(PX30_INTR_CLEAR, 0xffff, 0),
+};
+
+static const struct vop_common px30_common = {
+ .standby = VOP_REG_SYNC(PX30_SYS_CTRL2, 0x1, 1),
+ .out_mode = VOP_REG(PX30_DSP_CTRL2, 0xf, 16),
+ .dsp_blank = VOP_REG(PX30_DSP_CTRL2, 0x1, 14),
+ .cfg_done = VOP_REG_SYNC(PX30_REG_CFG_DONE, 0x1, 0),
+};
+
+static const struct vop_modeset px30_modeset = {
+ .htotal_pw = VOP_REG(PX30_DSP_HTOTAL_HS_END, 0x0fff0fff, 0),
+ .hact_st_end = VOP_REG(PX30_DSP_HACT_ST_END, 0x0fff0fff, 0),
+ .vtotal_pw = VOP_REG(PX30_DSP_VTOTAL_VS_END, 0x0fff0fff, 0),
+ .vact_st_end = VOP_REG(PX30_DSP_VACT_ST_END, 0x0fff0fff, 0),
+};
+
+static const struct vop_output px30_output = {
+ .rgb_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0xf, 1),
+ .mipi_pin_pol = VOP_REG(PX30_DSP_CTRL0, 0xf, 25),
+ .rgb_en = VOP_REG(PX30_DSP_CTRL0, 0x1, 0),
+ .mipi_en = VOP_REG(PX30_DSP_CTRL0, 0x1, 24),
+};
+
+static const struct vop_scl_regs px30_win_scl = {
+ .scale_yrgb_x = VOP_REG(PX30_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
+ .scale_yrgb_y = VOP_REG(PX30_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
+ .scale_cbcr_x = VOP_REG(PX30_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
+ .scale_cbcr_y = VOP_REG(PX30_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
+};
+
+static const struct vop_win_phy px30_win0_data = {
+ .scl = &px30_win_scl,
+ .data_formats = formats_win_full,
+ .nformats = ARRAY_SIZE(formats_win_full),
+ .enable = VOP_REG(PX30_WIN0_CTRL0, 0x1, 0),
+ .format = VOP_REG(PX30_WIN0_CTRL0, 0x7, 1),
+ .rb_swap = VOP_REG(PX30_WIN0_CTRL0, 0x1, 12),
+ .act_info = VOP_REG(PX30_WIN0_ACT_INFO, 0xffffffff, 0),
+ .dsp_info = VOP_REG(PX30_WIN0_DSP_INFO, 0xffffffff, 0),
+ .dsp_st = VOP_REG(PX30_WIN0_DSP_ST, 0xffffffff, 0),
+ .yrgb_mst = VOP_REG(PX30_WIN0_YRGB_MST0, 0xffffffff, 0),
+ .uv_mst = VOP_REG(PX30_WIN0_CBR_MST0, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(PX30_WIN0_VIR, 0x1fff, 0),
+ .uv_vir = VOP_REG(PX30_WIN0_VIR, 0x1fff, 16),
+};
+
+static const struct vop_win_phy px30_win1_data = {
+ .data_formats = formats_win_lite,
+ .nformats = ARRAY_SIZE(formats_win_lite),
+ .enable = VOP_REG(PX30_WIN1_CTRL0, 0x1, 0),
+ .format = VOP_REG(PX30_WIN1_CTRL0, 0x7, 4),
+ .rb_swap = VOP_REG(PX30_WIN1_CTRL0, 0x1, 12),
+ .dsp_info = VOP_REG(PX30_WIN1_DSP_INFO, 0xffffffff, 0),
+ .dsp_st = VOP_REG(PX30_WIN1_DSP_ST, 0xffffffff, 0),
+ .yrgb_mst = VOP_REG(PX30_WIN1_MST, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(PX30_WIN1_VIR, 0x1fff, 0),
+};
+
+static const struct vop_win_phy px30_win2_data = {
+ .data_formats = formats_win_lite,
+ .nformats = ARRAY_SIZE(formats_win_lite),
+ .gate = VOP_REG(PX30_WIN2_CTRL0, 0x1, 4),
+ .enable = VOP_REG(PX30_WIN2_CTRL0, 0x1, 0),
+ .format = VOP_REG(PX30_WIN2_CTRL0, 0x3, 5),
+ .rb_swap = VOP_REG(PX30_WIN2_CTRL0, 0x1, 20),
+ .dsp_info = VOP_REG(PX30_WIN2_DSP_INFO0, 0x0fff0fff, 0),
+ .dsp_st = VOP_REG(PX30_WIN2_DSP_ST0, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(PX30_WIN2_MST0, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(PX30_WIN2_VIR0_1, 0x1fff, 0),
+};
+
+static const struct vop_win_data px30_vop_big_win_data[] = {
+ { .base = 0x00, .phy = &px30_win0_data,
+ .type = DRM_PLANE_TYPE_PRIMARY },
+ { .base = 0x00, .phy = &px30_win1_data,
+ .type = DRM_PLANE_TYPE_OVERLAY },
+ { .base = 0x00, .phy = &px30_win2_data,
+ .type = DRM_PLANE_TYPE_CURSOR },
+};
+
+static const struct vop_data px30_vop_big = {
+ .intr = &px30_intr,
+ .feature = VOP_FEATURE_INTERNAL_RGB,
+ .common = &px30_common,
+ .modeset = &px30_modeset,
+ .output = &px30_output,
+ .win = px30_vop_big_win_data,
+ .win_size = ARRAY_SIZE(px30_vop_big_win_data),
+};
+
+static const struct vop_win_data px30_vop_lit_win_data[] = {
+ { .base = 0x00, .phy = &px30_win1_data,
+ .type = DRM_PLANE_TYPE_PRIMARY },
+};
+
+static const struct vop_data px30_vop_lit = {
+ .intr = &px30_intr,
+ .feature = VOP_FEATURE_INTERNAL_RGB,
+ .common = &px30_common,
+ .modeset = &px30_modeset,
+ .output = &px30_output,
+ .win = px30_vop_lit_win_data,
+ .win_size = ARRAY_SIZE(px30_vop_lit_win_data),
+};
+
+static const struct vop_scl_regs rk3188_win_scl = {
+ .scale_yrgb_x = VOP_REG(RK3188_WIN0_SCL_FACTOR_YRGB, 0xffff, 0x0),
+ .scale_yrgb_y = VOP_REG(RK3188_WIN0_SCL_FACTOR_YRGB, 0xffff, 16),
+ .scale_cbcr_x = VOP_REG(RK3188_WIN0_SCL_FACTOR_CBR, 0xffff, 0x0),
+ .scale_cbcr_y = VOP_REG(RK3188_WIN0_SCL_FACTOR_CBR, 0xffff, 16),
+};
+
+static const struct vop_win_phy rk3188_win0_data = {
+ .scl = &rk3188_win_scl,
+ .data_formats = formats_win_full,
+ .nformats = ARRAY_SIZE(formats_win_full),
+ .enable = VOP_REG(RK3188_SYS_CTRL, 0x1, 0),
+ .format = VOP_REG(RK3188_SYS_CTRL, 0x7, 3),
+ .rb_swap = VOP_REG(RK3188_SYS_CTRL, 0x1, 15),
+ .act_info = VOP_REG(RK3188_WIN0_ACT_INFO, 0x1fff1fff, 0),
+ .dsp_info = VOP_REG(RK3188_WIN0_DSP_INFO, 0x0fff0fff, 0),
+ .dsp_st = VOP_REG(RK3188_WIN0_DSP_ST, 0x1fff1fff, 0),
+ .yrgb_mst = VOP_REG(RK3188_WIN0_YRGB_MST0, 0xffffffff, 0),
+ .uv_mst = VOP_REG(RK3188_WIN0_CBR_MST0, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(RK3188_WIN_VIR, 0x1fff, 0),
+};
+
+static const struct vop_win_phy rk3188_win1_data = {
+ .data_formats = formats_win_lite,
+ .nformats = ARRAY_SIZE(formats_win_lite),
+ .enable = VOP_REG(RK3188_SYS_CTRL, 0x1, 1),
+ .format = VOP_REG(RK3188_SYS_CTRL, 0x7, 6),
+ .rb_swap = VOP_REG(RK3188_SYS_CTRL, 0x1, 19),
+ /* no act_info on window1 */
+ .dsp_info = VOP_REG(RK3188_WIN1_DSP_INFO, 0x07ff07ff, 0),
+ .dsp_st = VOP_REG(RK3188_WIN1_DSP_ST, 0x0fff0fff, 0),
+ .yrgb_mst = VOP_REG(RK3188_WIN1_MST, 0xffffffff, 0),
+ .yrgb_vir = VOP_REG(RK3188_WIN_VIR, 0x1fff, 16),
+};
+
+static const struct vop_modeset rk3188_modeset = {
+ .htotal_pw = VOP_REG(RK3188_DSP_HTOTAL_HS_END, 0x0fff0fff, 0),
+ .hact_st_end = VOP_REG(RK3188_DSP_HACT_ST_END, 0x0fff0fff, 0),
+ .vtotal_pw = VOP_REG(RK3188_DSP_VTOTAL_VS_END, 0x0fff0fff, 0),
+ .vact_st_end = VOP_REG(RK3188_DSP_VACT_ST_END, 0x0fff0fff, 0),
+};
+
+static const struct vop_output rk3188_output = {
+ .pin_pol = VOP_REG(RK3188_DSP_CTRL0, 0xf, 4),
+};
+
+static const struct vop_common rk3188_common = {
+ .gate_en = VOP_REG(RK3188_SYS_CTRL, 0x1, 31),
+ .standby = VOP_REG(RK3188_SYS_CTRL, 0x1, 30),
+ .out_mode = VOP_REG(RK3188_DSP_CTRL0, 0xf, 0),
+ .cfg_done = VOP_REG(RK3188_REG_CFG_DONE, 0x1, 0),
+ .dsp_blank = VOP_REG(RK3188_DSP_CTRL1, 0x3, 24),
+};
+
+static const struct vop_win_data rk3188_vop_win_data[] = {
+ { .base = 0x00, .phy = &rk3188_win0_data,
+ .type = DRM_PLANE_TYPE_PRIMARY },
+ { .base = 0x00, .phy = &rk3188_win1_data,
+ .type = DRM_PLANE_TYPE_CURSOR },
+};
+
+static const int rk3188_vop_intrs[] = {
+ 0,
+ FS_INTR,
+ LINE_FLAG_INTR,
+ BUS_ERROR_INTR,
+};
+
+static const struct vop_intr rk3188_vop_intr = {
+ .intrs = rk3188_vop_intrs,
+ .nintrs = ARRAY_SIZE(rk3188_vop_intrs),
+ .line_flag_num[0] = VOP_REG(RK3188_INT_STATUS, 0xfff, 12),
+ .status = VOP_REG(RK3188_INT_STATUS, 0xf, 0),
+ .enable = VOP_REG(RK3188_INT_STATUS, 0xf, 4),
+ .clear = VOP_REG(RK3188_INT_STATUS, 0xf, 8),
+};
+
+static const struct vop_data rk3188_vop = {
+ .intr = &rk3188_vop_intr,
+ .common = &rk3188_common,
+ .modeset = &rk3188_modeset,
+ .output = &rk3188_output,
+ .win = rk3188_vop_win_data,
+ .win_size = ARRAY_SIZE(rk3188_vop_win_data),
+ .feature = VOP_FEATURE_INTERNAL_RGB,
+};
+
static const struct vop_scl_extension rk3288_win_full_scl_ext = {
.cbcr_vsd_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 31),
.cbcr_vsu_mode = VOP_REG(RK3288_WIN0_CTRL1, 0x1, 30),
@@ -541,6 +750,12 @@ static const struct of_device_id vop_driver_dt_match[] = {
.data = &rk3036_vop },
{ .compatible = "rockchip,rk3126-vop",
.data = &rk3126_vop },
+ { .compatible = "rockchip,px30-vop-big",
+ .data = &px30_vop_big },
+ { .compatible = "rockchip,px30-vop-lit",
+ .data = &px30_vop_lit },
+ { .compatible = "rockchip,rk3188-vop",
+ .data = &rk3188_vop },
{ .compatible = "rockchip,rk3288-vop",
.data = &rk3288_vop },
{ .compatible = "rockchip,rk3368-vop",
diff --git a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
index f81b510ea99c..7348c68352ed 100644
--- a/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
+++ b/drivers/gpu/drm/rockchip/rockchip_vop_reg.h
@@ -884,4 +884,103 @@
#define RK3126_WIN1_DSP_ST 0x54
/* rk3126 register definition end */
+/* px30 register definition */
+#define PX30_REG_CFG_DONE 0x00000
+#define PX30_VERSION 0x00004
+#define PX30_DSP_BG 0x00008
+#define PX30_MCU_CTRL 0x0000c
+#define PX30_SYS_CTRL0 0x00010
+#define PX30_SYS_CTRL1 0x00014
+#define PX30_SYS_CTRL2 0x00018
+#define PX30_DSP_CTRL0 0x00020
+#define PX30_DSP_CTRL2 0x00028
+#define PX30_VOP_STATUS 0x0002c
+#define PX30_LINE_FLAG 0x00030
+#define PX30_INTR_EN 0x00034
+#define PX30_INTR_CLEAR 0x00038
+#define PX30_INTR_STATUS 0x0003c
+#define PX30_WIN0_CTRL0 0x00050
+#define PX30_WIN0_CTRL1 0x00054
+#define PX30_WIN0_COLOR_KEY 0x00058
+#define PX30_WIN0_VIR 0x0005c
+#define PX30_WIN0_YRGB_MST0 0x00060
+#define PX30_WIN0_CBR_MST0 0x00064
+#define PX30_WIN0_ACT_INFO 0x00068
+#define PX30_WIN0_DSP_INFO 0x0006c
+#define PX30_WIN0_DSP_ST 0x00070
+#define PX30_WIN0_SCL_FACTOR_YRGB 0x00074
+#define PX30_WIN0_SCL_FACTOR_CBR 0x00078
+#define PX30_WIN0_SCL_OFFSET 0x0007c
+#define PX30_WIN0_ALPHA_CTRL 0x00080
+#define PX30_WIN1_CTRL0 0x00090
+#define PX30_WIN1_CTRL1 0x00094
+#define PX30_WIN1_VIR 0x00098
+#define PX30_WIN1_MST 0x000a0
+#define PX30_WIN1_DSP_INFO 0x000a4
+#define PX30_WIN1_DSP_ST 0x000a8
+#define PX30_WIN1_COLOR_KEY 0x000ac
+#define PX30_WIN1_ALPHA_CTRL 0x000bc
+#define PX30_HWC_CTRL0 0x000e0
+#define PX30_HWC_CTRL1 0x000e4
+#define PX30_HWC_MST 0x000e8
+#define PX30_HWC_DSP_ST 0x000ec
+#define PX30_HWC_ALPHA_CTRL 0x000f0
+#define PX30_DSP_HTOTAL_HS_END 0x00100
+#define PX30_DSP_HACT_ST_END 0x00104
+#define PX30_DSP_VTOTAL_VS_END 0x00108
+#define PX30_DSP_VACT_ST_END 0x0010c
+#define PX30_DSP_VS_ST_END_F1 0x00110
+#define PX30_DSP_VACT_ST_END_F1 0x00114
+#define PX30_BCSH_CTRL 0x00160
+#define PX30_BCSH_COL_BAR 0x00164
+#define PX30_BCSH_BCS 0x00168
+#define PX30_BCSH_H 0x0016c
+#define PX30_FRC_LOWER01_0 0x00170
+#define PX30_FRC_LOWER01_1 0x00174
+#define PX30_FRC_LOWER10_0 0x00178
+#define PX30_FRC_LOWER10_1 0x0017c
+#define PX30_FRC_LOWER11_0 0x00180
+#define PX30_FRC_LOWER11_1 0x00184
+#define PX30_MCU_RW_BYPASS_PORT 0x0018c
+#define PX30_WIN2_CTRL0 0x00190
+#define PX30_WIN2_CTRL1 0x00194
+#define PX30_WIN2_VIR0_1 0x00198
+#define PX30_WIN2_VIR2_3 0x0019c
+#define PX30_WIN2_MST0 0x001a0
+#define PX30_WIN2_DSP_INFO0 0x001a4
+#define PX30_WIN2_DSP_ST0 0x001a8
+#define PX30_WIN2_COLOR_KEY 0x001ac
+#define PX30_WIN2_ALPHA_CTRL 0x001bc
+#define PX30_BLANKING_VALUE 0x001f4
+#define PX30_FLAG_REG_FRM_VALID 0x001f8
+#define PX30_FLAG_REG 0x001fc
+#define PX30_HWC_LUT_ADDR 0x00600
+#define PX30_GAMMA_LUT_ADDR 0x00a00
+/* px30 register definition end */
+
+/* rk3188 register definition */
+#define RK3188_SYS_CTRL 0x00
+#define RK3188_DSP_CTRL0 0x04
+#define RK3188_DSP_CTRL1 0x08
+#define RK3188_INT_STATUS 0x10
+#define RK3188_WIN0_YRGB_MST0 0x20
+#define RK3188_WIN0_CBR_MST0 0x24
+#define RK3188_WIN0_YRGB_MST1 0x28
+#define RK3188_WIN0_CBR_MST1 0x2c
+#define RK3188_WIN_VIR 0x30
+#define RK3188_WIN0_ACT_INFO 0x34
+#define RK3188_WIN0_DSP_INFO 0x38
+#define RK3188_WIN0_DSP_ST 0x3c
+#define RK3188_WIN0_SCL_FACTOR_YRGB 0x40
+#define RK3188_WIN0_SCL_FACTOR_CBR 0x44
+#define RK3188_WIN1_MST 0x4c
+#define RK3188_WIN1_DSP_INFO 0x50
+#define RK3188_WIN1_DSP_ST 0x54
+#define RK3188_DSP_HTOTAL_HS_END 0x6c
+#define RK3188_DSP_HACT_ST_END 0x70
+#define RK3188_DSP_VTOTAL_VS_END 0x74
+#define RK3188_DSP_VACT_ST_END 0x78
+#define RK3188_REG_CFG_DONE 0x90
+/* rk3188 register definition end */
+
#endif /* _ROCKCHIP_VOP_REG_H */
diff --git a/drivers/gpu/drm/scheduler/Makefile b/drivers/gpu/drm/scheduler/Makefile
index bd0377c0d2ee..7665883f81d4 100644
--- a/drivers/gpu/drm/scheduler/Makefile
+++ b/drivers/gpu/drm/scheduler/Makefile
@@ -20,7 +20,6 @@
# OTHER DEALINGS IN THE SOFTWARE.
#
#
-ccflags-y := -Iinclude/drm
gpu-sched-y := gpu_scheduler.o sched_fence.o
obj-$(CONFIG_DRM_SCHED) += gpu-sched.o
diff --git a/drivers/gpu/drm/scheduler/gpu_scheduler.c b/drivers/gpu/drm/scheduler/gpu_scheduler.c
index dac71e3b4514..4fc211e19d6e 100644
--- a/drivers/gpu/drm/scheduler/gpu_scheduler.c
+++ b/drivers/gpu/drm/scheduler/gpu_scheduler.c
@@ -185,7 +185,6 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
memset(entity, 0, sizeof(struct drm_sched_entity));
INIT_LIST_HEAD(&entity->list);
entity->rq = rq_list[0];
- entity->sched = rq_list[0]->sched;
entity->guilty = guilty;
entity->last_scheduled = NULL;
@@ -200,21 +199,6 @@ int drm_sched_entity_init(struct drm_sched_entity *entity,
EXPORT_SYMBOL(drm_sched_entity_init);
/**
- * drm_sched_entity_is_initialized - Query if entity is initialized
- *
- * @sched: Pointer to scheduler instance
- * @entity: The pointer to a valid scheduler entity
- *
- * return true if entity is initialized, false otherwise
-*/
-static bool drm_sched_entity_is_initialized(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity)
-{
- return entity->sched == sched &&
- entity->rq != NULL;
-}
-
-/**
* drm_sched_entity_is_idle - Check if entity is idle
*
* @entity: scheduler entity
@@ -225,7 +209,8 @@ static bool drm_sched_entity_is_idle(struct drm_sched_entity *entity)
{
rmb();
- if (!entity->rq || spsc_queue_peek(&entity->job_queue) == NULL)
+ if (list_empty(&entity->list) ||
+ spsc_queue_peek(&entity->job_queue) == NULL)
return true;
return false;
@@ -264,7 +249,6 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
/**
* drm_sched_entity_flush - Flush a context entity
*
- * @sched: scheduler instance
* @entity: scheduler entity
 * @timeout: time in jiffies to wait for the queue to become empty.
*
@@ -273,13 +257,13 @@ static void drm_sched_entity_kill_jobs_cb(struct dma_fence *f,
*
* Returns the remaining time in jiffies left from the input timeout
*/
-long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity, long timeout)
+long drm_sched_entity_flush(struct drm_sched_entity *entity, long timeout)
{
+ struct drm_gpu_scheduler *sched;
+ struct task_struct *last_user;
long ret = timeout;
- if (!drm_sched_entity_is_initialized(sched, entity))
- return ret;
+ sched = entity->rq->sched;
/**
 * The client will not queue more IBs during this fini; consume the
 * existing queued IBs or discard them on SIGKILL
@@ -295,8 +279,10 @@ long drm_sched_entity_flush(struct drm_gpu_scheduler *sched,
/* For killed process disable any more IBs enqueue right now */
- if ((current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
- drm_sched_entity_set_rq(entity, NULL);
+ last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
+ if ((!last_user || last_user == current->group_leader) &&
+ (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
+ drm_sched_rq_remove_entity(entity->rq, entity);
return ret;
}
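Condensed sketch of the last_user handshake this hunk introduces, pairing the push and flush sides (sketch_push_side/sketch_flush_side are illustrative; the fields are the ones shown in this series):

static void sketch_push_side(struct drm_sched_entity *entity)
{
	/* record the submitting process on every job push */
	WRITE_ONCE(entity->last_user, current->group_leader);
}

static void sketch_flush_side(struct drm_sched_entity *entity)
{
	struct task_struct *last_user;

	/* claim-or-clear: succeeds only if we were the last submitter */
	last_user = cmpxchg(&entity->last_user, current->group_leader, NULL);
	if ((!last_user || last_user == current->group_leader) &&
	    (current->flags & PF_EXITING) && (current->exit_code == SIGKILL))
		drm_sched_rq_remove_entity(entity->rq, entity);
}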
@@ -305,18 +291,18 @@ EXPORT_SYMBOL(drm_sched_entity_flush);
/**
* drm_sched_entity_cleanup - Destroy a context entity
*
- * @sched: scheduler instance
* @entity: scheduler entity
*
* This should be called after @drm_sched_entity_do_release. It goes over the
* entity and signals all jobs with an error code if the process was killed.
*
*/
-void drm_sched_entity_fini(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity)
+void drm_sched_entity_fini(struct drm_sched_entity *entity)
{
+ struct drm_gpu_scheduler *sched;
- drm_sched_entity_set_rq(entity, NULL);
+ sched = entity->rq->sched;
+ drm_sched_rq_remove_entity(entity->rq, entity);
/* Consumption of existing IBs wasn't completed. Forcefully
* remove them here.
@@ -368,16 +354,14 @@ EXPORT_SYMBOL(drm_sched_entity_fini);
/**
 * drm_sched_entity_destroy - Destroy a context entity
*
- * @sched: scheduler instance
* @entity: scheduler entity
*
* Calls drm_sched_entity_do_release() and drm_sched_entity_cleanup()
*/
-void drm_sched_entity_destroy(struct drm_gpu_scheduler *sched,
- struct drm_sched_entity *entity)
+void drm_sched_entity_destroy(struct drm_sched_entity *entity)
{
- drm_sched_entity_flush(sched, entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
- drm_sched_entity_fini(sched, entity);
+ drm_sched_entity_flush(entity, MAX_WAIT_SCHED_ENTITY_Q_EMPTY);
+ drm_sched_entity_fini(entity);
}
EXPORT_SYMBOL(drm_sched_entity_destroy);
@@ -387,7 +371,7 @@ static void drm_sched_entity_wakeup(struct dma_fence *f, struct dma_fence_cb *cb
container_of(cb, struct drm_sched_entity, cb);
entity->dependency = NULL;
dma_fence_put(f);
- drm_sched_wakeup(entity->sched);
+ drm_sched_wakeup(entity->rq->sched);
}
static void drm_sched_entity_clear_dep(struct dma_fence *f, struct dma_fence_cb *cb)
@@ -413,15 +397,12 @@ void drm_sched_entity_set_rq(struct drm_sched_entity *entity,
if (entity->rq == rq)
return;
- spin_lock(&entity->rq_lock);
-
- if (entity->rq)
- drm_sched_rq_remove_entity(entity->rq, entity);
+ BUG_ON(!rq);
+ spin_lock(&entity->rq_lock);
+ drm_sched_rq_remove_entity(entity->rq, entity);
entity->rq = rq;
- if (rq)
- drm_sched_rq_add_entity(rq, entity);
-
+ drm_sched_rq_add_entity(rq, entity);
spin_unlock(&entity->rq_lock);
}
EXPORT_SYMBOL(drm_sched_entity_set_rq);
@@ -437,7 +418,7 @@ EXPORT_SYMBOL(drm_sched_entity_set_rq);
bool drm_sched_dependency_optimized(struct dma_fence* fence,
struct drm_sched_entity *entity)
{
- struct drm_gpu_scheduler *sched = entity->sched;
+ struct drm_gpu_scheduler *sched = entity->rq->sched;
struct drm_sched_fence *s_fence;
if (!fence || dma_fence_is_signaled(fence))
@@ -454,7 +435,7 @@ EXPORT_SYMBOL(drm_sched_dependency_optimized);
static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
{
- struct drm_gpu_scheduler *sched = entity->sched;
+ struct drm_gpu_scheduler *sched = entity->rq->sched;
struct dma_fence * fence = entity->dependency;
struct drm_sched_fence *s_fence;
@@ -499,7 +480,7 @@ static bool drm_sched_entity_add_dependency_cb(struct drm_sched_entity *entity)
static struct drm_sched_job *
drm_sched_entity_pop_job(struct drm_sched_entity *entity)
{
- struct drm_gpu_scheduler *sched = entity->sched;
+ struct drm_gpu_scheduler *sched = entity->rq->sched;
struct drm_sched_job *sched_job = to_drm_sched_job(
spsc_queue_peek(&entity->job_queue));
@@ -541,6 +522,7 @@ void drm_sched_entity_push_job(struct drm_sched_job *sched_job,
trace_drm_sched_job(sched_job, entity);
+ WRITE_ONCE(entity->last_user, current->group_leader);
first = spsc_queue_push(&entity->job_queue, &sched_job->queue_node);
/* first job wakes up scheduler */
@@ -567,24 +549,28 @@ static void drm_sched_job_finish(struct work_struct *work)
finish_work);
struct drm_gpu_scheduler *sched = s_job->sched;
- /* remove job from ring_mirror_list */
- spin_lock(&sched->job_list_lock);
- list_del_init(&s_job->node);
- if (sched->timeout != MAX_SCHEDULE_TIMEOUT) {
- struct drm_sched_job *next;
-
- spin_unlock(&sched->job_list_lock);
- cancel_delayed_work_sync(&s_job->work_tdr);
- spin_lock(&sched->job_list_lock);
+ /*
+ * Canceling the timeout without removing our job from the ring mirror
+ * list is safe, as we will only end up in this worker if our job's
+ * finished fence has been signaled. So even if another worker manages
+ * to find this job as the next job in the list, the fence-signaled
+ * check below will prevent the timeout from being restarted.
+ */
+ cancel_delayed_work_sync(&s_job->work_tdr);
- /* queue TDR for next job */
- next = list_first_entry_or_null(&sched->ring_mirror_list,
- struct drm_sched_job, node);
+ spin_lock(&sched->job_list_lock);
+ /* queue TDR for next job */
+ if (sched->timeout != MAX_SCHEDULE_TIMEOUT &&
+ !list_is_last(&s_job->node, &sched->ring_mirror_list)) {
+ struct drm_sched_job *next = list_next_entry(s_job, node);
- if (next)
+ if (!dma_fence_is_signaled(&next->s_fence->finished))
schedule_delayed_work(&next->work_tdr, sched->timeout);
}
+ /* remove job from ring_mirror_list */
+ list_del(&s_job->node);
spin_unlock(&sched->job_list_lock);
+
dma_fence_put(&s_job->s_fence->finished);
sched->ops->free_job(s_job);
}
@@ -730,7 +716,6 @@ EXPORT_SYMBOL(drm_sched_job_recovery);
* drm_sched_job_init - init a scheduler job
*
* @job: scheduler job to init
- * @sched: scheduler instance
* @entity: scheduler entity to use
* @owner: job owner for debugging
*
@@ -740,10 +725,11 @@ EXPORT_SYMBOL(drm_sched_job_recovery);
* Returns 0 for success, negative error code otherwise.
*/
int drm_sched_job_init(struct drm_sched_job *job,
- struct drm_gpu_scheduler *sched,
struct drm_sched_entity *entity,
void *owner)
{
+ struct drm_gpu_scheduler *sched = entity->rq->sched;
+
job->sched = sched;
job->entity = entity;
job->s_priority = entity->rq - sched->sched_rq;
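With the scheduler now derived from entity->rq, submission paths drop the sched argument. A hedged caller sketch (my_submit is hypothetical):

static int my_submit(struct drm_sched_job *job,
		     struct drm_sched_entity *entity, void *owner)
{
	/* sched is taken from entity->rq->sched internally now */
	int ret = drm_sched_job_init(job, entity, owner);

	if (ret)
		return ret;

	drm_sched_entity_push_job(job, entity);
	return 0;
}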
diff --git a/drivers/gpu/drm/scheduler/sched_fence.c b/drivers/gpu/drm/scheduler/sched_fence.c
index 45d9c3affbea..d8d2dff9ea2f 100644
--- a/drivers/gpu/drm/scheduler/sched_fence.c
+++ b/drivers/gpu/drm/scheduler/sched_fence.c
@@ -161,7 +161,7 @@ struct drm_sched_fence *drm_sched_fence_create(struct drm_sched_entity *entity,
return NULL;
fence->owner = owner;
- fence->sched = entity->sched;
+ fence->sched = entity->rq->sched;
spin_lock_init(&fence->lock);
seq = atomic_inc_return(&entity->fence_seq);
diff --git a/drivers/gpu/drm/sti/sti_hda.c b/drivers/gpu/drm/sti/sti_hda.c
index 49438337f70d..19b9b5ed1297 100644
--- a/drivers/gpu/drm/sti/sti_hda.c
+++ b/drivers/gpu/drm/sti/sti_hda.c
@@ -721,7 +721,6 @@ static int sti_hda_bind(struct device *dev, struct device *master, void *data)
return 0;
err_sysfs:
- drm_bridge_remove(bridge);
return -EINVAL;
}
diff --git a/drivers/gpu/drm/sti/sti_hdmi.c b/drivers/gpu/drm/sti/sti_hdmi.c
index 34cdc4644435..ccf718404a1c 100644
--- a/drivers/gpu/drm/sti/sti_hdmi.c
+++ b/drivers/gpu/drm/sti/sti_hdmi.c
@@ -1315,7 +1315,6 @@ static int sti_hdmi_bind(struct device *dev, struct device *master, void *data)
return 0;
err_sysfs:
- drm_bridge_remove(bridge);
hdmi->drm_connector = NULL;
return -EINVAL;
}
diff --git a/drivers/gpu/drm/sun4i/Makefile b/drivers/gpu/drm/sun4i/Makefile
index b04ea0f3da75..0eb38ac8e86e 100644
--- a/drivers/gpu/drm/sun4i/Makefile
+++ b/drivers/gpu/drm/sun4i/Makefile
@@ -32,7 +32,10 @@ obj-$(CONFIG_DRM_SUN4I) += sun4i-tcon.o
obj-$(CONFIG_DRM_SUN4I) += sun4i_tv.o
obj-$(CONFIG_DRM_SUN4I) += sun6i_drc.o
-obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o sun4i-frontend.o
+obj-$(CONFIG_DRM_SUN4I_BACKEND) += sun4i-backend.o
+ifdef CONFIG_DRM_SUN4I_BACKEND
+obj-$(CONFIG_DRM_SUN4I) += sun4i-frontend.o
+endif
obj-$(CONFIG_DRM_SUN4I_HDMI) += sun4i-drm-hdmi.o
obj-$(CONFIG_DRM_SUN6I_DSI) += sun6i-dsi.o
obj-$(CONFIG_DRM_SUN8I_DW_HDMI) += sun8i-drm-hdmi.o
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.c b/drivers/gpu/drm/sun4i/sun4i_backend.c
index d7950b52a1fd..bf49c55b0f2c 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.c
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.c
@@ -34,6 +34,9 @@
struct sun4i_backend_quirks {
/* backend <-> TCON muxing selection done in backend */
bool needs_output_muxing;
+
+ /* alpha at the lowest z position is not always supported */
+ bool supports_lowest_plane_alpha;
};
static const u32 sunxi_rgb2yuv_coef[12] = {
@@ -60,32 +63,6 @@ static const u32 sunxi_bt601_yuv2rgb_coef[12] = {
0x000004a7, 0x00000812, 0x00000000, 0x00002eb1,
};
-static inline bool sun4i_backend_format_is_planar_yuv(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_YUV411:
- case DRM_FORMAT_YUV422:
- case DRM_FORMAT_YUV444:
- return true;
- default:
- return false;
- }
-}
-
-static inline bool sun4i_backend_format_is_packed_yuv422(uint32_t format)
-{
- switch (format) {
- case DRM_FORMAT_YUYV:
- case DRM_FORMAT_YVYU:
- case DRM_FORMAT_UYVY:
- case DRM_FORMAT_VYUY:
- return true;
-
- default:
- return false;
- }
-}
-
static void sun4i_backend_apply_color_correction(struct sunxi_engine *engine)
{
int i;
@@ -215,7 +192,8 @@ static int sun4i_backend_update_yuv_format(struct sun4i_backend *backend,
{
struct drm_plane_state *state = plane->state;
struct drm_framebuffer *fb = state->fb;
- uint32_t format = fb->format->format;
+ const struct drm_format_info *format = fb->format;
+ const uint32_t fmt = format->format;
u32 val = SUN4I_BACKEND_IYUVCTL_EN;
int i;
@@ -233,16 +211,16 @@ static int sun4i_backend_update_yuv_format(struct sun4i_backend *backend,
SUN4I_BACKEND_ATTCTL_REG0_LAY_YUVEN);
/* TODO: Add support for the multi-planar YUV formats */
- if (sun4i_backend_format_is_packed_yuv422(format))
+ if (format->num_planes == 1)
val |= SUN4I_BACKEND_IYUVCTL_FBFMT_PACKED_YUV422;
else
- DRM_DEBUG_DRIVER("Unsupported YUV format (0x%x)\n", format);
+ DRM_DEBUG_DRIVER("Unsupported YUV format (0x%x)\n", fmt);
/*
* Allwinner seems to list the pixel sequence from right to left, while
* DRM lists it from left to right.
*/
- switch (format) {
+ switch (fmt) {
case DRM_FORMAT_YUYV:
val |= SUN4I_BACKEND_IYUVCTL_FBPS_VYUY;
break;
@@ -257,7 +235,7 @@ static int sun4i_backend_update_yuv_format(struct sun4i_backend *backend,
break;
default:
DRM_DEBUG_DRIVER("Unsupported YUV pixel sequence (0x%x)\n",
- format);
+ fmt);
}
regmap_write(backend->engine.regs, SUN4I_BACKEND_IYUVCTL_REG, val);
@@ -457,12 +435,14 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
struct drm_crtc_state *crtc_state)
{
struct drm_plane_state *plane_states[SUN4I_BACKEND_NUM_LAYERS] = { 0 };
+ struct sun4i_backend *backend = engine_to_sun4i_backend(engine);
struct drm_atomic_state *state = crtc_state->state;
struct drm_device *drm = state->dev;
struct drm_plane *plane;
unsigned int num_planes = 0;
unsigned int num_alpha_planes = 0;
unsigned int num_frontend_planes = 0;
+ unsigned int num_alpha_planes_max = 1;
unsigned int num_yuv_planes = 0;
unsigned int current_pipe = 0;
unsigned int i;
@@ -526,33 +506,40 @@ static int sun4i_backend_atomic_check(struct sunxi_engine *engine,
* the layer with the highest priority.
*
* The second step is the actual alpha blending, that takes
- * the two pipes as input, and uses the eventual alpha
+ * the two pipes as input, and uses the potential alpha
* component to do the transparency between the two.
*
- * This two steps scenario makes us unable to guarantee a
+ * This two-step scenario makes us unable to guarantee a
* robust alpha blending between the 4 layers in all
* situations, since this means that we need to have one layer
* with alpha at the lowest position of our two pipes.
*
- * However, we cannot even do that, since the hardware has a
- * bug where the lowest plane of the lowest pipe (pipe 0,
- * priority 0), if it has any alpha, will discard the pixel
- * entirely and just display the pixels in the background
- * color (black by default).
+ * However, we cannot even do that on every platform, since
+ * the hardware has a bug where the lowest plane of the lowest
+ * pipe (pipe 0, priority 0), if it has any alpha, will
+ * discard the pixel data entirely and just display the pixels
+ * in the background color (black by default).
*
- * This means that we effectively have only three valid
- * configurations with alpha, all of them with the alpha being
- * on pipe1 with the lowest position, which can be 1, 2 or 3
- * depending on the number of planes and their zpos.
+ * This means that on the affected platforms, we effectively
+ * have only three valid configurations with alpha, all of
+ * them with the alpha being on pipe1 with the lowest
+ * position, which can be 1, 2 or 3 depending on the number of
+ * planes and their zpos.
*/
- if (num_alpha_planes > SUN4I_BACKEND_NUM_ALPHA_LAYERS) {
+
+ /* For platforms that are not affected by the issue described above. */
+ if (backend->quirks->supports_lowest_plane_alpha)
+ num_alpha_planes_max++;
+
+ if (num_alpha_planes > num_alpha_planes_max) {
DRM_DEBUG_DRIVER("Too many planes with alpha, rejecting...\n");
return -EINVAL;
}
/* We can't have an alpha plane at the lowest position */
- if (plane_states[0]->fb->format->has_alpha ||
- (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE))
+ if (!backend->quirks->supports_lowest_plane_alpha &&
+ (plane_states[0]->fb->format->has_alpha ||
+ (plane_states[0]->alpha != DRM_BLEND_ALPHA_OPAQUE)))
return -EINVAL;
for (i = 1; i < num_planes; i++) {
@@ -876,6 +863,8 @@ static int sun4i_backend_bind(struct device *dev, struct device *master,
: SUN4I_BACKEND_MODCTL_OUT_LCD0));
}
+ backend->quirks = quirks;
+
return 0;
err_disable_ram_clk:
@@ -935,9 +924,11 @@ static const struct sun4i_backend_quirks sun6i_backend_quirks = {
static const struct sun4i_backend_quirks sun7i_backend_quirks = {
.needs_output_muxing = true,
+ .supports_lowest_plane_alpha = true,
};
static const struct sun4i_backend_quirks sun8i_a33_backend_quirks = {
+ .supports_lowest_plane_alpha = true,
};
static const struct sun4i_backend_quirks sun9i_backend_quirks = {
diff --git a/drivers/gpu/drm/sun4i/sun4i_backend.h b/drivers/gpu/drm/sun4i/sun4i_backend.h
index 4caee0392fa4..e3d4c6035eb2 100644
--- a/drivers/gpu/drm/sun4i/sun4i_backend.h
+++ b/drivers/gpu/drm/sun4i/sun4i_backend.h
@@ -167,7 +167,6 @@
#define SUN4I_BACKEND_PIPE_OFF(p) (0x5000 + (0x400 * (p)))
#define SUN4I_BACKEND_NUM_LAYERS 4
-#define SUN4I_BACKEND_NUM_ALPHA_LAYERS 1
#define SUN4I_BACKEND_NUM_FRONTEND_LAYERS 1
#define SUN4I_BACKEND_NUM_YUV_PLANES 1
@@ -187,6 +186,8 @@ struct sun4i_backend {
/* Protects against races in the frontend teardown */
spinlock_t frontend_lock;
bool frontend_teardown;
+
+ const struct sun4i_backend_quirks *quirks;
};
static inline struct sun4i_backend *
diff --git a/drivers/gpu/drm/sun4i/sun4i_drv.c b/drivers/gpu/drm/sun4i/sun4i_drv.c
index dd19d674055c..1e41c3f5fd6d 100644
--- a/drivers/gpu/drm/sun4i/sun4i_drv.c
+++ b/drivers/gpu/drm/sun4i/sun4i_drv.c
@@ -61,22 +61,6 @@ static struct drm_driver sun4i_drv_driver = {
/* Frame Buffer Operations */
};
-static void sun4i_remove_framebuffers(void)
-{
- struct apertures_struct *ap;
-
- ap = alloc_apertures(1);
- if (!ap)
- return;
-
- /* The framebuffer can be located anywhere in RAM */
- ap->ranges[0].base = 0;
- ap->ranges[0].size = ~0;
-
- drm_fb_helper_remove_conflicting_framebuffers(ap, "sun4i-drm-fb", false);
- kfree(ap);
-}
-
static int sun4i_drv_bind(struct device *dev)
{
struct drm_device *drm;
@@ -119,7 +103,7 @@ static int sun4i_drv_bind(struct device *dev)
drm->irq_enabled = true;
/* Remove early framebuffers (ie. simplefb) */
- sun4i_remove_framebuffers();
+ drm_fb_helper_remove_conflicting_framebuffers(NULL, "sun4i-drm-fb", false);
/* Create our framebuffer */
ret = sun4i_framebuffer_init(drm);
@@ -421,6 +405,7 @@ static const struct of_device_id sun4i_drv_of_table[] = {
{ .compatible = "allwinner,sun8i-r40-display-engine" },
{ .compatible = "allwinner,sun8i-v3s-display-engine" },
{ .compatible = "allwinner,sun9i-a80-display-engine" },
+ { .compatible = "allwinner,sun50i-a64-display-engine" },
{ }
};
MODULE_DEVICE_TABLE(of, sun4i_drv_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun4i_layer.c b/drivers/gpu/drm/sun4i/sun4i_layer.c
index 750ad24de1d7..78f77af8805a 100644
--- a/drivers/gpu/drm/sun4i/sun4i_layer.c
+++ b/drivers/gpu/drm/sun4i/sun4i_layer.c
@@ -35,9 +35,7 @@ static void sun4i_backend_layer_reset(struct drm_plane *plane)
state = kzalloc(sizeof(*state), GFP_KERNEL);
if (state) {
- plane->state = &state->state;
- plane->state->plane = plane;
- plane->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
+ __drm_atomic_helper_plane_reset(plane, &state->state);
plane->state->zpos = layer->id;
}
}
diff --git a/drivers/gpu/drm/sun4i/sun4i_tcon.c b/drivers/gpu/drm/sun4i/sun4i_tcon.c
index 3fb084f802e2..0cebb2db5b99 100644
--- a/drivers/gpu/drm/sun4i/sun4i_tcon.c
+++ b/drivers/gpu/drm/sun4i/sun4i_tcon.c
@@ -17,6 +17,7 @@
#include <drm/drm_encoder.h>
#include <drm/drm_modes.h>
#include <drm/drm_of.h>
+#include <drm/drm_panel.h>
#include <uapi/drm/drm_mode.h>
@@ -35,6 +36,7 @@
#include "sun4i_rgb.h"
#include "sun4i_tcon.h"
#include "sun6i_mipi_dsi.h"
+#include "sun8i_tcon_top.h"
#include "sunxi_engine.h"
static struct drm_connector *sun4i_tcon_get_connector(const struct drm_encoder *encoder)
@@ -474,6 +476,33 @@ static void sun4i_tcon0_mode_set_rgb(struct sun4i_tcon *tcon,
if (mode->flags & DRM_MODE_FLAG_PVSYNC)
val |= SUN4I_TCON0_IO_POL_VSYNC_POSITIVE;
+ /*
+	 * On the A20 and similar SoCs, the only way to achieve a positive
+	 * (rising) edge is to set the dclk clock phase to 2/3 (240°).
+	 * By default the TCON works on the negative (falling) edge, which
+	 * is why the phase is set to 0 in that case.
+	 * Unfortunately there is no way to logically invert dclk through
+	 * the IO_POL register.
+	 * The only approach that works, triple-checked with a scope, is a
+	 * clock phase of 0° for the negative edge and 240° for the
+	 * positive edge.
+	 * On the A33 and similar SoCs there would be a 90° phase option,
+	 * but it also divides dclk by 2.
+	 * The following code avoids scattering quirks across the TCON and
+	 * DOTCLOCK drivers.
+ */
+ if (!IS_ERR(tcon->panel)) {
+ struct drm_panel *panel = tcon->panel;
+ struct drm_connector *connector = panel->connector;
+ struct drm_display_info display_info = connector->display_info;
+
+ if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_POSEDGE)
+ clk_set_phase(tcon->dclk, 240);
+
+ if (display_info.bus_flags & DRM_BUS_FLAG_PIXDATA_NEGEDGE)
+ clk_set_phase(tcon->dclk, 0);
+ }
+
regmap_update_bits(tcon->regs, SUN4I_TCON0_IO_POL_REG,
SUN4I_TCON0_IO_POL_HSYNC_POSITIVE | SUN4I_TCON0_IO_POL_VSYNC_POSITIVE,
val);
@@ -880,6 +909,36 @@ static struct sunxi_engine *sun4i_tcon_get_engine_by_id(struct sun4i_drv *drv,
return ERR_PTR(-EINVAL);
}
+static bool sun4i_tcon_connected_to_tcon_top(struct device_node *node)
+{
+ struct device_node *remote;
+ bool ret = false;
+
+ remote = of_graph_get_remote_node(node, 0, -1);
+ if (remote) {
+ ret = !!of_match_node(sun8i_tcon_top_of_table, remote);
+ of_node_put(remote);
+ }
+
+ return ret;
+}
+
+static int sun4i_tcon_get_index(struct sun4i_drv *drv)
+{
+ struct list_head *pos;
+ int size = 0;
+
+ /*
+	 * Because the TCON is added to the list at the end of probe
+	 * (after this function is called), the index of the current TCON
+	 * is the same as the current TCON list size.
+ */
+ list_for_each(pos, &drv->tcon_list)
+ ++size;
+
+ return size;
+}
+
/*
* On SoCs with the old display pipeline design (Display Engine 1.0),
* we assumed the TCON was always tied to just one backend. However
@@ -928,8 +987,24 @@ static struct sunxi_engine *sun4i_tcon_find_engine(struct sun4i_drv *drv,
* connections between the backend and TCON?
*/
if (of_get_child_count(port) > 1) {
- /* Get our ID directly from an upstream endpoint */
- int id = sun4i_tcon_of_get_id_from_port(port);
+ int id;
+
+ /*
+		 * When the pipeline has the same number of TCONs and engines,
+		 * which are represented by frontends/backends (DE1) or mixers
+		 * (DE2), we match them by their respective IDs. However, if
+		 * the pipeline contains a TCON TOP, chances are that there
+		 * are either more TCONs than engines (R40) or TCONs with
+		 * non-consecutive IDs (H6). In that case it is easier to just
+		 * use the TCON's index in the list as its ID. That means that
+		 * on the R40, any 2 of the 4 TCONs can be enabled in the DT
+		 * (there are 2 mixers). Due to the design of the TCON TOP,
+		 * the remaining 2 TCONs can't be connected to anything anyway.
+ */
+ if (sun4i_tcon_connected_to_tcon_top(node))
+ id = sun4i_tcon_get_index(drv);
+ else
+ id = sun4i_tcon_of_get_id_from_port(port);
/* Get our engine by matching our ID */
engine = sun4i_tcon_get_engine_by_id(drv, id);
@@ -1244,6 +1319,40 @@ static int sun6i_tcon_set_mux(struct sun4i_tcon *tcon,
return 0;
}
+static int sun8i_r40_tcon_tv_set_mux(struct sun4i_tcon *tcon,
+ const struct drm_encoder *encoder)
+{
+ struct device_node *port, *remote;
+ struct platform_device *pdev;
+ int id, ret;
+
+ /* find TCON TOP platform device and TCON id */
+
+ port = of_graph_get_port_by_id(tcon->dev->of_node, 0);
+ if (!port)
+ return -EINVAL;
+
+ id = sun4i_tcon_of_get_id_from_port(port);
+ of_node_put(port);
+
+ remote = of_graph_get_remote_node(tcon->dev->of_node, 0, -1);
+ if (!remote)
+ return -EINVAL;
+
+ pdev = of_find_device_by_node(remote);
+ of_node_put(remote);
+ if (!pdev)
+ return -EINVAL;
+
+ if (encoder->encoder_type == DRM_MODE_ENCODER_TMDS) {
+ ret = sun8i_tcon_top_set_hdmi_src(&pdev->dev, id);
+ if (ret)
+ return ret;
+ }
+
+ return sun8i_tcon_top_de_config(&pdev->dev, tcon->id, id);
+}
+
static const struct sun4i_tcon_quirks sun4i_a10_quirks = {
.has_channel_0 = true,
.has_channel_1 = true,
@@ -1291,6 +1400,11 @@ static const struct sun4i_tcon_quirks sun8i_a83t_tv_quirks = {
.has_channel_1 = true,
};
+static const struct sun4i_tcon_quirks sun8i_r40_tv_quirks = {
+ .has_channel_1 = true,
+ .set_mux = sun8i_r40_tcon_tv_set_mux,
+};
+
static const struct sun4i_tcon_quirks sun8i_v3s_quirks = {
.has_channel_0 = true,
};
@@ -1315,6 +1429,7 @@ const struct of_device_id sun4i_tcon_of_table[] = {
{ .compatible = "allwinner,sun8i-a33-tcon", .data = &sun8i_a33_quirks },
{ .compatible = "allwinner,sun8i-a83t-tcon-lcd", .data = &sun8i_a83t_lcd_quirks },
{ .compatible = "allwinner,sun8i-a83t-tcon-tv", .data = &sun8i_a83t_tv_quirks },
+ { .compatible = "allwinner,sun8i-r40-tcon-tv", .data = &sun8i_r40_tv_quirks },
{ .compatible = "allwinner,sun8i-v3s-tcon", .data = &sun8i_v3s_quirks },
{ .compatible = "allwinner,sun9i-a80-tcon-lcd", .data = &sun9i_a80_tcon_lcd_quirks },
{ .compatible = "allwinner,sun9i-a80-tcon-tv", .data = &sun9i_a80_tcon_tv_quirks },
diff --git a/drivers/gpu/drm/sun4i/sun6i_drc.c b/drivers/gpu/drm/sun4i/sun6i_drc.c
index b5e071a49045..88eb268fdf73 100644
--- a/drivers/gpu/drm/sun4i/sun6i_drc.c
+++ b/drivers/gpu/drm/sun4i/sun6i_drc.c
@@ -12,6 +12,7 @@
#include <linux/clk.h>
#include <linux/component.h>
#include <linux/module.h>
+#include <linux/mod_devicetable.h>
#include <linux/platform_device.h>
#include <linux/regmap.h>
#include <linux/reset.h>
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
index 31875b636434..ed2983770e9c 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.c
@@ -125,10 +125,22 @@ static int sun8i_dw_hdmi_bind(struct device *dev, struct device *master,
return PTR_ERR(hdmi->clk_tmds);
}
+ hdmi->regulator = devm_regulator_get(dev, "hvcc");
+ if (IS_ERR(hdmi->regulator)) {
+ dev_err(dev, "Couldn't get regulator\n");
+ return PTR_ERR(hdmi->regulator);
+ }
+
+ ret = regulator_enable(hdmi->regulator);
+ if (ret) {
+ dev_err(dev, "Failed to enable regulator\n");
+ return ret;
+ }
+
ret = reset_control_deassert(hdmi->rst_ctrl);
if (ret) {
dev_err(dev, "Could not deassert ctrl reset control\n");
- return ret;
+ goto err_disable_regulator;
}
ret = clk_prepare_enable(hdmi->clk_tmds);
@@ -183,6 +195,8 @@ err_disable_clk_tmds:
clk_disable_unprepare(hdmi->clk_tmds);
err_assert_ctrl_reset:
reset_control_assert(hdmi->rst_ctrl);
+err_disable_regulator:
+ regulator_disable(hdmi->regulator);
return ret;
}
@@ -196,6 +210,7 @@ static void sun8i_dw_hdmi_unbind(struct device *dev, struct device *master,
sun8i_hdmi_phy_remove(hdmi);
clk_disable_unprepare(hdmi->clk_tmds);
reset_control_assert(hdmi->rst_ctrl);
+ regulator_disable(hdmi->regulator);
}
static const struct component_ops sun8i_dw_hdmi_ops = {
diff --git a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
index aadbe0a10b0c..7fdc1ecd2892 100644
--- a/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
+++ b/drivers/gpu/drm/sun4i/sun8i_dw_hdmi.h
@@ -10,6 +10,7 @@
#include <drm/drm_encoder.h>
#include <linux/clk.h>
#include <linux/regmap.h>
+#include <linux/regulator/consumer.h>
#include <linux/reset.h>
#define SUN8I_HDMI_PHY_DBG_CTRL_REG 0x0000
@@ -176,6 +177,7 @@ struct sun8i_dw_hdmi {
struct drm_encoder encoder;
struct sun8i_hdmi_phy *phy;
struct dw_hdmi_plat_data plat_data;
+ struct regulator *regulator;
struct reset_control *rst_ctrl;
};
diff --git a/drivers/gpu/drm/sun4i/sun8i_mixer.c b/drivers/gpu/drm/sun4i/sun8i_mixer.c
index fc3713608f78..8b3d02b146b7 100644
--- a/drivers/gpu/drm/sun4i/sun8i_mixer.c
+++ b/drivers/gpu/drm/sun4i/sun8i_mixer.c
@@ -569,6 +569,22 @@ static const struct sun8i_mixer_cfg sun8i_v3s_mixer_cfg = {
.mod_rate = 150000000,
};
+static const struct sun8i_mixer_cfg sun50i_a64_mixer0_cfg = {
+ .ccsc = 0,
+ .mod_rate = 297000000,
+ .scaler_mask = 0xf,
+ .ui_num = 3,
+ .vi_num = 1,
+};
+
+static const struct sun8i_mixer_cfg sun50i_a64_mixer1_cfg = {
+ .ccsc = 1,
+ .mod_rate = 297000000,
+ .scaler_mask = 0x3,
+ .ui_num = 1,
+ .vi_num = 1,
+};
+
static const struct of_device_id sun8i_mixer_of_table[] = {
{
.compatible = "allwinner,sun8i-a83t-de2-mixer-0",
@@ -594,6 +610,14 @@ static const struct of_device_id sun8i_mixer_of_table[] = {
.compatible = "allwinner,sun8i-v3s-de2-mixer",
.data = &sun8i_v3s_mixer_cfg,
},
+ {
+ .compatible = "allwinner,sun50i-a64-de2-mixer-0",
+ .data = &sun50i_a64_mixer0_cfg,
+ },
+ {
+ .compatible = "allwinner,sun50i-a64-de2-mixer-1",
+ .data = &sun50i_a64_mixer1_cfg,
+ },
{ }
};
MODULE_DEVICE_TABLE(of, sun8i_mixer_of_table);
diff --git a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
index 55fe398d8290..3040a79f298f 100644
--- a/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
+++ b/drivers/gpu/drm/sun4i/sun8i_tcon_top.c
@@ -129,8 +129,7 @@ static int sun8i_tcon_top_bind(struct device *dev, struct device *master,
if (!tcon_top)
return -ENOMEM;
- clk_data = devm_kzalloc(dev, sizeof(*clk_data) +
- sizeof(*clk_data->hws) * CLK_NUM,
+ clk_data = devm_kzalloc(dev, struct_size(clk_data, hws, CLK_NUM),
GFP_KERNEL);
if (!clk_data)
return -ENOMEM;
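struct_size() comes from <linux/overflow.h> and folds the flexible-array arithmetic with overflow checking. A minimal illustration (struct demo and demo_bytes are hypothetical):

#include <linux/overflow.h>

struct clk_hw;

struct demo {
	int n;
	struct clk_hw *hws[];	/* flexible array member */
};

static size_t demo_bytes(struct demo *d, size_t num)
{
	/* == sizeof(*d) + num * sizeof(*d->hws), saturating to SIZE_MAX
	 * on overflow instead of wrapping
	 */
	return struct_size(d, hws, num);
}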
diff --git a/drivers/gpu/drm/tegra/dc.c b/drivers/gpu/drm/tegra/dc.c
index c3afe7b2237e..965088afcfad 100644
--- a/drivers/gpu/drm/tegra/dc.c
+++ b/drivers/gpu/drm/tegra/dc.c
@@ -2312,7 +2312,7 @@ static int tegra_dc_couple(struct tegra_dc *dc)
* POWER_CONTROL registers during CRTC enabling.
*/
if (dc->soc->coupled_pm && dc->pipe == 1) {
- u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE;
+ u32 flags = DL_FLAG_PM_RUNTIME | DL_FLAG_AUTOREMOVE_CONSUMER;
struct device_link *link;
struct device *partner;
diff --git a/drivers/gpu/drm/tegra/drm.c b/drivers/gpu/drm/tegra/drm.c
index 776c1513e582..b424bc911b95 100644
--- a/drivers/gpu/drm/tegra/drm.c
+++ b/drivers/gpu/drm/tegra/drm.c
@@ -398,7 +398,7 @@ int tegra_drm_submit(struct tegra_drm_context *context,
 * unaligned offset is malformed and causes command stream
 * corruption on the buffer address relocation.
*/
- if (offset & 3 || offset >= obj->gem.size) {
+ if (offset & 3 || offset > obj->gem.size) {
err = -EINVAL;
goto fail;
}
@@ -1187,6 +1187,10 @@ static int host1x_drm_probe(struct host1x_device *dev)
dev_set_drvdata(&dev->dev, drm);
+ err = drm_fb_helper_remove_conflicting_framebuffers(NULL, "tegradrmfb", false);
+ if (err < 0)
+ goto unref;
+
err = drm_dev_register(drm, 0);
if (err < 0)
goto unref;
diff --git a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
index 19c7f70adfa5..255341ee4eb9 100644
--- a/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
+++ b/drivers/gpu/drm/tinydrm/core/tinydrm-core.c
@@ -135,7 +135,7 @@ static int tinydrm_init(struct device *parent, struct tinydrm_device *tdev,
/*
 * We don't embed drm_device, because that prevents us from using
* devm_kzalloc() to allocate tinydrm_device in the driver since
- * drm_dev_unref() frees the structure. The devm_ functions provide
+ * drm_dev_put() frees the structure. The devm_ functions provide
* for easy error handling.
*/
drm = drm_dev_alloc(driver, parent);
@@ -155,7 +155,7 @@ static void tinydrm_fini(struct tinydrm_device *tdev)
drm_mode_config_cleanup(tdev->drm);
mutex_destroy(&tdev->dirty_lock);
tdev->drm->dev_private = NULL;
- drm_dev_unref(tdev->drm);
+ drm_dev_put(tdev->drm);
}
static void devm_tinydrm_release(void *data)
@@ -172,7 +172,7 @@ static void devm_tinydrm_release(void *data)
*
 * This function initializes @tdev, the underlying DRM device and its
* mode_config. Resources will be automatically freed on driver detach (devres)
- * using drm_mode_config_cleanup() and drm_dev_unref().
+ * using drm_mode_config_cleanup() and drm_dev_put().
*
* Returns:
* Zero on success, negative error code on failure.
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc.c b/drivers/gpu/drm/ttm/ttm_page_alloc.c
index 6e2d1300b457..f841accc2c00 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc.c
@@ -47,13 +47,7 @@
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
-
-#if IS_ENABLED(CONFIG_AGP)
-#include <asm/agp.h>
-#endif
-#ifdef CONFIG_X86
-#include <asm/set_memory.h>
-#endif
+#include <drm/ttm/ttm_set_memory.h>
#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION 16
@@ -222,52 +216,6 @@ static struct kobj_type ttm_pool_kobj_type = {
static struct ttm_pool_manager *_manager;
-#ifndef CONFIG_X86
-static int set_pages_wb(struct page *page, int numpages)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < numpages; i++)
- unmap_page_from_agp(page++);
-#endif
- return 0;
-}
-
-static int set_pages_array_wb(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < addrinarray; i++)
- unmap_page_from_agp(pages[i]);
-#endif
- return 0;
-}
-
-static int set_pages_array_wc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
-#endif
- return 0;
-}
-
-static int set_pages_array_uc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
-#endif
- return 0;
-}
-#endif
-
/**
 * Select the right pool for the requested caching state and ttm flags. */
static struct ttm_page_pool *ttm_get_pool(int flags, bool huge,
@@ -302,13 +250,13 @@ static void ttm_pages_put(struct page *pages[], unsigned npages,
unsigned int i, pages_nr = (1 << order);
if (order == 0) {
- if (set_pages_array_wb(pages, npages))
+ if (ttm_set_pages_array_wb(pages, npages))
pr_err("Failed to set %d pages to wb!\n", npages);
}
for (i = 0; i < npages; ++i) {
if (order > 0) {
- if (set_pages_wb(pages[i], pages_nr))
+ if (ttm_set_pages_wb(pages[i], pages_nr))
pr_err("Failed to set %d pages to wb!\n", pages_nr);
}
__free_pages(pages[i], order);
@@ -498,12 +446,12 @@ static int ttm_set_pages_caching(struct page **pages,
/* Set page caching */
switch (cstate) {
case tt_uncached:
- r = set_pages_array_uc(pages, cpages);
+ r = ttm_set_pages_array_uc(pages, cpages);
if (r)
pr_err("Failed to set %d pages to uc!\n", cpages);
break;
case tt_wc:
- r = set_pages_array_wc(pages, cpages);
+ r = ttm_set_pages_array_wc(pages, cpages);
if (r)
pr_err("Failed to set %d pages to wc!\n", cpages);
break;
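The removed open-coded fallbacks presumably now live once in <drm/ttm/ttm_set_memory.h>. A hedged sketch of the shape such a wrapper likely takes, reconstructed from the code deleted above (the exact header contents are an assumption):

#ifdef CONFIG_X86
#include <asm/set_memory.h>

static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
{
	/* on x86, defer to the arch helper */
	return set_pages_array_wb(pages, addrinarray);
}
#else /* !CONFIG_X86 */
static inline int ttm_set_pages_array_wb(struct page **pages, int addrinarray)
{
#if IS_ENABLED(CONFIG_AGP)
	int i;

	/* non-x86 fallback, mirroring the removed per-file versions */
	for (i = 0; i < addrinarray; i++)
		unmap_page_from_agp(pages[i]);
#endif
	return 0;
}
#endif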
diff --git a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
index 3f14c1cc0789..507be7ac1165 100644
--- a/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
+++ b/drivers/gpu/drm/ttm/ttm_page_alloc_dma.c
@@ -50,12 +50,7 @@
#include <linux/kthread.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
-#if IS_ENABLED(CONFIG_AGP)
-#include <asm/agp.h>
-#endif
-#ifdef CONFIG_X86
-#include <asm/set_memory.h>
-#endif
+#include <drm/ttm/ttm_set_memory.h>
#define NUM_PAGES_TO_ALLOC (PAGE_SIZE/sizeof(struct page *))
#define SMALL_ALLOCATION 4
@@ -268,54 +263,19 @@ static struct kobj_type ttm_pool_kobj_type = {
.default_attrs = ttm_pool_attrs,
};
-#ifndef CONFIG_X86
-static int set_pages_array_wb(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < addrinarray; i++)
- unmap_page_from_agp(pages[i]);
-#endif
- return 0;
-}
-
-static int set_pages_array_wc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
-#endif
- return 0;
-}
-
-static int set_pages_array_uc(struct page **pages, int addrinarray)
-{
-#if IS_ENABLED(CONFIG_AGP)
- int i;
-
- for (i = 0; i < addrinarray; i++)
- map_page_into_agp(pages[i]);
-#endif
- return 0;
-}
-#endif /* for !CONFIG_X86 */
-
static int ttm_set_pages_caching(struct dma_pool *pool,
struct page **pages, unsigned cpages)
{
int r = 0;
/* Set page caching */
if (pool->type & IS_UC) {
- r = set_pages_array_uc(pages, cpages);
+ r = ttm_set_pages_array_uc(pages, cpages);
if (r)
pr_err("%s: Failed to set %d pages to uc!\n",
pool->dev_name, cpages);
}
if (pool->type & IS_WC) {
- r = set_pages_array_wc(pages, cpages);
+ r = ttm_set_pages_array_wc(pages, cpages);
if (r)
pr_err("%s: Failed to set %d pages to wc!\n",
pool->dev_name, cpages);
@@ -389,17 +349,14 @@ static void ttm_pool_update_free_locked(struct dma_pool *pool,
static void ttm_dma_page_put(struct dma_pool *pool, struct dma_page *d_page)
{
struct page *page = d_page->p;
- unsigned i, num_pages;
+ unsigned num_pages;
/* Don't set WB on WB page pool. */
if (!(pool->type & IS_CACHED)) {
num_pages = pool->size / PAGE_SIZE;
- for (i = 0; i < num_pages; ++i, ++page) {
- if (set_pages_array_wb(&page, 1)) {
- pr_err("%s: Failed to set %d pages to wb!\n",
- pool->dev_name, 1);
- }
- }
+ if (ttm_set_pages_wb(page, num_pages))
+ pr_err("%s: Failed to set %d pages to wb!\n",
+ pool->dev_name, num_pages);
}
list_del(&d_page->page_list);
@@ -420,7 +377,7 @@ static void ttm_dma_pages_put(struct dma_pool *pool, struct list_head *d_pages,
/* Don't set WB on WB page pool. */
if (npages && !(pool->type & IS_CACHED) &&
- set_pages_array_wb(pages, npages))
+ ttm_set_pages_array_wb(pages, npages))
pr_err("%s: Failed to set %d pages to wb!\n",
pool->dev_name, npages);
diff --git a/drivers/gpu/drm/ttm/ttm_tt.c b/drivers/gpu/drm/ttm/ttm_tt.c
index a1e543972ca7..e3a0691582ff 100644
--- a/drivers/gpu/drm/ttm/ttm_tt.c
+++ b/drivers/gpu/drm/ttm/ttm_tt.c
@@ -38,9 +38,7 @@
#include <drm/drm_cache.h>
#include <drm/ttm/ttm_bo_driver.h>
#include <drm/ttm/ttm_page_alloc.h>
-#ifdef CONFIG_X86
-#include <asm/set_memory.h>
-#endif
+#include <drm/ttm/ttm_set_memory.h>
/**
* Allocates a ttm structure for the given BO.
@@ -115,10 +113,9 @@ static int ttm_sg_tt_alloc_page_directory(struct ttm_dma_tt *ttm)
return 0;
}
-#ifdef CONFIG_X86
-static inline int ttm_tt_set_page_caching(struct page *p,
- enum ttm_caching_state c_old,
- enum ttm_caching_state c_new)
+static int ttm_tt_set_page_caching(struct page *p,
+ enum ttm_caching_state c_old,
+ enum ttm_caching_state c_new)
{
int ret = 0;
@@ -129,26 +126,18 @@ static inline int ttm_tt_set_page_caching(struct page *p,
/* p isn't in the default caching state, set it to
* writeback first to free its current memtype. */
- ret = set_pages_wb(p, 1);
+ ret = ttm_set_pages_wb(p, 1);
if (ret)
return ret;
}
if (c_new == tt_wc)
- ret = set_memory_wc((unsigned long) page_address(p), 1);
+ ret = ttm_set_pages_wc(p, 1);
else if (c_new == tt_uncached)
- ret = set_pages_uc(p, 1);
+ ret = ttm_set_pages_uc(p, 1);
return ret;
}
-#else /* CONFIG_X86 */
-static inline int ttm_tt_set_page_caching(struct page *p,
- enum ttm_caching_state c_old,
- enum ttm_caching_state c_new)
-{
- return 0;
-}
-#endif /* CONFIG_X86 */
/*
* Change caching policy for the linear kernel map
diff --git a/drivers/gpu/drm/udl/udl_drv.h b/drivers/gpu/drm/udl/udl_drv.h
index 072582570a4f..e9e9b1ff678e 100644
--- a/drivers/gpu/drm/udl/udl_drv.h
+++ b/drivers/gpu/drm/udl/udl_drv.h
@@ -113,7 +113,7 @@ udl_fb_user_fb_create(struct drm_device *dev,
struct drm_file *file,
const struct drm_mode_fb_cmd2 *mode_cmd);
-int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
+int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
u32 byte_offset, u32 device_byte_offset, u32 byte_width,
int *ident_ptr, int *sent_ptr);
diff --git a/drivers/gpu/drm/udl/udl_fb.c b/drivers/gpu/drm/udl/udl_fb.c
index 2ebdc6d5a76e..dbb62f6eb48a 100644
--- a/drivers/gpu/drm/udl/udl_fb.c
+++ b/drivers/gpu/drm/udl/udl_fb.c
@@ -90,7 +90,10 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
int bytes_identical = 0;
struct urb *urb;
int aligned_x;
- int bpp = fb->base.format->cpp[0];
+ int log_bpp;
+
+ BUG_ON(!is_power_of_2(fb->base.format->cpp[0]));
+ log_bpp = __ffs(fb->base.format->cpp[0]);
if (!fb->active_16)
return 0;
@@ -125,19 +128,22 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
for (i = y; i < y + height ; i++) {
const int line_offset = fb->base.pitches[0] * i;
- const int byte_offset = line_offset + (x * bpp);
- const int dev_byte_offset = (fb->base.width * bpp * i) + (x * bpp);
- if (udl_render_hline(dev, bpp, &urb,
+ const int byte_offset = line_offset + (x << log_bpp);
+ const int dev_byte_offset = (fb->base.width * i + x) << log_bpp;
+ if (udl_render_hline(dev, log_bpp, &urb,
(char *) fb->obj->vmapping,
&cmd, byte_offset, dev_byte_offset,
- width * bpp,
+ width << log_bpp,
&bytes_identical, &bytes_sent))
goto error;
}
if (cmd > (char *) urb->transfer_buffer) {
/* Send partial buffer remaining before exiting */
- int len = cmd - (char *) urb->transfer_buffer;
+ int len;
+ if (cmd < (char *) urb->transfer_buffer + urb->transfer_buffer_length)
+ *cmd++ = 0xAF;
+ len = cmd - (char *) urb->transfer_buffer;
ret = udl_submit_urb(dev, urb, len);
bytes_sent += len;
} else
@@ -146,7 +152,7 @@ int udl_handle_damage(struct udl_framebuffer *fb, int x, int y,
error:
atomic_add(bytes_sent, &udl->bytes_sent);
atomic_add(bytes_identical, &udl->bytes_identical);
- atomic_add(width*height*bpp, &udl->bytes_rendered);
+ atomic_add((width * height) << log_bpp, &udl->bytes_rendered);
end_cycles = get_cycles();
atomic_add(((unsigned int) ((end_cycles - start_cycles)
>> 10)), /* Kcycles */
@@ -172,7 +178,7 @@ static int udl_fb_mmap(struct fb_info *info, struct vm_area_struct *vma)
pos = (unsigned long)info->fix.smem_start + offset;
- pr_notice("mmap() framebuffer addr:%lu size:%lu\n",
+ pr_debug("mmap() framebuffer addr:%lu size:%lu\n",
pos, size);
/* We don't want the framebuffer to be mapped encrypted */
@@ -218,7 +224,7 @@ static int udl_fb_open(struct fb_info *info, int user)
struct fb_deferred_io *fbdefio;
- fbdefio = kmalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
+ fbdefio = kzalloc(sizeof(struct fb_deferred_io), GFP_KERNEL);
if (fbdefio) {
fbdefio->delay = DL_DEFIO_WRITE_DELAY;
@@ -230,7 +236,7 @@ static int udl_fb_open(struct fb_info *info, int user)
}
#endif
- pr_notice("open /dev/fb%d user=%d fb_info=%p count=%d\n",
+ pr_debug("open /dev/fb%d user=%d fb_info=%p count=%d\n",
info->node, user, info, ufbdev->fb_count);
return 0;
@@ -255,7 +261,7 @@ static int udl_fb_release(struct fb_info *info, int user)
}
#endif
- pr_warn("released /dev/fb%d user=%d count=%d\n",
+ pr_debug("released /dev/fb%d user=%d count=%d\n",
info->node, user, ufbdev->fb_count);
return 0;
diff --git a/drivers/gpu/drm/udl/udl_main.c b/drivers/gpu/drm/udl/udl_main.c
index d518de8f496b..f455f095a146 100644
--- a/drivers/gpu/drm/udl/udl_main.c
+++ b/drivers/gpu/drm/udl/udl_main.c
@@ -170,25 +170,19 @@ static void udl_free_urb_list(struct drm_device *dev)
struct list_head *node;
struct urb_node *unode;
struct urb *urb;
- int ret;
- unsigned long flags;
DRM_DEBUG("Waiting for completes and freeing all render urbs\n");
/* keep waiting and freeing, until we've got 'em all */
while (count--) {
+ down(&udl->urbs.limit_sem);
- /* Getting interrupted means a leak, but ok at shutdown*/
- ret = down_interruptible(&udl->urbs.limit_sem);
- if (ret)
- break;
-
- spin_lock_irqsave(&udl->urbs.lock, flags);
+ spin_lock_irq(&udl->urbs.lock);
node = udl->urbs.list.next; /* have reserved one with sem */
list_del_init(node);
- spin_unlock_irqrestore(&udl->urbs.lock, flags);
+ spin_unlock_irq(&udl->urbs.lock);
unode = list_entry(node, struct urb_node, entry);
urb = unode->urb;
@@ -205,17 +199,22 @@ static void udl_free_urb_list(struct drm_device *dev)
static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
{
struct udl_device *udl = dev->dev_private;
- int i = 0;
struct urb *urb;
struct urb_node *unode;
char *buf;
+ size_t wanted_size = count * size;
spin_lock_init(&udl->urbs.lock);
+retry:
udl->urbs.size = size;
INIT_LIST_HEAD(&udl->urbs.list);
- while (i < count) {
+ sema_init(&udl->urbs.limit_sem, 0);
+ udl->urbs.count = 0;
+ udl->urbs.available = 0;
+
+ while (udl->urbs.count * size < wanted_size) {
unode = kzalloc(sizeof(struct urb_node), GFP_KERNEL);
if (!unode)
break;
@@ -231,11 +230,16 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
}
unode->urb = urb;
- buf = usb_alloc_coherent(udl->udev, MAX_TRANSFER, GFP_KERNEL,
+ buf = usb_alloc_coherent(udl->udev, size, GFP_KERNEL,
&urb->transfer_dma);
if (!buf) {
kfree(unode);
usb_free_urb(urb);
+ if (size > PAGE_SIZE) {
+ size /= 2;
+ udl_free_urb_list(dev);
+ goto retry;
+ }
break;
}
@@ -246,16 +250,14 @@ static int udl_alloc_urb_list(struct drm_device *dev, int count, size_t size)
list_add_tail(&unode->entry, &udl->urbs.list);
- i++;
+ up(&udl->urbs.limit_sem);
+ udl->urbs.count++;
+ udl->urbs.available++;
}
- sema_init(&udl->urbs.limit_sem, i);
- udl->urbs.count = i;
- udl->urbs.available = i;
-
- DRM_DEBUG("allocated %d %d byte urbs\n", i, (int) size);
+ DRM_DEBUG("allocated %d %d byte urbs\n", udl->urbs.count, (int) size);
- return i;
+ return udl->urbs.count;
}
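Condensed view of the retry policy added above (next_urb_size is an illustrative helper, not driver code): keep the total wanted bytes constant, but halve the per-URB buffer size, down to PAGE_SIZE, whenever a coherent allocation fails, and rebuild the list from scratch.

static size_t next_urb_size(size_t size, bool alloc_failed)
{
	/* halve and retry while we can still stay page-sized or larger */
	if (alloc_failed && size > PAGE_SIZE)
		return size / 2;
	return size;	/* unchanged: either success or final give-up */
}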
struct urb *udl_get_urb(struct drm_device *dev)
@@ -265,7 +267,6 @@ struct urb *udl_get_urb(struct drm_device *dev)
struct list_head *entry;
struct urb_node *unode;
struct urb *urb = NULL;
- unsigned long flags;
/* Wait for an in-flight buffer to complete and get re-queued */
ret = down_timeout(&udl->urbs.limit_sem, GET_URB_TIMEOUT);
@@ -276,14 +277,14 @@ struct urb *udl_get_urb(struct drm_device *dev)
goto error;
}
- spin_lock_irqsave(&udl->urbs.lock, flags);
+ spin_lock_irq(&udl->urbs.lock);
BUG_ON(list_empty(&udl->urbs.list)); /* reserved one with limit_sem */
entry = udl->urbs.list.next;
list_del_init(entry);
udl->urbs.available--;
- spin_unlock_irqrestore(&udl->urbs.lock, flags);
+ spin_unlock_irq(&udl->urbs.lock);
unode = list_entry(entry, struct urb_node, entry);
urb = unode->urb;
diff --git a/drivers/gpu/drm/udl/udl_modeset.c b/drivers/gpu/drm/udl/udl_modeset.c
index 5bcae7649795..7e37765cf5ac 100644
--- a/drivers/gpu/drm/udl/udl_modeset.c
+++ b/drivers/gpu/drm/udl/udl_modeset.c
@@ -243,7 +243,7 @@ static int udl_crtc_write_mode_to_hw(struct drm_crtc *crtc)
memcpy(buf, udl->mode_buf, udl->mode_buf_len);
retval = udl_submit_urb(dev, urb, udl->mode_buf_len);
- DRM_INFO("write mode info %d\n", udl->mode_buf_len);
+ DRM_DEBUG("write mode info %d\n", udl->mode_buf_len);
return retval;
}
@@ -366,7 +366,6 @@ static int udl_crtc_page_flip(struct drm_crtc *crtc,
{
struct udl_framebuffer *ufb = to_udl_fb(fb);
struct drm_device *dev = crtc->dev;
- unsigned long flags;
struct drm_framebuffer *old_fb = crtc->primary->fb;
if (old_fb) {
@@ -377,10 +376,10 @@ static int udl_crtc_page_flip(struct drm_crtc *crtc,
udl_handle_damage(ufb, 0, 0, fb->width, fb->height);
- spin_lock_irqsave(&dev->event_lock, flags);
+ spin_lock_irq(&dev->event_lock);
if (event)
drm_crtc_send_vblank_event(crtc, event);
- spin_unlock_irqrestore(&dev->event_lock, flags);
+ spin_unlock_irq(&dev->event_lock);
crtc->primary->fb = fb;
return 0;
diff --git a/drivers/gpu/drm/udl/udl_transfer.c b/drivers/gpu/drm/udl/udl_transfer.c
index 0c87b1ac6b68..ce87661e544f 100644
--- a/drivers/gpu/drm/udl/udl_transfer.c
+++ b/drivers/gpu/drm/udl/udl_transfer.c
@@ -13,7 +13,6 @@
#include <linux/module.h>
#include <linux/slab.h>
#include <linux/fb.h>
-#include <linux/prefetch.h>
#include <asm/unaligned.h>
#include <drm/drmP.h>
@@ -51,9 +50,6 @@ static int udl_trim_hline(const u8 *bback, const u8 **bfront, int *width_bytes)
int start = width;
int end = width;
- prefetch((void *) front);
- prefetch((void *) back);
-
for (j = 0; j < width; j++) {
if (back[j] != front[j]) {
start = j;
@@ -83,12 +79,12 @@ static inline u16 pixel32_to_be16(const uint32_t pixel)
((pixel >> 8) & 0xf800));
}
-static inline u16 get_pixel_val16(const uint8_t *pixel, int bpp)
+static inline u16 get_pixel_val16(const uint8_t *pixel, int log_bpp)
{
- u16 pixel_val16 = 0;
- if (bpp == 2)
+ u16 pixel_val16;
+ if (log_bpp == 1)
pixel_val16 = *(const uint16_t *)pixel;
- else if (bpp == 4)
+ else
pixel_val16 = pixel32_to_be16(*(const uint32_t *)pixel);
return pixel_val16;
}
@@ -125,8 +121,9 @@ static void udl_compress_hline16(
const u8 *const pixel_end,
uint32_t *device_address_ptr,
uint8_t **command_buffer_ptr,
- const uint8_t *const cmd_buffer_end, int bpp)
+ const uint8_t *const cmd_buffer_end, int log_bpp)
{
+ const int bpp = 1 << log_bpp;
const u8 *pixel = *pixel_start_ptr;
uint32_t dev_addr = *device_address_ptr;
uint8_t *cmd = *command_buffer_ptr;
@@ -139,8 +136,6 @@ static void udl_compress_hline16(
const u8 *cmd_pixel_start, *cmd_pixel_end = NULL;
uint16_t pixel_val16;
- prefetchw((void *) cmd); /* pull in one cache line at least */
-
*cmd++ = 0xaf;
*cmd++ = 0x6b;
*cmd++ = (uint8_t) ((dev_addr >> 16) & 0xFF);
@@ -153,12 +148,11 @@ static void udl_compress_hline16(
raw_pixels_count_byte = cmd++; /* we'll know this later */
raw_pixel_start = pixel;
- cmd_pixel_end = pixel + (min(MAX_CMD_PIXELS + 1,
- min((int)(pixel_end - pixel) / bpp,
- (int)(cmd_buffer_end - cmd) / 2))) * bpp;
+ cmd_pixel_end = pixel + (min3(MAX_CMD_PIXELS + 1UL,
+ (unsigned long)(pixel_end - pixel) >> log_bpp,
+ (unsigned long)(cmd_buffer_end - 1 - cmd) / 2) << log_bpp);
- prefetch_range((void *) pixel, (cmd_pixel_end - pixel) * bpp);
- pixel_val16 = get_pixel_val16(pixel, bpp);
+ pixel_val16 = get_pixel_val16(pixel, log_bpp);
while (pixel < cmd_pixel_end) {
const u8 *const start = pixel;
@@ -170,7 +164,7 @@ static void udl_compress_hline16(
pixel += bpp;
while (pixel < cmd_pixel_end) {
- pixel_val16 = get_pixel_val16(pixel, bpp);
+ pixel_val16 = get_pixel_val16(pixel, log_bpp);
if (pixel_val16 != repeating_pixel_val16)
break;
pixel += bpp;
@@ -179,10 +173,10 @@ static void udl_compress_hline16(
if (unlikely(pixel > start + bpp)) {
/* go back and fill in raw pixel count */
*raw_pixels_count_byte = (((start -
- raw_pixel_start) / bpp) + 1) & 0xFF;
+ raw_pixel_start) >> log_bpp) + 1) & 0xFF;
/* immediately after raw data is repeat byte */
- *cmd++ = (((pixel - start) / bpp) - 1) & 0xFF;
+ *cmd++ = (((pixel - start) >> log_bpp) - 1) & 0xFF;
/* Then start another raw pixel span */
raw_pixel_start = pixel;
@@ -192,11 +186,14 @@ static void udl_compress_hline16(
if (pixel > raw_pixel_start) {
/* finalize last RAW span */
- *raw_pixels_count_byte = ((pixel-raw_pixel_start) / bpp) & 0xFF;
+ *raw_pixels_count_byte = ((pixel - raw_pixel_start) >> log_bpp) & 0xFF;
+ } else {
+ /* undo unused byte */
+ cmd--;
}
- *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) / bpp) & 0xFF;
- dev_addr += ((pixel - cmd_pixel_start) / bpp) * 2;
+ *cmd_pixels_count_byte = ((pixel - cmd_pixel_start) >> log_bpp) & 0xFF;
+ dev_addr += ((pixel - cmd_pixel_start) >> log_bpp) * 2;
}
if (cmd_buffer_end <= MIN_RLX_CMD_BYTES + cmd) {
@@ -219,19 +216,19 @@ static void udl_compress_hline16(
* (that we can only write to, slowly, and can never read), and (optionally)
* our shadow copy that tracks what's been sent to that hardware buffer.
*/
-int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
+int udl_render_hline(struct drm_device *dev, int log_bpp, struct urb **urb_ptr,
const char *front, char **urb_buf_ptr,
u32 byte_offset, u32 device_byte_offset,
u32 byte_width,
int *ident_ptr, int *sent_ptr)
{
const u8 *line_start, *line_end, *next_pixel;
- u32 base16 = 0 + (device_byte_offset / bpp) * 2;
+ u32 base16 = 0 + (device_byte_offset >> log_bpp) * 2;
struct urb *urb = *urb_ptr;
u8 *cmd = *urb_buf_ptr;
u8 *cmd_end = (u8 *) urb->transfer_buffer + urb->transfer_buffer_length;
- BUG_ON(!(bpp == 2 || bpp == 4));
+ BUG_ON(!(log_bpp == 1 || log_bpp == 2));
line_start = (u8 *) (front + byte_offset);
next_pixel = line_start;
@@ -241,7 +238,7 @@ int udl_render_hline(struct drm_device *dev, int bpp, struct urb **urb_ptr,
udl_compress_hline16(&next_pixel,
line_end, &base16,
- (u8 **) &cmd, (u8 *) cmd_end, bpp);
+ (u8 **) &cmd, (u8 *) cmd_end, log_bpp);
if (cmd >= cmd_end) {
int len = cmd - (u8 *) urb->transfer_buffer;
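
All of the udl_transfer.c hunks above rest on one invariant: udl only handles 2 or 4 bytes per pixel, so bpp == 1 << log_bpp with log_bpp in {1, 2} (enforced by the BUG_ON in udl_render_hline()), and every division or multiplication by bpp can become a shift. A self-contained check of that equivalence:

#include <assert.h>

int main(void)
{
	for (int log_bpp = 1; log_bpp <= 2; log_bpp++) {
		unsigned int bpp = 1u << log_bpp;

		/* byte offsets in this driver are always pixel-aligned */
		for (unsigned int x = 0; x < 1u << 16; x += bpp) {
			assert(x / bpp == x >> log_bpp);
			assert(x * bpp == x << log_bpp);
		}
	}
	return 0;
}
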
diff --git a/drivers/gpu/drm/v3d/v3d_drv.c b/drivers/gpu/drm/v3d/v3d_drv.c
index 1dceba2b42fd..2a85fa68ffea 100644
--- a/drivers/gpu/drm/v3d/v3d_drv.c
+++ b/drivers/gpu/drm/v3d/v3d_drv.c
@@ -145,13 +145,11 @@ v3d_open(struct drm_device *dev, struct drm_file *file)
static void
v3d_postclose(struct drm_device *dev, struct drm_file *file)
{
- struct v3d_dev *v3d = to_v3d_dev(dev);
struct v3d_file_priv *v3d_priv = file->driver_priv;
enum v3d_queue q;
for (q = 0; q < V3D_MAX_QUEUES; q++) {
- drm_sched_entity_destroy(&v3d->queue[q].sched,
- &v3d_priv->sched_entity[q]);
+ drm_sched_entity_destroy(&v3d_priv->sched_entity[q]);
}
kfree(v3d_priv);
diff --git a/drivers/gpu/drm/v3d/v3d_gem.c b/drivers/gpu/drm/v3d/v3d_gem.c
index e1fcbb4cd0ae..5ce24098a5fd 100644
--- a/drivers/gpu/drm/v3d/v3d_gem.c
+++ b/drivers/gpu/drm/v3d/v3d_gem.c
@@ -553,7 +553,6 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
mutex_lock(&v3d->sched_lock);
if (exec->bin.start != exec->bin.end) {
ret = drm_sched_job_init(&exec->bin.base,
- &v3d->queue[V3D_BIN].sched,
&v3d_priv->sched_entity[V3D_BIN],
v3d_priv);
if (ret)
@@ -568,7 +567,6 @@ v3d_submit_cl_ioctl(struct drm_device *dev, void *data,
}
ret = drm_sched_job_init(&exec->render.base,
- &v3d->queue[V3D_RENDER].sched,
&v3d_priv->sched_entity[V3D_RENDER],
v3d_priv);
if (ret)
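
Both v3d hunks above track the GPU scheduler API change in which a drm_sched_entity remembers the scheduler it was initialised with, so drm_sched_job_init() and drm_sched_entity_destroy() no longer take a struct drm_gpu_scheduler. A sketch of the resulting submit shape against the 4.19-era prototypes; job, entity and owner are assumed to be set up elsewhere (drm_sched_entity_init() binds the entity to its run-queues, and thus to a scheduler, once at open time):

#include <drm/gpu_scheduler.h>

static int submit_job(struct drm_sched_job *job,
		      struct drm_sched_entity *entity, void *owner)
{
	int ret;

	ret = drm_sched_job_init(job, entity, owner);  /* no sched argument */
	if (ret)
		return ret;

	drm_sched_entity_push_job(job, entity);
	return 0;
}
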
diff --git a/drivers/gpu/drm/vc4/vc4_drv.c b/drivers/gpu/drm/vc4/vc4_drv.c
index 04270a14fcaa..e2a15c63a81f 100644
--- a/drivers/gpu/drm/vc4/vc4_drv.c
+++ b/drivers/gpu/drm/vc4/vc4_drv.c
@@ -248,24 +248,6 @@ static void vc4_match_add_drivers(struct device *dev,
}
}
-static void vc4_kick_out_firmware_fb(void)
-{
- struct apertures_struct *ap;
-
- ap = alloc_apertures(1);
- if (!ap)
- return;
-
- /* Since VC4 is a UMA device, the simplefb node may have been
- * located anywhere in memory.
- */
- ap->ranges[0].base = 0;
- ap->ranges[0].size = ~0;
-
- drm_fb_helper_remove_conflicting_framebuffers(ap, "vc4drmfb", false);
- kfree(ap);
-}
-
static int vc4_drm_bind(struct device *dev)
{
struct platform_device *pdev = to_platform_device(dev);
@@ -298,7 +280,7 @@ static int vc4_drm_bind(struct device *dev)
if (ret)
goto gem_destroy;
- vc4_kick_out_firmware_fb();
+ drm_fb_helper_remove_conflicting_framebuffers(NULL, "vc4drmfb", false);
ret = drm_dev_register(drm, 0);
if (ret < 0)
diff --git a/drivers/gpu/drm/vc4/vc4_plane.c b/drivers/gpu/drm/vc4/vc4_plane.c
index 9d7a36f148cf..cf78f74bb87f 100644
--- a/drivers/gpu/drm/vc4/vc4_plane.c
+++ b/drivers/gpu/drm/vc4/vc4_plane.c
@@ -200,9 +200,7 @@ static void vc4_plane_reset(struct drm_plane *plane)
if (!vc4_state)
return;
- plane->state = &vc4_state->base;
- plane->state->alpha = DRM_BLEND_ALPHA_OPAQUE;
- vc4_state->base.plane = plane;
+ __drm_atomic_helper_plane_reset(plane, &vc4_state->base);
}
static void vc4_dlist_write(struct vc4_plane_state *vc4_state, u32 val)
@@ -320,6 +318,9 @@ static int vc4_plane_setup_clipping_and_scaling(struct drm_plane_state *state)
vc4_state->x_scaling[0] = VC4_SCALING_TPZ;
if (vc4_state->y_scaling[0] == VC4_SCALING_NONE)
vc4_state->y_scaling[0] = VC4_SCALING_TPZ;
+ } else {
+ vc4_state->x_scaling[1] = VC4_SCALING_NONE;
+ vc4_state->y_scaling[1] = VC4_SCALING_NONE;
}
vc4_state->is_unity = (vc4_state->x_scaling[0] == VC4_SCALING_NONE &&
diff --git a/drivers/gpu/drm/vgem/vgem_drv.c b/drivers/gpu/drm/vgem/vgem_drv.c
index 0e5620f76ee0..ec6af8b920da 100644
--- a/drivers/gpu/drm/vgem/vgem_drv.c
+++ b/drivers/gpu/drm/vgem/vgem_drv.c
@@ -504,7 +504,7 @@ out_free:
static void __exit vgem_exit(void)
{
drm_dev_unregister(&vgem_device->drm);
- drm_dev_unref(&vgem_device->drm);
+ drm_dev_put(&vgem_device->drm);
}
module_init(vgem_init);
diff --git a/drivers/gpu/drm/vgem/vgem_fence.c b/drivers/gpu/drm/vgem/vgem_fence.c
index b28876c222b4..e6ee71323a66 100644
--- a/drivers/gpu/drm/vgem/vgem_fence.c
+++ b/drivers/gpu/drm/vgem/vgem_fence.c
@@ -43,16 +43,6 @@ static const char *vgem_fence_get_timeline_name(struct dma_fence *fence)
return "unbound";
}
-static bool vgem_fence_signaled(struct dma_fence *fence)
-{
- return false;
-}
-
-static bool vgem_fence_enable_signaling(struct dma_fence *fence)
-{
- return true;
-}
-
static void vgem_fence_release(struct dma_fence *base)
{
struct vgem_fence *fence = container_of(base, typeof(*fence), base);
@@ -76,9 +66,6 @@ static void vgem_fence_timeline_value_str(struct dma_fence *fence, char *str,
static const struct dma_fence_ops vgem_fence_ops = {
.get_driver_name = vgem_fence_get_driver_name,
.get_timeline_name = vgem_fence_get_timeline_name,
- .enable_signaling = vgem_fence_enable_signaling,
- .signaled = vgem_fence_signaled,
- .wait = dma_fence_default_wait,
.release = vgem_fence_release,
.fence_value_str = vgem_fence_value_str,
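
The vgem deletion above is possible because the dma_fence core now treats .wait, .signaled and .enable_signaling as optional, falling back to dma_fence_default_wait() and to the signalled bit in fence->flags. A hedged sketch of the minimal ops table that remains mandatory; the example names are illustrative:

#include <linux/dma-fence.h>

static const char *ex_driver_name(struct dma_fence *f)
{
	return "example";
}

static const char *ex_timeline_name(struct dma_fence *f)
{
	return "unbound";
}

/* only the two name callbacks are required by the core */
static const struct dma_fence_ops example_fence_ops = {
	.get_driver_name = ex_driver_name,
	.get_timeline_name = ex_timeline_name,
};
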
diff --git a/drivers/gpu/drm/virtio/virtgpu_display.c b/drivers/gpu/drm/virtio/virtgpu_display.c
index 25503b933599..9f1e0a669d4c 100644
--- a/drivers/gpu/drm/virtio/virtgpu_display.c
+++ b/drivers/gpu/drm/virtio/virtgpu_display.c
@@ -109,6 +109,9 @@ static void virtio_gpu_crtc_mode_set_nofb(struct drm_crtc *crtc)
static void virtio_gpu_crtc_atomic_enable(struct drm_crtc *crtc,
struct drm_crtc_state *old_state)
{
+ struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
+
+ output->enabled = true;
}
static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
@@ -119,6 +122,7 @@ static void virtio_gpu_crtc_atomic_disable(struct drm_crtc *crtc,
struct virtio_gpu_output *output = drm_crtc_to_virtio_gpu_output(crtc);
virtio_gpu_cmd_set_scanout(vgdev, output->index, 0, 0, 0, 0, 0);
+ output->enabled = false;
}
static int virtio_gpu_crtc_atomic_check(struct drm_crtc *crtc,
diff --git a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
index 7df8d0c9026a..757ca28ab93e 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
+++ b/drivers/gpu/drm/virtio/virtgpu_drm_bus.c
@@ -28,26 +28,6 @@
#include "virtgpu_drv.h"
-static void virtio_pci_kick_out_firmware_fb(struct pci_dev *pci_dev)
-{
- struct apertures_struct *ap;
- bool primary;
-
- ap = alloc_apertures(1);
- if (!ap)
- return;
-
- ap->ranges[0].base = pci_resource_start(pci_dev, 0);
- ap->ranges[0].size = pci_resource_len(pci_dev, 0);
-
- primary = pci_dev->resource[PCI_ROM_RESOURCE].flags
- & IORESOURCE_ROM_SHADOW;
-
- drm_fb_helper_remove_conflicting_framebuffers(ap, "virtiodrmfb", primary);
-
- kfree(ap);
-}
-
int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
{
struct drm_device *dev;
@@ -69,7 +49,9 @@ int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
pname);
dev->pdev = pdev;
if (vga)
- virtio_pci_kick_out_firmware_fb(pdev);
+ drm_fb_helper_remove_conflicting_pci_framebuffers(pdev,
+ 0,
+ "virtiodrmfb");
snprintf(unique, sizeof(unique), "pci:%s", pname);
ret = drm_dev_set_unique(dev, unique);
@@ -85,6 +67,6 @@ int drm_virtio_init(struct drm_driver *driver, struct virtio_device *vdev)
return 0;
err_free:
- drm_dev_unref(dev);
+ drm_dev_put(dev);
return ret;
}
diff --git a/drivers/gpu/drm/virtio/virtgpu_drv.h b/drivers/gpu/drm/virtio/virtgpu_drv.h
index 65605e207bbe..f8f4a40dd1b8 100644
--- a/drivers/gpu/drm/virtio/virtgpu_drv.h
+++ b/drivers/gpu/drm/virtio/virtgpu_drv.h
@@ -57,6 +57,7 @@ struct virtio_gpu_object {
uint32_t hw_res_handle;
struct sg_table *pages;
+ uint32_t mapped;
void *vmap;
bool dumb;
struct ttm_place placement_code;
@@ -114,6 +115,7 @@ struct virtio_gpu_output {
struct virtio_gpu_update_cursor cursor;
int cur_x;
int cur_y;
+ bool enabled;
};
#define drm_crtc_to_virtio_gpu_output(x) \
container_of(x, struct virtio_gpu_output, crtc)
@@ -276,13 +278,13 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
struct virtio_gpu_object *obj,
uint32_t resource_id,
struct virtio_gpu_fence **fence);
+void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *obj);
int virtio_gpu_attach_status_page(struct virtio_gpu_device *vgdev);
int virtio_gpu_detach_status_page(struct virtio_gpu_device *vgdev);
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
struct virtio_gpu_output *output);
int virtio_gpu_cmd_get_display_info(struct virtio_gpu_device *vgdev);
-void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
- uint32_t resource_id);
int virtio_gpu_cmd_get_capset_info(struct virtio_gpu_device *vgdev, int idx);
int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
int idx, int version,
@@ -372,7 +374,7 @@ int virtgpu_gem_prime_mmap(struct drm_gem_object *obj,
static inline struct virtio_gpu_object*
virtio_gpu_object_ref(struct virtio_gpu_object *bo)
{
- ttm_bo_reference(&bo->tbo);
+ ttm_bo_get(&bo->tbo);
return bo;
}
@@ -383,9 +385,8 @@ static inline void virtio_gpu_object_unref(struct virtio_gpu_object **bo)
if ((*bo) == NULL)
return;
tbo = &((*bo)->tbo);
- ttm_bo_unref(&tbo);
- if (tbo == NULL)
- *bo = NULL;
+ ttm_bo_put(tbo);
+ *bo = NULL;
}
static inline u64 virtio_gpu_object_mmap_offset(struct virtio_gpu_object *bo)
diff --git a/drivers/gpu/drm/virtio/virtgpu_fb.c b/drivers/gpu/drm/virtio/virtgpu_fb.c
index a121b1c79522..b5cebc9a179a 100644
--- a/drivers/gpu/drm/virtio/virtgpu_fb.c
+++ b/drivers/gpu/drm/virtio/virtgpu_fb.c
@@ -291,7 +291,7 @@ static int virtio_gpufb_create(struct drm_fb_helper *helper,
return 0;
err_fb_alloc:
- virtio_gpu_cmd_resource_inval_backing(vgdev, resid);
+ virtio_gpu_object_detach(vgdev, obj);
err_obj_attach:
err_obj_vmap:
virtio_gpu_gem_free_object(&obj->gem_base);
diff --git a/drivers/gpu/drm/virtio/virtgpu_plane.c b/drivers/gpu/drm/virtio/virtgpu_plane.c
index dc5b5b2b7aab..88f2fb8c61c4 100644
--- a/drivers/gpu/drm/virtio/virtgpu_plane.c
+++ b/drivers/gpu/drm/virtio/virtgpu_plane.c
@@ -152,7 +152,7 @@ static void virtio_gpu_primary_plane_update(struct drm_plane *plane,
if (WARN_ON(!output))
return;
- if (plane->state->fb) {
+ if (plane->state->fb && output->enabled) {
vgfb = to_virtio_gpu_framebuffer(plane->state->fb);
bo = gem_to_virtio_gpu_obj(vgfb->base.obj[0]);
handle = bo->hw_res_handle;
diff --git a/drivers/gpu/drm/virtio/virtgpu_ttm.c b/drivers/gpu/drm/virtio/virtgpu_ttm.c
index 11f8ae5b5332..e3152d45c5f1 100644
--- a/drivers/gpu/drm/virtio/virtgpu_ttm.c
+++ b/drivers/gpu/drm/virtio/virtgpu_ttm.c
@@ -106,29 +106,6 @@ static void virtio_gpu_ttm_global_fini(struct virtio_gpu_device *vgdev)
}
}
-#if 0
-/*
- * Hmm, seems to not do anything useful. Leftover debug hack?
- * Something like printing pagefaults to kernel log?
- */
-static struct vm_operations_struct virtio_gpu_ttm_vm_ops;
-static const struct vm_operations_struct *ttm_vm_ops;
-
-static int virtio_gpu_ttm_fault(struct vm_fault *vmf)
-{
- struct ttm_buffer_object *bo;
- struct virtio_gpu_device *vgdev;
- int r;
-
- bo = (struct ttm_buffer_object *)vmf->vma->vm_private_data;
- if (bo == NULL)
- return VM_FAULT_NOPAGE;
- vgdev = virtio_gpu_get_vgdev(bo->bdev);
- r = ttm_vm_ops->fault(vmf);
- return r;
-}
-#endif
-
int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
{
struct drm_file *file_priv;
@@ -143,19 +120,8 @@ int virtio_gpu_mmap(struct file *filp, struct vm_area_struct *vma)
return -EINVAL;
}
r = ttm_bo_mmap(filp, vma, &vgdev->mman.bdev);
-#if 0
- if (unlikely(r != 0))
- return r;
- if (unlikely(ttm_vm_ops == NULL)) {
- ttm_vm_ops = vma->vm_ops;
- virtio_gpu_ttm_vm_ops = *ttm_vm_ops;
- virtio_gpu_ttm_vm_ops.fault = &virtio_gpu_ttm_fault;
- }
- vma->vm_ops = &virtio_gpu_ttm_vm_ops;
- return 0;
-#else
+
return r;
-#endif
}
static int virtio_gpu_invalidate_caches(struct ttm_bo_device *bdev,
@@ -377,8 +343,7 @@ static void virtio_gpu_bo_move_notify(struct ttm_buffer_object *tbo,
if (!new_mem || (new_mem->placement & TTM_PL_FLAG_SYSTEM)) {
if (bo->hw_res_handle)
- virtio_gpu_cmd_resource_inval_backing(vgdev,
- bo->hw_res_handle);
+ virtio_gpu_object_detach(vgdev, bo);
} else if (new_mem->placement & TTM_PL_FLAG_TT) {
if (bo->hw_res_handle) {
diff --git a/drivers/gpu/drm/virtio/virtgpu_vq.c b/drivers/gpu/drm/virtio/virtgpu_vq.c
index 020070d483d3..5784c3ea8767 100644
--- a/drivers/gpu/drm/virtio/virtgpu_vq.c
+++ b/drivers/gpu/drm/virtio/virtgpu_vq.c
@@ -423,8 +423,9 @@ void virtio_gpu_cmd_unref_resource(struct virtio_gpu_device *vgdev,
virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
}
-void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
- uint32_t resource_id)
+static void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
+ uint32_t resource_id,
+ struct virtio_gpu_fence **fence)
{
struct virtio_gpu_resource_detach_backing *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
@@ -435,7 +436,7 @@ void virtio_gpu_cmd_resource_inval_backing(struct virtio_gpu_device *vgdev,
cmd_p->hdr.type = cpu_to_le32(VIRTIO_GPU_CMD_RESOURCE_DETACH_BACKING);
cmd_p->resource_id = cpu_to_le32(resource_id);
- virtio_gpu_queue_ctrl_buffer(vgdev, vbuf);
+ virtio_gpu_queue_fenced_ctrl_buffer(vgdev, vbuf, &cmd_p->hdr, fence);
}
void virtio_gpu_cmd_set_scanout(struct virtio_gpu_device *vgdev,
@@ -648,11 +649,11 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
{
struct virtio_gpu_get_capset *cmd_p;
struct virtio_gpu_vbuffer *vbuf;
- int max_size = vgdev->capsets[idx].max_size;
+ int max_size;
struct virtio_gpu_drv_cap_cache *cache_ent;
void *resp_buf;
- if (idx > vgdev->num_capsets)
+ if (idx >= vgdev->num_capsets)
return -EINVAL;
if (version > vgdev->capsets[idx].max_version)
@@ -662,6 +663,7 @@ int virtio_gpu_cmd_get_capset(struct virtio_gpu_device *vgdev,
if (!cache_ent)
return -ENOMEM;
+ max_size = vgdev->capsets[idx].max_size;
cache_ent->caps_cache = kmalloc(max_size, GFP_KERNEL);
if (!cache_ent->caps_cache) {
kfree(cache_ent);
@@ -848,9 +850,10 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
uint32_t resource_id,
struct virtio_gpu_fence **fence)
{
+ bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
struct virtio_gpu_mem_entry *ents;
struct scatterlist *sg;
- int si;
+ int si, nents;
if (!obj->pages) {
int ret;
@@ -860,28 +863,60 @@ int virtio_gpu_object_attach(struct virtio_gpu_device *vgdev,
return ret;
}
+ if (use_dma_api) {
+ obj->mapped = dma_map_sg(vgdev->vdev->dev.parent,
+ obj->pages->sgl, obj->pages->nents,
+ DMA_TO_DEVICE);
+ nents = obj->mapped;
+ } else {
+ nents = obj->pages->nents;
+ }
+
/* gets freed when the ring has consumed it */
- ents = kmalloc_array(obj->pages->nents,
- sizeof(struct virtio_gpu_mem_entry),
+ ents = kmalloc_array(nents, sizeof(struct virtio_gpu_mem_entry),
GFP_KERNEL);
if (!ents) {
DRM_ERROR("failed to allocate ent list\n");
return -ENOMEM;
}
- for_each_sg(obj->pages->sgl, sg, obj->pages->nents, si) {
- ents[si].addr = cpu_to_le64(sg_phys(sg));
+ for_each_sg(obj->pages->sgl, sg, nents, si) {
+ ents[si].addr = cpu_to_le64(use_dma_api
+ ? sg_dma_address(sg)
+ : sg_phys(sg));
ents[si].length = cpu_to_le32(sg->length);
ents[si].padding = 0;
}
virtio_gpu_cmd_resource_attach_backing(vgdev, resource_id,
- ents, obj->pages->nents,
+ ents, nents,
fence);
obj->hw_res_handle = resource_id;
return 0;
}
+void virtio_gpu_object_detach(struct virtio_gpu_device *vgdev,
+ struct virtio_gpu_object *obj)
+{
+ bool use_dma_api = !virtio_has_iommu_quirk(vgdev->vdev);
+ struct virtio_gpu_fence *fence;
+
+ if (use_dma_api && obj->mapped) {
+ /* detach backing and wait for the host to process it ... */
+ virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, &fence);
+ dma_fence_wait(&fence->f, true);
+ dma_fence_put(&fence->f);
+
+ /* ... then tear down iommu mappings */
+ dma_unmap_sg(vgdev->vdev->dev.parent,
+ obj->pages->sgl, obj->mapped,
+ DMA_TO_DEVICE);
+ obj->mapped = 0;
+ } else {
+ virtio_gpu_cmd_resource_inval_backing(vgdev, obj->hw_res_handle, NULL);
+ }
+}
+
void virtio_gpu_cursor_ping(struct virtio_gpu_device *vgdev,
struct virtio_gpu_output *output)
{
diff --git a/drivers/gpu/drm/vkms/Makefile b/drivers/gpu/drm/vkms/Makefile
index 986297da51bf..37966914f70b 100644
--- a/drivers/gpu/drm/vkms/Makefile
+++ b/drivers/gpu/drm/vkms/Makefile
@@ -1,3 +1,3 @@
-vkms-y := vkms_drv.o vkms_plane.o vkms_output.o vkms_crtc.o vkms_gem.o
+vkms-y := vkms_drv.o vkms_plane.o vkms_output.o vkms_crtc.o vkms_gem.o vkms_crc.o
obj-$(CONFIG_DRM_VKMS) += vkms.o
diff --git a/drivers/gpu/drm/vkms/vkms_crc.c b/drivers/gpu/drm/vkms/vkms_crc.c
new file mode 100644
index 000000000000..68db42f15086
--- /dev/null
+++ b/drivers/gpu/drm/vkms/vkms_crc.c
@@ -0,0 +1,153 @@
+// SPDX-License-Identifier: GPL-2.0
+#include "vkms_drv.h"
+#include <linux/crc32.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+
+static uint32_t _vkms_get_crc(struct vkms_crc_data *crc_data)
+{
+ struct drm_framebuffer *fb = &crc_data->fb;
+ struct drm_gem_object *gem_obj = drm_gem_fb_get_obj(fb, 0);
+ struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(gem_obj);
+ u32 crc = 0;
+ int i = 0;
+ unsigned int x = crc_data->src.x1 >> 16;
+ unsigned int y = crc_data->src.y1 >> 16;
+ unsigned int height = drm_rect_height(&crc_data->src) >> 16;
+ unsigned int width = drm_rect_width(&crc_data->src) >> 16;
+ unsigned int cpp = fb->format->cpp[0];
+ unsigned int src_offset;
+ unsigned int size_byte = width * cpp;
+ void *vaddr;
+
+ mutex_lock(&vkms_obj->pages_lock);
+ vaddr = vkms_obj->vaddr;
+ if (WARN_ON(!vaddr))
+ goto out;
+
+ for (i = y; i < y + height; i++) {
+ src_offset = fb->offsets[0] + (i * fb->pitches[0]) + (x * cpp);
+ crc = crc32_le(crc, vaddr + src_offset, size_byte);
+ }
+
+out:
+ mutex_unlock(&vkms_obj->pages_lock);
+ return crc;
+}
+
+/**
+ * vkms_crc_work_handle - ordered work_struct to compute CRC
+ *
+ * @work: work_struct
+ *
+ * Work handler for computing CRCs. The work_struct is queued on an
+ * ordered workqueue by _vblank_handle() at each vblank and flushed
+ * in vkms_atomic_crtc_destroy_state().
+ */
+void vkms_crc_work_handle(struct work_struct *work)
+{
+ struct vkms_crtc_state *crtc_state = container_of(work,
+ struct vkms_crtc_state,
+ crc_work);
+ struct drm_crtc *crtc = crtc_state->base.crtc;
+ struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
+ struct vkms_device *vdev = container_of(out, struct vkms_device,
+ output);
+ struct vkms_crc_data *primary_crc = NULL;
+ struct drm_plane *plane;
+ u32 crc32 = 0;
+ u64 frame_start, frame_end;
+ unsigned long flags;
+
+ spin_lock_irqsave(&out->state_lock, flags);
+ frame_start = crtc_state->frame_start;
+ frame_end = crtc_state->frame_end;
+ spin_unlock_irqrestore(&out->state_lock, flags);
+
+ /* _vblank_handle() hasn't updated frame_start yet */
+ if (!frame_start || frame_start == frame_end)
+ goto out;
+
+ drm_for_each_plane(plane, &vdev->drm) {
+ struct vkms_plane_state *vplane_state;
+ struct vkms_crc_data *crc_data;
+
+ vplane_state = to_vkms_plane_state(plane->state);
+ crc_data = vplane_state->crc_data;
+
+ if (drm_framebuffer_read_refcount(&crc_data->fb) == 0)
+ continue;
+
+ if (plane->type == DRM_PLANE_TYPE_PRIMARY) {
+ primary_crc = crc_data;
+ break;
+ }
+ }
+
+ if (primary_crc)
+ crc32 = _vkms_get_crc(primary_crc);
+
+ frame_end = drm_crtc_accurate_vblank_count(crtc);
+
+ /* queue_work can return false if crc_work was already pending;
+ * add a crc entry for each frame missed in the meantime
+ */
+ while (frame_start <= frame_end)
+ drm_crtc_add_crc_entry(crtc, true, frame_start++, &crc32);
+
+out:
+ /* to avoid using the same value for frame number again */
+ spin_lock_irqsave(&out->state_lock, flags);
+ crtc_state->frame_end = frame_end;
+ crtc_state->frame_start = 0;
+ spin_unlock_irqrestore(&out->state_lock, flags);
+}
+
+static int vkms_crc_parse_source(const char *src_name, bool *enabled)
+{
+ int ret = 0;
+
+ if (!src_name) {
+ *enabled = false;
+ } else if (strcmp(src_name, "auto") == 0) {
+ *enabled = true;
+ } else {
+ *enabled = false;
+ ret = -EINVAL;
+ }
+
+ return ret;
+}
+
+int vkms_verify_crc_source(struct drm_crtc *crtc, const char *src_name,
+ size_t *values_cnt)
+{
+ bool enabled;
+
+ if (vkms_crc_parse_source(src_name, &enabled) < 0) {
+ DRM_DEBUG_DRIVER("unknown source %s\n", src_name);
+ return -EINVAL;
+ }
+
+ *values_cnt = 1;
+
+ return 0;
+}
+
+int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name)
+{
+ struct vkms_output *out = drm_crtc_to_vkms_output(crtc);
+ bool enabled = false;
+ unsigned long flags;
+ int ret = 0;
+
+ ret = vkms_crc_parse_source(src_name, &enabled);
+
+ /* make sure nothing is scheduled on crtc workq */
+ flush_workqueue(out->crc_workq);
+
+ spin_lock_irqsave(&out->lock, flags);
+ out->crc_enabled = enabled;
+ spin_unlock_irqrestore(&out->lock, flags);
+
+ return ret;
+}
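
For reference, the heart of _vkms_get_crc() above is a chained crc32_le() over each visible scanline, so the reported CRC covers exactly the width-by-height window of the plane's source rectangle. The same loop restated with illustrative parameter names:

#include <linux/crc32.h>
#include <linux/types.h>

static u32 crc_of_window(const u8 *vaddr, unsigned int pitch,
			 unsigned int x, unsigned int y,
			 unsigned int w, unsigned int h,
			 unsigned int cpp)
{
	u32 crc = 0;
	unsigned int line;

	/* feed one scanline at a time, chaining the running value */
	for (line = y; line < y + h; line++)
		crc = crc32_le(crc, vaddr + line * pitch + x * cpp, w * cpp);

	return crc;
}
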
diff --git a/drivers/gpu/drm/vkms/vkms_crtc.c b/drivers/gpu/drm/vkms/vkms_crtc.c
index 875fca662ac0..177bbcb38306 100644
--- a/drivers/gpu/drm/vkms/vkms_crtc.c
+++ b/drivers/gpu/drm/vkms/vkms_crtc.c
@@ -10,18 +10,44 @@
#include <drm/drm_atomic_helper.h>
#include <drm/drm_crtc_helper.h>
-static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
+static void _vblank_handle(struct vkms_output *output)
{
- struct vkms_output *output = container_of(timer, struct vkms_output,
- vblank_hrtimer);
struct drm_crtc *crtc = &output->crtc;
- int ret_overrun;
+ struct vkms_crtc_state *state = to_vkms_crtc_state(crtc->state);
bool ret;
+ spin_lock(&output->lock);
ret = drm_crtc_handle_vblank(crtc);
if (!ret)
DRM_ERROR("vkms failure on handling vblank");
+ if (state && output->crc_enabled) {
+ u64 frame = drm_crtc_accurate_vblank_count(crtc);
+
+ /* update frame_start only if a queued vkms_crc_work_handle()
+ * has read the data
+ */
+ spin_lock(&output->state_lock);
+ if (!state->frame_start)
+ state->frame_start = frame;
+ spin_unlock(&output->state_lock);
+
+ ret = queue_work(output->crc_workq, &state->crc_work);
+ if (!ret)
+ DRM_WARN("failed to queue vkms_crc_work_handle");
+ }
+
+ spin_unlock(&output->lock);
+}
+
+static enum hrtimer_restart vkms_vblank_simulate(struct hrtimer *timer)
+{
+ struct vkms_output *output = container_of(timer, struct vkms_output,
+ vblank_hrtimer);
+ int ret_overrun;
+
+ _vblank_handle(output);
+
ret_overrun = hrtimer_forward_now(&output->vblank_hrtimer,
output->period_ns);
@@ -64,15 +90,68 @@ bool vkms_get_vblank_timestamp(struct drm_device *dev, unsigned int pipe,
return true;
}
+static void vkms_atomic_crtc_reset(struct drm_crtc *crtc)
+{
+ struct vkms_crtc_state *vkms_state = NULL;
+
+ if (crtc->state) {
+ vkms_state = to_vkms_crtc_state(crtc->state);
+ __drm_atomic_helper_crtc_destroy_state(crtc->state);
+ kfree(vkms_state);
+ crtc->state = NULL;
+ }
+
+ vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
+ if (!vkms_state)
+ return;
+
+ crtc->state = &vkms_state->base;
+ crtc->state->crtc = crtc;
+}
+
+static struct drm_crtc_state *
+vkms_atomic_crtc_duplicate_state(struct drm_crtc *crtc)
+{
+ struct vkms_crtc_state *vkms_state;
+
+ if (WARN_ON(!crtc->state))
+ return NULL;
+
+ vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
+ if (!vkms_state)
+ return NULL;
+
+ __drm_atomic_helper_crtc_duplicate_state(crtc, &vkms_state->base);
+
+ INIT_WORK(&vkms_state->crc_work, vkms_crc_work_handle);
+
+ return &vkms_state->base;
+}
+
+static void vkms_atomic_crtc_destroy_state(struct drm_crtc *crtc,
+ struct drm_crtc_state *state)
+{
+ struct vkms_crtc_state *vkms_state = to_vkms_crtc_state(state);
+
+ __drm_atomic_helper_crtc_destroy_state(state);
+
+ if (vkms_state) {
+ flush_work(&vkms_state->crc_work);
+ kfree(vkms_state);
+ }
+}
+
static const struct drm_crtc_funcs vkms_crtc_funcs = {
.set_config = drm_atomic_helper_set_config,
.destroy = drm_crtc_cleanup,
.page_flip = drm_atomic_helper_page_flip,
- .reset = drm_atomic_helper_crtc_reset,
- .atomic_duplicate_state = drm_atomic_helper_crtc_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_crtc_destroy_state,
+ .reset = vkms_atomic_crtc_reset,
+ .atomic_duplicate_state = vkms_atomic_crtc_duplicate_state,
+ .atomic_destroy_state = vkms_atomic_crtc_destroy_state,
.enable_vblank = vkms_enable_vblank,
.disable_vblank = vkms_disable_vblank,
+ .set_crc_source = vkms_set_crc_source,
+ .verify_crc_source = vkms_verify_crc_source,
};
static void vkms_crtc_atomic_enable(struct drm_crtc *crtc,
@@ -87,9 +166,21 @@ static void vkms_crtc_atomic_disable(struct drm_crtc *crtc,
drm_crtc_vblank_off(crtc);
}
+static void vkms_crtc_atomic_begin(struct drm_crtc *crtc,
+ struct drm_crtc_state *old_crtc_state)
+{
+ struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);
+
+ /* This lock is held across the atomic commit to block the vblank
+ * timer from scheduling vkms_crc_work_handle() until crc_data has
+ * been updated
+ */
+ spin_lock_irq(&vkms_output->lock);
+}
+
static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
struct drm_crtc_state *old_crtc_state)
{
+ struct vkms_output *vkms_output = drm_crtc_to_vkms_output(crtc);
unsigned long flags;
if (crtc->state->event) {
@@ -104,9 +195,12 @@ static void vkms_crtc_atomic_flush(struct drm_crtc *crtc,
crtc->state->event = NULL;
}
+
+ spin_unlock_irq(&vkms_output->lock);
}
static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = {
+ .atomic_begin = vkms_crtc_atomic_begin,
.atomic_flush = vkms_crtc_atomic_flush,
.atomic_enable = vkms_crtc_atomic_enable,
.atomic_disable = vkms_crtc_atomic_disable,
@@ -115,6 +209,7 @@ static const struct drm_crtc_helper_funcs vkms_crtc_helper_funcs = {
int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_plane *primary, struct drm_plane *cursor)
{
+ struct vkms_output *vkms_out = drm_crtc_to_vkms_output(crtc);
int ret;
ret = drm_crtc_init_with_planes(dev, crtc, primary, cursor,
@@ -126,5 +221,10 @@ int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
drm_crtc_helper_add(crtc, &vkms_crtc_helper_funcs);
+ spin_lock_init(&vkms_out->lock);
+ spin_lock_init(&vkms_out->state_lock);
+
+ vkms_out->crc_workq = alloc_ordered_workqueue("vkms_crc_workq", 0);
+
return ret;
}
diff --git a/drivers/gpu/drm/vkms/vkms_drv.c b/drivers/gpu/drm/vkms/vkms_drv.c
index 37aa2ef33b21..bd9d4b2389bd 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.c
+++ b/drivers/gpu/drm/vkms/vkms_drv.c
@@ -44,8 +44,10 @@ static void vkms_release(struct drm_device *dev)
struct vkms_device *vkms = container_of(dev, struct vkms_device, drm);
platform_device_unregister(vkms->platform);
+ drm_atomic_helper_shutdown(&vkms->drm);
drm_mode_config_cleanup(&vkms->drm);
drm_dev_fini(&vkms->drm);
+ destroy_workqueue(vkms->output.crc_workq);
}
static struct drm_driver vkms_driver = {
diff --git a/drivers/gpu/drm/vkms/vkms_drv.h b/drivers/gpu/drm/vkms/vkms_drv.h
index 07be29f2dc44..80af6d3a65e7 100644
--- a/drivers/gpu/drm/vkms/vkms_drv.h
+++ b/drivers/gpu/drm/vkms/vkms_drv.h
@@ -20,6 +20,35 @@ static const u32 vkms_formats[] = {
DRM_FORMAT_XRGB8888,
};
+struct vkms_crc_data {
+ struct drm_rect src;
+ struct drm_framebuffer fb;
+};
+
+/**
+ * struct vkms_plane_state - Driver specific plane state
+ * @base: base plane state
+ * @crc_data: data required for CRC computation
+ */
+struct vkms_plane_state {
+ struct drm_plane_state base;
+ struct vkms_crc_data *crc_data;
+};
+
+/**
+ * struct vkms_crtc_state - Driver specific CRTC state
+ * @base: base CRTC state
+ * @crc_work: work struct to compute and add CRC entries
+ * @frame_start: start frame number for computed CRC
+ * @frame_end: end frame number for computed CRC
+ */
+struct vkms_crtc_state {
+ struct drm_crtc_state base;
+ struct work_struct crc_work;
+ u64 frame_start;
+ u64 frame_end;
+};
+
struct vkms_output {
struct drm_crtc crtc;
struct drm_encoder encoder;
@@ -27,6 +56,13 @@ struct vkms_output {
struct hrtimer vblank_hrtimer;
ktime_t period_ns;
struct drm_pending_vblank_event *event;
+ bool crc_enabled;
+ /* ordered wq for crc_work */
+ struct workqueue_struct *crc_workq;
+ /* protects concurrent access to crc_data */
+ spinlock_t lock;
+ /* protects concurrent access to crtc_state */
+ spinlock_t state_lock;
};
struct vkms_device {
@@ -39,6 +75,8 @@ struct vkms_gem_object {
struct drm_gem_object gem;
struct mutex pages_lock; /* Page lock used in page fault handler */
struct page **pages;
+ unsigned int vmap_count;
+ void *vaddr;
};
#define drm_crtc_to_vkms_output(target) \
@@ -47,6 +85,15 @@ struct vkms_gem_object {
#define drm_device_to_vkms_device(target) \
container_of(target, struct vkms_device, drm)
+#define drm_gem_to_vkms_gem(target)\
+ container_of(target, struct vkms_gem_object, gem)
+
+#define to_vkms_crtc_state(target)\
+ container_of(target, struct vkms_crtc_state, base)
+
+#define to_vkms_plane_state(target)\
+ container_of(target, struct vkms_plane_state, base)
+
/* CRTC */
int vkms_crtc_init(struct drm_device *dev, struct drm_crtc *crtc,
struct drm_plane *primary, struct drm_plane *cursor);
@@ -65,7 +112,7 @@ struct drm_gem_object *vkms_gem_create(struct drm_device *dev,
u32 *handle,
u64 size);
-int vkms_gem_fault(struct vm_fault *vmf);
+vm_fault_t vkms_gem_fault(struct vm_fault *vmf);
int vkms_dumb_create(struct drm_file *file, struct drm_device *dev,
struct drm_mode_create_dumb *args);
@@ -75,4 +122,14 @@ int vkms_dumb_map(struct drm_file *file, struct drm_device *dev,
void vkms_gem_free_object(struct drm_gem_object *obj);
+int vkms_gem_vmap(struct drm_gem_object *obj);
+
+void vkms_gem_vunmap(struct drm_gem_object *obj);
+
+/* CRC Support */
+int vkms_set_crc_source(struct drm_crtc *crtc, const char *src_name);
+int vkms_verify_crc_source(struct drm_crtc *crtc, const char *source_name,
+ size_t *values_cnt);
+void vkms_crc_work_handle(struct work_struct *work);
+
#endif /* _VKMS_DRV_H_ */
diff --git a/drivers/gpu/drm/vkms/vkms_gem.c b/drivers/gpu/drm/vkms/vkms_gem.c
index c7e38368602b..d04e988b4cbe 100644
--- a/drivers/gpu/drm/vkms/vkms_gem.c
+++ b/drivers/gpu/drm/vkms/vkms_gem.c
@@ -37,20 +37,22 @@ void vkms_gem_free_object(struct drm_gem_object *obj)
struct vkms_gem_object *gem = container_of(obj, struct vkms_gem_object,
gem);
- kvfree(gem->pages);
+ WARN_ON(gem->pages);
+ WARN_ON(gem->vaddr);
+
mutex_destroy(&gem->pages_lock);
drm_gem_object_release(obj);
kfree(gem);
}
-int vkms_gem_fault(struct vm_fault *vmf)
+vm_fault_t vkms_gem_fault(struct vm_fault *vmf)
{
struct vm_area_struct *vma = vmf->vma;
struct vkms_gem_object *obj = vma->vm_private_data;
unsigned long vaddr = vmf->address;
pgoff_t page_offset;
loff_t num_pages;
- int ret;
+ vm_fault_t ret = VM_FAULT_SIGBUS;
page_offset = (vaddr - vma->vm_start) >> PAGE_SHIFT;
num_pages = DIV_ROUND_UP(obj->gem.size, PAGE_SIZE);
@@ -58,7 +60,6 @@ int vkms_gem_fault(struct vm_fault *vmf)
if (page_offset > num_pages)
return VM_FAULT_SIGBUS;
- ret = -ENOENT;
mutex_lock(&obj->pages_lock);
if (obj->pages) {
get_page(obj->pages[page_offset]);
@@ -177,3 +178,77 @@ unref:
return ret;
}
+
+static struct page **_get_pages(struct vkms_gem_object *vkms_obj)
+{
+ struct drm_gem_object *gem_obj = &vkms_obj->gem;
+
+ if (!vkms_obj->pages) {
+ struct page **pages = drm_gem_get_pages(gem_obj);
+
+ if (IS_ERR(pages))
+ return pages;
+
+ if (cmpxchg(&vkms_obj->pages, NULL, pages))
+ drm_gem_put_pages(gem_obj, pages, false, true);
+ }
+
+ return vkms_obj->pages;
+}
+
+void vkms_gem_vunmap(struct drm_gem_object *obj)
+{
+ struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
+
+ mutex_lock(&vkms_obj->pages_lock);
+ if (vkms_obj->vmap_count < 1) {
+ WARN_ON(vkms_obj->vaddr);
+ WARN_ON(vkms_obj->pages);
+ mutex_unlock(&vkms_obj->pages_lock);
+ return;
+ }
+
+ vkms_obj->vmap_count--;
+
+ if (vkms_obj->vmap_count == 0) {
+ vunmap(vkms_obj->vaddr);
+ vkms_obj->vaddr = NULL;
+ drm_gem_put_pages(obj, vkms_obj->pages, false, true);
+ vkms_obj->pages = NULL;
+ }
+
+ mutex_unlock(&vkms_obj->pages_lock);
+}
+
+int vkms_gem_vmap(struct drm_gem_object *obj)
+{
+ struct vkms_gem_object *vkms_obj = drm_gem_to_vkms_gem(obj);
+ int ret = 0;
+
+ mutex_lock(&vkms_obj->pages_lock);
+
+ if (!vkms_obj->vaddr) {
+ unsigned int n_pages = obj->size >> PAGE_SHIFT;
+ struct page **pages = _get_pages(vkms_obj);
+
+ if (IS_ERR(pages)) {
+ ret = PTR_ERR(pages);
+ goto out;
+ }
+
+ vkms_obj->vaddr = vmap(pages, n_pages, VM_MAP, PAGE_KERNEL);
+ if (!vkms_obj->vaddr)
+ goto err_vmap;
+ }
+
+ vkms_obj->vmap_count++;
+ goto out;
+
+err_vmap:
+ ret = -ENOMEM;
+ drm_gem_put_pages(obj, vkms_obj->pages, false, true);
+ vkms_obj->pages = NULL;
+out:
+ mutex_unlock(&vkms_obj->pages_lock);
+ return ret;
+}
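
_get_pages() above uses the speculative-allocate-then-publish idiom: build the page array, then install the pointer with a single compare-and-swap and discard the copy if another path won the race. The same shape in portable C11, with atomic_compare_exchange_strong() standing in for the kernel's cmpxchg() and all names illustrative:

#include <stdatomic.h>
#include <stdlib.h>

static _Atomic(int *) cached;

static int *get_cached(void)
{
	int *mine, *expected = NULL;
	int *cur = atomic_load(&cached);

	if (cur)                     /* already published */
		return cur;

	mine = calloc(1, sizeof(*mine));
	if (!mine)
		return NULL;

	if (!atomic_compare_exchange_strong(&cached, &expected, mine)) {
		free(mine);          /* lost the race; expected holds the winner */
		return expected;
	}
	return mine;                 /* we published it */
}
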
diff --git a/drivers/gpu/drm/vkms/vkms_plane.c b/drivers/gpu/drm/vkms/vkms_plane.c
index 9f75b1e2c1c4..c91661631c76 100644
--- a/drivers/gpu/drm/vkms/vkms_plane.c
+++ b/drivers/gpu/drm/vkms/vkms_plane.c
@@ -8,24 +8,158 @@
#include "vkms_drv.h"
#include <drm/drm_plane_helper.h>
+#include <drm/drm_atomic.h>
#include <drm/drm_atomic_helper.h>
+#include <drm/drm_gem_framebuffer_helper.h>
+
+static struct drm_plane_state *
+vkms_plane_duplicate_state(struct drm_plane *plane)
+{
+ struct vkms_plane_state *vkms_state;
+ struct vkms_crc_data *crc_data;
+
+ vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
+ if (!vkms_state)
+ return NULL;
+
+ crc_data = kzalloc(sizeof(*crc_data), GFP_KERNEL);
+ if (WARN_ON(!crc_data))
+ DRM_INFO("Couldn't allocate crc_data");
+
+ vkms_state->crc_data = crc_data;
+
+ __drm_atomic_helper_plane_duplicate_state(plane,
+ &vkms_state->base);
+
+ return &vkms_state->base;
+}
+
+static void vkms_plane_destroy_state(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct vkms_plane_state *vkms_state = to_vkms_plane_state(old_state);
+ struct drm_crtc *crtc = vkms_state->base.crtc;
+
+ if (crtc) {
+ /* dropping the reference we acquired in
+ * vkms_primary_plane_update()
+ */
+ if (drm_framebuffer_read_refcount(&vkms_state->crc_data->fb))
+ drm_framebuffer_put(&vkms_state->crc_data->fb);
+ }
+
+ kfree(vkms_state->crc_data);
+ vkms_state->crc_data = NULL;
+
+ __drm_atomic_helper_plane_destroy_state(old_state);
+ kfree(vkms_state);
+}
+
+static void vkms_plane_reset(struct drm_plane *plane)
+{
+ struct vkms_plane_state *vkms_state;
+
+ if (plane->state)
+ vkms_plane_destroy_state(plane, plane->state);
+
+ vkms_state = kzalloc(sizeof(*vkms_state), GFP_KERNEL);
+ if (!vkms_state) {
+ DRM_ERROR("Cannot allocate vkms_plane_state\n");
+ return;
+ }
+
+ plane->state = &vkms_state->base;
+ plane->state->plane = plane;
+}
static const struct drm_plane_funcs vkms_plane_funcs = {
.update_plane = drm_atomic_helper_update_plane,
.disable_plane = drm_atomic_helper_disable_plane,
.destroy = drm_plane_cleanup,
- .reset = drm_atomic_helper_plane_reset,
- .atomic_duplicate_state = drm_atomic_helper_plane_duplicate_state,
- .atomic_destroy_state = drm_atomic_helper_plane_destroy_state,
+ .reset = vkms_plane_reset,
+ .atomic_duplicate_state = vkms_plane_duplicate_state,
+ .atomic_destroy_state = vkms_plane_destroy_state,
};
static void vkms_primary_plane_update(struct drm_plane *plane,
struct drm_plane_state *old_state)
{
+ struct vkms_plane_state *vkms_plane_state;
+ struct vkms_crc_data *crc_data;
+
+ if (!plane->state->crtc || !plane->state->fb)
+ return;
+
+ vkms_plane_state = to_vkms_plane_state(plane->state);
+ crc_data = vkms_plane_state->crc_data;
+ memcpy(&crc_data->src, &plane->state->src, sizeof(struct drm_rect));
+ memcpy(&crc_data->fb, plane->state->fb, sizeof(struct drm_framebuffer));
+ drm_framebuffer_get(&crc_data->fb);
+}
+
+static int vkms_plane_atomic_check(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_crtc_state *crtc_state;
+ int ret;
+
+ if (!state->fb || !state->crtc)
+ return 0;
+
+ crtc_state = drm_atomic_get_crtc_state(state->state, state->crtc);
+ if (IS_ERR(crtc_state))
+ return PTR_ERR(crtc_state);
+
+ ret = drm_atomic_helper_check_plane_state(state, crtc_state,
+ DRM_PLANE_HELPER_NO_SCALING,
+ DRM_PLANE_HELPER_NO_SCALING,
+ false, true);
+ if (ret != 0)
+ return ret;
+
+ /* for now primary plane must be visible and full screen */
+ if (!state->visible)
+ return -EINVAL;
+
+ return 0;
+}
+
+static int vkms_prepare_fb(struct drm_plane *plane,
+ struct drm_plane_state *state)
+{
+ struct drm_gem_object *gem_obj;
+ struct vkms_gem_object *vkms_obj;
+ int ret;
+
+ if (!state->fb)
+ return 0;
+
+ gem_obj = drm_gem_fb_get_obj(state->fb, 0);
+ vkms_obj = drm_gem_to_vkms_gem(gem_obj);
+ ret = vkms_gem_vmap(gem_obj);
+ if (ret)
+ DRM_ERROR("vmap failed: %d\n", ret);
+
+ return drm_gem_fb_prepare_fb(plane, state);
+}
+
+static void vkms_cleanup_fb(struct drm_plane *plane,
+ struct drm_plane_state *old_state)
+{
+ struct drm_gem_object *gem_obj;
+
+ if (!old_state->fb)
+ return;
+
+ gem_obj = drm_gem_fb_get_obj(old_state->fb, 0);
+ vkms_gem_vunmap(gem_obj);
}
static const struct drm_plane_helper_funcs vkms_primary_helper_funcs = {
.atomic_update = vkms_primary_plane_update,
+ .atomic_check = vkms_plane_atomic_check,
+ .prepare_fb = vkms_prepare_fb,
+ .cleanup_fb = vkms_cleanup_fb,
};
struct drm_plane *vkms_plane_init(struct vkms_device *vkmsdev)
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
index ddb1e9365a3e..b93c558dd86e 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_gmrid_manager.c
@@ -51,51 +51,34 @@ static int vmw_gmrid_man_get_node(struct ttm_mem_type_manager *man,
{
struct vmwgfx_gmrid_man *gman =
(struct vmwgfx_gmrid_man *)man->priv;
- int ret = 0;
int id;
mem->mm_node = NULL;
+ id = ida_alloc_max(&gman->gmr_ida, gman->max_gmr_ids - 1, GFP_KERNEL);
+ if (id < 0)
+ return id;
+
spin_lock(&gman->lock);
if (gman->max_gmr_pages > 0) {
gman->used_gmr_pages += bo->num_pages;
if (unlikely(gman->used_gmr_pages > gman->max_gmr_pages))
- goto out_err_locked;
+ goto nospace;
}
- do {
- spin_unlock(&gman->lock);
- if (unlikely(ida_pre_get(&gman->gmr_ida, GFP_KERNEL) == 0)) {
- ret = -ENOMEM;
- goto out_err;
- }
- spin_lock(&gman->lock);
-
- ret = ida_get_new(&gman->gmr_ida, &id);
- if (unlikely(ret == 0 && id >= gman->max_gmr_ids)) {
- ida_remove(&gman->gmr_ida, id);
- ret = 0;
- goto out_err_locked;
- }
- } while (ret == -EAGAIN);
-
- if (likely(ret == 0)) {
- mem->mm_node = gman;
- mem->start = id;
- mem->num_pages = bo->num_pages;
- } else
- goto out_err_locked;
+ mem->mm_node = gman;
+ mem->start = id;
+ mem->num_pages = bo->num_pages;
spin_unlock(&gman->lock);
return 0;
-out_err:
- spin_lock(&gman->lock);
-out_err_locked:
+nospace:
gman->used_gmr_pages -= bo->num_pages;
spin_unlock(&gman->lock);
- return ret;
+ ida_free(&gman->gmr_ida, id);
+ return 0;
}
static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
@@ -105,8 +88,8 @@ static void vmw_gmrid_man_put_node(struct ttm_mem_type_manager *man,
(struct vmwgfx_gmrid_man *)man->priv;
if (mem->mm_node) {
+ ida_free(&gman->gmr_ida, mem->start);
spin_lock(&gman->lock);
- ida_remove(&gman->gmr_ida, mem->start);
gman->used_gmr_pages -= mem->num_pages;
spin_unlock(&gman->lock);
mem->mm_node = NULL;
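
The vmwgfx conversion above replaces the ida_pre_get()/ida_get_new()/ida_remove() retry dance with the self-contained ida_alloc_max()/ida_free() pair, which preallocates internally and returns either an id in range or a negative errno. The minimal usage shape:

#include <linux/idr.h>

static DEFINE_IDA(example_ida);

static int grab_id(unsigned int max_ids)
{
	/* returns an id in [0, max_ids - 1] or -ENOMEM/-ENOSPC */
	return ida_alloc_max(&example_ida, max_ids - 1, GFP_KERNEL);
}

static void drop_id(int id)
{
	ida_free(&example_ida, id);
}
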
diff --git a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
index 23beff5d8e3c..0c25bb8faf80 100644
--- a/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
+++ b/drivers/gpu/drm/vmwgfx/vmwgfx_kms.c
@@ -720,9 +720,7 @@ void vmw_du_plane_reset(struct drm_plane *plane)
return;
}
- plane->state = &vps->base;
- plane->state->plane = plane;
- plane->state->rotation = DRM_MODE_ROTATE_0;
+ __drm_atomic_helper_plane_reset(plane, &vps->base);
}
diff --git a/drivers/gpu/drm/xen/xen_drm_front_gem.c b/drivers/gpu/drm/xen/xen_drm_front_gem.c
index c85bfe7571cb..47ff019d3aef 100644
--- a/drivers/gpu/drm/xen/xen_drm_front_gem.c
+++ b/drivers/gpu/drm/xen/xen_drm_front_gem.c
@@ -179,7 +179,7 @@ struct sg_table *xen_drm_front_gem_get_sg_table(struct drm_gem_object *gem_obj)
struct xen_gem_object *xen_obj = to_xen_gem_obj(gem_obj);
if (!xen_obj->pages)
- return NULL;
+ return ERR_PTR(-ENOMEM);
return drm_prime_pages_to_sg(xen_obj->pages, xen_obj->num_pages);
}